diff --git a/client-rs/Cargo.lock b/client-rs/Cargo.lock index 59ba818..e340484 100644 --- a/client-rs/Cargo.lock +++ b/client-rs/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "addr2line" @@ -62,7 +62,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a101d4d016f47f13890a74290fdd17b05dd175191d9337bc600791fb96e4dea8" dependencies = [ "alloy-eips", - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rlp", "alloy-serde", "alloy-trie", @@ -77,7 +77,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rlp", "serde", ] @@ -88,7 +88,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c986539255fb839d1533c128e190e557e52ff652c9ef62939e233a81dd93f7e" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rlp", "derive_more 1.0.0", "serde", @@ -102,7 +102,7 @@ checksum = "8b6755b093afef5925f25079dd5a7c8d096398b804ba60cb5275397b06b31689" dependencies = [ "alloy-eip2930", "alloy-eip7702", - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rlp", "alloy-serde", "c-kzg", @@ -135,9 +135,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6259a506ab13e1d658796c31e6e39d2e2ee89243bcc505ddc613b35732e0a430" +checksum = "788bb18e8f61d5d9340b52143f27771daf7e1dccbaf2741621d2493f9debf52e" dependencies = [ "alloy-rlp", "bytes", @@ -146,7 +146,6 @@ dependencies = [ "derive_more 1.0.0", "foldhash", "hashbrown 0.15.2", - "hex-literal", "indexmap 2.7.0", "itoa", "k256", @@ -180,7 +179,7 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -190,12 +189,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc37861dc8cbf5da35d346139fbe6e03ee7823cc21138a2c4a590d3b0b4b24be" dependencies = [ "alloy-eips", - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rpc-types-engine", "alloy-serde", "serde", "serde_with", - "thiserror 2.0.9", + "thiserror 2.0.10", ] [[package]] @@ -206,7 +205,7 @@ checksum = "5d297268357e3eae834ddd6888b15f764cbc0f4b3be9265f5f6ec239013f3d68" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rlp", "alloy-serde", "derive_more 1.0.0", @@ -220,7 +219,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9afa753a97002a33b2ccb707d9f15f31c81b8c1b786c95b73cc62bb1d1fd0c3f" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "serde", "serde_json", ] @@ -231,7 +230,7 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6917c79e837aa7b77b7a6dae9f89cbe15313ac161c4d3cfaf8909ef21f3d22d8" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rlp", "arrayvec", "derive_more 1.0.0", @@ -458,13 +457,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -481,7 +480,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -490,6 +489,61 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http 1.2.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.5.2", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper 1.0.2", + "tokio", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.2.0", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 1.0.2", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "backtrace" version = "0.3.74" @@ -537,7 +591,7 @@ dependencies = [ "reqwest 0.11.27", "serde", "tokio", - "types 0.2.1 (git+https://github.com/petscheit/lighthouse)", + "types", ] [[package]] @@ -591,24 +645,7 @@ dependencies = [ [[package]] name = "bls" version = "0.2.0" -dependencies = [ - "arbitrary", - "blst", - "ethereum-types", - "ethereum_hashing 0.6.0", - "ethereum_serde_utils 0.5.2", - "ethereum_ssz", - "hex", - "rand", - "serde", - "tree_hash 0.6.0", - "zeroize", -] - -[[package]] -name = "bls" -version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "arbitrary", "blst", @@ -691,9 +728,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.6" +version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d6dbb628b8f8555f86d0323c2eb39e3ec81901f4b83e091db8a6a76d316a333" +checksum = "a012a0df96dd6d06ba9a1b29d6402d1a5d77c6befd2566afdc26e10603dc93d7" dependencies = [ "shlex", ] @@ -712,8 +749,10 @@ checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", + "js-sys", "num-traits", "serde", + "wasm-bindgen", "windows-targets 0.52.6", ] @@ -729,9 +768,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.23" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" dependencies = [ "clap_builder", "clap_derive", @@ -739,9 +778,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.23" +version = 
"4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" dependencies = [ "anstream", "anstyle", @@ -751,14 +790,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -771,28 +810,43 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" name = "client-rs" version = "0.1.0" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rpc-types-beacon", + "axum", "beacon-state-proof", "bls12_381", + "chrono", "clap", "dotenv", "ethereum_serde_utils 0.7.0", + "futures", "glob", "hex", "itertools 0.13.0", + "num-traits", + "num_cpus", + "postgres-types", "rand", - "reqwest 0.12.11", + "reqwest 0.12.12", "serde", "serde_derive", "serde_json", "sha2", "starknet", "starknet-crypto", + "thiserror 2.0.10", "tokio", + "tokio-postgres", + "tokio-stream", + "tokio-util", + "tower", + "tower-http", + "tracing", + "tracing-subscriber", "tree_hash 0.8.0", "tree_hash_derive 0.8.0", - "types 0.2.1", + "types", + "uuid 1.11.0", ] [[package]] @@ -804,30 +858,15 @@ checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "compare_fields" version = "0.2.0" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "itertools 0.10.5", ] -[[package]] -name = "compare_fields" -version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" -dependencies = [ - "itertools 0.10.5", -] - -[[package]] -name = "compare_fields_derive" -version = "0.2.0" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "compare_fields_derive" version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "quote", "syn 1.0.109", @@ -999,7 +1038,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -1021,7 +1060,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -1063,7 +1102,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -1076,7 +1115,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -1096,7 +1135,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", "unicode-xid", ] @@ -1129,7 +1168,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -1230,28 +1269,15 @@ dependencies = [ "sha2", "sha3", "thiserror 1.0.69", - "uuid", 
+ "uuid 0.8.2", ] [[package]] name = "eth2_interop_keypairs" version = "0.2.0" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ - "bls 0.2.0", - "ethereum_hashing 0.6.0", - "hex", - "lazy_static", - "num-bigint", - "serde", - "serde_yaml", -] - -[[package]] -name = "eth2_interop_keypairs" -version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" -dependencies = [ - "bls 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "bls", "ethereum_hashing 0.6.0", "hex", "lazy_static", @@ -1329,7 +1355,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70cbccfccf81d67bff0ab36e591fa536c8a935b078a7b0e58c1d00d418332fc9" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "hex", "serde", "serde_derive", @@ -1475,6 +1501,21 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + [[package]] name = "futures-channel" version = "0.3.31" @@ -1482,6 +1523,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", + "futures-sink", ] [[package]] @@ -1490,6 +1532,34 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.95", +] + [[package]] name = "futures-sink" version = "0.3.31" @@ -1508,10 +1578,16 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ + "futures-channel", "futures-core", + "futures-io", + "futures-macro", + "futures-sink", "futures-task", + "memchr", "pin-project-lite", "pin-utils", + "slab", ] [[package]] @@ -1775,6 +1851,7 @@ dependencies = [ "http 1.2.0", "http-body 1.0.1", "httparse", + "httpdate", "itoa", "pin-project-lite", "smallvec", @@ -1999,7 +2076,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -2064,7 +2141,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -2101,14 +2178,7 @@ dependencies = [ [[package]] name = "int_to_bytes" version = "0.2.0" -dependencies = [ - "bytes", -] - -[[package]] -name = "int_to_bytes" -version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "bytes", ] @@ -2194,23 +2264,7 @@ dependencies = [ [[package]] name = "kzg" version = "0.1.0" -dependencies = [ - "arbitrary", - "c-kzg", - "derivative", - "ethereum_hashing 0.6.0", - "ethereum_serde_utils 0.5.2", - "ethereum_ssz", - "ethereum_ssz_derive", - "hex", - "serde", - "tree_hash 0.6.0", -] - -[[package]] -name = "kzg" -version = "0.1.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "arbitrary", "c-kzg", @@ -2277,9 +2331,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" @@ -2310,30 +2364,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" [[package]] -name = "memchr" -version = "2.7.4" +name = "matchit" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] -name = "merkle_proof" -version = "0.2.0" +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ - "ethereum-types", - "ethereum_hashing 0.6.0", - "lazy_static", - "safe_arith 0.1.0", + "cfg-if", + "digest 0.10.7", ] +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + [[package]] name = "merkle_proof" version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "ethereum-types", "ethereum_hashing 0.6.0", "lazy_static", - "safe_arith 0.1.0 (git+https://github.com/petscheit/lighthouse)", + "safe_arith", ] [[package]] @@ -2435,6 +2495,16 @@ dependencies = [ "tempfile", ] +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + [[package]] name = "num-bigint" version = "0.4.6" @@ -2482,9 +2552,9 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3409fc85ac27b27d971ea7cd1aabafd2eefa6de7e481c8d4f707225c117e81a" 
+checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" dependencies = [ "const-hex", "serde", @@ -2529,7 +2599,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -2550,6 +2620,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "pairing" version = "0.23.0" @@ -2636,15 +2712,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.9", + "thiserror 2.0.10", "ucd-trie", ] +[[package]] +name = "phf" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -2668,6 +2762,50 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +[[package]] +name = "postgres-derive" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69700ea4603c5ef32d447708e6a19cd3e8ac197a000842e97f527daea5e4175f" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.95", +] + +[[package]] +name = "postgres-protocol" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acda0ebdebc28befa84bee35e651e4c5f09073d668c7aed4cf7e23c3cda84b23" +dependencies = [ + "base64 0.22.1", + "byteorder", + "bytes", + "fallible-iterator", + "hmac", + "md-5", + "memchr", + "rand", + "sha2", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f66ea23a2d0e5734297357705193335e0a957696f34bed2f2faefacb2fec336f" +dependencies = [ + "bytes", + "chrono", + "fallible-iterator", + "postgres-derive", + "postgres-protocol", + "uuid 1.11.0", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -2899,9 +3037,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.11" +version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe060fe50f524be480214aba758c71f99f90ee8c83c5a36b5e9e1d568eb4eb3" +checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" dependencies = [ "base64 0.22.1", "bytes", @@ -2933,11 +3071,13 @@ dependencies = [ "system-configuration 0.6.1", "tokio", "tokio-native-tls", + "tokio-util", "tower", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-streams", "web-sys", "windows-registry", ] @@ -3070,9 +3210,9 @@ dependencies = [ 
[[package]] name = "rustix" -version = "0.38.42" +version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ "bitflags 2.6.0", "errno", @@ -3178,11 +3318,7 @@ checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "safe_arith" version = "0.1.0" - -[[package]] -name = "safe_arith" -version = "0.1.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" [[package]] name = "salsa20" @@ -3259,9 +3395,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.13.0" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -3308,14 +3444,14 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] name = "serde_json" -version = "1.0.134" +version = "1.0.135" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" +checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" dependencies = [ "itoa", "memchr", @@ -3334,6 +3470,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -3373,7 +3519,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -3420,6 +3566,15 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "shlex" version = "1.3.0" @@ -3445,6 +3600,12 @@ dependencies = [ "rand_core", ] +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + [[package]] name = "slab" version = "0.4.9" @@ -3622,7 +3783,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8986a940af916fc0a034f4e42c6ba76d94f1e97216d75447693dfd7aefaf3ef2" dependencies = [ "starknet-core", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -3683,6 +3844,17 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "stringprep" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", + "unicode-properties", +] + 
[[package]] name = "strsim" version = "0.10.0" @@ -3714,7 +3886,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -3740,15 +3912,7 @@ dependencies = [ [[package]] name = "swap_or_not_shuffle" version = "0.2.0" -dependencies = [ - "ethereum-types", - "ethereum_hashing 0.6.0", -] - -[[package]] -name = "swap_or_not_shuffle" -version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "ethereum-types", "ethereum_hashing 0.6.0", @@ -3767,9 +3931,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.93" +version = "2.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c786062daee0d6db1132800e623df74274a0a87322d8e183338e01b3d98d058" +checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" dependencies = [ "proc-macro2", "quote", @@ -3799,7 +3963,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -3852,12 +4016,13 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.14.0" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" dependencies = [ "cfg-if", "fastrand", + "getrandom", "once_cell", "rustix", "windows-sys 0.59.0", @@ -3866,15 +4031,7 @@ dependencies = [ [[package]] name = "test_random_derive" version = "0.2.0" -dependencies = [ - "quote", - "syn 1.0.109", -] - -[[package]] -name = "test_random_derive" -version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "quote", "syn 1.0.109", @@ -3891,11 +4048,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" +checksum = "a3ac7f54ca534db81081ef1c1e7f6ea8a3ef428d2fc069097c079443d24124d3" dependencies = [ - "thiserror-impl 2.0.9", + "thiserror-impl 2.0.10", ] [[package]] @@ -3906,18 +4063,28 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] name = "thiserror-impl" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" +checksum = "9e9465d30713b56a37ede7185763c3492a91be2f5fa68d958c44e41ab9248beb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", ] [[package]] @@ -3979,11 +4146,26 @@ dependencies = [ "zerovec", ] +[[package]] +name = "tinyvec" +version = "1.8.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" -version = "1.42.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", @@ -3999,13 +4181,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -4018,6 +4200,32 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-postgres" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b5d3742945bc7d7f210693b0c58ae542c6fd47b17adbbda0885f3dcb34a6bdb" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot", + "percent-encoding", + "phf", + "pin-project-lite", + "postgres-protocol", + "postgres-types", + "rand", + "socket2", + "tokio", + "tokio-util", + "whoami", +] + [[package]] name = "tokio-rustls" version = "0.24.1" @@ -4038,6 +4246,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.13" @@ -4081,6 +4300,24 @@ dependencies = [ "tokio", "tower-layer", "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" +dependencies = [ + "bitflags 2.6.0", + "bytes", + "http 1.2.0", + "http-body 1.0.1", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", ] [[package]] @@ -4101,10 +4338,23 @@ version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ + "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.95", +] + [[package]] name = "tracing-core" version = "0.1.33" @@ -4112,6 +4362,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "nu-ansi-term", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", ] [[package]] @@ -4131,7 +4407,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "373495c23db675a5192de8b610395e1bec324d596f9e6111192ce903dc11403a" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "ethereum_hashing 0.7.0", "smallvec", ] @@ -4156,7 +4432,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -4184,78 +4460,29 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "types" version = "0.2.1" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "alloy-primitives 0.7.7", "alloy-rlp", "arbitrary", - "bls 0.2.0", - "compare_fields 0.2.0", - "compare_fields_derive 0.2.0", - "derivative", - "eth2_interop_keypairs 0.2.0", - "ethereum-types", - "ethereum_hashing 0.6.0", - "ethereum_serde_utils 0.5.2", - "ethereum_ssz", - "ethereum_ssz_derive", - "hex", - "int_to_bytes 0.2.0", - "itertools 0.10.5", - "kzg 0.1.0", - "lazy_static", - "log", - "maplit", - "merkle_proof 0.2.0", - "metastruct", - "milhouse", - "parking_lot", - "rand", - "rand_xorshift", - "rayon", - "regex", - "rpds", - "rusqlite", - "safe_arith 0.1.0", - "serde", - "serde_json", - "serde_yaml", - "slog", - "smallvec", - "ssz_types", - "superstruct", - "swap_or_not_shuffle 0.2.0", - "tempfile", - "test_random_derive 0.2.0", - "tree_hash 0.6.0", - "tree_hash_derive 0.6.0", -] - -[[package]] -name = "types" -version = "0.2.1" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" -dependencies = [ - "alloy-primitives 0.7.7", - "alloy-rlp", - "arbitrary", - "bls 0.2.0 (git+https://github.com/petscheit/lighthouse)", - "compare_fields 0.2.0 (git+https://github.com/petscheit/lighthouse)", - "compare_fields_derive 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "bls", + "compare_fields", + "compare_fields_derive", "derivative", - "eth2_interop_keypairs 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "eth2_interop_keypairs", "ethereum-types", "ethereum_hashing 0.6.0", "ethereum_serde_utils 0.5.2", "ethereum_ssz", "ethereum_ssz_derive", "hex", - "int_to_bytes 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "int_to_bytes", "itertools 0.10.5", - "kzg 0.1.0 (git+https://github.com/petscheit/lighthouse)", + "kzg", "lazy_static", "log", "maplit", - "merkle_proof 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "merkle_proof", "metastruct", "milhouse", "parking_lot", @@ -4265,7 +4492,7 @@ dependencies = [ "regex", "rpds", "rusqlite", - "safe_arith 0.1.0 (git+https://github.com/petscheit/lighthouse)", + "safe_arith", "serde", "serde_json", "serde_yaml", @@ -4273,9 +4500,9 @@ dependencies = [ "smallvec", "ssz_types", "superstruct", - "swap_or_not_shuffle 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "swap_or_not_shuffle", "tempfile", - "test_random_derive 0.2.0 
(git+https://github.com/petscheit/lighthouse)", + "test_random_derive", "tree_hash 0.6.0", "tree_hash_derive 0.6.0", ] @@ -4311,12 +4538,33 @@ version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" +[[package]] +name = "unicode-bidi" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" + [[package]] name = "unicode-ident" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" +[[package]] +name = "unicode-normalization" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-properties" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -4374,6 +4622,28 @@ dependencies = [ "serde", ] +[[package]] +name = "uuid" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" +dependencies = [ + "getrandom", + "rand", + "uuid-macro-internal", +] + +[[package]] +name = "uuid-macro-internal" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b91f57fe13a38d0ce9e28a03463d8d3c2468ed03d75375110ec71d93b449a08" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.95", +] + [[package]] name = "valuable" version = "0.1.0" @@ -4422,6 +4692,12 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" version = "0.2.99" @@ -4443,7 +4719,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", "wasm-bindgen-shared", ] @@ -4478,7 +4754,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4489,6 +4765,19 @@ version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "web-sys" version = "0.3.76" @@ -4505,6 +4794,39 @@ version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" +[[package]] +name = "whoami" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" +dependencies = [ + "redox_syscall", + "wasite", + "web-sys", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + [[package]] name = "windows-core" version = "0.52.0" @@ -4694,9 +5016,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "39281189af81c07ec09db316b302a3e67bf9bd7cbf6c820b50e35fee9c2fa980" dependencies = [ "memchr", ] @@ -4752,7 +5074,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", "synstructure", ] @@ -4774,7 +5096,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -4794,7 +5116,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", "synstructure", ] @@ -4815,7 +5137,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -4837,5 +5159,5 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index dda3b60..9fd96fc 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -3,6 +3,14 @@ name = "client-rs" version = "0.1.0" edition = "2021" +[[bin]] +name = "daemon" +path = "src/daemon.rs" + +[[bin]] +name = "cli" +path = "src/main.rs" + [dependencies] alloy-primitives = "0.8.13" ethereum_serde_utils = "0.7.0" @@ -12,9 +20,9 @@ serde_derive = "1.0.215" serde_json = "1.0.133" tokio = { version = "1.0", features = ["full"] } beacon-state-proof = { git = "https://github.com/petscheit/beacon-state-proof" } -types = { path = "../../lighthouse/consensus/types", package = "types" } +types = { git = "https://github.com/petscheit/lighthouse.git", package = "types" } sha2 = "0.10.8" -reqwest = { version = "0.12.9", features = ["json", "multipart"] } +reqwest = { version = "0.12.9", features = ["json", "multipart", "stream"] } rand = "0.8.5" alloy-rpc-types-beacon = "0.7.2" itertools = "0.13.0" @@ -24,5 +32,29 @@ starknet = "0.12.0" tree_hash_derive = "0.8.0" tree_hash = "0.8.0" dotenv = "0.15" +tokio-postgres = { version = "0.7.12", features = [ + "with-uuid-1", + "with-chrono-0_4", +] } +axum = "0.7.9" +thiserror = "2.0.9" +tracing = "0.1.41" +tracing-subscriber = "0.3.19" +tokio-stream = "0.1.17" 
+futures = "0.3" +uuid = { version = "1.11.0", features = [ + "v4", + "fast-rng", + "macro-diagnostics", +] } +postgres-types = { version = "0.2.8", features = ["derive"] } +num_cpus = "1.16.0" + + starknet-crypto = "0.7.3" glob = "0.3.2" +num-traits = "0.2.19" +tower = "0.5.2" +tower-http = { version = "0.6.2", features = ["trace", "timeout"] } +chrono = { version = "0.4.39", features = ["serde"] } +tokio-util = "0.7.13" diff --git a/client-rs/Dockerfile b/client-rs/Dockerfile new file mode 100644 index 0000000..6df7fdb --- /dev/null +++ b/client-rs/Dockerfile @@ -0,0 +1,39 @@ +FROM rust:1.72 as builder + +WORKDIR /usr/src/app + +COPY Cargo.toml Cargo.lock ./ + +COPY src ./src + +RUN cargo build --release --bin daemon + +FROM debian:bullseye + +RUN apt-get update && apt-get install -y \ + libpq-dev \ + postgresql \ + && rm -rf /var/lib/apt/lists/* + +RUN mkdir -p /usr/src/app/batches + +WORKDIR /usr/src/app + +COPY --from=builder /usr/src/app/target/release/daemon /usr/src/app/ + +RUN mkdir -p /var/lib/postgresql/data && chown -R postgres:postgres /var/lib/postgresql + +COPY scripts/entrypoint.sh /usr/local/bin/entrypoint.sh +RUN chmod +x /usr/local/bin/entrypoint.sh + +EXPOSE 5432 + +USER postgres + +RUN /usr/lib/postgresql/14/bin/initdb -D /var/lib/postgresql/data + +USER root + +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] + +CMD ["/usr/src/app/daemon"] diff --git a/client-rs/db_structure.sql b/client-rs/db_structure.sql new file mode 100644 index 0000000..09f6848 --- /dev/null +++ b/client-rs/db_structure.sql @@ -0,0 +1,45 @@ +CREATE TABLE jobs ( + job_uuid UUID PRIMARY KEY, + job_status TEXT NOT NULL, + atlantic_proof_generate_batch_id TEXT NULL, + atlantic_proof_wrapper_batch_id TEXT NULL, + slot BIGINT NOT NULL, -- Slot associated with the job + batch_range_begin_epoch BIGINT NULL, + batch_range_end_epoch BIGINT NULL, + type TEXT NOT NULL, + tx_hash TEXT NULL, + failed_at_step TEXT NULL, + retries_count BIGINT NULL, + last_failure_time TIMESTAMP NULL, + updated_at TIMESTAMP DEFAULT NOW (), + created_at TIMESTAMP DEFAULT NOW () +); + +CREATE TABLE epoch_merkle_paths ( + epoch_id BIGINT NOT NULL, + path_index BIGINT NOT NULL, + merkle_path TEXT NOT NULL, + PRIMARY KEY (epoch_id, path_index) -- Ensures uniqueness of the combination +); + +CREATE TABLE verified_epoch ( + epoch_id BIGINT PRIMARY KEY, + beacon_header_root TEXT NOT NULL, -- Header root hash of the Beacon chain header + beacon_state_root TEXT NOT NULL, -- State root hash of the Beacon chain state + slot BIGINT NOT NULL, -- The number of slot at which this epoch was verified + committee_hash TEXT NOT NULL, -- Sync committee hash of the sync commitee related to this epoch + n_signers BIGINT NOT NULL, -- Number of epoch signers + execution_header_hash TEXT NOT NULL, -- Execution layer blockhash + execution_header_height BIGINT NOT NULL -- Execution layer height +); + +CREATE TABLE verified_sync_committee ( + sync_committee_id BIGINT PRIMARY KEY, -- Unique identifier for sync committee (slot number/0x2000) + sync_committee_hash TEXT NOT NULL -- Sync committee hash that we are creating inside bankai +); + +CREATE TABLE daemon_state ( + latest_known_beacon_slot BIGINT NOT NULL, + latest_known_beacon_block BYTEA NOT NULL, + updated_at TIMESTAMP DEFAULT NOW () +); diff --git a/client-rs/docker-compose.dev.yml b/client-rs/docker-compose.dev.yml new file mode 100644 index 0000000..cec21ec --- /dev/null +++ b/client-rs/docker-compose.dev.yml @@ -0,0 +1,31 @@ +services: + postgres: + image: postgres:14 + container_name: postgres + 
diff --git a/client-rs/docker-compose.dev.yml b/client-rs/docker-compose.dev.yml
new file mode 100644
index 0000000..cec21ec
--- /dev/null
+++ b/client-rs/docker-compose.dev.yml
@@ -0,0 +1,31 @@
+services:
+  postgres:
+    image: postgres:14
+    container_name: postgres
+    environment:
+      POSTGRES_USER: ${POSTGRES_USER:-postgres}
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
+      POSTGRES_DB: ${POSTGRES_DB:-bankai}
+    volumes:
+      - pgdata:/var/lib/postgresql/data
+    ports:
+      - ${POSTGRES_PORT:-5432}:${POSTGRES_PORT:-5432}
+
+  daemon:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    container_name: bankai-daemon
+    depends_on:
+      - postgres
+    env_file:
+      - .env.sepolia
+    #environment:
+    #  POSTGRESQL_USER
+    volumes:
+      - ./batches:/usr/src/app/batches
+    ports:
+      - "3000:3000"
+
+volumes:
+  pgdata:
diff --git a/client-rs/scripts/entrypoint.sh b/client-rs/scripts/entrypoint.sh
new file mode 100644
index 0000000..aa08453
--- /dev/null
+++ b/client-rs/scripts/entrypoint.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+set -e
+
+su postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D /var/lib/postgresql/data -l logfile start"
+
+sleep 5
+
+su postgres -c "psql -c \"CREATE USER postgres WITH SUPERUSER PASSWORD 'postgres';\"" || true
+su postgres -c "psql -c \"CREATE DATABASE bankai_sepolia;\"" || true
+
+# We need to do migration here, create initial DB structure from the DB file
+
+echo "PostgreSQL is running. Starting the daemon..."
+
+exec "$@"
diff --git a/client-rs/scripts/wait-for-postgres.sh b/client-rs/scripts/wait-for-postgres.sh
new file mode 100644
index 0000000..14e0584
--- /dev/null
+++ b/client-rs/scripts/wait-for-postgres.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+while ! nc -z localhost 5434; do sleep 1; done
+echo "Postgres ready"
+sleep 1
+exit 0
diff --git a/client-rs/src/bankai_client.rs b/client-rs/src/bankai_client.rs
new file mode 100644
index 0000000..a00b67b
--- /dev/null
+++ b/client-rs/src/bankai_client.rs
@@ -0,0 +1,96 @@
+use crate::constants;
+use crate::{
+    contract_init::ContractInitializationData,
+    epoch_update::EpochUpdate,
+    state::Error,
+    sync_committee::SyncCommitteeUpdate,
+    utils::{
+        atlantic_client::AtlanticClient, rpc::BeaconRpcClient, starknet_client::StarknetClient,
+        transactor_client::TransactorClient,
+    },
+    BankaiConfig,
+};
+use dotenv::from_filename;
+use std::env;
+use tracing::info;
+
+#[derive(Debug)]
+pub struct BankaiClient {
+    pub client: BeaconRpcClient,
+    pub starknet_client: StarknetClient,
+    pub config: BankaiConfig,
+    pub atlantic_client: AtlanticClient,
+    pub transactor_client: TransactorClient,
+}
+
+impl BankaiClient {
+    pub async fn new() -> Self {
+        from_filename(".env.sepolia").ok();
+        let config = BankaiConfig::default();
+        Self {
+            client: BeaconRpcClient::new(env::var("BEACON_RPC_URL").unwrap()),
+            starknet_client: StarknetClient::new(
+                env::var("STARKNET_RPC_URL").unwrap().as_str(),
+                env::var("STARKNET_ADDRESS").unwrap().as_str(),
+                env::var("STARKNET_PRIVATE_KEY").unwrap().as_str(),
+            )
+            .await
+            .unwrap(),
+            atlantic_client: AtlanticClient::new(
+                config.atlantic_endpoint.clone(),
+                env::var("ATLANTIC_API_KEY").unwrap(),
+            ),
+            transactor_client: TransactorClient::new(
+                config.transactor_endpoint.clone(),
+                env::var("TRANSACTOR_API_KEY").unwrap(),
+            ),
+            config,
+        }
+    }
+
+    pub async fn get_sync_committee_update(
+        &self,
+        mut slot: u64,
+    ) -> Result<SyncCommitteeUpdate, Error> {
+        let mut attempts = 0;
+
+        // Before we start generating the proof, we ensure the slot was not missed
+        let _header = loop {
+            match self.client.get_header(slot).await {
+                Ok(header) => break header,
+                Err(Error::EmptySlotDetected(_)) => {
+                    attempts += 1;
+                    if attempts >= constants::MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS {
+                        return Err(Error::EmptySlotDetected(slot));
+                    }
+                    slot += 1;
+                    info!(
+                        "Empty slot detected! Attempt {}/{}.
Fetching slot: {}", + attempts, + constants::MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS, + slot + ); + } + Err(e) => return Err(e), // Propagate other errors immediately + } + }; + + let proof: SyncCommitteeUpdate = SyncCommitteeUpdate::new(&self.client, slot).await?; + + Ok(proof) + } + + pub async fn get_epoch_proof(&self, slot: u64) -> Result { + let epoch_proof = EpochUpdate::new(&self.client, slot).await?; + Ok(epoch_proof) + } + + pub async fn get_contract_initialization_data( + &self, + slot: u64, + config: &BankaiConfig, + ) -> Result { + let contract_init = ContractInitializationData::new(&self.client, slot, config).await?; + Ok(contract_init) + } +} diff --git a/client-rs/src/config.rs b/client-rs/src/config.rs index 72f234f..567b9d3 100644 --- a/client-rs/src/config.rs +++ b/client-rs/src/config.rs @@ -1,6 +1,11 @@ +use crate::constants::{ + MAX_CONCURRENT_PIE_GENERATIONS, MAX_CONCURRENT_RPC_DATA_FETCH_JOBS, STARKNET_SEPOLIA, +}; use starknet::core::types::Felt; +use std::sync::Arc; +use tokio::sync::Semaphore; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct BankaiConfig { pub contract_class_hash: Felt, pub contract_address: Felt, @@ -12,17 +17,21 @@ pub struct BankaiConfig { pub epoch_batch_circuit_path: String, pub committee_circuit_path: String, pub atlantic_endpoint: String, + pub transactor_endpoint: String, + pub pie_generation_semaphore: Arc, + pub epoch_data_fetching_semaphore: Arc, + pub proof_settlement_chain_id: Felt, } impl Default for BankaiConfig { fn default() -> Self { Self { contract_class_hash: Felt::from_hex( - "0x02b5b08b233132464c437cf15509338e65ae7acc20419a37a9449a1d8e927f46", + "0x00034b6d1cd9858aeabcee33ef5ec5cd04be155d79ca2bbf9036700cb6c7c287", ) .unwrap(), contract_address: Felt::from_hex( - "0x440b622a97fab3f31a35e7e710a8a508f6693d61d74171b5c2304f5e37ccde8", + "0x1b7b70023bc2429d4453ce75d75f3e8b01b0730ca83068a82b4d17aa88a25e3", ) .unwrap(), committee_update_program_hash: Felt::from_hex( @@ -34,7 +43,7 @@ impl Default for BankaiConfig { ) .unwrap(), epoch_batch_program_hash: Felt::from_hex( - "0x19bc492f1036c889939a5174e8f77ffbe89676c8d5f1adef0a825d2a6cc2a2f", + "0x5f4dad2d8549e91c25694875eb02fc2910eeead0e1a13d3061464a3eaa4bd8d", ) .unwrap(), contract_path: "../contract/target/release/bankai_BankaiContract.contract_class.json" @@ -43,6 +52,13 @@ impl Default for BankaiConfig { epoch_batch_circuit_path: "../cairo/build/epoch_batch.json".to_string(), committee_circuit_path: "../cairo/build/committee_update.json".to_string(), atlantic_endpoint: "https://atlantic.api.herodotus.cloud".to_string(), + transactor_endpoint: "https://staging.api.herodotus.cloud".to_string(), + // Set how many concurrent pie generation (trace generation) tasks are allowed + pie_generation_semaphore: Arc::new(Semaphore::new(MAX_CONCURRENT_PIE_GENERATIONS)), // 3 at once + epoch_data_fetching_semaphore: Arc::new(Semaphore::new( + MAX_CONCURRENT_RPC_DATA_FETCH_JOBS, + )), // 2 at once + proof_settlement_chain_id: Felt::from_hex(STARKNET_SEPOLIA).unwrap(), } } } diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs new file mode 100644 index 0000000..c5e0296 --- /dev/null +++ b/client-rs/src/constants.rs @@ -0,0 +1,17 @@ +pub const SLOTS_PER_EPOCH: u64 = 32; // For mainnet +pub const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet +pub const TARGET_BATCH_SIZE: u64 = 32; // Defines how many epochs in one batch +pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; // For mainnet +pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 16; // Define the limit of how many jobs can be 
in state "in progress" concurrently +pub const MAX_CONCURRENT_PIE_GENERATIONS: usize = 1; // Define how many concurrent trace (pie file) generation jobs are allowed to not exhaust resources +pub const MAX_CONCURRENT_RPC_DATA_FETCH_JOBS: usize = 1; // Define how many data fetching jobs can be performed concurrently to not overload RPC +pub const STARKNET_SEPOLIA: &str = "0x534e5f5345504f4c4941"; +pub const STARKNET_MAINNET: &str = "0x534e5f4d41494e"; +pub const USE_TRANSACTOR: bool = false; +pub const MAX_JOB_RETRIES_COUNT: u64 = 10; +pub const BEACON_CHAIN_LISTENER_ENABLED: bool = true; +pub const JOBS_RETRY_ENABLED: bool = true; +pub const JOBS_RESUME_ENABLED: bool = true; +pub const RETRY_DELAY_MS: u64 = 300_0000; +pub const MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS: u64 = 5; // How many skipped slots in row can be on Beacon Chain before we throw error +pub const JOBS_RETRY_CHECK_INTERVAL: u64 = 600; // In seconds diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs new file mode 100644 index 0000000..34313e9 --- /dev/null +++ b/client-rs/src/daemon.rs @@ -0,0 +1,1591 @@ +#![allow(dead_code)] +#![allow(unused_imports)] +mod bankai_client; +mod config; +mod constants; +mod contract_init; +pub mod epoch_batch; +mod epoch_update; +mod execution_header; +mod helpers; +mod routes; +mod state; +mod sync_committee; +mod traits; +mod utils; +//use alloy_primitives::TxHash; +//use alloy_primitives::FixedBytes; +use alloy_rpc_types_beacon::events::HeadEvent; +use axum::{ + extract::DefaultBodyLimit, + //http::{header, StatusCode}, + routing::get, + Router, +}; +use bankai_client::BankaiClient; +use config::BankaiConfig; +//use constants::SLOTS_PER_EPOCH; +use dotenv::from_filename; +use helpers::{ + get_first_epoch_for_sync_committee, get_first_slot_for_epoch, get_last_epoch_for_sync_committee, +}; +use num_traits::cast::ToPrimitive; +use reqwest; +use routes::dashboard::handle_get_dashboard; +use starknet::core::types::Felt; +use state::check_env_vars; +use state::{AppState, Job}; +use state::{AtlanticJobType, Error, JobStatus, JobType}; +use std::env; +use std::sync::Arc; +use tokio::sync::mpsc; +use tokio::{signal, task}; +use tokio_stream::StreamExt; +use tower::ServiceBuilder; +use tower_http::{timeout::TimeoutLayer, trace::TraceLayer}; +use tracing::{debug, error, info, warn, Level}; +use tracing_subscriber::FmtSubscriber; +use traits::Provable; +use utils::{cairo_runner::CairoRunner, database_manager::DatabaseManager}; +//use std::error::Error as StdError; +use epoch_batch::EpochUpdateBatch; +use routes::{ + handle_get_committee_hash, + handle_get_decommitment_data_by_epoch, + handle_get_decommitment_data_by_execution_height, + handle_get_decommitment_data_by_slot, + handle_get_epoch_proof, // handle_get_epoch_update, + handle_get_job_status, + handle_get_latest_verified_committee, + handle_get_latest_verified_slot, + handle_get_merkle_paths_for_epoch, + handle_get_status, + handle_root_route, +}; +use std::net::SocketAddr; +use sync_committee::SyncCommitteeUpdate; +use tokio::time::{timeout, Duration}; +use uuid::Uuid; + +#[tokio::main(flavor = "multi_thread", worker_threads = 2)] +async fn main() -> Result<(), Box> { + // Load .env.sepolia file + from_filename(".env.sepolia").ok(); + + let slot_listener_toggle = constants::BEACON_CHAIN_LISTENER_ENABLED; + + let subscriber = FmtSubscriber::builder() + //.with_max_level(Level::DEBUG) + .with_max_level(Level::INFO) + .finish(); + + tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); + + // 
Validate environment variables + let _ = check_env_vars().map_err(|e| { + error!("Error: {}", e); + std::process::exit(1); // Exit if validation fails + }); + + info!("Starting Bankai light-client daemon..."); + + let (tx, mut rx): (mpsc::Sender, mpsc::Receiver) = mpsc::channel(32); + + //let (tx, mut rx) = mpsc::channel(32); + + let connection_string = format!( + "host={} user={} password={} dbname={}", + env::var("POSTGRESQL_HOST").unwrap().as_str(), + env::var("POSTGRESQL_USER").unwrap().as_str(), + env::var("POSTGRESQL_PASSWORD").unwrap().as_str(), + env::var("POSTGRESQL_DB_NAME").unwrap().as_str() + ); + + // Create a new DatabaseManager + let db_manager = Arc::new(DatabaseManager::new(&connection_string).await); + + let bankai = Arc::new(BankaiClient::new().await); + + // Beacon node endpoint construction for events + let events_endpoint = format!( + "{}/eth/v1/events?topics=head", + env::var("BEACON_RPC_URL").unwrap().as_str() + ); + + //let events_endpoint = format!("{}/eth/v1/events?topics=head", beacon_node_url) + let db_manager_for_listener = db_manager.clone(); + let db_manager_for_watcher = db_manager.clone(); + let bankai_for_listener = bankai.clone(); + + let tx_for_listener = tx.clone(); + let tx_for_watcher = tx.clone(); + + let app_state: AppState = AppState { + db_manager: db_manager.clone(), + tx, + bankai: bankai.clone(), + }; + + tokio::spawn(async move { + loop { + info!("[HEARTBEAT] Daemon is alive"); + tokio::time::sleep(std::time::Duration::from_secs(30)).await; + } + }); + + //Spawn a background task to process jobs + tokio::spawn(async move { + while let Some(job) = rx.recv().await { + let job_id = job.job_id; + let db_clone = db_manager.clone(); + let bankai_clone = Arc::clone(&bankai); + + // Spawn a *new task* for each job — now they can run in parallel + tokio::spawn(async move { + match process_job(job, db_clone.clone(), bankai_clone.clone()).await { + Ok(_) => { + info!("Job {} completed successfully", job_id); + } + Err(e) => { + let job_data = db_clone.get_job_by_id(job_id).await.unwrap().unwrap(); + let _ = db_clone.set_failure_info(job_id, job_data.job_status).await; + let _ = db_clone.update_job_status(job_id, JobStatus::Error).await; + error!("Error processing job {}: {}", job_id, e); + } + } + }); + } + }); + + let app = Router::new() + .route("/", get(handle_root_route)) + .route("/status", get(handle_get_status)) + .route( + "/get_epoch_decommitment_data/by_epoch/:epoch_id", + get(handle_get_decommitment_data_by_epoch), + ) + .route( + "/get_epoch_decommitment_data/by_slot/:slot", + get(handle_get_decommitment_data_by_slot), + ) + .route( + "/get_epoch_decommitment_data/by_execution_height/:execution_layer_height", + get(handle_get_decommitment_data_by_execution_height), + ) + // ASCI-Art dashboard + .route("/dashboard", get(handle_get_dashboard)) + // Some debug routes + .route("/get_pending_atlantic_jobs", get(handle_get_epoch_proof)) + .route( + "/get_verified_epoch_proof/:epoch", + get(handle_get_epoch_proof), + ) + .route( + "/get_verified_committee_hash/:committee_id", + get(handle_get_committee_hash), + ) + .route( + "/get_merkle_paths_for_epoch/:epoch_id", + get(handle_get_merkle_paths_for_epoch), + ) + // .route( + // "/debug/get_epoch_update/:slot", + // get(handle_get_epoch_update), + // ) + .route( + "/debug/get_latest_verified_epoch", + get(handle_get_latest_verified_slot), + ) + .route( + "/debug/get_latest_verified_committee", + get(handle_get_latest_verified_committee), + ) + .route("/debug/get_job_status", 
get(handle_get_job_status)) + // .route("/get-merkle-inclusion-proof", get(handle_get_merkle_inclusion_proof)) + .layer(DefaultBodyLimit::disable()) + .layer( + ServiceBuilder::new().layer(TraceLayer::new_for_http()), // Example: for logging/tracing + ) + .layer(( + // Graceful shutdown will wait for outstanding requests to complete + // Because of this timeout setting, requests don't hang forever + TimeoutLayer::new(Duration::from_secs(10)), + )) + .with_state(app_state); + + let addr = "0.0.0.0:3001".parse::()?; + let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); + + info!("Bankai RPC HTTP server is listening on http://{}", addr); + + let server_task = tokio::spawn(async move { + let _ = axum::serve(listener, app) + .with_graceful_shutdown(shutdown_signal()) + .await + .unwrap(); + }); + + // After RPC init, we do some startup checks before we start listening to the beacon chain: + // + // 🔄 Resume any unfinished jobs before processing new ones + if constants::JOBS_RESUME_ENABLED { + resume_unfinished_jobs(db_manager_for_listener.clone(), tx_for_listener.clone()).await?; + } + + // Retry any failed jobs before processing new ones + if constants::JOBS_RETRY_ENABLED { + retry_failed_jobs(db_manager_for_listener.clone(), tx_for_listener.clone()).await?; + } + + //enqueue_sync_committee_jobs(); + //enqueue_batch_epochs_jobs(); + // + + // Listen for new slots on the Beacon Chain + // Create an HTTP client + + if slot_listener_toggle { + // loop { + // let bankai_for_listener = bankai_for_listener.clone(); + // let db_manager_for_listener = db_manager_for_listener.clone(); + // let tx_for_listener = tx_for_listener.clone(); + // let events_endpoint = events_endpoint.clone(); + let http_stream_client = reqwest::Client::new(); + + let _listener_worker_handle = tokio::spawn(async move { + loop { + // Send the request to the Beacon node + let response = match http_stream_client + .get(&events_endpoint) + //.timeout(std::time::Duration::from_secs(30)) - cannot do this, because it would time out after each interval: we are not using HTTP pooling here but HTTP streaming + .send() + .await + { + Ok(r) => r, + Err(e) => { + error!("Failed to connect: {}", e); + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + continue; // retry + } + }; + + if !response.status().is_success() { + error!("Got non-200: {}", response.status()); + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + continue; // retry + } + + info!("Listening for new slots, epochs and sync committee updates..."); + + let mut stream = response.bytes_stream(); + + loop { + match timeout(Duration::from_secs(30), stream.next()).await { + // Timed out; handle it locally + Err(_elapsed) => { + warn!( + "Timed out waiting for new slot beacon chain event chunk. Maybe some slots were skipped. Will reconnect..." 
+ ); + break; + } + Ok(Some(Ok(bytes))) => { + if let Ok(event_text) = String::from_utf8(bytes.to_vec()) { + // Preprocess the event text + if let Some(json_data) = + helpers::extract_json_from_event(&event_text) + { + match serde_json::from_str::(&json_data) { + Ok(parsed_event) => { + let epoch_id = + helpers::slot_to_epoch_id(parsed_event.slot); + let sync_committee_id = + helpers::slot_to_sync_committee_id( + parsed_event.slot, + ); + info!( + "[EVENT] New beacon slot detected: {} | Block: {} | Epoch: {} | Sync committee: {} | Is epoch transition: {}", + parsed_event.slot, parsed_event.block, epoch_id, sync_committee_id, parsed_event.epoch_transition + ); + + match handle_beacon_chain_head_event( + parsed_event, + bankai_for_listener.clone(), + db_manager_for_listener.clone(), + tx_for_listener.clone(), + ) + .await + { + Ok(()) => { + // Event was handled successfully. + } + Err(e) => { + error!("Error handling beacon chain head event: {:?}", e); + } + } + } + Err(err) => { + warn!("Failed to parse JSON data: {}", err); + } + } + } else { + warn!("No valid JSON data found in event: {}", event_text); + } + } + } + Ok(Some(Err(e))) => { + warn!("Beacon chain client stream error: {}", e); + break; // break the while, then reconnect + } + Ok(None) => { + warn!("Beacon chain client stream ended"); + // Stream ended + break; + } + } + } + // If we got here because of `timeout` returning `Err(_)`, that means 30s + // passed without a single chunk of data arriving, or + // the RPC server has closed the connection, or some other unknown network error occurred + + // If we exit the while, we reconnect in the outer loop + info!("Timeout waiting for next event, reconnecting to beacon node..."); + } + }); + // Handle panics inside the listener worker (this can also be done in other ways, but this is enough for now) + // match listener_worker_handle.await { + // Ok(_) => warn!("Listener worker finished normally."), + // Err(e) if e.is_panic() => { + // error!("Listener worker panicked! Restarting..."); + // tokio::time::sleep(Duration::from_secs(1)).await; + // } + // Err(_) => error!("Listener worker failed for some reason."), + // } + //} + } + + // Periodically check for and retry failed jobs + tokio::spawn(async move { + loop { + tokio::time::sleep(std::time::Duration::from_secs( + constants::JOBS_RETRY_CHECK_INTERVAL, + )) + .await; + retry_failed_jobs(db_manager_for_watcher.clone(), tx_for_watcher.clone()).await; + } + }); + + // Wait for the server task to finish + server_task.await?; + + Ok(()) +} + +async fn handle_beacon_chain_head_event( + parsed_event: HeadEvent, + bankai: Arc, + db_manager: Arc, + tx: mpsc::Sender, +) -> Result<(), Error> { + let current_epoch_id = helpers::slot_to_epoch_id(parsed_event.slot); + let current_sync_committee_id = helpers::slot_to_sync_committee_id(parsed_event.slot); + + if parsed_event.epoch_transition { + //info!("Beacon Chain epoch transition detected. New epoch: {} | Starting processing epoch proving...", epoch_id); + info!( + "Beacon Chain epoch transition detected. New epoch: {}", + current_epoch_id + ); + + // Also check whether this slot is the moment of the switch to a new sync committee set + if parsed_event.slot % constants::SLOTS_PER_SYNC_COMMITTEE == 0 { + info!( + "Beacon Chain sync committee rotation occurred. 
Slot {} | Sync committee id: {}", + parsed_event.slot, current_sync_committee_id + ); + } + } + + let _ = db_manager + .update_daemon_state_info(parsed_event.slot, parsed_event.block) + .await; + + // We can do all circuit computations up to latest slot in advance, but the onchain broadcasts must be send in correct order + // By correct order mean that within the same sync committe the epochs are not needed to be broadcasted in order + // but the order of sync_commite_update->epoch_update must be correct, we firstly need to have correct sync committe veryfied + // before we verify epoch "belonging" to this sync committee + + let latest_verified_epoch_slot = bankai + .starknet_client + .get_latest_epoch_slot(&bankai.config) + .await + .unwrap() + .to_u64() + .unwrap(); + + let latest_verified_sync_committee_id = bankai + .starknet_client + .get_latest_committee_id(&bankai.config) + .await + .unwrap() + .to_u64() + .unwrap(); + + let lowest_required_committee_update_slot = + (latest_verified_sync_committee_id) * constants::SLOTS_PER_SYNC_COMMITTEE; + + let latest_verified_epoch_id = helpers::slot_to_epoch_id(latest_verified_epoch_slot); + let epochs_behind = current_epoch_id - latest_verified_epoch_id; + + let _ = evaluate_jobs_statuses( + db_manager.clone(), + latest_verified_sync_committee_id, + latest_verified_epoch_slot, + ) + .await + .map_err(|e| { + error!("Error evaluating jobs statuses: {}", e); + }); + let _ = broadcast_onchain_ready_jobs(db_manager.clone(), bankai.clone()) + .await + .map_err(|e| { + error!("Error executing broadcast onchain ready jobs: {}", e); + }); + + // We getting the last slot in progress to determine next slots to prove + //let mut last_slot_in_progress: u64 = 0; + // /let mut last_epoch_in_progress: u64 = 0; + // let mut last_sync_committee_in_progress: u64 = 0; + // + + let last_epoch_in_progress = db_manager + .get_latest_epoch_in_progress() + .await + .unwrap() + .unwrap(); + + let last_done_epoch = db_manager.get_latest_done_epoch().await.unwrap().unwrap(); + + let last_sync_committee_in_progress = db_manager + .get_latest_sync_committee_in_progress() + .await + .unwrap() + .unwrap(); + + let last_done_sync_committee = db_manager + .get_latest_done_sync_committee() + .await + .unwrap() + .unwrap(); + + let mut latest_scheduled_epoch = last_epoch_in_progress; + let mut latest_scheduled_sync_committee = last_sync_committee_in_progress; + + if latest_verified_epoch_id > last_epoch_in_progress { + if last_epoch_in_progress == 0 { + //info!("Starting daemon on clean epochs jobs table"); + } else { + warn!( + "Something may be wrong, last verified epoch is greather than last epoch in progress" + ); + } + // So we should schedule the greater epoch, which is + latest_scheduled_epoch = latest_verified_epoch_id; + } + + if latest_verified_sync_committee_id > last_sync_committee_in_progress { + if last_sync_committee_in_progress == 0 { + //info!("Starting daemon on clean sync committees jobs table"); + } else { + warn!( + "Something may be wrong, last verified sync committee is greather than last sync committee in progress" + ); + } + + latest_scheduled_sync_committee = latest_verified_sync_committee_id; + } + + info!( + "Current state: Beacon Chain: [Slot: {} Epoch: {} Sync Committee: {}] | Latest verified: [Slot: {} Epoch: {} Sync Committee: {}] | Latest in progress: [Epoch: {} Sync Committee: {}] | Latest done: [Epoch: {} Sync Committee: {}] | Sync in progress...", + parsed_event.slot, current_epoch_id, current_sync_committee_id, latest_verified_epoch_slot, 
latest_verified_epoch_id, latest_verified_sync_committee_id, last_epoch_in_progress, last_sync_committee_in_progress, last_done_epoch, last_done_sync_committee + ); + + // Decide based on the actual state + // if helpers::get_sync_committee_id_by_epoch(latest_scheduled_epoch + 1) + // > latest_scheduled_sync_committee + // { + // // We reached end of current sync committee, need to schedule new sync committee proving + // match run_sync_committee_update_job( + // db_manager.clone(), + // latest_scheduled_sync_committee + 1, + // tx.clone(), + // ) + // .await + // { + // Ok(()) => {} + // Err(e) => { + // error!("Error while creating sync committee update job: {}", e); + // } + // }; + // } + // + + if !(latest_verified_epoch_slot < lowest_required_committee_update_slot) { + info!( + "Lowest required committee update slot: {}", + lowest_required_committee_update_slot + ); + if last_sync_committee_in_progress < latest_scheduled_sync_committee { + if last_done_sync_committee < latest_scheduled_sync_committee { + // This last check is needed because of the delay between onchain verification and the sequencer data update + match run_sync_committee_update_job( + db_manager.clone(), + latest_verified_epoch_slot, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating sync committee update job: {}", e); + } + }; + } + } + } + + let current_sync_committee_epochs_left = + helpers::get_last_epoch_for_sync_committee(current_sync_committee_id) - current_epoch_id; + info!( + "{} epochs left in current beacon chain sync committee", + current_sync_committee_epochs_left + ); + + // Decide based on the actual state + if epochs_behind > constants::TARGET_BATCH_SIZE { + // is_node_in_sync = true; + + warn!( + "Bankai is out of sync now. Node is {} epochs behind network. | Sync in progress...", + epochs_behind + ); + + // Check whether all epochs that need to be processed are already in progress; if not, run a job + if latest_scheduled_epoch < (current_epoch_id - constants::TARGET_BATCH_SIZE) { + // And check how many jobs are already in progress and whether we fit in the limit + let in_progress_jobs_count = db_manager.count_jobs_in_progress().await.unwrap(); + if in_progress_jobs_count.unwrap() >= constants::MAX_CONCURRENT_JOBS_IN_PROGRESS { + info!( + "Currently not starting new batch epoch job, MAX_CONCURRENT_JOBS_IN_PROGRESS limit reached, jobs in progress: {}", + in_progress_jobs_count.unwrap() + ); + return Ok(()); + } + + let epoch_to_start_from = latest_scheduled_epoch + 1; + let mut epoch_to_end_on = latest_scheduled_epoch + constants::TARGET_BATCH_SIZE; // To create a batch of constants::TARGET_BATCH_SIZE epochs + + let currently_processed_sync_committee_id = + helpers::get_sync_committee_id_by_epoch(epoch_to_start_from); + + info!( + "Currently processed sync committee epoch range: {} to {}. 
Next sync committee epoch range: {} to {}", + helpers::get_first_epoch_for_sync_committee(currently_processed_sync_committee_id), + helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id), + helpers::get_first_epoch_for_sync_committee(currently_processed_sync_committee_id + 1), + helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id + 1) + ); + + if helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) + == epoch_to_start_from + {} + + // Edge cases handling // + // Handle the edge case where there is only one epoch left to process in the batch and this epoch is the last epoch in the sync committee; if we always follow the batch size of 32, this shouldn't happen: + if epoch_to_start_from + == helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) + { + warn!("edge case: only one epoch left to process in batch in this sync committee"); + epoch_to_end_on = epoch_to_start_from; + } + // Likewise, if we always follow the batch size of 32 this shouldn't happen, but with batches of unequal size it can also be triggered: + else if epoch_to_end_on + > helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) + { + warn!("edge case: batch end epoch {} overlaps with the next sync committee, truncating to the last epoch: {} of corresponding sync committee: {}", + epoch_to_end_on, helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id), currently_processed_sync_committee_id); + // The end epoch lies beyond the current sync committee + // In this case we can simply assign the sync committee's latest epoch as epoch_to_end_on + epoch_to_end_on = helpers::get_last_epoch_for_sync_committee( + currently_processed_sync_committee_id, + ); + } + // + // info!( + // "{} epochs left to process in associated sync committee term", + // helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) + // - latest_scheduled_epoch + // ); + // + // Mitigate the issue where the Starknet Sequencer RPC reports the last verified slot with a delay + if last_done_epoch < epoch_to_start_from { + match run_batch_epoch_update_job( + db_manager.clone(), + get_first_slot_for_epoch(epoch_to_start_from) + + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), + epoch_to_start_from, + epoch_to_end_on, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating job: {}", e); + } + }; + } + } else { + debug!("All required jobs are now queued and processing"); + } + } else if epochs_behind == constants::TARGET_BATCH_SIZE { + if last_epoch_in_progress < current_epoch_id { + // This is when we are synced properly and a new epoch batch needs to be inserted + info!( + "Target batch size reached. Starting processing of the next epoch batch. 
Current Beacon Chain epoch: {} Latest verified epoch: {}", + current_epoch_id, latest_verified_epoch_id + ); + + let epoch_to_start_from = latest_scheduled_epoch + 1; + let epoch_to_end_on = latest_scheduled_epoch + constants::TARGET_BATCH_SIZE; + match run_batch_epoch_update_job( + db_manager.clone(), + get_first_slot_for_epoch(epoch_to_start_from) + + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), + epoch_to_start_from, + epoch_to_end_on, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating job: {}", e); + } + }; + } + } else if epochs_behind < constants::TARGET_BATCH_SIZE { + // When we are in sync and have not yet fallen TARGET_BATCH_SIZE epochs behind the actual beacon chain state + let epochs_left = constants::TARGET_BATCH_SIZE - epochs_behind; + info!("Target batch size not reached yet, daemon is in sync, {} epochs left to start new batch job", epochs_left); + } + + // Check if sync committee update is needed + + if latest_verified_epoch_slot % constants::SLOTS_PER_SYNC_COMMITTEE == 0 {} + + // When we do an EpochBatchUpdate the slot is latest_batch_output + // So for each batch update we effectively take into account the latest slot from the given batch + + //let db_client = db_client.clone(); + Ok(()) +} + +// // This function will enqueue sync committee jobs in the database with status CREATED up to the latest sync committee +// async fn enqueue_sync_committee_jobs( +// db_manager: Arc, +// bankai: Arc, +// ) -> Result<(), Box> { +// } + +// // This function will enqueue epoch batch update jobs in the database with status CREATED up to the latest provable epoch batch +// async fn enqueue_batch_epochs_jobs( +// db_manager: Arc, +// bankai: Arc, +// ) -> Result<(), Box> { +// } + +async fn run_batch_epoch_update_job( + db_manager: Arc, + slot: u64, + batch_range_begin_epoch: u64, + batch_range_end_epoch: u64, + tx: mpsc::Sender, +) -> Result<(), Box> { + let job_id = Uuid::new_v4(); + let job = Job { + job_id: job_id.clone(), + job_type: JobType::EpochBatchUpdate, + job_status: JobStatus::Created, + slot: Some(slot), + batch_range_begin_epoch: Some(batch_range_begin_epoch), + batch_range_end_epoch: Some(batch_range_end_epoch), + }; + + // Check to ensure both epochs belong to the same sync committee + if helpers::get_sync_committee_id_by_epoch(batch_range_begin_epoch) + != helpers::get_sync_committee_id_by_epoch(batch_range_end_epoch) + { + return Err( + "Batch range start epoch belongs to a different committee than batch range end epoch" + .into(), + ); + } + + match db_manager.create_job(job.clone()).await { + // Insert new job record to DB + Ok(()) => { + // Handle success + info!( + "[EPOCH BATCH UPDATE] Job created successfully with ID: {} Epochs range from {} to {} | Sync committee involved: {}", + job_id, batch_range_begin_epoch, batch_range_end_epoch, helpers::get_sync_committee_id_by_epoch(batch_range_end_epoch) + ); + if tx.send(job).await.is_err() { + return Err("Failed to send job".into()); + } + // If starting a committee update job, first ensure that the corresponding slot is registered in the contract + Ok(()) + } + Err(e) => { + // Handle the error + return Err(e.into()); + } + } +} + +async fn run_sync_committee_update_job( + db_manager: Arc, + //sync_committee_id: u64, + slot: u64, + tx: mpsc::Sender, +) -> Result<(), Box> { + let job_id = Uuid::new_v4(); + let job = Job { + job_id: job_id.clone(), + job_type: JobType::SyncCommitteeUpdate, + job_status: JobStatus::Created, + slot: Some(slot), + // : 
Some(helpers::get_first_slot_for_sync_committee( + // sync_committee_id, + // )), + batch_range_begin_epoch: None, + batch_range_end_epoch: None, + }; + + match db_manager.create_job(job.clone()).await { + // Insert new job record to DB + Ok(()) => { + // Handle success + info!( + "[SYNC COMMITTEE UPDATE] Job created successfully with ID: {}", + job_id + ); + if tx.send(job).await.is_err() { + return Err("Failed to send job".into()); + } + // If starting a committee update job, first ensure that the corresponding slot is registered in the contract + Ok(()) + } + Err(e) => { + // Handle the error + return Err(e.into()); + } + } +} + +async fn evaluate_jobs_statuses( + db_manager: Arc, + latest_verified_sync_committee_id: u64, + _latest_verified_epoch_slot: u64, +) -> Result<(), Box> { + // The purpose of this function is to manage the sequential nature of onchain verification of epochs and sync committees + // First we get all jobs with status OFFCHAIN_COMPUTATION_FINISHED + // We calculate the start and end epoch for the provided last verified sync committee + // and set the READY_TO_BROADCAST status for epochs up to the last epoch belonging to the provided latest_verified_sync_committee_id + let first_epoch = get_first_epoch_for_sync_committee(latest_verified_sync_committee_id + 1); + let last_epoch = get_last_epoch_for_sync_committee(latest_verified_sync_committee_id + 1); + + //let first_epoch = first_epoch - 32; // So we also broadcast first epoch from next sync committee + + info!( + "Evaluating jobs for epochs range from {} to {}, for sync committee {}", + first_epoch, last_epoch, latest_verified_sync_committee_id + ); + + db_manager + .set_ready_to_broadcast_for_batch_epochs_to(last_epoch) // Set READY_TO_BROADCAST when OFFCHAIN_COMPUTATION_FINISHED + .await?; + + // db_manager + // .set_ready_to_broadcast_for_batch_epochs(first_epoch, last_epoch) // Set READY_TO_BROADCAST when OFFCHAIN_COMPUTATION_FINISHED + // .await?; + + db_manager + .set_ready_to_broadcast_for_sync_committee(latest_verified_sync_committee_id) + .await?; + + Ok(()) +} + +async fn resume_unfinished_jobs( + db_manager: Arc, + tx: mpsc::Sender, +) -> Result<(), Box> { + info!("Checking for unfinished jobs..."); + + // Fetch jobs that were in progress before shutdown + let unfinished_jobs = db_manager + .get_jobs_with_statuses(vec![ + JobStatus::Created, + JobStatus::StartedFetchingInputs, + JobStatus::ProgramInputsPrepared, + JobStatus::StartedTraceGeneration, + JobStatus::PieGenerated, + JobStatus::AtlanticProofRequested, + JobStatus::AtlanticProofRetrieved, + JobStatus::WrapProofRequested, + JobStatus::WrappedProofDone, + ]) + .await?; + + if unfinished_jobs.is_empty() { + info!("No unfinished jobs found."); + return Ok(()); + } + + info!( + "Found {} unfinished jobs. Resuming processing...", + unfinished_jobs.len() + ); + + for job in unfinished_jobs { + let job_id = job.job_uuid; + let job_to_resume = Job { + job_id, + job_type: job.job_type, + job_status: job.job_status.clone(), + slot: Some(job.slot.to_u64().unwrap()), + batch_range_begin_epoch: job.batch_range_begin_epoch.to_u64(), + batch_range_end_epoch: job.batch_range_end_epoch.to_u64(), + }; + + let resumed_from_step = job.job_status.clone(); + let tx_clone = tx.clone(); + tokio::spawn(async move { + match job_to_resume.job_type { + JobType::SyncCommitteeUpdate => { + info!( + "Resuming job {} from step {}... 
(sync committee update job for sync committee {})", + job_id, + resumed_from_step.to_string(), + helpers::slot_to_sync_committee_id(job.slot.to_u64().unwrap()) + ); + } + JobType::EpochBatchUpdate => { + info!( + "Resuming job {} from step {}... (batch epoch update job for epochs from {} to {})", + job_id, resumed_from_step.to_string(), job.batch_range_begin_epoch, job.batch_range_end_epoch + ); + } + } + + if tx_clone.send(job_to_resume).await.is_err() { + // return Err("Failed to send job".into()); + error!("Error resuming job: {}", job_id); + } + }); + + tokio::time::sleep(Duration::from_millis(500)).await; + } + + Ok(()) +} + +async fn retry_failed_jobs( + db_manager: Arc, + tx: mpsc::Sender, +) -> Result<(), Box> { + info!("Checking for failed jobs..."); + + // Fetch failed jobs + let errored_jobs = db_manager + .get_jobs_with_statuses(vec![JobStatus::Error]) + .await?; + + if errored_jobs.is_empty() { + info!("No failed jobs found."); + return Ok(()); + } + + warn!( + "Found {} failed jobs. Trying to retry these jobs...", + errored_jobs.len() + ); + + for job in errored_jobs { + let job_id = job.job_uuid; + + let failed_at_step = job.failed_at_step.unwrap_or(JobStatus::Created); + + let job_to_retry = Job { + job_id, + job_type: job.job_type, + job_status: failed_at_step.clone(), + slot: Some(job.slot.to_u64().unwrap()), + batch_range_begin_epoch: job.batch_range_begin_epoch.to_u64(), + batch_range_end_epoch: job.batch_range_end_epoch.to_u64(), + }; + + let db_clone = db_manager.clone(); + let tx_clone = tx.clone(); + tokio::spawn(async move { + match job_to_retry.job_type { + JobType::SyncCommitteeUpdate => { + info!( + "Requesting retry of failed job {} failed previously at step {}... (sync committee update job for sync committee {})", + job_id, + failed_at_step.to_string(), + helpers::slot_to_sync_committee_id(job.slot.to_u64().unwrap()) + ); + } + JobType::EpochBatchUpdate => { + info!( + "Requesting retry of failed job {} failed previously at step {} ... 
(batch epoch update job for epochs from {} to {})", + job_id, + failed_at_step.to_string(), + job.batch_range_begin_epoch, + job.batch_range_end_epoch + ); + } + } + + let _ = db_clone + .update_job_status(job_id, failed_at_step.clone()) + .await; + if failed_at_step != JobStatus::OffchainComputationFinished + && failed_at_step != JobStatus::ReadyToBroadcastOnchain + && failed_at_step != JobStatus::ProofVerifyCalledOnchain + // These jobs are done sequentially, not in parallel + { + if tx_clone.send(job_to_retry).await.is_err() { + // return Err("Failed to send job".into()); + // Update the status to the status that was set at the time the error occurred + error!("Error retrying job: {}", job_id); + } + } + }); + + tokio::time::sleep(Duration::from_millis(500)).await; + } + + Ok(()) +} + +async fn broadcast_onchain_ready_jobs( + db_manager: Arc, + bankai: Arc, +) -> Result<(), Box> { + // Fetch jobs with the status `ReadyToBroadcastOnchain` + let jobs = db_manager + .get_jobs_with_status(JobStatus::ReadyToBroadcastOnchain) + .await?; + + // Iterate through the jobs and process them + for job in jobs { + match job.job_type { + JobType::EpochBatchUpdate => { + let circuit_inputs = EpochUpdateBatch::from_json::( + job.batch_range_begin_epoch.try_into().unwrap(), + job.batch_range_end_epoch.try_into().unwrap(), + )?; + + info!( + "[EPOCH BATCH JOB] Calling epoch batch update onchain for epochs range from {} to {}...", + job.batch_range_begin_epoch, job.batch_range_end_epoch + ); + + // Submit to Starknet + let send_result = bankai + .starknet_client + .submit_update(circuit_inputs.expected_circuit_outputs, &bankai.config) + .await; + + let txhash = match send_result { + Ok(txhash) => { + info!("[EPOCH BATCH JOB] Transaction sent: {}", txhash); + txhash + } + Err(e) => { + error!("[EPOCH BATCH JOB] Transaction sending error: {:?}", e); + let _ = db_manager + .set_failure_info(job.job_uuid, JobStatus::ReadyToBroadcastOnchain) + .await?; + db_manager + .update_job_status(job.job_uuid, JobStatus::Error) + .await?; + + continue; + } + }; + + info!( + "[EPOCH BATCH JOB] Successfully called batch epoch update onchain for job_uuid: {}, txhash: {}", + job.job_uuid, txhash.to_hex_string() + ); + + db_manager + .update_job_status(job.job_uuid, JobStatus::ProofVerifyCalledOnchain) + .await?; + + let _ = db_manager.set_job_txhash(job.job_uuid, txhash).await; + + let confirmation_result = + bankai.starknet_client.wait_for_confirmation(txhash).await; + + match confirmation_result { + Ok(_) => { + info!("[EPOCH BATCH JOB] Transaction is confirmed on-chain!"); + db_manager + .update_job_status(job.job_uuid, JobStatus::Done) + .await?; + + // Iterate over and insert epoch proofs into the DB + for (index, epoch) in + circuit_inputs.circuit_inputs.epochs.iter().enumerate() + { + info!( + "Inserting epoch data to DB: Index in batch: {}: {:?}", + index, epoch.expected_circuit_outputs + ); + db_manager + .insert_verified_epoch_circuit_outputs( + helpers::slot_to_epoch_id(epoch.expected_circuit_outputs.slot), //index.to_u64().unwrap(), + epoch.expected_circuit_outputs.beacon_header_root, + epoch.expected_circuit_outputs.beacon_state_root, + epoch.expected_circuit_outputs.slot, + epoch.expected_circuit_outputs.committee_hash, + epoch.expected_circuit_outputs.n_signers, + epoch.expected_circuit_outputs.execution_header_hash, + epoch.expected_circuit_outputs.execution_header_height, + ) + .await?; + } + + // Remove the related PIE file since it is no longer needed after the verification has succeeded onchain + } + Err(e) => { 
error!("[EPOCH BATCH JOB] Transaction failed or timed out: {:?}", e); + let _ = db_manager + .set_failure_info(job.job_uuid, JobStatus::ReadyToBroadcastOnchain) + .await?; + db_manager + .update_job_status(job.job_uuid, JobStatus::Error) + .await?; + } + } + + // let epoch_proof = bankai + // .starknet_client + // .get_epoch_proof(job.slot.try_into().unwrap(), &bankai.config) + // .await + // .unwrap(); + + // db_manager + // .insert_verified_epoch( + // job.batch_range_end_epoch.try_into().unwrap(), + // epoch_proof, + // ) + // .await?; + } + //JobType::EpochUpdate => {} + JobType::SyncCommitteeUpdate => { + let sync_committee_update_inputs = SyncCommitteeUpdate::from_json::< + SyncCommitteeUpdate, + >(job.slot.to_u64().unwrap())?; + + let sync_commite_id = + helpers::slot_to_sync_committee_id(job.slot.to_u64().unwrap()); + + info!( + "[SYNC COMMITTEE JOB] Calling sync committee ID {} update onchain...", + sync_commite_id + ); + + let send_result = bankai + .starknet_client + .submit_update( + sync_committee_update_inputs.expected_circuit_outputs, + &bankai.config, + ) + .await; + + let txhash = match send_result { + Ok(txhash) => { + info!("[SYNC COMMITTEE JOB] Transaction sent: {}", txhash); + txhash + } + Err(e) => { + error!("[SYNC COMMITTEE JOB] Transaction sending error: {:?}", e); + let _ = db_manager + .set_failure_info(job.job_uuid, JobStatus::ReadyToBroadcastOnchain) + .await?; + db_manager + .update_job_status(job.job_uuid, JobStatus::Error) + .await?; + + continue; + } + }; + + info!("[SYNC COMMITTEE JOB] Successfully called sync committee ID {} update onchain, transaction confirmed, txhash: {}", sync_commite_id, txhash); + + db_manager.set_job_txhash(job.job_uuid, txhash).await?; + + let confirmation_result = + bankai.starknet_client.wait_for_confirmation(txhash).await; + + match confirmation_result { + Ok(_) => { + info!("[SYNC COMMITTEE JOB] Transaction is confirmed on-chain!"); + db_manager + .update_job_status(job.job_uuid, JobStatus::Done) + .await?; + + // Insert data to DB after successful onchain sync committee verification + //let sync_committee_hash = update.expected_circuit_outputs.committee_hash; + let sync_committee_hash = match bankai + .starknet_client + .get_committee_hash(job.slot.to_u64().unwrap(), &bankai.config) + .await + { + Ok(sync_committee_hash) => sync_committee_hash, + Err(e) => { + // Handle the error + return Err(e.into()); + } + }; + + let sync_committee_hash_str = sync_committee_hash + .iter() + .map(|felt| felt.to_hex_string()) + .collect::>() + .join(""); + + db_manager + .insert_verified_sync_committee( + job.slot.to_u64().unwrap(), + sync_committee_hash_str, + ) + .await?; + } + Err(e) => { + eprintln!( + "[SYNC COMMITTEE JOB] Transaction failed or timed out: {:?}", + e + ); + let _ = db_manager + .set_failure_info(job.job_uuid, JobStatus::ReadyToBroadcastOnchain) + .await?; + db_manager + .update_job_status(job.job_uuid, JobStatus::Error) + .await?; + } + } + } + } + } + + Ok(()) +} + +// mpsc jobs // +async fn process_job( + job: Job, + db_manager: Arc, + bankai: Arc, +) -> Result<(), Box> { + let mut current_status = job.job_status.clone(); + let job_data = db_manager.get_job_by_id(job.job_id).await?.unwrap(); + let mut batch_id = job_data + .atlantic_proof_generate_batch_id + .unwrap_or("".to_string()); + let mut wrapping_batch_id = job_data + .atlantic_proof_wrapper_batch_id + .unwrap_or("".to_string()); + loop { + match job.job_type { + JobType::SyncCommitteeUpdate => { + // Sync committee job + let updated_committee_slot = 
job.slot.unwrap(); + let update_committee_id = + helpers::get_sync_committee_id_by_slot(updated_committee_slot); + match current_status { + JobStatus::Created => { + info!("[SYNC COMMITTEE JOB] Started processing sync committee job: {} for sync committee ID: {} (Slot: {})", + job.job_id, update_committee_id, updated_committee_slot ); + + let sync_committe_update_program_inputs = bankai + .get_sync_committee_update(updated_committee_slot.try_into().unwrap()) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Sync committee update program inputs generated: {:?}", + sync_committe_update_program_inputs + ); + + let input_path = sync_committe_update_program_inputs.export(); + info!( + "[SYNC COMMITTEE JOB] Circuit inputs saved at {:?}", + input_path + ); + + db_manager + .update_job_status(job.job_id, JobStatus::ProgramInputsPrepared) + .await?; + + current_status = JobStatus::ProgramInputsPrepared; + } + JobStatus::ProgramInputsPrepared | JobStatus::StartedTraceGeneration => { + let sync_committe_update_program_inputs = + SyncCommitteeUpdate::from_json::( + job.slot.unwrap(), + )?; + info!( + "[SYNC COMMITTEE JOB] Starting Cairo execution and PIE generation for Sync Committee: {}...", + update_committee_id + ); + + CairoRunner::generate_pie( + &sync_committe_update_program_inputs, + &bankai.config, + Some(db_manager.clone()), + Some(job.job_id), + ) + .await?; + + db_manager + .update_job_status(job.job_id, JobStatus::PieGenerated) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Pie generated successfully for Sync Committee: {}...", + update_committee_id + ); + + current_status = JobStatus::PieGenerated; + } + JobStatus::PieGenerated => { + let sync_committe_update_program_inputs = + SyncCommitteeUpdate::from_json::( + job.slot.unwrap(), + )?; + + info!("[SYNC COMMITTEE JOB] Sending proof generation query to Atlantic..."); + + batch_id = bankai + .atlantic_client + .submit_batch(sync_committe_update_program_inputs) + .await?; + + db_manager + .update_job_status(job.job_id, JobStatus::AtlanticProofRequested) + .await?; + db_manager + .set_atlantic_job_queryid( + job.job_id, + batch_id.clone(), + AtlanticJobType::ProofGeneration, + ) + .await?; + + info!( "[SYNC COMMITTEE JOB] Proof generation batch submitted to atlantic. QueryID: {}", + batch_id ); + + current_status = JobStatus::AtlanticProofRequested; + } + JobStatus::AtlanticProofRequested | JobStatus::AtlanticProofRetrieved => { + // Pool for Atlantic execution done + info!( + "[SYNC COMMITTEE JOB] Waiting for completion of Atlantic job. QueryID: {}", + batch_id + ); + bankai + .atlantic_client + .poll_batch_status_until_done( + &batch_id, + Duration::new(10, 0), + usize::MAX, + ) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Proof generation done by Atlantic. QueryID: {}", + batch_id + ); + + let proof = bankai + .atlantic_client + .fetch_proof(batch_id.as_str()) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Proof retrieved from Atlantic. QueryID: {}", + batch_id + ); + + db_manager + .update_job_status(job.job_id, JobStatus::AtlanticProofRetrieved) + .await?; + + // Submit wrapped proof request + info!("[SYNC COMMITTEE JOB] Sending proof wrapping query to Atlantic.."); + wrapping_batch_id = + bankai.atlantic_client.submit_wrapped_proof(proof).await?; + info!( + "[SYNC COMMITTEE JOB] Proof wrapping query submitted to Atlantic. 
Wrapping QueryID: {}", + wrapping_batch_id + ); + + db_manager + .update_job_status(job.job_id, JobStatus::WrapProofRequested) + .await?; + db_manager + .set_atlantic_job_queryid( + job.job_id, + wrapping_batch_id.clone(), + AtlanticJobType::ProofWrapping, + ) + .await?; + + current_status = JobStatus::WrapProofRequested; + } + JobStatus::WrapProofRequested => { + info!( + "[SYNC COMMITTEE JOB] Waiting for completion of Atlantic proof wrappinf job. QueryID: {}", + wrapping_batch_id + ); + // Pool for Atlantic execution done + bankai + .atlantic_client + .poll_batch_status_until_done( + &wrapping_batch_id, + Duration::new(10, 0), + usize::MAX, + ) + .await?; + + db_manager + .update_job_status(job.job_id, JobStatus::WrappedProofDone) + .await?; + + info!("[SYNC COMMITTEE JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); + + db_manager + .update_job_status(job.job_id, JobStatus::OffchainComputationFinished) + .await?; + break; + } + _ => { + error!("[SYNC COMMITTEE JOB] Unexpected behaviour"); + break; + } + } + } + + JobType::EpochBatchUpdate => { + match current_status { + JobStatus::Created | JobStatus::StartedFetchingInputs => { + info!("[BATCH EPOCH JOB] Preparing inputs for program for epochs from {} to {}...", job.batch_range_begin_epoch.unwrap(), job.batch_range_end_epoch.unwrap()); + let circuit_inputs = EpochUpdateBatch::new_by_epoch_range( + &bankai, + db_manager.clone(), + job.batch_range_begin_epoch.unwrap(), + job.batch_range_end_epoch.unwrap(), + job.job_id, + ) + .await?; + + let input_path = circuit_inputs.export(); + info!("[BATCH EPOCH JOB] Circuit inputs saved at {:?}", input_path); + + db_manager + .update_job_status(job.job_id, JobStatus::ProgramInputsPrepared) + .await?; + + current_status = JobStatus::ProgramInputsPrepared; + } + JobStatus::ProgramInputsPrepared | JobStatus::StartedTraceGeneration => { + let circuit_inputs = EpochUpdateBatch::from_json::( + job.batch_range_begin_epoch.unwrap(), + job.batch_range_end_epoch.unwrap(), + )?; + + info!("[BATCH EPOCH JOB] Starting trace generation..."); + + CairoRunner::generate_pie( + &circuit_inputs, + &bankai.config, + Some(db_manager.clone()), + Some(job.job_id), + ) + .await?; + + db_manager + .update_job_status(job.job_id, JobStatus::PieGenerated) + .await?; + + current_status = JobStatus::PieGenerated; + } + JobStatus::PieGenerated => { + let circuit_inputs = EpochUpdateBatch::from_json::( + job.batch_range_begin_epoch.unwrap(), + job.batch_range_end_epoch.unwrap(), + )?; + + info!("[BATCH EPOCH JOB] Uploading PIE and sending proof generation request to Atlantic..."); + + batch_id = bankai.atlantic_client.submit_batch(circuit_inputs).await?; + + info!( + "[BATCH EPOCH JOB] Proof generation batch submitted to Atlantic. QueryID: {}", + batch_id + ); + + db_manager + .update_job_status(job.job_id, JobStatus::AtlanticProofRequested) + .await?; + db_manager + .set_atlantic_job_queryid( + job.job_id, + batch_id.clone(), + AtlanticJobType::ProofGeneration, + ) + .await?; + + current_status = JobStatus::AtlanticProofRequested; + } + JobStatus::AtlanticProofRequested | JobStatus::AtlanticProofRetrieved => { + // Pool for Atlantic execution done + info!( + "[BATCH EPOCH JOB] Waiting for completion of Atlantic proof generation job. QueryID: {}", + batch_id + ); + + bankai + .atlantic_client + .poll_batch_status_until_done( + &batch_id, + Duration::new(10, 0), + usize::MAX, + ) + .await?; + + info!( + "[BATCH EPOCH JOB] Proof generation done by Atlantic. 
QueryID: {}", + batch_id + ); + + let proof = bankai + .atlantic_client + .fetch_proof(batch_id.as_str()) + .await?; + + info!( + "[BATCH EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", + batch_id + ); + + db_manager + .update_job_status(job.job_id, JobStatus::AtlanticProofRetrieved) + .await?; + + // 5) Submit wrapped proof request + info!( + "[BATCH EPOCH JOB] Uploading proof and sending wrapping query to Atlantic.." + ); + wrapping_batch_id = + bankai.atlantic_client.submit_wrapped_proof(proof).await?; + info!( + "[BATCH EPOCH JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", + wrapping_batch_id + ); + + db_manager + .update_job_status(job.job_id, JobStatus::WrapProofRequested) + .await?; + + db_manager + .set_atlantic_job_queryid( + job.job_id, + wrapping_batch_id.clone(), + AtlanticJobType::ProofWrapping, + ) + .await?; + + current_status = JobStatus::WrapProofRequested; + } + JobStatus::WrapProofRequested => { + // Pool for Atlantic execution done + info!( + "[BATCH EPOCH JOB] Waiting for completion of Atlantic proof wrapping job. QueryID: {}", + wrapping_batch_id + ); + + bankai + .atlantic_client + .poll_batch_status_until_done( + &wrapping_batch_id, + Duration::new(10, 0), + usize::MAX, + ) + .await?; + + db_manager + .update_job_status(job.job_id, JobStatus::WrappedProofDone) + .await?; + + info!("[BATCH EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); + + db_manager + .update_job_status(job.job_id, JobStatus::OffchainComputationFinished) + .await?; + + break; + } + _ => { + error!("[BATCH EPOCH JOB] Unexpected behaviour"); + break; + } + } + } + } + } + Ok(()) +} + +async fn shutdown_signal() { + let ctrl_c = async { + signal::ctrl_c() + .await + .expect("failed to install Ctrl+C handler"); + }; + + #[cfg(unix)] + let terminate = async { + signal::unix::signal(signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; + + #[cfg(not(unix))] + let terminate = std::future::pending::<()>(); + + tokio::select! 
{ + _ = ctrl_c => { + info!("Gracefully shutting down..."); + }, + _ = terminate => { + info!("Gracefully shutting down..."); + }, + } +} diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index 998cddb..a32648a 100644 --- a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -1,18 +1,28 @@ +use crate::constants::{SLOTS_PER_EPOCH, TARGET_BATCH_SIZE}; use crate::epoch_update::{EpochUpdate, ExpectedEpochUpdateOutputs}; +use crate::helpers::{ + self, calculate_slots_range_for_batch, get_first_slot_for_epoch, + get_sync_committee_id_by_epoch, slot_to_epoch_id, +}; +use crate::state::JobStatus; use crate::traits::{Provable, Submittable}; use crate::utils::hashing::get_committee_hash; + use crate::utils::merkle::poseidon::{compute_paths, compute_root, hash_path}; use crate::{BankaiClient, Error}; use alloy_primitives::FixedBytes; use hex; +use num_traits::ToPrimitive; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use starknet::macros::selector; use starknet_crypto::Felt; use std::fs; +use uuid::Uuid; -const TARGET_BATCH_SIZE: u64 = 32; -const SLOTS_PER_EPOCH: u64 = 32; +use crate::utils::database_manager::DatabaseManager; +use std::sync::Arc; +use tracing::{debug, info, trace}; #[derive(Debug, Serialize, Deserialize)] pub struct EpochUpdateBatch { @@ -39,26 +49,38 @@ impl EpochUpdateBatch { .starknet_client .get_batching_range(&bankai.config) .await?; - println!("Slots in Term: Start {}, End {}", start_slot, end_slot); + info!("Slots in Term: Start {}, End {}", start_slot, end_slot); let epoch_gap = (end_slot - start_slot) / SLOTS_PER_EPOCH; - println!("Available Epochs: {}", epoch_gap); + info!( + "Available Epochs in this Sync Committee period: {}", + epoch_gap + ); // if the gap is smaller then x2 the target size, use the entire gap if epoch_gap >= TARGET_BATCH_SIZE * 2 { end_slot = start_slot + TARGET_BATCH_SIZE * SLOTS_PER_EPOCH; } - println!("Selected Slots: Start {}, End {}", start_slot, end_slot); - println!("Epoch Count: {}", (end_slot - start_slot) / SLOTS_PER_EPOCH); + info!("Selected Slots: Start {}, End {}", start_slot, end_slot); + info!("Epoch Count: {}", (end_slot - start_slot) / SLOTS_PER_EPOCH); let mut epochs = vec![]; // Fetch epochs sequentially from start_slot to end_slot, incrementing by 32 each time let mut current_slot = start_slot; while current_slot < end_slot { + // Current slot is the starting slot of epoch + info!( + "Getting data for slot: {} Epoch: {} Epochs batch position {}/{}", + current_slot, + slot_to_epoch_id(current_slot), + epochs.len(), + TARGET_BATCH_SIZE + ); let epoch_update = EpochUpdate::new(&bankai.client, current_slot).await?; epochs.push(epoch_update); current_slot += 32; + //info!("epochspush"); } let circuit_inputs = EpochUpdateBatchInputs { @@ -92,6 +114,205 @@ impl EpochUpdateBatch { Ok(batch) } + + // pub(crate) async fn new_by_slot( + // bankai: &BankaiClient, + // db_manager: Arc, + // slot: u64, + // ) -> Result { + // let _permit = bankai + // .config + // .epoch_data_fetching_semaphore + // .clone() + // .acquire_owned() + // .await + // .map_err(|e| Error::CairoRunError(format!("Semaphore error: {}", e)))?; + + // let (start_slot, end_slot) = calculate_slots_range_for_batch(slot); + // let mut epochs = vec![]; + + // // Fetch epochs sequentially from start_slot to end_slot, incrementing by 32 each time + // let mut current_slot = start_slot; + // while current_slot < end_slot { + // info!( + // "Getting data for slot: {} Epoch: {} Epochs batch position {}/{}", + // current_slot, + 
// slot_to_epoch_id(current_slot), + // epochs.len(), + // TARGET_BATCH_SIZE + // ); + // let epoch_update = EpochUpdate::new(&bankai.client, current_slot).await?; + + // epochs.push(epoch_update); + // current_slot += 32; + // } + + // let circuit_inputs = EpochUpdateBatchInputs { + // committee_hash: get_committee_hash(epochs[0].circuit_inputs.aggregate_pub.0), + // epochs, + // }; + + // let expected_circuit_outputs = ExpectedEpochBatchOutputs::from_inputs(&circuit_inputs); + + // let epoch_hashes = circuit_inputs + // .epochs + // .iter() + // .map(|epoch| epoch.expected_circuit_outputs.hash()) + // .collect::>(); + + // let (root, paths) = compute_paths(epoch_hashes.clone()); + + // // Verify each path matches the root + // current_slot = start_slot; + // for (index, path) in paths.iter().enumerate() { + // let computed_root = hash_path(epoch_hashes[index], path, index); + // if computed_root != root { + // panic!("Path {} does not match root", index); + // } + // // Insert merkle paths to database + // let current_epoch = slot_to_epoch_id(current_slot); + // for (path_index, current_path) in path.iter().enumerate() { + // db_manager + // .insert_merkle_path_for_epoch( + // current_epoch, + // path_index.to_u64().unwrap(), + // current_path.to_hex_string(), + // ) + // .await + // .map_err(|e| Error::DatabaseError(e.to_string()))?; + // } + // current_slot += 32; + // } + + // info!("Paths {:?}", paths); + + // let batch = EpochUpdateBatch { + // circuit_inputs, + // expected_circuit_outputs, + // merkle_paths: paths, + // }; + + // Ok(batch) + // } + + pub(crate) async fn new_by_epoch_range( + bankai: &BankaiClient, + db_manager: Arc, + start_epoch: u64, + end_epoch: u64, + job_id: Uuid, + ) -> Result { + let _permit = bankai + .config + .epoch_data_fetching_semaphore + .clone() + .acquire_owned() + .await + .map_err(|e| Error::CairoRunError(format!("Semaphore error: {}", e)))?; + + let _ = db_manager + .update_job_status(job_id, JobStatus::StartedFetchingInputs) + .await; + + let mut epochs = vec![]; + + // Fetch epochs sequentially from start_slot to end_slot, incrementing by 32 each time + let calculated_batch_size = end_epoch - start_epoch + 1; + let mut current_epoch = start_epoch; + while current_epoch <= end_epoch { + info!( + "Getting data for Epoch: {} (SyncCommittee: {}) First slot for this epoch: {} | Epochs batch position {}/{}", + current_epoch, + get_sync_committee_id_by_epoch(current_epoch), + get_first_slot_for_epoch(current_epoch), + epochs.len()+1, + calculated_batch_size + ); + let epoch_update = + EpochUpdate::new(&bankai.client, get_first_slot_for_epoch(current_epoch)).await?; + + epochs.push(epoch_update); + current_epoch += 1; + } + + let circuit_inputs = EpochUpdateBatchInputs { + committee_hash: get_committee_hash(epochs[0].circuit_inputs.aggregate_pub.0), + epochs, + }; + + let expected_circuit_outputs = ExpectedEpochBatchOutputs::from_inputs(&circuit_inputs); + + let epoch_hashes = circuit_inputs + .epochs + .iter() + .map(|epoch| epoch.expected_circuit_outputs.hash()) + .collect::>(); + + let (root, paths) = compute_paths(epoch_hashes.clone()); + + // Verify each path matches the root + current_epoch = start_epoch; + for (index, path) in paths.iter().enumerate() { + let computed_root = hash_path(epoch_hashes[index], path, index); + if computed_root != root { + panic!("Path {} does not match root", index); + } + // Insert merkle paths to database + //let current_epoch = slot_to_epoch_id(current_slot); + for (path_index, current_path) in 
path.iter().enumerate() { + db_manager + .insert_merkle_path_for_epoch( + current_epoch, + path_index.to_u64().unwrap(), + current_path.to_hex_string(), + ) + .await + .map_err(|e| Error::DatabaseError(e.to_string()))?; + } + current_epoch += 1; + } + + trace!("Paths for epochs {:?}", paths); + + let batch = EpochUpdateBatch { + circuit_inputs, + expected_circuit_outputs, + merkle_paths: paths, + }; + + Ok(batch) + } +} + +impl EpochUpdateBatch { + pub fn from_json(first_epoch: u64, last_epoch: u64) -> Result + where + T: serde::de::DeserializeOwned, + { + info!( + "Trying to read file batches/epoch_batch/{}_to_{}/input_batch_{}_to_{}.json", + first_epoch, last_epoch, first_epoch, last_epoch + ); + // Pattern match for files like: batches/epoch_batch/6709248_to_6710272/input_batch_6709248_to_6710272.json + let path = format!( + "batches/epoch_batch/{}_to_{}/input_batch_{}_to_{}.json", + first_epoch, last_epoch, first_epoch, last_epoch + ); + debug!(path); + let glob_pattern = glob::glob(&path) + .map_err(|e| Error::IoError(std::io::Error::new(std::io::ErrorKind::Other, e)))?; + + // Take the first matching file + let path = glob_pattern.take(1).next().ok_or_else(|| { + Error::IoError(std::io::Error::new( + std::io::ErrorKind::NotFound, + "No matching file found", + )) + })?; + + let json = fs::read_to_string(path.unwrap()).map_err(Error::IoError)?; + serde_json::from_str(&json).map_err(|e| Error::DeserializeError(e.to_string())) + } } impl Provable for EpochUpdateBatch { @@ -112,6 +333,7 @@ impl Provable for EpochUpdateBatch { .circuit_inputs .header .slot; + let first_epoch = helpers::slot_to_epoch_id(first_slot); let last_slot = self .circuit_inputs .epochs @@ -120,40 +342,17 @@ impl Provable for EpochUpdateBatch { .circuit_inputs .header .slot; - let dir_path = format!("batches/epoch_batch/{}_to_{}", first_slot, last_slot); + let last_epoch = helpers::slot_to_epoch_id(last_slot); + let dir_path = format!("batches/epoch_batch/{}_to_{}", first_epoch, last_epoch); fs::create_dir_all(dir_path.clone()).map_err(Error::IoError)?; let path = format!( "{}/input_batch_{}_to_{}.json", - dir_path, first_slot, last_slot + dir_path, first_epoch, last_epoch ); fs::write(path.clone(), json).map_err(Error::IoError)?; Ok(path) } - fn from_json(slot: u64) -> Result - where - T: serde::de::DeserializeOwned, - { - // Pattern match for files like: batches/epoch_batch/6709248_to_6710272/input_batch_6709248_to_6710272.json - let path = format!( - "batches/epoch_batch/*_to_{}/input_batch_*_to_{}.json", - slot, slot - ); - let glob_pattern = glob::glob(&path) - .map_err(|e| Error::IoError(std::io::Error::new(std::io::ErrorKind::Other, e)))?; - - // Take the first matching file - let path = glob_pattern.take(1).next().ok_or_else(|| { - Error::IoError(std::io::Error::new( - std::io::ErrorKind::NotFound, - "No matching file found", - )) - })?; - - let json = fs::read_to_string(path.unwrap()).map_err(Error::IoError)?; - serde_json::from_str(&json).map_err(|e| Error::DeserializeError(e.to_string())) - } - fn proof_type(&self) -> crate::traits::ProofType { crate::traits::ProofType::EpochBatch } @@ -167,6 +366,7 @@ impl Provable for EpochUpdateBatch { .circuit_inputs .header .slot; + let first_epoch = helpers::slot_to_epoch_id(first_slot); let last_slot = self .circuit_inputs .epochs @@ -175,9 +375,35 @@ impl Provable for EpochUpdateBatch { .circuit_inputs .header .slot; + let last_epoch = helpers::slot_to_epoch_id(last_slot); format!( "batches/epoch_batch/{}_to_{}/pie_batch_{}_to_{}.zip", - first_slot, last_slot, 
first_slot, last_slot + first_epoch, last_epoch, first_epoch, last_epoch + ) + } + + fn inputs_path(&self) -> String { + let first_slot = self + .circuit_inputs + .epochs + .first() + .unwrap() + .circuit_inputs + .header + .slot; + let first_epoch = helpers::slot_to_epoch_id(first_slot); + let last_slot = self + .circuit_inputs + .epochs + .last() + .unwrap() + .circuit_inputs + .header + .slot; + let last_epoch = helpers::slot_to_epoch_id(last_slot); + format!( + "batches/epoch_batch/{}_to_{}/input_batch_{}_to_{}.json", + first_epoch, last_epoch, first_epoch, last_epoch ) } } diff --git a/client-rs/src/epoch_update.rs b/client-rs/src/epoch_update.rs index af33ed4..a6d966b 100644 --- a/client-rs/src/epoch_update.rs +++ b/client-rs/src/epoch_update.rs @@ -1,5 +1,6 @@ use std::fs; +use crate::constants; use crate::{ execution_header::ExecutionHeaderProof, traits::{ProofType, Provable, Submittable}, @@ -15,6 +16,7 @@ use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use starknet::{core::types::Felt, macros::selector}; use starknet_crypto::poseidon_hash_many; +use tracing::info; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; @@ -35,6 +37,17 @@ impl EpochUpdate { } } +impl EpochUpdate { + pub fn from_json(slot: u64) -> Result + where + T: serde::de::DeserializeOwned, + { + let path = format!("batches/epoch/{}/input_{}.json", slot, slot); + let json = fs::read_to_string(path).map_err(Error::IoError)?; + serde_json::from_str(&json).map_err(|e| Error::DeserializeError(e.to_string())) + } +} + impl Provable for EpochUpdate { fn id(&self) -> String { let mut hasher = Sha256::new(); @@ -55,15 +68,6 @@ impl Provable for EpochUpdate { Ok(path) } - fn from_json(slot: u64) -> Result - where - T: serde::de::DeserializeOwned, - { - let path = format!("batches/epoch/{}/input_{}.json", slot, slot); - let json = fs::read_to_string(path).map_err(Error::IoError)?; - serde_json::from_str(&json).map_err(|e| Error::DeserializeError(e.to_string())) - } - fn pie_path(&self) -> String { format!( "batches/epoch/{}/pie_{}.zip", @@ -74,6 +78,13 @@ impl Provable for EpochUpdate { fn proof_type(&self) -> ProofType { ProofType::Epoch } + + fn inputs_path(&self) -> String { + format!( + "batches/epoch/{}/input_{}.json", + self.circuit_inputs.header.slot, self.circuit_inputs.header.slot + ) + } } /// Contains all necessary inputs for generating and verifying epoch proofs @@ -163,18 +174,22 @@ impl EpochCircuitInputs { mut slot: u64, ) -> Result { let mut attempts = 0; - const MAX_ATTEMPTS: u8 = 3; let header = loop { match client.get_header(slot).await { Ok(header) => break header, Err(Error::EmptySlotDetected(_)) => { attempts += 1; - if attempts >= MAX_ATTEMPTS { + if attempts >= constants::MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS { return Err(Error::EmptySlotDetected(slot)); } slot += 1; - println!("Empty slot detected! Attempt {}/{}. Fetching slot: {}", attempts, MAX_ATTEMPTS, slot); + info!( + "Empty slot detected! Attempt {}/{}. 
Fetching slot: {}", + attempts, + constants::MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS, + slot + ); } Err(e) => return Err(e), // Propagate other errors immediately } @@ -182,7 +197,6 @@ impl EpochCircuitInputs { let sync_agg = client.get_sync_aggregate(slot).await?; let validator_pubs = client.get_sync_committee_validator_pubs(slot).await?; - // Process the sync committee data let signature_point = Self::extract_signature_point(&sync_agg)?; let non_signers = Self::derive_non_signers(&sync_agg, &validator_pubs); diff --git a/client-rs/src/helpers.rs b/client-rs/src/helpers.rs new file mode 100644 index 0000000..986c56a --- /dev/null +++ b/client-rs/src/helpers.rs @@ -0,0 +1,93 @@ +use crate::{ + constants::{ + EPOCHS_PER_SYNC_COMMITTEE, SLOTS_PER_EPOCH, SLOTS_PER_SYNC_COMMITTEE, TARGET_BATCH_SIZE, + }, + Error, +}; +use alloy_primitives::FixedBytes; +use starknet::core::types::Felt; +use tracing::info; + +pub fn slot_to_epoch_id(slot: u64) -> u64 { + slot / SLOTS_PER_EPOCH +} + +pub fn slot_to_sync_committee_id(slot: u64) -> u64 { + slot / SLOTS_PER_SYNC_COMMITTEE +} + +pub fn calculate_slots_range_for_batch(first_slot: u64) -> (u64, u64) { + let start_slot = (u64::try_from(first_slot).unwrap() / 32) * 32 + 32; + let term = start_slot / 0x2000; + let mut end_slot = (term + 1) * 0x2000 - 32; + + info!("Slots in Term: Start {}, End {}", start_slot, end_slot); + let epoch_gap = (end_slot - start_slot) / SLOTS_PER_EPOCH; + info!( + "Available Epochs in this Sync Committee period: {}", + epoch_gap + ); + + // if the gap is smaller then x2 the target size, use the entire gap + if epoch_gap >= TARGET_BATCH_SIZE * 2 { + end_slot = start_slot + TARGET_BATCH_SIZE * SLOTS_PER_EPOCH; + } + + info!("Selected Slots: Start {}, End {}", start_slot, end_slot); + info!("Epoch Count: {}", (end_slot - start_slot) / SLOTS_PER_EPOCH); + + (start_slot, end_slot) +} + +/// Computes the slot numbers for term of specified slot +pub async fn calculate_batching_range_for_slot(slot: u64) -> Result<(u64, u64), Error> { + let next_epoch_slot = (u64::try_from(slot).unwrap() / 32) * 32 + 32; + let term = next_epoch_slot / 0x2000; + let terms_last_epoch_slot = (term + 1) * 0x2000 - 32; + Ok((next_epoch_slot, terms_last_epoch_slot)) +} + +/// Returns the first epoch signed by the specified sync committee +pub fn get_first_epoch_for_sync_committee(sync_committee_id: u64) -> u64 { + sync_committee_id * EPOCHS_PER_SYNC_COMMITTEE +} + +/// Returns the last epoch signed by the specified sync committee +pub fn get_last_epoch_for_sync_committee(sync_committee_id: u64) -> u64 { + (sync_committee_id + 1) * EPOCHS_PER_SYNC_COMMITTEE - 1 +} + +pub fn get_first_slot_for_epoch(epoch: u64) -> u64 { + epoch * SLOTS_PER_EPOCH +} + +pub fn get_last_slot_for_epoch(epoch: u64) -> u64 { + (epoch + 1) * SLOTS_PER_EPOCH - 1 +} + +pub fn get_sync_committee_id_by_epoch(epoch: u64) -> u64 { + epoch / EPOCHS_PER_SYNC_COMMITTEE +} + +pub fn get_sync_committee_id_by_slot(slot: u64) -> u64 { + slot / SLOTS_PER_SYNC_COMMITTEE +} + +pub fn get_first_slot_for_sync_committee(sync_committee: u64) -> u64 { + sync_committee * SLOTS_PER_SYNC_COMMITTEE +} + +pub fn get_last_slot_for_sync_committee(sync_committee: u64) -> u64 { + (sync_committee + 1) * SLOTS_PER_SYNC_COMMITTEE - 1 +} + +// Since beacon chain RPCs have different response structure (quicknode responds different than nidereal) we use this event extraction logic +pub fn extract_json_from_event(event_text: &str) -> Option { + for line in event_text.lines() { + if line.starts_with("data:") { + // Extract the 
JSON after "data:" + return Some(line.trim_start_matches("data:").trim().to_string()); + } + } + None +} diff --git a/client-rs/src/main.rs b/client-rs/src/main.rs index dbeeb4a..395bfa5 100644 --- a/client-rs/src/main.rs +++ b/client-rs/src/main.rs @@ -1,8 +1,14 @@ +#![allow(dead_code)] +#![allow(unused_imports)] +mod bankai_client; mod config; +mod constants; mod contract_init; pub mod epoch_batch; mod epoch_update; mod execution_header; +mod helpers; +mod state; mod sync_committee; mod traits; mod utils; @@ -21,110 +27,97 @@ use utils::{ rpc::BeaconRpcClient, starknet_client::{StarknetClient, StarknetError}, }; + +use bankai_client::BankaiClient; // use rand::Rng; // use std::fs::File; // use std::io::Write; use clap::{Parser, Subcommand}; use dotenv::from_filename; +use state::Error; use std::env; +use tracing::Level; +use tracing_subscriber::FmtSubscriber; -#[derive(Debug)] -pub enum Error { - InvalidProof, - RpcError(reqwest::Error), - DeserializeError(String), - IoError(std::io::Error), - StarknetError(StarknetError), - BeaconStateProofError(BeaconStateProofError), - BlockNotFound, - FetchSyncCommitteeError, - FailedFetchingBeaconState, - InvalidBLSPoint, - MissingRpcUrl, - EmptySlotDetected(u64), - RequiresNewerEpoch(Felt), - CairoRunError(String), - AtlanticError(reqwest::Error), - InvalidResponse(String), - InvalidMerkleTree, -} - -impl From for Error { - fn from(e: StarknetError) -> Self { - Error::StarknetError(e) - } -} +// impl From for Error { +// fn from(e: StarknetError) -> Self { +// Error::StarknetError(e) +// } +// } -struct BankaiClient { - client: BeaconRpcClient, - starknet_client: StarknetClient, - config: BankaiConfig, - atlantic_client: AtlanticClient, -} +// struct BankaiClient { +// client: BeaconRpcClient, +// starknet_client: StarknetClient, +// config: BankaiConfig, +// atlantic_client: AtlanticClient, +// } -impl BankaiClient { - pub async fn new() -> Self { - from_filename(".env.sepolia").ok(); - let config = BankaiConfig::default(); - Self { - client: BeaconRpcClient::new(env::var("BEACON_RPC_URL").unwrap()), - starknet_client: StarknetClient::new( - env::var("STARKNET_RPC_URL").unwrap().as_str(), - env::var("STARKNET_ADDRESS").unwrap().as_str(), - env::var("STARKNET_PRIVATE_KEY").unwrap().as_str(), - ) - .await - .unwrap(), - atlantic_client: AtlanticClient::new( - config.atlantic_endpoint.clone(), - env::var("ATLANTIC_API_KEY").unwrap(), - ), - config, - } - } +// impl BankaiClient { +// pub async fn new() -> Self { +// from_filename(".env.sepolia").ok(); +// let config = BankaiConfig::default(); +// Self { +// client: BeaconRpcClient::new(env::var("BEACON_RPC_URL").unwrap()), +// starknet_client: StarknetClient::new( +// env::var("STARKNET_RPC_URL").unwrap().as_str(), +// env::var("STARKNET_ADDRESS").unwrap().as_str(), +// env::var("STARKNET_PRIVATE_KEY").unwrap().as_str(), +// ) +// .await +// .unwrap(), +// atlantic_client: AtlanticClient::new( +// config.atlantic_endpoint.clone(), +// env::var("ATLANTIC_API_KEY").unwrap(), +// ), +// config, +// } +// } - pub async fn get_sync_committee_update( - &self, - mut slot: u64, - ) -> Result { - let mut attempts = 0; - const MAX_ATTEMPTS: u8 = 3; +// pub async fn get_sync_committee_update( +// &self, +// mut slot: u64, +// ) -> Result { +// let mut attempts = 0; +// const MAX_ATTEMPTS: u8 = 3; - // Before we start generating the proof, we ensure the slot was not missed - let _header = loop { - match self.client.get_header(slot).await { - Ok(header) => break header, - Err(Error::EmptySlotDetected(_)) => { - 
attempts += 1; - if attempts >= MAX_ATTEMPTS { - return Err(Error::EmptySlotDetected(slot)); - } - slot += 1; - println!("Empty slot detected! Attempt {}/{}. Fetching slot: {}", attempts, MAX_ATTEMPTS, slot); - } - Err(e) => return Err(e), // Propagate other errors immediately - } - }; +// // Before we start generating the proof, we ensure the slot was not missed +// let _header = loop { +// match self.client.get_header(slot).await { +// Ok(header) => break header, +// Err(Error::EmptySlotDetected(_)) => { +// attempts += 1; +// if attempts >= MAX_ATTEMPTS { +// return Err(Error::EmptySlotDetected(slot)); +// } +// slot += 1; +// println!( +// "Empty slot detected! Attempt {}/{}. Fetching slot: {}", +// attempts, MAX_ATTEMPTS, slot +// ); +// } +// Err(e) => return Err(e), // Propagate other errors immediately +// } +// }; - let proof: SyncCommitteeUpdate = SyncCommitteeUpdate::new(&self.client, slot).await?; +// let proof: SyncCommitteeUpdate = SyncCommitteeUpdate::new(&self.client, slot).await?; - Ok(proof) - } +// Ok(proof) +// } - pub async fn get_epoch_proof(&self, slot: u64) -> Result { - let epoch_proof = EpochUpdate::new(&self.client, slot).await?; - Ok(epoch_proof) - } +// pub async fn get_epoch_proof(&self, slot: u64) -> Result { +// let epoch_proof = EpochUpdate::new(&self.client, slot).await?; +// Ok(epoch_proof) +// } - pub async fn get_contract_initialization_data( - &self, - slot: u64, - config: &BankaiConfig, - ) -> Result { - let contract_init = ContractInitializationData::new(&self.client, slot, config).await?; - Ok(contract_init) - } -} +// pub async fn get_contract_initialization_data( +// &self, +// slot: u64, +// config: &BankaiConfig, +// ) -> Result { +// let contract_init = ContractInitializationData::new(&self.client, slot, config).await?; +// Ok(contract_init) +// } +// } #[derive(Subcommand)] enum Commands { @@ -159,6 +152,10 @@ enum Commands { ProveNextCommittee, ProveNextEpoch, ProveNextEpochBatch, + ProveCommitteeAtSlot { + #[arg(long, short)] + slot: u64, + }, CheckBatchStatus { #[arg(long, short)] batch_id: String, @@ -167,6 +164,10 @@ enum Commands { #[arg(long, short)] batch_id: String, }, + GetEpochProof { + #[arg(long, short)] + epoch_id: u64, + }, VerifyEpoch { #[arg(long, short)] batch_id: String, @@ -177,7 +178,9 @@ enum Commands { #[arg(long, short)] batch_id: String, #[arg(long, short)] - slot: u64, + first_slot: u64, + #[arg(long, short)] + last_slot: u64, }, VerifyCommittee { #[arg(long, short)] @@ -207,6 +210,13 @@ async fn main() -> Result<(), Error> { // Load .env.sepolia file from_filename(".env.sepolia").ok(); + let subscriber = FmtSubscriber::builder() + // .with_max_level(Level::TRACE) + .with_max_level(Level::INFO) + .finish(); + + tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); + let cli = Cli::parse(); let bankai = BankaiClient::new().await; @@ -287,18 +297,19 @@ async fn main() -> Result<(), Error> { .await?; let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); println!("Min Slot Required: {}", lowest_committee_update_slot); - let latest_epoch = bankai + let latest_epoch_slot = bankai .starknet_client .get_latest_epoch_slot(&bankai.config) .await?; - println!("Latest epoch: {}", latest_epoch); - if latest_epoch < lowest_committee_update_slot { - return Err(Error::RequiresNewerEpoch(latest_epoch)); + println!("Latest epoch slot: {}", latest_epoch_slot); + if latest_epoch_slot < lowest_committee_update_slot { + return 
Err(Error::RequiresNewerEpoch(latest_epoch_slot)); } let update = bankai - .get_sync_committee_update(latest_epoch.try_into().unwrap()) + .get_sync_committee_update(latest_epoch_slot.try_into().unwrap()) .await?; - CairoRunner::generate_pie(&update, &bankai.config)?; + let _ = update.export()?; + CairoRunner::generate_pie(&update, &bankai.config, None, None).await?; let batch_id = bankai.atlantic_client.submit_batch(update).await?; println!("Batch Submitted: {}", batch_id); } @@ -311,15 +322,37 @@ async fn main() -> Result<(), Error> { // make sure next_epoch % 32 == 0 let next_epoch = (u64::try_from(latest_epoch).unwrap() / 32) * 32 + 32; println!("Fetching Inputs for Epoch: {}", next_epoch); - let proof = bankai.get_epoch_proof(next_epoch).await?; - CairoRunner::generate_pie(&proof, &bankai.config)?; - let batch_id = bankai.atlantic_client.submit_batch(proof).await?; + // let proof = bankai.get_epoch_proof(next_epoch).await?; + let epoch_update = EpochUpdate::new(&bankai.client, next_epoch).await?; + let _ = epoch_update.export()?; + CairoRunner::generate_pie(&epoch_update, &bankai.config, None, None).await?; + let batch_id = bankai.atlantic_client.submit_batch(epoch_update).await?; println!("Batch Submitted: {}", batch_id); } Commands::ProveNextEpochBatch => { - let proof = EpochUpdateBatch::new(&bankai).await?; - CairoRunner::generate_pie(&proof, &bankai.config)?; - let batch_id = bankai.atlantic_client.submit_batch(proof).await?; + let epoch_update = EpochUpdateBatch::new(&bankai).await?; + println!("Update contents: {:?}", epoch_update); + let _ = epoch_update.export()?; + CairoRunner::generate_pie(&epoch_update, &bankai.config, None, None).await?; + let batch_id = bankai.atlantic_client.submit_batch(epoch_update).await?; + println!("Batch Submitted: {}", batch_id); + } + Commands::ProveCommitteeAtSlot { slot } => { + let latest_committee_id = bankai + .starknet_client + .get_latest_committee_id(&bankai.config) + .await?; + let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); + println!("Min Slot Required: {}", lowest_committee_update_slot); + // if slot < lowest_committee_update_slot { + // return Err(Error::RequiresNewerEpoch(slot)); + // } + let update = bankai + .get_sync_committee_update(slot.try_into().unwrap()) + .await?; + let _ = update.export()?; + CairoRunner::generate_pie(&update, &bankai.config, None, None).await?; + let batch_id = bankai.atlantic_client.submit_batch(update).await?; println!("Batch Submitted: {}", batch_id); } Commands::VerifyEpoch { batch_id, slot } => { @@ -338,13 +371,18 @@ async fn main() -> Result<(), Error> { println!("Batch not completed yet. Status: {}", status); } } - Commands::VerifyEpochBatch { batch_id, slot } => { + Commands::VerifyEpochBatch { + batch_id, + first_slot, + last_slot, + } => { let status = bankai .atlantic_client .check_batch_status(batch_id.as_str()) .await?; if status == "DONE" { - let update = EpochUpdateBatch::from_json::(slot)?; + let update = + EpochUpdateBatch::from_json::(first_slot, last_slot)?; bankai .starknet_client .submit_update(update.expected_circuit_outputs, &bankai.config) @@ -386,6 +424,14 @@ async fn main() -> Result<(), Error> { println!("Batch not completed yet. 
Status: {}", status); } } + Commands::GetEpochProof { epoch_id } => { + let epoch_proof = bankai + .starknet_client + .get_epoch_proof(epoch_id, &bankai.config) + .await?; + + println!("Retrieved epoch proof from contract: {:?}", epoch_proof); + } } Ok(()) diff --git a/client-rs/src/routes/dashboard.rs b/client-rs/src/routes/dashboard.rs new file mode 100644 index 0000000..5ac4d8a --- /dev/null +++ b/client-rs/src/routes/dashboard.rs @@ -0,0 +1,256 @@ +use crate::{ + helpers, + state::{AppState, JobStatus}, +}; +use axum::extract::State; +use num_traits::{SaturatingSub, ToPrimitive}; + +pub async fn handle_get_dashboard(State(state): State) -> String { + let db = state.db_manager.clone(); + let bankai = state.bankai.clone(); + + // Fetch required stats + let latest_beacon_slot = bankai.client.get_head_slot().await.unwrap_or_default(); + let latest_verified_slot = bankai + .starknet_client + .get_latest_epoch_slot(&bankai.config) + .await + .unwrap_or_default() + .to_string() + .parse::() + .unwrap_or(0); + + let latest_beacon_committee = helpers::get_sync_committee_id_by_slot(latest_beacon_slot); + + let latest_verified_committee = bankai + .starknet_client + .get_latest_committee_id(&bankai.config) + .await + .unwrap_or_default() + .to_string() + .parse::() + .unwrap_or(0) + - 1; + + // Calculate success rate from database + let total_jobs = db.count_total_jobs().await.unwrap_or(0); + let successful_jobs = db.count_successful_jobs().await.unwrap_or(0); + let success_rate = if total_jobs > 0 { + ((successful_jobs as f64 / total_jobs as f64) * 100.0).round() + } else { + 0.0 + }; + + // Calculate average job duration + let avg_duration = db.get_average_job_duration().await.unwrap_or(0); + let avg_duration_str = format!("{}s", avg_duration); + + let jobs_in_progress = db + .count_jobs_in_progress() + .await + .unwrap_or(Some(0)) + .unwrap(); + + // Fetch last 20 batch jobs + let recent_batches = db.get_recent_batch_jobs(20).await.unwrap_or_default(); + + // Format batch information + let batch_info = recent_batches + .iter() + .map(|entry| { + format!( + "║ Batch {:}: {} -> {} [{}] {:<32} {:<66} {} ║", + entry.job.job_uuid.to_string()[..8].to_string(), + entry.job.batch_range_begin_epoch, + entry.job.batch_range_end_epoch, + match entry.job.job_status { + JobStatus::Done => "✓", + JobStatus::Error => "✗", + _ => "⋯", + }, + entry.job.job_status.to_string(), + entry + .tx_hash + .as_ref() + .map_or("-".to_string(), |s| s.clone()), + entry.updated_at + ) + }) + .collect::>() + .join("\n"); + + let batch_display = if recent_batches.is_empty() { + " ║ No recent batches found ║ " + .to_string() + } else { + batch_info + }; + + // Fetch last 20 batch jobs + let recent_sync_committee_jobs = db + .get_recent_sync_committee_jobs(20) + .await + .unwrap_or_default(); + + // Format batch information + let sync_committee_info = recent_sync_committee_jobs + .iter() + .map(|entry| { + format!( + "║ Job {:}: {} {} [{}] {:<32} {:<66} {} ║", + entry.job.job_uuid.to_string()[..8].to_string(), + entry.job.slot, + helpers::get_sync_committee_id_by_slot(entry.job.slot.to_u64().unwrap()), + match entry.job.job_status { + JobStatus::Done => "✓", + JobStatus::Error => "✗", + _ => "⋯", + }, + entry.job.job_status.to_string(), + entry + .tx_hash + .as_ref() + .map_or("-".to_string(), |s| s.clone()), + entry.updated_at + ) + }) + .collect::>() + .join("\n"); + + let sync_committee_jobs_display = if recent_batches.is_empty() { + " ║ No recent sync committee jobs found ║ " + .to_string() + } else { + sync_committee_info 
+ }; + + // Update system health indicators with simpler checks + let daemon_status = "● Active"; + let db_status = if db.is_connected().await { + "● Connected" + } else { + "○ Disconnected" + }; + let beacon_status = if bankai.client.get_head_slot().await.is_ok() { + "● Connected" + } else { + "○ Disconnected" + }; + + let epoch_gap = + (latest_beacon_slot.saturating_sub(latest_verified_slot) as f64 / 32.0).round() as u64; + + create_ascii_dashboard( + latest_beacon_slot, + latest_verified_slot, + latest_beacon_committee, + latest_verified_committee, + epoch_gap, + success_rate, + &avg_duration_str, + jobs_in_progress, + daemon_status, + db_status, + beacon_status, + &batch_display, + &sync_committee_jobs_display, + ) +} + +pub fn create_ascii_dashboard( + latest_beacon_slot: u64, + latest_verified_slot: u64, + latest_beacon_committee: u64, + latest_verified_committee: u64, + epoch_gap: u64, + success_rate: f64, + avg_duration_str: &str, + jobs_in_progress: u64, + daemon_status: &str, + db_status: &str, + beacon_status: &str, + batch_display: &str, + sync_committee_jobs_display: &str, +) -> String { + format!( + r#" +BBBBBBBBBBBBBBBBB kkkkkkkk iiii +B::::::::::::::::B k::::::k i::::i +B::::::BBBBBB:::::B k::::::k iiii +BB:::::B B:::::B k::::::k + B::::B B:::::B aaaaaaaaaaaaa nnnn nnnnnnnn k:::::k kkkkkkk aaaaaaaaaaaaa iiiiiii + B::::B B:::::B a::::::::::::a n:::nn::::::::nn k:::::k k:::::k a::::::::::::a i:::::i + B::::BBBBBB:::::B aaaaaaaaa:::::a n::::::::::::::nn k:::::k k:::::k aaaaaaaaa:::::a i::::i + B:::::::::::::BB a::::a nn:::::::::::::::n k:::::k k:::::k a::::a i::::i + B::::BBBBBB:::::B aaaaaaa:::::a n:::::nnnn:::::n k::::::k:::::k aaaaaaa:::::a i::::i + B::::B B:::::B aa::::::::::::a n::::n n::::n k:::::::::::k aa::::::::::::a i::::i + B::::B B:::::B a::::aaaa::::::a n::::n n::::n k:::::::::::k a::::aaaa::::::a i::::i + B::::B B:::::Ba::::a a:::::a n::::n n::::n k::::::k:::::k a::::a a:::::a i::::i +BB:::::BBBBBB::::::Ba::::a a:::::a n::::n n::::nk::::::k k:::::k a::::a a:::::a i::::::i +B:::::::::::::::::B a:::::aaaa::::::a n::::n n::::nk::::::k k:::::k a:::::aaaa::::::a i::::::i +B::::::::::::::::B a::::::::::aa:::a n::::n n::::nk::::::k k:::::k a::::::::::aa:::ai::::::i +BBBBBBBBBBBBBBBBB aaaaaaaaaa aaaa nnnnnn nnnnnnkkkkkkkk kkkkkkk aaaaaaaaaa aaaaiiiiiiii + _ _ _ _ _ + | |__ _ _ | | | | ___ _ __ ___ __| | ___ | |_ _ _ ___ + | '_ \| | | | | |_| |/ _ \ '__/ _ \ / _` |/ _ \| __| | | / __| + | |_) | |_| | | _ | __/ | | (_) | (_| | (_) | |_| |_| \__ \ + |_.__/ \__, | |_| |_|\___|_| \___/ \__,_|\___/ \__|\__,_|___/ + |___/ + +╔════════════════════════════════════════ DASHBOARD OVERVIEW ══════════════════════════════════════════════════════════════════════════════════════════════════════╗ +║ ║ +║ • Daemon: {daemon_status:<12} • Database: {db_status:<12} • Beacon: {beacon_status:<12} ║ +║ ║ +║ Metrics: ║ +║ • Success Rate: {success_rate:<10} ║ +║ • Average Duration: {avg_duration:<10} ║ +║ • Jobs in Progress: {jobs_in_progress:<10} ║ +║ ║ +║ Beacon Info: ║ +║ • Latest Beacon Slot: {latest_beacon_slot:<12} • Latest Beacon Committee: {latest_beacon_committee:<12} ║ +║ • Latest Verified Slot: {latest_verified_slot:<12} • Latest Verified Committee: {latest_verified_committee:<12} ║ +║ • Epoch Gap: {epoch_gap:<12} ║ +║ ║ +╠═══════════════════════════════════════ RECENT BATCH JOBS ════════════════════════════════════════════════════════════════════════════════════════════════════════╣ +║ UUID: FROM: TO: STATUS: TX: TIMESTAMP: ║ +║ 
──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ +{batch_display_block} +╠══════════════════════════════ RECENT SYNC COMMITTEE JOBS ═════════════════════════════════════════════════════════════════════════════════════════════════════╣ +║ UUID: SLOT: COMMITTEE: STATUS: TX: TIMESTAMP: ║ +║ ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ +{sync_committee_jobs_display_block} +╚══════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╝ + + ____ _ +| _ \ _____ _____ _ __ ___ __| | +| |_) / _ \ \ /\ / / _ \ '__/ _ \/ _` | +| __/ (_) \ V V / __/ | | __/ (_| | +|_| \___/ \_/\_/ \___|_| \___|\__,_| + _ + | |__ _ _ + | '_ \| | | | + | |_) | |_| | + |_.__/ \__, | + |___/ + ____ + / ___| __ _ _ __ __ _ __ _ __ _ _ _ + | | _ / _` | '__/ _` |/ _` |/ _` | ( \/ ) + | |_| | (_| | | | (_| | (_| | (_| | \ / + \____|\__,_|_| \__,_|\__, |\__,_| \/ + |___/ +"#, + daemon_status = daemon_status, + db_status = db_status, + beacon_status = beacon_status, + success_rate = format!("{:.2}%", success_rate), + avg_duration = avg_duration_str, + jobs_in_progress = jobs_in_progress, + latest_beacon_slot = latest_beacon_slot, + latest_verified_slot = latest_verified_slot, + latest_beacon_committee = latest_beacon_committee, + latest_verified_committee = latest_verified_committee, + epoch_gap = epoch_gap, + batch_display_block = batch_display, + sync_committee_jobs_display_block = sync_committee_jobs_display + ) +} diff --git a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs new file mode 100644 index 0000000..de98c13 --- /dev/null +++ b/client-rs/src/routes/mod.rs @@ -0,0 +1,275 @@ +use crate::{helpers, state::AppState}; +use alloy_primitives::map::HashMap; +use axum::{ + extract::{Path, State}, + response::IntoResponse, + routing::{get, post}, + Json, Router, +}; +use num_traits::cast::ToPrimitive; +use serde_json::{json, Value}; +use tracing::error; +use uuid::Uuid; + +pub mod dashboard; + +// RPC requests handling functions // +pub async fn handle_root_route(State(_state): State) -> impl IntoResponse { + Json(json!({ "success": true, "message": "Bankai daemon running" })) +} + +// Handler for GET /status +pub async fn handle_get_status(State(state): State) -> impl IntoResponse { + let last_epoch_in_progress = match state.db_manager.get_latest_epoch_in_progress().await { + Ok(Some(epoch)) => { + let last_epoch_in_progress = epoch.to_u64().unwrap(); + last_epoch_in_progress + } + Ok(None) => 0, + Err(_) => 0, + }; + let in_progress_jobs_count = state.db_manager.count_jobs_in_progress().await.unwrap(); + let last_sync_committee_in_progress = state + .db_manager + .get_latest_sync_committee_in_progress() + .await + .unwrap() + .unwrap(); + + // let beacon_chain_state = state + // .db_manager + // .get_latest_known_beacon_chain_state() + // .await + // .unwrap(); + // + let jobs_status_counts = state + .db_manager + .get_jobs_count_by_status() + .await + .unwrap_or_default(); + + let mut jobs_status_map = HashMap::new(); + for job_status_count in jobs_status_counts { + jobs_status_map.insert(job_status_count.status.to_string(), job_status_count.count); + } + + Json(json!({ "success": true, "details": { + "last_epoch_in_progress": last_epoch_in_progress, + 
"last_sync_committee_in_progress": last_sync_committee_in_progress, + "jobs_in_progress_count": in_progress_jobs_count, + "jobs_statuses": jobs_status_map + } })) +} + +// // Handler for GET /epoch/:slot +// pub async fn handle_get_epoch_update( +// Path(slot): Path, +// State(state): State, +// ) -> impl IntoResponse { +// match state.bankai.get_epoch_proof(slot).await { +// Ok(epoch_update) => { +// // Convert the data to `serde_json::Value` +// let value: Value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { +// eprintln!("Failed to serialize EpochUpdate: {:?}", err); +// json!({ "error": "Internal server error" }) +// }); +// Json(value) +// } +// Err(err) => { +// eprintln!("Failed to fetch proof: {:?}", err); +// Json(json!({ "error": "Failed to fetch proof" })) +// } +// } +// } + +pub async fn handle_get_epoch_proof( + Path(slot): Path, + State(state): State, +) -> impl IntoResponse { + match state + .bankai + .starknet_client + .get_epoch_proof(slot, &state.bankai.config) + .await + { + Ok(epoch_update) => { + // Convert `EpochUpdate` to `serde_json::Value` + let value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { + eprintln!("Failed to serialize EpochUpdate: {:?}", err); + json!({ "error": "Internal server error" }) + }); + Json(value) + } + Err(err) => { + eprintln!("Failed to fetch proof: {:?}", err); + Json(json!({ "error": "Failed to fetch proof" })) + } + } +} + +pub async fn handle_get_committee_hash( + Path(committee_id): Path, + State(state): State, +) -> impl IntoResponse { + match state + .bankai + .starknet_client + .get_committee_hash(committee_id, &state.bankai.config) + .await + { + Ok(committee_hash) => { + // Convert `EpochUpdate` to `serde_json::Value` + let value = serde_json::to_value(committee_hash).unwrap_or_else(|err| { + eprintln!("Failed to serialize EpochUpdate: {:?}", err); + json!({ "error": "Internal server error" }) + }); + Json(value) + } + Err(err) => { + eprintln!("Failed to fetch proof: {:?}", err); + Json(json!({ "error": "Failed to fetch proof" })) + } + } +} + +pub async fn handle_get_latest_verified_slot(State(state): State) -> impl IntoResponse { + match state + .bankai + .starknet_client + .get_latest_epoch_slot(&state.bankai.config) + .await + { + Ok(latest_epoch) => { + // Convert `Felt` to a string and parse it as a hexadecimal number + let hex_string = latest_epoch.to_string(); // Ensure this converts to a "0x..." string + match u64::from_str_radix(hex_string.trim_start_matches("0x"), 16) { + Ok(decimal_epoch) => Json(json!({ "latest_verified_slot": decimal_epoch })), + Err(err) => { + eprintln!("Failed to parse latest_epoch as decimal: {:?}", err); + Json(json!({ "error": "Invalid epoch format" })) + } + } + } + Err(err) => { + eprintln!("Failed to fetch latest epoch: {:?}", err); + Json(json!({ "error": "Failed to fetch latest epoch" })) + } + } +} + +pub async fn handle_get_latest_verified_committee( + State(state): State, +) -> impl IntoResponse { + match state + .bankai + .starknet_client + .get_latest_committee_id(&state.bankai.config) + .await + { + Ok(latest_epoch) => { + // Convert `Felt` to a string and parse it as a hexadecimal number + let hex_string = latest_epoch.to_string(); // Ensure this converts to a "0x..." 
string + match u64::from_str_radix(hex_string.trim_start_matches("0x"), 16) { + Ok(committee_hash) => Json(json!({ "latest_verified_committee": committee_hash })), + Err(err) => { + eprintln!( + "Failed to parse latest_verified_committee as decimal: {:?}", + err + ); + Json(json!({ "error": "Invalid committee format" })) + } + } + } + Err(err) => { + eprintln!("Failed to fetch latest epoch: {:?}", err); + Json(json!({ "error": "Failed to fetch latest epoch" })) + } + } +} + +pub async fn handle_get_job_status( + Path(job_id): Path, + State(state): State, +) -> impl IntoResponse { + match state + .db_manager + .fetch_job_status(Uuid::parse_str(job_id.to_string().as_str()).unwrap()) + .await + { + Ok(Some(job_status)) => Json(json!({ "status": job_status.to_string()})), + Ok(None) => Json(json!({ "error": "Job not found" })), + Err(err) => { + eprintln!("Failed to fetch job status: {:?}", err); + Json(json!({ "error": "Failed to fetch job status" })) + } + } +} + +pub async fn handle_get_merkle_paths_for_epoch( + Path(epoch_id): Path, + State(state): State, +) -> impl IntoResponse { + match state.db_manager.get_merkle_paths_for_epoch(epoch_id).await { + Ok(merkle_paths) => { + if merkle_paths.len() > 0 { + Json(json!({ "epoch_id": epoch_id, "merkle_paths": merkle_paths })) + } else { + Json(json!({ "error": "Epoch not available now" })) + } + } + Err(err) => { + error!("Failed to fetch merkle paths epoch: {:?}", err); + Json(json!({ "error": "Failed to fetch latest epoch" })) + } + } +} + +pub async fn handle_get_decommitment_data_by_epoch( + Path(epoch_id): Path, + State(state): State, +) -> impl IntoResponse { + match state.db_manager.get_merkle_paths_for_epoch(epoch_id).await { + Ok(merkle_paths) => { + if merkle_paths.len() > 0 { + let circuit_outputs_decommitment_data = state + .db_manager + .get_epoch_decommitment_data(epoch_id) + .await + .unwrap(); //ExpectedEpochUpdateOutputs + + Json(json!({ + "epoch_id": epoch_id, + "decommitment_data_for_epoch": { + "merkle_paths": merkle_paths, + "circuit_outputs": circuit_outputs_decommitment_data + } + })) + } else { + Json(json!({ "error": "Epoch not available now" })) + } + } + Err(err) => { + error!("Failed to fetch merkle paths epoch: {:?}", err); + Json(json!({ "error": "Failed to fetch latest epoch" })) + } + } +} + +pub async fn handle_get_decommitment_data_by_slot( + Path(slot_id): Path, + State(state): State, +) -> impl IntoResponse { + let epoch_id = helpers::slot_to_epoch_id(slot_id.to_u64().unwrap()); + + handle_get_decommitment_data_by_epoch(Path(epoch_id.to_i32().unwrap()), State(state)).await +} + +pub async fn handle_get_decommitment_data_by_execution_height( + Path(slot_id): Path, + State(state): State, +) -> impl IntoResponse { + // Convert slot -> epoch + let epoch_id = helpers::slot_to_epoch_id(slot_id.to_u64().unwrap()); + + handle_get_decommitment_data_by_epoch(Path(epoch_id.to_i32().unwrap()), State(state)).await +} diff --git a/client-rs/src/state.rs b/client-rs/src/state.rs new file mode 100644 index 0000000..d36ff2f --- /dev/null +++ b/client-rs/src/state.rs @@ -0,0 +1,259 @@ +use crate::bankai_client::BankaiClient; +use crate::utils::database_manager::DatabaseManager; +use crate::utils::starknet_client::StarknetError; +use postgres_types::{FromSql, ToSql}; +use starknet::core::types::Felt; +use std::env; +use std::fmt; +use std::str::FromStr; +use std::sync::Arc; +use tokio::sync::mpsc; +use uuid::Uuid; + +#[derive(Clone, Debug)] +pub struct Job { + pub job_id: Uuid, + pub job_type: JobType, + pub job_status: 
JobStatus, + pub slot: Option, + pub batch_range_begin_epoch: Option, + pub batch_range_end_epoch: Option, +} + +#[derive(Clone, Debug)] +pub struct AppState { + pub db_manager: Arc, + pub tx: mpsc::Sender, + pub bankai: Arc, +} + +#[derive(Debug, FromSql, ToSql, Clone, Eq, Hash, PartialEq)] +#[postgres(name = "job_status")] +pub enum JobStatus { + #[postgres(name = "CREATED")] + Created, // Can act as queued and be picked up by worker to proccess + #[postgres(name = "PROGRAM_INPUTS_PREPARED")] + ProgramInputsPrepared, + #[postgres(name = "STARTED_FETCHING_INPUTS")] + StartedFetchingInputs, + #[postgres(name = "STARTED_TRACE_GENERATION")] + StartedTraceGeneration, + #[postgres(name = "PIE_GENERATED")] + PieGenerated, + #[postgres(name = "OFFCHAIN_PROOF_REQUESTED")] + AtlanticProofRequested, + #[postgres(name = "OFFCHAIN_PROOF_RETRIEVED")] + AtlanticProofRetrieved, + #[postgres(name = "WRAP_PROOF_REQUESTED")] + WrapProofRequested, + #[postgres(name = "WRAPPED_PROOF_DONE")] + WrappedProofDone, + #[postgres(name = "OFFCHAIN_COMPUTATION_FINISHED")] + OffchainComputationFinished, + #[postgres(name = "READY_TO_BROADCAST_ONCHAIN")] + ReadyToBroadcastOnchain, + #[postgres(name = "PROOF_VERIFY_CALLED_ONCHAIN")] + ProofVerifyCalledOnchain, + #[postgres(name = "DONE")] + Done, + #[postgres(name = "ERROR")] + Error, + #[postgres(name = "CANCELLED")] + Cancelled, +} + +impl ToString for JobStatus { + fn to_string(&self) -> String { + match self { + JobStatus::Created => "CREATED".to_string(), + JobStatus::StartedFetchingInputs => "STARTED_FETCHING_INPUTS".to_string(), + JobStatus::ProgramInputsPrepared => "PROGRAM_INPUTS_PREPARED".to_string(), + JobStatus::StartedTraceGeneration => "STARTED_TRACE_GENERATION".to_string(), + JobStatus::PieGenerated => "PIE_GENERATED".to_string(), + JobStatus::AtlanticProofRequested => "OFFCHAIN_PROOF_REQUESTED".to_string(), + JobStatus::AtlanticProofRetrieved => "OFFCHAIN_PROOF_RETRIEVED".to_string(), + JobStatus::WrapProofRequested => "WRAP_PROOF_REQUESTED".to_string(), + JobStatus::WrappedProofDone => "WRAPPED_PROOF_DONE".to_string(), + JobStatus::OffchainComputationFinished => "OFFCHAIN_COMPUTATION_FINISHED".to_string(), + JobStatus::ReadyToBroadcastOnchain => "READY_TO_BROADCAST_ONCHAIN".to_string(), + JobStatus::ProofVerifyCalledOnchain => "PROOF_VERIFY_CALLED_ONCHAIN".to_string(), + JobStatus::Done => "DONE".to_string(), + JobStatus::Cancelled => "CANCELLED".to_string(), + JobStatus::Error => "ERROR".to_string(), + } + } +} + +impl FromStr for JobStatus { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "CREATED" => Ok(JobStatus::Created), + "STARTED_FETCHING_INPUTS" => Ok(JobStatus::StartedFetchingInputs), + "PROGRAM_INPUTS_PREPARED" => Ok(JobStatus::ProgramInputsPrepared), + "STARTED_TRACE_GENERATION" => Ok(JobStatus::StartedTraceGeneration), + "PIE_GENERATED" => Ok(JobStatus::PieGenerated), + "OFFCHAIN_PROOF_REQUESTED" => Ok(JobStatus::AtlanticProofRequested), + "OFFCHAIN_PROOF_RETRIEVED" => Ok(JobStatus::AtlanticProofRetrieved), + "WRAP_PROOF_REQUESTED" => Ok(JobStatus::WrapProofRequested), + "WRAPPED_PROOF_DONE" => Ok(JobStatus::WrappedProofDone), + "OFFCHAIN_COMPUTATION_FINISHED" => Ok(JobStatus::OffchainComputationFinished), + "READY_TO_BROADCAST_ONCHAIN" => Ok(JobStatus::ReadyToBroadcastOnchain), + "PROOF_VERIFY_CALLED_ONCHAIN" => Ok(JobStatus::ProofVerifyCalledOnchain), + "DONE" => Ok(JobStatus::Done), + "CANCELLED" => Ok(JobStatus::Cancelled), + "ERROR" => Ok(JobStatus::Error), + _ => Err(format!("Invalid job status: {}", s)), + } + 
} +} + +#[derive(Debug, FromSql, ToSql, Clone)] +pub enum JobType { + //EpochUpdate, + EpochBatchUpdate, + SyncCommitteeUpdate, +} + +impl FromStr for JobType { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + //"EPOCH_UPDATE" => Ok(JobType::EpochUpdate), + "EPOCH_BATCH_UPDATE" => Ok(JobType::EpochBatchUpdate), + "SYNC_COMMITTEE_UPDATE" => Ok(JobType::SyncCommitteeUpdate), + _ => Err(format!("Invalid job type: {}", s)), + } + } +} + +impl fmt::Display for JobType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let value = match self { + //JobType::EpochUpdate => "EPOCH_UPDATE", + JobType::EpochBatchUpdate => "EPOCH_BATCH_UPDATE", + JobType::SyncCommitteeUpdate => "SYNC_COMMITTEE_UPDATE", + }; + write!(f, "{}", value) + } +} + +#[derive(Debug, FromSql, ToSql)] +pub enum AtlanticJobType { + ProofGeneration, + ProofWrapping, +} + +// Checking status of env vars +pub fn check_env_vars() -> Result<(), String> { + let required_vars = [ + "BEACON_RPC_URL", + "STARKNET_RPC_URL", + "STARKNET_ADDRESS", + "STARKNET_PRIVATE_KEY", + "ATLANTIC_API_KEY", + "PROOF_REGISTRY", + "POSTGRESQL_HOST", + "POSTGRESQL_USER", + "POSTGRESQL_PASSWORD", + "POSTGRESQL_DB_NAME", + "RPC_LISTEN_HOST", + "RPC_LISTEN_PORT", + "TRANSACTOR_API_KEY", + ]; + + for &var in &required_vars { + if env::var(var).is_err() { + return Err(format!("Environment variable `{}` is not set", var)); + } + } + + Ok(()) +} + +/// Errors types + +impl std::fmt::Display for StarknetError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + StarknetError::ProviderError(err) => write!(f, "Provider error: {}", err), + StarknetError::AccountError(msg) => write!(f, "Account error: {}", msg), + StarknetError::TransactionError(msg) => write!(f, "Transaction error: {}", msg), + StarknetError::TimeoutError => { + write!(f, "Waiting for transaction timeout error") + } + } + } +} + +impl std::error::Error for StarknetError {} + +#[allow(unused)] +#[derive(Debug)] +pub enum Error { + InvalidProof, + RpcError(reqwest::Error), + DeserializeError(String), + IoError(std::io::Error), + StarknetError(StarknetError), + BlockNotFound, + FetchSyncCommitteeError, + FailedFetchingBeaconState, + InvalidBLSPoint, + MissingRpcUrl, + EmptySlotDetected(u64), + RequiresNewerEpoch(Felt), + CairoRunError(String), + AtlanticError(reqwest::Error), + InvalidResponse(String), + PoolingTimeout(String), + InvalidMerkleTree, + DatabaseError(String), + TransactorError(reqwest::Error), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Error::InvalidProof => write!(f, "Invalid proof provided"), + Error::RpcError(err) => write!(f, "RPC error: {}", err), + Error::DeserializeError(msg) => write!(f, "Deserialization error: {}", msg), + Error::IoError(err) => write!(f, "I/O error: {}", err), + Error::StarknetError(err) => write!(f, "Starknet error: {}", err), + Error::BlockNotFound => write!(f, "Block not found"), + Error::FetchSyncCommitteeError => write!(f, "Failed to fetch sync committee"), + Error::FailedFetchingBeaconState => write!(f, "Failed to fetch beacon state"), + Error::InvalidBLSPoint => write!(f, "Invalid BLS point"), + Error::MissingRpcUrl => write!(f, "Missing RPC URL"), + Error::EmptySlotDetected(slot) => write!(f, "Empty slot detected: {}", slot), + Error::RequiresNewerEpoch(felt) => write!(f, "Requires newer epoch: {}", felt), + Error::CairoRunError(msg) => write!(f, "Cairo run error: {}", msg), + Error::AtlanticError(err) => 
write!(f, "Atlantic RPC error: {}", err), + Error::InvalidResponse(msg) => write!(f, "Invalid response: {}", msg), + Error::PoolingTimeout(msg) => write!(f, "Pooling timeout: {}", msg), + Error::InvalidMerkleTree => write!(f, "Invalid Merkle Tree"), + Error::DatabaseError(msg) => write!(f, "Database error: {}", msg), + Error::TransactorError(msg) => write!(f, "Transactor error: {}", msg), + } + } +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::RpcError(err) => Some(err), + Error::IoError(err) => Some(err), + Error::StarknetError(err) => Some(err), + Error::AtlanticError(err) => Some(err), + _ => None, // No underlying source for other variants + } + } +} + +impl From for Error { + fn from(e: StarknetError) -> Self { + Error::StarknetError(e) + } +} diff --git a/client-rs/src/sync_committee.rs b/client-rs/src/sync_committee.rs index afa2c6f..997b845 100644 --- a/client-rs/src/sync_committee.rs +++ b/client-rs/src/sync_committee.rs @@ -40,6 +40,15 @@ impl SyncCommitteeUpdate { expected_circuit_outputs, }) } + + pub fn from_json(slot: u64) -> Result + where + T: serde::de::DeserializeOwned, + { + let path = format!("batches/committee/{}/input_{}.json", slot, slot); + let json: String = fs::read_to_string(path).map_err(Error::IoError)?; + serde_json::from_str(&json).map_err(|e| Error::DeserializeError(e.to_string())) + } } impl Provable for SyncCommitteeUpdate { @@ -63,15 +72,6 @@ impl Provable for SyncCommitteeUpdate { Ok(path) } - fn from_json(slot: u64) -> Result - where - T: serde::de::DeserializeOwned, - { - let path = format!("batches/committee/{}/input_{}.json", slot, slot); - let json: String = fs::read_to_string(path).map_err(Error::IoError)?; - serde_json::from_str(&json).map_err(|e| Error::DeserializeError(e.to_string())) - } - fn pie_path(&self) -> String { format!( "batches/committee/{}/pie_{}.zip", @@ -80,6 +80,13 @@ impl Provable for SyncCommitteeUpdate { ) } + fn inputs_path(&self) -> String { + format!( + "batches/committee/{}/input_{}.json", + self.circuit_inputs.beacon_slot, self.circuit_inputs.beacon_slot, + ) + } + fn proof_type(&self) -> ProofType { ProofType::SyncCommittee } diff --git a/client-rs/src/traits.rs b/client-rs/src/traits.rs index 40414b5..5ae8574 100644 --- a/client-rs/src/traits.rs +++ b/client-rs/src/traits.rs @@ -19,9 +19,10 @@ pub enum ProofType { pub trait Provable: Serialize { fn id(&self) -> String; fn export(&self) -> Result; - fn from_json(slot: u64) -> Result - where - T: serde::de::DeserializeOwned; + // fn from_json(slot: u64) -> Result + // where + // T: serde::de::DeserializeOwned; fn proof_type(&self) -> ProofType; fn pie_path(&self) -> String; + fn inputs_path(&self) -> String; } diff --git a/client-rs/src/utils/atlantic_client.rs b/client-rs/src/utils/atlantic_client.rs index 1d43eeb..3ffa0f5 100644 --- a/client-rs/src/utils/atlantic_client.rs +++ b/client-rs/src/utils/atlantic_client.rs @@ -1,9 +1,17 @@ -use std::{env, fs}; - use crate::traits::{ProofType, Provable}; use crate::Error; +use futures::StreamExt; use reqwest::multipart::{Form, Part}; +use reqwest::Body; use serde::{Deserialize, Serialize}; +use std::env; +use std::path::PathBuf; +use tokio::fs; +use tokio::time::{sleep, Duration}; +use tokio_util::io::ReaderStream; +use tracing::{debug, error, info, trace}; + +#[derive(Debug)] pub struct AtlanticClient { endpoint: String, api_key: String, @@ -25,13 +33,61 @@ impl AtlanticClient { } pub async fn submit_batch(&self, batch: impl Provable) -> Result 
{ - let pie_path = batch.pie_path(); + let pie_path: PathBuf = batch.pie_path().into(); + + let meta = fs::metadata(pie_path.clone()) + .await + .map_err(Error::IoError)?; + let total_bytes = meta.len(); + + let file = fs::File::open(pie_path.clone()) + .await + .map_err(Error::IoError)?; + + let stream = ReaderStream::new(file); + + let progress_stream = stream.scan( + (0_u64, 10_u64), + move |(uploaded, next_threshold), chunk_result| { + match chunk_result { + Ok(chunk) => { + *uploaded += chunk.len() as u64; + let percent = (*uploaded as f64 / total_bytes as f64) * 100.0; + + if percent >= *next_threshold as f64 && *next_threshold <= 100 { + info!( + "Uploaded {}% of the PIE file to Atlantic API...", + *next_threshold + ); + *next_threshold += 10; + } + + // Pass the chunk further down the stream + futures::future::ready(Some(Ok(chunk))) + } + Err(e) => { + // Forward the error + futures::future::ready(Some(Err(e))) + } + } + }, + ); // Read the file as bytes - let file_bytes = fs::read(&pie_path).map_err(Error::IoError)?; - let file_part = Part::bytes(file_bytes) - .file_name(pie_path) // Provide a filename - .mime_str("application/zip") // Specify MIME type + // let file_bytes = fs::read(&pie_path).map_err(Error::IoError)?; + // let file_part = Part::bytes(file_bytes) + // .file_name(pie_path) // Provide a filename + // .mime_str("application/zip") // Specify MIME type + // .map_err(Error::AtlanticError)?; + let file_part = Part::stream(Body::wrap_stream(progress_stream)) + .file_name( + pie_path + .file_name() + .unwrap_or_default() + .to_string_lossy() + .to_string(), + ) + .mime_str("application/zip") .map_err(Error::AtlanticError)?; let external_id = format!( @@ -61,9 +117,9 @@ impl AtlanticClient { .map_err(Error::AtlanticError)?; if !response.status().is_success() { - println!("Error status: {}", response.status()); + error!("Error status: {}", response.status()); let error_text = response.text().await.map_err(Error::AtlanticError)?; - println!("Error response: {}", error_text); + error!("Error response: {}", error_text); return Err(Error::InvalidResponse(format!( "Request failed: {}", error_text @@ -81,7 +137,7 @@ impl AtlanticClient { } pub async fn submit_wrapped_proof(&self, proof: StarkProof) -> Result { - println!("Uploading to Atlantic..."); + info!("Uploading to Atlantic..."); // Serialize the proof to JSON string let proof_json = serde_json::to_string(&proof).map_err(|e| Error::DeserializeError(e.to_string()))?; @@ -172,4 +228,41 @@ impl AtlanticClient { Ok(status.to_string()) } + + pub async fn poll_batch_status_until_done( + &self, + batch_id: &str, + sleep_duration: Duration, + max_retries: usize, + ) -> Result { + for attempt in 1..=max_retries { + debug!("Pooling Atlantic for update... {}", batch_id); + let status = self.check_batch_status(batch_id).await?; + + if status == "DONE" { + return Ok(true); + } + + if status == "FAILED" { + return Err(Error::InvalidResponse(format!( + "Atlantic processing failed for query {}", + batch_id + ))); + } + + trace!( + "Batch {} not completed yet. Status: {}. 
Pooling attempt {}/{}", + batch_id, + status, + attempt, + max_retries + ); + sleep(sleep_duration).await; + } + + return Err(Error::InvalidResponse(format!( + "Pooling timeout for batch {}", + batch_id + ))); + } } diff --git a/client-rs/src/utils/cairo_runner.rs b/client-rs/src/utils/cairo_runner.rs index 4c94294..a88960d 100644 --- a/client-rs/src/utils/cairo_runner.rs +++ b/client-rs/src/utils/cairo_runner.rs @@ -1,11 +1,43 @@ +use std::sync::Arc; + +use crate::state::JobStatus; use crate::traits::ProofType; use crate::BankaiConfig; use crate::{traits::Provable, Error}; +use tokio::task; +use tokio::task::JoinError; +use tracing::info; +use uuid::Uuid; + +use super::database_manager::DatabaseManager; pub struct CairoRunner(); impl CairoRunner { - pub fn generate_pie(input: &impl Provable, config: &BankaiConfig) -> Result<(), Error> { + pub async fn generate_pie( + input: &impl Provable, + config: &BankaiConfig, + db_manager: Option>, + job_id: Option, + ) -> Result<(), Error> { + // Acquire a permit from the semaphore. + // If all permits are in use we will wait until one is available. + let _permit = config + .pie_generation_semaphore + .clone() + .acquire_owned() + .await + .map_err(|e| Error::CairoRunError(format!("Semaphore error: {}", e)))?; + + match db_manager { + None => {} + Some(db) => { + let _ = db + .update_job_status(job_id.unwrap(), JobStatus::StartedTraceGeneration) + .await; + } + } + let input_path = input.export()?; let program_path = match input.proof_type() { @@ -15,20 +47,26 @@ impl CairoRunner { }; let pie_path = input.pie_path(); - println!("Generating trace..."); + info!("Generating trace..."); let start_time = std::time::Instant::now(); - // Execute cairo-run command - let output = std::process::Command::new("sh") - .arg("-c") - .arg(format!( - "source ../venv/bin/activate && cairo-run --program {} --program_input {} --cairo_pie_output {} --layout=all_cairo", - program_path, - input_path, - pie_path - )) - .output() - .map_err(|e| Error::CairoRunError(format!("Failed to execute commands: {}", e)))?; + // Offload the blocking command execution to a dedicated thread + let output = task::spawn_blocking(move || { + std::process::Command::new("../venv/bin/cairo-run") + .arg("--program") + .arg(&program_path) + .arg("--program_input") + .arg(&input_path) + .arg("--cairo_pie_output") + .arg(&pie_path) + .arg("--layout=all_cairo") + .output() + .map_err(|e| Error::CairoRunError(format!("Failed to execute commands: {}", e))) + }) + .await + .map_err(|join_err: JoinError| { + Error::CairoRunError(format!("spawn_blocking failed: {}", join_err)) + })??; let duration = start_time.elapsed(); @@ -37,7 +75,7 @@ impl CairoRunner { String::from_utf8_lossy(&output.stderr).to_string(), )); } else { - println!("Trace generated successfully in {:.2?}!", duration); + info!("Trace generated successfully in {:.2?}!", duration); } Ok(()) diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs new file mode 100644 index 0000000..5a6bc05 --- /dev/null +++ b/client-rs/src/utils/database_manager.rs @@ -0,0 +1,1045 @@ +use crate::epoch_update::ExpectedEpochUpdateOutputs; +use crate::helpers; +use crate::state::{AtlanticJobType, Error, Job, JobStatus, JobType}; +use crate::utils::starknet_client::EpochProof; +use alloy_primitives::hex::{FromHex, ToHexExt}; +use alloy_primitives::FixedBytes; +use starknet::core::types::Felt; +use std::str::FromStr; +//use std::error::Error; +use chrono::NaiveDateTime; +use num_traits::ToPrimitive; +use 
std::collections::HashMap; +use tokio_postgres::{Client, Row}; +use tracing::{debug, error, info, warn}; +use uuid::Uuid; + +#[derive(Debug)] +pub struct JobSchema { + pub job_uuid: uuid::Uuid, + pub job_status: JobStatus, + pub slot: i64, + pub batch_range_begin_epoch: i64, + pub batch_range_end_epoch: i64, + pub job_type: JobType, + pub atlantic_proof_generate_batch_id: Option, + pub atlantic_proof_wrapper_batch_id: Option, + pub failed_at_step: Option, + pub retries_count: Option, + pub last_failure_time: Option, //pub updated_at: i64, +} + +#[derive(Debug)] +pub struct JobWithTimestamps { + pub job: JobSchema, + pub created_at: String, + pub updated_at: String, + pub tx_hash: Option, +} + +pub struct JobStatusCount { + pub status: JobStatus, + pub count: i64, +} + +#[derive(Debug)] +pub struct DatabaseManager { + client: Client, +} + +impl DatabaseManager { + pub async fn new(db_url: &str) -> Self { + let client = match tokio_postgres::connect(db_url, tokio_postgres::NoTls).await { + Ok((client, connection)) => { + tokio::spawn(async move { + if let Err(e) = connection.await { + eprintln!("Connection error: {}", e); + } + }); + + info!("Connected to the database successfully!"); + client + } + Err(err) => { + error!("Failed to connect to the database: {}", err); + std::process::exit(1); // Exit with non-zero status code + } + }; + + Self { client } + } + + pub async fn insert_verified_epoch( + &self, + epoch_id: u64, + epoch_proof: EpochProof, + ) -> Result<(), Box> { + self.client + .execute( + "INSERT INTO verified_epoch (epoch_id, header_root, state_root, n_signers) + VALUES ($1, $2, $3, $4, $4, $6)", + &[ + &epoch_id.to_string(), + &epoch_proof.header_root.to_string(), + &epoch_proof.state_root.to_string(), + &epoch_proof.n_signers.to_string(), + &epoch_proof.execution_hash.to_string(), + &epoch_proof.execution_height.to_string(), + ], + ) + .await?; + + Ok(()) + } + + pub async fn insert_verified_epoch_circuit_outputs( + &self, + epoch_id: u64, + beacon_header_root: FixedBytes<32>, + beacon_state_root: FixedBytes<32>, + slot: u64, + committee_hash: FixedBytes<32>, + n_signers: u64, + execution_header_hash: FixedBytes<32>, + execution_header_height: u64, + ) -> Result<(), Box> { + self.client + .execute( + "INSERT INTO verified_epoch (epoch_id, beacon_header_root, beacon_state_root, slot, committee_hash, n_signers, execution_header_hash, execution_header_height) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8)", + &[ + &epoch_id.to_i64(), + &beacon_header_root.encode_hex_with_prefix(), + &beacon_state_root.encode_hex_with_prefix(), + &slot.to_i64(), + &committee_hash.encode_hex_with_prefix(), + &n_signers.to_i64(), + &execution_header_hash.encode_hex_with_prefix(), + &execution_header_height.to_i64(), + ], + ) + .await?; + + Ok(()) + } + + pub async fn insert_verified_sync_committee( + &self, + sync_committee_id: u64, + sync_committee_hash: String, + ) -> Result<(), Box> { + self.client + .execute( + "INSERT INTO verified_sync_committee (sync_committee_id, sync_committee_hash) + VALUES ($1, $2)", + &[&sync_committee_id.to_string(), &sync_committee_hash], + ) + .await?; + + Ok(()) + } + + pub async fn set_atlantic_job_queryid( + &self, + job_id: Uuid, + batch_id: String, + atlantic_job_type: AtlanticJobType, + ) -> Result<(), Box> { + match atlantic_job_type { + AtlanticJobType::ProofGeneration => { + self.client + .execute( + "UPDATE jobs SET atlantic_proof_generate_batch_id = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&batch_id.to_string(), &job_id], + ) + .await?; + } + 
AtlanticJobType::ProofWrapping => { + self.client + .execute( + "UPDATE jobs SET atlantic_proof_wrapper_batch_id = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&batch_id.to_string(), &job_id], + ) + .await?; + } // _ => { + // println!("Unk", status); + // } + } + + Ok(()) + } + + pub async fn create_job( + &self, + job: Job, + ) -> Result<(), Box> { + match job.job_type { + JobType::EpochBatchUpdate => { + self.client + .execute( + "INSERT INTO jobs (job_uuid, job_status, slot, type, batch_range_begin_epoch, batch_range_end_epoch) VALUES ($1, $2, $3, $4, $5, $6)", + &[ + &job.job_id, + &job.job_status.to_string(), + &(job.slot.unwrap() as i64), + &"EPOCH_BATCH_UPDATE", + &(job.batch_range_begin_epoch.unwrap() as i64), + &(job.batch_range_end_epoch.unwrap() as i64), + ], + ) + .await + .map_err(|e| Error::DatabaseError(e.to_string()))?; + } + //JobType::EpochUpdate => {} + JobType::SyncCommitteeUpdate => { + self.client + .execute( + "INSERT INTO jobs (job_uuid, job_status, slot, type) VALUES ($1, $2, $3, $4)", + &[ + &job.job_id, + &job.job_status.to_string(), + &(job.slot.unwrap() as i64), + &"SYNC_COMMITTEE_UPDATE", + ], + ) + .await + .map_err(|e| Error::DatabaseError(e.to_string()))?; + } + } + + Ok(()) + } + + pub async fn fetch_job_status( + &self, + job_id: Uuid, + ) -> Result, Box> { + let row_opt = self + .client + .query_opt("SELECT status FROM jobs WHERE job_uuid = $1", &[&job_id]) + .await?; + + Ok(row_opt.map(|row| row.get("status"))) + } + + pub async fn get_job_by_id( + &self, + job_id: Uuid, + ) -> Result, Box> { + let row_opt = self + .client + .query_opt("SELECT * FROM jobs WHERE job_uuid = $1", &[&job_id]) + .await?; + + row_opt.map(Self::map_row_to_job).transpose() + } + // pub async fn get_latest_slot_id_in_progress( + // &self, + // ) -> Result, Box> { + // // Query the latest slot with job_status in ('in_progress', 'initialized') + // let row_opt = self + // .client + // .query_opt( + // "SELECT slot FROM jobs + // WHERE job_status NOT IN ('DONE', 'CANCELLED', 'ERROR') + // ORDER BY slot DESC + // LIMIT 1", + // &[], + // ) + // .await?; + + // // Extract and return the slot ID + // if let Some(row) = row_opt { + // Ok(Some(row.get::<_, i64>("slot").to_u64().unwrap())) + // } else { + // Ok(Some(0)) + // } + // } + + pub async fn get_latest_epoch_in_progress( + &self, + ) -> Result, Box> { + // Query the latest slot with job_status in ('in_progress', 'initialized') + // //, 'CANCELLED', 'ERROR' + let row_opt = self + .client + .query_opt( + "SELECT batch_range_end_epoch FROM jobs + WHERE job_status NOT IN ('DONE') + AND batch_range_end_epoch != 0 + AND type = 'EPOCH_BATCH_UPDATE' + ORDER BY batch_range_end_epoch DESC + LIMIT 1", + &[], + ) + .await?; + + // Extract and return the slot ID + if let Some(row) = row_opt { + Ok(Some( + row.get::<_, i64>("batch_range_end_epoch").to_u64().unwrap(), + )) + } else { + Ok(Some(0)) + } + } + + pub async fn get_latest_done_epoch( + &self, + ) -> Result, Box> { + // Query the latest slot with job_status in ('in_progress', 'initialized') + // //, 'CANCELLED', 'ERROR' + let row_opt = self + .client + .query_opt( + "SELECT batch_range_end_epoch FROM jobs + WHERE job_status = 'DONE' + AND batch_range_end_epoch != 0 + AND type = 'EPOCH_BATCH_UPDATE' + ORDER BY batch_range_end_epoch DESC + LIMIT 1", + &[], + ) + .await?; + + // Extract and return the slot ID + if let Some(row) = row_opt { + Ok(Some( + row.get::<_, i64>("batch_range_end_epoch").to_u64().unwrap(), + )) + } else { + Ok(Some(0)) + } + } + + pub async fn 
get_latest_sync_committee_in_progress( + &self, + ) -> Result, Box> { + // Query the latest slot with job_status in ('in_progress', 'initialized') + let row_opt = self + .client + .query_opt( + "SELECT slot FROM jobs + WHERE job_status NOT IN ('DONE') + AND type = 'SYNC_COMMITTEE_UPDATE' + ORDER BY slot DESC + LIMIT 1", + &[], + ) + .await?; + + // Extract and return the slot ID + if let Some(row) = row_opt { + Ok(Some(helpers::slot_to_sync_committee_id( + row.get::<_, i64>("slot").to_u64().unwrap(), + ))) + } else { + Ok(Some(0)) + } + } + + pub async fn get_latest_done_sync_committee( + &self, + ) -> Result, Box> { + // Query the latest slot with job_status in ('in_progress', 'initialized') + let row_opt = self + .client + .query_opt( + "SELECT slot FROM jobs + WHERE job_status = 'DONE' + AND type = 'SYNC_COMMITTEE_UPDATE' + ORDER BY slot DESC + LIMIT 1", + &[], + ) + .await?; + + // Extract and return the slot ID + if let Some(row) = row_opt { + Ok(Some(helpers::slot_to_sync_committee_id( + row.get::<_, i64>("slot").to_u64().unwrap(), + ))) + } else { + Ok(Some(0)) + } + } + + pub async fn count_jobs_in_progress( + &self, + ) -> Result, Box> { + // Query the latest slot with job_status in ('in_progress', 'initialized') + let row_opt = self + .client + .query_opt( + "SELECT COUNT(job_uuid) as count FROM jobs + WHERE job_status NOT IN ('DONE', 'CANCELLED', 'ERROR') + AND type = 'EPOCH_BATCH_UPDATE' + ", + &[], + ) + .await?; + + // Extract and return the slot ID + if let Some(row) = row_opt { + Ok(Some(row.get::<_, i64>("count").to_u64().unwrap())) + } else { + Ok(Some(0)) + } + } + + pub async fn get_merkle_paths_for_epoch( + &self, + epoch_id: i32, + ) -> Result, Box> { + // Query all merkle paths for the given epoch_id + let rows = self + .client + .query( + "SELECT merkle_path FROM epoch_merkle_paths + WHERE epoch_id = $1 + ORDER BY path_index ASC", + &[&epoch_id.to_i64()], + ) + .await?; + + let paths: Vec = rows + .iter() + .map(|row| row.get::<_, String>("merkle_path")) + .collect(); + + Ok(paths) + } + + pub async fn get_epoch_decommitment_data( + &self, + epoch_id: i32, + ) -> Result> { + let row = self + .client + .query_one( + r#" + SELECT + beacon_header_root, + beacon_state_root, + slot, + committee_hash, + n_signers, + execution_header_hash, + execution_header_height + FROM verified_epoch + WHERE epoch_id = $1 + "#, + &[&epoch_id.to_i64()], + ) + .await?; + + Ok(ExpectedEpochUpdateOutputs { + beacon_header_root: FixedBytes::from_hex(row.get::<_, String>("beacon_header_root")) + .unwrap(), + beacon_state_root: FixedBytes::from_hex(row.get::<_, String>("beacon_state_root")) + .unwrap(), + slot: row.get::<_, i64>("slot") as u64, + committee_hash: FixedBytes::from_hex(row.get::<_, String>("committee_hash")).unwrap(), + n_signers: row.get::<_, i64>("n_signers") as u64, + execution_header_hash: FixedBytes::from_hex( + row.get::<_, String>("execution_header_hash"), + ) + .unwrap(), + execution_header_height: row.get::<_, i64>("execution_header_height") as u64, + }) + } + + // pub async fn get_compute_finsihed_jobs_to_proccess_onchain_call( + // &self, + // last_epoch: JobStatus, + // ) -> Result, Box> { + // let rows = self + // .client + // .query( + // "SELECT * FROM jobs + // WHERE job_status = 'OFFCHAIN_COMPUTATION_FINISHED' AND job_type = 'EPOCH_BATCH_UPDATE' AND batch_range_end_epoch <= $1", + // &[&last_epoch], + // ) + // .await?; + + // // Map rows into Job structs + // let jobs: Vec = rows + // .into_iter() + // .map(|row: Row| JobSchema { + // job_uuid: 
row.get("job_uuid"), + // job_status: row.get("job_status"), + // slot: row.get("slot"), + // batch_range_begin_epoch: row.get("batch_range_begin_epoch"), + // batch_range_end_epoch: row.get("batch_range_end_epoch"), + // job_type: row.get("type"), + // updated_at: row.get("updated_at"), + // }) + // .collect(); + + // Ok(jobs) + // } + + pub async fn get_jobs_with_status( + &self, + desired_status: JobStatus, + ) -> Result, Box> { + // Query all jobs with the given job_status + let rows = self + .client + .query( + "SELECT * FROM jobs + WHERE job_status = $1", + &[&desired_status.to_string()], + ) + .await?; + + let jobs = rows + .iter() + .cloned() + .map(Self::map_row_to_job) + .collect::, _>>()?; + + Ok(jobs) + } + + pub async fn update_job_status( + &self, + job_id: Uuid, + new_status: JobStatus, + ) -> Result<(), Box> { + info!( + "Job {} status changed to {}", + job_id, + new_status.to_string() + ); + self.client + .execute( + "UPDATE jobs SET job_status = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&new_status.to_string(), &job_id], + ) + .await?; + Ok(()) + } + + pub async fn set_failure_info( + &self, + job_id: Uuid, + failed_at_step: JobStatus, + ) -> Result<(), Box> { + self.client + .execute( + "UPDATE jobs SET failed_at_step = $1, updated_at = NOW(), last_failure_time = NOW() WHERE job_uuid = $2", + &[&failed_at_step.to_string(), &job_id], + ) + .await?; + Ok(()) + } + + pub async fn count_epoch_jobs_waiting_for_sync_committe_update( + &self, + latest_verified_sync_committee: u64, + ) -> Result> { + let epoch_to_start_check_from = + helpers::get_last_epoch_for_sync_committee(latest_verified_sync_committee) + 1; // So we getting first epoch number from latest unverified committee + let row = self + .client + .query_one( + "SELECT COUNT(*) as count FROM jobs WHERE batch_range_begin_epoch >= $1 + AND job_status = 'OFFCHAIN_COMPUTATION_FINISHED'", + &[&epoch_to_start_check_from.to_i64()], + ) + .await?; + + Ok(row.get::<_, i64>("count").to_u64().unwrap_or(0)) + } + + pub async fn set_ready_to_broadcast_for_batch_epochs( + &self, + first_epoch: u64, + last_epoch: u64, + ) -> Result<(), Box> { + let rows_affected = self.client + .execute( + "UPDATE jobs + SET job_status = 'READY_TO_BROADCAST_ONCHAIN', updated_at = NOW() + WHERE batch_range_begin_epoch >= $1 AND batch_range_end_epoch <= $2 AND type = 'EPOCH_BATCH_UPDATE' + AND job_status = 'OFFCHAIN_COMPUTATION_FINISHED'", + &[&first_epoch.to_i64(), &last_epoch.to_i64()], + ) + .await?; + + if rows_affected > 0 { + info!( + "{} EPOCH_BATCH_UPDATE jobs changed state to READY_TO_BROADCAST_ONCHAIN", + rows_affected + ); + } + Ok(()) + } + + pub async fn set_ready_to_broadcast_for_batch_epochs_to( + &self, + to_epoch: u64, + ) -> Result<(), Box> { + let rows_affected = self + .client + .execute( + "UPDATE jobs + SET job_status = 'READY_TO_BROADCAST_ONCHAIN', updated_at = NOW() + WHERE batch_range_end_epoch <= $1 AND type = 'EPOCH_BATCH_UPDATE' + AND job_status = 'OFFCHAIN_COMPUTATION_FINISHED'", + &[&to_epoch.to_i64()], + ) + .await?; + + if rows_affected > 0 { + info!( + "{} EPOCH_BATCH_UPDATE jobs changed state to READY_TO_BROADCAST_ONCHAIN", + rows_affected + ); + } + Ok(()) + } + + pub async fn set_ready_to_broadcast_for_sync_committee( + &self, + sync_committee_id: u64, + ) -> Result<(), Box> { + let sync_commite_first_slot = helpers::get_first_slot_for_sync_committee(sync_committee_id); + let sync_commite_last_slot = helpers::get_last_slot_for_sync_committee(sync_committee_id); + + debug!( + "Setting syn committee between 
slots {} and {} to READY_TO_BROADCAST_ONCHAIN", + sync_commite_first_slot, sync_commite_last_slot + ); + + let rows_affected = self + .client + .execute( + "UPDATE jobs + SET job_status = 'READY_TO_BROADCAST_ONCHAIN', updated_at = NOW() + WHERE type = 'SYNC_COMMITTEE_UPDATE' + AND job_status = 'OFFCHAIN_COMPUTATION_FINISHED' + AND slot BETWEEN $1 AND $2 + ", + &[ + &sync_commite_first_slot.to_i64(), + &sync_commite_last_slot.to_i64(), + ], + ) + .await?; + + if rows_affected == 1 { + info!( + "{} SYNC_COMMITTEE_UPDATE jobs changed state to READY_TO_BROADCAST_ONCHAIN", + rows_affected + ); + } else if rows_affected > 1 { + warn!( + "{} SYNC_COMMITTEE_UPDATE jobs changed state to READY_TO_BROADCAST_ONCHAIN in one query, something may be wrong!", + rows_affected + ); + } + Ok(()) + } + + pub async fn set_job_txhash( + &self, + job_id: Uuid, + txhash: Felt, + ) -> Result<(), Box> { + self.client + .execute( + "UPDATE jobs SET tx_hash = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&txhash.to_hex_string(), &job_id], + ) + .await?; + Ok(()) + } + + // pub async fn cancell_all_unfinished_jobs( + // &self, + // ) -> Result<(), Box> { + // self.client + // .execute( + // "UPDATE jobs SET status = $1, updated_at = NOW() WHERE status = 'FETCHING'", + // &[&JobStatus::Cancelled.to_string()], + // ) + // .await?; + // Ok(()) + // } + + pub async fn insert_merkle_path_for_epoch( + &self, + epoch: u64, + path_index: u64, + path: String, + ) -> Result<(), Box> { + let rows_affected =self.client + .execute( + "INSERT INTO epoch_merkle_paths (epoch_id, path_index, merkle_path) VALUES ($1, $2, $3) + ON CONFLICT (epoch_id, path_index) DO NOTHING", + &[&epoch.to_i64(), &path_index.to_i64(), &path], + ) + .await?; + + if rows_affected == 0 { + warn!("Combination of epoch_id and path_index already exists, skipping insertion of epoch merkle patch for epoch {} and index {}", epoch, path_index); + } + Ok(()) + } + + pub async fn get_jobs_with_statuses( + &self, + desired_statuses: Vec, + ) -> Result, Box> { + if desired_statuses.is_empty() { + return Ok(vec![]); + } + + let status_strings: Vec = desired_statuses.iter().map(|s| s.to_string()).collect(); + + let placeholders: Vec = (1..=status_strings.len()) + .map(|i| format!("${}", i)) + .collect(); + let query = format!( + "SELECT * FROM jobs WHERE job_status IN ({})", + placeholders.join(", ") + ); + + let params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = status_strings + .iter() + .map(|s| s as &(dyn tokio_postgres::types::ToSql + Sync)) + .collect(); + + let rows = self.client.query(&query, ¶ms).await?; + + let jobs = rows + .iter() + .cloned() + .map(Self::map_row_to_job) + .collect::, _>>()?; + + Ok(jobs) + } + + // async fn fetch_job_by_status( + // client: &Client, + // status: JobStatus, + // ) -> Result, Box> { + // let tx = client.transaction().await?; + + // let row_opt = tx + // .query_opt( + // r#" + // SELECT job_id, status + // FROM jobs + // WHERE status = $1 + // ORDER BY updated_at ASC + // LIMIT 1 + // FOR UPDATE SKIP LOCKED + // "#, + // &[&status], + // ) + // .await?; + + // let job = if let Some(row) = row_opt { + // Some(Job { + // job_id: row.get("job_id"), + // job_type: row.get("type"), + // job_status: row.get("status"), + // slot: row.get("slot"), + // }) + // } else { + // None + // }; + + // tx.commit().await?; + // Ok(job) + // } + + // async fn add_verified_epoch( + // client: Arc, + // slot: u64, + // ) -> Result<(), Box> { + // client + // .execute( + // "INSERT INTO verified_epochs (slot, job_status, slot, type) 
VALUES ($1, $2, $3, $4)", + // &[&slot, &status.to_string(), &(slot as i64), &"EPOCH_UPDATE"], + // ) + // .await?; + + // Ok(()) + // } + // + + // pub async fn insert_job_log_entry( + // &self, + // job_id: u64, + // event_type: JobLogEntry, + // details: String, + // ) -> Result<(), Box> { + // self.client + // .execute( + // "INSERT INTO job_logs (job_id, event_type, details) + // VALUES ($1, $2, $3)", + // &[&job_id.to_string(), &event_type.to_string(), &details], + // ) + // .await?; + + // Ok(()) + // } + // + pub async fn update_daemon_state_info( + &self, + latest_known_beacon_slot: u64, + latest_known_beacon_block: FixedBytes<32>, + ) -> Result<(), Box> { + self.client + .execute( + "UPDATE daemon_state SET latest_known_beacon_slot = $1, latest_known_beacon_block = NOW()", + &[&latest_known_beacon_slot.to_string(), &latest_known_beacon_block.to_string()], + ) + .await?; + Ok(()) + } + + pub async fn count_total_jobs(&self) -> Result> { + let row = self + .client + .query_one("SELECT COUNT(*) as count FROM jobs WHERE job_status = 'DONE' OR job_status = 'ERROR'", &[]) + .await?; + + Ok(row.get::<_, i64>("count").to_u64().unwrap_or(0)) + } + + pub async fn count_successful_jobs( + &self, + ) -> Result> { + let row = self + .client + .query_one( + "SELECT COUNT(*) as count FROM jobs WHERE job_status = 'DONE'", + &[], + ) + .await?; + + Ok(row.get::<_, i64>("count").to_u64().unwrap_or(0)) + } + + pub async fn get_average_job_duration( + &self, + ) -> Result> { + let row = self + .client + .query_one( + "SELECT EXTRACT(EPOCH FROM AVG(updated_at - created_at))::INTEGER as avg_duration + FROM jobs + WHERE job_status = 'DONE' + LIMIT 20", + &[], + ) + .await?; + + Ok(i64::from( + row.get::<_, Option>("avg_duration").unwrap_or(0), + )) + } + + pub async fn get_recent_batch_jobs( + &self, + limit: i64, + ) -> Result, Box> { + let rows = self + .client + .query( + "SELECT *, + to_char(created_at, 'YYYY-MM-DD HH24:MI:SS') as created_time, + to_char(updated_at, 'YYYY-MM-DD HH24:MI:SS') as updated_time + FROM jobs + WHERE type = 'EPOCH_BATCH_UPDATE' + ORDER BY batch_range_begin_epoch DESC + LIMIT $1", + &[&limit], + ) + .await?; + + let jobs = rows + .into_iter() + .map(|row| { + let job = Self::map_row_to_job(row.clone()).unwrap(); + JobWithTimestamps { + job, + created_at: row.get("created_time"), + updated_at: row.get("updated_time"), + tx_hash: row.get("tx_hash"), + } + }) + .collect(); + + Ok(jobs) + } + + pub async fn get_recent_sync_committee_jobs( + &self, + limit: i64, + ) -> Result, Box> { + let rows = self + .client + .query( + "SELECT *, + to_char(created_at, 'YYYY-MM-DD HH24:MI:SS') as created_time, + to_char(updated_at, 'YYYY-MM-DD HH24:MI:SS') as updated_time + FROM jobs + WHERE type = 'SYNC_COMMITTEE_UPDATE' + ORDER BY slot DESC + LIMIT $1", + &[&limit], + ) + .await?; + + let jobs = rows + .into_iter() + .map(|row| { + let job = Self::map_row_to_job(row.clone()).unwrap(); + JobWithTimestamps { + job, + created_at: row.get("created_time"), + updated_at: row.get("updated_time"), + tx_hash: row.get("tx_hash"), + } + }) + .collect(); + + Ok(jobs) + } + + pub async fn is_connected(&self) -> bool { + match self.client.query_one("SELECT 1", &[]).await { + Ok(_) => true, + Err(_) => false, + } + } + + pub async fn get_recent_atlantic_queries_in_progress( + &self, + limit: i64, + ) -> Result, Box> { + let rows = self + .client + .query( + "SELECT atlantic_proof_generate_batch_id, atlantic_proof_wrapper_batch_id + FROM jobs + WHERE job_status != 'DONE' + ORDER BY slot DESC + LIMIT $1", + 
&[&limit], + ) + .await?; + + let jobs = rows + .into_iter() + .map(|row| { + let job = Self::map_row_to_job(row.clone()).unwrap(); + JobWithTimestamps { + job, + created_at: row.get("created_time"), + updated_at: row.get("updated_time"), + tx_hash: row.get("tx_hash"), + } + }) + .collect(); + + Ok(jobs) + } + + pub async fn get_jobs_count_by_status( + &self, + ) -> Result, Box> { + let rows = self + .client + .query( + "SELECT job_status, COUNT(*) AS job_count FROM jobs GROUP BY job_status", + &[], + ) + .await?; + + let mut db_counts: HashMap = HashMap::new(); + for row in rows { + let status_str: String = row.get("job_status"); + let status_count: i64 = row.get("job_count"); + + let job_status = JobStatus::from_str(&status_str) + .map_err(|err| format!("Failed to parse job status from DB row: {}", err))?; + + db_counts.insert(job_status, status_count); + } + + let all_possible_statuses = vec![ + JobStatus::Created, + JobStatus::StartedFetchingInputs, + JobStatus::ProgramInputsPrepared, + JobStatus::StartedTraceGeneration, + JobStatus::PieGenerated, + JobStatus::AtlanticProofRequested, + JobStatus::AtlanticProofRetrieved, + JobStatus::WrapProofRequested, + JobStatus::WrappedProofDone, + JobStatus::OffchainComputationFinished, + JobStatus::ReadyToBroadcastOnchain, + JobStatus::ProofVerifyCalledOnchain, + JobStatus::Done, + JobStatus::Error, + JobStatus::Cancelled, + ]; + + let mut result = Vec::with_capacity(all_possible_statuses.len()); + for status in all_possible_statuses { + let count = db_counts.get(&status).copied().unwrap_or(0); + result.push(JobStatusCount { status, count }); + } + + Ok(result) + } + + // Helper functions + fn map_row_to_job(row: Row) -> Result> { + let job_status_str: String = row.get("job_status"); + let job_status = job_status_str + .parse::() + .map_err(|err| format!("Failed to parse job status: {}", err))?; + + let job_type_str: String = row.get("type"); + let job_type = job_type_str + .parse::() + .map_err(|err| format!("Failed to parse job type: {}", err))?; + + let failed_at_step: Option = row + .get::<_, Option>("failed_at_step") + .map(|step| { + step.parse::() + .map_err(|err| format!("Failed to parse job type: {}", err)) + }) + .transpose()?; + + let last_failure_time: Option = row.get("last_failure_time"); + + Ok(JobSchema { + job_uuid: row.get("job_uuid"), + job_status, + slot: row.get("slot"), + batch_range_begin_epoch: row + .get::<&str, Option>("batch_range_begin_epoch") + .unwrap_or(0), + batch_range_end_epoch: row + .get::<&str, Option>("batch_range_end_epoch") + .unwrap_or(0), + job_type, + atlantic_proof_generate_batch_id: row.get("atlantic_proof_generate_batch_id"), + atlantic_proof_wrapper_batch_id: row.get("atlantic_proof_wrapper_batch_id"), + failed_at_step, + retries_count: row.get("retries_count"), + last_failure_time, + }) + } +} diff --git a/client-rs/src/utils/events.rs b/client-rs/src/utils/events.rs new file mode 100644 index 0000000..3f06f57 --- /dev/null +++ b/client-rs/src/utils/events.rs @@ -0,0 +1,9 @@ +use tokio::sync::broadcast; + +lazy_static::lazy_static! 
{ + static ref SEMAPHORE_EVENT: broadcast::Sender = broadcast::channel(10).0; +} + +pub fn subscribe_to_semaphore_events() -> broadcast::Receiver { + SEMAPHORE_EVENT.subscribe() +} diff --git a/client-rs/src/utils/mod.rs b/client-rs/src/utils/mod.rs index 896e84c..ea70144 100644 --- a/client-rs/src/utils/mod.rs +++ b/client-rs/src/utils/mod.rs @@ -1,6 +1,8 @@ pub mod atlantic_client; pub mod cairo_runner; +pub mod database_manager; pub mod hashing; pub mod merkle; pub mod rpc; pub mod starknet_client; +pub mod transactor_client; diff --git a/client-rs/src/utils/rpc.rs b/client-rs/src/utils/rpc.rs index 85f35d3..8a3347d 100644 --- a/client-rs/src/utils/rpc.rs +++ b/client-rs/src/utils/rpc.rs @@ -1,3 +1,4 @@ +use crate::constants; use crate::epoch_update::SyncCommitteeValidatorPubs; use crate::Error; use alloy_rpc_types_beacon::events::light_client_finality::SyncAggregate; @@ -5,11 +6,13 @@ use alloy_rpc_types_beacon::header::HeaderResponse; use itertools::Itertools; use reqwest::Client; use serde_json::Value; +use tracing::warn; use types::eth_spec::MainnetEthSpec; use types::{BeaconBlockBody, FullPayload}; /// A client for interacting with the Ethereum Beacon Chain RPC endpoints. /// Provides methods to fetch headers, sync aggregates, and validator information. +#[derive(Debug)] pub(crate) struct BeaconRpcClient { provider: Client, pub rpc_url: String, @@ -77,9 +80,8 @@ impl BeaconRpcClient { /// the previous slot's header. pub async fn get_sync_aggregate(&self, mut slot: u64) -> Result { slot += 1; // signature is in the next slot - + let mut attempts = 0; - const MAX_ATTEMPTS: u8 = 3; // Ensure the slot is not missed and increment in case it is let _header = loop { @@ -87,11 +89,16 @@ impl BeaconRpcClient { Ok(header) => break header, Err(Error::EmptySlotDetected(_)) => { attempts += 1; - if attempts >= MAX_ATTEMPTS { + if attempts >= constants::MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS { return Err(Error::EmptySlotDetected(slot)); } slot += 1; - println!("Empty slot detected! Attempt {}/{}. Fetching slot: {}", attempts, MAX_ATTEMPTS, slot); + warn!( + "Empty slot detected! Attempt {}/{}. Fetching slot: {}", + attempts, + constants::MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS, + slot + ); } Err(e) => return Err(e), // Propagate other errors immediately } @@ -195,4 +202,20 @@ impl BeaconRpcClient { let pubkeys = self.fetch_validator_pubkeys(&indexes).await?; Ok(pubkeys.into()) } + + /// Fetches the current head slot of the beacon chain. + /// + /// # Returns + /// The current slot number of the beacon chain head. + pub async fn get_head_slot(&self) -> Result { + let json = self.get_json("eth/v1/beacon/headers/head").await?; + + let slot = json["data"]["header"]["message"]["slot"] + .as_str() + .ok_or(Error::DeserializeError("Missing slot field".into()))? 
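+ // The beacon node returns the slot as a decimal string; parse it into a u64 before returning.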
+ .parse() + .map_err(|e: std::num::ParseIntError| Error::DeserializeError(e.to_string()))?; + + Ok(slot) + } } diff --git a/client-rs/src/utils/starknet_client.rs b/client-rs/src/utils/starknet_client.rs index 79a041b..be30b57 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -1,13 +1,20 @@ +use alloy_primitives::FixedBytes; +use serde::{Deserialize, Serialize}; use starknet::accounts::{Account, ConnectedAccount}; use starknet::core::types::{Call, FunctionCall}; use starknet::macros::selector; use starknet::providers::{Provider, ProviderError}; +use tracing::{debug, error, info, trace}; + use starknet::{ accounts::{ExecutionEncoding, SingleOwnerAccount}, contract::ContractFactory, core::{ chain_id, - types::{contract::SierraClass, BlockId, BlockTag, Felt}, + types::{ + contract::SierraClass, BlockId, BlockTag, Felt, TransactionExecutionStatus, + TransactionStatus, + }, }, macros::felt, providers::{ @@ -17,10 +24,55 @@ use starknet::{ signers::{LocalWallet, SigningKey}, }; use std::sync::Arc; +use tokio::time::{sleep, Duration}; use crate::contract_init::ContractInitializationData; use crate::traits::Submittable; use crate::BankaiConfig; + +#[derive(Debug, Serialize, Deserialize)] +pub struct EpochProof { + pub header_root: FixedBytes<32>, + pub state_root: FixedBytes<32>, + pub n_signers: u64, + pub execution_hash: FixedBytes<32>, + pub execution_height: u64, +} + +impl EpochProof { + pub fn from_contract_return_value(calldata: Vec) -> Result { + if calldata.len() != 8 { + return Err("Invalid return value length. Expected 8 elements.".to_string()); + } + + let header_root = combine_to_fixed_bytes(calldata[0], calldata[1])?; + let state_root = combine_to_fixed_bytes(calldata[2], calldata[3])?; + let n_signers = calldata[4].try_into().unwrap(); + let execution_hash = combine_to_fixed_bytes(calldata[5], calldata[6])?; + let execution_height = calldata[7].try_into().unwrap(); + + Ok(EpochProof { + header_root, + state_root, + n_signers, + execution_hash, + execution_height, + }) + } +} + +fn combine_to_fixed_bytes(high: Felt, low: Felt) -> Result, String> { + let mut bytes = [0u8; 32]; + let high_bytes = high.to_bytes_le(); + let low_bytes = low.to_bytes_le(); + + bytes[0..16].copy_from_slice(&high_bytes); + bytes[16..32].copy_from_slice(&low_bytes); + + Ok(FixedBytes::from_slice(bytes.as_slice())) +} + +#[derive(Debug)] pub struct StarknetClient { account: Arc, LocalWallet>>, // provider: Arc>, @@ -30,6 +82,8 @@ pub struct StarknetClient { pub enum StarknetError { ProviderError(ProviderError), AccountError(String), + TransactionError(String), + TimeoutError, } impl StarknetClient { @@ -70,8 +124,11 @@ impl StarknetClient { class_hash ); + let mut params = init_data.to_calldata(); + params.push(self.account.address()); + let contract_factory = ContractFactory::new(class_hash, self.account.clone()); - let deploy_tx = contract_factory.deploy_v1(init_data.to_calldata(), felt!("1337"), false); + let deploy_tx = contract_factory.deploy_v1(params, felt!("1337"), false); let contract_address = deploy_tx.deployed_address(); @@ -81,39 +138,58 @@ impl StarknetClient { contract_address ); - deploy_tx - .send() - .await - .map_err(|e| StarknetError::AccountError(e.to_string()))?; - - Ok(contract_address) + match deploy_tx.send().await { + Ok(_result) => { + info!("Deployment transaction sent successfully"); + Ok(contract_address) + } + Err(e) => { + error!("Deployment failed with error: {:#?}", e); + Err(StarknetError::AccountError(format!( + "Deployment 
failed: {:#?}", + e + ))) + } + } } pub async fn submit_update( &self, update: impl Submittable, config: &BankaiConfig, - ) -> Result<(), StarknetError> { - let result = self - .account - .execute_v1(vec![Call { - to: config.contract_address, - selector: update.get_contract_selector(), - calldata: update.to_calldata(), - }]) - .send() - .await - .map_err(|e| StarknetError::AccountError(e.to_string()))?; + ) -> Result { + let selector = update.get_contract_selector(); + let calldata = update.to_calldata(); + + let call = Call { + to: config.contract_address, + selector, + calldata, + }; + + let send_result = self.account.execute_v1(vec![call]).send().await; - println!("tx_hash: {:?}", result.transaction_hash); - Ok(()) + match send_result { + Ok(tx_response) => { + let tx_hash = tx_response.transaction_hash; + info!("Transaction sent successfully! Hash: {:#x}", tx_hash); + Ok(tx_hash) + } + Err(e) => { + error!("Transaction execution error: {:#?}", e); + return Err(StarknetError::TransactionError(format!( + "TransactionExecutionError: {:#?}", + e + ))); + } + } } pub async fn get_committee_hash( &self, slot: u64, config: &BankaiConfig, - ) -> Result<(), StarknetError> { + ) -> Result, StarknetError> { let committee_id = slot / 0x2000_u64; let committee_hash = self .account @@ -129,14 +205,14 @@ impl StarknetClient { .await .map_err(StarknetError::ProviderError)?; println!("committee_hash: {:?}", committee_hash); - Ok(()) + Ok(committee_hash) } pub async fn get_epoch_proof( &self, slot: u64, config: &BankaiConfig, - ) -> Result<(), StarknetError> { + ) -> Result { let epoch_proof = self .account .provider() @@ -151,7 +227,7 @@ impl StarknetClient { .await .map_err(StarknetError::ProviderError)?; println!("epoch_proof: {:?}", epoch_proof); - Ok(()) + Ok(EpochProof::from_contract_return_value(epoch_proof).unwrap()) } pub async fn get_latest_epoch_slot( @@ -164,7 +240,7 @@ impl StarknetClient { .call( FunctionCall { contract_address: config.contract_address, - entry_point_selector: selector!("get_latest_epoch"), + entry_point_selector: selector!("get_latest_epoch_slot"), calldata: vec![], }, BlockId::Tag(BlockTag::Latest), @@ -180,10 +256,10 @@ impl StarknetClient { config: &BankaiConfig, ) -> Result<(u64, u64), StarknetError> { let latest_epoch_slot = self.get_latest_epoch_slot(config).await?; - let next_epoch = (u64::try_from(latest_epoch_slot).unwrap() / 32) * 32 + 32; - let term = next_epoch / 0x2000; - let terms_last_epoch = (term + 1) * 0x2000 - 32; - Ok((next_epoch, terms_last_epoch)) + let next_epoch_slot = (u64::try_from(latest_epoch_slot).unwrap() / 32) * 32 + 32; + let term = next_epoch_slot / 0x2000; + let terms_last_epoch_slot = (term + 1) * 0x2000 - 32; + Ok((next_epoch_slot, terms_last_epoch_slot)) } pub async fn get_latest_committee_id( @@ -203,7 +279,65 @@ impl StarknetClient { ) .await .map_err(StarknetError::ProviderError)?; - println!("latest_committee_id: {:?}", latest_committee_id); + //println!("latest_committee_id: {:?}", latest_committee_id); Ok(*latest_committee_id.first().unwrap()) } + + pub async fn wait_for_confirmation(&self, tx_hash: Felt) -> Result<(), StarknetError> { + let max_retries = 20; + let delay = Duration::from_secs(5); + + for attempt in 0..max_retries { + match self.get_transaction_status(tx_hash).await { + Ok(status) => { + info!("Starknet transaction status: {:?}", status); + match status { + TransactionStatus::AcceptedOnL1(TransactionExecutionStatus::Succeeded) + | TransactionStatus::AcceptedOnL2(TransactionExecutionStatus::Succeeded) => { + 
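// Acceptance on either L1 or L2 with a successful execution status counts as final confirmation. +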
info!("Starknet transaction confirmed: {:?}", tx_hash); + return Ok(()); + } + TransactionStatus::Rejected => { + return Err(StarknetError::TransactionError( + "Transaction rejected".to_string(), + )); + } + _ => { + info!( + "Transaction is still pending (attempt {} of {}), sleeping...", + attempt + 1, + max_retries + ); + sleep(delay).await; + } + } + } + Err(err) => { + // If the transaction hash is not even found yet, or other unknown error + + error!( + "Error fetching transaction status for tx_hash={:?}: {:?}", + tx_hash, err + ); + + sleep(delay).await; + } + } + } + + Err(StarknetError::TimeoutError) + } + + pub async fn get_transaction_status( + &self, + tx_hash: Felt, + ) -> Result { + let provider = self.account.provider(); + let tx_status = provider + .get_transaction_status(tx_hash) + .await + .map_err(StarknetError::ProviderError)?; + + Ok(tx_status) + } } diff --git a/client-rs/src/utils/transactor_client.rs b/client-rs/src/utils/transactor_client.rs new file mode 100644 index 0000000..06db491 --- /dev/null +++ b/client-rs/src/utils/transactor_client.rs @@ -0,0 +1,161 @@ +use crate::{config::BankaiConfig, traits::Submittable, Error}; +use reqwest::{ + header::{AUTHORIZATION, CONTENT_TYPE}, + Client, +}; +use serde::{Deserialize, Serialize}; +use tokio::time::{sleep, Duration}; +use tracing::{debug, error, info, trace}; + +#[derive(Debug)] +pub struct TransactorClient { + endpoint: String, + api_key: String, + pub client: Client, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct TransactorResponse { + pub transactor_status: String, + pub tx: TransactionDetails, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct TransactionDetails { + pub hash: Option, + pub multicall_status: Option, +} + +#[derive(Debug, Serialize)] +pub struct TransactorRequest { + pub chain_id: String, + pub contract_invocations: Vec, +} + +#[derive(Debug, Serialize)] +pub struct ContractInvocation { + pub value: String, + pub chain_id: String, + pub calldata: String, + pub method_selector: String, + pub contract_address: String, +} + +impl TransactorClient { + pub fn new(endpoint: String, api_key: String) -> Self { + Self { + endpoint, + api_key, + client: Client::new(), + } + } + + pub async fn send_transaction( + &self, + request: TransactorRequest, + ) -> Result { + let url = format!("{}/transactor", self.endpoint); + let response = self + .client + .post(&url) + .header(AUTHORIZATION, format!("Bearer {}", self.api_key)) + .header(CONTENT_TYPE, "application/json") + .json(&request) + .send() + .await + .map_err(Error::TransactorError)?; + + let response_data: TransactorResponse = + response.json().await.map_err(Error::TransactorError)?; + Ok(response_data) + } + + pub async fn check_transaction_status( + &self, + transaction_id: &str, + ) -> Result { + let url = format!("{}/transactor/{}", self.endpoint, transaction_id); + let response = self + .client + .get(&url) + .header(AUTHORIZATION, format!("Bearer {}", self.api_key)) + .header(CONTENT_TYPE, "application/json") + .send() + .await + .map_err(Error::TransactorError)?; + + let response_data: TransactorResponse = + response.json().await.map_err(Error::TransactorError)?; + Ok(response_data) + } + + pub async fn poll_transaction_status_until_done( + &self, + transaction_id: &str, + sleep_duration: Duration, + max_retries: usize, + ) -> Result { + for attempt in 1..=max_retries { + debug!("Polling Transactor for update... 
{}", transaction_id); + let status_response = self.check_transaction_status(transaction_id).await?; + let status = status_response.transactor_status; + + if status == "OK_SUCCESS" { + return Ok(true); + } + + if status == "KO_FAILED_TO_ESTIMATE_GAS" || status == "KO_WITH_ERRORS" { + return Err(Error::InvalidResponse(format!( + "Transactor processing failed for transaction {} with status: {}", + transaction_id, status + ))); + } + + trace!( + "Transaction {} not completed yet. Status: {}. Polling attempt {}/{}", + transaction_id, + status, + attempt, + max_retries + ); + sleep(sleep_duration).await; + } + + Err(Error::InvalidResponse(format!( + "Polling timeout for transaction {}", + transaction_id + ))) + } + + pub async fn submit_update( + &self, + update: impl Submittable, + config: &BankaiConfig, + ) -> Result { + let request = TransactorRequest { + chain_id: config.proof_settlement_chain_id.clone().to_hex_string(), + contract_invocations: vec![ContractInvocation { + value: "0".to_string(), + chain_id: config.proof_settlement_chain_id.clone().to_hex_string(), + calldata: update + .to_calldata() + .iter() + .map(|felt| felt.to_hex_string()) + .collect(), + method_selector: "".to_string(), + contract_address: config.contract_address.clone().to_hex_string(), + }], + }; + + let response = self.send_transaction(request).await?; + + if let Some(hash) = response.tx.hash { + println!("Transaction sent with tx_hash: {:?}", hash); + Ok(hash) + } else { + Err(Error::InvalidResponse( + "Transaction hash not found".to_string(), + )) + } + } +} diff --git a/contract/Scarb.lock b/contract/Scarb.lock index 61ffa98..09d8572 100644 --- a/contract/Scarb.lock +++ b/contract/Scarb.lock @@ -6,6 +6,8 @@ name = "bankai" version = "0.1.0" dependencies = [ "integrity", + "openzeppelin_access", + "openzeppelin_upgrades", ] [[package]] @@ -13,3 +15,24 @@ name = "integrity" version = "2.0.0" source = "registry+https://scarbs.xyz/" checksum = "sha256:f5e91cd5280bc0c02cfb12ce1a521e25064956dd12f3e38fca3d841c538723a0" + +[[package]] +name = "openzeppelin_access" +version = "0.20.0" +source = "registry+https://scarbs.xyz/" +checksum = "sha256:7734901a0ca7a7065e69416fea615dd1dc586c8dc9e76c032f25ee62e8b2a06c" +dependencies = [ + "openzeppelin_introspection", +] + +[[package]] +name = "openzeppelin_introspection" +version = "0.20.0" +source = "registry+https://scarbs.xyz/" +checksum = "sha256:13e04a2190684e6804229a77a6c56de7d033db8b9ef519e5e8dee400a70d8a3d" + +[[package]] +name = "openzeppelin_upgrades" +version = "0.20.0" +source = "registry+https://scarbs.xyz/" +checksum = "sha256:15fdd63f6b50a0fda7b3f8f434120aaf7637bcdfe6fd8d275ad57343d5ede5e1" diff --git a/contract/Scarb.toml b/contract/Scarb.toml index 36c3c1e..5811e3a 100644 --- a/contract/Scarb.toml +++ b/contract/Scarb.toml @@ -9,6 +9,8 @@ edition = "2024_07" starknet = "2.9.1" cairo_test = "2.9.1" integrity = "2.0.0" +openzeppelin_access = "0.20.0" +openzeppelin_upgrades = "0.20.0" [[target.starknet-contract]] sierra = true \ No newline at end of file diff --git a/contract/src/interface.cairo b/contract/src/interface.cairo new file mode 100644 index 0000000..a05ece8 --- /dev/null +++ b/contract/src/interface.cairo @@ -0,0 +1,108 @@ +/// Interface for the Bankai contract, which manages Ethereum consensus verification on StarkNet +/// This contract enables trustless bridging of Ethereum consensus data to StarkNet +use super::types::EpochProof; + +#[starknet::interface] +pub trait IBankaiContract { + /// Returns the hash of a specific validator committee + fn 
get_committee_hash(self: @TContractState, committee_id: u64) -> u256; + + /// Returns the slot number of the most recent verified epoch + fn get_latest_epoch_slot(self: @TContractState) -> u64; + + /// Returns the ID of the most recent validator committee + fn get_latest_committee_id(self: @TContractState) -> u64; + + /// Returns the SHARP program hash used for committee updates + fn get_committee_update_program_hash(self: @TContractState) -> felt252; + + /// Returns the SHARP program hash used for epoch updates + fn get_epoch_update_program_hash(self: @TContractState) -> felt252; + + /// Returns the SHARP program hash used for epoch batching + fn get_epoch_batch_program_hash(self: @TContractState) -> felt252; + + /// Retrieves the epoch proof for a given slot + fn get_epoch_proof(self: @TContractState, slot: u64) -> EpochProof; + + /// Verifies and stores a new validator committee update + /// @param beacon_state_root - The beacon chain state root containing the committee + /// @param committee_hash - Hash of the new committee's public key + /// @param slot - Slot number where this committee becomes active + fn verify_committee_update( + ref self: TContractState, beacon_state_root: u256, committee_hash: u256, slot: u64, + ); + + /// Verifies and stores a new epoch update + /// @param header_root - SSZ root of the beacon block header + /// @param beacon_state_root - Root of the beacon state + /// @param slot - Slot number of this epoch + /// @param committee_hash - Hash of the signing committee + /// @param n_signers - Number of validators that signed + /// @param execution_hash - Hash of the execution layer header + /// @param execution_height - Height of the execution block + fn verify_epoch_update( + ref self: TContractState, + header_root: u256, + beacon_state_root: u256, + slot: u64, + committee_hash: u256, + n_signers: u64, + execution_hash: u256, + execution_height: u64, + ); + + /// Verifies and stores a batch of epoch updates + /// @param batch_root - Merkle root of the batch of epochs + /// Parameters same as verify_epoch_update + fn verify_epoch_batch( + ref self: TContractState, + batch_root: felt252, + header_root: u256, + beacon_state_root: u256, + slot: u64, + committee_hash: u256, + n_signers: u64, + execution_hash: u256, + execution_height: u64, + ); + + /// Extracts and verifies a single epoch from a previously verified batch + /// @param batch_root - Root of the verified batch + /// @param merkle_index - Index of this epoch in the batch + /// @param merkle_path - Merkle proof path + /// Other parameters same as verify_epoch_update + fn decommit_batched_epoch( + ref self: TContractState, + batch_root: felt252, + merkle_index: u16, + merkle_path: Array, + header_root: u256, + beacon_state_root: u256, + slot: u64, + committee_hash: u256, + n_signers: u64, + execution_hash: u256, + execution_height: u64, + ); + + /// Proposes an update to the SHARP program hashes (requires owner + timelock) + fn propose_program_hash_update( + ref self: TContractState, + new_committee_hash: felt252, + new_epoch_hash: felt252, + new_batch_hash: felt252, + ); + + /// Executes a proposed program hash update after timelock expires + fn execute_program_hash_update(ref self: TContractState); + + /// Pauses the contract (owner only) + fn pause(ref self: TContractState); + + /// Unpauses the contract (owner only) + fn unpause(ref self: TContractState); + + /// Returns whether the contract is currently paused + fn is_paused(self: @TContractState) -> bool; +} diff --git a/contract/src/lib.cairo 
b/contract/src/lib.cairo index 91e42b2..e9a5b3c 100644 --- a/contract/src/lib.cairo +++ b/contract/src/lib.cairo @@ -1,139 +1,119 @@ -#[derive(Drop, starknet::Store, Serde)] -pub struct EpochProof { - // Hash of the beacon header (root since ssz) - header_root: u256, - // state root at the mapped slot - beacon_state_root: u256, - // Number of signers (out of 512) - n_signers: u64, - // Hash of the execution header - execution_hash: u256, - // Height of the execution header - execution_height: u64, -} - -#[starknet::interface] -pub trait IBankaiContract { - fn get_committee_hash(self: @TContractState, committee_id: u64) -> u256; - fn get_latest_epoch(self: @TContractState) -> u64; - fn get_latest_committee_id(self: @TContractState) -> u64; - fn get_committee_update_program_hash(self: @TContractState) -> felt252; - fn get_epoch_update_program_hash(self: @TContractState) -> felt252; - fn get_epoch_proof(self: @TContractState, slot: u64) -> EpochProof; - fn verify_committee_update( - ref self: TContractState, beacon_state_root: u256, committee_hash: u256, slot: u64, - ); - fn verify_epoch_update( - ref self: TContractState, - header_root: u256, - beacon_state_root: u256, - slot: u64, - committee_hash: u256, - n_signers: u64, - execution_hash: u256, - execution_height: u64, - ); - - fn verify_epoch_batch( - ref self: TContractState, - batch_root: felt252, - header_root: u256, - beacon_state_root: u256, - slot: u64, - committee_hash: u256, - n_signers: u64, - execution_hash: u256, - execution_height: u64, - ); +pub mod interface; +pub mod types; - fn decommit_batched_epoch( - ref self: TContractState, - batch_root: felt252, - merkle_index: u16, - merkle_path: Array, - header_root: u256, - beacon_state_root: u256, - slot: u64, - committee_hash: u256, - n_signers: u64, - execution_hash: u256, - execution_height: u64, - ); -} +pub use interface::IBankaiContract; pub mod utils; #[starknet::contract] pub mod BankaiContract { - use super::EpochProof; + use super::types::{ + EpochProof, CommitteeUpdated, EpochUpdated, EpochBatch, EpochDecommitted, Paused, + Unpaused, + }; use starknet::storage::{ Map, StorageMapReadAccess, StorageMapWriteAccess, StoragePointerReadAccess, StoragePointerWriteAccess, }; - use starknet::{ContractAddress, get_caller_address}; + use starknet::ClassHash; + + use starknet::{ContractAddress, get_block_timestamp}; use integrity::{ Integrity, IntegrityWithConfig, SHARP_BOOTLOADER_PROGRAM_HASH, VerifierConfiguration, }; - use crate::utils::{calculate_wrapped_bootloaded_fact_hash, WRAPPER_PROGRAM_HASH, hash_path, compute_leaf_hash}; + use crate::utils::{ + calculate_wrapped_bootloaded_fact_hash, WRAPPER_PROGRAM_HASH, hash_path, compute_leaf_hash, + }; + + use openzeppelin_access::ownable::OwnableComponent; + use openzeppelin_upgrades::UpgradeableComponent; + use openzeppelin_upgrades::interface::IUpgradeable; + + component!(path: OwnableComponent, storage: ownable, event: OwnableEvent); + component!(path: UpgradeableComponent, storage: upgradeable, event: UpgradeableEvent); + + + // Ownable Mixin + #[abi(embed_v0)] + impl OwnableMixinImpl = OwnableComponent::OwnableMixinImpl; + impl OwnableInternalImpl = OwnableComponent::InternalImpl; + + impl UpgradeableInternalImpl = UpgradeableComponent::InternalImpl; + + + /// Events emitted by the contract #[event] #[derive(Drop, starknet::Event)] pub enum Event { + /// Emitted when a new validator committee is verified CommitteeUpdated: CommitteeUpdated, + /// Emitted when a new epoch is verified EpochUpdated: EpochUpdated, + /// Emitted 
when a batch of epochs is verified EpochBatch: EpochBatch, - EpochDecommitted: EpochDecommitted + /// Emitted when an epoch is extracted from a batch + EpochDecommitted: EpochDecommitted, + /// Emitted when the contract is paused + Paused: Paused, + /// Emitted when the contract is unpaused + Unpaused: Unpaused, + OwnableEvent: OwnableComponent::Event, + UpgradeableEvent: UpgradeableComponent::Event, } - #[derive(Drop, starknet::Event)] - pub struct CommitteeUpdated { - committee_id: u64, - committee_hash: u256, - } - - #[derive(Drop, starknet::Event)] - pub struct EpochUpdated { - // Hash of the beacon header (root since ssz) - beacon_root: u256, - // Slot of the beacon header - slot: u64, - // Hash of the execution header - execution_hash: u256, - // Height of the execution header - execution_height: u64, - } - - #[derive(Drop, starknet::Event)] - pub struct EpochBatch { - batch_root: felt252, - beacon_root: u256, - slot: u64, - execution_hash: u256, - execution_height: u64, - } - - #[derive(Drop, starknet::Event)] - pub struct EpochDecommitted { - batch_root: felt252, - slot: u64, - execution_hash: u256, - execution_height: u64 - } + /// Time delay required for program hash updates (48 hours in seconds) + /// This delay provides a security window for detecting malicious updates + const UPDATE_DELAY: u64 = 172800; + /// Contract storage layout #[storage] struct Storage { - committee: Map::< - u64, u256, - >, // maps committee index to committee hash (sha256(x || y)) of aggregate key - epochs: Map::, // maps beacon slot to header root and state root - batches: Map::, // Available batch roots - owner: ContractAddress, - latest_epoch: u64, + // Committee Management + /// Maps committee index to committee hash (sha256(x || y)) of aggregate key + committee: Map::, + /// ID of the most recent committee latest_committee_id: u64, + /// ID of the initial trusted committee initialization_committee: u64, + // Epoch Management + /// Maps beacon slot to header root and state root + epochs: Map::, + /// Most recent verified epoch slot + latest_epoch_slot: u64, + // Batch Management + /// Tracks verified batch roots + batches: Map::, + // Program Hash Management + /// Current SHARP program hash for committee updates committee_update_program_hash: felt252, + /// Current SHARP program hash for epoch updates epoch_update_program_hash: felt252, + /// Current SHARP program hash for epoch batching epoch_batch_program_hash: felt252, + /// Proposed new committee program hash (pending timelock) + pending_committee_program_hash: felt252, + /// Proposed new epoch program hash (pending timelock) + pending_epoch_program_hash: felt252, + /// Proposed new batch program hash (pending timelock) + pending_batch_program_hash: felt252, + /// Timestamp when pending program hash update can be executed + pending_update_timestamp: u64, + // Contract Management + /// Contract pause state for emergency stops + paused: bool, + /// OpenZeppelin ownable component storage + #[substorage(v0)] + pub ownable: OwnableComponent::Storage, + /// OpenZeppelin upgradeable component storage + #[substorage(v0)] + upgradeable: UpgradeableComponent::Storage, } + /// Contract constructor + /// @param committee_id - ID of the initial trusted committee + /// @param committee_hash - Hash of the initial committee's public key + /// @param committee_update_program_hash - Initial SHARP program hash for committee updates + /// @param epoch_update_program_hash - Initial SHARP program hash for epoch updates + /// @param epoch_batch_program_hash - Initial 
SHARP program hash for epoch batching #[constructor] pub fn constructor( ref self: ContractState, @@ -142,9 +122,11 @@ pub mod BankaiContract { committee_update_program_hash: felt252, epoch_update_program_hash: felt252, epoch_batch_program_hash: felt252, + owner: ContractAddress, ) { - self.owner.write(get_caller_address()); - self.latest_epoch.write(0); + // Initialize owner as contract deployer + self.ownable.initializer(owner); + self.latest_epoch_slot.write(0); // Write trusted initial committee self.initialization_committee.write(committee_id); @@ -157,43 +139,78 @@ pub mod BankaiContract { self.epoch_batch_program_hash.write(epoch_batch_program_hash); } + /// Implementation of the upgradeable interface + #[abi(embed_v0)] + impl UpgradeableImpl of IUpgradeable { + /// Upgrades the contract to a new implementation + /// @param new_class_hash - The class hash of the new implementation + /// @dev Can only be called by the contract owner + fn upgrade(ref self: ContractState, new_class_hash: ClassHash) { + self.ownable.assert_only_owner(); + self.upgradeable.upgrade(new_class_hash); + } + } + + /// Core implementation of the Bankai contract interface #[abi(embed_v0)] impl BankaiContractImpl of super::IBankaiContract { + /// Retrieves the hash of a specific validator committee + /// @param committee_id - The unique identifier of the committee + /// @return The aggregate public key hash of the committee fn get_committee_hash(self: @ContractState, committee_id: u64) -> u256 { self.committee.read(committee_id) } - fn get_latest_epoch(self: @ContractState) -> u64 { - self.latest_epoch.read() + /// Returns the slot number of the most recent verified epoch + fn get_latest_epoch_slot(self: @ContractState) -> u64 { + self.latest_epoch_slot.read() } + /// Returns the ID of the most recent validator committee fn get_latest_committee_id(self: @ContractState) -> u64 { self.latest_committee_id.read() } + /// Returns the current SHARP program hash for committee updates fn get_committee_update_program_hash(self: @ContractState) -> felt252 { self.committee_update_program_hash.read() } + /// Returns the current SHARP program hash for epoch updates fn get_epoch_update_program_hash(self: @ContractState) -> felt252 { self.epoch_update_program_hash.read() } + /// Returns the current SHARP program hash for epoch batching + fn get_epoch_batch_program_hash(self: @ContractState) -> felt252 { + self.epoch_batch_program_hash.read() + } + + /// Retrieves the epoch proof for a given slot + /// @param slot - The slot number to query + /// @return The epoch proof containing consensus and execution data fn get_epoch_proof(self: @ContractState, slot: u64) -> EpochProof { self.epochs.read(slot) } + /// Verifies and stores a new validator committee update + /// @dev Requires a valid SHARP proof and matching beacon state root + /// @param beacon_state_root - The beacon chain state root containing the committee + /// @param committee_hash - Hash of the new committee's public key + /// @param slot - Slot number where this committee becomes active + /// @custom:throws 'Contract is paused' if contract is paused + /// @custom:throws 'Invalid State Root!' if beacon state root doesn't match + /// @custom:throws 'Invalid Fact Hash!' 
if SHARP proof is invalid fn verify_committee_update( ref self: ContractState, beacon_state_root: u256, committee_hash: u256, slot: u64, ) { + assert(!self.paused.read(), 'Contract is paused'); let epoch_proof = self.epochs.read(slot); assert(beacon_state_root == epoch_proof.beacon_state_root, 'Invalid State Root!'); - // for now we dont ensure the fact hash is valid let fact_hash = compute_committee_proof_fact_hash( @self, beacon_state_root, committee_hash, slot, ); - // println!("fact_hash: {:?}", fact_hash); assert(is_valid_fact_hash(fact_hash), 'Invalid Fact Hash!'); // The new committee is always assigned at the start of the previous committee @@ -211,6 +228,11 @@ pub mod BankaiContract { ); } + /// Verifies and stores a new epoch update + /// @dev Requires a valid SHARP proof and matching committee hash + /// @custom:throws 'Contract is paused' if contract is paused + /// @custom:throws 'Invalid Committee Hash!' if committee hash doesn't match + /// @custom:throws 'Invalid Fact Hash!' if SHARP proof is invalid fn verify_epoch_update( ref self: ContractState, header_root: u256, @@ -221,32 +243,49 @@ pub mod BankaiContract { execution_hash: u256, execution_height: u64, ) { - + assert(!self.paused.read(), 'Contract is paused'); let signing_committee_id = (slot / 0x2000); let valid_committee_hash = self.committee.read(signing_committee_id); assert(committee_hash == valid_committee_hash, 'Invalid Committee Hash!'); let fact_hash = compute_epoch_proof_fact_hash( - @self, header_root, beacon_state_root, slot, committee_hash, n_signers, execution_hash, execution_height, + @self, + header_root, + beacon_state_root, + slot, + committee_hash, + n_signers, + execution_hash, + execution_height, ); assert(is_valid_fact_hash(fact_hash), 'Invalid Fact Hash!'); let epoch_proof = EpochProof { - header_root: header_root, beacon_state_root: beacon_state_root, n_signers: n_signers, execution_hash: execution_hash, execution_height: execution_height, + header_root, beacon_state_root, n_signers, execution_hash, execution_height, }; self.epochs.write(slot, epoch_proof); - let latest_epoch = self.latest_epoch.read(); + let latest_epoch = self.latest_epoch_slot.read(); if slot > latest_epoch { - self.latest_epoch.write(slot); + self.latest_epoch_slot.write(slot); } - self.emit(Event::EpochUpdated(EpochUpdated { - beacon_root: header_root, slot: slot, execution_hash: execution_hash, execution_height: execution_height, - })); + self + .emit( + Event::EpochUpdated( + EpochUpdated { + beacon_root: header_root, slot, execution_hash, execution_height, + }, + ), + ); } + /// Verifies and stores a batch of epoch updates + /// @dev Requires a valid SHARP proof and matching committee hash + /// @custom:throws 'Contract is paused' if contract is paused + /// @custom:throws 'Invalid Committee Hash!' if committee hash doesn't match + /// @custom:throws 'Invalid Fact Hash!' 
if SHARP proof is invalid fn verify_epoch_batch( ref self: ContractState, batch_root: felt252, @@ -257,34 +296,58 @@ pub mod BankaiContract { n_signers: u64, execution_hash: u256, execution_height: u64, - ) { + ) { + assert(!self.paused.read(), 'Contract is paused'); + let signing_committee_id = (slot / 0x2000); let valid_committee_hash = self.committee.read(signing_committee_id); assert(committee_hash == valid_committee_hash, 'Invalid Committee Hash!'); let fact_hash = compute_epoch_batch_fact_hash( - @self, batch_root, header_root, beacon_state_root, slot, committee_hash, n_signers, execution_hash, execution_height, + @self, + batch_root, + header_root, + beacon_state_root, + slot, + committee_hash, + n_signers, + execution_hash, + execution_height, ); assert(is_valid_fact_hash(fact_hash), 'Invalid Fact Hash!'); let epoch_proof = EpochProof { - header_root: header_root, beacon_state_root: beacon_state_root, n_signers: n_signers, execution_hash: execution_hash, execution_height: execution_height, + header_root, beacon_state_root, n_signers, execution_hash, execution_height, }; self.epochs.write(slot, epoch_proof); - self.emit(Event::EpochBatch(EpochBatch { - batch_root: batch_root, beacon_root: header_root, slot: slot, execution_hash: execution_hash, execution_height: execution_height, - })); + self + .emit( + Event::EpochBatch( + EpochBatch { + batch_root, + beacon_root: header_root, + slot, + execution_hash, + execution_height, + }, + ), + ); self.batches.write(batch_root, true); - let latest_epoch = self.latest_epoch.read(); + let latest_epoch = self.latest_epoch_slot.read(); if slot > latest_epoch { - self.latest_epoch.write(slot); + self.latest_epoch_slot.write(slot); } } + /// Extracts and verifies a single epoch from a previously verified batch + /// @dev Verifies the Merkle proof against the stored batch root + /// @custom:throws 'Contract is paused' if contract is paused + /// @custom:throws 'Batch root not known!' if batch_root hasn't been verified + /// @custom:throws 'Invalid Batch Merkle Root!' 
if Merkle proof is invalid fn decommit_batched_epoch( ref self: ContractState, batch_root: felt252, @@ -298,26 +361,105 @@ pub mod BankaiContract { execution_hash: u256, execution_height: u64, ) { - + assert(!self.paused.read(), 'Contract is paused'); let known_batch_root = self.batches.read(batch_root); assert(known_batch_root, 'Batch root not known!'); - let leaf = compute_leaf_hash(header_root, beacon_state_root, slot, committee_hash, n_signers, execution_hash, execution_height); + let leaf = compute_leaf_hash( + header_root, + beacon_state_root, + slot, + committee_hash, + n_signers, + execution_hash, + execution_height, + ); let computed_root = hash_path(leaf, merkle_path, merkle_index); assert(computed_root == batch_root, 'Invalid Batch Merkle Root!'); let epoch_proof = EpochProof { - header_root: header_root, beacon_state_root: beacon_state_root, n_signers: n_signers, execution_hash: execution_hash, execution_height: execution_height, + header_root, beacon_state_root, n_signers, execution_hash, execution_height, }; self.epochs.write(slot, epoch_proof); - self.emit(Event::EpochDecommitted(EpochDecommitted { - batch_root: batch_root, slot: slot, execution_hash: execution_hash, execution_height: execution_height, - })); + + self + .emit( + Event::EpochDecommitted( + EpochDecommitted { batch_root, slot, execution_hash, execution_height }, + ), + ); } + /// Proposes an update to the SHARP program hashes + /// @dev Requires owner access and initiates the timelock period + /// @param new_committee_hash - New program hash for committee verification + /// @param new_epoch_hash - New program hash for epoch verification + /// @param new_batch_hash - New program hash for batch verification + /// @custom:throws 'Contract is paused' if contract is paused + fn propose_program_hash_update( + ref self: ContractState, + new_committee_hash: felt252, + new_epoch_hash: felt252, + new_batch_hash: felt252, + ) { + assert(!self.paused.read(), 'Contract is paused'); + self.ownable.assert_only_owner(); + + self.pending_committee_program_hash.write(new_committee_hash); + self.pending_epoch_program_hash.write(new_epoch_hash); + self.pending_batch_program_hash.write(new_batch_hash); + self.pending_update_timestamp.write(get_block_timestamp() + UPDATE_DELAY); + } + + /// Executes a proposed program hash update after timelock expires + /// @dev Can only be called by owner after timelock period + /// @custom:throws 'Delay not elapsed' if timelock period hasn't passed + fn execute_program_hash_update(ref self: ContractState) { + self.ownable.assert_only_owner(); + assert( + get_block_timestamp() >= self.pending_update_timestamp.read(), 'Delay not elapsed', + ); + + // Update program hashes + self.committee_update_program_hash.write(self.pending_committee_program_hash.read()); + self.epoch_update_program_hash.write(self.pending_epoch_program_hash.read()); + self.epoch_batch_program_hash.write(self.pending_batch_program_hash.read()); + + // Clear pending updates + self.pending_committee_program_hash.write(0); + self.pending_epoch_program_hash.write(0); + self.pending_batch_program_hash.write(0); + self.pending_update_timestamp.write(0); + } + + /// Pauses all contract operations + /// @dev Can only be called by owner + /// @custom:throws 'Contract is already paused' if already paused + fn pause(ref self: ContractState) { + self.ownable.assert_only_owner(); + assert(!self.paused.read(), 'Contract is already paused'); + self.paused.write(true); + self.emit(Event::Paused(Paused {})); + } + + /// Unpauses contract 
operations + /// @dev Can only be called by owner + /// @custom:throws 'Contract is not paused' if not paused + fn unpause(ref self: ContractState) { + self.ownable.assert_only_owner(); + assert(self.paused.read(), 'Contract is not paused'); + self.paused.write(false); + self.emit(Event::Unpaused(Unpaused {})); + } + + /// Returns whether the contract is currently paused + fn is_paused(self: @ContractState) -> bool { + self.paused.read() + } } + /// Internal helper functions for computing fact hashes fn compute_committee_proof_fact_hash( self: @ContractState, beacon_state_root: u256, committee_hash: u256, slot: u64, ) -> felt252 { @@ -326,14 +468,15 @@ pub mod BankaiContract { SHARP_BOOTLOADER_PROGRAM_HASH, self.committee_update_program_hash.read(), [ - beacon_state_root.low.into(), beacon_state_root.high.into(), committee_hash.low.into(), - committee_hash.high.into(), slot.into(), + beacon_state_root.low.into(), beacon_state_root.high.into(), + committee_hash.low.into(), committee_hash.high.into(), slot.into(), ] .span(), ); return fact_hash; } + /// Computes fact hash for epoch proof verification fn compute_epoch_proof_fact_hash( self: @ContractState, header_root: u256, @@ -359,6 +502,7 @@ pub mod BankaiContract { return fact_hash; } + /// Computes fact hash for epoch batch verification fn compute_epoch_batch_fact_hash( self: @ContractState, batch_root: felt252, @@ -375,8 +519,7 @@ pub mod BankaiContract { SHARP_BOOTLOADER_PROGRAM_HASH, self.epoch_batch_program_hash.read(), [ - batch_root, header_root.low.into(), - header_root.high.into(), state_root.low.into(), + batch_root, header_root.low.into(), header_root.high.into(), state_root.low.into(), state_root.high.into(), slot.into(), committee_hash.low.into(), committee_hash.high.into(), n_signers.into(), execution_hash.low.into(), execution_hash.high.into(), execution_height.into(), @@ -398,4 +541,4 @@ pub mod BankaiContract { let integrity = Integrity::new().with_config(config, SECURITY_BITS); integrity.is_fact_hash_valid(fact_hash) } -} \ No newline at end of file +} diff --git a/contract/src/types.cairo b/contract/src/types.cairo new file mode 100644 index 0000000..555bb1d --- /dev/null +++ b/contract/src/types.cairo @@ -0,0 +1,73 @@ +/// Represents a proof of an Ethereum beacon chain epoch, containing crucial consensus and execution +/// data +#[derive(Drop, starknet::Store, Serde)] +pub struct EpochProof { + /// Hash of the beacon chain header (SSZ root) + pub header_root: u256, + /// State root of the beacon chain at the corresponding slot + pub beacon_state_root: u256, + /// Number of validators that signed (out of 512 possible) + pub n_signers: u64, + /// Hash of the execution layer (EL) header + pub execution_hash: u256, + /// Block height of the execution layer header + pub execution_height: u64, +} + +/// Event emitted when a new committee is validated and stored +#[derive(Drop, starknet::Event)] +pub struct CommitteeUpdated { + /// Unique identifier for the committee + pub committee_id: u64, + /// Aggregate public key hash of the committee + pub committee_hash: u256, +} + +/// Event emitted when a new epoch is validated and stored +#[derive(Drop, starknet::Event)] +pub struct EpochUpdated { + /// Hash of the beacon header (SSZ root) + pub beacon_root: u256, + /// Slot number of the beacon header + pub slot: u64, + /// Hash of the execution layer header + pub execution_hash: u256, + /// Block height of the execution header + pub execution_height: u64, +} + +/// Event emitted when a batch of epochs is validated +#[derive(Drop, 
starknet::Event)] +pub struct EpochBatch { + /// Merkle root of the batch + pub batch_root: felt252, + /// Hash of the beacon header + pub beacon_root: u256, + /// Slot number + pub slot: u64, + /// Hash of the execution header + pub execution_hash: u256, + /// Block height of the execution header + pub execution_height: u64, +} + +/// Event emitted when an epoch is extracted from a verified batch +#[derive(Drop, starknet::Event)] +pub struct EpochDecommitted { + /// Root of the batch containing this epoch + pub batch_root: felt252, + /// Slot number + pub slot: u64, + /// Hash of the execution header + pub execution_hash: u256, + /// Block height of the execution header + pub execution_height: u64, +} + +/// Emitted when the contract is paused +#[derive(Drop, starknet::Event)] +pub struct Paused {} + +/// Emitted when the contract is unpaused +#[derive(Drop, starknet::Event)] +pub struct Unpaused {} diff --git a/contract/src/utils.cairo b/contract/src/utils.cairo index 4b1fb57..56b6303 100644 --- a/contract/src/utils.cairo +++ b/contract/src/utils.cairo @@ -45,8 +45,8 @@ pub fn hash_path(leaf: felt252, path: Array, index: u16) -> felt252 { // Get the sibling node let sibling = *path.at(i); - // Determine left and right nodes based on current_index - let (left, right) = if (current_index - 2 * (current_index / 2)) == 0 { + // Determine left and right nodes based on current_index's least significant bit + let (left, right) = if (current_index & 1_u16 == 0_u16) { (current_hash, sibling) } else { (sibling, current_hash) @@ -55,7 +55,7 @@ pub fn hash_path(leaf: felt252, path: Array, index: u16) -> felt252 { // Hash the pair using Poseidon let (hash, _, _) = hades_permutation(left, right, 2); current_hash = hash; - + // Update index for next level current_index = current_index / 2; i += 1; @@ -85,9 +85,9 @@ pub fn compute_leaf_hash( n_signers.into(), execution_hash.low.into(), execution_hash.high.into(), - execution_height.into() + execution_height.into(), ]; - + // Hash all values with Poseidon poseidon_hash_span(values.span()) } @@ -95,16 +95,19 @@ pub fn compute_leaf_hash( #[cfg(test)] mod tests { use super::*; - + #[test] fn test_leaf_hash_computation() { // Test values from JSON file let header_root = 0xcee6e3a29b289c3d0eb1f08f6cbf965a2f5771f54ca781fbf1f9d9a5e898d602_u256; - let beacon_state_root = 0xac1d83f6ab8c04205b698f9b5dbe93a1136000ca0162941bf129029ad402906c_u256; + let beacon_state_root = + 0xac1d83f6ab8c04205b698f9b5dbe93a1136000ca0162941bf129029ad402906c_u256; let slot = 6710272_u64; - let committee_hash = 0x3ccf068854b1612cc9537f6fd2a56fb0734722ce40b89685f84e17a6986510d3_u256; + let committee_hash = + 0x3ccf068854b1612cc9537f6fd2a56fb0734722ce40b89685f84e17a6986510d3_u256; let n_signers = 479_u64; - let execution_hash = 0xc2c133b1ea59352cef6c0434e0007cdba4bdc216afd32fdf6b40c4a135a8535e_u256; + let execution_hash = + 0xc2c133b1ea59352cef6c0434e0007cdba4bdc216afd32fdf6b40c4a135a8535e_u256; let execution_height = 7440225_u64; // Compute hash using our function @@ -117,9 +120,9 @@ mod tests { execution_hash, execution_height, ); - + let expected_hash = 0xBA8230D3714675CA5E80A257F3F2F581959A5E474E40101C52153192FD7728; - + // Assert they match assert_eq!(computed_hash, expected_hash, "Leaf hash computation mismatch"); } @@ -129,11 +132,14 @@ mod tests { fn test_hash_path_verification() { // Same leaf data as previous test let header_root = 0xcee6e3a29b289c3d0eb1f08f6cbf965a2f5771f54ca781fbf1f9d9a5e898d602_u256; - let beacon_state_root = 
0xac1d83f6ab8c04205b698f9b5dbe93a1136000ca0162941bf129029ad402906c_u256; + let beacon_state_root = + 0xac1d83f6ab8c04205b698f9b5dbe93a1136000ca0162941bf129029ad402906c_u256; let slot = 6710272_u64; - let committee_hash = 0x3ccf068854b1612cc9537f6fd2a56fb0734722ce40b89685f84e17a6986510d3_u256; + let committee_hash = + 0x3ccf068854b1612cc9537f6fd2a56fb0734722ce40b89685f84e17a6986510d3_u256; let n_signers = 479_u64; - let execution_hash = 0xc2c133b1ea59352cef6c0434e0007cdba4bdc216afd32fdf6b40c4a135a8535e_u256; + let execution_hash = + 0xc2c133b1ea59352cef6c0434e0007cdba4bdc216afd32fdf6b40c4a135a8535e_u256; let execution_height = 7440225_u64; // Compute the leaf hash @@ -150,27 +156,22 @@ mod tests { // Merkle path from JSON let path = array![ 0x0, - 0x293d3e8a80f400daaaffdd5932e2bcc8814bab8f414a75dcacf87318f8b14c5, - 0x296ec483967ad3fbe3407233db378b6284cc1fcc78d62457b97a4be6744ad0d, + 0x293d3e8a80f400daaaffdd5932e2bcc8814bab8f414a75dcacf87318f8b14c5, + 0x296ec483967ad3fbe3407233db378b6284cc1fcc78d62457b97a4be6744ad0d, 0x4127be83b42296fe28f98f8fdda29b96e22e5d90501f7d31b84e729ec2fac3f, - 0x33883305ab0df1ab7610153578a4d510b845841b84d90ed993133ce4ce8f827, - 0x6114fdf0455660a422ac813130104438c7baf332cc1eca0618957a3aeb68795 + 0x33883305ab0df1ab7610153578a4d510b845841b84d90ed993133ce4ce8f827, + 0x6114fdf0455660a422ac813130104438c7baf332cc1eca0618957a3aeb68795, ]; let index = 32_u16; - - // Compute root using hash_path - let computed_root = hash_path( - leaf_hash, - path, - index - ); - println!("Computed root: {:?}", computed_root); + // Compute root using hash_path + let computed_root = hash_path(leaf_hash, path, index); // Expected root is the first value in the JSON array (0x0) - let expected_root = 3014209719831846118507369742452047831482182187060364606511726060971609846063; - + let expected_root = + 3014209719831846118507369742452047831482182187060364606511726060971609846063; + assert_eq!(computed_root, expected_root, "Merkle root computation mismatch"); } -} \ No newline at end of file +} diff --git a/contract/tests/test_bankai.cairo b/contract/tests/test_bankai.cairo new file mode 100644 index 0000000..f0e5cc5 --- /dev/null +++ b/contract/tests/test_bankai.cairo @@ -0,0 +1,298 @@ +use bankai::BankaiContract; +use bankai::IBankaiContract; + +#[cfg(test)] +mod tests { + use super::{BankaiContract, IBankaiContract}; + use starknet::contract_address_const; + use starknet::testing::set_caller_address; + use starknet::testing::set_block_timestamp; + use starknet::ClassHash; + use openzeppelin_upgrades::interface::IUpgradeable; + use openzeppelin_access::ownable::interface::IOwnable; + + // Helper function to deploy the contract for testing + fn deploy_contract() -> BankaiContract::ContractState { + let mut state = BankaiContract::contract_state_for_testing(); + + // Set caller as contract deployer + let owner = contract_address_const::<0x123>(); + set_caller_address(owner); + + // Initialize with some test values + BankaiContract::constructor( + ref state, + 1, // committee_id + 1234.into(), // committee_hash + 111.into(), // committee_update_program_hash + 222.into(), // epoch_update_program_hash + 333.into() // epoch_batch_program_hash + ); + + state + } + + #[test] + fn test_constructor() { + let state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + + assert!(!IBankaiContract::is_paused(@state)); + assert_eq!(IBankaiContract::get_latest_epoch_slot(@state), 0); + assert_eq!(IBankaiContract::get_latest_committee_id(@state), 1); + 
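// The remaining assertions check the values passed to the constructor in deploy_contract(). +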
+        assert_eq!(IBankaiContract::get_committee_hash(@state, 1), 1234.into());
+        assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 111);
+        assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 222);
+        // Use the ownable component to check owner
+        assert_eq!(state.ownable.owner(), owner);
+    }
+
+    #[test]
+    fn test_pause_unpause() {
+        let mut state = deploy_contract();
+        let owner = contract_address_const::<0x123>();
+        set_caller_address(owner);
+
+        // Test initial state
+        assert!(!IBankaiContract::is_paused(@state));
+
+        // Test pause
+        IBankaiContract::pause(ref state);
+        assert!(IBankaiContract::is_paused(@state));
+
+        // Test unpause
+        IBankaiContract::unpause(ref state);
+        assert!(!IBankaiContract::is_paused(@state));
+    }
+
+    #[test]
+    #[should_panic(expected: ('Caller is not the owner',))]
+    fn test_pause_unauthorized() {
+        let mut state = deploy_contract();
+
+        // Try to pause from different address
+        let other = contract_address_const::<0x456>();
+        set_caller_address(other);
+        IBankaiContract::pause(ref state);
+    }
+
+    #[test]
+    fn test_transfer_ownership() {
+        let mut state = deploy_contract();
+        let owner = contract_address_const::<0x123>();
+        let new_owner = contract_address_const::<0x456>();
+
+        // Set caller as current owner
+        set_caller_address(owner);
+
+        // Use the ownable component directly
+        state.ownable.transfer_ownership(new_owner);
+
+        // Verify new owner
+        assert_eq!(state.ownable.owner(), new_owner);
+    }
+
+    #[test]
+    #[should_panic(expected: ('Caller is not the owner',))]
+    fn test_transfer_ownership_unauthorized() {
+        let mut state = deploy_contract();
+        let non_owner = contract_address_const::<0x456>();
+        let new_owner = contract_address_const::<0x789>();
+
+        // Try to transfer ownership from non-owner address
+        set_caller_address(non_owner);
+        state.ownable.transfer_ownership(new_owner);
+    }
+
+    #[test]
+    fn test_renounce_ownership() {
+        let mut state = deploy_contract();
+        let owner = contract_address_const::<0x123>();
+
+        // Set caller as current owner
+        set_caller_address(owner);
+
+        // Renounce ownership
+        state.ownable.renounce_ownership();
+
+        // Verify owner is now zero address
+        assert_eq!(state.ownable.owner().into(), 0);
+    }
+
+    #[test]
+    #[should_panic(expected: ('Caller is not the owner',))]
+    fn test_renounce_ownership_unauthorized() {
+        let mut state = deploy_contract();
+        let non_owner = contract_address_const::<0x456>();
+
+        // Try to renounce ownership from non-owner address
+        set_caller_address(non_owner);
+        state.ownable.renounce_ownership();
+    }
+
+    #[test]
+    fn test_program_hash_update() {
+        let mut state = deploy_contract();
+        let owner = contract_address_const::<0x123>();
+        set_caller_address(owner);
+
+        // Set initial timestamp
+        set_block_timestamp(1000);
+
+        // Propose update
+        IBankaiContract::propose_program_hash_update(
+            ref state,
+            444.into(), // new_committee_hash
+            555.into(), // new_epoch_hash
+            666.into() // new_batch_hash
+        );
+
+        // Execute after delay
+        set_block_timestamp(1000 + 172800); // After 48-hour delay
+        IBankaiContract::execute_program_hash_update(ref state);
+
+        // Verify updates
+        assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 444);
+        assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 555);
+        assert_eq!(IBankaiContract::get_epoch_batch_program_hash(@state), 666);
+    }
+
+    #[test]
+    #[should_panic(expected: ('Delay not elapsed',))]
+    fn test_program_hash_update_too_early() {
+        let mut state = deploy_contract();
+        let owner = contract_address_const::<0x123>();
+        set_caller_address(owner);
+
+        // Set initial timestamp
+        set_block_timestamp(1000);
+
+        // Propose update
+        IBankaiContract::propose_program_hash_update(
+            ref state,
+            444.into(), // new_committee_hash
+            555.into(), // new_epoch_hash
+            666.into() // new_batch_hash
+        );
+
+        // Try to execute before delay
+        set_block_timestamp(1000 + 172799); // Just before 48-hour delay
+        IBankaiContract::execute_program_hash_update(ref state);
+    }
+
+    #[test]
+    #[should_panic(expected: ('Caller is not the owner',))]
+    fn test_program_hash_update_unauthorized() {
+        let mut state = deploy_contract();
+        let non_owner = contract_address_const::<0x456>();
+        set_caller_address(non_owner);
+
+        IBankaiContract::propose_program_hash_update(ref state, 444.into(), 555.into(), 666.into());
+    }
+
+    #[test]
+    fn test_getters() {
+        let state = deploy_contract();
+
+        assert_eq!(IBankaiContract::get_committee_hash(@state, 1), 1234.into());
+        assert_eq!(IBankaiContract::get_latest_epoch_slot(@state), 0);
+        assert_eq!(IBankaiContract::get_latest_committee_id(@state), 1);
+        assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 111);
+        assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 222);
+    }
+
+    #[test]
+    #[should_panic(expected: ('CLASS_HASH_NOT_FOUND',))]
+    fn test_upgrade() {
+        let mut state = deploy_contract();
+        let owner = contract_address_const::<0x123>();
+        let new_class_hash: ClassHash = 123.try_into().unwrap();
+
+        set_caller_address(owner);
+
+        // Attempt upgrade
+        IUpgradeable::upgrade(ref state, new_class_hash);
+        // Note: In a real test environment, you'd want to verify the upgrade
+        // was successful, but this requires additional test infrastructure
+    }
+
+    #[test]
+    #[should_panic(expected: ('Caller is not the owner',))]
+    fn test_upgrade_unauthorized() {
+        let mut state = deploy_contract();
+        let non_owner = contract_address_const::<0x456>();
+        let new_class_hash: ClassHash = 123.try_into().unwrap();
+
+        set_caller_address(non_owner);
+        IUpgradeable::upgrade(ref state, new_class_hash);
+    }
+
+    #[test]
+    fn test_paused_state_prevents_operations() {
+        let mut state = deploy_contract();
+        let owner = contract_address_const::<0x123>();
+        set_caller_address(owner);
+
+        // Pause the contract
+        IBankaiContract::pause(ref state);
+        assert!(IBankaiContract::is_paused(@state));
+        // Verify that operations are prevented when paused
+        // Note: You might want to add more specific tests for each operation
+        // that should be prevented when paused
+    }
+
+    #[test]
+    #[should_panic(expected: ('Contract is already paused',))]
+    fn test_double_pause() {
+        let mut state = deploy_contract();
+        let owner = contract_address_const::<0x123>();
+        set_caller_address(owner);
+
+        IBankaiContract::pause(ref state);
+        IBankaiContract::pause(ref state); // Should fail
+    }
+
+    #[test]
+    #[should_panic(expected: ('Contract is not paused',))]
+    fn test_double_unpause() {
+        let mut state = deploy_contract();
+        let owner = contract_address_const::<0x123>();
+        set_caller_address(owner);
+
+        IBankaiContract::unpause(ref state); // Should fail when not paused
+    }
+
+    #[test]
+    fn test_program_hash_update_full_flow() {
+        let mut state = deploy_contract();
+        let owner = contract_address_const::<0x123>();
+        set_caller_address(owner);
+
+        // Set initial timestamp
+        set_block_timestamp(1000);
+
+        // Store initial values
+        let initial_committee_hash = IBankaiContract::get_committee_update_program_hash(@state);
+        let initial_epoch_hash = IBankaiContract::get_epoch_update_program_hash(@state);
+        let initial_batch_hash = IBankaiContract::get_epoch_batch_program_hash(@state);
+
+        // Propose update
+        IBankaiContract::propose_program_hash_update(ref state, 444.into(), 555.into(), 666.into());
+
+        // Verify values haven't changed before delay
+        assert_eq!(
+            IBankaiContract::get_committee_update_program_hash(@state), initial_committee_hash,
+        );
+        assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), initial_epoch_hash);
+        assert_eq!(IBankaiContract::get_epoch_batch_program_hash(@state), initial_batch_hash);
+
+        // Execute after delay
+        set_block_timestamp(1000 + 172800);
+        IBankaiContract::execute_program_hash_update(ref state);
+
+        // Verify all values updated
+        assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 444);
+        assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 555);
+        assert_eq!(IBankaiContract::get_epoch_batch_program_hash(@state), 666);
+    }
+}
diff --git a/scripts/setup.sh b/scripts/setup.sh
index 962ad58..a78dbbe 100755
--- a/scripts/setup.sh
+++ b/scripts/setup.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
 # Check if python3.10 is installed
 if ! command -v python3.10 >/dev/null; then
     echo "python3.10 is not installed. Please install Python 3.10 and try again."
diff --git a/tests/test_bankai.cairo b/tests/test_bankai.cairo
new file mode 100644
index 0000000..0519ecb
--- /dev/null
+++ b/tests/test_bankai.cairo
@@ -0,0 +1 @@
+ 
\ No newline at end of file