From e5fd1a19ffa651de2ce3a6b38d09b1dd1d8bfc51 Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 18 Dec 2024 14:28:07 +0100 Subject: [PATCH 01/66] Fix install script --- scripts/setup.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/setup.sh b/scripts/setup.sh index 962ad58..a78dbbe 100755 --- a/scripts/setup.sh +++ b/scripts/setup.sh @@ -1,3 +1,4 @@ +#!/bin/bash # Check if python3.10 is installed if ! command -v python3.10 >/dev/null; then echo "python3.10 is not installed. Please install Python 3.10 and try again." From 0c50aaa9f647394d85869e1c32f1852258193cfd Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 8 Jan 2025 10:18:50 +0100 Subject: [PATCH 02/66] WIP: Initial implementation of daemon --- client-rs/Cargo.lock | 483 +++++++++- client-rs/Cargo.toml | 30 +- client-rs/src/cli.rs | 338 +++++++ client-rs/src/config.rs | 2 +- client-rs/src/epoch_update.rs | 7 + client-rs/src/main.rs | 1182 ++++++++++++++++++++---- client-rs/src/utils/atlantic_client.rs | 46 +- client-rs/src/utils/cairo_runner.rs | 21 +- client-rs/src/utils/rpc.rs | 1 + client-rs/src/utils/starknet_client.rs | 8 +- 10 files changed, 1910 insertions(+), 208 deletions(-) create mode 100644 client-rs/src/cli.rs diff --git a/client-rs/Cargo.lock b/client-rs/Cargo.lock index b445d6c..dbca1d6 100644 --- a/client-rs/Cargo.lock +++ b/client-rs/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "addr2line" @@ -195,7 +195,7 @@ dependencies = [ "alloy-serde", "serde", "serde_with", - "thiserror 2.0.6", + "thiserror 2.0.9", ] [[package]] @@ -490,6 +490,61 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http 1.2.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.5.1", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper 1.0.2", + "tokio", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.2.0", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 1.0.2", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "backtrace" version = "0.3.74" @@ -756,13 +811,17 @@ version = "0.1.0" dependencies = [ "alloy-primitives 0.8.15", "alloy-rpc-types-beacon", + "axum", "beacon-state-proof", "bls12_381", "clap", "dotenv", "ethereum_serde_utils 0.7.0", + "futures", "hex", "itertools 0.13.0", + "num_cpus", + "postgres-types", "rand", "reqwest 0.12.9", "serde", @@ -770,9 +829,15 @@ dependencies = [ "serde_json", "sha2", "starknet", + "thiserror 2.0.9", "tokio", + "tokio-postgres", + "tokio-stream", + "tracing", + 
"tracing-subscriber", "tree_hash 0.8.0", "tree_hash_derive 0.8.0", + "uuid 1.11.0", ] [[package]] @@ -1195,7 +1260,7 @@ dependencies = [ "sha2", "sha3", "thiserror 1.0.69", - "uuid", + "uuid 0.8.2", ] [[package]] @@ -1416,6 +1481,21 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + [[package]] name = "futures-channel" version = "0.3.31" @@ -1423,6 +1503,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", + "futures-sink", ] [[package]] @@ -1431,6 +1512,34 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + 
[[package]] name = "futures-sink" version = "0.3.31" @@ -1449,10 +1558,16 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ + "futures-channel", "futures-core", + "futures-io", + "futures-macro", + "futures-sink", "futures-task", + "memchr", "pin-project-lite", "pin-utils", + "slab", ] [[package]] @@ -1716,6 +1831,7 @@ dependencies = [ "http 1.2.0", "http-body 1.0.1", "httparse", + "httpdate", "itoa", "pin-project-lite", "smallvec", @@ -2227,6 +2343,22 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest 0.10.7", +] + [[package]] name = "memchr" version = "2.7.4" @@ -2343,6 +2475,16 @@ dependencies = [ "tempfile", ] +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + [[package]] name = "num-bigint" version = "0.4.6" @@ -2458,6 +2600,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "pairing" version = "0.23.0" @@ -2544,10 +2692,28 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.6", + "thiserror 2.0.9", "ucd-trie", ] +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project-lite" version = "0.2.15" @@ -2576,6 +2742,49 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +[[package]] +name = "postgres-derive" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69700ea4603c5ef32d447708e6a19cd3e8ac197a000842e97f527daea5e4175f" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "postgres-protocol" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acda0ebdebc28befa84bee35e651e4c5f09073d668c7aed4cf7e23c3cda84b23" +dependencies = [ + "base64 0.22.1", + "byteorder", + "bytes", + "fallible-iterator", + "hmac", + "md-5", + "memchr", + "rand", + "sha2", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f66ea23a2d0e5734297357705193335e0a957696f34bed2f2faefacb2fec336f" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-derive", + "postgres-protocol", + "uuid 1.11.0", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -2841,10 +3050,12 @@ dependencies = 
[ "system-configuration 0.6.1", "tokio", "tokio-native-tls", + "tokio-util", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-streams", "web-sys", "windows-registry", ] @@ -3235,6 +3446,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -3321,6 +3542,15 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "shlex" version = "1.3.0" @@ -3346,6 +3576,12 @@ dependencies = [ "rand_core", ] +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + [[package]] name = "slab" version = "0.4.9" @@ -3584,6 +3820,17 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "stringprep" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", + "unicode-properties", +] + [[package]] name = "strsim" version = "0.10.0" @@ -3776,11 +4023,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.6" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +checksum = 
"f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" dependencies = [ - "thiserror-impl 2.0.6", + "thiserror-impl 2.0.9", ] [[package]] @@ -3796,15 +4043,25 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.6" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" +checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", "syn 2.0.90", ] +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + [[package]] name = "threadpool" version = "1.8.1" @@ -3864,6 +4121,21 @@ dependencies = [ "zerovec", ] +[[package]] +name = "tinyvec" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" version = "1.42.0" @@ -3903,6 +4175,32 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-postgres" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b5d3742945bc7d7f210693b0c58ae542c6fd47b17adbbda0885f3dcb34a6bdb" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot", + "percent-encoding", + "phf", + "pin-project-lite", + "postgres-protocol", + "postgres-types", + "rand", + "socket2", + "tokio", + "tokio-util", + "whoami", +] + [[package]] name = 
"tokio-rustls" version = "0.24.1" @@ -3923,6 +4221,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.13" @@ -3953,6 +4262,28 @@ dependencies = [ "winnow", ] +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 1.0.2", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + [[package]] name = "tower-service" version = "0.3.3" @@ -3965,10 +4296,23 @@ version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ + "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "tracing-core" version = "0.1.33" @@ -3976,6 +4320,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "nu-ansi-term", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", ] [[package]] @@ -4126,12 +4496,33 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" +[[package]] +name = "unicode-bidi" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" + [[package]] name = "unicode-ident" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" +[[package]] +name = "unicode-normalization" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-properties" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -4189,6 +4580,28 @@ dependencies = [ "serde", ] +[[package]] +name = "uuid" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" +dependencies = [ + "getrandom", + "rand", + "uuid-macro-internal", +] + +[[package]] +name = 
"uuid-macro-internal" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b91f57fe13a38d0ce9e28a03463d8d3c2468ed03d75375110ec71d93b449a08" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "valuable" version = "0.1.0" @@ -4237,6 +4650,12 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" version = "0.2.99" @@ -4304,6 +4723,19 @@ version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" +[[package]] +name = "wasm-streams" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "web-sys" version = "0.3.76" @@ -4320,6 +4752,39 @@ version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" +[[package]] +name = "whoami" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" +dependencies = [ + "redox_syscall", + "wasite", + "web-sys", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + 
"winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + [[package]] name = "windows-core" version = "0.52.0" diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index a7c12c5..d59be79 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -3,6 +3,15 @@ name = "client-rs" version = "0.1.0" edition = "2021" +[[bin]] +name = "daemon" # Binary name (used with `cargo run --bin daemon`) +path = "src/main.rs" # Path to the source file for this binary + +[[bin]] +name = "cli" +path = "src/cli.rs" + + [dependencies] alloy-primitives = "0.8.13" ethereum_serde_utils = "0.7.0" @@ -13,7 +22,7 @@ serde_json = "1.0.133" tokio = { version = "1.0", features = ["full"] } beacon-state-proof = { git = "https://github.com/petscheit/beacon-state-proof" } sha2 = "0.10.8" -reqwest = { version = "0.12.9", features = ["json", "multipart"] } +reqwest = { version = "0.12.9", features = ["json", "multipart", "stream"] } rand = "0.8.5" alloy-rpc-types-beacon = "0.7.2" itertools = "0.13.0" @@ -22,4 +31,21 @@ clap = { version = "4.5.22", features = ["derive"] } starknet = "0.12.0" tree_hash_derive = "0.8.0" tree_hash = "0.8.0" -dotenv = "0.15" \ No newline at end of file +dotenv = "0.15" +tokio-postgres = { version = "0.7.12", features = [ + "with-uuid-1" ] } +axum = "0.7.9" +thiserror = "2.0.9" +tracing = "0.1.41" +tracing-subscriber = "0.3.19" +tokio-stream = "0.1.17" +futures = "0.3" +uuid = { version = "1.11.0", features = [ + "v4", + "fast-rng", + "macro-diagnostics" +] } +postgres-types = { version = "0.2.8", features = ["derive"] 
} +num_cpus = "1.16.0" + + diff --git a/client-rs/src/cli.rs b/client-rs/src/cli.rs new file mode 100644 index 0000000..a5f5942 --- /dev/null +++ b/client-rs/src/cli.rs @@ -0,0 +1,338 @@ +mod config; +mod contract_init; +mod epoch_update; +mod sync_committee; +mod traits; +mod utils; + +use config::BankaiConfig; +use contract_init::ContractInitializationData; +use epoch_update::EpochUpdate; +use starknet::core::types::Felt; +use sync_committee::SyncCommitteeUpdate; +use traits::Provable; +use utils::{atlantic_client::AtlanticClient, cairo_runner::CairoRunner}; +use utils::{ + rpc::BeaconRpcClient, + starknet_client::{StarknetClient, StarknetError}, +}; +// use rand::Rng; +// use std::fs::File; +// use std::io::Write; +use clap::{Parser, Subcommand}; +use dotenv::from_filename; +use std::env; + +#[derive(Debug)] +pub enum Error { + InvalidProof, + RpcError(reqwest::Error), + DeserializeError(String), + IoError(std::io::Error), + StarknetError(StarknetError), + BlockNotFound, + FetchSyncCommitteeError, + FailedFetchingBeaconState, + InvalidBLSPoint, + MissingRpcUrl, + EmptySlotDetected(u64), + RequiresNewerEpoch(Felt), + CairoRunError(String), + AtlanticError(reqwest::Error), + InvalidResponse(String), +} + +impl From for Error { + fn from(e: StarknetError) -> Self { + Error::StarknetError(e) + } +} + +struct BankaiClient { + client: BeaconRpcClient, + starknet_client: StarknetClient, + config: BankaiConfig, + atlantic_client: AtlanticClient, +} + +impl BankaiClient { + pub async fn new() -> Self { + from_filename(".env.sepolia").ok(); + let config = BankaiConfig::default(); + Self { + client: BeaconRpcClient::new(env::var("BEACON_RPC_URL").unwrap()), + starknet_client: StarknetClient::new( + env::var("STARKNET_RPC_URL").unwrap().as_str(), + env::var("STARKNET_ADDRESS").unwrap().as_str(), + env::var("STARKNET_PRIVATE_KEY").unwrap().as_str(), + ) + .await + .unwrap(), + atlantic_client: AtlanticClient::new( + config.atlantic_endpoint.clone(), + 
env::var("ATLANTIC_API_KEY").unwrap(), + ), + config, + } + } + + pub async fn get_sync_committee_update( + &self, + mut slot: u64, + ) -> Result { + // Before we start generating the proof, we ensure the slot was not missed + match self.client.get_header(slot).await { + Ok(header) => header, + Err(Error::EmptySlotDetected(_)) => { + slot += 1; + println!("Empty slot detected! Fetching slot: {}", slot); + self.client.get_header(slot).await? + } + Err(e) => return Err(e), // Propagate other errors immediately + }; + + let proof: SyncCommitteeUpdate = SyncCommitteeUpdate::new(&self.client, slot).await?; + + Ok(proof) + } + + pub async fn get_epoch_proof(&self, slot: u64) -> Result { + let epoch_proof = EpochUpdate::new(&self.client, slot).await?; + Ok(epoch_proof) + } + + pub async fn get_contract_initialization_data( + &self, + slot: u64, + config: &BankaiConfig, + ) -> Result { + let contract_init = ContractInitializationData::new(&self.client, slot, config).await?; + Ok(contract_init) + } +} + +#[derive(Subcommand)] +enum Commands { + /// Generate a sync committee update proof for a given slot + CommitteeUpdate { + #[arg(long, short)] + slot: u64, + /// Export output to a JSON file + #[arg(long, short)] + export: Option, + }, + /// Generate an epoch update proof for a given slot + EpochUpdate { + #[arg(long, short)] + slot: u64, + /// Export output to a JSON file + #[arg(long, short)] + export: Option, + }, + /// Generate contract initialization data for a given slot + ContractInit { + #[arg(long, short)] + slot: u64, + /// Export output to a JSON file + #[arg(long, short)] + export: Option, + }, + DeployContract { + #[arg(long, short)] + slot: u64, + }, + ProveNextCommittee, + ProveNextEpoch, + CheckBatchStatus { + #[arg(long, short)] + batch_id: String, + }, + SubmitWrappedProof { + #[arg(long, short)] + batch_id: String, + }, + VerifyEpoch { + #[arg(long, short)] + batch_id: String, + #[arg(long, short)] + slot: u64, + }, + VerifyCommittee { + #[arg(long, 
short)] + batch_id: String, + #[arg(long, short)] + slot: u64, + }, +} + +#[derive(Parser)] +#[command(author, version, about, long_about = None)] +struct Cli { + /// Optional RPC URL (defaults to RPC_URL_BEACON environment variable) + #[arg(long, short)] + rpc_url: Option, + + #[command(subcommand)] + command: Commands, +} + +#[tokio::main] +async fn main() -> Result<(), Error> { + // Load .env.sepolia file + from_filename(".env.sepolia").ok(); + + let cli = Cli::parse(); + let bankai = BankaiClient::new().await; + + match cli.command { + Commands::CommitteeUpdate { slot, export } => { + println!("SyncCommittee command received with slot: {}", slot); + let proof = bankai.get_sync_committee_update(slot).await?; + let json = serde_json::to_string_pretty(&proof) + .map_err(|e| Error::DeserializeError(e.to_string()))?; + + if let Some(path) = export { + match std::fs::write(path.clone(), json) { + Ok(_) => println!("Proof exported to {}", path), + Err(e) => return Err(Error::IoError(e)), + } + } else { + println!("{}", json); + } + } + Commands::EpochUpdate { slot, export } => { + println!("Epoch command received with slot: {}", slot); + let proof = bankai.get_epoch_proof(slot).await?; + let json = serde_json::to_string_pretty(&proof) + .map_err(|e| Error::DeserializeError(e.to_string()))?; + + if let Some(path) = export { + match std::fs::write(path.clone(), json) { + Ok(_) => println!("Proof exported to {}", path), + Err(e) => return Err(Error::IoError(e)), + } + } else { + println!("{}", json); + } + } + Commands::ContractInit { slot, export } => { + println!("ContractInit command received with slot: {}", slot); + let contract_init = bankai + .get_contract_initialization_data(slot, &bankai.config) + .await?; + let json = serde_json::to_string_pretty(&contract_init) + .map_err(|e| Error::DeserializeError(e.to_string()))?; + + if let Some(path) = export { + match std::fs::write(path.clone(), json) { + Ok(_) => println!("Contract initialization data exported to {}", 
path), + Err(e) => return Err(Error::IoError(e)), + } + } else { + println!("{}", json); + } + } + Commands::DeployContract { slot } => { + let contract_init = bankai + .get_contract_initialization_data(slot, &bankai.config) + .await?; + bankai + .starknet_client + .deploy_contract(contract_init, &bankai.config) + .await?; + } + Commands::CheckBatchStatus { batch_id } => { + let status = bankai + .atlantic_client + .check_batch_status(batch_id.as_str()) + .await?; + println!("Batch Status: {}", status); + } + Commands::ProveNextCommittee => { + let latest_committee_id = bankai + .starknet_client + .get_latest_committee_id(&bankai.config) + .await?; + let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); + println!("Min Slot Required: {}", lowest_committee_update_slot); + let latest_epoch = bankai + .starknet_client + .get_latest_epoch(&bankai.config) + .await?; + println!("Latest epoch: {}", latest_epoch); + if latest_epoch < lowest_committee_update_slot { + return Err(Error::RequiresNewerEpoch(latest_epoch)); + } + let update = bankai + .get_sync_committee_update(latest_epoch.try_into().unwrap()) + .await?; + CairoRunner::generate_pie(&update, &bankai.config)?; + let batch_id = bankai.atlantic_client.submit_batch(update).await?; + println!("Batch Submitted: {}", batch_id); + } + Commands::ProveNextEpoch => { + let latest_epoch = bankai + .starknet_client + .get_latest_epoch(&bankai.config) + .await?; + println!("Latest Epoch: {}", latest_epoch); + // make sure next_epoch % 32 == 0 + let next_epoch = (u64::try_from(latest_epoch).unwrap() / 32) * 32 + 32; + println!("Fetching Inputs for Epoch: {}", next_epoch); + let proof = bankai.get_epoch_proof(next_epoch).await?; + CairoRunner::generate_pie(&proof, &bankai.config)?; + let batch_id = bankai.atlantic_client.submit_batch(proof).await?; + println!("Batch Submitted: {}", batch_id); + } + Commands::VerifyEpoch { batch_id, slot } => { + let status = bankai + .atlantic_client + 
.check_batch_status(batch_id.as_str()) + .await?; + if status == "DONE" { + let update = EpochUpdate::from_json::(slot)?; + bankai + .starknet_client + .submit_update(update.expected_circuit_outputs, &bankai.config) + .await?; + println!("Successfully submitted epoch update"); + } else { + println!("Batch not completed yet. Status: {}", status); + } + } + Commands::VerifyCommittee { batch_id, slot } => { + let status = bankai + .atlantic_client + .check_batch_status(batch_id.as_str()) + .await?; + if status == "DONE" { + let update = SyncCommitteeUpdate::from_json::(slot)?; + bankai + .starknet_client + .submit_update(update.expected_circuit_outputs, &bankai.config) + .await?; + println!("Successfully submitted sync committee update"); + } else { + println!("Batch not completed yet. Status: {}", status); + } + } + Commands::SubmitWrappedProof { batch_id } => { + let status = bankai + .atlantic_client + .check_batch_status(batch_id.as_str()) + .await?; + if status == "DONE" { + let proof = bankai + .atlantic_client + .fetch_proof(batch_id.as_str()) + .await?; + let batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; + println!("Batch Submitted: {}", batch_id); + } else { + println!("Batch not completed yet. 
Status: {}", status); + } + } + } + + Ok(()) +} diff --git a/client-rs/src/config.rs b/client-rs/src/config.rs index ba53726..2eb3773 100644 --- a/client-rs/src/config.rs +++ b/client-rs/src/config.rs @@ -1,6 +1,6 @@ use starknet::core::types::Felt; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct BankaiConfig { pub contract_class_hash: Felt, pub contract_address: Felt, diff --git a/client-rs/src/epoch_update.rs b/client-rs/src/epoch_update.rs index 178b9bd..cae9c89 100644 --- a/client-rs/src/epoch_update.rs +++ b/client-rs/src/epoch_update.rs @@ -16,6 +16,13 @@ use starknet::{core::types::Felt, macros::selector}; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +#[derive(Debug, Serialize, Deserialize)] +pub struct EpochProof { + pub header_root: FixedBytes<32>, + pub state_root: FixedBytes<32>, + pub n_signers: u64, +} + #[derive(Debug, Serialize, Deserialize)] pub struct EpochUpdate { pub circuit_inputs: EpochCircuitInputs, diff --git a/client-rs/src/main.rs b/client-rs/src/main.rs index a5f5942..c4b2573 100644 --- a/client-rs/src/main.rs +++ b/client-rs/src/main.rs @@ -5,23 +5,117 @@ mod sync_committee; mod traits; mod utils; +use alloy_primitives::TxHash; use config::BankaiConfig; +use serde_json::json; + +use alloy_primitives::FixedBytes; +use alloy_rpc_types_beacon::events::HeadEvent; +use axum::{ + extract::{DefaultBodyLimit, Path, State}, + //http::{header, StatusCode}, + response::{IntoResponse, Json, Response}, + routing::{get, post}, + Router, +}; use contract_init::ContractInitializationData; -use epoch_update::EpochUpdate; +use dotenv::from_filename; +use epoch_update::{EpochProof, EpochUpdate}; +use postgres_types::{FromSql, ToSql}; +use reqwest; use starknet::core::types::Felt; -use sync_committee::SyncCommitteeUpdate; +use std::env; +use std::sync::Arc; +use tokio::sync::mpsc; +use tokio::task; +use tokio_postgres::{Client, NoTls}; +use tokio_stream::StreamExt; +use tracing::{error, info, trace, warn, Level}; +use 
tracing_subscriber::FmtSubscriber; use traits::Provable; use utils::{atlantic_client::AtlanticClient, cairo_runner::CairoRunner}; use utils::{ rpc::BeaconRpcClient, + // bankai_client::BankaiClient, starknet_client::{StarknetClient, StarknetError}, }; -// use rand::Rng; -// use std::fs::File; -// use std::io::Write; -use clap::{Parser, Subcommand}; -use dotenv::from_filename; -use std::env; +//use std::error::Error as StdError; +use std::fmt; +use std::net::SocketAddr; +use sync_committee::SyncCommitteeUpdate; +use tokio::time::Duration; +use uuid::Uuid; + +const SLOTS_PER_EPOCH: u64 = 32; // For mainnet +const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet + +impl std::fmt::Display for StarknetError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + StarknetError::ProviderError(err) => write!(f, "Provider error: {}", err), + StarknetError::AccountError(msg) => write!(f, "Account error: {}", msg), + } + } +} + +impl std::error::Error for StarknetError {} + +#[derive(Debug, FromSql, ToSql)] +#[postgres(name = "job_status")] +enum JobStatus { + #[postgres(name = "CREATED")] + Created, + #[postgres(name = "FETCHED_PROOF")] + FetchedProof, + #[postgres(name = "PIE_GENERATED")] + PieGenerated, + #[postgres(name = "OFFCHAIN_PROOF_REQUESTED")] + OffchainProofRequested, + #[postgres(name = "OFFCHAIN_PROOF_RETRIEVED")] + OffchainProofRetrieved, + #[postgres(name = "WRAP_PROOF_REQUESTED")] + WrapProofRequested, + #[postgres(name = "WRAPPED_PROOF_DONE")] + WrappedProofDone, + #[postgres(name = "PROOF_DECOMMITMENT_CALLED")] + ProofDecommitmentCalled, + #[postgres(name = "VERIFIED_FACT_REGISTERED")] + VerifiedFactRegistered, + #[postgres(name = "ERROR")] + Cancelled, + #[postgres(name = "CANCELLED")] + Error, +} + +impl ToString for JobStatus { + fn to_string(&self) -> String { + match self { + JobStatus::Created => "CREATED".to_string(), + JobStatus::FetchedProof => "FETCHED_PROOF".to_string(), + JobStatus::PieGenerated => 
"PIE_GENERATED".to_string(), + JobStatus::OffchainProofRequested => "OFFCHAIN_PROOF_REQUESTED".to_string(), + JobStatus::OffchainProofRetrieved => "OFFCHAIN_PROOF_RETRIEVED".to_string(), + JobStatus::WrapProofRequested => "WRAP_PROOF_REQUESTED".to_string(), + JobStatus::WrappedProofDone => "WRAPPED_PROOF_DONE".to_string(), + JobStatus::ProofDecommitmentCalled => "PROOF_DECOMMITMENT_CALLED".to_string(), + JobStatus::VerifiedFactRegistered => "VERIFIED_FACT_REGISTERED".to_string(), + JobStatus::Cancelled => "CANCELLED".to_string(), + JobStatus::Error => "ERROR".to_string(), + } + } +} + +#[derive(Debug, FromSql, ToSql)] +enum JobType { + EpochUpdate, + SyncComiteeUpdate, +} + +#[derive(Debug, FromSql, ToSql)] +enum AtlanticJobType { + ProofGeneration, + ProofWrapping, +} #[derive(Debug)] pub enum Error { @@ -40,6 +134,42 @@ pub enum Error { CairoRunError(String), AtlanticError(reqwest::Error), InvalidResponse(String), + PoolingTimeout(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Error::InvalidProof => write!(f, "Invalid proof provided"), + Error::RpcError(err) => write!(f, "RPC error: {}", err), + Error::DeserializeError(msg) => write!(f, "Deserialization error: {}", msg), + Error::IoError(err) => write!(f, "I/O error: {}", err), + Error::StarknetError(err) => write!(f, "Starknet error: {}", err), + Error::BlockNotFound => write!(f, "Block not found"), + Error::FetchSyncCommitteeError => write!(f, "Failed to fetch sync committee"), + Error::FailedFetchingBeaconState => write!(f, "Failed to fetch beacon state"), + Error::InvalidBLSPoint => write!(f, "Invalid BLS point"), + Error::MissingRpcUrl => write!(f, "Missing RPC URL"), + Error::EmptySlotDetected(slot) => write!(f, "Empty slot detected: {}", slot), + Error::RequiresNewerEpoch(felt) => write!(f, "Requires newer epoch: {}", felt), + Error::CairoRunError(msg) => write!(f, "Cairo run error: {}", msg), + Error::AtlanticError(err) => 
write!(f, "Atlantic RPC error: {}", err), + Error::InvalidResponse(msg) => write!(f, "Invalid response: {}", msg), + Error::PoolingTimeout(msg) => write!(f, "Pooling timeout: {}", msg), + } + } +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::RpcError(err) => Some(err), + Error::IoError(err) => Some(err), + Error::StarknetError(err) => Some(err), + Error::AtlanticError(err) => Some(err), + _ => None, // No underlying source for other variants + } + } } impl From for Error { @@ -48,6 +178,22 @@ impl From for Error { } } +#[derive(Debug)] +struct Job { + job_id: Uuid, + job_type: JobType, + job_status: JobStatus, + slot: u64, +} + +#[derive(Clone, Debug)] +struct AppState { + db_client: Arc, + tx: mpsc::Sender, + bankai: Arc, +} + +#[derive(Debug)] struct BankaiClient { client: BeaconRpcClient, starknet_client: StarknetClient, @@ -111,228 +257,902 @@ impl BankaiClient { } } -#[derive(Subcommand)] -enum Commands { - /// Generate a sync committee update proof for a given slot - CommitteeUpdate { - #[arg(long, short)] - slot: u64, - /// Export output to a JSON file - #[arg(long, short)] - export: Option, - }, - /// Generate an epoch update proof for a given slot - EpochUpdate { - #[arg(long, short)] - slot: u64, - /// Export output to a JSON file - #[arg(long, short)] - export: Option, - }, - /// Generate contract initialization data for a given slot - ContractInit { - #[arg(long, short)] - slot: u64, - /// Export output to a JSON file - #[arg(long, short)] - export: Option, - }, - DeployContract { - #[arg(long, short)] - slot: u64, - }, - ProveNextCommittee, - ProveNextEpoch, - CheckBatchStatus { - #[arg(long, short)] - batch_id: String, - }, - SubmitWrappedProof { - #[arg(long, short)] - batch_id: String, - }, - VerifyEpoch { - #[arg(long, short)] - batch_id: String, - #[arg(long, short)] - slot: u64, - }, - VerifyCommittee { - #[arg(long, short)] - batch_id: String, - #[arg(long, 
short)] - slot: u64, - }, +fn check_env_vars() -> Result<(), String> { + let required_vars = [ + "BEACON_RPC_URL", + "STARKNET_RPC_URL", + "STARKNET_ADDRESS", + "STARKNET_PRIVATE_KEY", + "ATLANTIC_API_KEY", + "PROOF_REGISTRY", + "POSTGRESQL_HOST", + "POSTGRESQL_USER", + "POSTGRESQL_PASSWORD", + "POSTGRESQL_DB_NAME", + "RPC_LISTEN_HOST", + "RPC_LISTEN_PORT", + ]; + + for &var in &required_vars { + if env::var(var).is_err() { + return Err(format!("Environment variable `{}` is not set", var)); + } + } + + Ok(()) } -#[derive(Parser)] -#[command(author, version, about, long_about = None)] -struct Cli { - /// Optional RPC URL (defaults to RPC_URL_BEACON environment variable) - #[arg(long, short)] - rpc_url: Option, +fn slot_to_epoch(slot: u64) -> u64 { + slot / SLOTS_PER_EPOCH +} - #[command(subcommand)] - command: Commands, +fn slot_to_sync_committee_id(slot: u64) -> u64 { + slot / SLOTS_PER_SYNC_COMMITTEE } #[tokio::main] -async fn main() -> Result<(), Error> { +//async fn main() { +async fn main() -> Result<(), Box> { // Load .env.sepolia file from_filename(".env.sepolia").ok(); - let cli = Cli::parse(); - let bankai = BankaiClient::new().await; + let slot_listener_toggle = true; + + let subscriber = FmtSubscriber::builder() + .with_max_level(Level::DEBUG) + //.with_max_level(Level::INFO) + .finish(); + + tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); + + // Validate environment variables + check_env_vars().map_err(|e| { + error!("Error: {}", e); + std::process::exit(1); // Exit if validation fails + }); - match cli.command { - Commands::CommitteeUpdate { slot, export } => { - println!("SyncCommittee command received with slot: {}", slot); - let proof = bankai.get_sync_committee_update(slot).await?; - let json = serde_json::to_string_pretty(&proof) - .map_err(|e| Error::DeserializeError(e.to_string()))?; + info!("Starting Bankai light-client daemon..."); - if let Some(path) = export { - match 
std::fs::write(path.clone(), json) { - Ok(_) => println!("Proof exported to {}", path), - Err(e) => return Err(Error::IoError(e)), + //let database_host = env::var("DATABASE_HOST").expect("DATABASE_HOST must be set"); + let (tx, mut rx): (mpsc::Sender, mpsc::Receiver) = mpsc::channel(32); + + //let (tx, mut rx) = mpsc::channel(32); + + let connection_string = "host=localhost user=root password=root dbname=bankai"; + // let connection_string = format!( + // "host={} user={} password={} dbname={}", + // env::var("POSTGRESQL_HOST").unwrap().as_str(), + // env::var("POSTGRESQL_USER").unwrap().as_str(), + // env::var("POSTGRESQL_PASSWORD").unwrap().as_str(), + // env::var("POSTGRESQL_DB_NAME").unwrap().as_str() + // ); + let _connection_result: Result< + ( + Client, + tokio_postgres::Connection, + ), + tokio_postgres::Error, + > = tokio_postgres::connect(connection_string, NoTls).await; + + let db_client = match tokio_postgres::connect(connection_string, NoTls).await { + Ok((client, connection)) => { + // Spawn a task to manage the connection + tokio::spawn(async move { + if let Err(e) = connection.await { + eprintln!("Connection error: {}", e); } - } else { - println!("{}", json); - } + }); + + info!("Connected to the database successfully!"); + + // Wrap the client in an Arc for shared ownership + Arc::new(client) } - Commands::EpochUpdate { slot, export } => { - println!("Epoch command received with slot: {}", slot); - let proof = bankai.get_epoch_proof(slot).await?; - let json = serde_json::to_string_pretty(&proof) - .map_err(|e| Error::DeserializeError(e.to_string()))?; - - if let Some(path) = export { - match std::fs::write(path.clone(), json) { - Ok(_) => println!("Proof exported to {}", path), - Err(e) => return Err(Error::IoError(e)), + Err(err) => { + error!("Failed to connect to the database: {}", err); + std::process::exit(1); // Exit with a non-zero status code + } + }; + + //let db_client_for_task = Arc::new(db_client); + + let bankai = 
Arc::new(BankaiClient::new().await); + // Clone the Arc for use in async task + let bankai_for_task = Arc::clone(&bankai); + + // Beacon node endpoint construction for ervents + let events_endpoint = format!( + "{}/eth/v1/events?topics=head", + env::var("BEACON_RPC_URL").unwrap().as_str() + ); + //let events_endpoint = format!("{}/eth/v1/events?topics=head", beacon_node_url); + + //Spawn a background task to process jobs + tokio::spawn({ + let bankai_for_job = Arc::clone(&bankai); + let db_client_for_job = Arc::clone(&db_client); + async move { + while let Some(job) = rx.recv().await { + let job_id = job.job_id.clone(); + if let Err(e) = + process_job(job, db_client_for_job.clone(), bankai_for_job.clone()).await + { + update_job_status(&db_client_for_job.clone(), job_id, JobStatus::Error).await; + error!("Error processing job {}: {}", job_id, e); } - } else { - println!("{}", json); } } - Commands::ContractInit { slot, export } => { - println!("ContractInit command received with slot: {}", slot); - let contract_init = bankai - .get_contract_initialization_data(slot, &bankai.config) - .await?; - let json = serde_json::to_string_pretty(&contract_init) - .map_err(|e| Error::DeserializeError(e.to_string()))?; + }); - if let Some(path) = export { - match std::fs::write(path.clone(), json) { - Ok(_) => println!("Contract initialization data exported to {}", path), - Err(e) => return Err(Error::IoError(e)), + // let db_client_for_task =db_client.clone(); + let db_client_for_state = db_client.clone(); + let tx_for_task = tx.clone(); + + let app_state: AppState = AppState { + db_client: db_client_for_state, + tx, + bankai, + }; + + let app = Router::new() + .route("/status", get(handle_get_status)) + //.route("/get-epoch-proof/:slot", get(handle_get_epoch_proof)) + //.route("/get-committee-hash/:committee_id", get(handle_get_committee_hash)) + .route( + "/debug/get-epoch-update/:slot", + get(handle_get_epoch_update), + ) + .route( + "/debug/get-latest-verified-slot", + 
get(handle_get_latest_verified_slot), + ) + // .route("/debug/get-job-status", get(handle_get_job_status)) + // .route("/get-merkle-inclusion-proof", get(handle_get_merkle_inclusion_proof)) + .layer(DefaultBodyLimit::disable()) + .with_state(app_state); + + let addr = "0.0.0.0:3000".parse::()?; + let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); + + info!("Bankai RPC HTTP server is listening on http://{}", addr); + + let server_task = tokio::spawn(async move { + let _ = axum::serve(listener, app).await; + }); + + // Listen for the new slots on BeaconChain + // Create an HTTP client + let http_stream_client = reqwest::Client::new(); + + // Send the request to the Beacon node + let response = http_stream_client + .get(&events_endpoint) + .send() + .await + .unwrap(); + + //let db_client = Arc::new(&db_client); + if slot_listener_toggle { + task::spawn({ + async move { + // Check if response is successful; if not, bail out early + // TODO: need to implement resilience and potentialy use multiple providers (implement something like fallbackprovider functionality in ethers), handle reconnection if connection is lost for various reasons + if !response.status().is_success() { + error!("Failed to connect: HTTP {}", response.status()); + return; + } + + info!("Listening for new slots, epochs and sync committee updates..."); + let mut stream = response.bytes_stream(); + + while let Some(chunk) = stream.next().await { + let Ok(bytes) = chunk else { + warn!("Error reading stream: {}", chunk.err().unwrap()); + continue; + }; + + let Ok(text) = String::from_utf8(bytes.to_vec()) else { + warn!("Failed to parse UTF-8."); + continue; + }; + + if text.is_empty() { + continue; + } + + trace!("New slot event detected: {}", text); + + // Search for JSON start + let Some(json_start) = text.find('{') else { + warn!("No JSON data found in the input."); + continue; + }; + + // Try parsing the JSON substring into your event structsync_committee_id + let Ok(parsed_event) = 
serde_json::from_str::(&text[json_start..]) + else { + warn!("Failed to parse JSON data received from Beacon Chain event."); + continue; + }; + + info!( + "New slot event detected: {} | Is epoch transition: {}", + parsed_event.slot, parsed_event.epoch_transition + ); + + if parsed_event.epoch_transition { + info!("Epoch transition detected! Starting processing..."); + + // Check also now if slot is the moment of switch to new sync committee set + if parsed_event.slot % SLOTS_PER_SYNC_COMMITTEE == 0 { + let sync_committee_id = slot_to_sync_committee_id(parsed_event.slot); + info!("In this slot sync committee rotation taken place. Slot {} Sync committee id: {}", parsed_event.slot, sync_committee_id); + // We should probably now start sync committee verify job + } + + let job_id = Uuid::new_v4(); + let job = Job { + job_id: job_id.clone(), + job_type: JobType::EpochUpdate, + job_status: JobStatus::Created, + slot: parsed_event.slot - 32, + }; + + let db_client = db_client.clone(); + match create_job(db_client, job_id, parsed_event.slot).await { + // Insert new job record to DB + Ok(()) => { + // Handle success + info!("Job created successfully with ID: {}", job_id); + if tx_for_task.send(job).await.is_err() { + error!("Failed to send job."); + } + // let job = Job { + // job_id: job_id.clone(), + // job_type: JobType::EpochUpdate, + // job_status: JobStatus::Created, + // slot: parsed_event.slot - 32, + // }; + // if tx_for_task.send(job).await.is_err() { + // error!("Failed to send job."); + // } + // + // If starting committee update job, first ensule that the corresponding slot is registered in contract + } + Err(e) => { + // Handle the error + error!("Error creating job: {}", e); + } + } + + // match bankai_for_task.get_epoch_proof(parsed_event.slot - 32).await { + // Ok(proof) => info!("Epoch proof fetched successfully: {:?}", proof), + // Err(e) => error!("Failed to fetch epoch proof: {:?}", e), + // } + } } - } else { - println!("{}", json); } + }); + } + + // 
Wait for the server task to finish + server_task.await?; + + Ok(()) +} + +async fn set_atlantic_job_queryid( + client: &Client, + job_id: Uuid, + batch_id: String, + atlantic_job_type: AtlanticJobType, +) -> Result<(), Box> { + match atlantic_job_type { + AtlanticJobType::ProofGeneration => { + client + .execute( + "UPDATE jobs SET atlantic_batch_id_proof_generation = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&batch_id.to_string(), &job_id], + ) + .await?; } - Commands::DeployContract { slot } => { - let contract_init = bankai - .get_contract_initialization_data(slot, &bankai.config) + AtlanticJobType::ProofWrapping => { + client + .execute( + "UPDATE jobs SET atlantic_batch_id_proof_wrapping = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&batch_id.to_string(), &job_id], + ) + .await?; + } // _ => { + // println!("Unk", status); + // } + } + + Ok(()) +} + +async fn insert_verified_epoch( + client: &Client, + epoch_id: u64, + epoch_proof: EpochProof, +) -> Result<(), Box> { + let status = JobStatus::Created; // new job starts at 'Created' + + client + .execute( + "INSERT INTO verified_epoch (epoch_id, header_root, state_root, n_signers) VALUES ($1)", + &[ + &epoch_id.to_string(), + &epoch_proof.header_root.to_string(), + &epoch_proof.state_root.to_string(), + &epoch_proof.n_signers.to_string(), + ], + ) + .await?; + + Ok(()) +} + +async fn insert_verified_sync_committee( + client: &Client, + sync_committee_id: u64, + sync_committee_hash: FixedBytes<32>, +) -> Result<(), Box> { + let status = JobStatus::Created; // new job starts at 'Created' + + client + .execute( + "INSERT INTO verified_sync_committee (sync_committee_id, sync_committee_hash) VALUES ($1)", + &[&sync_committee_id.to_string(), &sync_committee_hash.to_string()], + ) + .await?; + + Ok(()) +} + +async fn create_job( + client: Arc, + job_id: Uuid, + slot: u64, +) -> Result<(), Box> { + let status = JobStatus::Created; // new job starts at 'Created' + + client + .execute( + "INSERT INTO jobs 
(job_uuid, job_status, slot, type) VALUES ($1, $2, $3, $4)", + &[ + &job_id, + &status.to_string(), + &(slot as i64), + &"EPOCH_UPDATE", + ], + ) + .await?; + + Ok(()) +} + +async fn fetch_job_status( + client: &Client, + job_id: Uuid, +) -> Result, Box> { + let row_opt = client + .query_opt("SELECT status FROM jobs WHERE job_id = $1", &[&job_id]) + .await?; + + Ok(row_opt.map(|row| row.get("status"))) +} + +async fn update_job_status( + client: &Client, + job_id: Uuid, + new_status: JobStatus, +) -> Result<(), Box> { + client + .execute( + "UPDATE jobs SET job_status = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&new_status.to_string(), &job_id], + ) + .await?; + Ok(()) +} + +async fn set_job_txhash( + client: &Client, + job_id: Uuid, + txhash: Felt, +) -> Result<(), Box> { + client + .execute( + "UPDATE jobs SET tx_hash = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&txhash.to_string(), &job_id], + ) + .await?; + Ok(()) +} + +async fn cancell_all_unfinished_jobs( + client: &Client, +) -> Result<(), Box> { + client + .execute( + "UPDATE jobs SET status = $1, updated_at = NOW() WHERE status = 'FETCHING'", + &[&JobStatus::Cancelled.to_string()], + ) + .await?; + Ok(()) +} + +// async fn fetch_job_by_status( +// client: &Client, +// status: JobStatus, +// ) -> Result, Box> { +// let tx = client.transaction().await?; + +// let row_opt = tx +// .query_opt( +// r#" +// SELECT job_id, status +// FROM jobs +// WHERE status = $1 +// ORDER BY updated_at ASC +// LIMIT 1 +// FOR UPDATE SKIP LOCKED +// "#, +// &[&status], +// ) +// .await?; + +// let job = if let Some(row) = row_opt { +// Some(Job { +// job_id: row.get("job_id"), +// job_type: row.get("type"), +// job_status: row.get("status"), +// slot: row.get("slot"), +// }) +// } else { +// None +// }; + +// tx.commit().await?; +// Ok(job) +// } + +// async fn add_verified_epoch( +// client: Arc, +// slot: u64, +// ) -> Result<(), Box> { +// client +// .execute( +// "INSERT INTO verified_epochs (slot, job_status, 
slot, type) VALUES ($1, $2, $3, $4)", +// &[&slot, &status.to_string(), &(slot as i64), &"EPOCH_UPDATE"], +// ) +// .await?; + +// Ok(()) +// } + +// async fn worker_task(mut rx: Receiver, db_client: Client) -> Result<(), Box> { +// while let Some(job_id) = rx.recv().await { +// println!("Worker received job {job_id}"); + +// // 4a) Check current status in DB +// if let Some(status) = fetch_job_status(&db_client, job_id).await? { +// match status { +// JobStatus::Created => { +// println!("Fetching proof for job {job_id}..."); +// // Then update status +// update_job_status(&db_client, job_id, JobStatus::FetchedProof).await?; +// println!("Job {job_id} updated to FetchedProof"); +// } +// JobStatus::FetchedProof => { +// // Already fetched, maybe do next step... +// println!("Job {job_id} is already FetchedProof; ignoring for now."); +// } +// _ => { +// println!("Job {job_id} in status {:?}, no action needed.", status); +// } +// } +// } else { +// eprintln!("No job found in DB for ID = {job_id}"); +// } +// } +// Ok(()) +// } + +// mpsc jobs // +async fn process_job( + job: Job, + db_client: Arc, + bankai: Arc, +) -> Result<(), Box> { + match job.job_type { + JobType::EpochUpdate => { + // Epoch job + info!( + "[EPOCH JOB] Started processing epoch job: {} for epoch {}", + job.job_id, job.slot + ); + + //update_job_status(&db_client, job.job_id, JobStatus::Created).await?; + + // 1) Fetch the latest on-chain verified epoch + let latest_epoch = bankai + .starknet_client + .get_latest_epoch(&bankai.config) .await?; + + info!( + "[EPOCH JOB] Latest onchain verified epoch: {}", + latest_epoch + ); + + // make sure next_epoch % 32 == 0 + let next_epoch = (u64::try_from(latest_epoch).unwrap() / 32) * 32 + 32; + info!( + "[EPOCH JOB] Fetching Inputs for next Epoch: {}...", + next_epoch + ); + + // 2) Fetch the proof + let proof = bankai.get_epoch_proof(next_epoch).await?; + info!( + "[EPOCH JOB] Fetched Inputs successfully for Epoch: {}", + next_epoch + ); + + 
update_job_status(&db_client, job.job_id, JobStatus::FetchedProof).await?; + + // 3) Generate PIE + info!( + "[EPOCH JOB] Starting Cairo execution and PIE generation for Epoch: {}...", + next_epoch + ); + + CairoRunner::generate_pie(&proof, &bankai.config)?; + + info!( + "[EPOCH JOB] Pie generated successfully for Epoch: {}...", + next_epoch + ); + + update_job_status(&db_client, job.job_id, JobStatus::PieGenerated).await?; + + // 4) Submit offchain proof-generation job to Atlantic + info!("[EPOCH JOB] Sending proof generation query to Atlantic..."); + + let batch_id = bankai.atlantic_client.submit_batch(proof).await?; + + info!( + "[EPOCH JOB] Proof generation batch submitted to Atlantic. QueryID: {}", + batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; + set_atlantic_job_queryid( + &db_client, + job.job_id, + batch_id.clone(), + AtlanticJobType::ProofGeneration, + ) + .await?; + + // Pool for Atlantic execution done bankai - .starknet_client - .deploy_contract(contract_init, &bankai.config) + .atlantic_client + .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) .await?; - } - Commands::CheckBatchStatus { batch_id } => { - let status = bankai + + info!( + "[EPOCH JOB] Proof generation done by Atlantic. QueryID: {}", + batch_id + ); + + let proof = bankai + .atlantic_client + .fetch_proof(batch_id.as_str()) + .await?; + + info!( + "[EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", + batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; + + // 5) Submit wrapped proof request + info!("[EPOCH JOB] Sending proof wrapping query to Atlantic.."); + let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; + info!( + "[EPOCH JOB] Proof wrapping query submitted to Atlantic. 
Wrapping QueryID: {}", + wrapping_batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; + set_atlantic_job_queryid( + &db_client, + job.job_id, + wrapping_batch_id.clone(), + AtlanticJobType::ProofWrapping, + ) + .await?; + + // Pool for Atlantic execution done + bankai .atlantic_client - .check_batch_status(batch_id.as_str()) + .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) + .await?; + + update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; + + info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); + + update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; + + // 6) Submit epoch update onchain + info!("[EPOCH JOB] Calling epoch update onchain..."); + let update = EpochUpdate::from_json::(next_epoch)?; + + let txhash = bankai + .starknet_client + .submit_update(update.expected_circuit_outputs, &bankai.config) .await?; - println!("Batch Status: {}", status); + + set_job_txhash(&db_client, job.job_id, txhash).await?; + + info!("[EPOCH JOB] Successfully submitted epoch update..."); + + update_job_status(&db_client, job.job_id, JobStatus::ProofDecommitmentCalled).await?; + + // Now we can get proof from contract? 
+ // bankai.starknet_client.get_epoch_proof( + // &self, + // slot: u64, + // config: &BankaiConfig) + + // Insert data to DB after successful onchain epoch verification + // insert_verified_epoch(&db_client, job.slot / 0x2000, epoch_proof).await?; } - Commands::ProveNextCommittee => { + JobType::SyncComiteeUpdate => { + // Sync committee job + info!( + "[SYNC COMMITTEE JOB] Started processing sync committee job: {} for epoch {}", + job.job_id, job.slot + ); + let latest_committee_id = bankai .starknet_client .get_latest_committee_id(&bankai.config) .await?; - let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); - println!("Min Slot Required: {}", lowest_committee_update_slot); + + info!( + "[SYNC COMMITTEE JOB] Latest onchain verified sync committee: {}", + latest_committee_id + ); + let latest_epoch = bankai .starknet_client .get_latest_epoch(&bankai.config) .await?; - println!("Latest epoch: {}", latest_epoch); + + let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); + if latest_epoch < lowest_committee_update_slot { - return Err(Error::RequiresNewerEpoch(latest_epoch)); + error!("[SYNC COMMITTEE JOB] Epoch update requires newer epoch",); + //return Err(Error::RequiresNewerEpoch(latest_epoch)); } + let update = bankai .get_sync_committee_update(latest_epoch.try_into().unwrap()) .await?; + + info!( + "[SYNC COMMITTEE JOB] Received sync committee update: {:?}", + update + ); + + info!( + "[SYNC COMMITTEE JOB] Starting Cairo execution and PIE generation for Sync Committee: {:?}...", + latest_committee_id + ); + CairoRunner::generate_pie(&update, &bankai.config)?; + + update_job_status(&db_client, job.job_id, JobStatus::PieGenerated).await?; + + info!( + "[SYNC COMMITTEE JOB] Pie generated successfully for Sync Committee: {}...", + latest_committee_id + ); + info!("[SYNC COMMITTEE JOB] Sending proof generation query to Atlantic..."); + let batch_id = bankai.atlantic_client.submit_batch(update).await?; - 
println!("Batch Submitted: {}", batch_id); - } - Commands::ProveNextEpoch => { - let latest_epoch = bankai - .starknet_client - .get_latest_epoch(&bankai.config) - .await?; - println!("Latest Epoch: {}", latest_epoch); - // make sure next_epoch % 32 == 0 - let next_epoch = (u64::try_from(latest_epoch).unwrap() / 32) * 32 + 32; - println!("Fetching Inputs for Epoch: {}", next_epoch); - let proof = bankai.get_epoch_proof(next_epoch).await?; - CairoRunner::generate_pie(&proof, &bankai.config)?; - let batch_id = bankai.atlantic_client.submit_batch(proof).await?; - println!("Batch Submitted: {}", batch_id); - } - Commands::VerifyEpoch { batch_id, slot } => { - let status = bankai + + update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; + set_atlantic_job_queryid( + &db_client, + job.job_id, + batch_id.clone(), + AtlanticJobType::ProofGeneration, + ) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Proof generation batch submitted to atlantic. QueryID: {}", + batch_id + ); + + // Pool for Atlantic execution done + bankai .atlantic_client - .check_batch_status(batch_id.as_str()) + .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) .await?; - if status == "DONE" { - let update = EpochUpdate::from_json::(slot)?; - bankai - .starknet_client - .submit_update(update.expected_circuit_outputs, &bankai.config) - .await?; - println!("Successfully submitted epoch update"); - } else { - println!("Batch not completed yet. Status: {}", status); - } - } - Commands::VerifyCommittee { batch_id, slot } => { - let status = bankai + + info!( + "[SYNC COMMITTEE JOB] Proof generation done by Atlantic. 
QueryID: {}", + batch_id + ); + + let proof = bankai .atlantic_client - .check_batch_status(batch_id.as_str()) + .fetch_proof(batch_id.as_str()) .await?; - if status == "DONE" { - let update = SyncCommitteeUpdate::from_json::(slot)?; - bankai - .starknet_client - .submit_update(update.expected_circuit_outputs, &bankai.config) - .await?; - println!("Successfully submitted sync committee update"); - } else { - println!("Batch not completed yet. Status: {}", status); - } - } - Commands::SubmitWrappedProof { batch_id } => { - let status = bankai + + info!( + "[SYNC COMMITTEE JOB] Proof retrieved from Atlantic. QueryID: {}", + batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; + + // 5) Submit wrapped proof request + info!("[SYNC COMMITTEE JOB] Sending proof wrapping query to Atlantic.."); + let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; + info!( + "[SYNC COMMITTEE JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", + wrapping_batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; + set_atlantic_job_queryid( + &db_client, + job.job_id, + wrapping_batch_id.clone(), + AtlanticJobType::ProofWrapping, + ) + .await?; + + // Pool for Atlantic execution done + bankai .atlantic_client - .check_batch_status(batch_id.as_str()) + .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) .await?; - if status == "DONE" { - let proof = bankai - .atlantic_client - .fetch_proof(batch_id.as_str()) - .await?; - let batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; - println!("Batch Submitted: {}", batch_id); - } else { - println!("Batch not completed yet. Status: {}", status); - } + + update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; + + info!("[SYNC COMMITTEE JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. 
Wrapping QueryID: {}", wrapping_batch_id); + + update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; + + let update = SyncCommitteeUpdate::from_json::(job.slot)?; + + info!("[SYNC COMMITTEE JOB] Calling sync committee update onchain..."); + + let txhash = bankai + .starknet_client + .submit_update(update.expected_circuit_outputs, &bankai.config) + .await?; + + set_job_txhash(&db_client, job.job_id, txhash).await?; + + // Insert data to DB after successful onchain sync committee verification + //insert_verified_sync_committee(&db_client, job.slot, sync_committee_hash).await?; } } Ok(()) } + +// RPC requests handling functions // + +async fn handle_get_status(State(state): State) -> impl IntoResponse { + Json(json!({ "success": true })) +} + +async fn handle_get_epoch_update( + Path(slot): Path, + State(state): State, +) -> impl IntoResponse { + match state.bankai.get_epoch_proof(slot).await { + Ok(epoch_update) => { + // Convert `EpochUpdate` to `serde_json::Value` + let value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { + eprintln!("Failed to serialize EpochUpdate: {:?}", err); + json!({ "error": "Internal server error" }) + }); + Json(value) + } + Err(err) => { + eprintln!("Failed to fetch proof: {:?}", err); + Json(json!({ "error": "Failed to fetch proof" })) + } + } +} + +// async fn handle_get_epoch_proof( +// Path(slot): Path, +// State(state): State, +// ) -> impl IntoResponse { +// match state.bankai.starknet_client.get_epoch_proof(slot).await { +// Ok(epoch_update) => { +// // Convert `EpochUpdate` to `serde_json::Value` +// let value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { +// eprintln!("Failed to serialize EpochUpdate: {:?}", err); +// json!({ "error": "Internal server error" }) +// }); +// Json(value) +// } +// Err(err) => { +// eprintln!("Failed to fetch proof: {:?}", err); +// Json(json!({ "error": "Failed to fetch proof" })) +// } +// } +// } + +// async fn handle_get_committee_hash( 
+// Path(committee_id): Path, +// State(state): State, +// ) -> impl IntoResponse { +// match state.bankai.starknet_client.get_committee_hash(committee_id).await { +// Ok(committee_hash) => { +// // Convert `EpochUpdate` to `serde_json::Value` +// let value = serde_json::to_value(committee_hash).unwrap_or_else(|err| { +// eprintln!("Failed to serialize EpochUpdate: {:?}", err); +// json!({ "error": "Internal server error" }) +// }); +// Json(value) +// } +// Err(err) => { +// eprintln!("Failed to fetch proof: {:?}", err); +// Json(json!({ "error": "Failed to fetch proof" })) +// } +// } +// } + +async fn handle_get_latest_verified_slot(State(state): State) -> impl IntoResponse { + match state + .bankai + .starknet_client + .get_latest_epoch(&state.bankai.config) + .await + { + Ok(latest_epoch) => { + // Convert `Felt` to a string and parse it as a hexadecimal number + let hex_string = latest_epoch.to_string(); // Ensure this converts to a "0x..." string + match u64::from_str_radix(hex_string.trim_start_matches("0x"), 16) { + Ok(decimal_epoch) => Json(json!({ "latest_verified_slot": decimal_epoch })), + Err(err) => { + eprintln!("Failed to parse latest_epoch as decimal: {:?}", err); + Json(json!({ "error": "Invalid epoch format" })) + } + } + } + Err(err) => { + eprintln!("Failed to fetch latest epoch: {:?}", err); + Json(json!({ "error": "Failed to fetch latest epoch" })) + } + } +} + +// async fn handle_get_job_status( +// Path(job_id): Path, +// State(state): State, +// ) -> impl IntoResponse { +// match fetch_job_status(&state.db_client, job_id).await { +// Ok(job_status) => Json(job_status), +// Err(err) => { +// eprintln!("Failed to fetch job status: {:?}", err); +// Json(json!({ "error": "Failed to fetch job status" })) +// } +// } +// } diff --git a/client-rs/src/utils/atlantic_client.rs b/client-rs/src/utils/atlantic_client.rs index 87f4d6e..abfab86 100644 --- a/client-rs/src/utils/atlantic_client.rs +++ b/client-rs/src/utils/atlantic_client.rs @@ -4,6 
+4,10 @@ use crate::traits::{ProofType, Provable}; use crate::Error; use reqwest::multipart::{Form, Part}; use serde::{Deserialize, Serialize}; +use tokio::time::{sleep, Duration}; +use tracing::{debug, error, info, trace}; + +#[derive(Debug)] pub struct AtlanticClient { endpoint: String, api_key: String, @@ -60,9 +64,9 @@ impl AtlanticClient { .map_err(Error::AtlanticError)?; if !response.status().is_success() { - println!("Error status: {}", response.status()); + error!("Error status: {}", response.status()); let error_text = response.text().await.map_err(Error::AtlanticError)?; - println!("Error response: {}", error_text); + error!("Error response: {}", error_text); return Err(Error::InvalidResponse(format!( "Request failed: {}", error_text @@ -80,7 +84,7 @@ impl AtlanticClient { } pub async fn submit_wrapped_proof(&self, proof: StarkProof) -> Result { - println!("Uploading to Atlantic..."); + info!("Uploading to Atlantic..."); // Serialize the proof to JSON string let proof_json = serde_json::to_string(&proof).map_err(|e| Error::DeserializeError(e.to_string()))?; @@ -171,4 +175,40 @@ impl AtlanticClient { Ok(status.to_string()) } + + pub async fn poll_batch_status_until_done( + &self, + batch_id: &str, + sleep_duration: Duration, + max_retries: usize, + ) -> Result { + for attempt in 1..=max_retries { + debug!("Pooling Atlantic for update... {}", batch_id); + let status = self.check_batch_status(batch_id).await?; + + if status == "DONE" { + return Ok(true); + } + + if status == "FAILED" { + return Err(Error::InvalidResponse(format!( + "Atlantic processing failed for query {}", + batch_id + ))); + } + + trace!( + "Batch not completed yet. Status: {}. 
Pooling attempt {}/{}", + status, + attempt, + max_retries + ); + sleep(sleep_duration).await; + } + + return Err(Error::InvalidResponse(format!( + "Pooling timeout for batch {}", + batch_id + ))); + } } diff --git a/client-rs/src/utils/cairo_runner.rs b/client-rs/src/utils/cairo_runner.rs index fefea03..a67ca4c 100644 --- a/client-rs/src/utils/cairo_runner.rs +++ b/client-rs/src/utils/cairo_runner.rs @@ -1,6 +1,7 @@ use crate::traits::ProofType; use crate::BankaiConfig; use crate::{traits::Provable, Error}; +use tracing::info; pub struct CairoRunner(); @@ -14,18 +15,18 @@ impl CairoRunner { }; let pie_path = input.pie_path(); - println!("Generating trace..."); + info!("Generating trace..."); let start_time = std::time::Instant::now(); // Execute cairo-run command - let output = std::process::Command::new("sh") - .arg("-c") - .arg(format!( - "source ../venv/bin/activate && cairo-run --program {} --program_input {} --cairo_pie_output {} --layout=all_cairo", - program_path, - input_path, - pie_path - )) + let output = std::process::Command::new("../venv/bin/cairo-run") + .arg("--program") + .arg(program_path) + .arg("--program_input") + .arg(input_path) + .arg("--cairo_pie_output") + .arg(pie_path) + .arg("--layout=all_cairo") .output() .map_err(|e| Error::CairoRunError(format!("Failed to execute commands: {}", e)))?; @@ -36,7 +37,7 @@ impl CairoRunner { String::from_utf8_lossy(&output.stderr).to_string(), )); } else { - println!("Trace generated successfully in {:.2?}!", duration); + info!("Trace generated successfully in {:.2?}!", duration); } Ok(()) diff --git a/client-rs/src/utils/rpc.rs b/client-rs/src/utils/rpc.rs index ecc32da..bda4043 100644 --- a/client-rs/src/utils/rpc.rs +++ b/client-rs/src/utils/rpc.rs @@ -8,6 +8,7 @@ use serde_json::Value; /// A client for interacting with the Ethereum Beacon Chain RPC endpoints. /// Provides methods to fetch headers, sync aggregates, and validator information. 
+#[derive(Debug)] pub(crate) struct BeaconRpcClient { provider: Client, pub rpc_url: String, diff --git a/client-rs/src/utils/starknet_client.rs b/client-rs/src/utils/starknet_client.rs index db2313c..3ae3f55 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -21,6 +21,8 @@ use std::sync::Arc; use crate::contract_init::ContractInitializationData; use crate::traits::Submittable; use crate::BankaiConfig; + +#[derive(Debug)] pub struct StarknetClient { account: Arc, LocalWallet>>, // provider: Arc>, @@ -93,7 +95,7 @@ impl StarknetClient { &self, update: impl Submittable, config: &BankaiConfig, - ) -> Result<(), StarknetError> { + ) -> Result { let result = self .account .execute_v1(vec![Call { @@ -106,7 +108,9 @@ impl StarknetClient { .map_err(|e| StarknetError::AccountError(e.to_string()))?; println!("tx_hash: {:?}", result.transaction_hash); - Ok(()) + + // Return the transaction hash + Ok(result.transaction_hash) } pub async fn get_committee_hash( From 99a6e308d6f1a1d963c2aa3010eb1d2becad2b5d Mon Sep 17 00:00:00 2001 From: lakewik Date: Thu, 9 Jan 2025 21:51:18 +0100 Subject: [PATCH 03/66] Prepare to merge --- client-rs/Cargo.toml | 4 +- client-rs/src/cli.rs | 338 ------- client-rs/src/daemon.rs | 1158 +++++++++++++++++++++++ client-rs/src/main.rs | 1182 ++++-------------------- client-rs/src/utils/atlantic_client.rs | 3 +- 5 files changed, 1343 insertions(+), 1342 deletions(-) delete mode 100644 client-rs/src/cli.rs create mode 100644 client-rs/src/daemon.rs diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index d59be79..33416c4 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -5,11 +5,11 @@ edition = "2021" [[bin]] name = "daemon" # Binary name (used with `cargo run --bin bin1`) -path = "src/main.rs" # Path to the source file for this binary +path = "src/daemon.rs" # Path to the source file for this binary [[bin]] name = "cli" -path = "src/cli.rs" +path = "src/main.rs" [dependencies] diff 
--git a/client-rs/src/cli.rs b/client-rs/src/cli.rs deleted file mode 100644 index a5f5942..0000000 --- a/client-rs/src/cli.rs +++ /dev/null @@ -1,338 +0,0 @@ -mod config; -mod contract_init; -mod epoch_update; -mod sync_committee; -mod traits; -mod utils; - -use config::BankaiConfig; -use contract_init::ContractInitializationData; -use epoch_update::EpochUpdate; -use starknet::core::types::Felt; -use sync_committee::SyncCommitteeUpdate; -use traits::Provable; -use utils::{atlantic_client::AtlanticClient, cairo_runner::CairoRunner}; -use utils::{ - rpc::BeaconRpcClient, - starknet_client::{StarknetClient, StarknetError}, -}; -// use rand::Rng; -// use std::fs::File; -// use std::io::Write; -use clap::{Parser, Subcommand}; -use dotenv::from_filename; -use std::env; - -#[derive(Debug)] -pub enum Error { - InvalidProof, - RpcError(reqwest::Error), - DeserializeError(String), - IoError(std::io::Error), - StarknetError(StarknetError), - BlockNotFound, - FetchSyncCommitteeError, - FailedFetchingBeaconState, - InvalidBLSPoint, - MissingRpcUrl, - EmptySlotDetected(u64), - RequiresNewerEpoch(Felt), - CairoRunError(String), - AtlanticError(reqwest::Error), - InvalidResponse(String), -} - -impl From for Error { - fn from(e: StarknetError) -> Self { - Error::StarknetError(e) - } -} - -struct BankaiClient { - client: BeaconRpcClient, - starknet_client: StarknetClient, - config: BankaiConfig, - atlantic_client: AtlanticClient, -} - -impl BankaiClient { - pub async fn new() -> Self { - from_filename(".env.sepolia").ok(); - let config = BankaiConfig::default(); - Self { - client: BeaconRpcClient::new(env::var("BEACON_RPC_URL").unwrap()), - starknet_client: StarknetClient::new( - env::var("STARKNET_RPC_URL").unwrap().as_str(), - env::var("STARKNET_ADDRESS").unwrap().as_str(), - env::var("STARKNET_PRIVATE_KEY").unwrap().as_str(), - ) - .await - .unwrap(), - atlantic_client: AtlanticClient::new( - config.atlantic_endpoint.clone(), - env::var("ATLANTIC_API_KEY").unwrap(), - ), - 
config, - } - } - - pub async fn get_sync_committee_update( - &self, - mut slot: u64, - ) -> Result { - // Before we start generating the proof, we ensure the slot was not missed - match self.client.get_header(slot).await { - Ok(header) => header, - Err(Error::EmptySlotDetected(_)) => { - slot += 1; - println!("Empty slot detected! Fetching slot: {}", slot); - self.client.get_header(slot).await? - } - Err(e) => return Err(e), // Propagate other errors immediately - }; - - let proof: SyncCommitteeUpdate = SyncCommitteeUpdate::new(&self.client, slot).await?; - - Ok(proof) - } - - pub async fn get_epoch_proof(&self, slot: u64) -> Result { - let epoch_proof = EpochUpdate::new(&self.client, slot).await?; - Ok(epoch_proof) - } - - pub async fn get_contract_initialization_data( - &self, - slot: u64, - config: &BankaiConfig, - ) -> Result { - let contract_init = ContractInitializationData::new(&self.client, slot, config).await?; - Ok(contract_init) - } -} - -#[derive(Subcommand)] -enum Commands { - /// Generate a sync committee update proof for a given slot - CommitteeUpdate { - #[arg(long, short)] - slot: u64, - /// Export output to a JSON file - #[arg(long, short)] - export: Option, - }, - /// Generate an epoch update proof for a given slot - EpochUpdate { - #[arg(long, short)] - slot: u64, - /// Export output to a JSON file - #[arg(long, short)] - export: Option, - }, - /// Generate contract initialization data for a given slot - ContractInit { - #[arg(long, short)] - slot: u64, - /// Export output to a JSON file - #[arg(long, short)] - export: Option, - }, - DeployContract { - #[arg(long, short)] - slot: u64, - }, - ProveNextCommittee, - ProveNextEpoch, - CheckBatchStatus { - #[arg(long, short)] - batch_id: String, - }, - SubmitWrappedProof { - #[arg(long, short)] - batch_id: String, - }, - VerifyEpoch { - #[arg(long, short)] - batch_id: String, - #[arg(long, short)] - slot: u64, - }, - VerifyCommittee { - #[arg(long, short)] - batch_id: String, - #[arg(long, short)] - 
slot: u64, - }, -} - -#[derive(Parser)] -#[command(author, version, about, long_about = None)] -struct Cli { - /// Optional RPC URL (defaults to RPC_URL_BEACON environment variable) - #[arg(long, short)] - rpc_url: Option, - - #[command(subcommand)] - command: Commands, -} - -#[tokio::main] -async fn main() -> Result<(), Error> { - // Load .env.sepolia file - from_filename(".env.sepolia").ok(); - - let cli = Cli::parse(); - let bankai = BankaiClient::new().await; - - match cli.command { - Commands::CommitteeUpdate { slot, export } => { - println!("SyncCommittee command received with slot: {}", slot); - let proof = bankai.get_sync_committee_update(slot).await?; - let json = serde_json::to_string_pretty(&proof) - .map_err(|e| Error::DeserializeError(e.to_string()))?; - - if let Some(path) = export { - match std::fs::write(path.clone(), json) { - Ok(_) => println!("Proof exported to {}", path), - Err(e) => return Err(Error::IoError(e)), - } - } else { - println!("{}", json); - } - } - Commands::EpochUpdate { slot, export } => { - println!("Epoch command received with slot: {}", slot); - let proof = bankai.get_epoch_proof(slot).await?; - let json = serde_json::to_string_pretty(&proof) - .map_err(|e| Error::DeserializeError(e.to_string()))?; - - if let Some(path) = export { - match std::fs::write(path.clone(), json) { - Ok(_) => println!("Proof exported to {}", path), - Err(e) => return Err(Error::IoError(e)), - } - } else { - println!("{}", json); - } - } - Commands::ContractInit { slot, export } => { - println!("ContractInit command received with slot: {}", slot); - let contract_init = bankai - .get_contract_initialization_data(slot, &bankai.config) - .await?; - let json = serde_json::to_string_pretty(&contract_init) - .map_err(|e| Error::DeserializeError(e.to_string()))?; - - if let Some(path) = export { - match std::fs::write(path.clone(), json) { - Ok(_) => println!("Contract initialization data exported to {}", path), - Err(e) => return Err(Error::IoError(e)), - } 
- } else { - println!("{}", json); - } - } - Commands::DeployContract { slot } => { - let contract_init = bankai - .get_contract_initialization_data(slot, &bankai.config) - .await?; - bankai - .starknet_client - .deploy_contract(contract_init, &bankai.config) - .await?; - } - Commands::CheckBatchStatus { batch_id } => { - let status = bankai - .atlantic_client - .check_batch_status(batch_id.as_str()) - .await?; - println!("Batch Status: {}", status); - } - Commands::ProveNextCommittee => { - let latest_committee_id = bankai - .starknet_client - .get_latest_committee_id(&bankai.config) - .await?; - let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); - println!("Min Slot Required: {}", lowest_committee_update_slot); - let latest_epoch = bankai - .starknet_client - .get_latest_epoch(&bankai.config) - .await?; - println!("Latest epoch: {}", latest_epoch); - if latest_epoch < lowest_committee_update_slot { - return Err(Error::RequiresNewerEpoch(latest_epoch)); - } - let update = bankai - .get_sync_committee_update(latest_epoch.try_into().unwrap()) - .await?; - CairoRunner::generate_pie(&update, &bankai.config)?; - let batch_id = bankai.atlantic_client.submit_batch(update).await?; - println!("Batch Submitted: {}", batch_id); - } - Commands::ProveNextEpoch => { - let latest_epoch = bankai - .starknet_client - .get_latest_epoch(&bankai.config) - .await?; - println!("Latest Epoch: {}", latest_epoch); - // make sure next_epoch % 32 == 0 - let next_epoch = (u64::try_from(latest_epoch).unwrap() / 32) * 32 + 32; - println!("Fetching Inputs for Epoch: {}", next_epoch); - let proof = bankai.get_epoch_proof(next_epoch).await?; - CairoRunner::generate_pie(&proof, &bankai.config)?; - let batch_id = bankai.atlantic_client.submit_batch(proof).await?; - println!("Batch Submitted: {}", batch_id); - } - Commands::VerifyEpoch { batch_id, slot } => { - let status = bankai - .atlantic_client - .check_batch_status(batch_id.as_str()) - .await?; - if status == "DONE" 
{ - let update = EpochUpdate::from_json::(slot)?; - bankai - .starknet_client - .submit_update(update.expected_circuit_outputs, &bankai.config) - .await?; - println!("Successfully submitted epoch update"); - } else { - println!("Batch not completed yet. Status: {}", status); - } - } - Commands::VerifyCommittee { batch_id, slot } => { - let status = bankai - .atlantic_client - .check_batch_status(batch_id.as_str()) - .await?; - if status == "DONE" { - let update = SyncCommitteeUpdate::from_json::(slot)?; - bankai - .starknet_client - .submit_update(update.expected_circuit_outputs, &bankai.config) - .await?; - println!("Successfully submitted sync committee update"); - } else { - println!("Batch not completed yet. Status: {}", status); - } - } - Commands::SubmitWrappedProof { batch_id } => { - let status = bankai - .atlantic_client - .check_batch_status(batch_id.as_str()) - .await?; - if status == "DONE" { - let proof = bankai - .atlantic_client - .fetch_proof(batch_id.as_str()) - .await?; - let batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; - println!("Batch Submitted: {}", batch_id); - } else { - println!("Batch not completed yet. 
Status: {}", status); - } - } - } - - Ok(()) -} diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs new file mode 100644 index 0000000..c4b2573 --- /dev/null +++ b/client-rs/src/daemon.rs @@ -0,0 +1,1158 @@ +mod config; +mod contract_init; +mod epoch_update; +mod sync_committee; +mod traits; +mod utils; + +use alloy_primitives::TxHash; +use config::BankaiConfig; +use serde_json::json; + +use alloy_primitives::FixedBytes; +use alloy_rpc_types_beacon::events::HeadEvent; +use axum::{ + extract::{DefaultBodyLimit, Path, State}, + //http::{header, StatusCode}, + response::{IntoResponse, Json, Response}, + routing::{get, post}, + Router, +}; +use contract_init::ContractInitializationData; +use dotenv::from_filename; +use epoch_update::{EpochProof, EpochUpdate}; +use postgres_types::{FromSql, ToSql}; +use reqwest; +use starknet::core::types::Felt; +use std::env; +use std::sync::Arc; +use tokio::sync::mpsc; +use tokio::task; +use tokio_postgres::{Client, NoTls}; +use tokio_stream::StreamExt; +use tracing::{error, info, trace, warn, Level}; +use tracing_subscriber::FmtSubscriber; +use traits::Provable; +use utils::{atlantic_client::AtlanticClient, cairo_runner::CairoRunner}; +use utils::{ + rpc::BeaconRpcClient, + // bankai_client::BankaiClient, + starknet_client::{StarknetClient, StarknetError}, +}; +//use std::error::Error as StdError; +use std::fmt; +use std::net::SocketAddr; +use sync_committee::SyncCommitteeUpdate; +use tokio::time::Duration; +use uuid::Uuid; + +const SLOTS_PER_EPOCH: u64 = 32; // For mainnet +const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet + +impl std::fmt::Display for StarknetError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + StarknetError::ProviderError(err) => write!(f, "Provider error: {}", err), + StarknetError::AccountError(msg) => write!(f, "Account error: {}", msg), + } + } +} + +impl std::error::Error for StarknetError {} + +#[derive(Debug, FromSql, ToSql)] +#[postgres(name = 
"job_status")] +enum JobStatus { + #[postgres(name = "CREATED")] + Created, + #[postgres(name = "FETCHED_PROOF")] + FetchedProof, + #[postgres(name = "PIE_GENERATED")] + PieGenerated, + #[postgres(name = "OFFCHAIN_PROOF_REQUESTED")] + OffchainProofRequested, + #[postgres(name = "OFFCHAIN_PROOF_RETRIEVED")] + OffchainProofRetrieved, + #[postgres(name = "WRAP_PROOF_REQUESTED")] + WrapProofRequested, + #[postgres(name = "WRAPPED_PROOF_DONE")] + WrappedProofDone, + #[postgres(name = "PROOF_DECOMMITMENT_CALLED")] + ProofDecommitmentCalled, + #[postgres(name = "VERIFIED_FACT_REGISTERED")] + VerifiedFactRegistered, + #[postgres(name = "ERROR")] + Cancelled, + #[postgres(name = "CANCELLED")] + Error, +} + +impl ToString for JobStatus { + fn to_string(&self) -> String { + match self { + JobStatus::Created => "CREATED".to_string(), + JobStatus::FetchedProof => "FETCHED_PROOF".to_string(), + JobStatus::PieGenerated => "PIE_GENERATED".to_string(), + JobStatus::OffchainProofRequested => "OFFCHAIN_PROOF_REQUESTED".to_string(), + JobStatus::OffchainProofRetrieved => "OFFCHAIN_PROOF_RETRIEVED".to_string(), + JobStatus::WrapProofRequested => "WRAP_PROOF_REQUESTED".to_string(), + JobStatus::WrappedProofDone => "WRAPPED_PROOF_DONE".to_string(), + JobStatus::ProofDecommitmentCalled => "PROOF_DECOMMITMENT_CALLED".to_string(), + JobStatus::VerifiedFactRegistered => "VERIFIED_FACT_REGISTERED".to_string(), + JobStatus::Cancelled => "CANCELLED".to_string(), + JobStatus::Error => "ERROR".to_string(), + } + } +} + +#[derive(Debug, FromSql, ToSql)] +enum JobType { + EpochUpdate, + SyncComiteeUpdate, +} + +#[derive(Debug, FromSql, ToSql)] +enum AtlanticJobType { + ProofGeneration, + ProofWrapping, +} + +#[derive(Debug)] +pub enum Error { + InvalidProof, + RpcError(reqwest::Error), + DeserializeError(String), + IoError(std::io::Error), + StarknetError(StarknetError), + BlockNotFound, + FetchSyncCommitteeError, + FailedFetchingBeaconState, + InvalidBLSPoint, + MissingRpcUrl, + 
EmptySlotDetected(u64), + RequiresNewerEpoch(Felt), + CairoRunError(String), + AtlanticError(reqwest::Error), + InvalidResponse(String), + PoolingTimeout(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Error::InvalidProof => write!(f, "Invalid proof provided"), + Error::RpcError(err) => write!(f, "RPC error: {}", err), + Error::DeserializeError(msg) => write!(f, "Deserialization error: {}", msg), + Error::IoError(err) => write!(f, "I/O error: {}", err), + Error::StarknetError(err) => write!(f, "Starknet error: {}", err), + Error::BlockNotFound => write!(f, "Block not found"), + Error::FetchSyncCommitteeError => write!(f, "Failed to fetch sync committee"), + Error::FailedFetchingBeaconState => write!(f, "Failed to fetch beacon state"), + Error::InvalidBLSPoint => write!(f, "Invalid BLS point"), + Error::MissingRpcUrl => write!(f, "Missing RPC URL"), + Error::EmptySlotDetected(slot) => write!(f, "Empty slot detected: {}", slot), + Error::RequiresNewerEpoch(felt) => write!(f, "Requires newer epoch: {}", felt), + Error::CairoRunError(msg) => write!(f, "Cairo run error: {}", msg), + Error::AtlanticError(err) => write!(f, "Atlantic RPC error: {}", err), + Error::InvalidResponse(msg) => write!(f, "Invalid response: {}", msg), + Error::PoolingTimeout(msg) => write!(f, "Pooling timeout: {}", msg), + } + } +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::RpcError(err) => Some(err), + Error::IoError(err) => Some(err), + Error::StarknetError(err) => Some(err), + Error::AtlanticError(err) => Some(err), + _ => None, // No underlying source for other variants + } + } +} + +impl From for Error { + fn from(e: StarknetError) -> Self { + Error::StarknetError(e) + } +} + +#[derive(Debug)] +struct Job { + job_id: Uuid, + job_type: JobType, + job_status: JobStatus, + slot: u64, +} + +#[derive(Clone, Debug)] +struct AppState { + 
db_client: Arc, + tx: mpsc::Sender, + bankai: Arc, +} + +#[derive(Debug)] +struct BankaiClient { + client: BeaconRpcClient, + starknet_client: StarknetClient, + config: BankaiConfig, + atlantic_client: AtlanticClient, +} + +impl BankaiClient { + pub async fn new() -> Self { + from_filename(".env.sepolia").ok(); + let config = BankaiConfig::default(); + Self { + client: BeaconRpcClient::new(env::var("BEACON_RPC_URL").unwrap()), + starknet_client: StarknetClient::new( + env::var("STARKNET_RPC_URL").unwrap().as_str(), + env::var("STARKNET_ADDRESS").unwrap().as_str(), + env::var("STARKNET_PRIVATE_KEY").unwrap().as_str(), + ) + .await + .unwrap(), + atlantic_client: AtlanticClient::new( + config.atlantic_endpoint.clone(), + env::var("ATLANTIC_API_KEY").unwrap(), + ), + config, + } + } + + pub async fn get_sync_committee_update( + &self, + mut slot: u64, + ) -> Result { + // Before we start generating the proof, we ensure the slot was not missed + match self.client.get_header(slot).await { + Ok(header) => header, + Err(Error::EmptySlotDetected(_)) => { + slot += 1; + println!("Empty slot detected! Fetching slot: {}", slot); + self.client.get_header(slot).await? 
+ } + Err(e) => return Err(e), // Propagate other errors immediately + }; + + let proof: SyncCommitteeUpdate = SyncCommitteeUpdate::new(&self.client, slot).await?; + + Ok(proof) + } + + pub async fn get_epoch_proof(&self, slot: u64) -> Result { + let epoch_proof = EpochUpdate::new(&self.client, slot).await?; + Ok(epoch_proof) + } + + pub async fn get_contract_initialization_data( + &self, + slot: u64, + config: &BankaiConfig, + ) -> Result { + let contract_init = ContractInitializationData::new(&self.client, slot, config).await?; + Ok(contract_init) + } +} + +fn check_env_vars() -> Result<(), String> { + let required_vars = [ + "BEACON_RPC_URL", + "STARKNET_RPC_URL", + "STARKNET_ADDRESS", + "STARKNET_PRIVATE_KEY", + "ATLANTIC_API_KEY", + "PROOF_REGISTRY", + "POSTGRESQL_HOST", + "POSTGRESQL_USER", + "POSTGRESQL_PASSWORD", + "POSTGRESQL_DB_NAME", + "RPC_LISTEN_HOST", + "RPC_LISTEN_PORT", + ]; + + for &var in &required_vars { + if env::var(var).is_err() { + return Err(format!("Environment variable `{}` is not set", var)); + } + } + + Ok(()) +} + +fn slot_to_epoch(slot: u64) -> u64 { + slot / SLOTS_PER_EPOCH +} + +fn slot_to_sync_committee_id(slot: u64) -> u64 { + slot / SLOTS_PER_SYNC_COMMITTEE +} + +#[tokio::main] +//async fn main() { +async fn main() -> Result<(), Box> { + // Load .env.sepolia file + from_filename(".env.sepolia").ok(); + + let slot_listener_toggle = true; + + let subscriber = FmtSubscriber::builder() + .with_max_level(Level::DEBUG) + //.with_max_level(Level::INFO) + .finish(); + + tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); + + // Validate environment variables + check_env_vars().map_err(|e| { + error!("Error: {}", e); + std::process::exit(1); // Exit if validation fails + }); + + info!("Starting Bankai light-client daemon..."); + + //let database_host = env::var("DATABASE_HOST").expect("DATABASE_HOST must be set"); + let (tx, mut rx): (mpsc::Sender, mpsc::Receiver) = mpsc::channel(32); + + //let 
(tx, mut rx) = mpsc::channel(32); + + let connection_string = "host=localhost user=root password=root dbname=bankai"; + // let connection_string = format!( + // "host={} user={} password={} dbname={}", + // env::var("POSTGRESQL_HOST").unwrap().as_str(), + // env::var("POSTGRESQL_USER").unwrap().as_str(), + // env::var("POSTGRESQL_PASSWORD").unwrap().as_str(), + // env::var("POSTGRESQL_DB_NAME").unwrap().as_str() + // ); + let _connection_result: Result< + ( + Client, + tokio_postgres::Connection, + ), + tokio_postgres::Error, + > = tokio_postgres::connect(connection_string, NoTls).await; + + let db_client = match tokio_postgres::connect(connection_string, NoTls).await { + Ok((client, connection)) => { + // Spawn a task to manage the connection + tokio::spawn(async move { + if let Err(e) = connection.await { + eprintln!("Connection error: {}", e); + } + }); + + info!("Connected to the database successfully!"); + + // Wrap the client in an Arc for shared ownership + Arc::new(client) + } + Err(err) => { + error!("Failed to connect to the database: {}", err); + std::process::exit(1); // Exit with a non-zero status code + } + }; + + //let db_client_for_task = Arc::new(db_client); + + let bankai = Arc::new(BankaiClient::new().await); + // Clone the Arc for use in async task + let bankai_for_task = Arc::clone(&bankai); + + // Beacon node endpoint construction for ervents + let events_endpoint = format!( + "{}/eth/v1/events?topics=head", + env::var("BEACON_RPC_URL").unwrap().as_str() + ); + //let events_endpoint = format!("{}/eth/v1/events?topics=head", beacon_node_url); + + //Spawn a background task to process jobs + tokio::spawn({ + let bankai_for_job = Arc::clone(&bankai); + let db_client_for_job = Arc::clone(&db_client); + async move { + while let Some(job) = rx.recv().await { + let job_id = job.job_id.clone(); + if let Err(e) = + process_job(job, db_client_for_job.clone(), bankai_for_job.clone()).await + { + update_job_status(&db_client_for_job.clone(), job_id, 
JobStatus::Error).await; + error!("Error processing job {}: {}", job_id, e); + } + } + } + }); + + // let db_client_for_task =db_client.clone(); + let db_client_for_state = db_client.clone(); + let tx_for_task = tx.clone(); + + let app_state: AppState = AppState { + db_client: db_client_for_state, + tx, + bankai, + }; + + let app = Router::new() + .route("/status", get(handle_get_status)) + //.route("/get-epoch-proof/:slot", get(handle_get_epoch_proof)) + //.route("/get-committee-hash/:committee_id", get(handle_get_committee_hash)) + .route( + "/debug/get-epoch-update/:slot", + get(handle_get_epoch_update), + ) + .route( + "/debug/get-latest-verified-slot", + get(handle_get_latest_verified_slot), + ) + // .route("/debug/get-job-status", get(handle_get_job_status)) + // .route("/get-merkle-inclusion-proof", get(handle_get_merkle_inclusion_proof)) + .layer(DefaultBodyLimit::disable()) + .with_state(app_state); + + let addr = "0.0.0.0:3000".parse::()?; + let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); + + info!("Bankai RPC HTTP server is listening on http://{}", addr); + + let server_task = tokio::spawn(async move { + let _ = axum::serve(listener, app).await; + }); + + // Listen for the new slots on BeaconChain + // Create an HTTP client + let http_stream_client = reqwest::Client::new(); + + // Send the request to the Beacon node + let response = http_stream_client + .get(&events_endpoint) + .send() + .await + .unwrap(); + + //let db_client = Arc::new(&db_client); + if slot_listener_toggle { + task::spawn({ + async move { + // Check if response is successful; if not, bail out early + // TODO: need to implement resilience and potentialy use multiple providers (implement something like fallbackprovider functionality in ethers), handle reconnection if connection is lost for various reasons + if !response.status().is_success() { + error!("Failed to connect: HTTP {}", response.status()); + return; + } + + info!("Listening for new slots, epochs and sync 
committee updates..."); + let mut stream = response.bytes_stream(); + + while let Some(chunk) = stream.next().await { + let Ok(bytes) = chunk else { + warn!("Error reading stream: {}", chunk.err().unwrap()); + continue; + }; + + let Ok(text) = String::from_utf8(bytes.to_vec()) else { + warn!("Failed to parse UTF-8."); + continue; + }; + + if text.is_empty() { + continue; + } + + trace!("New slot event detected: {}", text); + + // Search for JSON start + let Some(json_start) = text.find('{') else { + warn!("No JSON data found in the input."); + continue; + }; + + // Try parsing the JSON substring into your event structsync_committee_id + let Ok(parsed_event) = serde_json::from_str::(&text[json_start..]) + else { + warn!("Failed to parse JSON data received from Beacon Chain event."); + continue; + }; + + info!( + "New slot event detected: {} | Is epoch transition: {}", + parsed_event.slot, parsed_event.epoch_transition + ); + + if parsed_event.epoch_transition { + info!("Epoch transition detected! Starting processing..."); + + // Check also now if slot is the moment of switch to new sync committee set + if parsed_event.slot % SLOTS_PER_SYNC_COMMITTEE == 0 { + let sync_committee_id = slot_to_sync_committee_id(parsed_event.slot); + info!("In this slot sync committee rotation taken place. 
Slot {} Sync committee id: {}", parsed_event.slot, sync_committee_id); + // We should probably now start sync committee verify job + } + + let job_id = Uuid::new_v4(); + let job = Job { + job_id: job_id.clone(), + job_type: JobType::EpochUpdate, + job_status: JobStatus::Created, + slot: parsed_event.slot - 32, + }; + + let db_client = db_client.clone(); + match create_job(db_client, job_id, parsed_event.slot).await { + // Insert new job record to DB + Ok(()) => { + // Handle success + info!("Job created successfully with ID: {}", job_id); + if tx_for_task.send(job).await.is_err() { + error!("Failed to send job."); + } + // let job = Job { + // job_id: job_id.clone(), + // job_type: JobType::EpochUpdate, + // job_status: JobStatus::Created, + // slot: parsed_event.slot - 32, + // }; + // if tx_for_task.send(job).await.is_err() { + // error!("Failed to send job."); + // } + // + // If starting committee update job, first ensule that the corresponding slot is registered in contract + } + Err(e) => { + // Handle the error + error!("Error creating job: {}", e); + } + } + + // match bankai_for_task.get_epoch_proof(parsed_event.slot - 32).await { + // Ok(proof) => info!("Epoch proof fetched successfully: {:?}", proof), + // Err(e) => error!("Failed to fetch epoch proof: {:?}", e), + // } + } + } + } + }); + } + + // Wait for the server task to finish + server_task.await?; + + Ok(()) +} + +async fn set_atlantic_job_queryid( + client: &Client, + job_id: Uuid, + batch_id: String, + atlantic_job_type: AtlanticJobType, +) -> Result<(), Box> { + match atlantic_job_type { + AtlanticJobType::ProofGeneration => { + client + .execute( + "UPDATE jobs SET atlantic_batch_id_proof_generation = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&batch_id.to_string(), &job_id], + ) + .await?; + } + AtlanticJobType::ProofWrapping => { + client + .execute( + "UPDATE jobs SET atlantic_batch_id_proof_wrapping = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&batch_id.to_string(), &job_id], + 
) + .await?; + } // _ => { + // println!("Unk", status); + // } + } + + Ok(()) +} + +async fn insert_verified_epoch( + client: &Client, + epoch_id: u64, + epoch_proof: EpochProof, +) -> Result<(), Box> { + let status = JobStatus::Created; // new job starts at 'Created' + + client + .execute( + "INSERT INTO verified_epoch (epoch_id, header_root, state_root, n_signers) VALUES ($1)", + &[ + &epoch_id.to_string(), + &epoch_proof.header_root.to_string(), + &epoch_proof.state_root.to_string(), + &epoch_proof.n_signers.to_string(), + ], + ) + .await?; + + Ok(()) +} + +async fn insert_verified_sync_committee( + client: &Client, + sync_committee_id: u64, + sync_committee_hash: FixedBytes<32>, +) -> Result<(), Box> { + let status = JobStatus::Created; // new job starts at 'Created' + + client + .execute( + "INSERT INTO verified_sync_committee (sync_committee_id, sync_committee_hash) VALUES ($1)", + &[&sync_committee_id.to_string(), &sync_committee_hash.to_string()], + ) + .await?; + + Ok(()) +} + +async fn create_job( + client: Arc, + job_id: Uuid, + slot: u64, +) -> Result<(), Box> { + let status = JobStatus::Created; // new job starts at 'Created' + + client + .execute( + "INSERT INTO jobs (job_uuid, job_status, slot, type) VALUES ($1, $2, $3, $4)", + &[ + &job_id, + &status.to_string(), + &(slot as i64), + &"EPOCH_UPDATE", + ], + ) + .await?; + + Ok(()) +} + +async fn fetch_job_status( + client: &Client, + job_id: Uuid, +) -> Result, Box> { + let row_opt = client + .query_opt("SELECT status FROM jobs WHERE job_id = $1", &[&job_id]) + .await?; + + Ok(row_opt.map(|row| row.get("status"))) +} + +async fn update_job_status( + client: &Client, + job_id: Uuid, + new_status: JobStatus, +) -> Result<(), Box> { + client + .execute( + "UPDATE jobs SET job_status = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&new_status.to_string(), &job_id], + ) + .await?; + Ok(()) +} + +async fn set_job_txhash( + client: &Client, + job_id: Uuid, + txhash: Felt, +) -> Result<(), Box> { + 
client + .execute( + "UPDATE jobs SET tx_hash = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&txhash.to_string(), &job_id], + ) + .await?; + Ok(()) +} + +async fn cancell_all_unfinished_jobs( + client: &Client, +) -> Result<(), Box> { + client + .execute( + "UPDATE jobs SET status = $1, updated_at = NOW() WHERE status = 'FETCHING'", + &[&JobStatus::Cancelled.to_string()], + ) + .await?; + Ok(()) +} + +// async fn fetch_job_by_status( +// client: &Client, +// status: JobStatus, +// ) -> Result, Box> { +// let tx = client.transaction().await?; + +// let row_opt = tx +// .query_opt( +// r#" +// SELECT job_id, status +// FROM jobs +// WHERE status = $1 +// ORDER BY updated_at ASC +// LIMIT 1 +// FOR UPDATE SKIP LOCKED +// "#, +// &[&status], +// ) +// .await?; + +// let job = if let Some(row) = row_opt { +// Some(Job { +// job_id: row.get("job_id"), +// job_type: row.get("type"), +// job_status: row.get("status"), +// slot: row.get("slot"), +// }) +// } else { +// None +// }; + +// tx.commit().await?; +// Ok(job) +// } + +// async fn add_verified_epoch( +// client: Arc, +// slot: u64, +// ) -> Result<(), Box> { +// client +// .execute( +// "INSERT INTO verified_epochs (slot, job_status, slot, type) VALUES ($1, $2, $3, $4)", +// &[&slot, &status.to_string(), &(slot as i64), &"EPOCH_UPDATE"], +// ) +// .await?; + +// Ok(()) +// } + +// async fn worker_task(mut rx: Receiver, db_client: Client) -> Result<(), Box> { +// while let Some(job_id) = rx.recv().await { +// println!("Worker received job {job_id}"); + +// // 4a) Check current status in DB +// if let Some(status) = fetch_job_status(&db_client, job_id).await? { +// match status { +// JobStatus::Created => { +// println!("Fetching proof for job {job_id}..."); +// // Then update status +// update_job_status(&db_client, job_id, JobStatus::FetchedProof).await?; +// println!("Job {job_id} updated to FetchedProof"); +// } +// JobStatus::FetchedProof => { +// // Already fetched, maybe do next step... 
+// println!("Job {job_id} is already FetchedProof; ignoring for now."); +// } +// _ => { +// println!("Job {job_id} in status {:?}, no action needed.", status); +// } +// } +// } else { +// eprintln!("No job found in DB for ID = {job_id}"); +// } +// } +// Ok(()) +// } + +// mpsc jobs // +async fn process_job( + job: Job, + db_client: Arc, + bankai: Arc, +) -> Result<(), Box> { + match job.job_type { + JobType::EpochUpdate => { + // Epoch job + info!( + "[EPOCH JOB] Started processing epoch job: {} for epoch {}", + job.job_id, job.slot + ); + + //update_job_status(&db_client, job.job_id, JobStatus::Created).await?; + + // 1) Fetch the latest on-chain verified epoch + let latest_epoch = bankai + .starknet_client + .get_latest_epoch(&bankai.config) + .await?; + + info!( + "[EPOCH JOB] Latest onchain verified epoch: {}", + latest_epoch + ); + + // make sure next_epoch % 32 == 0 + let next_epoch = (u64::try_from(latest_epoch).unwrap() / 32) * 32 + 32; + info!( + "[EPOCH JOB] Fetching Inputs for next Epoch: {}...", + next_epoch + ); + + // 2) Fetch the proof + let proof = bankai.get_epoch_proof(next_epoch).await?; + info!( + "[EPOCH JOB] Fetched Inputs successfully for Epoch: {}", + next_epoch + ); + + update_job_status(&db_client, job.job_id, JobStatus::FetchedProof).await?; + + // 3) Generate PIE + info!( + "[EPOCH JOB] Starting Cairo execution and PIE generation for Epoch: {}...", + next_epoch + ); + + CairoRunner::generate_pie(&proof, &bankai.config)?; + + info!( + "[EPOCH JOB] Pie generated successfully for Epoch: {}...", + next_epoch + ); + + update_job_status(&db_client, job.job_id, JobStatus::PieGenerated).await?; + + // 4) Submit offchain proof-generation job to Atlantic + info!("[EPOCH JOB] Sending proof generation query to Atlantic..."); + + let batch_id = bankai.atlantic_client.submit_batch(proof).await?; + + info!( + "[EPOCH JOB] Proof generation batch submitted to Atlantic. 
QueryID: {}", + batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; + set_atlantic_job_queryid( + &db_client, + job.job_id, + batch_id.clone(), + AtlanticJobType::ProofGeneration, + ) + .await?; + + // Pool for Atlantic execution done + bankai + .atlantic_client + .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) + .await?; + + info!( + "[EPOCH JOB] Proof generation done by Atlantic. QueryID: {}", + batch_id + ); + + let proof = bankai + .atlantic_client + .fetch_proof(batch_id.as_str()) + .await?; + + info!( + "[EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", + batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; + + // 5) Submit wrapped proof request + info!("[EPOCH JOB] Sending proof wrapping query to Atlantic.."); + let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; + info!( + "[EPOCH JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", + wrapping_batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; + set_atlantic_job_queryid( + &db_client, + job.job_id, + wrapping_batch_id.clone(), + AtlanticJobType::ProofWrapping, + ) + .await?; + + // Pool for Atlantic execution done + bankai + .atlantic_client + .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) + .await?; + + update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; + + info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. 
Wrapping QueryID: {}", wrapping_batch_id); + + update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; + + // 6) Submit epoch update onchain + info!("[EPOCH JOB] Calling epoch update onchain..."); + let update = EpochUpdate::from_json::(next_epoch)?; + + let txhash = bankai + .starknet_client + .submit_update(update.expected_circuit_outputs, &bankai.config) + .await?; + + set_job_txhash(&db_client, job.job_id, txhash).await?; + + info!("[EPOCH JOB] Successfully submitted epoch update..."); + + update_job_status(&db_client, job.job_id, JobStatus::ProofDecommitmentCalled).await?; + + // Now we can get proof from contract? + // bankai.starknet_client.get_epoch_proof( + // &self, + // slot: u64, + // config: &BankaiConfig) + + // Insert data to DB after successful onchain epoch verification + // insert_verified_epoch(&db_client, job.slot / 0x2000, epoch_proof).await?; + } + JobType::SyncComiteeUpdate => { + // Sync committee job + info!( + "[SYNC COMMITTEE JOB] Started processing sync committee job: {} for epoch {}", + job.job_id, job.slot + ); + + let latest_committee_id = bankai + .starknet_client + .get_latest_committee_id(&bankai.config) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Latest onchain verified sync committee: {}", + latest_committee_id + ); + + let latest_epoch = bankai + .starknet_client + .get_latest_epoch(&bankai.config) + .await?; + + let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); + + if latest_epoch < lowest_committee_update_slot { + error!("[SYNC COMMITTEE JOB] Epoch update requires newer epoch",); + //return Err(Error::RequiresNewerEpoch(latest_epoch)); + } + + let update = bankai + .get_sync_committee_update(latest_epoch.try_into().unwrap()) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Received sync committee update: {:?}", + update + ); + + info!( + "[SYNC COMMITTEE JOB] Starting Cairo execution and PIE generation for Sync Committee: {:?}...", + latest_committee_id + ); + + 
CairoRunner::generate_pie(&update, &bankai.config)?; + + update_job_status(&db_client, job.job_id, JobStatus::PieGenerated).await?; + + info!( + "[SYNC COMMITTEE JOB] Pie generated successfully for Sync Committee: {}...", + latest_committee_id + ); + info!("[SYNC COMMITTEE JOB] Sending proof generation query to Atlantic..."); + + let batch_id = bankai.atlantic_client.submit_batch(update).await?; + + update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; + set_atlantic_job_queryid( + &db_client, + job.job_id, + batch_id.clone(), + AtlanticJobType::ProofGeneration, + ) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Proof generation batch submitted to atlantic. QueryID: {}", + batch_id + ); + + // Pool for Atlantic execution done + bankai + .atlantic_client + .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Proof generation done by Atlantic. QueryID: {}", + batch_id + ); + + let proof = bankai + .atlantic_client + .fetch_proof(batch_id.as_str()) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Proof retrieved from Atlantic. QueryID: {}", + batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; + + // 5) Submit wrapped proof request + info!("[SYNC COMMITTEE JOB] Sending proof wrapping query to Atlantic.."); + let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; + info!( + "[SYNC COMMITTEE JOB] Proof wrapping query submitted to Atlantic. 
Wrapping QueryID: {}", + wrapping_batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; + set_atlantic_job_queryid( + &db_client, + job.job_id, + wrapping_batch_id.clone(), + AtlanticJobType::ProofWrapping, + ) + .await?; + + // Pool for Atlantic execution done + bankai + .atlantic_client + .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) + .await?; + + update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; + + info!("[SYNC COMMITTEE JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); + + update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; + + let update = SyncCommitteeUpdate::from_json::(job.slot)?; + + info!("[SYNC COMMITTEE JOB] Calling sync committee update onchain..."); + + let txhash = bankai + .starknet_client + .submit_update(update.expected_circuit_outputs, &bankai.config) + .await?; + + set_job_txhash(&db_client, job.job_id, txhash).await?; + + // Insert data to DB after successful onchain sync committee verification + //insert_verified_sync_committee(&db_client, job.slot, sync_committee_hash).await?; + } + } + + Ok(()) +} + +// RPC requests handling functions // + +async fn handle_get_status(State(state): State) -> impl IntoResponse { + Json(json!({ "success": true })) +} + +async fn handle_get_epoch_update( + Path(slot): Path, + State(state): State, +) -> impl IntoResponse { + match state.bankai.get_epoch_proof(slot).await { + Ok(epoch_update) => { + // Convert `EpochUpdate` to `serde_json::Value` + let value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { + eprintln!("Failed to serialize EpochUpdate: {:?}", err); + json!({ "error": "Internal server error" }) + }); + Json(value) + } + Err(err) => { + eprintln!("Failed to fetch proof: {:?}", err); + Json(json!({ "error": "Failed to fetch proof" })) + } + } +} + +// async fn handle_get_epoch_proof( 
+// Path(slot): Path, +// State(state): State, +// ) -> impl IntoResponse { +// match state.bankai.starknet_client.get_epoch_proof(slot).await { +// Ok(epoch_update) => { +// // Convert `EpochUpdate` to `serde_json::Value` +// let value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { +// eprintln!("Failed to serialize EpochUpdate: {:?}", err); +// json!({ "error": "Internal server error" }) +// }); +// Json(value) +// } +// Err(err) => { +// eprintln!("Failed to fetch proof: {:?}", err); +// Json(json!({ "error": "Failed to fetch proof" })) +// } +// } +// } + +// async fn handle_get_committee_hash( +// Path(committee_id): Path, +// State(state): State, +// ) -> impl IntoResponse { +// match state.bankai.starknet_client.get_committee_hash(committee_id).await { +// Ok(committee_hash) => { +// // Convert `EpochUpdate` to `serde_json::Value` +// let value = serde_json::to_value(committee_hash).unwrap_or_else(|err| { +// eprintln!("Failed to serialize EpochUpdate: {:?}", err); +// json!({ "error": "Internal server error" }) +// }); +// Json(value) +// } +// Err(err) => { +// eprintln!("Failed to fetch proof: {:?}", err); +// Json(json!({ "error": "Failed to fetch proof" })) +// } +// } +// } + +async fn handle_get_latest_verified_slot(State(state): State) -> impl IntoResponse { + match state + .bankai + .starknet_client + .get_latest_epoch(&state.bankai.config) + .await + { + Ok(latest_epoch) => { + // Convert `Felt` to a string and parse it as a hexadecimal number + let hex_string = latest_epoch.to_string(); // Ensure this converts to a "0x..." 
string + match u64::from_str_radix(hex_string.trim_start_matches("0x"), 16) { + Ok(decimal_epoch) => Json(json!({ "latest_verified_slot": decimal_epoch })), + Err(err) => { + eprintln!("Failed to parse latest_epoch as decimal: {:?}", err); + Json(json!({ "error": "Invalid epoch format" })) + } + } + } + Err(err) => { + eprintln!("Failed to fetch latest epoch: {:?}", err); + Json(json!({ "error": "Failed to fetch latest epoch" })) + } + } +} + +// async fn handle_get_job_status( +// Path(job_id): Path, +// State(state): State, +// ) -> impl IntoResponse { +// match fetch_job_status(&state.db_client, job_id).await { +// Ok(job_status) => Json(job_status), +// Err(err) => { +// eprintln!("Failed to fetch job status: {:?}", err); +// Json(json!({ "error": "Failed to fetch job status" })) +// } +// } +// } diff --git a/client-rs/src/main.rs b/client-rs/src/main.rs index c4b2573..a5f5942 100644 --- a/client-rs/src/main.rs +++ b/client-rs/src/main.rs @@ -5,117 +5,23 @@ mod sync_committee; mod traits; mod utils; -use alloy_primitives::TxHash; use config::BankaiConfig; -use serde_json::json; - -use alloy_primitives::FixedBytes; -use alloy_rpc_types_beacon::events::HeadEvent; -use axum::{ - extract::{DefaultBodyLimit, Path, State}, - //http::{header, StatusCode}, - response::{IntoResponse, Json, Response}, - routing::{get, post}, - Router, -}; use contract_init::ContractInitializationData; -use dotenv::from_filename; -use epoch_update::{EpochProof, EpochUpdate}; -use postgres_types::{FromSql, ToSql}; -use reqwest; +use epoch_update::EpochUpdate; use starknet::core::types::Felt; -use std::env; -use std::sync::Arc; -use tokio::sync::mpsc; -use tokio::task; -use tokio_postgres::{Client, NoTls}; -use tokio_stream::StreamExt; -use tracing::{error, info, trace, warn, Level}; -use tracing_subscriber::FmtSubscriber; +use sync_committee::SyncCommitteeUpdate; use traits::Provable; use utils::{atlantic_client::AtlanticClient, cairo_runner::CairoRunner}; use utils::{ 
rpc::BeaconRpcClient, - // bankai_client::BankaiClient, starknet_client::{StarknetClient, StarknetError}, }; -//use std::error::Error as StdError; -use std::fmt; -use std::net::SocketAddr; -use sync_committee::SyncCommitteeUpdate; -use tokio::time::Duration; -use uuid::Uuid; - -const SLOTS_PER_EPOCH: u64 = 32; // For mainnet -const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet - -impl std::fmt::Display for StarknetError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - StarknetError::ProviderError(err) => write!(f, "Provider error: {}", err), - StarknetError::AccountError(msg) => write!(f, "Account error: {}", msg), - } - } -} - -impl std::error::Error for StarknetError {} - -#[derive(Debug, FromSql, ToSql)] -#[postgres(name = "job_status")] -enum JobStatus { - #[postgres(name = "CREATED")] - Created, - #[postgres(name = "FETCHED_PROOF")] - FetchedProof, - #[postgres(name = "PIE_GENERATED")] - PieGenerated, - #[postgres(name = "OFFCHAIN_PROOF_REQUESTED")] - OffchainProofRequested, - #[postgres(name = "OFFCHAIN_PROOF_RETRIEVED")] - OffchainProofRetrieved, - #[postgres(name = "WRAP_PROOF_REQUESTED")] - WrapProofRequested, - #[postgres(name = "WRAPPED_PROOF_DONE")] - WrappedProofDone, - #[postgres(name = "PROOF_DECOMMITMENT_CALLED")] - ProofDecommitmentCalled, - #[postgres(name = "VERIFIED_FACT_REGISTERED")] - VerifiedFactRegistered, - #[postgres(name = "ERROR")] - Cancelled, - #[postgres(name = "CANCELLED")] - Error, -} - -impl ToString for JobStatus { - fn to_string(&self) -> String { - match self { - JobStatus::Created => "CREATED".to_string(), - JobStatus::FetchedProof => "FETCHED_PROOF".to_string(), - JobStatus::PieGenerated => "PIE_GENERATED".to_string(), - JobStatus::OffchainProofRequested => "OFFCHAIN_PROOF_REQUESTED".to_string(), - JobStatus::OffchainProofRetrieved => "OFFCHAIN_PROOF_RETRIEVED".to_string(), - JobStatus::WrapProofRequested => "WRAP_PROOF_REQUESTED".to_string(), - JobStatus::WrappedProofDone => 
"WRAPPED_PROOF_DONE".to_string(), - JobStatus::ProofDecommitmentCalled => "PROOF_DECOMMITMENT_CALLED".to_string(), - JobStatus::VerifiedFactRegistered => "VERIFIED_FACT_REGISTERED".to_string(), - JobStatus::Cancelled => "CANCELLED".to_string(), - JobStatus::Error => "ERROR".to_string(), - } - } -} - -#[derive(Debug, FromSql, ToSql)] -enum JobType { - EpochUpdate, - SyncComiteeUpdate, -} - -#[derive(Debug, FromSql, ToSql)] -enum AtlanticJobType { - ProofGeneration, - ProofWrapping, -} +// use rand::Rng; +// use std::fs::File; +// use std::io::Write; +use clap::{Parser, Subcommand}; +use dotenv::from_filename; +use std::env; #[derive(Debug)] pub enum Error { @@ -134,42 +40,6 @@ pub enum Error { CairoRunError(String), AtlanticError(reqwest::Error), InvalidResponse(String), - PoolingTimeout(String), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Error::InvalidProof => write!(f, "Invalid proof provided"), - Error::RpcError(err) => write!(f, "RPC error: {}", err), - Error::DeserializeError(msg) => write!(f, "Deserialization error: {}", msg), - Error::IoError(err) => write!(f, "I/O error: {}", err), - Error::StarknetError(err) => write!(f, "Starknet error: {}", err), - Error::BlockNotFound => write!(f, "Block not found"), - Error::FetchSyncCommitteeError => write!(f, "Failed to fetch sync committee"), - Error::FailedFetchingBeaconState => write!(f, "Failed to fetch beacon state"), - Error::InvalidBLSPoint => write!(f, "Invalid BLS point"), - Error::MissingRpcUrl => write!(f, "Missing RPC URL"), - Error::EmptySlotDetected(slot) => write!(f, "Empty slot detected: {}", slot), - Error::RequiresNewerEpoch(felt) => write!(f, "Requires newer epoch: {}", felt), - Error::CairoRunError(msg) => write!(f, "Cairo run error: {}", msg), - Error::AtlanticError(err) => write!(f, "Atlantic RPC error: {}", err), - Error::InvalidResponse(msg) => write!(f, "Invalid response: {}", msg), - Error::PoolingTimeout(msg) => write!(f, 
"Pooling timeout: {}", msg), - } - } -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::RpcError(err) => Some(err), - Error::IoError(err) => Some(err), - Error::StarknetError(err) => Some(err), - Error::AtlanticError(err) => Some(err), - _ => None, // No underlying source for other variants - } - } } impl From for Error { @@ -178,22 +48,6 @@ impl From for Error { } } -#[derive(Debug)] -struct Job { - job_id: Uuid, - job_type: JobType, - job_status: JobStatus, - slot: u64, -} - -#[derive(Clone, Debug)] -struct AppState { - db_client: Arc, - tx: mpsc::Sender, - bankai: Arc, -} - -#[derive(Debug)] struct BankaiClient { client: BeaconRpcClient, starknet_client: StarknetClient, @@ -257,902 +111,228 @@ impl BankaiClient { } } -fn check_env_vars() -> Result<(), String> { - let required_vars = [ - "BEACON_RPC_URL", - "STARKNET_RPC_URL", - "STARKNET_ADDRESS", - "STARKNET_PRIVATE_KEY", - "ATLANTIC_API_KEY", - "PROOF_REGISTRY", - "POSTGRESQL_HOST", - "POSTGRESQL_USER", - "POSTGRESQL_PASSWORD", - "POSTGRESQL_DB_NAME", - "RPC_LISTEN_HOST", - "RPC_LISTEN_PORT", - ]; - - for &var in &required_vars { - if env::var(var).is_err() { - return Err(format!("Environment variable `{}` is not set", var)); - } - } - - Ok(()) +#[derive(Subcommand)] +enum Commands { + /// Generate a sync committee update proof for a given slot + CommitteeUpdate { + #[arg(long, short)] + slot: u64, + /// Export output to a JSON file + #[arg(long, short)] + export: Option, + }, + /// Generate an epoch update proof for a given slot + EpochUpdate { + #[arg(long, short)] + slot: u64, + /// Export output to a JSON file + #[arg(long, short)] + export: Option, + }, + /// Generate contract initialization data for a given slot + ContractInit { + #[arg(long, short)] + slot: u64, + /// Export output to a JSON file + #[arg(long, short)] + export: Option, + }, + DeployContract { + #[arg(long, short)] + slot: u64, + }, + ProveNextCommittee, + 
ProveNextEpoch, + CheckBatchStatus { + #[arg(long, short)] + batch_id: String, + }, + SubmitWrappedProof { + #[arg(long, short)] + batch_id: String, + }, + VerifyEpoch { + #[arg(long, short)] + batch_id: String, + #[arg(long, short)] + slot: u64, + }, + VerifyCommittee { + #[arg(long, short)] + batch_id: String, + #[arg(long, short)] + slot: u64, + }, } -fn slot_to_epoch(slot: u64) -> u64 { - slot / SLOTS_PER_EPOCH -} +#[derive(Parser)] +#[command(author, version, about, long_about = None)] +struct Cli { + /// Optional RPC URL (defaults to RPC_URL_BEACON environment variable) + #[arg(long, short)] + rpc_url: Option, -fn slot_to_sync_committee_id(slot: u64) -> u64 { - slot / SLOTS_PER_SYNC_COMMITTEE + #[command(subcommand)] + command: Commands, } #[tokio::main] -//async fn main() { -async fn main() -> Result<(), Box> { +async fn main() -> Result<(), Error> { // Load .env.sepolia file from_filename(".env.sepolia").ok(); - let slot_listener_toggle = true; - - let subscriber = FmtSubscriber::builder() - .with_max_level(Level::DEBUG) - //.with_max_level(Level::INFO) - .finish(); - - tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); - - // Validate environment variables - check_env_vars().map_err(|e| { - error!("Error: {}", e); - std::process::exit(1); // Exit if validation fails - }); + let cli = Cli::parse(); + let bankai = BankaiClient::new().await; - info!("Starting Bankai light-client daemon..."); + match cli.command { + Commands::CommitteeUpdate { slot, export } => { + println!("SyncCommittee command received with slot: {}", slot); + let proof = bankai.get_sync_committee_update(slot).await?; + let json = serde_json::to_string_pretty(&proof) + .map_err(|e| Error::DeserializeError(e.to_string()))?; - //let database_host = env::var("DATABASE_HOST").expect("DATABASE_HOST must be set"); - let (tx, mut rx): (mpsc::Sender, mpsc::Receiver) = mpsc::channel(32); - - //let (tx, mut rx) = mpsc::channel(32); - - let 
connection_string = "host=localhost user=root password=root dbname=bankai"; - // let connection_string = format!( - // "host={} user={} password={} dbname={}", - // env::var("POSTGRESQL_HOST").unwrap().as_str(), - // env::var("POSTGRESQL_USER").unwrap().as_str(), - // env::var("POSTGRESQL_PASSWORD").unwrap().as_str(), - // env::var("POSTGRESQL_DB_NAME").unwrap().as_str() - // ); - let _connection_result: Result< - ( - Client, - tokio_postgres::Connection, - ), - tokio_postgres::Error, - > = tokio_postgres::connect(connection_string, NoTls).await; - - let db_client = match tokio_postgres::connect(connection_string, NoTls).await { - Ok((client, connection)) => { - // Spawn a task to manage the connection - tokio::spawn(async move { - if let Err(e) = connection.await { - eprintln!("Connection error: {}", e); + if let Some(path) = export { + match std::fs::write(path.clone(), json) { + Ok(_) => println!("Proof exported to {}", path), + Err(e) => return Err(Error::IoError(e)), } - }); - - info!("Connected to the database successfully!"); - - // Wrap the client in an Arc for shared ownership - Arc::new(client) - } - Err(err) => { - error!("Failed to connect to the database: {}", err); - std::process::exit(1); // Exit with a non-zero status code + } else { + println!("{}", json); + } } - }; - - //let db_client_for_task = Arc::new(db_client); - - let bankai = Arc::new(BankaiClient::new().await); - // Clone the Arc for use in async task - let bankai_for_task = Arc::clone(&bankai); - - // Beacon node endpoint construction for ervents - let events_endpoint = format!( - "{}/eth/v1/events?topics=head", - env::var("BEACON_RPC_URL").unwrap().as_str() - ); - //let events_endpoint = format!("{}/eth/v1/events?topics=head", beacon_node_url); - - //Spawn a background task to process jobs - tokio::spawn({ - let bankai_for_job = Arc::clone(&bankai); - let db_client_for_job = Arc::clone(&db_client); - async move { - while let Some(job) = rx.recv().await { - let job_id = 
job.job_id.clone(); - if let Err(e) = - process_job(job, db_client_for_job.clone(), bankai_for_job.clone()).await - { - update_job_status(&db_client_for_job.clone(), job_id, JobStatus::Error).await; - error!("Error processing job {}: {}", job_id, e); + Commands::EpochUpdate { slot, export } => { + println!("Epoch command received with slot: {}", slot); + let proof = bankai.get_epoch_proof(slot).await?; + let json = serde_json::to_string_pretty(&proof) + .map_err(|e| Error::DeserializeError(e.to_string()))?; + + if let Some(path) = export { + match std::fs::write(path.clone(), json) { + Ok(_) => println!("Proof exported to {}", path), + Err(e) => return Err(Error::IoError(e)), } + } else { + println!("{}", json); } } - }); - - // let db_client_for_task =db_client.clone(); - let db_client_for_state = db_client.clone(); - let tx_for_task = tx.clone(); - - let app_state: AppState = AppState { - db_client: db_client_for_state, - tx, - bankai, - }; - - let app = Router::new() - .route("/status", get(handle_get_status)) - //.route("/get-epoch-proof/:slot", get(handle_get_epoch_proof)) - //.route("/get-committee-hash/:committee_id", get(handle_get_committee_hash)) - .route( - "/debug/get-epoch-update/:slot", - get(handle_get_epoch_update), - ) - .route( - "/debug/get-latest-verified-slot", - get(handle_get_latest_verified_slot), - ) - // .route("/debug/get-job-status", get(handle_get_job_status)) - // .route("/get-merkle-inclusion-proof", get(handle_get_merkle_inclusion_proof)) - .layer(DefaultBodyLimit::disable()) - .with_state(app_state); - - let addr = "0.0.0.0:3000".parse::()?; - let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); - - info!("Bankai RPC HTTP server is listening on http://{}", addr); - - let server_task = tokio::spawn(async move { - let _ = axum::serve(listener, app).await; - }); - - // Listen for the new slots on BeaconChain - // Create an HTTP client - let http_stream_client = reqwest::Client::new(); - - // Send the request to the Beacon 
node - let response = http_stream_client - .get(&events_endpoint) - .send() - .await - .unwrap(); - - //let db_client = Arc::new(&db_client); - if slot_listener_toggle { - task::spawn({ - async move { - // Check if response is successful; if not, bail out early - // TODO: need to implement resilience and potentialy use multiple providers (implement something like fallbackprovider functionality in ethers), handle reconnection if connection is lost for various reasons - if !response.status().is_success() { - error!("Failed to connect: HTTP {}", response.status()); - return; - } - - info!("Listening for new slots, epochs and sync committee updates..."); - let mut stream = response.bytes_stream(); - - while let Some(chunk) = stream.next().await { - let Ok(bytes) = chunk else { - warn!("Error reading stream: {}", chunk.err().unwrap()); - continue; - }; - - let Ok(text) = String::from_utf8(bytes.to_vec()) else { - warn!("Failed to parse UTF-8."); - continue; - }; - - if text.is_empty() { - continue; - } - - trace!("New slot event detected: {}", text); - - // Search for JSON start - let Some(json_start) = text.find('{') else { - warn!("No JSON data found in the input."); - continue; - }; - - // Try parsing the JSON substring into your event structsync_committee_id - let Ok(parsed_event) = serde_json::from_str::(&text[json_start..]) - else { - warn!("Failed to parse JSON data received from Beacon Chain event."); - continue; - }; - - info!( - "New slot event detected: {} | Is epoch transition: {}", - parsed_event.slot, parsed_event.epoch_transition - ); - - if parsed_event.epoch_transition { - info!("Epoch transition detected! Starting processing..."); - - // Check also now if slot is the moment of switch to new sync committee set - if parsed_event.slot % SLOTS_PER_SYNC_COMMITTEE == 0 { - let sync_committee_id = slot_to_sync_committee_id(parsed_event.slot); - info!("In this slot sync committee rotation taken place. 
Slot {} Sync committee id: {}", parsed_event.slot, sync_committee_id); - // We should probably now start sync committee verify job - } - - let job_id = Uuid::new_v4(); - let job = Job { - job_id: job_id.clone(), - job_type: JobType::EpochUpdate, - job_status: JobStatus::Created, - slot: parsed_event.slot - 32, - }; - - let db_client = db_client.clone(); - match create_job(db_client, job_id, parsed_event.slot).await { - // Insert new job record to DB - Ok(()) => { - // Handle success - info!("Job created successfully with ID: {}", job_id); - if tx_for_task.send(job).await.is_err() { - error!("Failed to send job."); - } - // let job = Job { - // job_id: job_id.clone(), - // job_type: JobType::EpochUpdate, - // job_status: JobStatus::Created, - // slot: parsed_event.slot - 32, - // }; - // if tx_for_task.send(job).await.is_err() { - // error!("Failed to send job."); - // } - // - // If starting committee update job, first ensule that the corresponding slot is registered in contract - } - Err(e) => { - // Handle the error - error!("Error creating job: {}", e); - } - } + Commands::ContractInit { slot, export } => { + println!("ContractInit command received with slot: {}", slot); + let contract_init = bankai + .get_contract_initialization_data(slot, &bankai.config) + .await?; + let json = serde_json::to_string_pretty(&contract_init) + .map_err(|e| Error::DeserializeError(e.to_string()))?; - // match bankai_for_task.get_epoch_proof(parsed_event.slot - 32).await { - // Ok(proof) => info!("Epoch proof fetched successfully: {:?}", proof), - // Err(e) => error!("Failed to fetch epoch proof: {:?}", e), - // } - } + if let Some(path) = export { + match std::fs::write(path.clone(), json) { + Ok(_) => println!("Contract initialization data exported to {}", path), + Err(e) => return Err(Error::IoError(e)), } + } else { + println!("{}", json); } - }); - } - - // Wait for the server task to finish - server_task.await?; - - Ok(()) -} - -async fn set_atlantic_job_queryid( - client: 
&Client, - job_id: Uuid, - batch_id: String, - atlantic_job_type: AtlanticJobType, -) -> Result<(), Box> { - match atlantic_job_type { - AtlanticJobType::ProofGeneration => { - client - .execute( - "UPDATE jobs SET atlantic_batch_id_proof_generation = $1, updated_at = NOW() WHERE job_uuid = $2", - &[&batch_id.to_string(), &job_id], - ) - .await?; } - AtlanticJobType::ProofWrapping => { - client - .execute( - "UPDATE jobs SET atlantic_batch_id_proof_wrapping = $1, updated_at = NOW() WHERE job_uuid = $2", - &[&batch_id.to_string(), &job_id], - ) - .await?; - } // _ => { - // println!("Unk", status); - // } - } - - Ok(()) -} - -async fn insert_verified_epoch( - client: &Client, - epoch_id: u64, - epoch_proof: EpochProof, -) -> Result<(), Box> { - let status = JobStatus::Created; // new job starts at 'Created' - - client - .execute( - "INSERT INTO verified_epoch (epoch_id, header_root, state_root, n_signers) VALUES ($1)", - &[ - &epoch_id.to_string(), - &epoch_proof.header_root.to_string(), - &epoch_proof.state_root.to_string(), - &epoch_proof.n_signers.to_string(), - ], - ) - .await?; - - Ok(()) -} - -async fn insert_verified_sync_committee( - client: &Client, - sync_committee_id: u64, - sync_committee_hash: FixedBytes<32>, -) -> Result<(), Box> { - let status = JobStatus::Created; // new job starts at 'Created' - - client - .execute( - "INSERT INTO verified_sync_committee (sync_committee_id, sync_committee_hash) VALUES ($1)", - &[&sync_committee_id.to_string(), &sync_committee_hash.to_string()], - ) - .await?; - - Ok(()) -} - -async fn create_job( - client: Arc, - job_id: Uuid, - slot: u64, -) -> Result<(), Box> { - let status = JobStatus::Created; // new job starts at 'Created' - - client - .execute( - "INSERT INTO jobs (job_uuid, job_status, slot, type) VALUES ($1, $2, $3, $4)", - &[ - &job_id, - &status.to_string(), - &(slot as i64), - &"EPOCH_UPDATE", - ], - ) - .await?; - - Ok(()) -} - -async fn fetch_job_status( - client: &Client, - job_id: Uuid, -) -> Result, 
Box> { - let row_opt = client - .query_opt("SELECT status FROM jobs WHERE job_id = $1", &[&job_id]) - .await?; - - Ok(row_opt.map(|row| row.get("status"))) -} - -async fn update_job_status( - client: &Client, - job_id: Uuid, - new_status: JobStatus, -) -> Result<(), Box> { - client - .execute( - "UPDATE jobs SET job_status = $1, updated_at = NOW() WHERE job_uuid = $2", - &[&new_status.to_string(), &job_id], - ) - .await?; - Ok(()) -} - -async fn set_job_txhash( - client: &Client, - job_id: Uuid, - txhash: Felt, -) -> Result<(), Box> { - client - .execute( - "UPDATE jobs SET tx_hash = $1, updated_at = NOW() WHERE job_uuid = $2", - &[&txhash.to_string(), &job_id], - ) - .await?; - Ok(()) -} - -async fn cancell_all_unfinished_jobs( - client: &Client, -) -> Result<(), Box> { - client - .execute( - "UPDATE jobs SET status = $1, updated_at = NOW() WHERE status = 'FETCHING'", - &[&JobStatus::Cancelled.to_string()], - ) - .await?; - Ok(()) -} - -// async fn fetch_job_by_status( -// client: &Client, -// status: JobStatus, -// ) -> Result, Box> { -// let tx = client.transaction().await?; - -// let row_opt = tx -// .query_opt( -// r#" -// SELECT job_id, status -// FROM jobs -// WHERE status = $1 -// ORDER BY updated_at ASC -// LIMIT 1 -// FOR UPDATE SKIP LOCKED -// "#, -// &[&status], -// ) -// .await?; - -// let job = if let Some(row) = row_opt { -// Some(Job { -// job_id: row.get("job_id"), -// job_type: row.get("type"), -// job_status: row.get("status"), -// slot: row.get("slot"), -// }) -// } else { -// None -// }; - -// tx.commit().await?; -// Ok(job) -// } - -// async fn add_verified_epoch( -// client: Arc, -// slot: u64, -// ) -> Result<(), Box> { -// client -// .execute( -// "INSERT INTO verified_epochs (slot, job_status, slot, type) VALUES ($1, $2, $3, $4)", -// &[&slot, &status.to_string(), &(slot as i64), &"EPOCH_UPDATE"], -// ) -// .await?; - -// Ok(()) -// } - -// async fn worker_task(mut rx: Receiver, db_client: Client) -> Result<(), Box> { -// while let 
Some(job_id) = rx.recv().await { -// println!("Worker received job {job_id}"); - -// // 4a) Check current status in DB -// if let Some(status) = fetch_job_status(&db_client, job_id).await? { -// match status { -// JobStatus::Created => { -// println!("Fetching proof for job {job_id}..."); -// // Then update status -// update_job_status(&db_client, job_id, JobStatus::FetchedProof).await?; -// println!("Job {job_id} updated to FetchedProof"); -// } -// JobStatus::FetchedProof => { -// // Already fetched, maybe do next step... -// println!("Job {job_id} is already FetchedProof; ignoring for now."); -// } -// _ => { -// println!("Job {job_id} in status {:?}, no action needed.", status); -// } -// } -// } else { -// eprintln!("No job found in DB for ID = {job_id}"); -// } -// } -// Ok(()) -// } - -// mpsc jobs // -async fn process_job( - job: Job, - db_client: Arc, - bankai: Arc, -) -> Result<(), Box> { - match job.job_type { - JobType::EpochUpdate => { - // Epoch job - info!( - "[EPOCH JOB] Started processing epoch job: {} for epoch {}", - job.job_id, job.slot - ); - - //update_job_status(&db_client, job.job_id, JobStatus::Created).await?; - - // 1) Fetch the latest on-chain verified epoch - let latest_epoch = bankai - .starknet_client - .get_latest_epoch(&bankai.config) + Commands::DeployContract { slot } => { + let contract_init = bankai + .get_contract_initialization_data(slot, &bankai.config) .await?; - - info!( - "[EPOCH JOB] Latest onchain verified epoch: {}", - latest_epoch - ); - - // make sure next_epoch % 32 == 0 - let next_epoch = (u64::try_from(latest_epoch).unwrap() / 32) * 32 + 32; - info!( - "[EPOCH JOB] Fetching Inputs for next Epoch: {}...", - next_epoch - ); - - // 2) Fetch the proof - let proof = bankai.get_epoch_proof(next_epoch).await?; - info!( - "[EPOCH JOB] Fetched Inputs successfully for Epoch: {}", - next_epoch - ); - - update_job_status(&db_client, job.job_id, JobStatus::FetchedProof).await?; - - // 3) Generate PIE - info!( - "[EPOCH JOB] 
Starting Cairo execution and PIE generation for Epoch: {}...", - next_epoch - ); - - CairoRunner::generate_pie(&proof, &bankai.config)?; - - info!( - "[EPOCH JOB] Pie generated successfully for Epoch: {}...", - next_epoch - ); - - update_job_status(&db_client, job.job_id, JobStatus::PieGenerated).await?; - - // 4) Submit offchain proof-generation job to Atlantic - info!("[EPOCH JOB] Sending proof generation query to Atlantic..."); - - let batch_id = bankai.atlantic_client.submit_batch(proof).await?; - - info!( - "[EPOCH JOB] Proof generation batch submitted to Atlantic. QueryID: {}", - batch_id - ); - - update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; - set_atlantic_job_queryid( - &db_client, - job.job_id, - batch_id.clone(), - AtlanticJobType::ProofGeneration, - ) - .await?; - - // Pool for Atlantic execution done bankai - .atlantic_client - .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) - .await?; - - info!( - "[EPOCH JOB] Proof generation done by Atlantic. QueryID: {}", - batch_id - ); - - let proof = bankai - .atlantic_client - .fetch_proof(batch_id.as_str()) + .starknet_client + .deploy_contract(contract_init, &bankai.config) .await?; - - info!( - "[EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", - batch_id - ); - - update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; - - // 5) Submit wrapped proof request - info!("[EPOCH JOB] Sending proof wrapping query to Atlantic.."); - let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; - info!( - "[EPOCH JOB] Proof wrapping query submitted to Atlantic. 
Wrapping QueryID: {}", - wrapping_batch_id - ); - - update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; - set_atlantic_job_queryid( - &db_client, - job.job_id, - wrapping_batch_id.clone(), - AtlanticJobType::ProofWrapping, - ) - .await?; - - // Pool for Atlantic execution done - bankai + } + Commands::CheckBatchStatus { batch_id } => { + let status = bankai .atlantic_client - .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) - .await?; - - update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; - - info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); - - update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; - - // 6) Submit epoch update onchain - info!("[EPOCH JOB] Calling epoch update onchain..."); - let update = EpochUpdate::from_json::(next_epoch)?; - - let txhash = bankai - .starknet_client - .submit_update(update.expected_circuit_outputs, &bankai.config) + .check_batch_status(batch_id.as_str()) .await?; - - set_job_txhash(&db_client, job.job_id, txhash).await?; - - info!("[EPOCH JOB] Successfully submitted epoch update..."); - - update_job_status(&db_client, job.job_id, JobStatus::ProofDecommitmentCalled).await?; - - // Now we can get proof from contract? 
- // bankai.starknet_client.get_epoch_proof( - // &self, - // slot: u64, - // config: &BankaiConfig) - - // Insert data to DB after successful onchain epoch verification - // insert_verified_epoch(&db_client, job.slot / 0x2000, epoch_proof).await?; + println!("Batch Status: {}", status); } - JobType::SyncComiteeUpdate => { - // Sync committee job - info!( - "[SYNC COMMITTEE JOB] Started processing sync committee job: {} for epoch {}", - job.job_id, job.slot - ); - + Commands::ProveNextCommittee => { let latest_committee_id = bankai .starknet_client .get_latest_committee_id(&bankai.config) .await?; - - info!( - "[SYNC COMMITTEE JOB] Latest onchain verified sync committee: {}", - latest_committee_id - ); - + let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); + println!("Min Slot Required: {}", lowest_committee_update_slot); let latest_epoch = bankai .starknet_client .get_latest_epoch(&bankai.config) .await?; - - let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); - + println!("Latest epoch: {}", latest_epoch); if latest_epoch < lowest_committee_update_slot { - error!("[SYNC COMMITTEE JOB] Epoch update requires newer epoch",); - //return Err(Error::RequiresNewerEpoch(latest_epoch)); + return Err(Error::RequiresNewerEpoch(latest_epoch)); } - let update = bankai .get_sync_committee_update(latest_epoch.try_into().unwrap()) .await?; - - info!( - "[SYNC COMMITTEE JOB] Received sync committee update: {:?}", - update - ); - - info!( - "[SYNC COMMITTEE JOB] Starting Cairo execution and PIE generation for Sync Committee: {:?}...", - latest_committee_id - ); - CairoRunner::generate_pie(&update, &bankai.config)?; - - update_job_status(&db_client, job.job_id, JobStatus::PieGenerated).await?; - - info!( - "[SYNC COMMITTEE JOB] Pie generated successfully for Sync Committee: {}...", - latest_committee_id - ); - info!("[SYNC COMMITTEE JOB] Sending proof generation query to Atlantic..."); - let batch_id = 
bankai.atlantic_client.submit_batch(update).await?; - - update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; - set_atlantic_job_queryid( - &db_client, - job.job_id, - batch_id.clone(), - AtlanticJobType::ProofGeneration, - ) - .await?; - - info!( - "[SYNC COMMITTEE JOB] Proof generation batch submitted to atlantic. QueryID: {}", - batch_id - ); - - // Pool for Atlantic execution done - bankai - .atlantic_client - .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) + println!("Batch Submitted: {}", batch_id); + } + Commands::ProveNextEpoch => { + let latest_epoch = bankai + .starknet_client + .get_latest_epoch(&bankai.config) .await?; - - info!( - "[SYNC COMMITTEE JOB] Proof generation done by Atlantic. QueryID: {}", - batch_id - ); - - let proof = bankai + println!("Latest Epoch: {}", latest_epoch); + // make sure next_epoch % 32 == 0 + let next_epoch = (u64::try_from(latest_epoch).unwrap() / 32) * 32 + 32; + println!("Fetching Inputs for Epoch: {}", next_epoch); + let proof = bankai.get_epoch_proof(next_epoch).await?; + CairoRunner::generate_pie(&proof, &bankai.config)?; + let batch_id = bankai.atlantic_client.submit_batch(proof).await?; + println!("Batch Submitted: {}", batch_id); + } + Commands::VerifyEpoch { batch_id, slot } => { + let status = bankai .atlantic_client - .fetch_proof(batch_id.as_str()) + .check_batch_status(batch_id.as_str()) .await?; - - info!( - "[SYNC COMMITTEE JOB] Proof retrieved from Atlantic. QueryID: {}", - batch_id - ); - - update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; - - // 5) Submit wrapped proof request - info!("[SYNC COMMITTEE JOB] Sending proof wrapping query to Atlantic.."); - let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; - info!( - "[SYNC COMMITTEE JOB] Proof wrapping query submitted to Atlantic. 
Wrapping QueryID: {}", - wrapping_batch_id - ); - - update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; - set_atlantic_job_queryid( - &db_client, - job.job_id, - wrapping_batch_id.clone(), - AtlanticJobType::ProofWrapping, - ) - .await?; - - // Pool for Atlantic execution done - bankai + if status == "DONE" { + let update = EpochUpdate::from_json::(slot)?; + bankai + .starknet_client + .submit_update(update.expected_circuit_outputs, &bankai.config) + .await?; + println!("Successfully submitted epoch update"); + } else { + println!("Batch not completed yet. Status: {}", status); + } + } + Commands::VerifyCommittee { batch_id, slot } => { + let status = bankai .atlantic_client - .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) + .check_batch_status(batch_id.as_str()) .await?; - - update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; - - info!("[SYNC COMMITTEE JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); - - update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; - - let update = SyncCommitteeUpdate::from_json::(job.slot)?; - - info!("[SYNC COMMITTEE JOB] Calling sync committee update onchain..."); - - let txhash = bankai - .starknet_client - .submit_update(update.expected_circuit_outputs, &bankai.config) + if status == "DONE" { + let update = SyncCommitteeUpdate::from_json::(slot)?; + bankai + .starknet_client + .submit_update(update.expected_circuit_outputs, &bankai.config) + .await?; + println!("Successfully submitted sync committee update"); + } else { + println!("Batch not completed yet. 
Status: {}", status); + } + } + Commands::SubmitWrappedProof { batch_id } => { + let status = bankai + .atlantic_client + .check_batch_status(batch_id.as_str()) .await?; - - set_job_txhash(&db_client, job.job_id, txhash).await?; - - // Insert data to DB after successful onchain sync committee verification - //insert_verified_sync_committee(&db_client, job.slot, sync_committee_hash).await?; + if status == "DONE" { + let proof = bankai + .atlantic_client + .fetch_proof(batch_id.as_str()) + .await?; + let batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; + println!("Batch Submitted: {}", batch_id); + } else { + println!("Batch not completed yet. Status: {}", status); + } } } Ok(()) } - -// RPC requests handling functions // - -async fn handle_get_status(State(state): State) -> impl IntoResponse { - Json(json!({ "success": true })) -} - -async fn handle_get_epoch_update( - Path(slot): Path, - State(state): State, -) -> impl IntoResponse { - match state.bankai.get_epoch_proof(slot).await { - Ok(epoch_update) => { - // Convert `EpochUpdate` to `serde_json::Value` - let value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { - eprintln!("Failed to serialize EpochUpdate: {:?}", err); - json!({ "error": "Internal server error" }) - }); - Json(value) - } - Err(err) => { - eprintln!("Failed to fetch proof: {:?}", err); - Json(json!({ "error": "Failed to fetch proof" })) - } - } -} - -// async fn handle_get_epoch_proof( -// Path(slot): Path, -// State(state): State, -// ) -> impl IntoResponse { -// match state.bankai.starknet_client.get_epoch_proof(slot).await { -// Ok(epoch_update) => { -// // Convert `EpochUpdate` to `serde_json::Value` -// let value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { -// eprintln!("Failed to serialize EpochUpdate: {:?}", err); -// json!({ "error": "Internal server error" }) -// }); -// Json(value) -// } -// Err(err) => { -// eprintln!("Failed to fetch proof: {:?}", err); -// Json(json!({ "error": 
"Failed to fetch proof" })) -// } -// } -// } - -// async fn handle_get_committee_hash( -// Path(committee_id): Path, -// State(state): State, -// ) -> impl IntoResponse { -// match state.bankai.starknet_client.get_committee_hash(committee_id).await { -// Ok(committee_hash) => { -// // Convert `EpochUpdate` to `serde_json::Value` -// let value = serde_json::to_value(committee_hash).unwrap_or_else(|err| { -// eprintln!("Failed to serialize EpochUpdate: {:?}", err); -// json!({ "error": "Internal server error" }) -// }); -// Json(value) -// } -// Err(err) => { -// eprintln!("Failed to fetch proof: {:?}", err); -// Json(json!({ "error": "Failed to fetch proof" })) -// } -// } -// } - -async fn handle_get_latest_verified_slot(State(state): State) -> impl IntoResponse { - match state - .bankai - .starknet_client - .get_latest_epoch(&state.bankai.config) - .await - { - Ok(latest_epoch) => { - // Convert `Felt` to a string and parse it as a hexadecimal number - let hex_string = latest_epoch.to_string(); // Ensure this converts to a "0x..." 
string - match u64::from_str_radix(hex_string.trim_start_matches("0x"), 16) { - Ok(decimal_epoch) => Json(json!({ "latest_verified_slot": decimal_epoch })), - Err(err) => { - eprintln!("Failed to parse latest_epoch as decimal: {:?}", err); - Json(json!({ "error": "Invalid epoch format" })) - } - } - } - Err(err) => { - eprintln!("Failed to fetch latest epoch: {:?}", err); - Json(json!({ "error": "Failed to fetch latest epoch" })) - } - } -} - -// async fn handle_get_job_status( -// Path(job_id): Path, -// State(state): State, -// ) -> impl IntoResponse { -// match fetch_job_status(&state.db_client, job_id).await { -// Ok(job_status) => Json(job_status), -// Err(err) => { -// eprintln!("Failed to fetch job status: {:?}", err); -// Json(json!({ "error": "Failed to fetch job status" })) -// } -// } -// } diff --git a/client-rs/src/utils/atlantic_client.rs b/client-rs/src/utils/atlantic_client.rs index abfab86..8517185 100644 --- a/client-rs/src/utils/atlantic_client.rs +++ b/client-rs/src/utils/atlantic_client.rs @@ -198,7 +198,8 @@ impl AtlanticClient { } trace!( - "Batch not completed yet. Status: {}. Pooling attempt {}/{}", + "Batch {} not completed yet. Status: {}. Pooling attempt {}/{}", + batch_id, status, attempt, max_retries From e4cddcd1bb71817c0ece34f4a2272a548b8df41d Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 10 Jan 2025 18:56:06 +0100 Subject: [PATCH 04/66] Update cargo.toml --- client-rs/Cargo.lock | 408 ++++++++++++++++--------------------------- client-rs/Cargo.toml | 2 +- 2 files changed, 149 insertions(+), 261 deletions(-) diff --git a/client-rs/Cargo.lock b/client-rs/Cargo.lock index 61f048e..48804ea 100644 --- a/client-rs/Cargo.lock +++ b/client-rs/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 4 +version = 3 [[package]] name = "addr2line" @@ -62,7 +62,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a101d4d016f47f13890a74290fdd17b05dd175191d9337bc600791fb96e4dea8" dependencies = [ "alloy-eips", - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rlp", "alloy-serde", "alloy-trie", @@ -77,7 +77,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rlp", "serde", ] @@ -88,7 +88,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c986539255fb839d1533c128e190e557e52ff652c9ef62939e233a81dd93f7e" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rlp", "derive_more 1.0.0", "serde", @@ -102,7 +102,7 @@ checksum = "8b6755b093afef5925f25079dd5a7c8d096398b804ba60cb5275397b06b31689" dependencies = [ "alloy-eip2930", "alloy-eip7702", - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rlp", "alloy-serde", "c-kzg", @@ -135,9 +135,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6259a506ab13e1d658796c31e6e39d2e2ee89243bcc505ddc613b35732e0a430" +checksum = "788bb18e8f61d5d9340b52143f27771daf7e1dccbaf2741621d2493f9debf52e" dependencies = [ "alloy-rlp", "bytes", @@ -146,7 +146,6 @@ dependencies = [ "derive_more 1.0.0", "foldhash", "hashbrown 0.15.2", - "hex-literal", "indexmap 2.7.0", "itoa", "k256", @@ -180,7 +179,7 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -190,12 +189,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"cc37861dc8cbf5da35d346139fbe6e03ee7823cc21138a2c4a590d3b0b4b24be" dependencies = [ "alloy-eips", - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rpc-types-engine", "alloy-serde", "serde", "serde_with", - "thiserror 2.0.9", + "thiserror 2.0.10", ] [[package]] @@ -206,7 +205,7 @@ checksum = "5d297268357e3eae834ddd6888b15f764cbc0f4b3be9265f5f6ec239013f3d68" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rlp", "alloy-serde", "derive_more 1.0.0", @@ -220,7 +219,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9afa753a97002a33b2ccb707d9f15f31c81b8c1b786c95b73cc62bb1d1fd0c3f" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "serde", "serde_json", ] @@ -231,7 +230,7 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6917c79e837aa7b77b7a6dae9f89cbe15313ac161c4d3cfaf8909ef21f3d22d8" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rlp", "arrayvec", "derive_more 1.0.0", @@ -458,13 +457,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -481,7 +480,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -503,7 +502,7 @@ dependencies = [ "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.1", + "hyper 1.5.2", "hyper-util", "itoa", "matchit", @@ -592,7 +591,7 @@ dependencies = [ "reqwest 0.11.27", "serde", "tokio", - "types 0.2.1 
(git+https://github.com/petscheit/lighthouse)", + "types", ] [[package]] @@ -643,23 +642,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "bls" -version = "0.2.0" -dependencies = [ - "arbitrary", - "blst", - "ethereum-types", - "ethereum_hashing 0.6.0", - "ethereum_serde_utils 0.5.2", - "ethereum_ssz", - "hex", - "rand", - "serde", - "tree_hash 0.6.0", - "zeroize", -] - [[package]] name = "bls" version = "0.2.0" @@ -746,9 +728,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.6" +version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d6dbb628b8f8555f86d0323c2eb39e3ec81901f4b83e091db8a6a76d316a333" +checksum = "a012a0df96dd6d06ba9a1b29d6402d1a5d77c6befd2566afdc26e10603dc93d7" dependencies = [ "shlex", ] @@ -784,9 +766,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.23" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" dependencies = [ "clap_builder", "clap_derive", @@ -794,9 +776,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.23" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" dependencies = [ "anstream", "anstyle", @@ -806,14 +788,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -826,7 +808,7 @@ 
checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" name = "client-rs" version = "0.1.0" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "alloy-rpc-types-beacon", "axum", "beacon-state-proof", @@ -834,17 +816,21 @@ dependencies = [ "clap", "dotenv", "ethereum_serde_utils 0.7.0", + "futures", + "glob", "hex", "itertools 0.13.0", "num_cpus", "postgres-types", "rand", - "reqwest 0.12.11", + "reqwest 0.12.12", "serde", "serde_derive", "serde_json", "sha2", "starknet", + "starknet-crypto", + "thiserror 2.0.10", "tokio", "tokio-postgres", "tokio-stream", @@ -852,6 +838,7 @@ dependencies = [ "tracing-subscriber", "tree_hash 0.8.0", "tree_hash_derive 0.8.0", + "uuid 1.11.0", ] [[package]] @@ -860,13 +847,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" -[[package]] -name = "compare_fields" -version = "0.2.0" -dependencies = [ - "itertools 0.10.5", -] - [[package]] name = "compare_fields" version = "0.2.0" @@ -875,14 +855,6 @@ dependencies = [ "itertools 0.10.5", ] -[[package]] -name = "compare_fields_derive" -version = "0.2.0" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "compare_fields_derive" version = "0.2.0" @@ -1058,7 +1030,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -1080,7 +1052,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -1122,7 +1094,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -1135,7 +1107,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -1155,7 +1127,7 @@ checksum = 
"cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", "unicode-xid", ] @@ -1188,7 +1160,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -1292,25 +1264,12 @@ dependencies = [ "uuid 0.8.2", ] -[[package]] -name = "eth2_interop_keypairs" -version = "0.2.0" -dependencies = [ - "bls 0.2.0", - "ethereum_hashing 0.6.0", - "hex", - "lazy_static", - "num-bigint", - "serde", - "serde_yaml", -] - [[package]] name = "eth2_interop_keypairs" version = "0.2.0" source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ - "bls 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "bls", "ethereum_hashing 0.6.0", "hex", "lazy_static", @@ -1388,7 +1347,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70cbccfccf81d67bff0ab36e591fa536c8a935b078a7b0e58c1d00d418332fc9" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "hex", "serde", "serde_derive", @@ -1590,7 +1549,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -2109,7 +2068,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -2174,7 +2133,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -2208,13 +2167,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "int_to_bytes" -version = "0.2.0" -dependencies = [ - "bytes", -] - [[package]] name = "int_to_bytes" version = "0.2.0" @@ -2301,22 +2253,6 @@ dependencies = [ 
"sha3-asm", ] -[[package]] -name = "kzg" -version = "0.1.0" -dependencies = [ - "arbitrary", - "c-kzg", - "derivative", - "ethereum_hashing 0.6.0", - "ethereum_serde_utils 0.5.2", - "ethereum_ssz", - "ethereum_ssz_derive", - "hex", - "serde", - "tree_hash 0.6.0", -] - [[package]] name = "kzg" version = "0.1.0" @@ -2387,9 +2323,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" @@ -2441,16 +2377,6 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" -[[package]] -name = "merkle_proof" -version = "0.2.0" -dependencies = [ - "ethereum-types", - "ethereum_hashing 0.6.0", - "lazy_static", - "safe_arith 0.1.0", -] - [[package]] name = "merkle_proof" version = "0.2.0" @@ -2459,7 +2385,7 @@ dependencies = [ "ethereum-types", "ethereum_hashing 0.6.0", "lazy_static", - "safe_arith 0.1.0 (git+https://github.com/petscheit/lighthouse)", + "safe_arith", ] [[package]] @@ -2618,9 +2544,9 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3409fc85ac27b27d971ea7cd1aabafd2eefa6de7e481c8d4f707225c117e81a" +checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" dependencies = [ "const-hex", "serde", @@ -2665,7 +2591,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -2778,33 +2704,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.9", + "thiserror 2.0.10", "ucd-trie", ] [[package]] name = "phf" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ "phf_shared", ] [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ "siphasher", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -2837,7 +2763,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -3102,9 +3028,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.11" +version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe060fe50f524be480214aba758c71f99f90ee8c83c5a36b5e9e1d568eb4eb3" +checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" dependencies = [ "base64 0.22.1", "bytes", @@ -3136,6 +3062,8 @@ dependencies = [ "system-configuration 0.6.1", "tokio", "tokio-native-tls", + "tokio-util", + "tower", "tower-service", "url", "wasm-bindgen", @@ -3273,9 +3201,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.42" +version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ "bitflags 2.6.0", "errno", @@ -3378,10 +3306,6 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" -[[package]] -name = "safe_arith" -version = "0.1.0" - [[package]] name = "safe_arith" version = "0.1.0" @@ -3462,9 +3386,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.13.0" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -3511,14 +3435,14 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] name = "serde_json" -version = "1.0.134" +version = "1.0.135" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" +checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" dependencies = [ "itoa", "memchr", @@ -3586,7 +3510,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -3669,9 +3593,9 @@ dependencies = [ [[package]] name = "siphasher" -version = "0.3.11" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" [[package]] name = "slab" @@ -3850,7 +3774,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8986a940af916fc0a034f4e42c6ba76d94f1e97216d75447693dfd7aefaf3ef2" dependencies = [ "starknet-core", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -3953,7 +3877,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -3976,14 +3900,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "swap_or_not_shuffle" -version = "0.2.0" -dependencies = [ - "ethereum-types", - "ethereum_hashing 0.6.0", -] - [[package]] name = "swap_or_not_shuffle" version = "0.2.0" @@ -4006,9 +3922,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.93" +version = "2.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c786062daee0d6db1132800e623df74274a0a87322d8e183338e01b3d98d058" +checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" dependencies = [ "proc-macro2", "quote", @@ -4038,7 +3954,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -4091,25 +4007,18 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.14.0" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" dependencies = [ "cfg-if", "fastrand", + "getrandom", "once_cell", "rustix", "windows-sys 0.59.0", ] -[[package]] -name = "test_random_derive" -version = "0.2.0" -dependencies = [ - "quote", - "syn 1.0.109", -] - [[package]] name = "test_random_derive" version = "0.2.0" @@ -4130,11 +4039,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" +checksum = "a3ac7f54ca534db81081ef1c1e7f6ea8a3ef428d2fc069097c079443d24124d3" dependencies = [ - "thiserror-impl 2.0.9", + "thiserror-impl 2.0.10", ] [[package]] @@ -4145,18 +4054,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] name = "thiserror-impl" -version = "2.0.9" +version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" +checksum = "9e9465d30713b56a37ede7185763c3492a91be2f5fa68d958c44e41ab9248beb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -4245,9 +4154,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.42.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", @@ -4263,13 +4172,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -4369,6 +4278,28 @@ dependencies = [ "winnow", ] +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 
1.0.2", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + [[package]] name = "tower-service" version = "0.3.3" @@ -4395,7 +4326,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -4450,7 +4381,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "373495c23db675a5192de8b610395e1bec324d596f9e6111192ce903dc11403a" dependencies = [ - "alloy-primitives 0.8.15", + "alloy-primitives 0.8.18", "ethereum_hashing 0.7.0", "smallvec", ] @@ -4475,7 +4406,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -4500,55 +4431,6 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" -[[package]] -name = "types" -version = "0.2.1" -dependencies = [ - "alloy-primitives 0.7.7", - "alloy-rlp", - "arbitrary", - "bls 0.2.0", - "compare_fields 0.2.0", - "compare_fields_derive 0.2.0", - "derivative", - "eth2_interop_keypairs 0.2.0", - "ethereum-types", - "ethereum_hashing 0.6.0", - "ethereum_serde_utils 0.5.2", - "ethereum_ssz", - "ethereum_ssz_derive", - "hex", - "int_to_bytes 0.2.0", - "itertools 0.10.5", - "kzg 0.1.0", - "lazy_static", - "log", - "maplit", - "merkle_proof 0.2.0", - "metastruct", - "milhouse", - "parking_lot", - "rand", - "rand_xorshift", - "rayon", - "regex", - "rpds", - "rusqlite", - "safe_arith 0.1.0", - "serde", - "serde_json", - "serde_yaml", - "slog", - "smallvec", - "ssz_types", - "superstruct", - "swap_or_not_shuffle 0.2.0", - "tempfile", - "test_random_derive 0.2.0", - "tree_hash 0.6.0", - "tree_hash_derive 
0.6.0", -] - [[package]] name = "types" version = "0.2.1" @@ -4557,24 +4439,24 @@ dependencies = [ "alloy-primitives 0.7.7", "alloy-rlp", "arbitrary", - "bls 0.2.0 (git+https://github.com/petscheit/lighthouse)", - "compare_fields 0.2.0 (git+https://github.com/petscheit/lighthouse)", - "compare_fields_derive 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "bls", + "compare_fields", + "compare_fields_derive", "derivative", - "eth2_interop_keypairs 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "eth2_interop_keypairs", "ethereum-types", "ethereum_hashing 0.6.0", "ethereum_serde_utils 0.5.2", "ethereum_ssz", "ethereum_ssz_derive", "hex", - "int_to_bytes 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "int_to_bytes", "itertools 0.10.5", - "kzg 0.1.0 (git+https://github.com/petscheit/lighthouse)", + "kzg", "lazy_static", "log", "maplit", - "merkle_proof 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "merkle_proof", "metastruct", "milhouse", "parking_lot", @@ -4584,7 +4466,7 @@ dependencies = [ "regex", "rpds", "rusqlite", - "safe_arith 0.1.0 (git+https://github.com/petscheit/lighthouse)", + "safe_arith", "serde", "serde_json", "serde_yaml", @@ -4592,9 +4474,9 @@ dependencies = [ "smallvec", "ssz_types", "superstruct", - "swap_or_not_shuffle 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "swap_or_not_shuffle", "tempfile", - "test_random_derive 0.2.0 (git+https://github.com/petscheit/lighthouse)", + "test_random_derive", "tree_hash 0.6.0", "tree_hash_derive 0.6.0", ] @@ -4628,7 +4510,13 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" name = "unicase" version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + +[[package]] +name = "unicode-bidi" +version = "0.3.18" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" [[package]] name = "unicode-ident" @@ -4727,7 +4615,7 @@ checksum = "6b91f57fe13a38d0ce9e28a03463d8d3c2468ed03d75375110ec71d93b449a08" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -4805,7 +4693,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", "wasm-bindgen-shared", ] @@ -4840,7 +4728,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4853,9 +4741,9 @@ checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "wasm-streams" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -5102,9 +4990,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "39281189af81c07ec09db316b302a3e67bf9bd7cbf6c820b50e35fee9c2fa980" dependencies = [ "memchr", ] @@ -5160,7 +5048,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", "synstructure", ] @@ -5182,7 +5070,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -5202,7 +5090,7 @@ checksum = 
"595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", "synstructure", ] @@ -5223,7 +5111,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] [[package]] @@ -5245,5 +5133,5 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.93", + "syn 2.0.95", ] diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index 2ffa537..f69e20d 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -21,7 +21,7 @@ serde_derive = "1.0.215" serde_json = "1.0.133" tokio = { version = "1.0", features = ["full"] } beacon-state-proof = { git = "https://github.com/petscheit/beacon-state-proof" } -types = { path = "../../lighthouse/consensus/types", package = "types" } +#types = { path = "../../lighthouse/consensus/types", package = "types" } sha2 = "0.10.8" reqwest = { version = "0.12.9", features = ["json", "multipart", "stream"] } rand = "0.8.5" From 83474efd66d22ac110d32204c57df8fb0e03ec24 Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 10 Jan 2025 19:00:36 +0100 Subject: [PATCH 05/66] Update cargo.toml --- client-rs/Cargo.lock | 23 ++++++++++++----------- client-rs/Cargo.toml | 2 +- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/client-rs/Cargo.lock b/client-rs/Cargo.lock index 48804ea..fdd6f57 100644 --- a/client-rs/Cargo.lock +++ b/client-rs/Cargo.lock @@ -645,7 +645,7 @@ dependencies = [ [[package]] name = "bls" version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "arbitrary", "blst", @@ -838,6 +838,7 @@ dependencies = [ "tracing-subscriber", "tree_hash 0.8.0", "tree_hash_derive 0.8.0", + "types", "uuid 
1.11.0", ] @@ -850,7 +851,7 @@ checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "compare_fields" version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "itertools 0.10.5", ] @@ -858,7 +859,7 @@ dependencies = [ [[package]] name = "compare_fields_derive" version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "quote", "syn 1.0.109", @@ -1267,7 +1268,7 @@ dependencies = [ [[package]] name = "eth2_interop_keypairs" version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "bls", "ethereum_hashing 0.6.0", @@ -2170,7 +2171,7 @@ dependencies = [ [[package]] name = "int_to_bytes" version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "bytes", ] @@ -2256,7 +2257,7 @@ dependencies = [ [[package]] name = "kzg" version = "0.1.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "arbitrary", "c-kzg", @@ -2380,7 +2381,7 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "merkle_proof" version = "0.2.0" -source = 
"git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "ethereum-types", "ethereum_hashing 0.6.0", @@ -3309,7 +3310,7 @@ checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "safe_arith" version = "0.1.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" [[package]] name = "salsa20" @@ -3903,7 +3904,7 @@ dependencies = [ [[package]] name = "swap_or_not_shuffle" version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "ethereum-types", "ethereum_hashing 0.6.0", @@ -4022,7 +4023,7 @@ dependencies = [ [[package]] name = "test_random_derive" version = "0.2.0" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "quote", "syn 1.0.109", @@ -4434,7 +4435,7 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "types" version = "0.2.1" -source = "git+https://github.com/petscheit/lighthouse#dad6bfb285942b8f3076b26a446bc8bb8d114968" +source = "git+https://github.com/petscheit/lighthouse.git#dad6bfb285942b8f3076b26a446bc8bb8d114968" dependencies = [ "alloy-primitives 0.7.7", "alloy-rlp", diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index f69e20d..dce00d8 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -21,7 +21,7 @@ serde_derive = "1.0.215" serde_json = "1.0.133" tokio = { version = "1.0", features = 
["full"] } beacon-state-proof = { git = "https://github.com/petscheit/beacon-state-proof" } -#types = { path = "../../lighthouse/consensus/types", package = "types" } +types = { git = "https://github.com/petscheit/lighthouse.git", package = "types" } sha2 = "0.10.8" reqwest = { version = "0.12.9", features = ["json", "multipart", "stream"] } rand = "0.8.5" From 01a2931b84444fc384cc7142623344582e941da9 Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 10 Jan 2025 19:37:05 +0100 Subject: [PATCH 06/66] Fixes after merge --- client-rs/src/daemon.rs | 68 ++++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 31 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index c4b2573..349a03c 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -1,11 +1,14 @@ mod config; mod contract_init; +pub mod epoch_batch; mod epoch_update; +mod execution_header; mod sync_committee; mod traits; mod utils; -use alloy_primitives::TxHash; + +//use alloy_primitives::TxHash; use config::BankaiConfig; use serde_json::json; @@ -14,8 +17,8 @@ use alloy_rpc_types_beacon::events::HeadEvent; use axum::{ extract::{DefaultBodyLimit, Path, State}, //http::{header, StatusCode}, - response::{IntoResponse, Json, Response}, - routing::{get, post}, + response::{IntoResponse, Json}, + routing::{get}, Router, }; use contract_init::ContractInitializationData; @@ -60,7 +63,7 @@ impl std::fmt::Display for StarknetError { impl std::error::Error for StarknetError {} -#[derive(Debug, FromSql, ToSql)] +#[derive(Debug, FromSql, ToSql, Clone)] #[postgres(name = "job_status")] enum JobStatus { #[postgres(name = "CREATED")] @@ -105,7 +108,7 @@ impl ToString for JobStatus { } } -#[derive(Debug, FromSql, ToSql)] +#[derive(Debug, FromSql, ToSql, Clone)] enum JobType { EpochUpdate, SyncComiteeUpdate, @@ -135,6 +138,7 @@ pub enum Error { AtlanticError(reqwest::Error), InvalidResponse(String), PoolingTimeout(String), + InvalidMerkleTree } impl fmt::Display for 
Error { @@ -156,6 +160,7 @@ impl fmt::Display for Error { Error::AtlanticError(err) => write!(f, "Atlantic RPC error: {}", err), Error::InvalidResponse(msg) => write!(f, "Invalid response: {}", msg), Error::PoolingTimeout(msg) => write!(f, "Pooling timeout: {}", msg), + Error::InvalidMerkleTree => write!(f, "Invalid Merkle Tree"), } } } @@ -178,7 +183,7 @@ impl From for Error { } } -#[derive(Debug)] +#[derive(Clone, Debug)] struct Job { job_id: Uuid, job_type: JobType, @@ -226,15 +231,23 @@ impl BankaiClient { &self, mut slot: u64, ) -> Result { + let mut attempts = 0; + const MAX_ATTEMPTS: u8 = 3; + // Before we start generating the proof, we ensure the slot was not missed - match self.client.get_header(slot).await { - Ok(header) => header, - Err(Error::EmptySlotDetected(_)) => { - slot += 1; - println!("Empty slot detected! Fetching slot: {}", slot); - self.client.get_header(slot).await? + let _header = loop { + match self.client.get_header(slot).await { + Ok(header) => break header, + Err(Error::EmptySlotDetected(_)) => { + attempts += 1; + if attempts >= MAX_ATTEMPTS { + return Err(Error::EmptySlotDetected(slot)); + } + slot += 1; + println!("Empty slot detected! Attempt {}/{}. 
Fetching slot: {}", attempts, MAX_ATTEMPTS, slot); + } + Err(e) => return Err(e), // Propagate other errors immediately } - Err(e) => return Err(e), // Propagate other errors immediately }; let proof: SyncCommitteeUpdate = SyncCommitteeUpdate::new(&self.client, slot).await?; @@ -358,7 +371,7 @@ async fn main() -> Result<(), Box> { let bankai = Arc::new(BankaiClient::new().await); // Clone the Arc for use in async task - let bankai_for_task = Arc::clone(&bankai); + //let bankai_for_task = Arc::clone(&bankai); // Beacon node endpoint construction for ervents let events_endpoint = format!( @@ -495,11 +508,11 @@ async fn main() -> Result<(), Box> { job_id: job_id.clone(), job_type: JobType::EpochUpdate, job_status: JobStatus::Created, - slot: parsed_event.slot - 32, + slot: parsed_event.slot, }; let db_client = db_client.clone(); - match create_job(db_client, job_id, parsed_event.slot).await { + match create_job(db_client, job.clone()).await { // Insert new job record to DB Ok(()) => { // Handle success @@ -576,8 +589,6 @@ async fn insert_verified_epoch( epoch_id: u64, epoch_proof: EpochProof, ) -> Result<(), Box> { - let status = JobStatus::Created; // new job starts at 'Created' - client .execute( "INSERT INTO verified_epoch (epoch_id, header_root, state_root, n_signers) VALUES ($1)", @@ -598,8 +609,6 @@ async fn insert_verified_sync_committee( sync_committee_id: u64, sync_committee_hash: FixedBytes<32>, ) -> Result<(), Box> { - let status = JobStatus::Created; // new job starts at 'Created' - client .execute( "INSERT INTO verified_sync_committee (sync_committee_id, sync_committee_hash) VALUES ($1)", @@ -612,18 +621,15 @@ async fn insert_verified_sync_committee( async fn create_job( client: Arc, - job_id: Uuid, - slot: u64, + job: Job ) -> Result<(), Box> { - let status = JobStatus::Created; // new job starts at 'Created' - client .execute( "INSERT INTO jobs (job_uuid, job_status, slot, type) VALUES ($1, $2, $3, $4)", &[ - &job_id, - &status.to_string(), - &(slot as 
i64), + &job.job_id, + &job.job_status.to_string(), + &(job.slot as i64), &"EPOCH_UPDATE", ], ) @@ -779,7 +785,7 @@ async fn process_job( // 1) Fetch the latest on-chain verified epoch let latest_epoch = bankai .starknet_client - .get_latest_epoch(&bankai.config) + .get_latest_epoch_slot(&bankai.config) .await?; info!( @@ -788,7 +794,7 @@ async fn process_job( ); // make sure next_epoch % 32 == 0 - let next_epoch = (u64::try_from(latest_epoch).unwrap() / 32) * 32 + 32; + let next_epoch = (u64::try_from(latest_epoch).unwrap() / SLOTS_PER_EPOCH) * SLOTS_PER_EPOCH + SLOTS_PER_EPOCH; info!( "[EPOCH JOB] Fetching Inputs for next Epoch: {}...", next_epoch @@ -932,7 +938,7 @@ async fn process_job( let latest_epoch = bankai .starknet_client - .get_latest_epoch(&bankai.config) + .get_latest_epoch_slot(&bankai.config) .await?; let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); @@ -1123,7 +1129,7 @@ async fn handle_get_latest_verified_slot(State(state): State) -> impl match state .bankai .starknet_client - .get_latest_epoch(&state.bankai.config) + .get_latest_epoch_slot(&state.bankai.config) .await { Ok(latest_epoch) => { From b8d94d23574a1f04b37448ed03e16bc53e8a6f12 Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 10 Jan 2025 19:51:42 +0100 Subject: [PATCH 07/66] Change names to more represent actual values --- client-rs/src/daemon.rs | 6 +++--- contract/src/lib.cairo | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 349a03c..de38d2f 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -783,14 +783,14 @@ async fn process_job( //update_job_status(&db_client, job.job_id, JobStatus::Created).await?; // 1) Fetch the latest on-chain verified epoch - let latest_epoch = bankai + let latest_epoch_slot = bankai .starknet_client .get_latest_epoch_slot(&bankai.config) .await?; info!( - "[EPOCH JOB] Latest onchain verified epoch: {}", - latest_epoch + 
"[EPOCH JOB] Latest onchain verified epoch slot: {}", + latest_epoch_slot ); // make sure next_epoch % 32 == 0 diff --git a/contract/src/lib.cairo b/contract/src/lib.cairo index d355aba..ce1bec7 100644 --- a/contract/src/lib.cairo +++ b/contract/src/lib.cairo @@ -144,7 +144,7 @@ pub mod BankaiContract { epoch_batch_program_hash: felt252, ) { self.owner.write(get_caller_address()); - self.latest_epoch.write(0); + self.latest_epoch_slot.write(0); // Write trusted initial committee self.initialization_committee.write(committee_id); @@ -163,8 +163,8 @@ pub mod BankaiContract { self.committee.read(committee_id) } - fn get_latest_epoch(self: @ContractState) -> u64 { - self.latest_epoch.read() + fn get_latest_epoch_slot(self: @ContractState) -> u64 { + self.latest_epoch_slot.read() } fn get_latest_committee_id(self: @ContractState) -> u64 { @@ -237,7 +237,7 @@ pub mod BankaiContract { }; self.epochs.write(slot, epoch_proof); - self.latest_epoch.write(slot); + self.latest_epoch_slot.write(slot); self.emit(Event::EpochUpdated(EpochUpdated { beacon_root: header_root, slot: slot, execution_hash: execution_hash, execution_height: execution_height, })); @@ -269,7 +269,7 @@ pub mod BankaiContract { }; self.epochs.write(slot, epoch_proof); - self.latest_epoch.write(slot); + self.latest_epoch_slot.write(slot); self.emit(Event::EpochBatch(EpochBatch { batch_root: batch_root, beacon_root: header_root, slot: slot, execution_hash: execution_hash, execution_height: execution_height, })); @@ -305,7 +305,7 @@ pub mod BankaiContract { }; self.epochs.write(slot, epoch_proof); - self.latest_epoch.write(slot); + self.latest_epoch_slot.write(slot); self.emit(Event::EpochDecommitted(EpochDecommitted { batch_root: batch_root, slot: slot, execution_hash: execution_hash, execution_height: execution_height, })); From 2a4e6f65349a4b9f3c4a68e055db1a84fadcd342 Mon Sep 17 00:00:00 2001 From: lakewik Date: Mon, 13 Jan 2025 11:05:56 +0100 Subject: [PATCH 08/66] Start implementing batched scheduler --- 
client-rs/Cargo.lock | 3 +- client-rs/Cargo.toml | 1 + client-rs/src/constants.rs | 3 + client-rs/src/daemon.rs | 696 ++++++++++++++++++------- client-rs/src/epoch_batch.rs | 128 ++++- client-rs/src/epoch_update.rs | 6 +- client-rs/src/helpers.rs | 9 + client-rs/src/main.rs | 23 +- client-rs/src/utils/starknet_client.rs | 20 +- contract/src/lib.cairo | 8 +- 10 files changed, 676 insertions(+), 221 deletions(-) create mode 100644 client-rs/src/constants.rs create mode 100644 client-rs/src/helpers.rs diff --git a/client-rs/Cargo.lock b/client-rs/Cargo.lock index fdd6f57..6921c6a 100644 --- a/client-rs/Cargo.lock +++ b/client-rs/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "addr2line" @@ -820,6 +820,7 @@ dependencies = [ "glob", "hex", "itertools 0.13.0", + "num-traits", "num_cpus", "postgres-types", "rand", diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index dce00d8..867efdd 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -52,3 +52,4 @@ num_cpus = "1.16.0" starknet-crypto = "0.7.3" glob = "0.3.2" +num-traits = "0.2.19" diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs new file mode 100644 index 0000000..b6ba121 --- /dev/null +++ b/client-rs/src/constants.rs @@ -0,0 +1,3 @@ +pub const SLOTS_PER_EPOCH: u64 = 32; // For mainnet +pub const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet +pub const TARGET_BATCH_SIZE: u64 = 3; // Defines how many epochs in one batch diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index de38d2f..0ea859b 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -1,31 +1,33 @@ mod config; +mod constants; mod contract_init; pub mod epoch_batch; mod epoch_update; mod execution_header; +mod helpers; mod sync_committee; mod traits; mod utils; - //use alloy_primitives::TxHash; -use config::BankaiConfig; -use serde_json::json; - use 
alloy_primitives::FixedBytes; use alloy_rpc_types_beacon::events::HeadEvent; use axum::{ extract::{DefaultBodyLimit, Path, State}, //http::{header, StatusCode}, response::{IntoResponse, Json}, - routing::{get}, + routing::get, Router, }; +use config::BankaiConfig; +use constants::SLOTS_PER_EPOCH; use contract_init::ContractInitializationData; use dotenv::from_filename; use epoch_update::{EpochProof, EpochUpdate}; +use num_traits::cast::ToPrimitive; use postgres_types::{FromSql, ToSql}; use reqwest; +use serde_json::json; use starknet::core::types::Felt; use std::env; use std::sync::Arc; @@ -43,15 +45,13 @@ use utils::{ starknet_client::{StarknetClient, StarknetError}, }; //use std::error::Error as StdError; +use epoch_batch::EpochUpdateBatch; use std::fmt; use std::net::SocketAddr; use sync_committee::SyncCommitteeUpdate; use tokio::time::Duration; use uuid::Uuid; -const SLOTS_PER_EPOCH: u64 = 32; // For mainnet -const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet - impl std::fmt::Display for StarknetError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -80,14 +80,16 @@ enum JobStatus { WrapProofRequested, #[postgres(name = "WRAPPED_PROOF_DONE")] WrappedProofDone, - #[postgres(name = "PROOF_DECOMMITMENT_CALLED")] - ProofDecommitmentCalled, + #[postgres(name = "READY_TO_BROADCAST")] + ReadyToBroadcast, + #[postgres(name = "PROOF_VERIFY_CALLED_ONCHAIN")] + ProofVerifyCalledOnchain, #[postgres(name = "VERIFIED_FACT_REGISTERED")] VerifiedFactRegistered, #[postgres(name = "ERROR")] - Cancelled, - #[postgres(name = "CANCELLED")] Error, + #[postgres(name = "CANCELLED")] + Cancelled, } impl ToString for JobStatus { @@ -100,7 +102,8 @@ impl ToString for JobStatus { JobStatus::OffchainProofRetrieved => "OFFCHAIN_PROOF_RETRIEVED".to_string(), JobStatus::WrapProofRequested => "WRAP_PROOF_REQUESTED".to_string(), JobStatus::WrappedProofDone => "WRAPPED_PROOF_DONE".to_string(), - JobStatus::ProofDecommitmentCalled => 
"PROOF_DECOMMITMENT_CALLED".to_string(), + JobStatus::ReadyToBroadcast => "READY_TO_BROADCAST".to_string(), + JobStatus::ProofVerifyCalledOnchain => "PROOF_VERIFY_CALLED_ONCHAIN".to_string(), JobStatus::VerifiedFactRegistered => "VERIFIED_FACT_REGISTERED".to_string(), JobStatus::Cancelled => "CANCELLED".to_string(), JobStatus::Error => "ERROR".to_string(), @@ -111,6 +114,7 @@ impl ToString for JobStatus { #[derive(Debug, FromSql, ToSql, Clone)] enum JobType { EpochUpdate, + EpochBatchUpdate, SyncComiteeUpdate, } @@ -138,7 +142,7 @@ pub enum Error { AtlanticError(reqwest::Error), InvalidResponse(String), PoolingTimeout(String), - InvalidMerkleTree + InvalidMerkleTree, } impl fmt::Display for Error { @@ -244,7 +248,10 @@ impl BankaiClient { return Err(Error::EmptySlotDetected(slot)); } slot += 1; - println!("Empty slot detected! Attempt {}/{}. Fetching slot: {}", attempts, MAX_ATTEMPTS, slot); + info!( + "Empty slot detected! Attempt {}/{}. Fetching slot: {}", + attempts, MAX_ATTEMPTS, slot + ); } Err(e) => return Err(e), // Propagate other errors immediately } @@ -295,25 +302,28 @@ fn check_env_vars() -> Result<(), String> { Ok(()) } -fn slot_to_epoch(slot: u64) -> u64 { - slot / SLOTS_PER_EPOCH -} - -fn slot_to_sync_committee_id(slot: u64) -> u64 { - slot / SLOTS_PER_SYNC_COMMITTEE +// Since beacon chain RPCs have different response structure (quicknode responds different than nidereal) we use this event extraction logic +fn extract_json(event_text: &str) -> Option { + for line in event_text.lines() { + if line.starts_with("data:") { + // Extract the JSON after "data:" + return Some(line.trim_start_matches("data:").trim().to_string()); + } + } + None } #[tokio::main] //async fn main() { -async fn main() -> Result<(), Box> { +async fn main() -> Result<(), Box> { // Load .env.sepolia file from_filename(".env.sepolia").ok(); let slot_listener_toggle = true; let subscriber = FmtSubscriber::builder() - .with_max_level(Level::DEBUG) - //.with_max_level(Level::INFO) + 
//.with_max_level(Level::DEBUG) + .with_max_level(Level::INFO) .finish(); tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); @@ -331,7 +341,7 @@ async fn main() -> Result<(), Box> { //let (tx, mut rx) = mpsc::channel(32); - let connection_string = "host=localhost user=root password=root dbname=bankai"; + let connection_string = "host=localhost user=meow password=meow dbname=bankai"; // let connection_string = format!( // "host={} user={} password={} dbname={}", // env::var("POSTGRESQL_HOST").unwrap().as_str(), @@ -378,39 +388,52 @@ async fn main() -> Result<(), Box> { "{}/eth/v1/events?topics=head", env::var("BEACON_RPC_URL").unwrap().as_str() ); - //let events_endpoint = format!("{}/eth/v1/events?topics=head", beacon_node_url); + //let events_endpoint = format!("{}/eth/v1/events?topics=head", beacon_node_url) + let db_client_for_state = db_client.clone(); + let db_client_for_listener = db_client.clone(); + let bankai_for_state = bankai.clone(); + let bankai_for_listener = bankai.clone(); //Spawn a background task to process jobs - tokio::spawn({ - let bankai_for_job = Arc::clone(&bankai); - let db_client_for_job = Arc::clone(&db_client); - async move { - while let Some(job) = rx.recv().await { - let job_id = job.job_id.clone(); - if let Err(e) = - process_job(job, db_client_for_job.clone(), bankai_for_job.clone()).await - { - update_job_status(&db_client_for_job.clone(), job_id, JobStatus::Error).await; - error!("Error processing job {}: {}", job_id, e); + tokio::spawn(async move { + while let Some(job) = rx.recv().await { + let job_id = job.job_id; + let db_clone = Arc::clone(&db_client); + let bankai_clone = Arc::clone(&bankai); + + // Spawn a *new task* for each job — now they can run in parallel + tokio::spawn(async move { + match process_job(job, db_clone.clone(), bankai_clone.clone()).await { + Ok(_) => { + info!("Job {} completed successfully", job_id); + } + Err(e) => { + update_job_status(&db_clone, job_id, 
JobStatus::Error).await; + error!("Error processing job {}: {}", job_id, e); + } } - } + }); } }); // let db_client_for_task =db_client.clone(); - let db_client_for_state = db_client.clone(); + let tx_for_task = tx.clone(); let app_state: AppState = AppState { db_client: db_client_for_state, tx, - bankai, + bankai: bankai_for_state, }; let app = Router::new() .route("/status", get(handle_get_status)) //.route("/get-epoch-proof/:slot", get(handle_get_epoch_proof)) //.route("/get-committee-hash/:committee_id", get(handle_get_committee_hash)) + .route( + "/get_merkle_paths_for_epoch/:slot", + get(handle_get_merkle_paths_for_epoch), + ) .route( "/debug/get-epoch-update/:slot", get(handle_get_epoch_update), @@ -459,89 +482,174 @@ async fn main() -> Result<(), Box> { let mut stream = response.bytes_stream(); while let Some(chunk) = stream.next().await { - let Ok(bytes) = chunk else { - warn!("Error reading stream: {}", chunk.err().unwrap()); - continue; - }; - - let Ok(text) = String::from_utf8(bytes.to_vec()) else { - warn!("Failed to parse UTF-8."); - continue; - }; - - if text.is_empty() { - continue; - } - - trace!("New slot event detected: {}", text); - - // Search for JSON start - let Some(json_start) = text.find('{') else { - warn!("No JSON data found in the input."); - continue; - }; - - // Try parsing the JSON substring into your event structsync_committee_id - let Ok(parsed_event) = serde_json::from_str::(&text[json_start..]) - else { - warn!("Failed to parse JSON data received from Beacon Chain event."); - continue; - }; - - info!( - "New slot event detected: {} | Is epoch transition: {}", - parsed_event.slot, parsed_event.epoch_transition - ); - - if parsed_event.epoch_transition { - info!("Epoch transition detected! 
Starting processing..."); - - // Check also now if slot is the moment of switch to new sync committee set - if parsed_event.slot % SLOTS_PER_SYNC_COMMITTEE == 0 { - let sync_committee_id = slot_to_sync_committee_id(parsed_event.slot); - info!("In this slot sync committee rotation taken place. Slot {} Sync committee id: {}", parsed_event.slot, sync_committee_id); - // We should probably now start sync committee verify job - } - - let job_id = Uuid::new_v4(); - let job = Job { - job_id: job_id.clone(), - job_type: JobType::EpochUpdate, - job_status: JobStatus::Created, - slot: parsed_event.slot, - }; - - let db_client = db_client.clone(); - match create_job(db_client, job.clone()).await { - // Insert new job record to DB - Ok(()) => { - // Handle success - info!("Job created successfully with ID: {}", job_id); - if tx_for_task.send(job).await.is_err() { - error!("Failed to send job."); + match chunk { + Ok(bytes) => { + if let Ok(event_text) = String::from_utf8(bytes.to_vec()) { + // Preprocess the event text + if let Some(json_data) = extract_json(&event_text) { + match serde_json::from_str::(&json_data) { + Ok(parsed_event) => { + //let is_node_in_sync = false; + let bankai = bankai_for_listener.clone(); + + let epoch_id = + helpers::slot_to_epoch_id(parsed_event.slot); + let sync_committee_id = + helpers::slot_to_sync_committee_id( + parsed_event.slot, + ); + + info!( + "New slot event detected: {} | Block: {} | Epoch: {} | Sync committee: {} | Is epoch transition: {}", + parsed_event.slot, parsed_event.block, epoch_id, sync_committee_id, parsed_event.epoch_transition + ); + + let latest_epoch_slot = bankai + .starknet_client + .get_latest_epoch_slot(&bankai.config) + .await + .unwrap() + .to_u64() + .unwrap(); + + let latest_verified_epoch_id = + helpers::slot_to_epoch_id(latest_epoch_slot); + let epochs_behind = epoch_id - latest_verified_epoch_id; + + // We getting the last slot in progress to determine next slots to prove + let mut last_slot_in_progress: u64 = 
1000000; + match get_latest_slot_id_in_progress( + &db_client_for_listener.clone(), + ) + .await + { + Ok(Some(slot)) => { + last_slot_in_progress = slot.to_u64().unwrap(); + info!( + "Latest in progress slot: {} Epoch: {}", + last_slot_in_progress, + helpers::slot_to_epoch_id( + last_slot_in_progress + ) + ); + } + Ok(None) => { + warn!("No any in progress slot"); + } + Err(e) => { + error!( + "Error while getting latest in progress slot ID: {}", + e + ); + } + } + + if epochs_behind > constants::TARGET_BATCH_SIZE { + // is_node_in_sync = true; + + warn!( + "Bankai is out of sync now. Node is {} epochs behind network. Current Beacon Chain epoch: {} Latest verified epoch: {} Sync in progress...", + epochs_behind, epoch_id, latest_verified_epoch_id + ); + + match run_batch_update_job( + db_client_for_listener.clone(), + last_slot_in_progress + + (constants::SLOTS_PER_EPOCH + * constants::TARGET_BATCH_SIZE), + tx_for_task.clone(), + ) + .await + { + // Insert new job record to DB + Ok(()) => {} + Err(e) => {} + }; + + // let epoch_update = EpochUpdateBatch::new_by_slot( + // &bankai, + // &db_client_for_listener.clone(), + // last_slot_in_progress + // + constants::SLOTS_PER_EPOCH, + // ) + // .await?; + } + + // Check if sync committee update is needed + //sync_committee_id + + if latest_epoch_slot + % constants::SLOTS_PER_SYNC_COMMITTEE + == 0 + {} + + //return; + + // When we doing EpochBatchUpdate the slot is latest_batch_output + // So for each batch update we takin into account effectiviely the latest slot from given batch + + let db_client = db_client_for_listener.clone(); + + // evaluete_jobs_statuses(); + // broadcast_ready_jobs(); + + // We can do all circuit computations up to latest slot in advance, but the onchain broadcasts must be send in correct order + // By correct order mean that within the same sync committe the epochs are not needed to be broadcasted in order + // but the order of sync_commite_update->epoch_update must be correct, we firstly need 
to have correct sync committe veryfied + // before we verify epoch "belonging" to this sync committee + + if parsed_event.epoch_transition { + info!("Beacon Chain epoch transition detected. New epoch: {} | Starting processing epoch proving...", epoch_id); + + // Check also now if slot is the moment of switch to new sync committee set + if parsed_event.slot + % constants::SLOTS_PER_SYNC_COMMITTEE + == 0 + { + info!("Beacon Chain sync committee rotation occured. Slot {} | Sync committee id: {}", parsed_event.slot, sync_committee_id); + } + + let job_id = Uuid::new_v4(); + let job = Job { + job_id: job_id.clone(), + job_type: JobType::EpochBatchUpdate, + job_status: JobStatus::Created, + slot: parsed_event.slot, // It is the last slot for given batch + }; + + let db_client = db_client_for_listener.clone(); + match create_job(db_client, job.clone()).await { + // Insert new job record to DB + Ok(()) => { + // Handle success + info!( + "Job created successfully with ID: {}", + job_id + ); + if tx_for_task.send(job).await.is_err() { + error!("Failed to send job."); + } + // If starting committee update job, first ensule that the corresponding slot is registered in contract + } + Err(e) => { + // Handle the error + error!("Error creating job: {}", e); + } + } + } + } + Err(err) => { + warn!("Failed to parse JSON data: {}", err); + } + } + } else { + warn!("No valid JSON data found in event: {}", event_text); } - // let job = Job { - // job_id: job_id.clone(), - // job_type: JobType::EpochUpdate, - // job_status: JobStatus::Created, - // slot: parsed_event.slot - 32, - // }; - // if tx_for_task.send(job).await.is_err() { - // error!("Failed to send job."); - // } - // - // If starting committee update job, first ensule that the corresponding slot is registered in contract - } - Err(e) => { - // Handle the error - error!("Error creating job: {}", e); } } - - // match bankai_for_task.get_epoch_proof(parsed_event.slot - 32).await { - // Ok(proof) => info!("Epoch proof fetched 
successfully: {:?}", proof), - // Err(e) => error!("Failed to fetch epoch proof: {:?}", e), - // } + Err(err) => { + warn!("Error reading event stream: {}", err); + } } } } @@ -554,6 +662,37 @@ async fn main() -> Result<(), Box> { Ok(()) } +async fn run_batch_update_job( + db_client: Arc, + slot: u64, + tx: mpsc::Sender, +) -> Result<(), Box> { + let job_id = Uuid::new_v4(); + let job = Job { + job_id: job_id.clone(), + job_type: JobType::EpochBatchUpdate, + job_status: JobStatus::Created, + slot, + }; + + match create_job(db_client, job.clone()).await { + // Insert new job record to DB + Ok(()) => { + // Handle success + info!("Job created successfully with ID: {}", job_id); + if tx.send(job).await.is_err() { + return Err("Failed to send job".into()); + } + // If starting committee update job, first ensule that the corresponding slot is registered in contract + Ok(()) + } + Err(e) => { + // Handle the error + return Err(e.into()); + } + } +} + async fn set_atlantic_job_queryid( client: &Client, job_id: Uuid, @@ -621,7 +760,7 @@ async fn insert_verified_sync_committee( async fn create_job( client: Arc, - job: Job + job: Job, ) -> Result<(), Box> { client .execute( @@ -649,6 +788,50 @@ async fn fetch_job_status( Ok(row_opt.map(|row| row.get("status"))) } +pub async fn get_latest_slot_id_in_progress( + client: &Client, +) -> Result, Box> { + // Query the latest slot with job_status in ('in_progress', 'initialized') + let row_opt = client + .query_opt( + "SELECT slot FROM jobs + WHERE job_status IN ($1, $2) + ORDER BY slot DESC + LIMIT 1", + &[&"CREATED", &"PIE_GENERATED"], + ) + .await?; + + // Extract and return the slot ID + if let Some(row) = row_opt { + Ok(Some(row.get::<_, i64>("slot"))) + } else { + Ok(None) + } +} + +pub async fn get_merkle_paths_for_epoch( + client: &Client, + epoch_id: i32, +) -> Result, Box> { + // Query all merkle paths for the given epoch_id + let rows = client + .query( + "SELECT merkle_path FROM epoch_merkle_paths + WHERE epoch_id = $1 
+ ORDER BY path_index ASC", + &[&epoch_id], + ) + .await?; + + let paths: Vec = rows + .iter() + .map(|row| row.get::<_, String>("merkle_path")) + .collect(); + + Ok(paths) +} + async fn update_job_status( client: &Client, job_id: Uuid, @@ -783,18 +966,22 @@ async fn process_job( //update_job_status(&db_client, job.job_id, JobStatus::Created).await?; // 1) Fetch the latest on-chain verified epoch - let latest_epoch_slot = bankai - .starknet_client - .get_latest_epoch_slot(&bankai.config) - .await?; + // let latest_epoch_slot = bankai + // .starknet_client + // .get_latest_epoch_slot(&bankai.config) + // .await?; - info!( - "[EPOCH JOB] Latest onchain verified epoch slot: {}", - latest_epoch_slot - ); + // info!( + // "[EPOCH JOB] Latest onchain verified epoch slot: {}", + // latest_epoch_slot + // ); + + //let latest_epoch_slot = ; // make sure next_epoch % 32 == 0 - let next_epoch = (u64::try_from(latest_epoch).unwrap() / SLOTS_PER_EPOCH) * SLOTS_PER_EPOCH + SLOTS_PER_EPOCH; + let next_epoch = (u64::try_from(job.slot).unwrap() / constants::SLOTS_PER_EPOCH) + * constants::SLOTS_PER_EPOCH + + constants::SLOTS_PER_EPOCH; info!( "[EPOCH JOB] Fetching Inputs for next Epoch: {}...", next_epoch @@ -824,91 +1011,91 @@ async fn process_job( update_job_status(&db_client, job.job_id, JobStatus::PieGenerated).await?; - // 4) Submit offchain proof-generation job to Atlantic - info!("[EPOCH JOB] Sending proof generation query to Atlantic..."); + // // 4) Submit offchain proof-generation job to Atlantic + // info!("[EPOCH JOB] Sending proof generation query to Atlantic..."); - let batch_id = bankai.atlantic_client.submit_batch(proof).await?; + // let batch_id = bankai.atlantic_client.submit_batch(proof).await?; - info!( - "[EPOCH JOB] Proof generation batch submitted to Atlantic. QueryID: {}", - batch_id - ); + // info!( + // "[EPOCH JOB] Proof generation batch submitted to Atlantic. 
QueryID: {}", + // batch_id + // ); - update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; - set_atlantic_job_queryid( - &db_client, - job.job_id, - batch_id.clone(), - AtlanticJobType::ProofGeneration, - ) - .await?; + // update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; + // set_atlantic_job_queryid( + // &db_client, + // job.job_id, + // batch_id.clone(), + // AtlanticJobType::ProofGeneration, + // ) + // .await?; - // Pool for Atlantic execution done - bankai - .atlantic_client - .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) - .await?; + // // Pool for Atlantic execution done + // bankai + // .atlantic_client + // .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) + // .await?; - info!( - "[EPOCH JOB] Proof generation done by Atlantic. QueryID: {}", - batch_id - ); + // info!( + // "[EPOCH JOB] Proof generation done by Atlantic. QueryID: {}", + // batch_id + // ); - let proof = bankai - .atlantic_client - .fetch_proof(batch_id.as_str()) - .await?; + // let proof = bankai + // .atlantic_client + // .fetch_proof(batch_id.as_str()) + // .await?; - info!( - "[EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", - batch_id - ); + // info!( + // "[EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", + // batch_id + // ); - update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; + // update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; - // 5) Submit wrapped proof request - info!("[EPOCH JOB] Sending proof wrapping query to Atlantic.."); - let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; - info!( - "[EPOCH JOB] Proof wrapping query submitted to Atlantic. 
Wrapping QueryID: {}", - wrapping_batch_id - ); + // // 5) Submit wrapped proof request + // info!("[EPOCH JOB] Sending proof wrapping query to Atlantic.."); + // let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; + // info!( + // "[EPOCH JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", + // wrapping_batch_id + // ); - update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; - set_atlantic_job_queryid( - &db_client, - job.job_id, - wrapping_batch_id.clone(), - AtlanticJobType::ProofWrapping, - ) - .await?; + // update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; + // set_atlantic_job_queryid( + // &db_client, + // job.job_id, + // wrapping_batch_id.clone(), + // AtlanticJobType::ProofWrapping, + // ) + // .await?; - // Pool for Atlantic execution done - bankai - .atlantic_client - .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) - .await?; + // // Pool for Atlantic execution done + // bankai + // .atlantic_client + // .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) + // .await?; - update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; + // update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; - info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); + // info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. 
Wrapping QueryID: {}", wrapping_batch_id); - update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; + // update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; - // 6) Submit epoch update onchain - info!("[EPOCH JOB] Calling epoch update onchain..."); - let update = EpochUpdate::from_json::(next_epoch)?; + // // 6) Submit epoch update onchain + // info!("[EPOCH JOB] Calling epoch update onchain..."); + // let update = EpochUpdate::from_json::(next_epoch)?; - let txhash = bankai - .starknet_client - .submit_update(update.expected_circuit_outputs, &bankai.config) - .await?; + // let txhash = bankai + // .starknet_client + // .submit_update(update.expected_circuit_outputs, &bankai.config) + // .await?; - set_job_txhash(&db_client, job.job_id, txhash).await?; + // set_job_txhash(&db_client, job.job_id, txhash).await?; - info!("[EPOCH JOB] Successfully submitted epoch update..."); + // info!("[EPOCH JOB] Successfully submitted epoch update..."); - update_job_status(&db_client, job.job_id, JobStatus::ProofDecommitmentCalled).await?; + // update_job_status(&db_client, job.job_id, JobStatus::ProofDecommitmentCalled).await?; // Now we can get proof from contract? 
// bankai.starknet_client.get_epoch_proof( @@ -922,7 +1109,7 @@ async fn process_job( JobType::SyncComiteeUpdate => { // Sync committee job info!( - "[SYNC COMMITTEE JOB] Started processing sync committee job: {} for epoch {}", + "[SYNC COMMITTEE JOB] Started processing sync committee job: {} for slot {}", job.job_id, job.slot ); @@ -932,7 +1119,7 @@ async fn process_job( .await?; info!( - "[SYNC COMMITTEE JOB] Latest onchain verified sync committee: {}", + "[SYNC COMMITTEE JOB] Latest onchain verified sync committee id: {}", latest_committee_id ); @@ -944,7 +1131,7 @@ async fn process_job( let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); if latest_epoch < lowest_committee_update_slot { - error!("[SYNC COMMITTEE JOB] Epoch update requires newer epoch",); + error!("[SYNC COMMITTEE JOB] Sync committee update requires newer epoch verified. The lowest needed slot is {}", lowest_committee_update_slot); //return Err(Error::RequiresNewerEpoch(latest_epoch)); } @@ -1054,6 +1241,101 @@ async fn process_job( // Insert data to DB after successful onchain sync committee verification //insert_verified_sync_committee(&db_client, job.slot, sync_committee_hash).await?; } + JobType::EpochBatchUpdate => { + let proof = EpochUpdateBatch::new_by_slot(&bankai, &db_client, job.slot).await?; + + CairoRunner::generate_pie(&proof, &bankai.config)?; + let batch_id = bankai.atlantic_client.submit_batch(proof).await?; + + info!( + "[EPOCH JOB] Proof generation batch submitted to Atlantic. QueryID: {}", + batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; + set_atlantic_job_queryid( + &db_client, + job.job_id, + batch_id.clone(), + AtlanticJobType::ProofGeneration, + ) + .await?; + + // Pool for Atlantic execution done + bankai + .atlantic_client + .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) + .await?; + + info!( + "[EPOCH JOB] Proof generation done by Atlantic. 
QueryID: {}", + batch_id + ); + + let proof = bankai + .atlantic_client + .fetch_proof(batch_id.as_str()) + .await?; + + info!( + "[EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", + batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; + + // 5) Submit wrapped proof request + info!("[EPOCH JOB] Sending proof wrapping query to Atlantic.."); + let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; + info!( + "[EPOCH JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", + wrapping_batch_id + ); + + update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; + set_atlantic_job_queryid( + &db_client, + job.job_id, + wrapping_batch_id.clone(), + AtlanticJobType::ProofWrapping, + ) + .await?; + + // Pool for Atlantic execution done + bankai + .atlantic_client + .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) + .await?; + + update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; + + info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. 
Wrapping QueryID: {}", wrapping_batch_id); + + update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; + + // 6) Submit epoch update onchain + info!("[EPOCH JOB] Calling epoch update onchain..."); + //let update = EpochUpdate::from_json::(next_epoch)?; + + // let txhash = bankai + // .starknet_client + // .submit_update(update.expected_circuit_outputs, &bankai.config) + // .await?; + + // set_job_txhash(&db_client, job.job_id, txhash).await?; + + // info!("[EPOCH JOB] Successfully submitted epoch update..."); + + // update_job_status(&db_client, job.job_id, JobStatus::ProofDecommitmentCalled).await?; + + // bankai.starknet_client.get_epoch_proof( + // &self, + // slot: u64, + // config: &BankaiConfig) + + //Insert data to DB after successful onchain epoch verification + // insert_verified_epochs_batch(&db_client, job.slot / 0x2000, epoch_proof).await?; + } } Ok(()) @@ -1162,3 +1444,19 @@ async fn handle_get_latest_verified_slot(State(state): State) -> impl // } // } // } + +async fn handle_get_merkle_paths_for_epoch( + Path(epoch_id): Path, + State(state): State, +) -> impl IntoResponse { + match get_merkle_paths_for_epoch(&state.db_client, epoch_id).await { + Ok(merkle_paths) => { + info!("paths: {:?}", merkle_paths); + Json(merkle_paths); + } + Err(err) => { + error!("Failed to fetch merkle paths epoch: {:?}", err); + Json(json!({ "error": "Failed to fetch latest epoch" })); + } + } +} diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index c95c68b..3975a12 100644 --- a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -1,18 +1,20 @@ +use crate::constants::{SLOTS_PER_EPOCH, TARGET_BATCH_SIZE}; use crate::epoch_update::{EpochUpdate, ExpectedEpochUpdateOutputs}; +use crate::helpers::slot_to_epoch_id; use crate::traits::{Provable, Submittable}; use crate::utils::hashing::get_committee_hash; use crate::utils::merkle::poseidon::{compute_paths, compute_root, hash_path}; use crate::{BankaiClient, 
Error}; use alloy_primitives::FixedBytes; use hex; +use num_traits::ToPrimitive; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use starknet::macros::selector; use starknet_crypto::Felt; use std::fs; - -const TARGET_BATCH_SIZE: u64 = 32; -const SLOTS_PER_EPOCH: u64 = 32; +use tokio_postgres::Client; +use tracing::{error, info, warn}; #[derive(Debug, Serialize, Deserialize)] pub struct EpochUpdateBatch { @@ -39,27 +41,39 @@ impl EpochUpdateBatch { .starknet_client .get_batching_range(&bankai.config) .await?; - println!("Slots in Term: Start {}, End {}", start_slot, end_slot); + info!("Slots in Term: Start {}, End {}", start_slot, end_slot); let epoch_gap = (end_slot - start_slot) / SLOTS_PER_EPOCH; - println!("Available Epochs: {}", epoch_gap); + info!( + "Available Epochs in this Sync Committee period: {}", + epoch_gap + ); // if the gap is smaller then x2 the target size, use the entire gap if epoch_gap >= TARGET_BATCH_SIZE * 2 { end_slot = start_slot + TARGET_BATCH_SIZE * SLOTS_PER_EPOCH; } - println!("Selected Slots: Start {}, End {}", start_slot, end_slot); - println!("Epoch Count: {}", (end_slot - start_slot) / SLOTS_PER_EPOCH); + info!("Selected Slots: Start {}, End {}", start_slot, end_slot); + info!("Epoch Count: {}", (end_slot - start_slot) / SLOTS_PER_EPOCH); let mut epochs = vec![]; // Fetch epochs sequentially from start_slot to end_slot, incrementing by 32 each time let mut current_slot = start_slot; - while current_slot <= end_slot { + while current_slot < end_slot { + // Current slot is the starting slot of epoch + info!( + "Getting data for slot: {} Epoch: {} Epochs batch position {}/{}", + current_slot, + slot_to_epoch_id(current_slot), + epochs.len(), + TARGET_BATCH_SIZE + ); let epoch_update = EpochUpdate::new(&bankai.client, current_slot).await?; epochs.push(epoch_update); current_slot += 32; + //info!("epochspush"); } let circuit_inputs = EpochUpdateBatchInputs { @@ -93,6 +107,104 @@ impl EpochUpdateBatch { Ok(batch) } + + 
pub(crate) async fn new_by_slot( + bankai: &BankaiClient, + db_client: &Client, + slot: u64, + ) -> Result { + let (start_slot, mut end_slot) = bankai + .starknet_client + .get_batching_range_for_slot(&bankai.config, slot) + .await?; + info!("Slots in Term: Start {}, End {}", start_slot, end_slot); + let epoch_gap = (end_slot - start_slot) / SLOTS_PER_EPOCH; + info!( + "Available Epochs in this Sync Committee period: {}", + epoch_gap + ); + + // if the gap is smaller then x2 the target size, use the entire gap + if epoch_gap >= TARGET_BATCH_SIZE * 2 { + end_slot = start_slot + TARGET_BATCH_SIZE * SLOTS_PER_EPOCH; + } + + info!("Selected Slots: Start {}, End {}", start_slot, end_slot); + info!("Epoch Count: {}", (end_slot - start_slot) / SLOTS_PER_EPOCH); + + let mut epochs = vec![]; + + // Fetch epochs sequentially from start_slot to end_slot, incrementing by 32 each time + let mut current_slot = start_slot; + while current_slot < end_slot { + info!( + "Getting data for slot: {} Epoch: {} Epochs batch position {}/{}", + current_slot, + slot_to_epoch_id(current_slot), + epochs.len(), + TARGET_BATCH_SIZE + ); + let epoch_update = EpochUpdate::new(&bankai.client, current_slot).await?; + + epochs.push(epoch_update); + current_slot += 32; + } + + let circuit_inputs = EpochUpdateBatchInputs { + committee_hash: get_committee_hash(epochs[0].circuit_inputs.aggregate_pub.0), + epochs, + }; + + let expected_circuit_outputs = ExpectedEpochBatchOutputs::from_inputs(&circuit_inputs); + + let epoch_hashes = circuit_inputs + .epochs + .iter() + .map(|epoch| epoch.expected_circuit_outputs.hash()) + .collect::>(); + + let (root, paths) = compute_paths(epoch_hashes.clone()); + + // Verify each path matches the root + current_slot = start_slot; + for (index, path) in paths.iter().enumerate() { + let computed_root = hash_path(epoch_hashes[index], path, index); + if computed_root != root { + panic!("Path {} does not match root", index); + } + // Insert merkle paths to database + let 
current_epoch = slot_to_epoch_id(current_slot); + for (path_index, current_path) in path.iter().enumerate() { + match db_client + .execute( + "INSERT INTO epoch_merkle_paths (epoch_id, path_index, merkle_path) VALUES ($1, $2, $3)", + &[¤t_epoch.to_i32(), &path_index.to_i32(), ¤t_path.to_string()], + ) + .await + { + // Insert new job record to DB + Ok((status)) => { + // Merkle path inserted + } + Err(e) => { + // Failed to insert merkle path + error!("Unable to insert merkle path for epoch to database, {}", e); + } + }; + } + current_slot += 32; + } + + info!("Paths {:?}", paths); + + let batch = EpochUpdateBatch { + circuit_inputs, + expected_circuit_outputs, + merkle_paths: paths, + }; + + Ok(batch) + } } impl Provable for EpochUpdateBatch { diff --git a/client-rs/src/epoch_update.rs b/client-rs/src/epoch_update.rs index 8c1c58e..af6525a 100644 --- a/client-rs/src/epoch_update.rs +++ b/client-rs/src/epoch_update.rs @@ -15,6 +15,7 @@ use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use starknet::{core::types::Felt, macros::selector}; use starknet_crypto::poseidon_hash_many; +use tracing::info; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; @@ -181,7 +182,10 @@ impl EpochCircuitInputs { return Err(Error::EmptySlotDetected(slot)); } slot += 1; - println!("Empty slot detected! Attempt {}/{}. Fetching slot: {}", attempts, MAX_ATTEMPTS, slot); + info!( + "Empty slot detected! Attempt {}/{}. 
Fetching slot: {}", + attempts, MAX_ATTEMPTS, slot + ); } Err(e) => return Err(e), // Propagate other errors immediately } diff --git a/client-rs/src/helpers.rs b/client-rs/src/helpers.rs new file mode 100644 index 0000000..57f3ad4 --- /dev/null +++ b/client-rs/src/helpers.rs @@ -0,0 +1,9 @@ +use crate::constants::{SLOTS_PER_EPOCH, SLOTS_PER_SYNC_COMMITTEE}; + +pub fn slot_to_epoch_id(slot: u64) -> u64 { + slot / SLOTS_PER_EPOCH +} + +pub fn slot_to_sync_committee_id(slot: u64) -> u64 { + slot / SLOTS_PER_SYNC_COMMITTEE +} diff --git a/client-rs/src/main.rs b/client-rs/src/main.rs index dbeeb4a..5ab0781 100644 --- a/client-rs/src/main.rs +++ b/client-rs/src/main.rs @@ -1,8 +1,10 @@ mod config; +mod constants; mod contract_init; pub mod epoch_batch; mod epoch_update; mod execution_header; +mod helpers; mod sync_committee; mod traits; mod utils; @@ -27,6 +29,8 @@ use utils::{ use clap::{Parser, Subcommand}; use dotenv::from_filename; use std::env; +use tracing::Level; +use tracing_subscriber::FmtSubscriber; #[derive(Debug)] pub enum Error { @@ -100,7 +104,10 @@ impl BankaiClient { return Err(Error::EmptySlotDetected(slot)); } slot += 1; - println!("Empty slot detected! Attempt {}/{}. Fetching slot: {}", attempts, MAX_ATTEMPTS, slot); + println!( + "Empty slot detected! Attempt {}/{}. 
Fetching slot: {}", + attempts, MAX_ATTEMPTS, slot + ); } Err(e) => return Err(e), // Propagate other errors immediately } @@ -207,6 +214,13 @@ async fn main() -> Result<(), Error> { // Load .env.sepolia file from_filename(".env.sepolia").ok(); + let subscriber = FmtSubscriber::builder() + //.with_max_level(Level::DEBUG) + .with_max_level(Level::INFO) + .finish(); + + tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); + let cli = Cli::parse(); let bankai = BankaiClient::new().await; @@ -317,9 +331,10 @@ async fn main() -> Result<(), Error> { println!("Batch Submitted: {}", batch_id); } Commands::ProveNextEpochBatch => { - let proof = EpochUpdateBatch::new(&bankai).await?; - CairoRunner::generate_pie(&proof, &bankai.config)?; - let batch_id = bankai.atlantic_client.submit_batch(proof).await?; + let epoch_update = EpochUpdateBatch::new(&bankai).await?; + println!("Update contents: {:?}", epoch_update); + CairoRunner::generate_pie(&epoch_update, &bankai.config)?; + let batch_id = bankai.atlantic_client.submit_batch(epoch_update).await?; println!("Batch Submitted: {}", batch_id); } Commands::VerifyEpoch { batch_id, slot } => { diff --git a/client-rs/src/utils/starknet_client.rs b/client-rs/src/utils/starknet_client.rs index 08ea7da..30798da 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -184,10 +184,22 @@ impl StarknetClient { config: &BankaiConfig, ) -> Result<(u64, u64), StarknetError> { let latest_epoch_slot = self.get_latest_epoch_slot(config).await?; - let next_epoch = (u64::try_from(latest_epoch_slot).unwrap() / 32) * 32 + 32; - let term = next_epoch / 0x2000; - let terms_last_epoch = (term + 1) * 0x2000 - 32; - Ok((next_epoch, terms_last_epoch)) + let next_epoch_slot = (u64::try_from(latest_epoch_slot).unwrap() / 32) * 32 + 32; + let term = next_epoch_slot / 0x2000; + let terms_last_epoch_slot = (term + 1) * 0x2000 - 32; + Ok((next_epoch_slot, 
terms_last_epoch_slot)) + } + + // Computes the slot numbers for term of specified slot + pub async fn get_batching_range_for_slot( + &self, + config: &BankaiConfig, + slot: u64, + ) -> Result<(u64, u64), StarknetError> { + let next_epoch_slot = (u64::try_from(slot).unwrap() / 32) * 32 + 32; + let term = next_epoch_slot / 0x2000; + let terms_last_epoch_slot = (term + 1) * 0x2000 - 32; + Ok((next_epoch_slot, terms_last_epoch_slot)) } pub async fn get_latest_committee_id( diff --git a/contract/src/lib.cairo b/contract/src/lib.cairo index ce1bec7..5ff1de2 100644 --- a/contract/src/lib.cairo +++ b/contract/src/lib.cairo @@ -15,7 +15,7 @@ pub struct EpochProof { #[starknet::interface] pub trait IBankaiContract { fn get_committee_hash(self: @TContractState, committee_id: u64) -> u256; - fn get_latest_epoch(self: @TContractState) -> u64; + fn get_latest_epoch_slot(self: @TContractState) -> u64; fn get_latest_committee_id(self: @TContractState) -> u64; fn get_committee_update_program_hash(self: @TContractState) -> felt252; fn get_epoch_update_program_hash(self: @TContractState) -> felt252; @@ -126,7 +126,7 @@ pub mod BankaiContract { epochs: Map::, // maps beacon slot to header root and state root batches: Map::, // Available batch roots owner: ContractAddress, - latest_epoch: u64, + latest_epoch_slot: u64, latest_committee_id: u64, initialization_committee: u64, committee_update_program_hash: felt252, @@ -313,7 +313,7 @@ pub mod BankaiContract { } - + fn compute_committee_proof_fact_hash( self: @ContractState, beacon_state_root: u256, committee_hash: u256, slot: u64, ) -> felt252 { @@ -371,7 +371,7 @@ pub mod BankaiContract { SHARP_BOOTLOADER_PROGRAM_HASH, self.epoch_batch_program_hash.read(), [ - batch_root, header_root.low.into(), + batch_root, header_root.low.into(), header_root.high.into(), state_root.low.into(), state_root.high.into(), slot.into(), committee_hash.low.into(), committee_hash.high.into(), n_signers.into(), execution_hash.low.into(), From 
4d389847b318e5db7b5bbab92a4c971a78ab45ff Mon Sep 17 00:00:00 2001 From: lakewik Date: Mon, 13 Jan 2025 11:41:35 +0100 Subject: [PATCH 09/66] Working merkle paths retrieval for epoch --- client-rs/src/daemon.rs | 11 +++++++---- client-rs/src/epoch_batch.rs | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 0ea859b..a2b014e 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -431,7 +431,7 @@ async fn main() -> Result<(), Box> { //.route("/get-epoch-proof/:slot", get(handle_get_epoch_proof)) //.route("/get-committee-hash/:committee_id", get(handle_get_committee_hash)) .route( - "/get_merkle_paths_for_epoch/:slot", + "/get_merkle_paths_for_epoch/:epoch_id", get(handle_get_merkle_paths_for_epoch), ) .route( @@ -1451,12 +1451,15 @@ async fn handle_get_merkle_paths_for_epoch( ) -> impl IntoResponse { match get_merkle_paths_for_epoch(&state.db_client, epoch_id).await { Ok(merkle_paths) => { - info!("paths: {:?}", merkle_paths); - Json(merkle_paths); + if merkle_paths.len() > 0 { + Json(json!({ "epoch_id": epoch_id, "merkle_paths": merkle_paths })) + } else { + Json(json!({ "error": "Epoch not available now" })) + } } Err(err) => { error!("Failed to fetch merkle paths epoch: {:?}", err); - Json(json!({ "error": "Failed to fetch latest epoch" })); + Json(json!({ "error": "Failed to fetch latest epoch" })) } } } diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index 3975a12..13c567d 100644 --- a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -178,7 +178,7 @@ impl EpochUpdateBatch { match db_client .execute( "INSERT INTO epoch_merkle_paths (epoch_id, path_index, merkle_path) VALUES ($1, $2, $3)", - &[¤t_epoch.to_i32(), &path_index.to_i32(), ¤t_path.to_string()], + &[¤t_epoch.to_i32(), &path_index.to_i32(), ¤t_path.to_hex_string()], ) .await { From 81169f93bfb27ff9f2ebfae24356cfd206283455 Mon Sep 17 00:00:00 2001 From: lakewik Date: Mon, 13 Jan 
2025 13:43:18 +0100 Subject: [PATCH 10/66] Spawn blocking thead to avoid blocking other I/O like axum requests by trace generation --- client-rs/src/daemon.rs | 315 +++++++++++------------- client-rs/src/epoch_batch.rs | 22 +- client-rs/src/epoch_update.rs | 2 + client-rs/src/helpers.rs | 37 ++- client-rs/src/utils/cairo_runner.rs | 32 ++- client-rs/src/utils/database_manager.rs | 57 +++++ client-rs/src/utils/starknet_client.rs | 12 - 7 files changed, 255 insertions(+), 222 deletions(-) create mode 100644 client-rs/src/utils/database_manager.rs diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index a2b014e..aa5f301 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -418,7 +418,7 @@ async fn main() -> Result<(), Box> { // let db_client_for_task =db_client.clone(); - let tx_for_task = tx.clone(); + let tx_for_listener = tx.clone(); let app_state: AppState = AppState { db_client: db_client_for_state, @@ -489,154 +489,24 @@ async fn main() -> Result<(), Box> { if let Some(json_data) = extract_json(&event_text) { match serde_json::from_str::(&json_data) { Ok(parsed_event) => { - //let is_node_in_sync = false; - let bankai = bankai_for_listener.clone(); - let epoch_id = helpers::slot_to_epoch_id(parsed_event.slot); let sync_committee_id = helpers::slot_to_sync_committee_id( parsed_event.slot, ); - info!( "New slot event detected: {} | Block: {} | Epoch: {} | Sync committee: {} | Is epoch transition: {}", parsed_event.slot, parsed_event.block, epoch_id, sync_committee_id, parsed_event.epoch_transition ); - let latest_epoch_slot = bankai - .starknet_client - .get_latest_epoch_slot(&bankai.config) - .await - .unwrap() - .to_u64() - .unwrap(); - - let latest_verified_epoch_id = - helpers::slot_to_epoch_id(latest_epoch_slot); - let epochs_behind = epoch_id - latest_verified_epoch_id; - - // We getting the last slot in progress to determine next slots to prove - let mut last_slot_in_progress: u64 = 1000000; - match 
get_latest_slot_id_in_progress( - &db_client_for_listener.clone(), + handle_beacon_chain_head_event( + parsed_event, + bankai_for_listener.clone(), + db_client_for_listener.clone(), + tx_for_listener.clone(), ) - .await - { - Ok(Some(slot)) => { - last_slot_in_progress = slot.to_u64().unwrap(); - info!( - "Latest in progress slot: {} Epoch: {}", - last_slot_in_progress, - helpers::slot_to_epoch_id( - last_slot_in_progress - ) - ); - } - Ok(None) => { - warn!("No any in progress slot"); - } - Err(e) => { - error!( - "Error while getting latest in progress slot ID: {}", - e - ); - } - } - - if epochs_behind > constants::TARGET_BATCH_SIZE { - // is_node_in_sync = true; - - warn!( - "Bankai is out of sync now. Node is {} epochs behind network. Current Beacon Chain epoch: {} Latest verified epoch: {} Sync in progress...", - epochs_behind, epoch_id, latest_verified_epoch_id - ); - - match run_batch_update_job( - db_client_for_listener.clone(), - last_slot_in_progress - + (constants::SLOTS_PER_EPOCH - * constants::TARGET_BATCH_SIZE), - tx_for_task.clone(), - ) - .await - { - // Insert new job record to DB - Ok(()) => {} - Err(e) => {} - }; - - // let epoch_update = EpochUpdateBatch::new_by_slot( - // &bankai, - // &db_client_for_listener.clone(), - // last_slot_in_progress - // + constants::SLOTS_PER_EPOCH, - // ) - // .await?; - } - - // Check if sync committee update is needed - //sync_committee_id - - if latest_epoch_slot - % constants::SLOTS_PER_SYNC_COMMITTEE - == 0 - {} - - //return; - - // When we doing EpochBatchUpdate the slot is latest_batch_output - // So for each batch update we takin into account effectiviely the latest slot from given batch - - let db_client = db_client_for_listener.clone(); - - // evaluete_jobs_statuses(); - // broadcast_ready_jobs(); - - // We can do all circuit computations up to latest slot in advance, but the onchain broadcasts must be send in correct order - // By correct order mean that within the same sync committe the epochs are not 
needed to be broadcasted in order - // but the order of sync_commite_update->epoch_update must be correct, we firstly need to have correct sync committe veryfied - // before we verify epoch "belonging" to this sync committee - - if parsed_event.epoch_transition { - info!("Beacon Chain epoch transition detected. New epoch: {} | Starting processing epoch proving...", epoch_id); - - // Check also now if slot is the moment of switch to new sync committee set - if parsed_event.slot - % constants::SLOTS_PER_SYNC_COMMITTEE - == 0 - { - info!("Beacon Chain sync committee rotation occured. Slot {} | Sync committee id: {}", parsed_event.slot, sync_committee_id); - } - - let job_id = Uuid::new_v4(); - let job = Job { - job_id: job_id.clone(), - job_type: JobType::EpochBatchUpdate, - job_status: JobStatus::Created, - slot: parsed_event.slot, // It is the last slot for given batch - }; - - let db_client = db_client_for_listener.clone(); - match create_job(db_client, job.clone()).await { - // Insert new job record to DB - Ok(()) => { - // Handle success - info!( - "Job created successfully with ID: {}", - job_id - ); - if tx_for_task.send(job).await.is_err() { - error!("Failed to send job."); - } - // If starting committee update job, first ensule that the corresponding slot is registered in contract - } - Err(e) => { - // Handle the error - error!("Error creating job: {}", e); - } - } - } + .await; } Err(err) => { warn!("Failed to parse JSON data: {}", err); @@ -662,6 +532,132 @@ async fn main() -> Result<(), Box> { Ok(()) } +async fn handle_beacon_chain_head_event( + parsed_event: HeadEvent, + bankai: Arc, + db_client: Arc, + tx: mpsc::Sender, +) -> () { + let epoch_id = helpers::slot_to_epoch_id(parsed_event.slot); + let sync_committee_id = helpers::slot_to_sync_committee_id(parsed_event.slot); + + let latest_epoch_slot = bankai + .starknet_client + .get_latest_epoch_slot(&bankai.config) + .await + .unwrap() + .to_u64() + .unwrap(); + + let latest_verified_epoch_id = 
helpers::slot_to_epoch_id(latest_epoch_slot); + let epochs_behind = epoch_id - latest_verified_epoch_id; + + // We getting the last slot in progress to determine next slots to prove + let mut last_slot_in_progress: u64 = 1000000; + match get_latest_slot_id_in_progress(&db_client.clone()).await { + Ok(Some(slot)) => { + last_slot_in_progress = slot.to_u64().unwrap(); + info!( + "Latest in progress slot: {} Epoch: {}", + last_slot_in_progress, + helpers::slot_to_epoch_id(last_slot_in_progress) + ); + } + Ok(None) => { + warn!("No any in progress slot"); + } + Err(e) => { + error!("Error while getting latest in progress slot ID: {}", e); + } + } + + if epochs_behind > constants::TARGET_BATCH_SIZE { + // is_node_in_sync = true; + + warn!( + "Bankai is out of sync now. Node is {} epochs behind network. Current Beacon Chain epoch: {} Latest verified epoch: {} Sync in progress...", + epochs_behind, epoch_id, latest_verified_epoch_id + ); + + match run_batch_update_job( + db_client.clone(), + last_slot_in_progress + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), + tx.clone(), + ) + .await + { + // Insert new job record to DB + Ok(()) => {} + Err(e) => {} + }; + + // let epoch_update = EpochUpdateBatch::new_by_slot( + // &bankai, + // &db_client_for_listener.clone(), + // last_slot_in_progress + // + constants::SLOTS_PER_EPOCH, + // ) + // .await?; + } + + // Check if sync committee update is needed + //sync_committee_id + + if latest_epoch_slot % constants::SLOTS_PER_SYNC_COMMITTEE == 0 {} + + //return; + + // When we doing EpochBatchUpdate the slot is latest_batch_output + // So for each batch update we takin into account effectiviely the latest slot from given batch + + let db_client = db_client.clone(); + + // evaluete_jobs_statuses(); + // broadcast_ready_jobs(); + + // We can do all circuit computations up to latest slot in advance, but the onchain broadcasts must be send in correct order + // By correct order mean that within the same sync committe the 
epochs are not needed to be broadcasted in order + // but the order of sync_commite_update->epoch_update must be correct, we firstly need to have correct sync committe veryfied + // before we verify epoch "belonging" to this sync committee + + // if parsed_event.epoch_transition { + // info!("Beacon Chain epoch transition detected. New epoch: {} | Starting processing epoch proving...", epoch_id); + + // // Check also now if slot is the moment of switch to new sync committee set + // if parsed_event.slot % constants::SLOTS_PER_SYNC_COMMITTEE == 0 { + // info!( + // "Beacon Chain sync committee rotation occured. Slot {} | Sync committee id: {}", + // parsed_event.slot, sync_committee_id + // ); + // } + + // let job_id = Uuid::new_v4(); + // let job = Job { + // job_id: job_id.clone(), + // job_type: JobType::EpochBatchUpdate, + // job_status: JobStatus::Created, + // slot: parsed_event.slot, // It is the last slot for given batch + // }; + + // let db_client = db_client_for_listener.clone(); + // match create_job(db_client, job.clone()).await { + // // Insert new job record to DB + // Ok(()) => { + // // Handle success + // info!("Job created successfully with ID: {}", job_id); + // if tx_for_task.send(job).await.is_err() { + // error!("Failed to send job."); + // } + // // If starting committee update job, first ensule that the corresponding slot is registered in contract + // } + // Err(e) => { + // // Handle the error + // error!("Error creating job: {}", e); + // } + // } + // } +} + async fn run_batch_update_job( db_client: Arc, slot: u64, @@ -703,7 +699,7 @@ async fn set_atlantic_job_queryid( AtlanticJobType::ProofGeneration => { client .execute( - "UPDATE jobs SET atlantic_batch_id_proof_generation = $1, updated_at = NOW() WHERE job_uuid = $2", + "UPDATE jobs SET atlantic_proof_generate_batch_id = $1, updated_at = NOW() WHERE job_uuid = $2", &[&batch_id.to_string(), &job_id], ) .await?; @@ -711,7 +707,7 @@ async fn set_atlantic_job_queryid( 
AtlanticJobType::ProofWrapping => { client .execute( - "UPDATE jobs SET atlantic_batch_id_proof_wrapping = $1, updated_at = NOW() WHERE job_uuid = $2", + "UPDATE jobs SET atlantic_proof_wrapper_batch_id = $1, updated_at = NOW() WHERE job_uuid = $2", &[&batch_id.to_string(), &job_id], ) .await?; @@ -723,41 +719,6 @@ async fn set_atlantic_job_queryid( Ok(()) } -async fn insert_verified_epoch( - client: &Client, - epoch_id: u64, - epoch_proof: EpochProof, -) -> Result<(), Box> { - client - .execute( - "INSERT INTO verified_epoch (epoch_id, header_root, state_root, n_signers) VALUES ($1)", - &[ - &epoch_id.to_string(), - &epoch_proof.header_root.to_string(), - &epoch_proof.state_root.to_string(), - &epoch_proof.n_signers.to_string(), - ], - ) - .await?; - - Ok(()) -} - -async fn insert_verified_sync_committee( - client: &Client, - sync_committee_id: u64, - sync_committee_hash: FixedBytes<32>, -) -> Result<(), Box> { - client - .execute( - "INSERT INTO verified_sync_committee (sync_committee_id, sync_committee_hash) VALUES ($1)", - &[&sync_committee_id.to_string(), &sync_committee_hash.to_string()], - ) - .await?; - - Ok(()) -} - async fn create_job( client: Arc, job: Job, @@ -1002,7 +963,7 @@ async fn process_job( next_epoch ); - CairoRunner::generate_pie(&proof, &bankai.config)?; + CairoRunner::generate_pie(&proof, &bankai.config).await?; info!( "[EPOCH JOB] Pie generated successfully for Epoch: {}...", @@ -1149,7 +1110,7 @@ async fn process_job( latest_committee_id ); - CairoRunner::generate_pie(&update, &bankai.config)?; + CairoRunner::generate_pie(&update, &bankai.config).await?; update_job_status(&db_client, job.job_id, JobStatus::PieGenerated).await?; @@ -1244,7 +1205,7 @@ async fn process_job( JobType::EpochBatchUpdate => { let proof = EpochUpdateBatch::new_by_slot(&bankai, &db_client, job.slot).await?; - CairoRunner::generate_pie(&proof, &bankai.config)?; + CairoRunner::generate_pie(&proof, &bankai.config).await?; let batch_id = 
bankai.atlantic_client.submit_batch(proof).await?; info!( diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index 13c567d..4ddcea4 100644 --- a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -1,6 +1,6 @@ use crate::constants::{SLOTS_PER_EPOCH, TARGET_BATCH_SIZE}; use crate::epoch_update::{EpochUpdate, ExpectedEpochUpdateOutputs}; -use crate::helpers::slot_to_epoch_id; +use crate::helpers::{calculate_slots_range_for_batch, slot_to_epoch_id}; use crate::traits::{Provable, Submittable}; use crate::utils::hashing::get_committee_hash; use crate::utils::merkle::poseidon::{compute_paths, compute_root, hash_path}; @@ -113,25 +113,7 @@ impl EpochUpdateBatch { db_client: &Client, slot: u64, ) -> Result { - let (start_slot, mut end_slot) = bankai - .starknet_client - .get_batching_range_for_slot(&bankai.config, slot) - .await?; - info!("Slots in Term: Start {}, End {}", start_slot, end_slot); - let epoch_gap = (end_slot - start_slot) / SLOTS_PER_EPOCH; - info!( - "Available Epochs in this Sync Committee period: {}", - epoch_gap - ); - - // if the gap is smaller then x2 the target size, use the entire gap - if epoch_gap >= TARGET_BATCH_SIZE * 2 { - end_slot = start_slot + TARGET_BATCH_SIZE * SLOTS_PER_EPOCH; - } - - info!("Selected Slots: Start {}, End {}", start_slot, end_slot); - info!("Epoch Count: {}", (end_slot - start_slot) / SLOTS_PER_EPOCH); - + let (start_slot, end_slot) = calculate_slots_range_for_batch(slot); let mut epochs = vec![]; // Fetch epochs sequentially from start_slot to end_slot, incrementing by 32 each time diff --git a/client-rs/src/epoch_update.rs b/client-rs/src/epoch_update.rs index af6525a..a7b7289 100644 --- a/client-rs/src/epoch_update.rs +++ b/client-rs/src/epoch_update.rs @@ -24,6 +24,8 @@ pub struct EpochProof { pub header_root: FixedBytes<32>, pub state_root: FixedBytes<32>, pub n_signers: u64, + pub execution_hash: FixedBytes<32>, + pub execution_height: u64, } #[derive(Debug, Serialize, 
Deserialize)] diff --git a/client-rs/src/helpers.rs b/client-rs/src/helpers.rs index 57f3ad4..fce6dd2 100644 --- a/client-rs/src/helpers.rs +++ b/client-rs/src/helpers.rs @@ -1,4 +1,8 @@ -use crate::constants::{SLOTS_PER_EPOCH, SLOTS_PER_SYNC_COMMITTEE}; +use crate::{ + constants::{SLOTS_PER_EPOCH, SLOTS_PER_SYNC_COMMITTEE, TARGET_BATCH_SIZE}, + Error, +}; +use tracing::info; pub fn slot_to_epoch_id(slot: u64) -> u64 { slot / SLOTS_PER_EPOCH @@ -7,3 +11,34 @@ pub fn slot_to_epoch_id(slot: u64) -> u64 { pub fn slot_to_sync_committee_id(slot: u64) -> u64 { slot / SLOTS_PER_SYNC_COMMITTEE } + +pub fn calculate_slots_range_for_batch(first_slot: u64) -> (u64, u64) { + let start_slot = (u64::try_from(first_slot).unwrap() / 32) * 32 + 32; + let term = start_slot / 0x2000; + let mut end_slot = (term + 1) * 0x2000 - 32; + + info!("Slots in Term: Start {}, End {}", start_slot, end_slot); + let epoch_gap = (end_slot - start_slot) / SLOTS_PER_EPOCH; + info!( + "Available Epochs in this Sync Committee period: {}", + epoch_gap + ); + + // if the gap is smaller then x2 the target size, use the entire gap + if epoch_gap >= TARGET_BATCH_SIZE * 2 { + end_slot = start_slot + TARGET_BATCH_SIZE * SLOTS_PER_EPOCH; + } + + info!("Selected Slots: Start {}, End {}", start_slot, end_slot); + info!("Epoch Count: {}", (end_slot - start_slot) / SLOTS_PER_EPOCH); + + (start_slot, end_slot) +} + +// Computes the slot numbers for term of specified slot +pub async fn calculate_batching_range_for_slot(slot: u64) -> Result<(u64, u64), Error> { + let next_epoch_slot = (u64::try_from(slot).unwrap() / 32) * 32 + 32; + let term = next_epoch_slot / 0x2000; + let terms_last_epoch_slot = (term + 1) * 0x2000 - 32; + Ok((next_epoch_slot, terms_last_epoch_slot)) +} diff --git a/client-rs/src/utils/cairo_runner.rs b/client-rs/src/utils/cairo_runner.rs index 792fd33..f3a9e28 100644 --- a/client-rs/src/utils/cairo_runner.rs +++ b/client-rs/src/utils/cairo_runner.rs @@ -1,12 +1,14 @@ use crate::traits::ProofType; 
use crate::BankaiConfig; use crate::{traits::Provable, Error}; +use tokio::task; +use tokio::task::JoinError; use tracing::info; pub struct CairoRunner(); impl CairoRunner { - pub fn generate_pie(input: &impl Provable, config: &BankaiConfig) -> Result<(), Error> { + pub async fn generate_pie(input: &impl Provable, config: &BankaiConfig) -> Result<(), Error> { let input_path = input.export()?; let program_path = match input.proof_type() { @@ -19,17 +21,23 @@ impl CairoRunner { info!("Generating trace..."); let start_time = std::time::Instant::now(); - // Execute cairo-run command - let output = std::process::Command::new("../venv/bin/cairo-run") - .arg("--program") - .arg(program_path) - .arg("--program_input") - .arg(input_path) - .arg("--cairo_pie_output") - .arg(pie_path) - .arg("--layout=all_cairo") - .output() - .map_err(|e| Error::CairoRunError(format!("Failed to execute commands: {}", e)))?; + // Offload the blocking command execution to a dedicated thread + let output = task::spawn_blocking(move || { + std::process::Command::new("../venv/bin/cairo-run") + .arg("--program") + .arg(&program_path) + .arg("--program_input") + .arg(&input_path) + .arg("--cairo_pie_output") + .arg(&pie_path) + .arg("--layout=all_cairo") + .output() + .map_err(|e| Error::CairoRunError(format!("Failed to execute commands: {}", e))) + }) + .await + .map_err(|join_err: JoinError| { + Error::CairoRunError(format!("spawn_blocking failed: {}", join_err)) + })??; let duration = start_time.elapsed(); diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs new file mode 100644 index 0000000..d82bb9b --- /dev/null +++ b/client-rs/src/utils/database_manager.rs @@ -0,0 +1,57 @@ +impl DatabaseManager { + pub async fn new(db_url: &str) -> Result> { + let (client, connection) = tokio_postgres::connect(db_url, NoTls).await?; + + // Spawn a task to handle the connection so it is always polled + tokio::spawn(async move { + if let Err(e) = connection.await { + 
eprintln!("Database connection error: {}", e); + } + }); + + Ok(Self { client }) + } + + /// Inserts a verified epoch into the `verified_epoch` table. + pub async fn insert_verified_epoch( + &self, + epoch_id: u64, + epoch_proof: EpochProof, + ) -> Result<(), Box> { + self.client + .execute( + "INSERT INTO verified_epoch (epoch_id, header_root, state_root, n_signers) + VALUES ($1, $2, $3, $4)", + &[ + &epoch_id.to_string(), + &epoch_proof.header_root.to_string(), + &epoch_proof.state_root.to_string(), + &epoch_proof.n_signers.to_string(), + &epoch_proof.execution_hash.to_string(), + &epoch_proof.execution_height.to_string(), + ], + ) + .await?; + + Ok(()) + } + + pub async fn insert_verified_sync_committee( + &self, + sync_committee_id: u64, + sync_committee_hash: FixedBytes<32>, + ) -> Result<(), Box> { + self.client + .execute( + "INSERT INTO verified_sync_committee (sync_committee_id, sync_committee_hash) + VALUES ($1, $2)", + &[ + &sync_committee_id.to_string(), + &sync_committee_hash.to_string(), + ], + ) + .await?; + + Ok(()) + } +} diff --git a/client-rs/src/utils/starknet_client.rs b/client-rs/src/utils/starknet_client.rs index 30798da..d6b896c 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -190,18 +190,6 @@ impl StarknetClient { Ok((next_epoch_slot, terms_last_epoch_slot)) } - // Computes the slot numbers for term of specified slot - pub async fn get_batching_range_for_slot( - &self, - config: &BankaiConfig, - slot: u64, - ) -> Result<(u64, u64), StarknetError> { - let next_epoch_slot = (u64::try_from(slot).unwrap() / 32) * 32 + 32; - let term = next_epoch_slot / 0x2000; - let terms_last_epoch_slot = (term + 1) * 0x2000 - 32; - Ok((next_epoch_slot, terms_last_epoch_slot)) - } - pub async fn get_latest_committee_id( &self, config: &BankaiConfig, From 336c864abbb70f740f1f7d0745220cf7f8f4d8c2 Mon Sep 17 00:00:00 2001 From: lakewik Date: Mon, 13 Jan 2025 14:18:23 +0100 Subject: [PATCH 11/66] Add Semaphore to 
control how many concurrent trace generation jobs are allowed --- client-rs/src/config.rs | 5 +++++ client-rs/src/utils/cairo_runner.rs | 9 +++++++++ client-rs/src/utils/database_manager.rs | 1 - 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/client-rs/src/config.rs b/client-rs/src/config.rs index bc9bf84..3629b0b 100644 --- a/client-rs/src/config.rs +++ b/client-rs/src/config.rs @@ -1,4 +1,6 @@ use starknet::core::types::Felt; +use std::sync::Arc; +use tokio::sync::Semaphore; #[derive(Clone, Debug)] pub struct BankaiConfig { @@ -12,6 +14,7 @@ pub struct BankaiConfig { pub epoch_batch_circuit_path: String, pub committee_circuit_path: String, pub atlantic_endpoint: String, + pub pie_generation_semaphore: Arc, } impl Default for BankaiConfig { @@ -43,6 +46,8 @@ impl Default for BankaiConfig { epoch_batch_circuit_path: "../cairo/build/epoch_batch.json".to_string(), committee_circuit_path: "../cairo/build/committee_update.json".to_string(), atlantic_endpoint: "https://atlantic.api.herodotus.cloud".to_string(), + // Set how many concurrent pie generation (trace generation) tasks are allowed + pie_generation_semaphore: Arc::new(Semaphore::new(3)), // 3 at once } } } diff --git a/client-rs/src/utils/cairo_runner.rs b/client-rs/src/utils/cairo_runner.rs index f3a9e28..819d407 100644 --- a/client-rs/src/utils/cairo_runner.rs +++ b/client-rs/src/utils/cairo_runner.rs @@ -9,6 +9,15 @@ pub struct CairoRunner(); impl CairoRunner { pub async fn generate_pie(input: &impl Provable, config: &BankaiConfig) -> Result<(), Error> { + // Acquire a permit from the semaphore. + // If all permits are in use we will wait until one is available. 
+ let _permit = config + .pie_generation_semaphore + .clone() + .acquire_owned() + .await + .map_err(|e| Error::CairoRunError(format!("Semaphore error: {}", e)))?; + let input_path = input.export()?; let program_path = match input.proof_type() { diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index d82bb9b..b43c77c 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -12,7 +12,6 @@ impl DatabaseManager { Ok(Self { client }) } - /// Inserts a verified epoch into the `verified_epoch` table. pub async fn insert_verified_epoch( &self, epoch_id: u64, From e5837a0d5976a0e8b9c4fdefbaac56ca7a15bdaf Mon Sep 17 00:00:00 2001 From: lakewik Date: Mon, 13 Jan 2025 19:11:10 +0100 Subject: [PATCH 12/66] Divide main to separate modules, improve state handling --- client-rs/Cargo.lock | 18 + client-rs/Cargo.toml | 15 +- client-rs/src/bankai_client.rs | 88 +++++ client-rs/src/config.rs | 2 + client-rs/src/daemon.rs | 427 ++---------------------- client-rs/src/epoch_batch.rs | 8 + client-rs/src/main.rs | 6 +- client-rs/src/routes/mod.rs | 133 ++++++++ client-rs/src/state.rs | 188 +++++++++++ client-rs/src/utils/database_manager.rs | 46 ++- client-rs/src/utils/mod.rs | 1 + 11 files changed, 515 insertions(+), 417 deletions(-) create mode 100644 client-rs/src/bankai_client.rs create mode 100644 client-rs/src/routes/mod.rs create mode 100644 client-rs/src/state.rs diff --git a/client-rs/Cargo.lock b/client-rs/Cargo.lock index 6921c6a..daf951c 100644 --- a/client-rs/Cargo.lock +++ b/client-rs/Cargo.lock @@ -835,6 +835,8 @@ dependencies = [ "tokio", "tokio-postgres", "tokio-stream", + "tower", + "tower-http", "tracing", "tracing-subscriber", "tree_hash 0.8.0", @@ -4296,6 +4298,22 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" +dependencies = [ + "bitflags 2.6.0", + "bytes", + "http 1.2.0", + "http-body 1.0.1", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-layer" version = "0.3.3" diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index 867efdd..b0a9278 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -4,8 +4,8 @@ version = "0.1.0" edition = "2021" [[bin]] -name = "daemon" # Binary name (used with `cargo run --bin bin1`) -path = "src/daemon.rs" # Path to the source file for this binary +name = "daemon" # Binary name (used with `cargo run --bin bin1`) +path = "src/daemon.rs" # Path to the source file for this binary [[bin]] name = "cli" @@ -33,8 +33,7 @@ starknet = "0.12.0" tree_hash_derive = "0.8.0" tree_hash = "0.8.0" dotenv = "0.15" -tokio-postgres = { version = "0.7.12", features = [ - "with-uuid-1" ] } +tokio-postgres = { version = "0.7.12", features = ["with-uuid-1"] } axum = "0.7.9" thiserror = "2.0.9" tracing = "0.1.41" @@ -42,9 +41,9 @@ tracing-subscriber = "0.3.19" tokio-stream = "0.1.17" futures = "0.3" uuid = { version = "1.11.0", features = [ - "v4", - "fast-rng", - "macro-diagnostics" + "v4", + "fast-rng", + "macro-diagnostics", ] } postgres-types = { version = "0.2.8", features = ["derive"] } num_cpus = "1.16.0" @@ -53,3 +52,5 @@ num_cpus = "1.16.0" starknet-crypto = "0.7.3" glob = "0.3.2" num-traits = "0.2.19" +tower = "0.5.2" +tower-http = { version = "0.6.2", features = ["trace"] } diff --git a/client-rs/src/bankai_client.rs b/client-rs/src/bankai_client.rs new file mode 100644 index 0000000..a196d31 --- /dev/null +++ b/client-rs/src/bankai_client.rs @@ -0,0 +1,88 @@ +use crate::{ + contract_init::ContractInitializationData, + epoch_update::EpochUpdate, + state::Error, + sync_committee::SyncCommitteeUpdate, + utils::{ + atlantic_client::AtlanticClient, rpc::BeaconRpcClient, starknet_client::StarknetClient, + }, + BankaiConfig, 
+}; +use dotenv::from_filename; +use std::env; +use tracing::info; + +#[derive(Debug)] +pub struct BankaiClient { + pub client: BeaconRpcClient, + pub starknet_client: StarknetClient, + pub config: BankaiConfig, + pub atlantic_client: AtlanticClient, +} + +impl BankaiClient { + pub async fn new() -> Self { + from_filename(".env.sepolia").ok(); + let config = BankaiConfig::default(); + Self { + client: BeaconRpcClient::new(env::var("BEACON_RPC_URL").unwrap()), + starknet_client: StarknetClient::new( + env::var("STARKNET_RPC_URL").unwrap().as_str(), + env::var("STARKNET_ADDRESS").unwrap().as_str(), + env::var("STARKNET_PRIVATE_KEY").unwrap().as_str(), + ) + .await + .unwrap(), + atlantic_client: AtlanticClient::new( + config.atlantic_endpoint.clone(), + env::var("ATLANTIC_API_KEY").unwrap(), + ), + config, + } + } + + pub async fn get_sync_committee_update( + &self, + mut slot: u64, + ) -> Result { + let mut attempts = 0; + const MAX_ATTEMPTS: u8 = 3; + + // Before we start generating the proof, we ensure the slot was not missed + let _header = loop { + match self.client.get_header(slot).await { + Ok(header) => break header, + Err(Error::EmptySlotDetected(_)) => { + attempts += 1; + if attempts >= MAX_ATTEMPTS { + return Err(Error::EmptySlotDetected(slot)); + } + slot += 1; + info!( + "Empty slot detected! Attempt {}/{}. 
Fetching slot: {}", + attempts, MAX_ATTEMPTS, slot + ); + } + Err(e) => return Err(e), // Propagate other errors immediately + } + }; + + let proof: SyncCommitteeUpdate = SyncCommitteeUpdate::new(&self.client, slot).await?; + + Ok(proof) + } + + pub async fn get_epoch_proof(&self, slot: u64) -> Result { + let epoch_proof = EpochUpdate::new(&self.client, slot).await?; + Ok(epoch_proof) + } + + pub async fn get_contract_initialization_data( + &self, + slot: u64, + config: &BankaiConfig, + ) -> Result { + let contract_init = ContractInitializationData::new(&self.client, slot, config).await?; + Ok(contract_init) + } +} diff --git a/client-rs/src/config.rs b/client-rs/src/config.rs index 3629b0b..41d5ebd 100644 --- a/client-rs/src/config.rs +++ b/client-rs/src/config.rs @@ -15,6 +15,7 @@ pub struct BankaiConfig { pub committee_circuit_path: String, pub atlantic_endpoint: String, pub pie_generation_semaphore: Arc, + pub epoch_data_fetching_semaphore: Arc, } impl Default for BankaiConfig { @@ -48,6 +49,7 @@ impl Default for BankaiConfig { atlantic_endpoint: "https://atlantic.api.herodotus.cloud".to_string(), // Set how many concurrent pie generation (trace generation) tasks are allowed pie_generation_semaphore: Arc::new(Semaphore::new(3)), // 3 at once + epoch_data_fetching_semaphore: Arc::new(Semaphore::new(2)), // 2 at once } } } diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index aa5f301..ac11d39 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -1,3 +1,4 @@ +mod bankai_client; mod config; mod constants; mod contract_init; @@ -5,10 +6,11 @@ pub mod epoch_batch; mod epoch_update; mod execution_header; mod helpers; +mod routes; +mod state; mod sync_committee; mod traits; mod utils; - //use alloy_primitives::TxHash; use alloy_primitives::FixedBytes; use alloy_rpc_types_beacon::events::HeadEvent; @@ -19,26 +21,33 @@ use axum::{ routing::get, Router, }; +use bankai_client::BankaiClient; use config::BankaiConfig; use 
constants::SLOTS_PER_EPOCH; use contract_init::ContractInitializationData; use dotenv::from_filename; use epoch_update::{EpochProof, EpochUpdate}; use num_traits::cast::ToPrimitive; -use postgres_types::{FromSql, ToSql}; use reqwest; use serde_json::json; use starknet::core::types::Felt; +use state::check_env_vars; +use state::{AppState, Job}; +use state::{AtlanticJobType, Error, JobStatus, JobType}; use std::env; use std::sync::Arc; use tokio::sync::mpsc; use tokio::task; use tokio_postgres::{Client, NoTls}; use tokio_stream::StreamExt; +use tower::ServiceBuilder; +use tower_http::trace::TraceLayer; use tracing::{error, info, trace, warn, Level}; use tracing_subscriber::FmtSubscriber; use traits::Provable; -use utils::{atlantic_client::AtlanticClient, cairo_runner::CairoRunner}; +use utils::{ + atlantic_client::AtlanticClient, cairo_runner::CairoRunner, database_manager::DatabaseManager, +}; use utils::{ rpc::BeaconRpcClient, // bankai_client::BankaiClient, @@ -46,262 +55,16 @@ use utils::{ }; //use std::error::Error as StdError; use epoch_batch::EpochUpdateBatch; +use routes::{ + handle_get_epoch_update, handle_get_latest_verified_slot, handle_get_merkle_paths_for_epoch, + handle_get_status, +}; use std::fmt; use std::net::SocketAddr; use sync_committee::SyncCommitteeUpdate; use tokio::time::Duration; use uuid::Uuid; -impl std::fmt::Display for StarknetError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - StarknetError::ProviderError(err) => write!(f, "Provider error: {}", err), - StarknetError::AccountError(msg) => write!(f, "Account error: {}", msg), - } - } -} - -impl std::error::Error for StarknetError {} - -#[derive(Debug, FromSql, ToSql, Clone)] -#[postgres(name = "job_status")] -enum JobStatus { - #[postgres(name = "CREATED")] - Created, - #[postgres(name = "FETCHED_PROOF")] - FetchedProof, - #[postgres(name = "PIE_GENERATED")] - PieGenerated, - #[postgres(name = "OFFCHAIN_PROOF_REQUESTED")] - 
OffchainProofRequested, - #[postgres(name = "OFFCHAIN_PROOF_RETRIEVED")] - OffchainProofRetrieved, - #[postgres(name = "WRAP_PROOF_REQUESTED")] - WrapProofRequested, - #[postgres(name = "WRAPPED_PROOF_DONE")] - WrappedProofDone, - #[postgres(name = "READY_TO_BROADCAST")] - ReadyToBroadcast, - #[postgres(name = "PROOF_VERIFY_CALLED_ONCHAIN")] - ProofVerifyCalledOnchain, - #[postgres(name = "VERIFIED_FACT_REGISTERED")] - VerifiedFactRegistered, - #[postgres(name = "ERROR")] - Error, - #[postgres(name = "CANCELLED")] - Cancelled, -} - -impl ToString for JobStatus { - fn to_string(&self) -> String { - match self { - JobStatus::Created => "CREATED".to_string(), - JobStatus::FetchedProof => "FETCHED_PROOF".to_string(), - JobStatus::PieGenerated => "PIE_GENERATED".to_string(), - JobStatus::OffchainProofRequested => "OFFCHAIN_PROOF_REQUESTED".to_string(), - JobStatus::OffchainProofRetrieved => "OFFCHAIN_PROOF_RETRIEVED".to_string(), - JobStatus::WrapProofRequested => "WRAP_PROOF_REQUESTED".to_string(), - JobStatus::WrappedProofDone => "WRAPPED_PROOF_DONE".to_string(), - JobStatus::ReadyToBroadcast => "READY_TO_BROADCAST".to_string(), - JobStatus::ProofVerifyCalledOnchain => "PROOF_VERIFY_CALLED_ONCHAIN".to_string(), - JobStatus::VerifiedFactRegistered => "VERIFIED_FACT_REGISTERED".to_string(), - JobStatus::Cancelled => "CANCELLED".to_string(), - JobStatus::Error => "ERROR".to_string(), - } - } -} - -#[derive(Debug, FromSql, ToSql, Clone)] -enum JobType { - EpochUpdate, - EpochBatchUpdate, - SyncComiteeUpdate, -} - -#[derive(Debug, FromSql, ToSql)] -enum AtlanticJobType { - ProofGeneration, - ProofWrapping, -} - -#[derive(Debug)] -pub enum Error { - InvalidProof, - RpcError(reqwest::Error), - DeserializeError(String), - IoError(std::io::Error), - StarknetError(StarknetError), - BlockNotFound, - FetchSyncCommitteeError, - FailedFetchingBeaconState, - InvalidBLSPoint, - MissingRpcUrl, - EmptySlotDetected(u64), - RequiresNewerEpoch(Felt), - CairoRunError(String), - 
AtlanticError(reqwest::Error), - InvalidResponse(String), - PoolingTimeout(String), - InvalidMerkleTree, -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Error::InvalidProof => write!(f, "Invalid proof provided"), - Error::RpcError(err) => write!(f, "RPC error: {}", err), - Error::DeserializeError(msg) => write!(f, "Deserialization error: {}", msg), - Error::IoError(err) => write!(f, "I/O error: {}", err), - Error::StarknetError(err) => write!(f, "Starknet error: {}", err), - Error::BlockNotFound => write!(f, "Block not found"), - Error::FetchSyncCommitteeError => write!(f, "Failed to fetch sync committee"), - Error::FailedFetchingBeaconState => write!(f, "Failed to fetch beacon state"), - Error::InvalidBLSPoint => write!(f, "Invalid BLS point"), - Error::MissingRpcUrl => write!(f, "Missing RPC URL"), - Error::EmptySlotDetected(slot) => write!(f, "Empty slot detected: {}", slot), - Error::RequiresNewerEpoch(felt) => write!(f, "Requires newer epoch: {}", felt), - Error::CairoRunError(msg) => write!(f, "Cairo run error: {}", msg), - Error::AtlanticError(err) => write!(f, "Atlantic RPC error: {}", err), - Error::InvalidResponse(msg) => write!(f, "Invalid response: {}", msg), - Error::PoolingTimeout(msg) => write!(f, "Pooling timeout: {}", msg), - Error::InvalidMerkleTree => write!(f, "Invalid Merkle Tree"), - } - } -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::RpcError(err) => Some(err), - Error::IoError(err) => Some(err), - Error::StarknetError(err) => Some(err), - Error::AtlanticError(err) => Some(err), - _ => None, // No underlying source for other variants - } - } -} - -impl From for Error { - fn from(e: StarknetError) -> Self { - Error::StarknetError(e) - } -} - -#[derive(Clone, Debug)] -struct Job { - job_id: Uuid, - job_type: JobType, - job_status: JobStatus, - slot: u64, -} - -#[derive(Clone, Debug)] -struct 
AppState { - db_client: Arc, - tx: mpsc::Sender, - bankai: Arc, -} - -#[derive(Debug)] -struct BankaiClient { - client: BeaconRpcClient, - starknet_client: StarknetClient, - config: BankaiConfig, - atlantic_client: AtlanticClient, -} - -impl BankaiClient { - pub async fn new() -> Self { - from_filename(".env.sepolia").ok(); - let config = BankaiConfig::default(); - Self { - client: BeaconRpcClient::new(env::var("BEACON_RPC_URL").unwrap()), - starknet_client: StarknetClient::new( - env::var("STARKNET_RPC_URL").unwrap().as_str(), - env::var("STARKNET_ADDRESS").unwrap().as_str(), - env::var("STARKNET_PRIVATE_KEY").unwrap().as_str(), - ) - .await - .unwrap(), - atlantic_client: AtlanticClient::new( - config.atlantic_endpoint.clone(), - env::var("ATLANTIC_API_KEY").unwrap(), - ), - config, - } - } - - pub async fn get_sync_committee_update( - &self, - mut slot: u64, - ) -> Result { - let mut attempts = 0; - const MAX_ATTEMPTS: u8 = 3; - - // Before we start generating the proof, we ensure the slot was not missed - let _header = loop { - match self.client.get_header(slot).await { - Ok(header) => break header, - Err(Error::EmptySlotDetected(_)) => { - attempts += 1; - if attempts >= MAX_ATTEMPTS { - return Err(Error::EmptySlotDetected(slot)); - } - slot += 1; - info!( - "Empty slot detected! Attempt {}/{}. 
Fetching slot: {}", - attempts, MAX_ATTEMPTS, slot - ); - } - Err(e) => return Err(e), // Propagate other errors immediately - } - }; - - let proof: SyncCommitteeUpdate = SyncCommitteeUpdate::new(&self.client, slot).await?; - - Ok(proof) - } - - pub async fn get_epoch_proof(&self, slot: u64) -> Result { - let epoch_proof = EpochUpdate::new(&self.client, slot).await?; - Ok(epoch_proof) - } - - pub async fn get_contract_initialization_data( - &self, - slot: u64, - config: &BankaiConfig, - ) -> Result { - let contract_init = ContractInitializationData::new(&self.client, slot, config).await?; - Ok(contract_init) - } -} - -fn check_env_vars() -> Result<(), String> { - let required_vars = [ - "BEACON_RPC_URL", - "STARKNET_RPC_URL", - "STARKNET_ADDRESS", - "STARKNET_PRIVATE_KEY", - "ATLANTIC_API_KEY", - "PROOF_REGISTRY", - "POSTGRESQL_HOST", - "POSTGRESQL_USER", - "POSTGRESQL_PASSWORD", - "POSTGRESQL_DB_NAME", - "RPC_LISTEN_HOST", - "RPC_LISTEN_PORT", - ]; - - for &var in &required_vars { - if env::var(var).is_err() { - return Err(format!("Environment variable `{}` is not set", var)); - } - } - - Ok(()) -} - // Since beacon chain RPCs have different response structure (quicknode responds different than nidereal) we use this event extraction logic fn extract_json(event_text: &str) -> Option { for line in event_text.lines() { @@ -378,6 +141,8 @@ async fn main() -> Result<(), Box> { }; //let db_client_for_task = Arc::new(db_client); + // Create a new DatabaseManager + let db_manager = Arc::new(DatabaseManager::new(connection_string).await); let bankai = Arc::new(BankaiClient::new().await); // Clone the Arc for use in async task @@ -445,6 +210,9 @@ async fn main() -> Result<(), Box> { // .route("/debug/get-job-status", get(handle_get_job_status)) // .route("/get-merkle-inclusion-proof", get(handle_get_merkle_inclusion_proof)) .layer(DefaultBodyLimit::disable()) + .layer( + ServiceBuilder::new().layer(TraceLayer::new_for_http()), // Example: for logging/tracing + ) 
.with_state(app_state); let addr = "0.0.0.0:3000".parse::()?; @@ -689,36 +457,6 @@ async fn run_batch_update_job( } } -async fn set_atlantic_job_queryid( - client: &Client, - job_id: Uuid, - batch_id: String, - atlantic_job_type: AtlanticJobType, -) -> Result<(), Box> { - match atlantic_job_type { - AtlanticJobType::ProofGeneration => { - client - .execute( - "UPDATE jobs SET atlantic_proof_generate_batch_id = $1, updated_at = NOW() WHERE job_uuid = $2", - &[&batch_id.to_string(), &job_id], - ) - .await?; - } - AtlanticJobType::ProofWrapping => { - client - .execute( - "UPDATE jobs SET atlantic_proof_wrapper_batch_id = $1, updated_at = NOW() WHERE job_uuid = $2", - &[&batch_id.to_string(), &job_id], - ) - .await?; - } // _ => { - // println!("Unk", status); - // } - } - - Ok(()) -} - async fn create_job( client: Arc, job: Job, @@ -1301,126 +1039,3 @@ async fn process_job( Ok(()) } - -// RPC requests handling functions // - -async fn handle_get_status(State(state): State) -> impl IntoResponse { - Json(json!({ "success": true })) -} - -async fn handle_get_epoch_update( - Path(slot): Path, - State(state): State, -) -> impl IntoResponse { - match state.bankai.get_epoch_proof(slot).await { - Ok(epoch_update) => { - // Convert `EpochUpdate` to `serde_json::Value` - let value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { - eprintln!("Failed to serialize EpochUpdate: {:?}", err); - json!({ "error": "Internal server error" }) - }); - Json(value) - } - Err(err) => { - eprintln!("Failed to fetch proof: {:?}", err); - Json(json!({ "error": "Failed to fetch proof" })) - } - } -} - -// async fn handle_get_epoch_proof( -// Path(slot): Path, -// State(state): State, -// ) -> impl IntoResponse { -// match state.bankai.starknet_client.get_epoch_proof(slot).await { -// Ok(epoch_update) => { -// // Convert `EpochUpdate` to `serde_json::Value` -// let value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { -// eprintln!("Failed to serialize EpochUpdate: {:?}", 
err); -// json!({ "error": "Internal server error" }) -// }); -// Json(value) -// } -// Err(err) => { -// eprintln!("Failed to fetch proof: {:?}", err); -// Json(json!({ "error": "Failed to fetch proof" })) -// } -// } -// } - -// async fn handle_get_committee_hash( -// Path(committee_id): Path, -// State(state): State, -// ) -> impl IntoResponse { -// match state.bankai.starknet_client.get_committee_hash(committee_id).await { -// Ok(committee_hash) => { -// // Convert `EpochUpdate` to `serde_json::Value` -// let value = serde_json::to_value(committee_hash).unwrap_or_else(|err| { -// eprintln!("Failed to serialize EpochUpdate: {:?}", err); -// json!({ "error": "Internal server error" }) -// }); -// Json(value) -// } -// Err(err) => { -// eprintln!("Failed to fetch proof: {:?}", err); -// Json(json!({ "error": "Failed to fetch proof" })) -// } -// } -// } - -async fn handle_get_latest_verified_slot(State(state): State) -> impl IntoResponse { - match state - .bankai - .starknet_client - .get_latest_epoch_slot(&state.bankai.config) - .await - { - Ok(latest_epoch) => { - // Convert `Felt` to a string and parse it as a hexadecimal number - let hex_string = latest_epoch.to_string(); // Ensure this converts to a "0x..." 
string - match u64::from_str_radix(hex_string.trim_start_matches("0x"), 16) { - Ok(decimal_epoch) => Json(json!({ "latest_verified_slot": decimal_epoch })), - Err(err) => { - eprintln!("Failed to parse latest_epoch as decimal: {:?}", err); - Json(json!({ "error": "Invalid epoch format" })) - } - } - } - Err(err) => { - eprintln!("Failed to fetch latest epoch: {:?}", err); - Json(json!({ "error": "Failed to fetch latest epoch" })) - } - } -} - -// async fn handle_get_job_status( -// Path(job_id): Path, -// State(state): State, -// ) -> impl IntoResponse { -// match fetch_job_status(&state.db_client, job_id).await { -// Ok(job_status) => Json(job_status), -// Err(err) => { -// eprintln!("Failed to fetch job status: {:?}", err); -// Json(json!({ "error": "Failed to fetch job status" })) -// } -// } -// } - -async fn handle_get_merkle_paths_for_epoch( - Path(epoch_id): Path, - State(state): State, -) -> impl IntoResponse { - match get_merkle_paths_for_epoch(&state.db_client, epoch_id).await { - Ok(merkle_paths) => { - if merkle_paths.len() > 0 { - Json(json!({ "epoch_id": epoch_id, "merkle_paths": merkle_paths })) - } else { - Json(json!({ "error": "Epoch not available now" })) - } - } - Err(err) => { - error!("Failed to fetch merkle paths epoch: {:?}", err); - Json(json!({ "error": "Failed to fetch latest epoch" })) - } - } -} diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index 4ddcea4..8fda361 100644 --- a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -113,6 +113,14 @@ impl EpochUpdateBatch { db_client: &Client, slot: u64, ) -> Result { + let _permit = bankai + .config + .epoch_data_fetching_semaphore + .clone() + .acquire_owned() + .await + .map_err(|e| Error::CairoRunError(format!("Semaphore error: {}", e)))?; + let (start_slot, end_slot) = calculate_slots_range_for_batch(slot); let mut epochs = vec![]; diff --git a/client-rs/src/main.rs b/client-rs/src/main.rs index 5ab0781..91facbf 100644 --- a/client-rs/src/main.rs 
+++ b/client-rs/src/main.rs @@ -312,7 +312,7 @@ async fn main() -> Result<(), Error> { let update = bankai .get_sync_committee_update(latest_epoch.try_into().unwrap()) .await?; - CairoRunner::generate_pie(&update, &bankai.config)?; + CairoRunner::generate_pie(&update, &bankai.config).await?; let batch_id = bankai.atlantic_client.submit_batch(update).await?; println!("Batch Submitted: {}", batch_id); } @@ -326,14 +326,14 @@ async fn main() -> Result<(), Error> { let next_epoch = (u64::try_from(latest_epoch).unwrap() / 32) * 32 + 32; println!("Fetching Inputs for Epoch: {}", next_epoch); let proof = bankai.get_epoch_proof(next_epoch).await?; - CairoRunner::generate_pie(&proof, &bankai.config)?; + CairoRunner::generate_pie(&proof, &bankai.config).await?; let batch_id = bankai.atlantic_client.submit_batch(proof).await?; println!("Batch Submitted: {}", batch_id); } Commands::ProveNextEpochBatch => { let epoch_update = EpochUpdateBatch::new(&bankai).await?; println!("Update contents: {:?}", epoch_update); - CairoRunner::generate_pie(&epoch_update, &bankai.config)?; + CairoRunner::generate_pie(&epoch_update, &bankai.config).await?; let batch_id = bankai.atlantic_client.submit_batch(epoch_update).await?; println!("Batch Submitted: {}", batch_id); } diff --git a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs new file mode 100644 index 0000000..951f67e --- /dev/null +++ b/client-rs/src/routes/mod.rs @@ -0,0 +1,133 @@ +use crate::state::AppState; +use axum::{ + extract::{Path, State}, + response::IntoResponse, + Json, +}; +use serde_json::{json, Value}; +use tracing::{error, info, trace, warn, Level}; + +// RPC requests handling functions // + +// Handler for GET /status +pub async fn handle_get_status(State(_state): State) -> impl IntoResponse { + Json(json!({ "success": true })) +} + +// Handler for GET /epoch/:slot +pub async fn handle_get_epoch_update( + Path(slot): Path, + State(state): State, +) -> impl IntoResponse { + match 
state.bankai.get_epoch_proof(slot).await { + Ok(epoch_update) => { + // Convert the data to `serde_json::Value` + let value: Value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { + eprintln!("Failed to serialize EpochUpdate: {:?}", err); + json!({ "error": "Internal server error" }) + }); + Json(value) + } + Err(err) => { + eprintln!("Failed to fetch proof: {:?}", err); + Json(json!({ "error": "Failed to fetch proof" })) + } + } +} + +// async fn handle_get_epoch_proof( +// Path(slot): Path, +// State(state): State, +// ) -> impl IntoResponse { +// match state.bankai.starknet_client.get_epoch_proof(slot).await { +// Ok(epoch_update) => { +// // Convert `EpochUpdate` to `serde_json::Value` +// let value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { +// eprintln!("Failed to serialize EpochUpdate: {:?}", err); +// json!({ "error": "Internal server error" }) +// }); +// Json(value) +// } +// Err(err) => { +// eprintln!("Failed to fetch proof: {:?}", err); +// Json(json!({ "error": "Failed to fetch proof" })) +// } +// } +// } + +// async fn handle_get_committee_hash( +// Path(committee_id): Path, +// State(state): State, +// ) -> impl IntoResponse { +// match state.bankai.starknet_client.get_committee_hash(committee_id).await { +// Ok(committee_hash) => { +// // Convert `EpochUpdate` to `serde_json::Value` +// let value = serde_json::to_value(committee_hash).unwrap_or_else(|err| { +// eprintln!("Failed to serialize EpochUpdate: {:?}", err); +// json!({ "error": "Internal server error" }) +// }); +// Json(value) +// } +// Err(err) => { +// eprintln!("Failed to fetch proof: {:?}", err); +// Json(json!({ "error": "Failed to fetch proof" })) +// } +// } +// } + +pub async fn handle_get_latest_verified_slot(State(state): State) -> impl IntoResponse { + match state + .bankai + .starknet_client + .get_latest_epoch_slot(&state.bankai.config) + .await + { + Ok(latest_epoch) => { + // Convert `Felt` to a string and parse it as a hexadecimal number + let 
hex_string = latest_epoch.to_string(); // Ensure this converts to a "0x..." string + match u64::from_str_radix(hex_string.trim_start_matches("0x"), 16) { + Ok(decimal_epoch) => Json(json!({ "latest_verified_slot": decimal_epoch })), + Err(err) => { + eprintln!("Failed to parse latest_epoch as decimal: {:?}", err); + Json(json!({ "error": "Invalid epoch format" })) + } + } + } + Err(err) => { + eprintln!("Failed to fetch latest epoch: {:?}", err); + Json(json!({ "error": "Failed to fetch latest epoch" })) + } + } +} + +// async fn handle_get_job_status( +// Path(job_id): Path, +// State(state): State, +// ) -> impl IntoResponse { +// match fetch_job_status(&state.db_client, job_id).await { +// Ok(job_status) => Json(job_status), +// Err(err) => { +// eprintln!("Failed to fetch job status: {:?}", err); +// Json(json!({ "error": "Failed to fetch job status" })) +// } +// } +// } + +pub async fn handle_get_merkle_paths_for_epoch( + Path(epoch_id): Path, + State(state): State, +) -> impl IntoResponse { + match get_merkle_paths_for_epoch(&state.db_client, epoch_id).await { + Ok(merkle_paths) => { + if merkle_paths.len() > 0 { + Json(json!({ "epoch_id": epoch_id, "merkle_paths": merkle_paths })) + } else { + Json(json!({ "error": "Epoch not available now" })) + } + } + Err(err) => { + error!("Failed to fetch merkle paths epoch: {:?}", err); + Json(json!({ "error": "Failed to fetch latest epoch" })) + } + } +} diff --git a/client-rs/src/state.rs b/client-rs/src/state.rs new file mode 100644 index 0000000..b7f2640 --- /dev/null +++ b/client-rs/src/state.rs @@ -0,0 +1,188 @@ +use crate::bankai_client::BankaiClient; +use crate::utils::starknet_client::StarknetError; +use postgres_types::{FromSql, ToSql}; +use starknet::core::types::Felt; +use std::env; +use std::fmt; +use std::sync::Arc; +use tokio::sync::mpsc; +use tokio_postgres::Client; +use uuid::Uuid; + +#[derive(Clone, Debug)] +pub struct Job { + pub job_id: Uuid, + pub job_type: JobType, + pub job_status: JobStatus, + 
pub slot: u64, +} + +#[derive(Clone, Debug)] +pub struct AppState { + pub db_client: Arc, + pub tx: mpsc::Sender, + pub bankai: Arc, +} + +#[derive(Debug, FromSql, ToSql, Clone)] +#[postgres(name = "job_status")] +pub enum JobStatus { + #[postgres(name = "CREATED")] + Created, + #[postgres(name = "FETCHED_PROOF")] + FetchedProof, + #[postgres(name = "PIE_GENERATED")] + PieGenerated, + #[postgres(name = "OFFCHAIN_PROOF_REQUESTED")] + OffchainProofRequested, + #[postgres(name = "OFFCHAIN_PROOF_RETRIEVED")] + OffchainProofRetrieved, + #[postgres(name = "WRAP_PROOF_REQUESTED")] + WrapProofRequested, + #[postgres(name = "WRAPPED_PROOF_DONE")] + WrappedProofDone, + #[postgres(name = "READY_TO_BROADCAST")] + ReadyToBroadcast, + #[postgres(name = "PROOF_VERIFY_CALLED_ONCHAIN")] + ProofVerifyCalledOnchain, + #[postgres(name = "VERIFIED_FACT_REGISTERED")] + VerifiedFactRegistered, + #[postgres(name = "ERROR")] + Error, + #[postgres(name = "CANCELLED")] + Cancelled, +} + +impl ToString for JobStatus { + fn to_string(&self) -> String { + match self { + JobStatus::Created => "CREATED".to_string(), + JobStatus::FetchedProof => "FETCHED_PROOF".to_string(), + JobStatus::PieGenerated => "PIE_GENERATED".to_string(), + JobStatus::OffchainProofRequested => "OFFCHAIN_PROOF_REQUESTED".to_string(), + JobStatus::OffchainProofRetrieved => "OFFCHAIN_PROOF_RETRIEVED".to_string(), + JobStatus::WrapProofRequested => "WRAP_PROOF_REQUESTED".to_string(), + JobStatus::WrappedProofDone => "WRAPPED_PROOF_DONE".to_string(), + JobStatus::ReadyToBroadcast => "READY_TO_BROADCAST".to_string(), + JobStatus::ProofVerifyCalledOnchain => "PROOF_VERIFY_CALLED_ONCHAIN".to_string(), + JobStatus::VerifiedFactRegistered => "VERIFIED_FACT_REGISTERED".to_string(), + JobStatus::Cancelled => "CANCELLED".to_string(), + JobStatus::Error => "ERROR".to_string(), + } + } +} + +#[derive(Debug, FromSql, ToSql, Clone)] +pub enum JobType { + EpochUpdate, + EpochBatchUpdate, + SyncComiteeUpdate, +} + +#[derive(Debug, FromSql, 
ToSql)] +pub enum AtlanticJobType { + ProofGeneration, + ProofWrapping, +} + +// Checking status of env vars +pub fn check_env_vars() -> Result<(), String> { + let required_vars = [ + "BEACON_RPC_URL", + "STARKNET_RPC_URL", + "STARKNET_ADDRESS", + "STARKNET_PRIVATE_KEY", + "ATLANTIC_API_KEY", + "PROOF_REGISTRY", + "POSTGRESQL_HOST", + "POSTGRESQL_USER", + "POSTGRESQL_PASSWORD", + "POSTGRESQL_DB_NAME", + "RPC_LISTEN_HOST", + "RPC_LISTEN_PORT", + ]; + + for &var in &required_vars { + if env::var(var).is_err() { + return Err(format!("Environment variable `{}` is not set", var)); + } + } + + Ok(()) +} + +/// Errors types + +impl std::fmt::Display for StarknetError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + StarknetError::ProviderError(err) => write!(f, "Provider error: {}", err), + StarknetError::AccountError(msg) => write!(f, "Account error: {}", msg), + } + } +} + +impl std::error::Error for StarknetError {} + +#[derive(Debug)] +pub enum Error { + InvalidProof, + RpcError(reqwest::Error), + DeserializeError(String), + IoError(std::io::Error), + StarknetError(StarknetError), + BlockNotFound, + FetchSyncCommitteeError, + FailedFetchingBeaconState, + InvalidBLSPoint, + MissingRpcUrl, + EmptySlotDetected(u64), + RequiresNewerEpoch(Felt), + CairoRunError(String), + AtlanticError(reqwest::Error), + InvalidResponse(String), + PoolingTimeout(String), + InvalidMerkleTree, +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Error::InvalidProof => write!(f, "Invalid proof provided"), + Error::RpcError(err) => write!(f, "RPC error: {}", err), + Error::DeserializeError(msg) => write!(f, "Deserialization error: {}", msg), + Error::IoError(err) => write!(f, "I/O error: {}", err), + Error::StarknetError(err) => write!(f, "Starknet error: {}", err), + Error::BlockNotFound => write!(f, "Block not found"), + Error::FetchSyncCommitteeError => write!(f, "Failed to fetch sync 
committee"), + Error::FailedFetchingBeaconState => write!(f, "Failed to fetch beacon state"), + Error::InvalidBLSPoint => write!(f, "Invalid BLS point"), + Error::MissingRpcUrl => write!(f, "Missing RPC URL"), + Error::EmptySlotDetected(slot) => write!(f, "Empty slot detected: {}", slot), + Error::RequiresNewerEpoch(felt) => write!(f, "Requires newer epoch: {}", felt), + Error::CairoRunError(msg) => write!(f, "Cairo run error: {}", msg), + Error::AtlanticError(err) => write!(f, "Atlantic RPC error: {}", err), + Error::InvalidResponse(msg) => write!(f, "Invalid response: {}", msg), + Error::PoolingTimeout(msg) => write!(f, "Pooling timeout: {}", msg), + Error::InvalidMerkleTree => write!(f, "Invalid Merkle Tree"), + } + } +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::RpcError(err) => Some(err), + Error::IoError(err) => Some(err), + Error::StarknetError(err) => Some(err), + Error::AtlanticError(err) => Some(err), + _ => None, // No underlying source for other variants + } + } +} + +impl From for Error { + fn from(e: StarknetError) -> Self { + Error::StarknetError(e) + } +} diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index b43c77c..b03c7a4 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -1,3 +1,15 @@ +use crate::epoch_update::{EpochProof, EpochUpdate}; +use crate::state::{AtlanticJobType, JobStatus, JobType}; +use alloy_primitives::FixedBytes; +use std::error::Error; +use tokio_postgres::{Client, NoTls}; +use tracing::{error, info}; +use uuid::Uuid; + +pub struct DatabaseManager { + client: Client, +} + impl DatabaseManager { pub async fn new(db_url: &str) -> Result> { let (client, connection) = tokio_postgres::connect(db_url, NoTls).await?; @@ -5,10 +17,12 @@ impl DatabaseManager { // Spawn a task to handle the connection so it is always polled tokio::spawn(async move { if let 
Err(e) = connection.await { - eprintln!("Database connection error: {}", e); + error!("Database connection error: {}", e); } }); + info!("Successfully connected to the database!"); + Ok(Self { client }) } @@ -53,4 +67,34 @@ impl DatabaseManager { Ok(()) } + + pub async fn set_atlantic_job_queryid( + &self, + job_id: Uuid, + batch_id: String, + atlantic_job_type: AtlanticJobType, + ) -> Result<(), Box> { + match atlantic_job_type { + AtlanticJobType::ProofGeneration => { + self.client + .execute( + "UPDATE jobs SET atlantic_proof_generate_batch_id = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&batch_id.to_string(), &job_id], + ) + .await?; + } + AtlanticJobType::ProofWrapping => { + self.client + .execute( + "UPDATE jobs SET atlantic_proof_wrapper_batch_id = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&batch_id.to_string(), &job_id], + ) + .await?; + } // _ => { + // println!("Unk", status); + // } + } + + Ok(()) + } } diff --git a/client-rs/src/utils/mod.rs b/client-rs/src/utils/mod.rs index 896e84c..22fecea 100644 --- a/client-rs/src/utils/mod.rs +++ b/client-rs/src/utils/mod.rs @@ -1,5 +1,6 @@ pub mod atlantic_client; pub mod cairo_runner; +pub mod database_manager; pub mod hashing; pub mod merkle; pub mod rpc; From 6ad446847f0058aa0d41c751c0a8ec4725c7e59b Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 15 Jan 2025 13:15:51 +0100 Subject: [PATCH 13/66] Structurize code & create DatabaseManager --- client-rs/src/daemon.rs | 297 ++++-------------------- client-rs/src/epoch_batch.rs | 24 +- client-rs/src/routes/mod.rs | 2 +- client-rs/src/state.rs | 5 +- client-rs/src/utils/database_manager.rs | 213 +++++++++++++++-- 5 files changed, 261 insertions(+), 280 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index ac11d39..4fed0c3 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -59,7 +59,6 @@ use routes::{ handle_get_epoch_update, handle_get_latest_verified_slot, handle_get_merkle_paths_for_epoch, 
handle_get_status, }; -use std::fmt; use std::net::SocketAddr; use sync_committee::SyncCommitteeUpdate; use tokio::time::Duration; @@ -104,66 +103,43 @@ async fn main() -> Result<(), Box> { //let (tx, mut rx) = mpsc::channel(32); - let connection_string = "host=localhost user=meow password=meow dbname=bankai"; - // let connection_string = format!( - // "host={} user={} password={} dbname={}", - // env::var("POSTGRESQL_HOST").unwrap().as_str(), - // env::var("POSTGRESQL_USER").unwrap().as_str(), - // env::var("POSTGRESQL_PASSWORD").unwrap().as_str(), - // env::var("POSTGRESQL_DB_NAME").unwrap().as_str() - // ); - let _connection_result: Result< - ( - Client, - tokio_postgres::Connection, - ), - tokio_postgres::Error, - > = tokio_postgres::connect(connection_string, NoTls).await; - - let db_client = match tokio_postgres::connect(connection_string, NoTls).await { - Ok((client, connection)) => { - // Spawn a task to manage the connection - tokio::spawn(async move { - if let Err(e) = connection.await { - eprintln!("Connection error: {}", e); - } - }); - - info!("Connected to the database successfully!"); - - // Wrap the client in an Arc for shared ownership - Arc::new(client) - } - Err(err) => { - error!("Failed to connect to the database: {}", err); - std::process::exit(1); // Exit with a non-zero status code - } - }; + let connection_string = format!( + "host={} user={} password={} dbname={}", + env::var("POSTGRESQL_HOST").unwrap().as_str(), + env::var("POSTGRESQL_USER").unwrap().as_str(), + env::var("POSTGRESQL_PASSWORD").unwrap().as_str(), + env::var("POSTGRESQL_DB_NAME").unwrap().as_str() + ); - //let db_client_for_task = Arc::new(db_client); // Create a new DatabaseManager - let db_manager = Arc::new(DatabaseManager::new(connection_string).await); + let db_manager = Arc::new(DatabaseManager::new(&connection_string).await); let bankai = Arc::new(BankaiClient::new().await); - // Clone the Arc for use in async task - //let bankai_for_task = Arc::clone(&bankai); - // 
Beacon node endpoint construction for ervents + // Beacon node endpoint construction for events let events_endpoint = format!( "{}/eth/v1/events?topics=head", env::var("BEACON_RPC_URL").unwrap().as_str() ); + //let events_endpoint = format!("{}/eth/v1/events?topics=head", beacon_node_url) - let db_client_for_state = db_client.clone(); - let db_client_for_listener = db_client.clone(); - let bankai_for_state = bankai.clone(); + let db_manager_for_listener = db_manager.clone(); let bankai_for_listener = bankai.clone(); + + let tx_for_listener = tx.clone(); + + let app_state: AppState = AppState { + db_manager: db_manager.clone(), + tx, + bankai: bankai.clone(), + }; + //Spawn a background task to process jobs tokio::spawn(async move { while let Some(job) = rx.recv().await { let job_id = job.job_id; - let db_clone = Arc::clone(&db_client); + let db_clone = db_manager.clone(); let bankai_clone = Arc::clone(&bankai); // Spawn a *new task* for each job — now they can run in parallel @@ -173,7 +149,7 @@ async fn main() -> Result<(), Box> { info!("Job {} completed successfully", job_id); } Err(e) => { - update_job_status(&db_clone, job_id, JobStatus::Error).await; + db_clone.update_job_status(job_id, JobStatus::Error).await; error!("Error processing job {}: {}", job_id, e); } } @@ -181,16 +157,6 @@ async fn main() -> Result<(), Box> { } }); - // let db_client_for_task =db_client.clone(); - - let tx_for_listener = tx.clone(); - - let app_state: AppState = AppState { - db_client: db_client_for_state, - tx, - bankai: bankai_for_state, - }; - let app = Router::new() .route("/status", get(handle_get_status)) //.route("/get-epoch-proof/:slot", get(handle_get_epoch_proof)) @@ -271,7 +237,7 @@ async fn main() -> Result<(), Box> { handle_beacon_chain_head_event( parsed_event, bankai_for_listener.clone(), - db_client_for_listener.clone(), + db_manager_for_listener.clone(), tx_for_listener.clone(), ) .await; @@ -303,7 +269,7 @@ async fn main() -> Result<(), Box> { async fn 
handle_beacon_chain_head_event( parsed_event: HeadEvent, bankai: Arc, - db_client: Arc, + db_manager: Arc, tx: mpsc::Sender, ) -> () { let epoch_id = helpers::slot_to_epoch_id(parsed_event.slot); @@ -322,7 +288,7 @@ async fn handle_beacon_chain_head_event( // We getting the last slot in progress to determine next slots to prove let mut last_slot_in_progress: u64 = 1000000; - match get_latest_slot_id_in_progress(&db_client.clone()).await { + match db_manager.get_latest_slot_id_in_progress().await { Ok(Some(slot)) => { last_slot_in_progress = slot.to_u64().unwrap(); info!( @@ -348,7 +314,7 @@ async fn handle_beacon_chain_head_event( ); match run_batch_update_job( - db_client.clone(), + db_manager.clone(), last_slot_in_progress + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), tx.clone(), ) @@ -378,7 +344,7 @@ async fn handle_beacon_chain_head_event( // When we doing EpochBatchUpdate the slot is latest_batch_output // So for each batch update we takin into account effectiviely the latest slot from given batch - let db_client = db_client.clone(); + //let db_client = db_client.clone(); // evaluete_jobs_statuses(); // broadcast_ready_jobs(); @@ -427,7 +393,7 @@ async fn handle_beacon_chain_head_event( } async fn run_batch_update_job( - db_client: Arc, + db_manager: Arc, slot: u64, tx: mpsc::Sender, ) -> Result<(), Box> { @@ -439,7 +405,7 @@ async fn run_batch_update_job( slot, }; - match create_job(db_client, job.clone()).await { + match db_manager.create_job(job.clone()).await { // Insert new job record to DB Ok(()) => { // Handle success @@ -457,168 +423,7 @@ async fn run_batch_update_job( } } -async fn create_job( - client: Arc, - job: Job, -) -> Result<(), Box> { - client - .execute( - "INSERT INTO jobs (job_uuid, job_status, slot, type) VALUES ($1, $2, $3, $4)", - &[ - &job.job_id, - &job.job_status.to_string(), - &(job.slot as i64), - &"EPOCH_UPDATE", - ], - ) - .await?; - - Ok(()) -} - -async fn fetch_job_status( - client: &Client, - job_id: Uuid, -) 
-> Result, Box> { - let row_opt = client - .query_opt("SELECT status FROM jobs WHERE job_id = $1", &[&job_id]) - .await?; - - Ok(row_opt.map(|row| row.get("status"))) -} - -pub async fn get_latest_slot_id_in_progress( - client: &Client, -) -> Result, Box> { - // Query the latest slot with job_status in ('in_progress', 'initialized') - let row_opt = client - .query_opt( - "SELECT slot FROM jobs - WHERE job_status IN ($1, $2) - ORDER BY slot DESC - LIMIT 1", - &[&"CREATED", &"PIE_GENERATED"], - ) - .await?; - - // Extract and return the slot ID - if let Some(row) = row_opt { - Ok(Some(row.get::<_, i64>("slot"))) - } else { - Ok(None) - } -} - -pub async fn get_merkle_paths_for_epoch( - client: &Client, - epoch_id: i32, -) -> Result, Box> { - // Query all merkle paths for the given epoch_id - let rows = client - .query( - "SELECT merkle_path FROM epoch_merkle_paths - WHERE epoch_id = $1 - ORDER BY path_index ASC", - &[&epoch_id], - ) - .await?; - - let paths: Vec = rows - .iter() - .map(|row| row.get::<_, String>("merkle_path")) - .collect(); - - Ok(paths) -} - -async fn update_job_status( - client: &Client, - job_id: Uuid, - new_status: JobStatus, -) -> Result<(), Box> { - client - .execute( - "UPDATE jobs SET job_status = $1, updated_at = NOW() WHERE job_uuid = $2", - &[&new_status.to_string(), &job_id], - ) - .await?; - Ok(()) -} -async fn set_job_txhash( - client: &Client, - job_id: Uuid, - txhash: Felt, -) -> Result<(), Box> { - client - .execute( - "UPDATE jobs SET tx_hash = $1, updated_at = NOW() WHERE job_uuid = $2", - &[&txhash.to_string(), &job_id], - ) - .await?; - Ok(()) -} - -async fn cancell_all_unfinished_jobs( - client: &Client, -) -> Result<(), Box> { - client - .execute( - "UPDATE jobs SET status = $1, updated_at = NOW() WHERE status = 'FETCHING'", - &[&JobStatus::Cancelled.to_string()], - ) - .await?; - Ok(()) -} - -// async fn fetch_job_by_status( -// client: &Client, -// status: JobStatus, -// ) -> Result, Box> { -// let tx = 
client.transaction().await?; - -// let row_opt = tx -// .query_opt( -// r#" -// SELECT job_id, status -// FROM jobs -// WHERE status = $1 -// ORDER BY updated_at ASC -// LIMIT 1 -// FOR UPDATE SKIP LOCKED -// "#, -// &[&status], -// ) -// .await?; - -// let job = if let Some(row) = row_opt { -// Some(Job { -// job_id: row.get("job_id"), -// job_type: row.get("type"), -// job_status: row.get("status"), -// slot: row.get("slot"), -// }) -// } else { -// None -// }; - -// tx.commit().await?; -// Ok(job) -// } - -// async fn add_verified_epoch( -// client: Arc, -// slot: u64, -// ) -> Result<(), Box> { -// client -// .execute( -// "INSERT INTO verified_epochs (slot, job_status, slot, type) VALUES ($1, $2, $3, $4)", -// &[&slot, &status.to_string(), &(slot as i64), &"EPOCH_UPDATE"], -// ) -// .await?; - -// Ok(()) -// } // async fn worker_task(mut rx: Receiver, db_client: Client) -> Result<(), Box> { // while let Some(job_id) = rx.recv().await { @@ -651,7 +456,7 @@ async fn cancell_all_unfinished_jobs( // mpsc jobs // async fn process_job( job: Job, - db_client: Arc, + db_manager: Arc, bankai: Arc, ) -> Result<(), Box> { match job.job_type { @@ -693,7 +498,7 @@ async fn process_job( next_epoch ); - update_job_status(&db_client, job.job_id, JobStatus::FetchedProof).await?; + db_manager.update_job_status(job.job_id, JobStatus::FetchedProof).await?; // 3) Generate PIE info!( @@ -708,7 +513,7 @@ async fn process_job( next_epoch ); - update_job_status(&db_client, job.job_id, JobStatus::PieGenerated).await?; + db_manager.update_job_status(job.job_id, JobStatus::PieGenerated).await?; // // 4) Submit offchain proof-generation job to Atlantic // info!("[EPOCH JOB] Sending proof generation query to Atlantic..."); @@ -850,7 +655,7 @@ async fn process_job( CairoRunner::generate_pie(&update, &bankai.config).await?; - update_job_status(&db_client, job.job_id, JobStatus::PieGenerated).await?; + db_manager.update_job_status(job.job_id, JobStatus::PieGenerated).await?; info!( "[SYNC 
COMMITTEE JOB] Pie generated successfully for Sync Committee: {}...", @@ -860,9 +665,8 @@ async fn process_job( let batch_id = bankai.atlantic_client.submit_batch(update).await?; - update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; - set_atlantic_job_queryid( - &db_client, + db_manager.update_job_status(job.job_id, JobStatus::OffchainProofRequested).await?; + db_manager.set_atlantic_job_queryid( job.job_id, batch_id.clone(), AtlanticJobType::ProofGeneration, @@ -895,7 +699,7 @@ async fn process_job( batch_id ); - update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; + db_manager.update_job_status(job.job_id, JobStatus::OffchainProofRetrieved).await?; // 5) Submit wrapped proof request info!("[SYNC COMMITTEE JOB] Sending proof wrapping query to Atlantic.."); @@ -905,9 +709,8 @@ async fn process_job( wrapping_batch_id ); - update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; - set_atlantic_job_queryid( - &db_client, + db_manager.update_job_status(job.job_id, JobStatus::WrapProofRequested).await?; + db_manager.set_atlantic_job_queryid( job.job_id, wrapping_batch_id.clone(), AtlanticJobType::ProofWrapping, @@ -920,11 +723,11 @@ async fn process_job( .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) .await?; - update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; + db_manager.update_job_status(job.job_id, JobStatus::WrappedProofDone).await?; info!("[SYNC COMMITTEE JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. 
Wrapping QueryID: {}", wrapping_batch_id); - update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; + db_manager.update_job_status(job.job_id, JobStatus::VerifiedFactRegistered).await?; let update = SyncCommitteeUpdate::from_json::(job.slot)?; @@ -935,13 +738,13 @@ async fn process_job( .submit_update(update.expected_circuit_outputs, &bankai.config) .await?; - set_job_txhash(&db_client, job.job_id, txhash).await?; + db_manager.set_job_txhash(job.job_id, txhash).await?; // Insert data to DB after successful onchain sync committee verification //insert_verified_sync_committee(&db_client, job.slot, sync_committee_hash).await?; } JobType::EpochBatchUpdate => { - let proof = EpochUpdateBatch::new_by_slot(&bankai, &db_client, job.slot).await?; + let proof = EpochUpdateBatch::new_by_slot(&bankai, db_manager.clone(), job.slot).await?; CairoRunner::generate_pie(&proof, &bankai.config).await?; let batch_id = bankai.atlantic_client.submit_batch(proof).await?; @@ -951,9 +754,8 @@ async fn process_job( batch_id ); - update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; - set_atlantic_job_queryid( - &db_client, + db_manager.update_job_status(job.job_id, JobStatus::OffchainProofRequested).await?; + db_manager.set_atlantic_job_queryid( job.job_id, batch_id.clone(), AtlanticJobType::ProofGeneration, @@ -981,7 +783,7 @@ async fn process_job( batch_id ); - update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; + db_manager.update_job_status(job.job_id, JobStatus::OffchainProofRetrieved).await?; // 5) Submit wrapped proof request info!("[EPOCH JOB] Sending proof wrapping query to Atlantic.."); @@ -991,9 +793,8 @@ async fn process_job( wrapping_batch_id ); - update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; - set_atlantic_job_queryid( - &db_client, + db_manager.update_job_status(job.job_id, JobStatus::WrapProofRequested).await?; + 
db_manager.set_atlantic_job_queryid( job.job_id, wrapping_batch_id.clone(), AtlanticJobType::ProofWrapping, @@ -1006,11 +807,11 @@ async fn process_job( .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) .await?; - update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; + db_manager.update_job_status(job.job_id, JobStatus::WrappedProofDone).await?; info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); - update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; + db_manager.update_job_status(job.job_id, JobStatus::VerifiedFactRegistered).await?; // 6) Submit epoch update onchain info!("[EPOCH JOB] Calling epoch update onchain..."); diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index 8fda361..f94a4b5 100644 --- a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -15,6 +15,11 @@ use starknet_crypto::Felt; use std::fs; use tokio_postgres::Client; use tracing::{error, info, warn}; +use crate::utils::{ + database_manager::DatabaseManager, +}; +use std::sync::Arc; + #[derive(Debug, Serialize, Deserialize)] pub struct EpochUpdateBatch { @@ -110,7 +115,7 @@ impl EpochUpdateBatch { pub(crate) async fn new_by_slot( bankai: &BankaiClient, - db_client: &Client, + db_manager: Arc, slot: u64, ) -> Result { let _permit = bankai @@ -165,22 +170,7 @@ impl EpochUpdateBatch { // Insert merkle paths to database let current_epoch = slot_to_epoch_id(current_slot); for (path_index, current_path) in path.iter().enumerate() { - match db_client - .execute( - "INSERT INTO epoch_merkle_paths (epoch_id, path_index, merkle_path) VALUES ($1, $2, $3)", - &[¤t_epoch.to_i32(), &path_index.to_i32(), ¤t_path.to_hex_string()], - ) - .await - { - // Insert new job record to DB - Ok((status)) => { - // Merkle path inserted - } - Err(e) => { - // Failed to insert merkle path - error!("Unable to insert merkle 
path for epoch to database, {}", e); - } - }; + db_manager.insert_merkle_path_for_epoch(current_epoch.to_i32().unwrap(), path_index.to_i32().unwrap(), current_path.to_hex_string()); } current_slot += 32; } diff --git a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs index 951f67e..eb0c878 100644 --- a/client-rs/src/routes/mod.rs +++ b/client-rs/src/routes/mod.rs @@ -117,7 +117,7 @@ pub async fn handle_get_merkle_paths_for_epoch( Path(epoch_id): Path, State(state): State, ) -> impl IntoResponse { - match get_merkle_paths_for_epoch(&state.db_client, epoch_id).await { + match state.db_manager.get_merkle_paths_for_epoch(epoch_id).await { Ok(merkle_paths) => { if merkle_paths.len() > 0 { Json(json!({ "epoch_id": epoch_id, "merkle_paths": merkle_paths })) diff --git a/client-rs/src/state.rs b/client-rs/src/state.rs index b7f2640..e70e923 100644 --- a/client-rs/src/state.rs +++ b/client-rs/src/state.rs @@ -1,5 +1,8 @@ use crate::bankai_client::BankaiClient; use crate::utils::starknet_client::StarknetError; +use crate::utils::{ + database_manager::DatabaseManager, +}; use postgres_types::{FromSql, ToSql}; use starknet::core::types::Felt; use std::env; @@ -19,7 +22,7 @@ pub struct Job { #[derive(Clone, Debug)] pub struct AppState { - pub db_client: Arc, + pub db_manager: Arc, pub tx: mpsc::Sender, pub bankai: Arc, } diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index b03c7a4..89d9cab 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -1,30 +1,39 @@ use crate::epoch_update::{EpochProof, EpochUpdate}; -use crate::state::{AtlanticJobType, JobStatus, JobType}; +use crate::state::{AtlanticJobType, JobStatus, JobType, Job}; use alloy_primitives::FixedBytes; use std::error::Error; use tokio_postgres::{Client, NoTls}; use tracing::{error, info}; use uuid::Uuid; +use starknet::core::types::Felt; +#[derive(Debug)] pub struct DatabaseManager { client: Client, } impl 
DatabaseManager { - pub async fn new(db_url: &str) -> Result> { - let (client, connection) = tokio_postgres::connect(db_url, NoTls).await?; - - // Spawn a task to handle the connection so it is always polled - tokio::spawn(async move { - if let Err(e) = connection.await { - error!("Database connection error: {}", e); + pub async fn new(db_url: &str) -> Self { + let client = match tokio_postgres::connect(db_url, tokio_postgres::NoTls).await { + Ok((client, connection)) => { + tokio::spawn(async move { + if let Err(e) = connection.await { + eprintln!("Connection error: {}", e); + } + }); + + info!("Connected to the database successfully!"); + client } - }); - - info!("Successfully connected to the database!"); - - Ok(Self { client }) + Err(err) => { + error!("Failed to connect to the database: {}", err); + std::process::exit(1); // Exit with non-zero status code + } + }; + + Self { client } } + pub async fn insert_verified_epoch( &self, @@ -97,4 +106,182 @@ impl DatabaseManager { Ok(()) } + + pub async fn create_job( + &self, + job: Job, + ) -> Result<(), Box> { + self.client + .execute( + "INSERT INTO jobs (job_uuid, job_status, slot, type) VALUES ($1, $2, $3, $4)", + &[ + &job.job_id, + &job.job_status.to_string(), + &(job.slot as i64), + &"EPOCH_UPDATE", + ], + ) + .await?; + + Ok(()) + } + + pub async fn fetch_job_status( + &self, + job_id: Uuid, + ) -> Result, Box> { + let row_opt = self.client + .query_opt("SELECT status FROM jobs WHERE job_id = $1", &[&job_id]) + .await?; + + Ok(row_opt.map(|row| row.get("status"))) + } + + pub async fn get_latest_slot_id_in_progress( + &self, + ) -> Result, Box> { + // Query the latest slot with job_status in ('in_progress', 'initialized') + let row_opt = self.client + .query_opt( + "SELECT slot FROM jobs + WHERE job_status IN ($1, $2) + ORDER BY slot DESC + LIMIT 1", + &[&"CREATED", &"PIE_GENERATED"], + ) + .await?; + + // Extract and return the slot ID + if let Some(row) = row_opt { + Ok(Some(row.get::<_, i64>("slot"))) + } 
else { + Ok(None) + } + } + + pub async fn get_merkle_paths_for_epoch( + &self, + epoch_id: i32, + ) -> Result, Box> { + // Query all merkle paths for the given epoch_id + let rows = self.client + .query( + "SELECT merkle_path FROM epoch_merkle_paths + WHERE epoch_id = $1 + ORDER BY path_index ASC", + &[&epoch_id], + ) + .await?; + + let paths: Vec = rows + .iter() + .map(|row| row.get::<_, String>("merkle_path")) + .collect(); + + Ok(paths) + } + + pub async fn update_job_status( + &self, + job_id: Uuid, + new_status: JobStatus, + ) -> Result<(), Box> { + self.client + .execute( + "UPDATE jobs SET job_status = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&new_status.to_string(), &job_id], + ) + .await?; + Ok(()) + } + + pub async fn set_job_txhash( + &self, + job_id: Uuid, + txhash: Felt, + ) -> Result<(), Box> { + self.client + .execute( + "UPDATE jobs SET tx_hash = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&txhash.to_string(), &job_id], + ) + .await?; + Ok(()) + } + + pub async fn cancell_all_unfinished_jobs( + &self, + ) -> Result<(), Box> { + self.client + .execute( + "UPDATE jobs SET status = $1, updated_at = NOW() WHERE status = 'FETCHING'", + &[&JobStatus::Cancelled.to_string()], + ) + .await?; + Ok(()) + } + + pub async fn insert_merkle_path_for_epoch( + &self, + epoch: i32, + path_index: i32, + path: String + ) -> Result<(), Box> { + self.client + .execute( + "INSERT INTO epoch_merkle_paths (epoch_id, path_index, merkle_path) VALUES ($1, $2, $3)", + &[&epoch, &path_index, &path], + ) + .await?; + Ok(()) + } + + // async fn fetch_job_by_status( + // client: &Client, + // status: JobStatus, + // ) -> Result, Box> { + // let tx = client.transaction().await?; + + // let row_opt = tx + // .query_opt( + // r#" + // SELECT job_id, status + // FROM jobs + // WHERE status = $1 + // ORDER BY updated_at ASC + // LIMIT 1 + // FOR UPDATE SKIP LOCKED + // "#, + // &[&status], + // ) + // .await?; + + // let job = if let Some(row) = row_opt { + // Some(Job { 
+ // job_id: row.get("job_id"), + // job_type: row.get("type"), + // job_status: row.get("status"), + // slot: row.get("slot"), + // }) + // } else { + // None + // }; + + // tx.commit().await?; + // Ok(job) + // } + + // async fn add_verified_epoch( + // client: Arc, + // slot: u64, + // ) -> Result<(), Box> { + // client + // .execute( + // "INSERT INTO verified_epochs (slot, job_status, slot, type) VALUES ($1, $2, $3, $4)", + // &[&slot, &status.to_string(), &(slot as i64), &"EPOCH_UPDATE"], + // ) + // .await?; + + // Ok(()) + // } } From 091b1f8e427ac1efb3c3d1a382b59a9ccbacd0e7 Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 17 Jan 2025 18:11:52 +0100 Subject: [PATCH 14/66] feat: Add worker for transaction broadcasting, major sync committe update improvements --- client-rs/Cargo.lock | 3 + client-rs/Cargo.toml | 6 + client-rs/db_structure.sql | 36 ++ client-rs/src/bankai_client.rs | 1 + client-rs/src/config.rs | 2 +- client-rs/src/constants.rs | 3 +- client-rs/src/daemon.rs | 428 +++++++++++++++++------- client-rs/src/epoch_batch.rs | 71 ++-- client-rs/src/epoch_update.rs | 20 +- client-rs/src/helpers.rs | 16 +- client-rs/src/main.rs | 185 +++++----- client-rs/src/routes/mod.rs | 18 +- client-rs/src/state.rs | 73 +++- client-rs/src/sync_committee.rs | 18 +- client-rs/src/traits.rs | 6 +- client-rs/src/utils/database_manager.rs | 155 +++++++-- client-rs/src/utils/rpc.rs | 8 +- client-rs/src/utils/starknet_client.rs | 9 + 18 files changed, 739 insertions(+), 319 deletions(-) create mode 100644 client-rs/db_structure.sql diff --git a/client-rs/Cargo.lock b/client-rs/Cargo.lock index daf951c..e4b1bcd 100644 --- a/client-rs/Cargo.lock +++ b/client-rs/Cargo.lock @@ -749,8 +749,10 @@ checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", + "js-sys", "num-traits", "serde", + "wasm-bindgen", "windows-targets 0.52.6", ] @@ -813,6 +815,7 @@ dependencies = [ "axum", "beacon-state-proof", 
"bls12_381", + "chrono", "clap", "dotenv", "ethereum_serde_utils 0.7.0", diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index b0a9278..eb5bec4 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -3,6 +3,10 @@ name = "client-rs" version = "0.1.0" edition = "2021" +[features] +daemon = [] +cli = [] + [[bin]] name = "daemon" # Binary name (used with `cargo run --bin bin1`) path = "src/daemon.rs" # Path to the source file for this binary @@ -10,6 +14,7 @@ path = "src/daemon.rs" # Path to the source file for this binary [[bin]] name = "cli" path = "src/main.rs" +required-features = ["cli"] [dependencies] @@ -54,3 +59,4 @@ glob = "0.3.2" num-traits = "0.2.19" tower = "0.5.2" tower-http = { version = "0.6.2", features = ["trace"] } +chrono = "0.4.39" diff --git a/client-rs/db_structure.sql b/client-rs/db_structure.sql new file mode 100644 index 0000000..899ec1a --- /dev/null +++ b/client-rs/db_structure.sql @@ -0,0 +1,36 @@ +CREATE TABLE jobs ( + job_uuid UUID PRIMARY KEY, + job_status TEXT NOT NULL, + atlantic_proof_generate_batch_id TEXT NULL, + atlantic_proof_wrapper_batch_id, TEXT NULL, + slot BIGINT NOT NULL, -- Slot associated with the job + batch_range_begin_epoch BIGINT NOT NULL, + batch_range_end_epoch BIGINT NOT NULL, + type TEXT NOT NULL, + updated_at TIMESTAMP DEFAULT NOW(), + created_at TIMESTAMP DEFAULT NOW() +); + + +CREATE TABLE epoch_merkle_paths ( + epoch_id BIGINT NOT NULL, + path_index BIGINT NOT NULL, + merkle_path TEXT NOT NULL, + PRIMARY KEY (epoch_id, path_index) -- Ensures uniqueness of the combination +); + + +CREATE TABLE verified_epoch ( + epoch_id UUID PRIMARY KEY, + header_root TEXT NOT NULL, -- Header root hash of the Beacon chain header + state_root TEXT NOT NULL, -- State root hash of the Beacon chain state + n_signers INTEGER NOT NULL, -- Number of epoch signers + execution_hash TEXT NOT NULL, -- Execution layer blockhash + execution_height BIGINT NOT NULL -- Execution layer height +); + + +CREATE TABLE 
verified_sync_committee ( + sync_committee_id UUID PRIMARY KEY, -- Unique identifier for sync committee (slot number/0x2000) + sync_committee_hash TEXT NOT NULL -- Sync committee hash that we are creating inside bankai +); diff --git a/client-rs/src/bankai_client.rs b/client-rs/src/bankai_client.rs index a196d31..e308a95 100644 --- a/client-rs/src/bankai_client.rs +++ b/client-rs/src/bankai_client.rs @@ -77,6 +77,7 @@ impl BankaiClient { Ok(epoch_proof) } + #[cfg(feature = "cli")] pub async fn get_contract_initialization_data( &self, slot: u64, diff --git a/client-rs/src/config.rs b/client-rs/src/config.rs index 41d5ebd..99e692f 100644 --- a/client-rs/src/config.rs +++ b/client-rs/src/config.rs @@ -48,7 +48,7 @@ impl Default for BankaiConfig { committee_circuit_path: "../cairo/build/committee_update.json".to_string(), atlantic_endpoint: "https://atlantic.api.herodotus.cloud".to_string(), // Set how many concurrent pie generation (trace generation) tasks are allowed - pie_generation_semaphore: Arc::new(Semaphore::new(3)), // 3 at once + pie_generation_semaphore: Arc::new(Semaphore::new(1)), // 3 at once epoch_data_fetching_semaphore: Arc::new(Semaphore::new(2)), // 2 at once } } diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index b6ba121..cdab480 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -1,3 +1,4 @@ pub const SLOTS_PER_EPOCH: u64 = 32; // For mainnet pub const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet -pub const TARGET_BATCH_SIZE: u64 = 3; // Defines how many epochs in one batch +pub const TARGET_BATCH_SIZE: u64 = 32; // Defines how many epochs in one batch +pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 4fed0c3..c2fa477 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -12,24 +12,19 @@ mod sync_committee; mod traits; mod utils; //use alloy_primitives::TxHash; -use alloy_primitives::FixedBytes; use 
alloy_rpc_types_beacon::events::HeadEvent; use axum::{ - extract::{DefaultBodyLimit, Path, State}, + extract::DefaultBodyLimit, //http::{header, StatusCode}, - response::{IntoResponse, Json}, routing::get, Router, }; use bankai_client::BankaiClient; use config::BankaiConfig; -use constants::SLOTS_PER_EPOCH; -use contract_init::ContractInitializationData; +//use constants::SLOTS_PER_EPOCH; use dotenv::from_filename; -use epoch_update::{EpochProof, EpochUpdate}; use num_traits::cast::ToPrimitive; use reqwest; -use serde_json::json; use starknet::core::types::Felt; use state::check_env_vars; use state::{AppState, Job}; @@ -38,20 +33,15 @@ use std::env; use std::sync::Arc; use tokio::sync::mpsc; use tokio::task; -use tokio_postgres::{Client, NoTls}; use tokio_stream::StreamExt; use tower::ServiceBuilder; use tower_http::trace::TraceLayer; -use tracing::{error, info, trace, warn, Level}; +use tracing::{debug, error, info, warn, Level}; use tracing_subscriber::FmtSubscriber; use traits::Provable; use utils::{ - atlantic_client::AtlanticClient, cairo_runner::CairoRunner, database_manager::DatabaseManager, -}; -use utils::{ - rpc::BeaconRpcClient, - // bankai_client::BankaiClient, - starknet_client::{StarknetClient, StarknetError}, + cairo_runner::CairoRunner, + database_manager::{DatabaseManager, JobSchema}, }; //use std::error::Error as StdError; use epoch_batch::EpochUpdateBatch; @@ -91,7 +81,7 @@ async fn main() -> Result<(), Box> { tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); // Validate environment variables - check_env_vars().map_err(|e| { + let _ = check_env_vars().map_err(|e| { error!("Error: {}", e); std::process::exit(1); // Exit if validation fails }); @@ -126,7 +116,6 @@ async fn main() -> Result<(), Box> { let db_manager_for_listener = db_manager.clone(); let bankai_for_listener = bankai.clone(); - let tx_for_listener = tx.clone(); let app_state: AppState = AppState { @@ -149,7 +138,7 @@ async fn main() -> 
Result<(), Box> { info!("Job {} completed successfully", job_id); } Err(e) => { - db_clone.update_job_status(job_id, JobStatus::Error).await; + let _ = db_clone.update_job_status(job_id, JobStatus::Error).await; error!("Error processing job {}: {}", job_id, e); } } @@ -287,14 +276,18 @@ async fn handle_beacon_chain_head_event( let epochs_behind = epoch_id - latest_verified_epoch_id; // We getting the last slot in progress to determine next slots to prove - let mut last_slot_in_progress: u64 = 1000000; + let mut last_slot_in_progress: u64 = 0; + let mut last_epoch_in_progress: u64 = 0; + let mut last_sync_committee_in_progress: u64 = 0; match db_manager.get_latest_slot_id_in_progress().await { Ok(Some(slot)) => { last_slot_in_progress = slot.to_u64().unwrap(); + last_epoch_in_progress = helpers::slot_to_epoch_id(last_slot_in_progress); + last_sync_committee_in_progress = + helpers::slot_to_sync_committee_id(last_slot_in_progress); info!( - "Latest in progress slot: {} Epoch: {}", - last_slot_in_progress, - helpers::slot_to_epoch_id(last_slot_in_progress) + "Latest in progress slot: {} Epoch: {} Sync committee: {}", + last_slot_in_progress, last_epoch_in_progress, last_sync_committee_in_progress ); } Ok(None) => { @@ -305,25 +298,34 @@ async fn handle_beacon_chain_head_event( } } + let latest_verified_sync_committee_id = 1; + if epochs_behind > constants::TARGET_BATCH_SIZE { // is_node_in_sync = true; warn!( - "Bankai is out of sync now. Node is {} epochs behind network. Current Beacon Chain epoch: {} Latest verified epoch: {} Sync in progress...", - epochs_behind, epoch_id, latest_verified_epoch_id + "Bankai is out of sync now. Node is {} epochs behind network. 
Current Beacon Chain state: [Epoch: {} Sync Committee: {}] | Latest verified: [Epoch: {} Sync Committee: {}] | Sync in progress...", + epochs_behind, epoch_id, sync_committee_id, latest_verified_epoch_id, latest_verified_sync_committee_id ); - match run_batch_update_job( - db_manager.clone(), - last_slot_in_progress + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), - tx.clone(), - ) - .await - { - // Insert new job record to DB - Ok(()) => {} - Err(e) => {} - }; + if last_epoch_in_progress < (epoch_id - constants::TARGET_BATCH_SIZE) { + // Check if we have in progress all epochs that need to be processed, if no, run job + match run_batch_epoch_update_job( + db_manager.clone(), + last_slot_in_progress + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), + tx.clone(), + ) + .await + { + // Insert new job record to DB + Ok(()) => {} + Err(e) => { + error!("Error while creating job: {}", e); + } + }; + } else { + debug!("All reqired jobs are now queued and processing"); + } // let epoch_update = EpochUpdateBatch::new_by_slot( // &bankai, @@ -332,6 +334,12 @@ async fn handle_beacon_chain_head_event( // + constants::SLOTS_PER_EPOCH, // ) // .await?; + } else if epochs_behind == constants::TARGET_BATCH_SIZE { + // This is when we are synced properly and new epoch batch needs to be inserted + info!( + "Starting syncing next epoch batch. 
Current Beacon Chain epoch: {} Latest verified epoch: {}", + epoch_id, latest_verified_epoch_id + ); } // Check if sync committee update is needed @@ -346,53 +354,57 @@ async fn handle_beacon_chain_head_event( //let db_client = db_client.clone(); - // evaluete_jobs_statuses(); - // broadcast_ready_jobs(); + //evaluate_jobs_statuses(db_manager.clone()).await; + broadcast_onchain_ready_jobs(db_manager.clone(), bankai.clone()).await; // We can do all circuit computations up to latest slot in advance, but the onchain broadcasts must be send in correct order // By correct order mean that within the same sync committe the epochs are not needed to be broadcasted in order // but the order of sync_commite_update->epoch_update must be correct, we firstly need to have correct sync committe veryfied // before we verify epoch "belonging" to this sync committee - // if parsed_event.epoch_transition { - // info!("Beacon Chain epoch transition detected. New epoch: {} | Starting processing epoch proving...", epoch_id); - - // // Check also now if slot is the moment of switch to new sync committee set - // if parsed_event.slot % constants::SLOTS_PER_SYNC_COMMITTEE == 0 { - // info!( - // "Beacon Chain sync committee rotation occured. 
Slot {} | Sync committee id: {}", - // parsed_event.slot, sync_committee_id - // ); - // } - - // let job_id = Uuid::new_v4(); - // let job = Job { - // job_id: job_id.clone(), - // job_type: JobType::EpochBatchUpdate, - // job_status: JobStatus::Created, - // slot: parsed_event.slot, // It is the last slot for given batch - // }; - - // let db_client = db_client_for_listener.clone(); - // match create_job(db_client, job.clone()).await { - // // Insert new job record to DB - // Ok(()) => { - // // Handle success - // info!("Job created successfully with ID: {}", job_id); - // if tx_for_task.send(job).await.is_err() { - // error!("Failed to send job."); - // } - // // If starting committee update job, first ensule that the corresponding slot is registered in contract - // } - // Err(e) => { - // // Handle the error - // error!("Error creating job: {}", e); - // } - // } - // } + if parsed_event.epoch_transition { + //info!("Beacon Chain epoch transition detected. New epoch: {} | Starting processing epoch proving...", epoch_id); + info!( + "Beacon Chain epoch transition detected. New epoch: {}", + epoch_id + ); + + // Check also now if slot is the moment of switch to new sync committee set + if parsed_event.slot % constants::SLOTS_PER_SYNC_COMMITTEE == 0 { + info!( + "Beacon Chain sync committee rotation occured. 
Slot {} | Sync committee id: {}", + parsed_event.slot, sync_committee_id + ); + } + + // let job_id = Uuid::new_v4(); + // let job = Job { + // job_id: job_id.clone(),evaluate_jobs_statuses + // job_type: JobType::EpochBatchUpdate, + // job_status: JobStatus::Created, + // slot: parsed_event.slot, // It is the last slot for given batch + // }; + + // let db_client = db_client_for_listener.clone(); + // match create_job(db_client, job.clone()).await { + // // Insert new job record to DB + // Ok(()) => { + // // Handle success + // info!("Job created successfully with ID: {}", job_id); + // if tx_for_task.send(job).await.is_err() { + // error!("Failed to send job."); + // } + // // If starting committee update job, first ensule that the corresponding slot is registered in contract + // } + // Err(e) => { + // // Handle the error + // error!("Error creating job: {}", e); + // } + // } + } } -async fn run_batch_update_job( +async fn run_batch_epoch_update_job( db_manager: Arc, slot: u64, tx: mpsc::Sender, @@ -409,7 +421,10 @@ async fn run_batch_update_job( // Insert new job record to DB Ok(()) => { // Handle success - info!("Job created successfully with ID: {}", job_id); + info!( + "[EPOCH BATCH UPDATE] Job created successfully with ID: {}", + job_id + ); if tx.send(job).await.is_err() { return Err("Failed to send job".into()); } @@ -423,7 +438,140 @@ async fn run_batch_update_job( } } +async fn run_batch_sync_committee_update_job( + db_manager: Arc, + slot: u64, + tx: mpsc::Sender, +) -> Result<(), Box> { + let job_id = Uuid::new_v4(); + let job = Job { + job_id: job_id.clone(), + job_type: JobType::SyncCommitteeUpdate, + job_status: JobStatus::Created, + slot, + }; + match db_manager.create_job(job.clone()).await { + // Insert new job record to DB + Ok(()) => { + // Handle success + info!( + "[SYHC COMMITTEE UPDATE] Job created successfully with ID: {}", + job_id + ); + if tx.send(job).await.is_err() { + return Err("Failed to send job".into()); + } + // If starting 
committee update job, first ensule that the corresponding slot is registered in contract + Ok(()) + } + Err(e) => { + // Handle the error + return Err(e.into()); + } + } +} + +async fn evaluate_jobs_statuses( + db_manager: Arc, + last_verified_epoch: u64, +) -> Result<(), Box> { + // The purpose of this function is to manage the sequential nature of onchain verification of epochs and sync committees + // Firstly we get all jobs with status OFFCHAIN_COMPUTATION_FINISHED + let jobs = db_manager + .get_compute_finsihed_jobs_to_proccess_onchain_call(last_verified_epoch) + .await?; + + // Iterate through the jobs and process them + for job in jobs { + match job.job_type { + JobType::EpochBatchUpdate => { + let update = EpochUpdateBatch::from_json::( + job.batch_range_begin_epoch.try_into().unwrap(), + job.batch_range_end_epoch.try_into().unwrap(), + )?; + + println!( + "Successfully submitted batch epoch update for job_uuid: {}", + job.job_uuid + ); + } + JobType::EpochUpdate => {} + JobType::SyncCommitteeUpdate => {} + } + } + + Ok(()) +} + +async fn broadcast_onchain_ready_jobs( + db_manager: Arc, + bankai: Arc, +) -> Result<(), Box> { + // Fetch jobs with the status `ReadyToBroadcastOnchain` + let jobs = db_manager + .get_jobs_with_status(JobStatus::ReadyToBroadcastOnchain) + .await?; + + // Iterate through the jobs and process them + for job in jobs { + match job.job_type { + JobType::EpochBatchUpdate => { + let update = EpochUpdateBatch::from_json::( + job.batch_range_begin_epoch.try_into().unwrap(), + job.batch_range_end_epoch.try_into().unwrap(), + )?; + + info!( + "[SYNC COMMITTEE JOB] Calling epoch batch update onchain for epochs range from {} to {}...", + job.batch_range_begin_epoch, job.batch_range_end_epoch + ); + + // Submit to Starknet + let txhash = bankai + .starknet_client + .submit_update(update.expected_circuit_outputs, &bankai.config) + .await?; + + println!( + "[EPOCH BATCH JOB] Successfully called batch epoch update onchain for job_uuid: {}, 
txhash: {}", + job.job_uuid, txhash + ); + } + JobType::EpochUpdate => {} + JobType::SyncCommitteeUpdate => { + let update = SyncCommitteeUpdate::from_json::( + job.slot.to_u64().unwrap(), + )?; + + let sync_commite_id = + helpers::slot_to_sync_committee_id(job.slot.to_u64().unwrap()); + + info!( + "[SYNC COMMITTEE JOB] Calling sync committee ID {} update onchain...", + sync_commite_id + ); + + let txhash = bankai + .starknet_client + .submit_update(update.expected_circuit_outputs, &bankai.config) + .await?; + + info!("[SYNC COMMITTEE JOB] Successfully called sync committee ID {} update onchain, transaction confirmed, txhash: {}", sync_commite_id, txhash); + + db_manager.set_job_txhash(job.job_uuid, txhash).await?; + + // Insert data to DB after successful onchain sync committee verification + //let sync_committee_hash = update.expected_circuit_outputs.committee_hash; + db_manager + .insert_verified_sync_committee(job.slot.to_u64().unwrap(), sync_committee_hash) + .await?; + } + } + } + + Ok(()) +} // async fn worker_task(mut rx: Receiver, db_client: Client) -> Result<(), Box> { // while let Some(job_id) = rx.recv().await { @@ -498,7 +646,9 @@ async fn process_job( next_epoch ); - db_manager.update_job_status(job.job_id, JobStatus::FetchedProof).await?; + db_manager + .update_job_status(job.job_id, JobStatus::ProgramInputsPrepared) + .await?; // 3) Generate PIE info!( @@ -513,7 +663,9 @@ async fn process_job( next_epoch ); - db_manager.update_job_status(job.job_id, JobStatus::PieGenerated).await?; + db_manager + .update_job_status(job.job_id, JobStatus::PieGenerated) + .await?; // // 4) Submit offchain proof-generation job to Atlantic // info!("[EPOCH JOB] Sending proof generation query to Atlantic..."); @@ -610,7 +762,7 @@ async fn process_job( // Insert data to DB after successful onchain epoch verification // insert_verified_epoch(&db_client, job.slot / 0x2000, epoch_proof).await?; } - JobType::SyncComiteeUpdate => { + JobType::SyncCommitteeUpdate => { // Sync 
committee job info!( "[SYNC COMMITTEE JOB] Started processing sync committee job: {} for slot {}", @@ -655,7 +807,9 @@ async fn process_job( CairoRunner::generate_pie(&update, &bankai.config).await?; - db_manager.update_job_status(job.job_id, JobStatus::PieGenerated).await?; + db_manager + .update_job_status(job.job_id, JobStatus::PieGenerated) + .await?; info!( "[SYNC COMMITTEE JOB] Pie generated successfully for Sync Committee: {}...", @@ -665,13 +819,16 @@ async fn process_job( let batch_id = bankai.atlantic_client.submit_batch(update).await?; - db_manager.update_job_status(job.job_id, JobStatus::OffchainProofRequested).await?; - db_manager.set_atlantic_job_queryid( - job.job_id, - batch_id.clone(), - AtlanticJobType::ProofGeneration, - ) - .await?; + db_manager + .update_job_status(job.job_id, JobStatus::OffchainProofRequested) + .await?; + db_manager + .set_atlantic_job_queryid( + job.job_id, + batch_id.clone(), + AtlanticJobType::ProofGeneration, + ) + .await?; info!( "[SYNC COMMITTEE JOB] Proof generation batch submitted to atlantic. 
QueryID: {}", @@ -699,7 +856,9 @@ async fn process_job( batch_id ); - db_manager.update_job_status(job.job_id, JobStatus::OffchainProofRetrieved).await?; + db_manager + .update_job_status(job.job_id, JobStatus::OffchainProofRetrieved) + .await?; // 5) Submit wrapped proof request info!("[SYNC COMMITTEE JOB] Sending proof wrapping query to Atlantic.."); @@ -709,13 +868,16 @@ async fn process_job( wrapping_batch_id ); - db_manager.update_job_status(job.job_id, JobStatus::WrapProofRequested).await?; - db_manager.set_atlantic_job_queryid( - job.job_id, - wrapping_batch_id.clone(), - AtlanticJobType::ProofWrapping, - ) - .await?; + db_manager + .update_job_status(job.job_id, JobStatus::WrapProofRequested) + .await?; + db_manager + .set_atlantic_job_queryid( + job.job_id, + wrapping_batch_id.clone(), + AtlanticJobType::ProofWrapping, + ) + .await?; // Pool for Atlantic execution done bankai @@ -723,11 +885,15 @@ async fn process_job( .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) .await?; - db_manager.update_job_status(job.job_id, JobStatus::WrappedProofDone).await?; + db_manager + .update_job_status(job.job_id, JobStatus::WrappedProofDone) + .await?; info!("[SYNC COMMITTEE JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. 
Wrapping QueryID: {}", wrapping_batch_id); - db_manager.update_job_status(job.job_id, JobStatus::VerifiedFactRegistered).await?; + db_manager + .update_job_status(job.job_id, JobStatus::VerifiedFactRegistered) + .await?; let update = SyncCommitteeUpdate::from_json::(job.slot)?; @@ -744,23 +910,42 @@ async fn process_job( //insert_verified_sync_committee(&db_client, job.slot, sync_committee_hash).await?; } JobType::EpochBatchUpdate => { - let proof = EpochUpdateBatch::new_by_slot(&bankai, db_manager.clone(), job.slot).await?; + info!("[BATCH EPOCH JOB] Preparing inputs for program..."); + + let proof = + EpochUpdateBatch::new_by_slot(&bankai, db_manager.clone(), job.slot).await?; + + db_manager + .update_job_status(job.job_id, JobStatus::ProgramInputsPrepared) + .await?; + + info!("[BATCH EPOCH JOB] Starting trace generation..."); CairoRunner::generate_pie(&proof, &bankai.config).await?; + + db_manager + .update_job_status(job.job_id, JobStatus::PieGenerated) + .await?; + + info!("[BATCH EPOCH JOB] Uploading PIE and sending proof generation request to Atlantic..."); + let batch_id = bankai.atlantic_client.submit_batch(proof).await?; info!( - "[EPOCH JOB] Proof generation batch submitted to Atlantic. QueryID: {}", + "[BATCH EPOCH JOB] Proof generation batch submitted to Atlantic. QueryID: {}", batch_id ); - db_manager.update_job_status(job.job_id, JobStatus::OffchainProofRequested).await?; - db_manager.set_atlantic_job_queryid( - job.job_id, - batch_id.clone(), - AtlanticJobType::ProofGeneration, - ) - .await?; + db_manager + .update_job_status(job.job_id, JobStatus::OffchainProofRequested) + .await?; + db_manager + .set_atlantic_job_queryid( + job.job_id, + batch_id.clone(), + AtlanticJobType::ProofGeneration, + ) + .await?; // Pool for Atlantic execution done bankai @@ -769,7 +954,7 @@ async fn process_job( .await?; info!( - "[EPOCH JOB] Proof generation done by Atlantic. QueryID: {}", + "[BATCH EPOCH JOB] Proof generation done by Atlantic. 
QueryID: {}", batch_id ); @@ -783,23 +968,28 @@ async fn process_job( batch_id ); - db_manager.update_job_status(job.job_id, JobStatus::OffchainProofRetrieved).await?; + db_manager + .update_job_status(job.job_id, JobStatus::OffchainProofRetrieved) + .await?; // 5) Submit wrapped proof request - info!("[EPOCH JOB] Sending proof wrapping query to Atlantic.."); + info!("[EPOCH JOB] Uploading proof and sending wrapping query to Atlantic.."); let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; info!( "[EPOCH JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", wrapping_batch_id ); - db_manager.update_job_status(job.job_id, JobStatus::WrapProofRequested).await?; - db_manager.set_atlantic_job_queryid( - job.job_id, - wrapping_batch_id.clone(), - AtlanticJobType::ProofWrapping, - ) - .await?; + db_manager + .update_job_status(job.job_id, JobStatus::WrapProofRequested) + .await?; + db_manager + .set_atlantic_job_queryid( + job.job_id, + wrapping_batch_id.clone(), + AtlanticJobType::ProofWrapping, + ) + .await?; // Pool for Atlantic execution done bankai @@ -807,11 +997,15 @@ async fn process_job( .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) .await?; - db_manager.update_job_status(job.job_id, JobStatus::WrappedProofDone).await?; + db_manager + .update_job_status(job.job_id, JobStatus::WrappedProofDone) + .await?; info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. 
Wrapping QueryID: {}", wrapping_batch_id); - db_manager.update_job_status(job.job_id, JobStatus::VerifiedFactRegistered).await?; + db_manager + .update_job_status(job.job_id, JobStatus::VerifiedFactRegistered) + .await?; // 6) Submit epoch update onchain info!("[EPOCH JOB] Calling epoch update onchain..."); diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index f94a4b5..c0da37c 100644 --- a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -3,6 +3,7 @@ use crate::epoch_update::{EpochUpdate, ExpectedEpochUpdateOutputs}; use crate::helpers::{calculate_slots_range_for_batch, slot_to_epoch_id}; use crate::traits::{Provable, Submittable}; use crate::utils::hashing::get_committee_hash; + use crate::utils::merkle::poseidon::{compute_paths, compute_root, hash_path}; use crate::{BankaiClient, Error}; use alloy_primitives::FixedBytes; @@ -13,13 +14,10 @@ use sha2::{Digest, Sha256}; use starknet::macros::selector; use starknet_crypto::Felt; use std::fs; -use tokio_postgres::Client; -use tracing::{error, info, warn}; -use crate::utils::{ - database_manager::DatabaseManager, -}; -use std::sync::Arc; +use crate::utils::database_manager::DatabaseManager; +use std::sync::Arc; +use tracing::{debug, info}; #[derive(Debug, Serialize, Deserialize)] pub struct EpochUpdateBatch { @@ -41,6 +39,7 @@ pub struct ExpectedEpochBatchOutputs { } impl EpochUpdateBatch { + #[cfg(feature = "cli")] pub(crate) async fn new(bankai: &BankaiClient) -> Result { let (start_slot, mut end_slot) = bankai .starknet_client @@ -170,7 +169,14 @@ impl EpochUpdateBatch { // Insert merkle paths to database let current_epoch = slot_to_epoch_id(current_slot); for (path_index, current_path) in path.iter().enumerate() { - db_manager.insert_merkle_path_for_epoch(current_epoch.to_i32().unwrap(), path_index.to_i32().unwrap(), current_path.to_hex_string()); + db_manager + .insert_merkle_path_for_epoch( + current_epoch.to_i32().unwrap(), + path_index.to_i32().unwrap(), + 
current_path.to_hex_string(), + ) + .await + .map_err(|e| Error::DatabaseError(e.to_string()))?; } current_slot += 32; } @@ -187,6 +193,33 @@ impl EpochUpdateBatch { } } +impl EpochUpdateBatch { + pub fn from_json(first_slot: u64, last_slot: u64) -> Result + where + T: serde::de::DeserializeOwned, + { + // Pattern match for files like: batches/epoch_batch/6709248_to_6710272/input_batch_6709248_to_6710272.json + let path = format!( + "batches/epoch_batch/{}_to_{}/input_batch_{}_to_{}.json", + first_slot, last_slot, first_slot, last_slot + ); + debug!(path); + let glob_pattern = glob::glob(&path) + .map_err(|e| Error::IoError(std::io::Error::new(std::io::ErrorKind::Other, e)))?; + + // Take the first matching file + let path = glob_pattern.take(1).next().ok_or_else(|| { + Error::IoError(std::io::Error::new( + std::io::ErrorKind::NotFound, + "No matching file found", + )) + })?; + + let json = fs::read_to_string(path.unwrap()).map_err(Error::IoError)?; + serde_json::from_str(&json).map_err(|e| Error::DeserializeError(e.to_string())) + } +} + impl Provable for EpochUpdateBatch { fn id(&self) -> String { let mut hasher = Sha256::new(); @@ -223,30 +256,6 @@ impl Provable for EpochUpdateBatch { Ok(path) } - fn from_json(slot: u64) -> Result - where - T: serde::de::DeserializeOwned, - { - // Pattern match for files like: batches/epoch_batch/6709248_to_6710272/input_batch_6709248_to_6710272.json - let path = format!( - "batches/epoch_batch/*_to_{}/input_batch_*_to_{}.json", - slot, slot - ); - let glob_pattern = glob::glob(&path) - .map_err(|e| Error::IoError(std::io::Error::new(std::io::ErrorKind::Other, e)))?; - - // Take the first matching file - let path = glob_pattern.take(1).next().ok_or_else(|| { - Error::IoError(std::io::Error::new( - std::io::ErrorKind::NotFound, - "No matching file found", - )) - })?; - - let json = fs::read_to_string(path.unwrap()).map_err(Error::IoError)?; - serde_json::from_str(&json).map_err(|e| Error::DeserializeError(e.to_string())) - } - fn 
proof_type(&self) -> crate::traits::ProofType { crate::traits::ProofType::EpochBatch } diff --git a/client-rs/src/epoch_update.rs b/client-rs/src/epoch_update.rs index a7b7289..cbed8c0 100644 --- a/client-rs/src/epoch_update.rs +++ b/client-rs/src/epoch_update.rs @@ -45,6 +45,17 @@ impl EpochUpdate { } } +impl EpochUpdate { + pub fn from_json(slot: u64) -> Result + where + T: serde::de::DeserializeOwned, + { + let path = format!("batches/epoch/{}/input_{}.json", slot, slot); + let json = fs::read_to_string(path).map_err(Error::IoError)?; + serde_json::from_str(&json).map_err(|e| Error::DeserializeError(e.to_string())) + } +} + impl Provable for EpochUpdate { fn id(&self) -> String { let mut hasher = Sha256::new(); @@ -65,15 +76,6 @@ impl Provable for EpochUpdate { Ok(path) } - fn from_json(slot: u64) -> Result - where - T: serde::de::DeserializeOwned, - { - let path = format!("batches/epoch/{}/input_{}.json", slot, slot); - let json = fs::read_to_string(path).map_err(Error::IoError)?; - serde_json::from_str(&json).map_err(|e| Error::DeserializeError(e.to_string())) - } - fn pie_path(&self) -> String { format!( "batches/epoch/{}/pie_{}.zip", diff --git a/client-rs/src/helpers.rs b/client-rs/src/helpers.rs index fce6dd2..8c99a81 100644 --- a/client-rs/src/helpers.rs +++ b/client-rs/src/helpers.rs @@ -1,5 +1,7 @@ use crate::{ - constants::{SLOTS_PER_EPOCH, SLOTS_PER_SYNC_COMMITTEE, TARGET_BATCH_SIZE}, + constants::{ + EPOCHS_PER_SYNC_COMMITTEE, SLOTS_PER_EPOCH, SLOTS_PER_SYNC_COMMITTEE, TARGET_BATCH_SIZE, + }, Error, }; use tracing::info; @@ -35,10 +37,20 @@ pub fn calculate_slots_range_for_batch(first_slot: u64) -> (u64, u64) { (start_slot, end_slot) } -// Computes the slot numbers for term of specified slot +/// Computes the slot numbers for term of specified slot pub async fn calculate_batching_range_for_slot(slot: u64) -> Result<(u64, u64), Error> { let next_epoch_slot = (u64::try_from(slot).unwrap() / 32) * 32 + 32; let term = next_epoch_slot / 0x2000; let 
terms_last_epoch_slot = (term + 1) * 0x2000 - 32; Ok((next_epoch_slot, terms_last_epoch_slot)) } + +/// Returns the first epoch signed by the specified sync committee +pub fn get_first_epoch_for_sync_committee(sync_committee_id: u64) -> u64 { + sync_committee_id * EPOCHS_PER_SYNC_COMMITTEE +} + +/// Returns the last epoch signed by the specified sync committee +pub fn get_last_epoch_for_sync_committee(sync_committee_id: u64) -> u64 { + (sync_committee_id + 1) * EPOCHS_PER_SYNC_COMMITTEE - 1 +} diff --git a/client-rs/src/main.rs b/client-rs/src/main.rs index 91facbf..3ef3ff4 100644 --- a/client-rs/src/main.rs +++ b/client-rs/src/main.rs @@ -1,3 +1,4 @@ +mod bankai_client; mod config; mod constants; mod contract_init; @@ -5,6 +6,7 @@ pub mod epoch_batch; mod epoch_update; mod execution_header; mod helpers; +mod state; mod sync_committee; mod traits; mod utils; @@ -23,115 +25,97 @@ use utils::{ rpc::BeaconRpcClient, starknet_client::{StarknetClient, StarknetError}, }; + +use bankai_client::BankaiClient; // use rand::Rng; // use std::fs::File; // use std::io::Write; use clap::{Parser, Subcommand}; use dotenv::from_filename; +use state::Error; use std::env; use tracing::Level; use tracing_subscriber::FmtSubscriber; -#[derive(Debug)] -pub enum Error { - InvalidProof, - RpcError(reqwest::Error), - DeserializeError(String), - IoError(std::io::Error), - StarknetError(StarknetError), - BeaconStateProofError(BeaconStateProofError), - BlockNotFound, - FetchSyncCommitteeError, - FailedFetchingBeaconState, - InvalidBLSPoint, - MissingRpcUrl, - EmptySlotDetected(u64), - RequiresNewerEpoch(Felt), - CairoRunError(String), - AtlanticError(reqwest::Error), - InvalidResponse(String), - InvalidMerkleTree, -} +// impl From for Error { +// fn from(e: StarknetError) -> Self { +// Error::StarknetError(e) +// } +// } -impl From for Error { - fn from(e: StarknetError) -> Self { - Error::StarknetError(e) - } -} +// struct BankaiClient { +// client: BeaconRpcClient, +// starknet_client: 
StarknetClient, +// config: BankaiConfig, +// atlantic_client: AtlanticClient, +// } -struct BankaiClient { - client: BeaconRpcClient, - starknet_client: StarknetClient, - config: BankaiConfig, - atlantic_client: AtlanticClient, -} +// impl BankaiClient { +// pub async fn new() -> Self { +// from_filename(".env.sepolia").ok(); +// let config = BankaiConfig::default(); +// Self { +// client: BeaconRpcClient::new(env::var("BEACON_RPC_URL").unwrap()), +// starknet_client: StarknetClient::new( +// env::var("STARKNET_RPC_URL").unwrap().as_str(), +// env::var("STARKNET_ADDRESS").unwrap().as_str(), +// env::var("STARKNET_PRIVATE_KEY").unwrap().as_str(), +// ) +// .await +// .unwrap(), +// atlantic_client: AtlanticClient::new( +// config.atlantic_endpoint.clone(), +// env::var("ATLANTIC_API_KEY").unwrap(), +// ), +// config, +// } +// } -impl BankaiClient { - pub async fn new() -> Self { - from_filename(".env.sepolia").ok(); - let config = BankaiConfig::default(); - Self { - client: BeaconRpcClient::new(env::var("BEACON_RPC_URL").unwrap()), - starknet_client: StarknetClient::new( - env::var("STARKNET_RPC_URL").unwrap().as_str(), - env::var("STARKNET_ADDRESS").unwrap().as_str(), - env::var("STARKNET_PRIVATE_KEY").unwrap().as_str(), - ) - .await - .unwrap(), - atlantic_client: AtlanticClient::new( - config.atlantic_endpoint.clone(), - env::var("ATLANTIC_API_KEY").unwrap(), - ), - config, - } - } +// pub async fn get_sync_committee_update( +// &self, +// mut slot: u64, +// ) -> Result { +// let mut attempts = 0; +// const MAX_ATTEMPTS: u8 = 3; - pub async fn get_sync_committee_update( - &self, - mut slot: u64, - ) -> Result { - let mut attempts = 0; - const MAX_ATTEMPTS: u8 = 3; +// // Before we start generating the proof, we ensure the slot was not missed +// let _header = loop { +// match self.client.get_header(slot).await { +// Ok(header) => break header, +// Err(Error::EmptySlotDetected(_)) => { +// attempts += 1; +// if attempts >= MAX_ATTEMPTS { +// return 
Err(Error::EmptySlotDetected(slot)); +// } +// slot += 1; +// println!( +// "Empty slot detected! Attempt {}/{}. Fetching slot: {}", +// attempts, MAX_ATTEMPTS, slot +// ); +// } +// Err(e) => return Err(e), // Propagate other errors immediately +// } +// }; - // Before we start generating the proof, we ensure the slot was not missed - let _header = loop { - match self.client.get_header(slot).await { - Ok(header) => break header, - Err(Error::EmptySlotDetected(_)) => { - attempts += 1; - if attempts >= MAX_ATTEMPTS { - return Err(Error::EmptySlotDetected(slot)); - } - slot += 1; - println!( - "Empty slot detected! Attempt {}/{}. Fetching slot: {}", - attempts, MAX_ATTEMPTS, slot - ); - } - Err(e) => return Err(e), // Propagate other errors immediately - } - }; +// let proof: SyncCommitteeUpdate = SyncCommitteeUpdate::new(&self.client, slot).await?; - let proof: SyncCommitteeUpdate = SyncCommitteeUpdate::new(&self.client, slot).await?; +// Ok(proof) +// } - Ok(proof) - } - - pub async fn get_epoch_proof(&self, slot: u64) -> Result { - let epoch_proof = EpochUpdate::new(&self.client, slot).await?; - Ok(epoch_proof) - } +// pub async fn get_epoch_proof(&self, slot: u64) -> Result { +// let epoch_proof = EpochUpdate::new(&self.client, slot).await?; +// Ok(epoch_proof) +// } - pub async fn get_contract_initialization_data( - &self, - slot: u64, - config: &BankaiConfig, - ) -> Result { - let contract_init = ContractInitializationData::new(&self.client, slot, config).await?; - Ok(contract_init) - } -} +// pub async fn get_contract_initialization_data( +// &self, +// slot: u64, +// config: &BankaiConfig, +// ) -> Result { +// let contract_init = ContractInitializationData::new(&self.client, slot, config).await?; +// Ok(contract_init) +// } +// } #[derive(Subcommand)] enum Commands { @@ -184,7 +168,9 @@ enum Commands { #[arg(long, short)] batch_id: String, #[arg(long, short)] - slot: u64, + first_slot: u64, + #[arg(long, short)] + last_slot: u64, }, VerifyCommittee { 
#[arg(long, short)] @@ -215,8 +201,8 @@ async fn main() -> Result<(), Error> { from_filename(".env.sepolia").ok(); let subscriber = FmtSubscriber::builder() - //.with_max_level(Level::DEBUG) - .with_max_level(Level::INFO) + .with_max_level(Level::TRACE) + //.with_max_level(Level::INFO) .finish(); tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); @@ -353,13 +339,18 @@ async fn main() -> Result<(), Error> { println!("Batch not completed yet. Status: {}", status); } } - Commands::VerifyEpochBatch { batch_id, slot } => { + Commands::VerifyEpochBatch { + batch_id, + first_slot, + last_slot, + } => { let status = bankai .atlantic_client .check_batch_status(batch_id.as_str()) .await?; if status == "DONE" { - let update = EpochUpdateBatch::from_json::(slot)?; + let update = + EpochUpdateBatch::from_json::(first_slot, last_slot)?; bankai .starknet_client .submit_update(update.expected_circuit_outputs, &bankai.config) diff --git a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs index eb0c878..ab7754c 100644 --- a/client-rs/src/routes/mod.rs +++ b/client-rs/src/routes/mod.rs @@ -4,14 +4,26 @@ use axum::{ response::IntoResponse, Json, }; +use num_traits::cast::ToPrimitive; use serde_json::{json, Value}; -use tracing::{error, info, trace, warn, Level}; +use tracing::error; // RPC requests handling functions // // Handler for GET /status -pub async fn handle_get_status(State(_state): State) -> impl IntoResponse { - Json(json!({ "success": true })) +pub async fn handle_get_status(State(state): State) -> impl IntoResponse { + let last_slot_in_progress = match state.db_manager.get_latest_slot_id_in_progress().await { + Ok(Some(slot)) => { + let last_slot_in_progress = slot.to_u64().unwrap(); + last_slot_in_progress + } + Ok(None) => 0, + Err(e) => 0, + }; + + Json(json!({ "success": true, "details": { + "last_slot_in_progress": last_slot_in_progress + } })) } // Handler for GET /epoch/:slot diff --git a/client-rs/src/state.rs 
b/client-rs/src/state.rs index e70e923..0a585da 100644 --- a/client-rs/src/state.rs +++ b/client-rs/src/state.rs @@ -1,15 +1,13 @@ use crate::bankai_client::BankaiClient; +use crate::utils::database_manager::DatabaseManager; use crate::utils::starknet_client::StarknetError; -use crate::utils::{ - database_manager::DatabaseManager, -}; use postgres_types::{FromSql, ToSql}; use starknet::core::types::Felt; use std::env; use std::fmt; +use std::str::FromStr; use std::sync::Arc; use tokio::sync::mpsc; -use tokio_postgres::Client; use uuid::Uuid; #[derive(Clone, Debug)] @@ -32,8 +30,8 @@ pub struct AppState { pub enum JobStatus { #[postgres(name = "CREATED")] Created, - #[postgres(name = "FETCHED_PROOF")] - FetchedProof, + #[postgres(name = "PROGRAM_INPUTS_PREPARED")] + ProgramInputsPrepared, #[postgres(name = "PIE_GENERATED")] PieGenerated, #[postgres(name = "OFFCHAIN_PROOF_REQUESTED")] @@ -44,8 +42,10 @@ pub enum JobStatus { WrapProofRequested, #[postgres(name = "WRAPPED_PROOF_DONE")] WrappedProofDone, - #[postgres(name = "READY_TO_BROADCAST")] - ReadyToBroadcast, + #[postgres(name = "OFFCHAIN_COMPUTATION_FINISHED")] + OffchainComputationFinished, + #[postgres(name = "READY_TO_BROADCAST_ONCHAIN")] + ReadyToBroadcastOnchain, #[postgres(name = "PROOF_VERIFY_CALLED_ONCHAIN")] ProofVerifyCalledOnchain, #[postgres(name = "VERIFIED_FACT_REGISTERED")] @@ -60,13 +60,14 @@ impl ToString for JobStatus { fn to_string(&self) -> String { match self { JobStatus::Created => "CREATED".to_string(), - JobStatus::FetchedProof => "FETCHED_PROOF".to_string(), + JobStatus::ProgramInputsPrepared => "PROGRAM_INPUTS_PREPARED".to_string(), JobStatus::PieGenerated => "PIE_GENERATED".to_string(), JobStatus::OffchainProofRequested => "OFFCHAIN_PROOF_REQUESTED".to_string(), JobStatus::OffchainProofRetrieved => "OFFCHAIN_PROOF_RETRIEVED".to_string(), JobStatus::WrapProofRequested => "WRAP_PROOF_REQUESTED".to_string(), JobStatus::WrappedProofDone => "WRAPPED_PROOF_DONE".to_string(), - 
JobStatus::ReadyToBroadcast => "READY_TO_BROADCAST".to_string(), + JobStatus::OffchainComputationFinished => "OFFCHAIN_COMPUTATION_FINISHED".to_string(), + JobStatus::ReadyToBroadcastOnchain => "READY_TO_BROADCAST_ONCHAIN".to_string(), JobStatus::ProofVerifyCalledOnchain => "PROOF_VERIFY_CALLED_ONCHAIN".to_string(), JobStatus::VerifiedFactRegistered => "VERIFIED_FACT_REGISTERED".to_string(), JobStatus::Cancelled => "CANCELLED".to_string(), @@ -75,11 +76,58 @@ impl ToString for JobStatus { } } +impl FromStr for JobStatus { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "CREATED" => Ok(JobStatus::Created), + "PROGRAM_INPUTS_PREPARED" => Ok(JobStatus::ProgramInputsPrepared), + "PIE_GENERATED" => Ok(JobStatus::PieGenerated), + "OFFCHAIN_PROOF_REQUESTED" => Ok(JobStatus::OffchainProofRequested), + "OFFCHAIN_PROOF_RETRIEVED" => Ok(JobStatus::OffchainProofRetrieved), + "WRAP_PROOF_REQUESTED" => Ok(JobStatus::WrapProofRequested), + "WRAPPED_PROOF_DONE" => Ok(JobStatus::WrappedProofDone), + "OFFCHAIN_COMPUTATION_FINISHED" => Ok(JobStatus::OffchainComputationFinished), + "READY_TO_BROADCAST_ONCHAIN" => Ok(JobStatus::ReadyToBroadcastOnchain), + "PROOF_VERIFY_CALLED_ONCHAIN" => Ok(JobStatus::ProofVerifyCalledOnchain), + "VERIFIED_FACT_REGISTERED" => Ok(JobStatus::VerifiedFactRegistered), + "CANCELLED" => Ok(JobStatus::Cancelled), + "ERROR" => Ok(JobStatus::Error), + _ => Err(format!("Invalid job status: {}", s)), + } + } +} + #[derive(Debug, FromSql, ToSql, Clone)] pub enum JobType { EpochUpdate, EpochBatchUpdate, - SyncComiteeUpdate, + SyncCommitteeUpdate, +} + +impl FromStr for JobType { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "EPOCH_UPDATE" => Ok(JobType::EpochUpdate), + "EPOCH_BATCH_UPDATE" => Ok(JobType::EpochBatchUpdate), + "SYNC_COMMITTEE_UPDATE" => Ok(JobType::SyncCommitteeUpdate), + _ => Err(format!("Invalid job type: {}", s)), + } + } +} + +impl fmt::Display for JobType { + fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result { + let value = match self { + JobType::EpochUpdate => "EPOCH_UPDATE", + JobType::EpochBatchUpdate => "EPOCH_BATCH_UPDATE", + JobType::SyncCommitteeUpdate => "SYNC_COMMITTEE_UPDATE", + }; + write!(f, "{}", value) + } } #[derive(Debug, FromSql, ToSql)] @@ -127,6 +175,7 @@ impl std::fmt::Display for StarknetError { impl std::error::Error for StarknetError {} +#[allow(unused)] #[derive(Debug)] pub enum Error { InvalidProof, @@ -146,6 +195,7 @@ pub enum Error { InvalidResponse(String), PoolingTimeout(String), InvalidMerkleTree, + DatabaseError(String), } impl fmt::Display for Error { @@ -168,6 +218,7 @@ impl fmt::Display for Error { Error::InvalidResponse(msg) => write!(f, "Invalid response: {}", msg), Error::PoolingTimeout(msg) => write!(f, "Pooling timeout: {}", msg), Error::InvalidMerkleTree => write!(f, "Invalid Merkle Tree"), + Error::DatabaseError(msg) => write!(f, "Database error: {}", msg), } } } diff --git a/client-rs/src/sync_committee.rs b/client-rs/src/sync_committee.rs index afa2c6f..05949e9 100644 --- a/client-rs/src/sync_committee.rs +++ b/client-rs/src/sync_committee.rs @@ -40,6 +40,15 @@ impl SyncCommitteeUpdate { expected_circuit_outputs, }) } + + pub fn from_json(slot: u64) -> Result + where + T: serde::de::DeserializeOwned, + { + let path = format!("batches/committee/{}/input_{}.json", slot, slot); + let json: String = fs::read_to_string(path).map_err(Error::IoError)?; + serde_json::from_str(&json).map_err(|e| Error::DeserializeError(e.to_string())) + } } impl Provable for SyncCommitteeUpdate { @@ -63,15 +72,6 @@ impl Provable for SyncCommitteeUpdate { Ok(path) } - fn from_json(slot: u64) -> Result - where - T: serde::de::DeserializeOwned, - { - let path = format!("batches/committee/{}/input_{}.json", slot, slot); - let json: String = fs::read_to_string(path).map_err(Error::IoError)?; - serde_json::from_str(&json).map_err(|e| Error::DeserializeError(e.to_string())) - } - fn pie_path(&self) -> String { format!( 
"batches/committee/{}/pie_{}.zip", diff --git a/client-rs/src/traits.rs b/client-rs/src/traits.rs index 40414b5..53668f3 100644 --- a/client-rs/src/traits.rs +++ b/client-rs/src/traits.rs @@ -19,9 +19,9 @@ pub enum ProofType { pub trait Provable: Serialize { fn id(&self) -> String; fn export(&self) -> Result; - fn from_json(slot: u64) -> Result - where - T: serde::de::DeserializeOwned; + // fn from_json(slot: u64) -> Result + // where + // T: serde::de::DeserializeOwned; fn proof_type(&self) -> ProofType; fn pie_path(&self) -> String; } diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 89d9cab..dc7ce0d 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -1,11 +1,24 @@ -use crate::epoch_update::{EpochProof, EpochUpdate}; -use crate::state::{AtlanticJobType, JobStatus, JobType, Job}; +use crate::epoch_update::EpochProof; +use crate::state::{AtlanticJobType, Error, Job, JobStatus, JobType}; use alloy_primitives::FixedBytes; -use std::error::Error; -use tokio_postgres::{Client, NoTls}; +use starknet::core::types::Felt; +use std::str::FromStr; +//use std::error::Error; +use chrono::NaiveDateTime; +use tokio_postgres::{Client, Row}; use tracing::{error, info}; use uuid::Uuid; -use starknet::core::types::Felt; + +#[derive(Debug)] +pub struct JobSchema { + pub job_uuid: uuid::Uuid, + pub job_status: JobStatus, + pub slot: i64, + pub batch_range_begin_epoch: i64, + pub batch_range_end_epoch: i64, + pub job_type: JobType, + pub updated_at: i64, +} #[derive(Debug)] pub struct DatabaseManager { @@ -21,7 +34,7 @@ impl DatabaseManager { eprintln!("Connection error: {}", e); } }); - + info!("Connected to the database successfully!"); client } @@ -30,20 +43,19 @@ impl DatabaseManager { std::process::exit(1); // Exit with non-zero status code } }; - + Self { client } } - pub async fn insert_verified_epoch( &self, epoch_id: u64, epoch_proof: EpochProof, - ) -> Result<(), Box> { + ) 
-> Result<(), Box> { self.client .execute( "INSERT INTO verified_epoch (epoch_id, header_root, state_root, n_signers) - VALUES ($1, $2, $3, $4)", + VALUES ($1, $2, $3, $4, $4, $6)", &[ &epoch_id.to_string(), &epoch_proof.header_root.to_string(), @@ -62,7 +74,7 @@ impl DatabaseManager { &self, sync_committee_id: u64, sync_committee_hash: FixedBytes<32>, - ) -> Result<(), Box> { + ) -> Result<(), Box> { self.client .execute( "INSERT INTO verified_sync_committee (sync_committee_id, sync_committee_hash) @@ -121,27 +133,30 @@ impl DatabaseManager { &"EPOCH_UPDATE", ], ) - .await?; - + .await + .map_err(|e| Error::DatabaseError(e.to_string()))?; + Ok(()) } - + pub async fn fetch_job_status( &self, job_id: Uuid, ) -> Result, Box> { - let row_opt = self.client + let row_opt = self + .client .query_opt("SELECT status FROM jobs WHERE job_id = $1", &[&job_id]) .await?; - + Ok(row_opt.map(|row| row.get("status"))) } - + pub async fn get_latest_slot_id_in_progress( &self, ) -> Result, Box> { // Query the latest slot with job_status in ('in_progress', 'initialized') - let row_opt = self.client + let row_opt = self + .client .query_opt( "SELECT slot FROM jobs WHERE job_status IN ($1, $2) @@ -150,7 +165,7 @@ impl DatabaseManager { &[&"CREATED", &"PIE_GENERATED"], ) .await?; - + // Extract and return the slot ID if let Some(row) = row_opt { Ok(Some(row.get::<_, i64>("slot"))) @@ -158,13 +173,14 @@ impl DatabaseManager { Ok(None) } } - + pub async fn get_merkle_paths_for_epoch( &self, epoch_id: i32, ) -> Result, Box> { // Query all merkle paths for the given epoch_id - let rows = self.client + let rows = self + .client .query( "SELECT merkle_path FROM epoch_merkle_paths WHERE epoch_id = $1 @@ -172,15 +188,88 @@ impl DatabaseManager { &[&epoch_id], ) .await?; - + let paths: Vec = rows .iter() .map(|row| row.get::<_, String>("merkle_path")) .collect(); - + Ok(paths) } - + + pub async fn get_compute_finsihed_jobs_to_proccess_onchain_call( + &self, + last_epoch: JobStatus, + ) -> 
Result, Box> { + let rows = self + .client + .query( + "SELECT * FROM jobs + WHERE job_status = 'OFFCHAIN_COMPUTATION_FINISHED' AND job_type = 'EPOCH_BATCH_UPDATE' AND batch_range_end_epoch <= $1", + &[&last_epoch], + ) + .await?; + + // Map rows into Job structs + let jobs: Vec = rows + .into_iter() + .map(|row: Row| JobSchema { + job_uuid: row.get("job_uuid"), + job_status: row.get("job_status"), + slot: row.get("slot"), + batch_range_begin_epoch: row.get("batch_range_begin_epoch"), + batch_range_end_epoch: row.get("batch_range_end_epoch"), + job_type: row.get("type"), + updated_at: row.get("updated_at"), + }) + .collect(); + + Ok(jobs) + } + + pub async fn get_jobs_with_status( + &self, + desired_status: JobStatus, + ) -> Result, Box> { + // Query all jobs with the given job_status + let rows = self + .client + .query( + "SELECT * FROM jobs + WHERE job_status = $1", + &[&desired_status], + ) + .await?; + + // Map rows into JobSchema structs + let jobs: Vec = rows + .into_iter() + .map( + |row: Row| -> Result> { + let job_type_str: String = row.get("type"); + let job_status_str: String = row.get("job_status"); + + let job_type = JobType::from_str(&job_type_str) + .map_err(|err| format!("Failed to parse job type: {}", err))?; + let job_status = JobStatus::from_str(&job_status_str) + .map_err(|err| format!("Failed to parse job status: {}", err))?; + + Ok(JobSchema { + job_uuid: row.get("job_uuid"), + job_status, + slot: row.get("slot"), + batch_range_begin_epoch: row.get("batch_range_begin_epoch"), + batch_range_end_epoch: row.get("batch_range_end_epoch"), + job_type, + updated_at: row.get("updated_at"), + }) + }, + ) + .collect::, _>>()?; + + Ok(jobs) + } + pub async fn update_job_status( &self, job_id: Uuid, @@ -194,7 +283,7 @@ impl DatabaseManager { .await?; Ok(()) } - + pub async fn set_job_txhash( &self, job_id: Uuid, @@ -208,7 +297,7 @@ impl DatabaseManager { .await?; Ok(()) } - + pub async fn cancell_all_unfinished_jobs( &self, ) -> Result<(), Box> { @@ 
-225,7 +314,7 @@ impl DatabaseManager { &self, epoch: i32, path_index: i32, - path: String + path: String, ) -> Result<(), Box> { self.client .execute( @@ -235,13 +324,13 @@ impl DatabaseManager { .await?; Ok(()) } - + // async fn fetch_job_by_status( // client: &Client, // status: JobStatus, // ) -> Result, Box> { // let tx = client.transaction().await?; - + // let row_opt = tx // .query_opt( // r#" @@ -255,7 +344,7 @@ impl DatabaseManager { // &[&status], // ) // .await?; - + // let job = if let Some(row) = row_opt { // Some(Job { // job_id: row.get("job_id"), @@ -266,11 +355,11 @@ impl DatabaseManager { // } else { // None // }; - + // tx.commit().await?; // Ok(job) // } - + // async fn add_verified_epoch( // client: Arc, // slot: u64, @@ -281,7 +370,7 @@ impl DatabaseManager { // &[&slot, &status.to_string(), &(slot as i64), &"EPOCH_UPDATE"], // ) // .await?; - + // Ok(()) // } } diff --git a/client-rs/src/utils/rpc.rs b/client-rs/src/utils/rpc.rs index d6763ae..fcd595b 100644 --- a/client-rs/src/utils/rpc.rs +++ b/client-rs/src/utils/rpc.rs @@ -5,6 +5,7 @@ use alloy_rpc_types_beacon::header::HeaderResponse; use itertools::Itertools; use reqwest::Client; use serde_json::Value; +use tracing::warn; use types::eth_spec::MainnetEthSpec; use types::{BeaconBlockBody, FullPayload}; @@ -78,7 +79,7 @@ impl BeaconRpcClient { /// the previous slot's header. pub async fn get_sync_aggregate(&self, mut slot: u64) -> Result { slot += 1; // signature is in the next slot - + let mut attempts = 0; const MAX_ATTEMPTS: u8 = 3; @@ -92,7 +93,10 @@ impl BeaconRpcClient { return Err(Error::EmptySlotDetected(slot)); } slot += 1; - println!("Empty slot detected! Attempt {}/{}. Fetching slot: {}", attempts, MAX_ATTEMPTS, slot); + warn!( + "Empty slot detected! Attempt {}/{}. 
Fetching slot: {}", + attempts, MAX_ATTEMPTS, slot + ); } Err(e) => return Err(e), // Propagate other errors immediately } diff --git a/client-rs/src/utils/starknet_client.rs b/client-rs/src/utils/starknet_client.rs index d6b896c..dc09de4 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -57,6 +57,7 @@ impl StarknetClient { }) } + #[cfg(feature = "cli")] pub async fn deploy_contract( &self, init_data: ContractInitializationData, @@ -96,6 +97,14 @@ impl StarknetClient { update: impl Submittable, config: &BankaiConfig, ) -> Result { + println!( + "{:?}", + vec![Call { + to: config.contract_address, + selector: update.get_contract_selector(), + calldata: update.to_calldata(), + }] + ); let result = self .account .execute_v1(vec![Call { From 640bb092ed1a7cb99e959dd5aa70d0e7ab491b13 Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 17 Jan 2025 19:58:00 +0100 Subject: [PATCH 15/66] feat: add committee hash insert to db --- client-rs/src/daemon.rs | 12 ++++++++++++ client-rs/src/helpers.rs | 8 ++++++++ 2 files changed, 20 insertions(+) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index c2fa477..d90439a 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -563,6 +563,18 @@ async fn broadcast_onchain_ready_jobs( // Insert data to DB after successful onchain sync committee verification //let sync_committee_hash = update.expected_circuit_outputs.committee_hash; + let sync_committee_hash = match bankai + .starknet_client + .get_committee_hash(slot, &bankai.config) + .await + { + Ok((sync_committee_hash)) => sync_committee_hash, + Err(e) => { + // Handle the error + return Err(e.into()); + } + }; + db_manager .insert_verified_sync_committee(job.slot.to_u64().unwrap(), sync_committee_hash) .await?; diff --git a/client-rs/src/helpers.rs b/client-rs/src/helpers.rs index 8c99a81..3c8347a 100644 --- a/client-rs/src/helpers.rs +++ b/client-rs/src/helpers.rs @@ -54,3 +54,11 @@ pub fn 
get_first_epoch_for_sync_committee(sync_committee_id: u64) -> u64 { pub fn get_last_epoch_for_sync_committee(sync_committee_id: u64) -> u64 { (sync_committee_id + 1) * EPOCHS_PER_SYNC_COMMITTEE - 1 } + +pub fn get_first_slot_for_epoch(slot: u64) -> u64 { + slot * SLOTS_PER_EPOCH +} + +pub fn get_last_slot_for_epoch(slot: u64) -> u64 { + (slot + 1) * SLOTS_PER_EPOCH - 1 +} From 726c0b53f23a8621add7207a3ad2694411b6bd03 Mon Sep 17 00:00:00 2001 From: lakewik Date: Tue, 21 Jan 2025 06:15:46 +0100 Subject: [PATCH 16/66] Fix state persistence --- client-rs/src/constants.rs | 1 + client-rs/src/daemon.rs | 201 +++++++++++++----------- client-rs/src/epoch_batch.rs | 83 +++++++++- client-rs/src/routes/mod.rs | 4 +- client-rs/src/state.rs | 2 + client-rs/src/utils/database_manager.rs | 105 ++++++++++--- client-rs/src/utils/starknet_client.rs | 6 +- 7 files changed, 282 insertions(+), 120 deletions(-) diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index cdab480..f32a94f 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -2,3 +2,4 @@ pub const SLOTS_PER_EPOCH: u64 = 32; // For mainnet pub const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet pub const TARGET_BATCH_SIZE: u64 = 32; // Defines how many epochs in one batch pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; +pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 8; diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index d90439a..339319c 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -12,6 +12,7 @@ mod sync_committee; mod traits; mod utils; //use alloy_primitives::TxHash; +use alloy_primitives::FixedBytes; use alloy_rpc_types_beacon::events::HeadEvent; use axum::{ extract::DefaultBodyLimit, @@ -277,26 +278,32 @@ async fn handle_beacon_chain_head_event( // We getting the last slot in progress to determine next slots to prove let mut last_slot_in_progress: u64 = 0; - let mut last_epoch_in_progress: u64 = 0; + // /let mut 
last_epoch_in_progress: u64 = 0; let mut last_sync_committee_in_progress: u64 = 0; - match db_manager.get_latest_slot_id_in_progress().await { - Ok(Some(slot)) => { - last_slot_in_progress = slot.to_u64().unwrap(); - last_epoch_in_progress = helpers::slot_to_epoch_id(last_slot_in_progress); - last_sync_committee_in_progress = - helpers::slot_to_sync_committee_id(last_slot_in_progress); - info!( - "Latest in progress slot: {} Epoch: {} Sync committee: {}", - last_slot_in_progress, last_epoch_in_progress, last_sync_committee_in_progress - ); - } - Ok(None) => { - warn!("No any in progress slot"); - } - Err(e) => { - error!("Error while getting latest in progress slot ID: {}", e); - } - } + // match db_manager.get_latest_slot_id_in_progress().await { + // Ok(Some(slot)) => { + // last_slot_in_progress = slot.to_u64().unwrap(); + // last_epoch_in_progress = helpers::slot_to_epoch_id(last_slot_in_progress); + // last_sync_committee_in_progress = + // helpers::slot_to_sync_committee_id(last_slot_in_progress); + // info!( + // "Latest in progress slot: {} Epoch: {} Sync committee: {}", + // last_slot_in_progress, last_epoch_in_progress, last_sync_committee_in_progress + // ); + // } + // Ok(None) => { + // warn!("No any in progress slot"); + // } + // Err(e) => { + // error!("Error while getting latest in progress slot ID: {}", e); + // } + // } + + let last_epoch_in_progress = db_manager + .get_latest_epoch_in_progress() + .await + .unwrap() + .unwrap(); let latest_verified_sync_committee_id = 1; @@ -308,11 +315,24 @@ async fn handle_beacon_chain_head_event( epochs_behind, epoch_id, sync_committee_id, latest_verified_epoch_id, latest_verified_sync_committee_id ); + // Check if we have in progress all epochs that need to be processed, if no, run job if last_epoch_in_progress < (epoch_id - constants::TARGET_BATCH_SIZE) { - // Check if we have in progress all epochs that need to be processed, if no, run job + // And chceck how many jobs are already in progress and if we fit 
in the limit + let in_progress_jobs_count = db_manager.count_jobs_in_progress().await.unwrap(); + if in_progress_jobs_count.unwrap().to_u64().unwrap() + >= constants::MAX_CONCURRENT_JOBS_IN_PROGRESS + { + info!( + "Currently not starting new job, MAX_CONCURRENT_JOBS_IN_PROGRESS limit reached" + ); + return; + } + match run_batch_epoch_update_job( db_manager.clone(), last_slot_in_progress + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), + last_epoch_in_progress, + last_epoch_in_progress + constants::TARGET_BATCH_SIZE, tx.clone(), ) .await @@ -337,7 +357,7 @@ async fn handle_beacon_chain_head_event( } else if epochs_behind == constants::TARGET_BATCH_SIZE { // This is when we are synced properly and new epoch batch needs to be inserted info!( - "Starting syncing next epoch batch. Current Beacon Chain epoch: {} Latest verified epoch: {}", + "Starting provessing next epoch batch. Current Beacon Chain epoch: {} Latest verified epoch: {}", epoch_id, latest_verified_epoch_id ); } @@ -407,6 +427,8 @@ async fn handle_beacon_chain_head_event( async fn run_batch_epoch_update_job( db_manager: Arc, slot: u64, + batch_range_begin_epoch: u64, + batch_range_end_epoch: u64, tx: mpsc::Sender, ) -> Result<(), Box> { let job_id = Uuid::new_v4(); @@ -415,6 +437,8 @@ async fn run_batch_epoch_update_job( job_type: JobType::EpochBatchUpdate, job_status: JobStatus::Created, slot, + batch_range_begin_epoch: Some(batch_range_begin_epoch), + batch_range_end_epoch: Some(batch_range_end_epoch), }; match db_manager.create_job(job.clone()).await { @@ -438,7 +462,7 @@ async fn run_batch_epoch_update_job( } } -async fn run_batch_sync_committee_update_job( +async fn run_sync_committee_update_job( db_manager: Arc, slot: u64, tx: mpsc::Sender, @@ -449,6 +473,8 @@ async fn run_batch_sync_committee_update_job( job_type: JobType::SyncCommitteeUpdate, job_status: JobStatus::Created, slot, + batch_range_begin_epoch: None, + batch_range_end_epoch: None, }; match 
db_manager.create_job(job.clone()).await { @@ -462,7 +488,7 @@ async fn run_batch_sync_committee_update_job( if tx.send(job).await.is_err() { return Err("Failed to send job".into()); } - // If starting committee update job, first ensule that the corresponding slot is registered in contract + // If starting committee update job, first ensure that the corresponding slot is registered in contract Ok(()) } Err(e) => { @@ -472,37 +498,37 @@ async fn run_batch_sync_committee_update_job( } } -async fn evaluate_jobs_statuses( - db_manager: Arc, - last_verified_epoch: u64, -) -> Result<(), Box> { - // The purpose of this function is to manage the sequential nature of onchain verification of epochs and sync committees - // Firstly we get all jobs with status OFFCHAIN_COMPUTATION_FINISHED - let jobs = db_manager - .get_compute_finsihed_jobs_to_proccess_onchain_call(last_verified_epoch) - .await?; - - // Iterate through the jobs and process them - for job in jobs { - match job.job_type { - JobType::EpochBatchUpdate => { - let update = EpochUpdateBatch::from_json::( - job.batch_range_begin_epoch.try_into().unwrap(), - job.batch_range_end_epoch.try_into().unwrap(), - )?; - - println!( - "Successfully submitted batch epoch update for job_uuid: {}", - job.job_uuid - ); - } - JobType::EpochUpdate => {} - JobType::SyncCommitteeUpdate => {} - } - } +// async fn evaluate_jobs_statuses( +// db_manager: Arc, +// last_verified_epoch: u64, +// ) -> Result<(), Box> { +// // The purpose of this function is to manage the sequential nature of onchain verification of epochs and sync committees +// // Firstly we get all jobs with status OFFCHAIN_COMPUTATION_FINISHED +// let jobs = db_manager +// .get_compute_finsihed_jobs_to_proccess_onchain_call(last_verified_epoch) +// .await?; + +// // Iterate through the jobs and process them +// for job in jobs { +// match job.job_type { +// JobType::EpochBatchUpdate => { +// let update = EpochUpdateBatch::from_json::( +// 
job.batch_range_begin_epoch.try_into().unwrap(), +// job.batch_range_end_epoch.try_into().unwrap(), +// )?; + +// println!( +// "Successfully submitted batch epoch update for job_uuid: {}", +// job.job_uuid +// ); +// } +// JobType::EpochUpdate => {} +// JobType::SyncCommitteeUpdate => {} +// } +// } - Ok(()) -} +// Ok(()) +// } async fn broadcast_onchain_ready_jobs( db_manager: Arc, @@ -537,6 +563,14 @@ async fn broadcast_onchain_ready_jobs( "[EPOCH BATCH JOB] Successfully called batch epoch update onchain for job_uuid: {}, txhash: {}", job.job_uuid, txhash ); + + let epoch_proof = bankai + .starknet_client + .get_epoch_proof(job.slot.try_into().unwrap(), &bankai.config); + + // db_manager + // .insert_verified_epochs_batch(job.slot / 0x2000, epoch_proof) + // .await?; } JobType::EpochUpdate => {} JobType::SyncCommitteeUpdate => { @@ -565,7 +599,7 @@ async fn broadcast_onchain_ready_jobs( //let sync_committee_hash = update.expected_circuit_outputs.committee_hash; let sync_committee_hash = match bankai .starknet_client - .get_committee_hash(slot, &bankai.config) + .get_committee_hash(job.slot.to_u64().unwrap(), &bankai.config) .await { Ok((sync_committee_hash)) => sync_committee_hash, @@ -575,8 +609,17 @@ async fn broadcast_onchain_ready_jobs( } }; + let sync_committee_hash_str = sync_committee_hash + .iter() + .map(|felt| felt.to_hex_string()) + .collect::>() + .join(""); + db_manager - .insert_verified_sync_committee(job.slot.to_u64().unwrap(), sync_committee_hash) + .insert_verified_sync_committee( + job.slot.to_u64().unwrap(), + sync_committee_hash_str, + ) .await?; } } @@ -906,26 +949,17 @@ async fn process_job( db_manager .update_job_status(job.job_id, JobStatus::VerifiedFactRegistered) .await?; - - let update = SyncCommitteeUpdate::from_json::(job.slot)?; - - info!("[SYNC COMMITTEE JOB] Calling sync committee update onchain..."); - - let txhash = bankai - .starknet_client - .submit_update(update.expected_circuit_outputs, &bankai.config) - .await?; - - 
db_manager.set_job_txhash(job.job_id, txhash).await?; - - // Insert data to DB after successful onchain sync committee verification - //insert_verified_sync_committee(&db_client, job.slot, sync_committee_hash).await?; } JobType::EpochBatchUpdate => { info!("[BATCH EPOCH JOB] Preparing inputs for program..."); - let proof = - EpochUpdateBatch::new_by_slot(&bankai, db_manager.clone(), job.slot).await?; + let proof = EpochUpdateBatch::new_by_epoch_range( + &bankai, + db_manager.clone(), + job.batch_range_begin_epoch.unwrap(), + job.batch_range_end_epoch.unwrap(), + ) + .await?; db_manager .update_job_status(job.job_id, JobStatus::ProgramInputsPrepared) @@ -1016,31 +1050,8 @@ async fn process_job( info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); db_manager - .update_job_status(job.job_id, JobStatus::VerifiedFactRegistered) + .update_job_status(job.job_id, JobStatus::OffchainComputationFinished) .await?; - - // 6) Submit epoch update onchain - info!("[EPOCH JOB] Calling epoch update onchain..."); - //let update = EpochUpdate::from_json::(next_epoch)?; - - // let txhash = bankai - // .starknet_client - // .submit_update(update.expected_circuit_outputs, &bankai.config) - // .await?; - - // set_job_txhash(&db_client, job.job_id, txhash).await?; - - // info!("[EPOCH JOB] Successfully submitted epoch update..."); - - // update_job_status(&db_client, job.job_id, JobStatus::ProofDecommitmentCalled).await?; - - // bankai.starknet_client.get_epoch_proof( - // &self, - // slot: u64, - // config: &BankaiConfig) - - //Insert data to DB after successful onchain epoch verification - // insert_verified_epochs_batch(&db_client, job.slot / 0x2000, epoch_proof).await?; } } diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index c0da37c..93bac16 100644 --- a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -1,6 +1,6 @@ use crate::constants::{SLOTS_PER_EPOCH, 
TARGET_BATCH_SIZE}; use crate::epoch_update::{EpochUpdate, ExpectedEpochUpdateOutputs}; -use crate::helpers::{calculate_slots_range_for_batch, slot_to_epoch_id}; +use crate::helpers::{calculate_slots_range_for_batch, get_first_slot_for_epoch, slot_to_epoch_id}; use crate::traits::{Provable, Submittable}; use crate::utils::hashing::get_committee_hash; @@ -191,6 +191,87 @@ impl EpochUpdateBatch { Ok(batch) } + + pub(crate) async fn new_by_epoch_range( + bankai: &BankaiClient, + db_manager: Arc, + start_epoch: u64, + end_epoch: u64, + ) -> Result { + let _permit = bankai + .config + .epoch_data_fetching_semaphore + .clone() + .acquire_owned() + .await + .map_err(|e| Error::CairoRunError(format!("Semaphore error: {}", e)))?; + + let mut epochs = vec![]; + + // Fetch epochs sequentially from start_slot to end_slot, incrementing by 32 each time + let mut current_epoch = start_epoch; + while current_epoch < end_epoch { + info!( + "Getting data for Epoch: {} First slot for this epoch: {} | Epochs batch position {}/{}", + current_epoch, + get_first_slot_for_epoch(current_epoch), + epochs.len(), + TARGET_BATCH_SIZE + ); + let epoch_update = + EpochUpdate::new(&bankai.client, get_first_slot_for_epoch(current_epoch)).await?; + + epochs.push(epoch_update); + current_epoch += 1; + } + + let circuit_inputs = EpochUpdateBatchInputs { + committee_hash: get_committee_hash(epochs[0].circuit_inputs.aggregate_pub.0), + epochs, + }; + + let expected_circuit_outputs = ExpectedEpochBatchOutputs::from_inputs(&circuit_inputs); + + let epoch_hashes = circuit_inputs + .epochs + .iter() + .map(|epoch| epoch.expected_circuit_outputs.hash()) + .collect::>(); + + let (root, paths) = compute_paths(epoch_hashes.clone()); + + // Verify each path matches the root + current_epoch = start_epoch; + for (index, path) in paths.iter().enumerate() { + let computed_root = hash_path(epoch_hashes[index], path, index); + if computed_root != root { + panic!("Path {} does not match root", index); + } + // Insert 
merkle paths to database + //let current_epoch = slot_to_epoch_id(current_slot); + for (path_index, current_path) in path.iter().enumerate() { + db_manager + .insert_merkle_path_for_epoch( + current_epoch.to_i32().unwrap(), + path_index.to_i32().unwrap(), + current_path.to_hex_string(), + ) + .await + .map_err(|e| Error::DatabaseError(e.to_string()))?; + } + current_epoch += 1; + } + + info!("Paths {:?}", paths); + + let batch = EpochUpdateBatch { + circuit_inputs, + expected_circuit_outputs, + merkle_paths: paths, + }; + + Ok(batch) + } } impl EpochUpdateBatch { diff --git a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs index ab7754c..99a4380 100644 --- a/client-rs/src/routes/mod.rs +++ b/client-rs/src/routes/mod.rs @@ -20,9 +20,11 @@ pub async fn handle_get_status(State(state): State) -> impl IntoRespon Ok(None) => 0, Err(e) => 0, }; + let in_progress_jobs_count = state.db_manager.count_jobs_in_progress().await.unwrap(); Json(json!({ "success": true, "details": { - "last_slot_in_progress": last_slot_in_progress + "last_slot_in_progress": last_slot_in_progress, + "jobs_in_progress_count": in_progress_jobs_count } })) } diff --git a/client-rs/src/state.rs b/client-rs/src/state.rs index 0a585da..47e3003 100644 --- a/client-rs/src/state.rs +++ b/client-rs/src/state.rs @@ -16,6 +16,8 @@ pub struct Job { pub job_type: JobType, pub job_status: JobStatus, pub slot: u64, + pub batch_range_begin_epoch: Option, + pub batch_range_end_epoch: Option, } #[derive(Clone, Debug)] diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index dc7ce0d..ce0be18 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -5,6 +5,7 @@ use starknet::core::types::Felt; use std::str::FromStr; //use std::error::Error; use chrono::NaiveDateTime; +use num_traits::ToPrimitive; use tokio_postgres::{Client, Row}; use tracing::{error, info}; use uuid::Uuid; @@ -73,16 +74,13 @@ impl DatabaseManager { pub 
async fn insert_verified_sync_committee( &self, sync_committee_id: u64, - sync_committee_hash: FixedBytes<32>, + sync_committee_hash: String, ) -> Result<(), Box> { self.client .execute( "INSERT INTO verified_sync_committee (sync_committee_id, sync_committee_hash) VALUES ($1, $2)", - &[ - &sync_committee_id.to_string(), - &sync_committee_hash.to_string(), - ], + &[&sync_committee_id.to_string(), &sync_committee_hash], ) .await?; @@ -123,18 +121,39 @@ impl DatabaseManager { &self, job: Job, ) -> Result<(), Box> { - self.client - .execute( - "INSERT INTO jobs (job_uuid, job_status, slot, type) VALUES ($1, $2, $3, $4)", - &[ - &job.job_id, - &job.job_status.to_string(), - &(job.slot as i64), - &"EPOCH_UPDATE", - ], - ) - .await - .map_err(|e| Error::DatabaseError(e.to_string()))?; + match job.job_type { + JobType::EpochBatchUpdate => { + self.client + .execute( + "INSERT INTO jobs (job_uuid, job_status, slot, type, batch_range_begin_epoch, batch_range_end_epoch) VALUES ($1, $2, $3, $4, $5, $6)", + &[ + &job.job_id, + &job.job_status.to_string(), + &(job.slot as i64), + &"EPOCH_BATCH_UPDATE", + &(job.batch_range_begin_epoch.unwrap() as i64), + &(job.batch_range_end_epoch.unwrap() as i64), + ], + ) + .await + .map_err(|e| Error::DatabaseError(e.to_string()))?; + } + JobType::EpochUpdate => {} + JobType::SyncCommitteeUpdate => { + self.client + .execute( + "INSERT INTO jobs (job_uuid, job_status, slot, type) VALUES ($1, $2, $3, $4, $5, $6)", + &[ + &job.job_id, + &job.job_status.to_string(), + &(job.slot as i64), + &"SYNC_COMMITTEE_UPDATE", + ], + ) + .await + .map_err(|e| Error::DatabaseError(e.to_string()))?; + } + } Ok(()) } @@ -153,7 +172,7 @@ impl DatabaseManager { pub async fn get_latest_slot_id_in_progress( &self, - ) -> Result, Box> { + ) -> Result, Box> { // Query the latest slot with job_status in ('in_progress', 'initialized') let row_opt = self .client @@ -168,9 +187,55 @@ impl DatabaseManager { // Extract and return the slot ID if let Some(row) = row_opt { - 
Ok(Some(row.get::<_, i64>("slot"))) + Ok(Some(row.get::<_, i64>("slot").to_u64().unwrap())) + } else { + Ok(Some(0)) + } + } + + pub async fn get_latest_epoch_in_progress( + &self, + ) -> Result, Box> { + // Query the latest slot with job_status in ('in_progress', 'initialized') + let row_opt = self + .client + .query_opt( + "SELECT batch_range_end_epoch FROM jobs + WHERE job_status IN ($1, $2) + ORDER BY batch_range_end_epoch DESC + LIMIT 1", + &[&"CREATED", &"PIE_GENERATED"], + ) + .await?; + + // Extract and return the slot ID + if let Some(row) = row_opt { + Ok(Some( + row.get::<_, i64>("batch_range_end_epoch").to_u64().unwrap(), + )) + } else { + Ok(Some(0)) + } + } + + pub async fn count_jobs_in_progress( + &self, + ) -> Result, Box> { + // Query the latest slot with job_status in ('in_progress', 'initialized') + let row_opt = self + .client + .query_opt( + "SELECT COUNT(job_uuid) as count FROM jobs + WHERE job_status IN ('PIE_GENERATED', 'CREATED')", + &[], + ) + .await?; + + // Extract and return the slot ID + if let Some(row) = row_opt { + Ok(Some(row.get::<_, i64>("count").to_u64().unwrap())) } else { - Ok(None) + Ok(Some(0)) } } diff --git a/client-rs/src/utils/starknet_client.rs b/client-rs/src/utils/starknet_client.rs index dc09de4..79085b6 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -126,7 +126,7 @@ impl StarknetClient { &self, slot: u64, config: &BankaiConfig, - ) -> Result<(), StarknetError> { + ) -> Result, StarknetError> { let committee_id = slot / 0x2000_u64; let committee_hash = self .account @@ -141,8 +141,8 @@ impl StarknetClient { ) .await .map_err(StarknetError::ProviderError)?; - println!("committee_hash: {:?}", committee_hash); - Ok(()) + //println!("committee_hash: {:?}", committee_hash); + Ok((committee_hash)) } pub async fn get_epoch_proof( From 96d1a37a100401b6cc3f899bef03b41a7b912f23 Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 22 Jan 2025 15:41:24 +0100 Subject: [PATCH 
17/66] Refactor, add missing RPC endpoints, various jobs improvements --- client-rs/db_structure.sql | 25 +- client-rs/src/constants.rs | 2 +- client-rs/src/daemon.rs | 607 ++++++++++++------------ client-rs/src/epoch_batch.rs | 177 +++---- client-rs/src/epoch_update.rs | 9 - client-rs/src/helpers.rs | 17 + client-rs/src/main.rs | 12 + client-rs/src/routes/mod.rs | 143 ++++-- client-rs/src/state.rs | 8 +- client-rs/src/utils/database_manager.rs | 96 ++-- client-rs/src/utils/starknet_client.rs | 52 +- 11 files changed, 643 insertions(+), 505 deletions(-) diff --git a/client-rs/db_structure.sql b/client-rs/db_structure.sql index 899ec1a..d2e77e8 100644 --- a/client-rs/db_structure.sql +++ b/client-rs/db_structure.sql @@ -2,16 +2,15 @@ CREATE TABLE jobs ( job_uuid UUID PRIMARY KEY, job_status TEXT NOT NULL, atlantic_proof_generate_batch_id TEXT NULL, - atlantic_proof_wrapper_batch_id, TEXT NULL, - slot BIGINT NOT NULL, -- Slot associated with the job + atlantic_proof_wrapper_batch_id TEXT NULL, + slot BIGINT NOT NULL, -- Slot associated with the job batch_range_begin_epoch BIGINT NOT NULL, batch_range_end_epoch BIGINT NOT NULL, type TEXT NOT NULL, - updated_at TIMESTAMP DEFAULT NOW(), - created_at TIMESTAMP DEFAULT NOW() + updated_at TIMESTAMP DEFAULT NOW (), + created_at TIMESTAMP DEFAULT NOW () ); - CREATE TABLE epoch_merkle_paths ( epoch_id BIGINT NOT NULL, path_index BIGINT NOT NULL, @@ -19,18 +18,16 @@ CREATE TABLE epoch_merkle_paths ( PRIMARY KEY (epoch_id, path_index) -- Ensures uniqueness of the combination ); - CREATE TABLE verified_epoch ( epoch_id UUID PRIMARY KEY, - header_root TEXT NOT NULL, -- Header root hash of the Beacon chain header - state_root TEXT NOT NULL, -- State root hash of the Beacon chain state - n_signers INTEGER NOT NULL, -- Number of epoch signers - execution_hash TEXT NOT NULL, -- Execution layer blockhash - execution_height BIGINT NOT NULL -- Execution layer height + header_root TEXT NOT NULL, -- Header root hash of the Beacon chain 
header + state_root TEXT NOT NULL, -- State root hash of the Beacon chain state + n_signers INTEGER NOT NULL, -- Number of epoch signers + execution_hash TEXT NOT NULL, -- Execution layer blockhash + execution_height BIGINT NOT NULL -- Execution layer height ); - CREATE TABLE verified_sync_committee ( - sync_committee_id UUID PRIMARY KEY, -- Unique identifier for sync committee (slot number/0x2000) - sync_committee_hash TEXT NOT NULL -- Sync committee hash that we are creating inside bankai + sync_committee_id UUID PRIMARY KEY, -- Unique identifier for sync committee (slot number/0x2000) + sync_committee_hash TEXT NOT NULL -- Sync committee hash that we are creating inside bankai ); diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index f32a94f..ddb84c0 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -2,4 +2,4 @@ pub const SLOTS_PER_EPOCH: u64 = 32; // For mainnet pub const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet pub const TARGET_BATCH_SIZE: u64 = 32; // Defines how many epochs in one batch pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; -pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 8; +pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 16; diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 339319c..eafe2b6 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -12,7 +12,7 @@ mod sync_committee; mod traits; mod utils; //use alloy_primitives::TxHash; -use alloy_primitives::FixedBytes; +//use alloy_primitives::FixedBytes; use alloy_rpc_types_beacon::events::HeadEvent; use axum::{ extract::DefaultBodyLimit, @@ -24,6 +24,7 @@ use bankai_client::BankaiClient; use config::BankaiConfig; //use constants::SLOTS_PER_EPOCH; use dotenv::from_filename; +use helpers::{get_first_epoch_for_sync_committee, get_first_slot_for_epoch}; use num_traits::cast::ToPrimitive; use reqwest; use starknet::core::types::Felt; @@ -39,15 +40,16 @@ use tower::ServiceBuilder; use 
tower_http::trace::TraceLayer; use tracing::{debug, error, info, warn, Level}; use tracing_subscriber::FmtSubscriber; -use traits::Provable; -use utils::{ - cairo_runner::CairoRunner, - database_manager::{DatabaseManager, JobSchema}, -}; +use utils::{cairo_runner::CairoRunner, database_manager::DatabaseManager}; //use std::error::Error as StdError; use epoch_batch::EpochUpdateBatch; use routes::{ - handle_get_epoch_update, handle_get_latest_verified_slot, handle_get_merkle_paths_for_epoch, + handle_get_committee_hash, + handle_get_epoch_proof, // handle_get_epoch_update, + handle_get_job_status, + handle_get_latest_verified_committee, + handle_get_latest_verified_slot, + handle_get_merkle_paths_for_epoch, handle_get_status, }; use std::net::SocketAddr; @@ -55,19 +57,7 @@ use sync_committee::SyncCommitteeUpdate; use tokio::time::Duration; use uuid::Uuid; -// Since beacon chain RPCs have different response structure (quicknode responds different than nidereal) we use this event extraction logic -fn extract_json(event_text: &str) -> Option { - for line in event_text.lines() { - if line.starts_with("data:") { - // Extract the JSON after "data:" - return Some(line.trim_start_matches("data:").trim().to_string()); - } - } - None -} - #[tokio::main] -//async fn main() { async fn main() -> Result<(), Box> { // Load .env.sepolia file from_filename(".env.sepolia").ok(); @@ -89,7 +79,6 @@ async fn main() -> Result<(), Box> { info!("Starting Bankai light-client daemon..."); - //let database_host = env::var("DATABASE_HOST").expect("DATABASE_HOST must be set"); let (tx, mut rx): (mpsc::Sender, mpsc::Receiver) = mpsc::channel(32); //let (tx, mut rx) = mpsc::channel(32); @@ -149,21 +138,31 @@ async fn main() -> Result<(), Box> { let app = Router::new() .route("/status", get(handle_get_status)) - //.route("/get-epoch-proof/:slot", get(handle_get_epoch_proof)) - //.route("/get-committee-hash/:committee_id", get(handle_get_committee_hash)) + .route( + 
"/get_verified_epoch_proof/:epoch", + get(handle_get_epoch_proof), + ) + .route( + "/get_verified_committee_hash/:committee_id", + get(handle_get_committee_hash), + ) .route( "/get_merkle_paths_for_epoch/:epoch_id", get(handle_get_merkle_paths_for_epoch), ) + // .route( + // "/debug/get_epoch_update/:slot", + // get(handle_get_epoch_update), + // ) .route( - "/debug/get-epoch-update/:slot", - get(handle_get_epoch_update), + "/debug/get_latest_verified_epoch", + get(handle_get_latest_verified_slot), ) .route( - "/debug/get-latest-verified-slot", - get(handle_get_latest_verified_slot), + "/debug/get_latest_verified_committee", + get(handle_get_latest_verified_committee), ) - // .route("/debug/get-job-status", get(handle_get_job_status)) + .route("/debug/get_job_status", get(handle_get_job_status)) // .route("/get-merkle-inclusion-proof", get(handle_get_merkle_inclusion_proof)) .layer(DefaultBodyLimit::disable()) .layer( @@ -191,7 +190,6 @@ async fn main() -> Result<(), Box> { .await .unwrap(); - //let db_client = Arc::new(&db_client); if slot_listener_toggle { task::spawn({ async move { @@ -210,7 +208,9 @@ async fn main() -> Result<(), Box> { Ok(bytes) => { if let Ok(event_text) = String::from_utf8(bytes.to_vec()) { // Preprocess the event text - if let Some(json_data) = extract_json(&event_text) { + if let Some(json_data) = + helpers::extract_json_from_event(&event_text) + { match serde_json::from_str::(&json_data) { Ok(parsed_event) => { let epoch_id = @@ -220,7 +220,7 @@ async fn main() -> Result<(), Box> { parsed_event.slot, ); info!( - "New slot event detected: {} | Block: {} | Epoch: {} | Sync committee: {} | Is epoch transition: {}", + "[EVENT] New beacon slot detected: {} | Block: {} | Epoch: {} | Sync committee: {} | Is epoch transition: {}", parsed_event.slot, parsed_event.block, epoch_id, sync_committee_id, parsed_event.epoch_transition ); @@ -265,7 +265,7 @@ async fn handle_beacon_chain_head_event( let epoch_id = 
helpers::slot_to_epoch_id(parsed_event.slot); let sync_committee_id = helpers::slot_to_sync_committee_id(parsed_event.slot); - let latest_epoch_slot = bankai + let latest_verified_epoch_slot = bankai .starknet_client .get_latest_epoch_slot(&bankai.config) .await @@ -273,31 +273,21 @@ async fn handle_beacon_chain_head_event( .to_u64() .unwrap(); - let latest_verified_epoch_id = helpers::slot_to_epoch_id(latest_epoch_slot); + let latest_verified_sync_committee_id = bankai + .starknet_client + .get_latest_committee_id(&bankai.config) + .await + .unwrap() + .to_u64() + .unwrap(); + + let latest_verified_epoch_id = helpers::slot_to_epoch_id(latest_verified_epoch_slot); let epochs_behind = epoch_id - latest_verified_epoch_id; // We getting the last slot in progress to determine next slots to prove - let mut last_slot_in_progress: u64 = 0; + //let mut last_slot_in_progress: u64 = 0; // /let mut last_epoch_in_progress: u64 = 0; - let mut last_sync_committee_in_progress: u64 = 0; - // match db_manager.get_latest_slot_id_in_progress().await { - // Ok(Some(slot)) => { - // last_slot_in_progress = slot.to_u64().unwrap(); - // last_epoch_in_progress = helpers::slot_to_epoch_id(last_slot_in_progress); - // last_sync_committee_in_progress = - // helpers::slot_to_sync_committee_id(last_slot_in_progress); - // info!( - // "Latest in progress slot: {} Epoch: {} Sync committee: {}", - // last_slot_in_progress, last_epoch_in_progress, last_sync_committee_in_progress - // ); - // } - // Ok(None) => { - // warn!("No any in progress slot"); - // } - // Err(e) => { - // error!("Error while getting latest in progress slot ID: {}", e); - // } - // } + // let mut last_sync_committee_in_progress: u64 = 0; let last_epoch_in_progress = db_manager .get_latest_epoch_in_progress() @@ -305,39 +295,97 @@ async fn handle_beacon_chain_head_event( .unwrap() .unwrap(); - let latest_verified_sync_committee_id = 1; + let last_sync_committee_in_progress = 1; + + let mut latest_scheduled_epoch = 
last_epoch_in_progress; + if latest_verified_epoch_id > last_epoch_in_progress { + if last_epoch_in_progress == 0 { + info!("Starting daemon on clean jobs table"); + } else { + warn!( + "Something may be wrong, last verified epoch is greather than last epoch in progress" + ); + } + + // So we should schedule the greater epoch, which id + latest_scheduled_epoch = latest_verified_epoch_id; + } + + // Decide basing on actual state if epochs_behind > constants::TARGET_BATCH_SIZE { // is_node_in_sync = true; warn!( - "Bankai is out of sync now. Node is {} epochs behind network. Current Beacon Chain state: [Epoch: {} Sync Committee: {}] | Latest verified: [Epoch: {} Sync Committee: {}] | Sync in progress...", - epochs_behind, epoch_id, sync_committee_id, latest_verified_epoch_id, latest_verified_sync_committee_id + "Bankai is out of sync now. Node is {} epochs behind network. Current Beacon Chain state: [Slot: {} Epoch: {} Sync Committee: {}] | Latest verified: [Slot: {} Epoch: {} Sync Committee: {}] | Latest in progress: [Epoch: {} Sync Committee: {}] | Sync in progress...", + epochs_behind, parsed_event.slot, epoch_id, sync_committee_id, latest_verified_epoch_slot, latest_verified_epoch_id, latest_verified_sync_committee_id, last_epoch_in_progress, last_sync_committee_in_progress ); // Check if we have in progress all epochs that need to be processed, if no, run job - if last_epoch_in_progress < (epoch_id - constants::TARGET_BATCH_SIZE) { + if latest_scheduled_epoch < (epoch_id - constants::TARGET_BATCH_SIZE) { // And chceck how many jobs are already in progress and if we fit in the limit let in_progress_jobs_count = db_manager.count_jobs_in_progress().await.unwrap(); - if in_progress_jobs_count.unwrap().to_u64().unwrap() - >= constants::MAX_CONCURRENT_JOBS_IN_PROGRESS - { + if in_progress_jobs_count.unwrap() >= constants::MAX_CONCURRENT_JOBS_IN_PROGRESS { info!( - "Currently not starting new job, MAX_CONCURRENT_JOBS_IN_PROGRESS limit reached" + "Currently not starting 
new job, MAX_CONCURRENT_JOBS_IN_PROGRESS limit reached, jobs in progress: {}", + in_progress_jobs_count.unwrap() ); return; } + let currently_processed_sync_committee_id = + helpers::get_sync_committee_id_by_epoch(latest_scheduled_epoch); + + info!( + "Currently processed sync committee epochs ranges from {} to {}", + helpers::get_first_epoch_for_sync_committee(currently_processed_sync_committee_id), + helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) + ); + + if helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) + == latest_scheduled_epoch + { + // We reached end of current sync committee, need to schedule new sync committee proving + match run_sync_committee_update_job( + db_manager.clone(), + currently_processed_sync_committee_id + 1, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating job: {}", e); + } + }; + } + + let epoch_to_start_from = latest_scheduled_epoch + 1; + let mut epoch_to_end_on = latest_scheduled_epoch + 1 + constants::TARGET_BATCH_SIZE; + + // Iw we follow the betch size of 32 always, this souldnt happen, but if we have not same size batches, it can be trigerred + if epoch_to_end_on + > helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) + { + // The end epoch is further that current sync committee + // In this case we can simply assingn sync commite latest epoch as epoch_to_end_on + epoch_to_end_on = helpers::get_last_epoch_for_sync_committee( + currently_processed_sync_committee_id, + ); + } + + //info!("{} epochs let to proccess in associated sync committee term",); + match run_batch_epoch_update_job( db_manager.clone(), - last_slot_in_progress + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), - last_epoch_in_progress, - last_epoch_in_progress + constants::TARGET_BATCH_SIZE, + get_first_slot_for_epoch(epoch_to_start_from) + + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), + 
epoch_to_start_from, + epoch_to_end_on, tx.clone(), ) .await { - // Insert new job record to DB Ok(()) => {} Err(e) => { error!("Error while creating job: {}", e); @@ -346,42 +394,33 @@ async fn handle_beacon_chain_head_event( } else { debug!("All reqired jobs are now queued and processing"); } - - // let epoch_update = EpochUpdateBatch::new_by_slot( - // &bankai, - // &db_client_for_listener.clone(), - // last_slot_in_progress - // + constants::SLOTS_PER_EPOCH, - // ) - // .await?; } else if epochs_behind == constants::TARGET_BATCH_SIZE { // This is when we are synced properly and new epoch batch needs to be inserted info!( - "Starting provessing next epoch batch. Current Beacon Chain epoch: {} Latest verified epoch: {}", + "Starting processing next epoch batch. Current Beacon Chain epoch: {} Latest verified epoch: {}", epoch_id, latest_verified_epoch_id ); + } else if epochs_behind < constants::TARGET_BATCH_SIZE { + // When we are in sync and not yet reached the TARGET_BATCH_SIZE epochs lagging behind actual beacon chian state + debug!("Target batch size not reached yet, daemon is in sync"); } // Check if sync committee update is needed - //sync_committee_id - if latest_epoch_slot % constants::SLOTS_PER_SYNC_COMMITTEE == 0 {} - - //return; + if latest_verified_epoch_slot % constants::SLOTS_PER_SYNC_COMMITTEE == 0 {} // When we doing EpochBatchUpdate the slot is latest_batch_output // So for each batch update we takin into account effectiviely the latest slot from given batch //let db_client = db_client.clone(); - //evaluate_jobs_statuses(db_manager.clone()).await; - broadcast_onchain_ready_jobs(db_manager.clone(), bankai.clone()).await; + let _ = evaluate_jobs_statuses(db_manager.clone(), latest_verified_sync_committee_id).await; + let _ = broadcast_onchain_ready_jobs(db_manager.clone(), bankai.clone()).await; // We can do all circuit computations up to latest slot in advance, but the onchain broadcasts must be send in correct order // By correct order mean that 
within the same sync committe the epochs are not needed to be broadcasted in order // but the order of sync_commite_update->epoch_update must be correct, we firstly need to have correct sync committe veryfied // before we verify epoch "belonging" to this sync committee - if parsed_event.epoch_transition { //info!("Beacon Chain epoch transition detected. New epoch: {} | Starting processing epoch proving...", epoch_id); info!( @@ -396,31 +435,6 @@ async fn handle_beacon_chain_head_event( parsed_event.slot, sync_committee_id ); } - - // let job_id = Uuid::new_v4(); - // let job = Job { - // job_id: job_id.clone(),evaluate_jobs_statuses - // job_type: JobType::EpochBatchUpdate, - // job_status: JobStatus::Created, - // slot: parsed_event.slot, // It is the last slot for given batch - // }; - - // let db_client = db_client_for_listener.clone(); - // match create_job(db_client, job.clone()).await { - // // Insert new job record to DB - // Ok(()) => { - // // Handle success - // info!("Job created successfully with ID: {}", job_id); - // if tx_for_task.send(job).await.is_err() { - // error!("Failed to send job."); - // } - // // If starting committee update job, first ensule that the corresponding slot is registered in contract - // } - // Err(e) => { - // // Handle the error - // error!("Error creating job: {}", e); - // } - // } } } @@ -446,8 +460,8 @@ async fn run_batch_epoch_update_job( Ok(()) => { // Handle success info!( - "[EPOCH BATCH UPDATE] Job created successfully with ID: {}", - job_id + "[EPOCH BATCH UPDATE] Job created successfully with ID: {} Epochs range from {} to {}", + job_id, batch_range_begin_epoch, batch_range_end_epoch ); if tx.send(job).await.is_err() { return Err("Failed to send job".into()); @@ -498,37 +512,23 @@ async fn run_sync_committee_update_job( } } -// async fn evaluate_jobs_statuses( -// db_manager: Arc, -// last_verified_epoch: u64, -// ) -> Result<(), Box> { -// // The purpose of this function is to manage the sequential nature of 
onchain verification of epochs and sync committees -// // Firstly we get all jobs with status OFFCHAIN_COMPUTATION_FINISHED -// let jobs = db_manager -// .get_compute_finsihed_jobs_to_proccess_onchain_call(last_verified_epoch) -// .await?; - -// // Iterate through the jobs and process them -// for job in jobs { -// match job.job_type { -// JobType::EpochBatchUpdate => { -// let update = EpochUpdateBatch::from_json::( -// job.batch_range_begin_epoch.try_into().unwrap(), -// job.batch_range_end_epoch.try_into().unwrap(), -// )?; - -// println!( -// "Successfully submitted batch epoch update for job_uuid: {}", -// job.job_uuid -// ); -// } -// JobType::EpochUpdate => {} -// JobType::SyncCommitteeUpdate => {} -// } -// } +async fn evaluate_jobs_statuses( + db_manager: Arc, + latest_verified_sync_committee_id: u64, +) -> Result<(), Box> { + // The purpose of this function is to manage the sequential nature of onchain verification of epochs and sync committees + // Firstly we get all jobs with status OFFCHAIN_COMPUTATION_FINISHED + // We calculating the start and end epoch for provided last verified sync committe + // and setting READY_TO_BROADCAST status for epochs up to the last epoch belonging to provided latest_verified_sync_committee_id + let first_epoch = get_first_epoch_for_sync_committee(latest_verified_sync_committee_id); + let last_epoch = get_first_epoch_for_sync_committee(latest_verified_sync_committee_id); + + db_manager + .set_ready_to_broadcast_for_batch_epochs(first_epoch, last_epoch) // Set READY_TO_BROADCAST when OFFCHAIN_COMPUTATION_FINISHED + .await?; -// Ok(()) -// } + Ok(()) +} async fn broadcast_onchain_ready_jobs( db_manager: Arc, @@ -564,12 +564,21 @@ async fn broadcast_onchain_ready_jobs( job.job_uuid, txhash ); - let epoch_proof = bankai - .starknet_client - .get_epoch_proof(job.slot.try_into().unwrap(), &bankai.config); + db_manager + .update_job_status(job.job_uuid, JobStatus::Done) + .await?; + + // let epoch_proof = bankai + // 
.starknet_client + // .get_epoch_proof(job.slot.try_into().unwrap(), &bankai.config) + // .await + // .unwrap(); // db_manager - // .insert_verified_epochs_batch(job.slot / 0x2000, epoch_proof) + // .insert_verified_epoch( + // job.batch_range_end_epoch.try_into().unwrap(), + // epoch_proof, + // ) // .await?; } JobType::EpochUpdate => {} @@ -595,6 +604,10 @@ async fn broadcast_onchain_ready_jobs( db_manager.set_job_txhash(job.job_uuid, txhash).await?; + db_manager + .update_job_status(job.job_uuid, JobStatus::Done) + .await?; + // Insert data to DB after successful onchain sync committee verification //let sync_committee_hash = update.expected_circuit_outputs.committee_hash; let sync_committee_hash = match bankai @@ -602,7 +615,7 @@ async fn broadcast_onchain_ready_jobs( .get_committee_hash(job.slot.to_u64().unwrap(), &bankai.config) .await { - Ok((sync_committee_hash)) => sync_committee_hash, + Ok(sync_committee_hash) => sync_committee_hash, Err(e) => { // Handle the error return Err(e.into()); @@ -663,160 +676,6 @@ async fn process_job( bankai: Arc, ) -> Result<(), Box> { match job.job_type { - JobType::EpochUpdate => { - // Epoch job - info!( - "[EPOCH JOB] Started processing epoch job: {} for epoch {}", - job.job_id, job.slot - ); - - //update_job_status(&db_client, job.job_id, JobStatus::Created).await?; - - // 1) Fetch the latest on-chain verified epoch - // let latest_epoch_slot = bankai - // .starknet_client - // .get_latest_epoch_slot(&bankai.config) - // .await?; - - // info!( - // "[EPOCH JOB] Latest onchain verified epoch slot: {}", - // latest_epoch_slot - // ); - - //let latest_epoch_slot = ; - - // make sure next_epoch % 32 == 0 - let next_epoch = (u64::try_from(job.slot).unwrap() / constants::SLOTS_PER_EPOCH) - * constants::SLOTS_PER_EPOCH - + constants::SLOTS_PER_EPOCH; - info!( - "[EPOCH JOB] Fetching Inputs for next Epoch: {}...", - next_epoch - ); - - // 2) Fetch the proof - let proof = bankai.get_epoch_proof(next_epoch).await?; - info!( - 
"[EPOCH JOB] Fetched Inputs successfully for Epoch: {}", - next_epoch - ); - - db_manager - .update_job_status(job.job_id, JobStatus::ProgramInputsPrepared) - .await?; - - // 3) Generate PIE - info!( - "[EPOCH JOB] Starting Cairo execution and PIE generation for Epoch: {}...", - next_epoch - ); - - CairoRunner::generate_pie(&proof, &bankai.config).await?; - - info!( - "[EPOCH JOB] Pie generated successfully for Epoch: {}...", - next_epoch - ); - - db_manager - .update_job_status(job.job_id, JobStatus::PieGenerated) - .await?; - - // // 4) Submit offchain proof-generation job to Atlantic - // info!("[EPOCH JOB] Sending proof generation query to Atlantic..."); - - // let batch_id = bankai.atlantic_client.submit_batch(proof).await?; - - // info!( - // "[EPOCH JOB] Proof generation batch submitted to Atlantic. QueryID: {}", - // batch_id - // ); - - // update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; - // set_atlantic_job_queryid( - // &db_client, - // job.job_id, - // batch_id.clone(), - // AtlanticJobType::ProofGeneration, - // ) - // .await?; - - // // Pool for Atlantic execution done - // bankai - // .atlantic_client - // .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) - // .await?; - - // info!( - // "[EPOCH JOB] Proof generation done by Atlantic. QueryID: {}", - // batch_id - // ); - - // let proof = bankai - // .atlantic_client - // .fetch_proof(batch_id.as_str()) - // .await?; - - // info!( - // "[EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", - // batch_id - // ); - - // update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; - - // // 5) Submit wrapped proof request - // info!("[EPOCH JOB] Sending proof wrapping query to Atlantic.."); - // let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; - // info!( - // "[EPOCH JOB] Proof wrapping query submitted to Atlantic. 
Wrapping QueryID: {}", - // wrapping_batch_id - // ); - - // update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; - // set_atlantic_job_queryid( - // &db_client, - // job.job_id, - // wrapping_batch_id.clone(), - // AtlanticJobType::ProofWrapping, - // ) - // .await?; - - // // Pool for Atlantic execution done - // bankai - // .atlantic_client - // .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) - // .await?; - - // update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; - - // info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); - - // update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; - - // // 6) Submit epoch update onchain - // info!("[EPOCH JOB] Calling epoch update onchain..."); - // let update = EpochUpdate::from_json::(next_epoch)?; - - // let txhash = bankai - // .starknet_client - // .submit_update(update.expected_circuit_outputs, &bankai.config) - // .await?; - - // set_job_txhash(&db_client, job.job_id, txhash).await?; - - // info!("[EPOCH JOB] Successfully submitted epoch update..."); - - // update_job_status(&db_client, job.job_id, JobStatus::ProofDecommitmentCalled).await?; - - // Now we can get proof from contract? - // bankai.starknet_client.get_epoch_proof( - // &self, - // slot: u64, - // config: &BankaiConfig) - - // Insert data to DB after successful onchain epoch verification - // insert_verified_epoch(&db_client, job.slot / 0x2000, epoch_proof).await?; - } JobType::SyncCommitteeUpdate => { // Sync committee job info!( @@ -947,7 +806,7 @@ async fn process_job( info!("[SYNC COMMITTEE JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. 
Wrapping QueryID: {}", wrapping_batch_id); db_manager - .update_job_status(job.job_id, JobStatus::VerifiedFactRegistered) + .update_job_status(job.job_id, JobStatus::OffchainComputationFinished) .await?; } JobType::EpochBatchUpdate => { @@ -1053,6 +912,160 @@ async fn process_job( .update_job_status(job.job_id, JobStatus::OffchainComputationFinished) .await?; } + JobType::EpochUpdate => { + // Epoch job + info!( + "[EPOCH JOB] Started processing epoch job: {} for epoch {}", + job.job_id, job.slot + ); + + //update_job_status(&db_client, job.job_id, JobStatus::Created).await?; + + // 1) Fetch the latest on-chain verified epoch + // let latest_epoch_slot = bankai + // .starknet_client + // .get_latest_epoch_slot(&bankai.config) + // .await?; + + // info!( + // "[EPOCH JOB] Latest onchain verified epoch slot: {}", + // latest_epoch_slot + // ); + + //let latest_epoch_slot = ; + + // make sure next_epoch % 32 == 0 + // let next_epoch = (u64::try_from(job.slot).unwrap() / constants::SLOTS_PER_EPOCH) + // * constants::SLOTS_PER_EPOCH + // + constants::SLOTS_PER_EPOCH; + // info!( + // "[EPOCH JOB] Fetching Inputs for next Epoch: {}...", + // next_epoch + // ); + + // // 2) Fetch the proof + // let proof = bankai.get_epoch_proof(next_epoch).await?; + // info!( + // "[EPOCH JOB] Fetched Inputs successfully for Epoch: {}", + // next_epoch + // ); + + // db_manager + // .update_job_status(job.job_id, JobStatus::ProgramInputsPrepared) + // .await?; + + // // 3) Generate PIE + // info!( + // "[EPOCH JOB] Starting Cairo execution and PIE generation for Epoch: {}...", + // next_epoch + // ); + + // CairoRunner::generate_pie(&proof, &bankai.config).await?; + + // info!( + // "[EPOCH JOB] Pie generated successfully for Epoch: {}...", + // next_epoch + // ); + + // db_manager + // .update_job_status(job.job_id, JobStatus::PieGenerated) + // .await?; + + // // 4) Submit offchain proof-generation job to Atlantic + // info!("[EPOCH JOB] Sending proof generation query to Atlantic..."); 
+ + // let batch_id = bankai.atlantic_client.submit_batch(proof).await?; + + // info!( + // "[EPOCH JOB] Proof generation batch submitted to Atlantic. QueryID: {}", + // batch_id + // ); + + // update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; + // set_atlantic_job_queryid( + // &db_client, + // job.job_id, + // batch_id.clone(), + // AtlanticJobType::ProofGeneration, + // ) + // .await?; + + // // Pool for Atlantic execution done + // bankai + // .atlantic_client + // .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) + // .await?; + + // info!( + // "[EPOCH JOB] Proof generation done by Atlantic. QueryID: {}", + // batch_id + // ); + + // let proof = bankai + // .atlantic_client + // .fetch_proof(batch_id.as_str()) + // .await?; + + // info!( + // "[EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", + // batch_id + // ); + + // update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; + + // // 5) Submit wrapped proof request + // info!("[EPOCH JOB] Sending proof wrapping query to Atlantic.."); + // let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; + // info!( + // "[EPOCH JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", + // wrapping_batch_id + // ); + + // update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; + // set_atlantic_job_queryid( + // &db_client, + // job.job_id, + // wrapping_batch_id.clone(), + // AtlanticJobType::ProofWrapping, + // ) + // .await?; + + // // Pool for Atlantic execution done + // bankai + // .atlantic_client + // .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) + // .await?; + + // update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; + + // info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. 
Wrapping QueryID: {}", wrapping_batch_id); + + // update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; + + // // 6) Submit epoch update onchain + // info!("[EPOCH JOB] Calling epoch update onchain..."); + // let update = EpochUpdate::from_json::(next_epoch)?; + + // let txhash = bankai + // .starknet_client + // .submit_update(update.expected_circuit_outputs, &bankai.config) + // .await?; + + // set_job_txhash(&db_client, job.job_id, txhash).await?; + + // info!("[EPOCH JOB] Successfully submitted epoch update..."); + + // update_job_status(&db_client, job.job_id, JobStatus::ProofDecommitmentCalled).await?; + + // Now we can get proof from contract? + // bankai.starknet_client.get_epoch_proof( + // &self, + // slot: u64, + // config: &BankaiConfig) + + // Insert data to DB after successful onchain epoch verification + // insert_verified_epoch(&db_client, job.slot / 0x2000, epoch_proof).await?; + } } Ok(()) diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index 93bac16..2aa3dd6 100644 --- a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -1,6 +1,9 @@ use crate::constants::{SLOTS_PER_EPOCH, TARGET_BATCH_SIZE}; use crate::epoch_update::{EpochUpdate, ExpectedEpochUpdateOutputs}; -use crate::helpers::{calculate_slots_range_for_batch, get_first_slot_for_epoch, slot_to_epoch_id}; +use crate::helpers::{ + calculate_slots_range_for_batch, get_first_slot_for_epoch, get_sync_committee_id_by_epoch, + slot_to_epoch_id, +}; use crate::traits::{Provable, Submittable}; use crate::utils::hashing::get_committee_hash; @@ -112,85 +115,85 @@ impl EpochUpdateBatch { Ok(batch) } - pub(crate) async fn new_by_slot( - bankai: &BankaiClient, - db_manager: Arc, - slot: u64, - ) -> Result { - let _permit = bankai - .config - .epoch_data_fetching_semaphore - .clone() - .acquire_owned() - .await - .map_err(|e| Error::CairoRunError(format!("Semaphore error: {}", e)))?; - - let (start_slot, end_slot) = 
calculate_slots_range_for_batch(slot); - let mut epochs = vec![]; - - // Fetch epochs sequentially from start_slot to end_slot, incrementing by 32 each time - let mut current_slot = start_slot; - while current_slot < end_slot { - info!( - "Getting data for slot: {} Epoch: {} Epochs batch position {}/{}", - current_slot, - slot_to_epoch_id(current_slot), - epochs.len(), - TARGET_BATCH_SIZE - ); - let epoch_update = EpochUpdate::new(&bankai.client, current_slot).await?; - - epochs.push(epoch_update); - current_slot += 32; - } - - let circuit_inputs = EpochUpdateBatchInputs { - committee_hash: get_committee_hash(epochs[0].circuit_inputs.aggregate_pub.0), - epochs, - }; - - let expected_circuit_outputs = ExpectedEpochBatchOutputs::from_inputs(&circuit_inputs); - - let epoch_hashes = circuit_inputs - .epochs - .iter() - .map(|epoch| epoch.expected_circuit_outputs.hash()) - .collect::>(); - - let (root, paths) = compute_paths(epoch_hashes.clone()); - - // Verify each path matches the root - current_slot = start_slot; - for (index, path) in paths.iter().enumerate() { - let computed_root = hash_path(epoch_hashes[index], path, index); - if computed_root != root { - panic!("Path {} does not match root", index); - } - // Insert merkle paths to database - let current_epoch = slot_to_epoch_id(current_slot); - for (path_index, current_path) in path.iter().enumerate() { - db_manager - .insert_merkle_path_for_epoch( - current_epoch.to_i32().unwrap(), - path_index.to_i32().unwrap(), - current_path.to_hex_string(), - ) - .await - .map_err(|e| Error::DatabaseError(e.to_string()))?; - } - current_slot += 32; - } - - info!("Paths {:?}", paths); - - let batch = EpochUpdateBatch { - circuit_inputs, - expected_circuit_outputs, - merkle_paths: paths, - }; - - Ok(batch) - } + // pub(crate) async fn new_by_slot( + // bankai: &BankaiClient, + // db_manager: Arc, + // slot: u64, + // ) -> Result { + // let _permit = bankai + // .config + // .epoch_data_fetching_semaphore + // .clone() + // 
.acquire_owned() + // .await + // .map_err(|e| Error::CairoRunError(format!("Semaphore error: {}", e)))?; + + // let (start_slot, end_slot) = calculate_slots_range_for_batch(slot); + // let mut epochs = vec![]; + + // // Fetch epochs sequentially from start_slot to end_slot, incrementing by 32 each time + // let mut current_slot = start_slot; + // while current_slot < end_slot { + // info!( + // "Getting data for slot: {} Epoch: {} Epochs batch position {}/{}", + // current_slot, + // slot_to_epoch_id(current_slot), + // epochs.len(), + // TARGET_BATCH_SIZE + // ); + // let epoch_update = EpochUpdate::new(&bankai.client, current_slot).await?; + + // epochs.push(epoch_update); + // current_slot += 32; + // } + + // let circuit_inputs = EpochUpdateBatchInputs { + // committee_hash: get_committee_hash(epochs[0].circuit_inputs.aggregate_pub.0), + // epochs, + // }; + + // let expected_circuit_outputs = ExpectedEpochBatchOutputs::from_inputs(&circuit_inputs); + + // let epoch_hashes = circuit_inputs + // .epochs + // .iter() + // .map(|epoch| epoch.expected_circuit_outputs.hash()) + // .collect::>(); + + // let (root, paths) = compute_paths(epoch_hashes.clone()); + + // // Verify each path matches the root + // current_slot = start_slot; + // for (index, path) in paths.iter().enumerate() { + // let computed_root = hash_path(epoch_hashes[index], path, index); + // if computed_root != root { + // panic!("Path {} does not match root", index); + // } + // // Insert merkle paths to database + // let current_epoch = slot_to_epoch_id(current_slot); + // for (path_index, current_path) in path.iter().enumerate() { + // db_manager + // .insert_merkle_path_for_epoch( + // current_epoch, + // path_index.to_u64().unwrap(), + // current_path.to_hex_string(), + // ) + // .await + // .map_err(|e| Error::DatabaseError(e.to_string()))?; + // } + // current_slot += 32; + // } + + // info!("Paths {:?}", paths); + + // let batch = EpochUpdateBatch { + // circuit_inputs, + // 
expected_circuit_outputs, + // merkle_paths: paths, + // }; + + // Ok(batch) + // } pub(crate) async fn new_by_epoch_range( bankai: &BankaiClient, @@ -209,14 +212,16 @@ impl EpochUpdateBatch { let mut epochs = vec![]; // Fetch epochs sequentially from start_slot to end_slot, incrementing by 32 each time + let calculated_batch_size = end_epoch - start_epoch; let mut current_epoch = start_epoch; - while current_epoch < end_epoch { + while current_epoch <= end_epoch { info!( - "Getting data for Epoch: {} First slot for this epoch: {} | Epochs batch position {}/{}", + "Getting data for Epoch: {} (SyncCommittee: {}) First slot for this epoch: {} | Epochs batch position {}/{}", current_epoch, + get_sync_committee_id_by_epoch(current_epoch), get_first_slot_for_epoch(current_epoch), - epochs.len(), - TARGET_BATCH_SIZE + epochs.len()+1, + calculated_batch_size ); let epoch_update = EpochUpdate::new(&bankai.client, get_first_slot_for_epoch(current_epoch)).await?; @@ -252,8 +257,8 @@ impl EpochUpdateBatch { for (path_index, current_path) in path.iter().enumerate() { db_manager .insert_merkle_path_for_epoch( - current_epoch.to_i32().unwrap(), - path_index.to_i32().unwrap(), + current_epoch, + path_index.to_u64().unwrap(), current_path.to_hex_string(), ) .await diff --git a/client-rs/src/epoch_update.rs b/client-rs/src/epoch_update.rs index cbed8c0..c16daeb 100644 --- a/client-rs/src/epoch_update.rs +++ b/client-rs/src/epoch_update.rs @@ -19,15 +19,6 @@ use tracing::info; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -#[derive(Debug, Serialize, Deserialize)] -pub struct EpochProof { - pub header_root: FixedBytes<32>, - pub state_root: FixedBytes<32>, - pub n_signers: u64, - pub execution_hash: FixedBytes<32>, - pub execution_height: u64, -} - #[derive(Debug, Serialize, Deserialize)] pub struct EpochUpdate { pub circuit_inputs: EpochCircuitInputs, diff --git a/client-rs/src/helpers.rs b/client-rs/src/helpers.rs index 3c8347a..6483a51 100644 --- 
a/client-rs/src/helpers.rs +++ b/client-rs/src/helpers.rs @@ -4,6 +4,8 @@ use crate::{ }, Error, }; +use alloy_primitives::FixedBytes; +use starknet::core::types::Felt; use tracing::info; pub fn slot_to_epoch_id(slot: u64) -> u64 { @@ -62,3 +64,18 @@ pub fn get_first_slot_for_epoch(slot: u64) -> u64 { pub fn get_last_slot_for_epoch(slot: u64) -> u64 { (slot + 1) * SLOTS_PER_EPOCH - 1 } + +pub fn get_sync_committee_id_by_epoch(epoch: u64) -> u64 { + epoch / EPOCHS_PER_SYNC_COMMITTEE +} + +// Since beacon chain RPCs have different response structure (quicknode responds different than nidereal) we use this event extraction logic +pub fn extract_json_from_event(event_text: &str) -> Option { + for line in event_text.lines() { + if line.starts_with("data:") { + // Extract the JSON after "data:" + return Some(line.trim_start_matches("data:").trim().to_string()); + } + } + None +} diff --git a/client-rs/src/main.rs b/client-rs/src/main.rs index 3ef3ff4..4565e7e 100644 --- a/client-rs/src/main.rs +++ b/client-rs/src/main.rs @@ -158,6 +158,10 @@ enum Commands { #[arg(long, short)] batch_id: String, }, + GetEpochProof { + #[arg(long, short)] + epoch_id: u64, + }, VerifyEpoch { #[arg(long, short)] batch_id: String, @@ -392,6 +396,14 @@ async fn main() -> Result<(), Error> { println!("Batch not completed yet. 
Status: {}", status); } } + Commands::GetEpochProof { epoch_id } => { + let epoch_proof = bankai + .starknet_client + .get_epoch_proof(epoch_id, &bankai.config) + .await?; + + println!("Retrieved epoch proof from contract: {:?}", epoch_proof); + } } Ok(()) diff --git a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs index 99a4380..581d7b5 100644 --- a/client-rs/src/routes/mod.rs +++ b/client-rs/src/routes/mod.rs @@ -7,6 +7,7 @@ use axum::{ use num_traits::cast::ToPrimitive; use serde_json::{json, Value}; use tracing::error; +use uuid::Uuid; // RPC requests handling functions // @@ -49,45 +50,55 @@ pub async fn handle_get_epoch_update( } } -// async fn handle_get_epoch_proof( -// Path(slot): Path, -// State(state): State, -// ) -> impl IntoResponse { -// match state.bankai.starknet_client.get_epoch_proof(slot).await { -// Ok(epoch_update) => { -// // Convert `EpochUpdate` to `serde_json::Value` -// let value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { -// eprintln!("Failed to serialize EpochUpdate: {:?}", err); -// json!({ "error": "Internal server error" }) -// }); -// Json(value) -// } -// Err(err) => { -// eprintln!("Failed to fetch proof: {:?}", err); -// Json(json!({ "error": "Failed to fetch proof" })) -// } -// } -// } +pub async fn handle_get_epoch_proof( + Path(slot): Path, + State(state): State, +) -> impl IntoResponse { + match state + .bankai + .starknet_client + .get_epoch_proof(slot, &state.bankai.config) + .await + { + Ok(epoch_update) => { + // Convert `EpochUpdate` to `serde_json::Value` + let value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { + eprintln!("Failed to serialize EpochUpdate: {:?}", err); + json!({ "error": "Internal server error" }) + }); + Json(value) + } + Err(err) => { + eprintln!("Failed to fetch proof: {:?}", err); + Json(json!({ "error": "Failed to fetch proof" })) + } + } +} -// async fn handle_get_committee_hash( -// Path(committee_id): Path, -// State(state): State, -// ) -> impl 
IntoResponse { -// match state.bankai.starknet_client.get_committee_hash(committee_id).await { -// Ok(committee_hash) => { -// // Convert `EpochUpdate` to `serde_json::Value` -// let value = serde_json::to_value(committee_hash).unwrap_or_else(|err| { -// eprintln!("Failed to serialize EpochUpdate: {:?}", err); -// json!({ "error": "Internal server error" }) -// }); -// Json(value) -// } -// Err(err) => { -// eprintln!("Failed to fetch proof: {:?}", err); -// Json(json!({ "error": "Failed to fetch proof" })) -// } -// } -// } +pub async fn handle_get_committee_hash( + Path(committee_id): Path, + State(state): State, +) -> impl IntoResponse { + match state + .bankai + .starknet_client + .get_committee_hash(committee_id, &state.bankai.config) + .await + { + Ok(committee_hash) => { + // Convert `EpochUpdate` to `serde_json::Value` + let value = serde_json::to_value(committee_hash).unwrap_or_else(|err| { + eprintln!("Failed to serialize EpochUpdate: {:?}", err); + json!({ "error": "Internal server error" }) + }); + Json(value) + } + Err(err) => { + eprintln!("Failed to fetch proof: {:?}", err); + Json(json!({ "error": "Failed to fetch proof" })) + } + } +} pub async fn handle_get_latest_verified_slot(State(state): State) -> impl IntoResponse { match state @@ -114,18 +125,50 @@ pub async fn handle_get_latest_verified_slot(State(state): State) -> i } } -// async fn handle_get_job_status( -// Path(job_id): Path, -// State(state): State, -// ) -> impl IntoResponse { -// match fetch_job_status(&state.db_client, job_id).await { -// Ok(job_status) => Json(job_status), -// Err(err) => { -// eprintln!("Failed to fetch job status: {:?}", err); -// Json(json!({ "error": "Failed to fetch job status" })) -// } -// } -// } +pub async fn handle_get_latest_verified_committee( + State(state): State, +) -> impl IntoResponse { + match state + .bankai + .starknet_client + .get_latest_committee_id(&state.bankai.config) + .await + { + Ok(latest_epoch) => { + // Convert `Felt` to a string and 
parse it as a hexadecimal number + let hex_string = latest_epoch.to_string(); // Ensure this converts to a "0x..." string + match u64::from_str_radix(hex_string.trim_start_matches("0x"), 16) { + Ok(decimal_epoch) => Json(json!({ "latest_verified_epoch": decimal_epoch })), + Err(err) => { + eprintln!("Failed to parse latest_epoch as decimal: {:?}", err); + Json(json!({ "error": "Invalid epoch format" })) + } + } + } + Err(err) => { + eprintln!("Failed to fetch latest epoch: {:?}", err); + Json(json!({ "error": "Failed to fetch latest epoch" })) + } + } +} + +pub async fn handle_get_job_status( + Path(job_id): Path, + State(state): State, +) -> impl IntoResponse { + match state + .db_manager + .fetch_job_status(Uuid::parse_str(job_id.to_string().as_str()).unwrap()) + .await + { + Ok(Some(job_status)) => Json(json!({ "status": job_status.to_string()})), + Ok(None) => Json(json!({ "error": "Job not found" })), + Err(err) => { + eprintln!("Failed to fetch job status: {:?}", err); + Json(json!({ "error": "Failed to fetch job status" })) + } + } +} pub async fn handle_get_merkle_paths_for_epoch( Path(epoch_id): Path, diff --git a/client-rs/src/state.rs b/client-rs/src/state.rs index 47e3003..84fad15 100644 --- a/client-rs/src/state.rs +++ b/client-rs/src/state.rs @@ -50,8 +50,8 @@ pub enum JobStatus { ReadyToBroadcastOnchain, #[postgres(name = "PROOF_VERIFY_CALLED_ONCHAIN")] ProofVerifyCalledOnchain, - #[postgres(name = "VERIFIED_FACT_REGISTERED")] - VerifiedFactRegistered, + #[postgres(name = "DONE")] + Done, #[postgres(name = "ERROR")] Error, #[postgres(name = "CANCELLED")] @@ -71,7 +71,7 @@ impl ToString for JobStatus { JobStatus::OffchainComputationFinished => "OFFCHAIN_COMPUTATION_FINISHED".to_string(), JobStatus::ReadyToBroadcastOnchain => "READY_TO_BROADCAST_ONCHAIN".to_string(), JobStatus::ProofVerifyCalledOnchain => "PROOF_VERIFY_CALLED_ONCHAIN".to_string(), - JobStatus::VerifiedFactRegistered => "VERIFIED_FACT_REGISTERED".to_string(), + JobStatus::Done => 
"DONE".to_string(), JobStatus::Cancelled => "CANCELLED".to_string(), JobStatus::Error => "ERROR".to_string(), } @@ -93,7 +93,7 @@ impl FromStr for JobStatus { "OFFCHAIN_COMPUTATION_FINISHED" => Ok(JobStatus::OffchainComputationFinished), "READY_TO_BROADCAST_ONCHAIN" => Ok(JobStatus::ReadyToBroadcastOnchain), "PROOF_VERIFY_CALLED_ONCHAIN" => Ok(JobStatus::ProofVerifyCalledOnchain), - "VERIFIED_FACT_REGISTERED" => Ok(JobStatus::VerifiedFactRegistered), + "DONE" => Ok(JobStatus::Done), "CANCELLED" => Ok(JobStatus::Cancelled), "ERROR" => Ok(JobStatus::Error), _ => Err(format!("Invalid job status: {}", s)), diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index ce0be18..73a5999 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -1,5 +1,5 @@ -use crate::epoch_update::EpochProof; use crate::state::{AtlanticJobType, Error, Job, JobStatus, JobType}; +use crate::utils::starknet_client::EpochProof; use alloy_primitives::FixedBytes; use starknet::core::types::Felt; use std::str::FromStr; @@ -201,7 +201,7 @@ impl DatabaseManager { .client .query_opt( "SELECT batch_range_end_epoch FROM jobs - WHERE job_status IN ($1, $2) + WHERE job_status IN ($1, $2) AND batch_range_end_epoch != NULL ORDER BY batch_range_end_epoch DESC LIMIT 1", &[&"CREATED", &"PIE_GENERATED"], @@ -262,35 +262,35 @@ impl DatabaseManager { Ok(paths) } - pub async fn get_compute_finsihed_jobs_to_proccess_onchain_call( - &self, - last_epoch: JobStatus, - ) -> Result, Box> { - let rows = self - .client - .query( - "SELECT * FROM jobs - WHERE job_status = 'OFFCHAIN_COMPUTATION_FINISHED' AND job_type = 'EPOCH_BATCH_UPDATE' AND batch_range_end_epoch <= $1", - &[&last_epoch], - ) - .await?; + // pub async fn get_compute_finsihed_jobs_to_proccess_onchain_call( + // &self, + // last_epoch: JobStatus, + // ) -> Result, Box> { + // let rows = self + // .client + // .query( + // "SELECT * FROM jobs + // WHERE job_status = 
'OFFCHAIN_COMPUTATION_FINISHED' AND job_type = 'EPOCH_BATCH_UPDATE' AND batch_range_end_epoch <= $1", + // &[&last_epoch], + // ) + // .await?; - // Map rows into Job structs - let jobs: Vec = rows - .into_iter() - .map(|row: Row| JobSchema { - job_uuid: row.get("job_uuid"), - job_status: row.get("job_status"), - slot: row.get("slot"), - batch_range_begin_epoch: row.get("batch_range_begin_epoch"), - batch_range_end_epoch: row.get("batch_range_end_epoch"), - job_type: row.get("type"), - updated_at: row.get("updated_at"), - }) - .collect(); + // // Map rows into Job structs + // let jobs: Vec = rows + // .into_iter() + // .map(|row: Row| JobSchema { + // job_uuid: row.get("job_uuid"), + // job_status: row.get("job_status"), + // slot: row.get("slot"), + // batch_range_begin_epoch: row.get("batch_range_begin_epoch"), + // batch_range_end_epoch: row.get("batch_range_end_epoch"), + // job_type: row.get("type"), + // updated_at: row.get("updated_at"), + // }) + // .collect(); - Ok(jobs) - } + // Ok(jobs) + // } pub async fn get_jobs_with_status( &self, @@ -349,42 +349,58 @@ impl DatabaseManager { Ok(()) } - pub async fn set_job_txhash( + pub async fn set_ready_to_broadcast_for_batch_epochs( &self, - job_id: Uuid, - txhash: Felt, + first_epoch: u64, + last_epoch: u64, ) -> Result<(), Box> { self.client .execute( - "UPDATE jobs SET tx_hash = $1, updated_at = NOW() WHERE job_uuid = $2", - &[&txhash.to_string(), &job_id], + "UPDATE jobs + SET job_status = 'READY_TO_BROADCAST', updated_at = NOW() + WHERE batch_range_begin_epoch >= $1 AND batch_range_end_epoch << $2 AND job_type = 'EPOCH_UPDATE_BATCH'", + &[&first_epoch.to_string(), &last_epoch.to_string()], ) .await?; Ok(()) } - pub async fn cancell_all_unfinished_jobs( + pub async fn set_job_txhash( &self, + job_id: Uuid, + txhash: Felt, ) -> Result<(), Box> { self.client .execute( - "UPDATE jobs SET status = $1, updated_at = NOW() WHERE status = 'FETCHING'", - &[&JobStatus::Cancelled.to_string()], + "UPDATE jobs SET tx_hash 
= $1, updated_at = NOW() WHERE job_uuid = $2", + &[&txhash.to_string(), &job_id], ) .await?; Ok(()) } + // pub async fn cancell_all_unfinished_jobs( + // &self, + // ) -> Result<(), Box> { + // self.client + // .execute( + // "UPDATE jobs SET status = $1, updated_at = NOW() WHERE status = 'FETCHING'", + // &[&JobStatus::Cancelled.to_string()], + // ) + // .await?; + // Ok(()) + // } + pub async fn insert_merkle_path_for_epoch( &self, - epoch: i32, - path_index: i32, + epoch: u64, + path_index: u64, path: String, ) -> Result<(), Box> { self.client .execute( "INSERT INTO epoch_merkle_paths (epoch_id, path_index, merkle_path) VALUES ($1, $2, $3)", - &[&epoch, &path_index, &path], + &[&epoch.to_i64(), &path_index.to_i64(), &path], ) .await?; Ok(()) diff --git a/client-rs/src/utils/starknet_client.rs b/client-rs/src/utils/starknet_client.rs index 79085b6..ed5d435 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -1,3 +1,5 @@ +use alloy_primitives::FixedBytes; +use serde::{Deserialize, Serialize}; use starknet::accounts::{Account, ConnectedAccount}; use starknet::core::types::{Call, FunctionCall}; use starknet::macros::selector; @@ -22,6 +24,48 @@ use crate::contract_init::ContractInitializationData; use crate::traits::Submittable; use crate::BankaiConfig; +#[derive(Debug, Serialize, Deserialize)] +pub struct EpochProof { + pub header_root: FixedBytes<32>, + pub state_root: FixedBytes<32>, + pub n_signers: u64, + pub execution_hash: FixedBytes<32>, + pub execution_height: u64, +} + +impl EpochProof { + pub fn from_contract_return_value(calldata: Vec) -> Result { + if calldata.len() != 8 { + return Err("Invalid return value length. 
Expected 8 elements.".to_string()); + } + + let header_root = combine_to_fixed_bytes(calldata[0], calldata[1])?; + let state_root = combine_to_fixed_bytes(calldata[2], calldata[3])?; + let n_signers = calldata[4].try_into().unwrap(); + let execution_hash = combine_to_fixed_bytes(calldata[5], calldata[6])?; + let execution_height = calldata[7].try_into().unwrap(); + + Ok(EpochProof { + header_root, + state_root, + n_signers, + execution_hash, + execution_height, + }) + } +} + +fn combine_to_fixed_bytes(high: Felt, low: Felt) -> Result, String> { + let mut bytes = [0u8; 32]; + let high_bytes = high.to_bytes_le(); + let low_bytes = low.to_bytes_le(); + + bytes[0..16].copy_from_slice(&high_bytes); + bytes[16..32].copy_from_slice(&low_bytes); + + Ok(FixedBytes::from_slice(bytes.as_slice())) +} + #[derive(Debug)] pub struct StarknetClient { account: Arc, LocalWallet>>, @@ -141,15 +185,15 @@ impl StarknetClient { ) .await .map_err(StarknetError::ProviderError)?; - //println!("committee_hash: {:?}", committee_hash); - Ok((committee_hash)) + println!("committee_hash: {:?}", committee_hash); + Ok(committee_hash) } pub async fn get_epoch_proof( &self, slot: u64, config: &BankaiConfig, - ) -> Result<(), StarknetError> { + ) -> Result { let epoch_proof = self .account .provider() @@ -164,7 +208,7 @@ impl StarknetClient { .await .map_err(StarknetError::ProviderError)?; println!("epoch_proof: {:?}", epoch_proof); - Ok(()) + Ok(EpochProof::from_contract_return_value(epoch_proof).unwrap()) } pub async fn get_latest_epoch_slot( From 5c680def8aeded77593e5e47a5f2023981755168 Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 22 Jan 2025 15:56:23 +0100 Subject: [PATCH 18/66] Fix queries --- client-rs/src/daemon.rs | 6 ++- client-rs/src/routes/mod.rs | 50 ++++++++++----------- client-rs/src/utils/database_manager.rs | 59 +++++++++++++++++++------ 3 files changed, 75 insertions(+), 40 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index eafe2b6..21c0bd0 100644 
--- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -295,7 +295,11 @@ async fn handle_beacon_chain_head_event( .unwrap() .unwrap(); - let last_sync_committee_in_progress = 1; + let last_sync_committee_in_progress = db_manager + .get_latest_sync_committee_in_progress() + .await + .unwrap() + .unwrap(); let mut latest_scheduled_epoch = last_epoch_in_progress; diff --git a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs index 581d7b5..c60f403 100644 --- a/client-rs/src/routes/mod.rs +++ b/client-rs/src/routes/mod.rs @@ -13,10 +13,10 @@ use uuid::Uuid; // Handler for GET /status pub async fn handle_get_status(State(state): State) -> impl IntoResponse { - let last_slot_in_progress = match state.db_manager.get_latest_slot_id_in_progress().await { - Ok(Some(slot)) => { - let last_slot_in_progress = slot.to_u64().unwrap(); - last_slot_in_progress + let last_epoch_in_progress = match state.db_manager.get_latest_epoch_in_progress().await { + Ok(Some(epoch)) => { + let last_epoch_in_progress = epoch.to_u64().unwrap(); + last_epoch_in_progress } Ok(None) => 0, Err(e) => 0, @@ -24,31 +24,31 @@ pub async fn handle_get_status(State(state): State) -> impl IntoRespon let in_progress_jobs_count = state.db_manager.count_jobs_in_progress().await.unwrap(); Json(json!({ "success": true, "details": { - "last_slot_in_progress": last_slot_in_progress, + "last_epoch_in_progress": last_epoch_in_progress, "jobs_in_progress_count": in_progress_jobs_count } })) } -// Handler for GET /epoch/:slot -pub async fn handle_get_epoch_update( - Path(slot): Path, - State(state): State, -) -> impl IntoResponse { - match state.bankai.get_epoch_proof(slot).await { - Ok(epoch_update) => { - // Convert the data to `serde_json::Value` - let value: Value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { - eprintln!("Failed to serialize EpochUpdate: {:?}", err); - json!({ "error": "Internal server error" }) - }); - Json(value) - } - Err(err) => { - eprintln!("Failed to fetch proof: 
{:?}", err); - Json(json!({ "error": "Failed to fetch proof" })) - } - } -} +// // Handler for GET /epoch/:slot +// pub async fn handle_get_epoch_update( +// Path(slot): Path, +// State(state): State, +// ) -> impl IntoResponse { +// match state.bankai.get_epoch_proof(slot).await { +// Ok(epoch_update) => { +// // Convert the data to `serde_json::Value` +// let value: Value = serde_json::to_value(epoch_update).unwrap_or_else(|err| { +// eprintln!("Failed to serialize EpochUpdate: {:?}", err); +// json!({ "error": "Internal server error" }) +// }); +// Json(value) +// } +// Err(err) => { +// eprintln!("Failed to fetch proof: {:?}", err); +// Json(json!({ "error": "Failed to fetch proof" })) +// } +// } +// } pub async fn handle_get_epoch_proof( Path(slot): Path, diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 73a5999..1dc84e5 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -1,3 +1,4 @@ +use crate::helpers; use crate::state::{AtlanticJobType, Error, Job, JobStatus, JobType}; use crate::utils::starknet_client::EpochProof; use alloy_primitives::FixedBytes; @@ -170,49 +171,77 @@ impl DatabaseManager { Ok(row_opt.map(|row| row.get("status"))) } - pub async fn get_latest_slot_id_in_progress( + // pub async fn get_latest_slot_id_in_progress( + // &self, + // ) -> Result, Box> { + // // Query the latest slot with job_status in ('in_progress', 'initialized') + // let row_opt = self + // .client + // .query_opt( + // "SELECT slot FROM jobs + // WHERE job_status NOT IN ('DONE', 'CANCELLED', 'ERROR') + // ORDER BY slot DESC + // LIMIT 1", + // &[], + // ) + // .await?; + + // // Extract and return the slot ID + // if let Some(row) = row_opt { + // Ok(Some(row.get::<_, i64>("slot").to_u64().unwrap())) + // } else { + // Ok(Some(0)) + // } + // } + + pub async fn get_latest_epoch_in_progress( &self, ) -> Result, Box> { // Query the latest slot with job_status in 
('in_progress', 'initialized') let row_opt = self .client .query_opt( - "SELECT slot FROM jobs - WHERE job_status IN ($1, $2) - ORDER BY slot DESC + "SELECT batch_range_end_epoch FROM jobs + WHERE job_status NOT IN ('DONE', 'CANCELLED', 'ERROR') + AND batch_range_end_epoch != 0 + AND type = 'EPOCH_BATCH_UPDATE' + ORDER BY batch_range_end_epoch DESC LIMIT 1", - &[&"CREATED", &"PIE_GENERATED"], + &[], ) .await?; // Extract and return the slot ID if let Some(row) = row_opt { - Ok(Some(row.get::<_, i64>("slot").to_u64().unwrap())) + Ok(Some( + row.get::<_, i64>("batch_range_end_epoch").to_u64().unwrap(), + )) } else { Ok(Some(0)) } } - pub async fn get_latest_epoch_in_progress( + pub async fn get_latest_sync_committee_in_progress( &self, ) -> Result, Box> { // Query the latest slot with job_status in ('in_progress', 'initialized') let row_opt = self .client .query_opt( - "SELECT batch_range_end_epoch FROM jobs - WHERE job_status IN ($1, $2) AND batch_range_end_epoch != NULL - ORDER BY batch_range_end_epoch DESC + "SELECT slot FROM jobs + WHERE job_status NOT IN ('DONE', 'CANCELLED', 'ERROR') + AND type = 'SYNC_COMMITTEE_UPDATE' + ORDER BY slot DESC LIMIT 1", - &[&"CREATED", &"PIE_GENERATED"], + &[], ) .await?; // Extract and return the slot ID if let Some(row) = row_opt { - Ok(Some( + Ok(Some(helpers::slot_to_sync_committee_id( row.get::<_, i64>("batch_range_end_epoch").to_u64().unwrap(), - )) + ))) } else { Ok(Some(0)) } @@ -226,7 +255,9 @@ impl DatabaseManager { .client .query_opt( "SELECT COUNT(job_uuid) as count FROM jobs - WHERE job_status IN ('PIE_GENERATED', 'CREATED')", + WHERE job_status NOT IN ('DONE', 'CANCELLED', 'ERROR') + AND type = 'EPOCH_BATCH_UPDATE' + ", &[], ) .await?; From 4a4fbf4575f1f9070752b81fb44078d65423f4eb Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 22 Jan 2025 16:04:48 +0100 Subject: [PATCH 19/66] Fix constants --- client-rs/src/config.rs | 7 +++++-- client-rs/src/constants.rs | 6 ++++-- 2 files changed, 9 insertions(+), 4 deletions(-) 
diff --git a/client-rs/src/config.rs b/client-rs/src/config.rs index 99e692f..06690a3 100644 --- a/client-rs/src/config.rs +++ b/client-rs/src/config.rs @@ -1,3 +1,4 @@ +use crate::constants::{MAX_CONCURRENT_PIE_GENERATIONS, MAX_CONCURRENT_RPC_DATA_FETCH_JOBS}; use starknet::core::types::Felt; use std::sync::Arc; use tokio::sync::Semaphore; @@ -48,8 +49,10 @@ impl Default for BankaiConfig { committee_circuit_path: "../cairo/build/committee_update.json".to_string(), atlantic_endpoint: "https://atlantic.api.herodotus.cloud".to_string(), // Set how many concurrent pie generation (trace generation) tasks are allowed - pie_generation_semaphore: Arc::new(Semaphore::new(1)), // 3 at once - epoch_data_fetching_semaphore: Arc::new(Semaphore::new(2)), // 2 at once + pie_generation_semaphore: Arc::new(Semaphore::new(MAX_CONCURRENT_PIE_GENERATIONS)), // 3 at once + epoch_data_fetching_semaphore: Arc::new(Semaphore::new( + MAX_CONCURRENT_RPC_DATA_FETCH_JOBS, + )), // 2 at once } } } diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index ddb84c0..43b41fe 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -1,5 +1,7 @@ pub const SLOTS_PER_EPOCH: u64 = 32; // For mainnet pub const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet pub const TARGET_BATCH_SIZE: u64 = 32; // Defines how many epochs in one batch -pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; -pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 16; +pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; // For mainnet +pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 16; // Define the limit of how many jobs can be in state "in progress" concurrently +pub const MAX_CONCURRENT_PIE_GENERATIONS: usize = 3; // Define how many concurrent trace (pie file) generation jobs are allowed to not exhaust resources +pub const MAX_CONCURRENT_RPC_DATA_FETCH_JOBS: usize = 4; // Define how many data fetching jobs can be performed concurrently to not overload RPC From 
f096be3cd307d35f83d100c1c1a3f4cbd981baab Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 24 Jan 2025 21:32:51 +0100 Subject: [PATCH 20/66] Improve batches handling --- client-rs/Cargo.toml | 2 +- client-rs/src/constants.rs | 2 +- client-rs/src/daemon.rs | 189 +++++++++++++++++------- client-rs/src/epoch_batch.rs | 4 + client-rs/src/helpers.rs | 4 +- client-rs/src/state.rs | 2 +- client-rs/src/utils/database_manager.rs | 15 +- 7 files changed, 150 insertions(+), 68 deletions(-) diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index eb5bec4..b60f52d 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -59,4 +59,4 @@ glob = "0.3.2" num-traits = "0.2.19" tower = "0.5.2" tower-http = { version = "0.6.2", features = ["trace"] } -chrono = "0.4.39" +chrono = { version = "0.4.39", features = ["serde"] } diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index 43b41fe..66514c2 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -2,6 +2,6 @@ pub const SLOTS_PER_EPOCH: u64 = 32; // For mainnet pub const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet pub const TARGET_BATCH_SIZE: u64 = 32; // Defines how many epochs in one batch pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; // For mainnet -pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 16; // Define the limit of how many jobs can be in state "in progress" concurrently +pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 4; // Define the limit of how many jobs can be in state "in progress" concurrently pub const MAX_CONCURRENT_PIE_GENERATIONS: usize = 3; // Define how many concurrent trace (pie file) generation jobs are allowed to not exhaust resources pub const MAX_CONCURRENT_RPC_DATA_FETCH_JOBS: usize = 4; // Define how many data fetching jobs can be performed concurrently to not overload RPC diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 21c0bd0..bff50de 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -190,6 +190,9 
@@ async fn main() -> Result<(), Box> { .await .unwrap(); + //enqueue_sync_committee_jobs(); + //enqueue_batch_epochs_jobs(); + if slot_listener_toggle { task::spawn({ async move { @@ -262,9 +265,30 @@ async fn handle_beacon_chain_head_event( db_manager: Arc, tx: mpsc::Sender, ) -> () { - let epoch_id = helpers::slot_to_epoch_id(parsed_event.slot); + let current_epoch_id = helpers::slot_to_epoch_id(parsed_event.slot); let sync_committee_id = helpers::slot_to_sync_committee_id(parsed_event.slot); + if parsed_event.epoch_transition { + //info!("Beacon Chain epoch transition detected. New epoch: {} | Starting processing epoch proving...", epoch_id); + info!( + "Beacon Chain epoch transition detected. New epoch: {}", + current_epoch_id + ); + + // Check also now if slot is the moment of switch to new sync committee set + if parsed_event.slot % constants::SLOTS_PER_SYNC_COMMITTEE == 0 { + info!( + "Beacon Chain sync committee rotation occured. Slot {} | Sync committee id: {}", + parsed_event.slot, sync_committee_id + ); + } + } + + // We can do all circuit computations up to latest slot in advance, but the onchain broadcasts must be send in correct order + // By correct order mean that within the same sync committe the epochs are not needed to be broadcasted in order + // but the order of sync_commite_update->epoch_update must be correct, we firstly need to have correct sync committe veryfied + // before we verify epoch "belonging" to this sync committee + let latest_verified_epoch_slot = bankai .starknet_client .get_latest_epoch_slot(&bankai.config) @@ -282,12 +306,24 @@ async fn handle_beacon_chain_head_event( .unwrap(); let latest_verified_epoch_id = helpers::slot_to_epoch_id(latest_verified_epoch_slot); - let epochs_behind = epoch_id - latest_verified_epoch_id; + let epochs_behind = current_epoch_id - latest_verified_epoch_id; + + let _ = evaluate_jobs_statuses(db_manager.clone(), latest_verified_sync_committee_id) + .await + .map_err(|e| { + error!("Error 
evaluating jobs statuses: {}", e); + }); + let _ = broadcast_onchain_ready_jobs(db_manager.clone(), bankai.clone()) + .await + .map_err(|e| { + error!("Error executing broadcast onchain ready jobs: {}", e); + }); // We getting the last slot in progress to determine next slots to prove //let mut last_slot_in_progress: u64 = 0; // /let mut last_epoch_in_progress: u64 = 0; // let mut last_sync_committee_in_progress: u64 = 0; + // let last_epoch_in_progress = db_manager .get_latest_epoch_in_progress() @@ -322,16 +358,16 @@ async fn handle_beacon_chain_head_event( warn!( "Bankai is out of sync now. Node is {} epochs behind network. Current Beacon Chain state: [Slot: {} Epoch: {} Sync Committee: {}] | Latest verified: [Slot: {} Epoch: {} Sync Committee: {}] | Latest in progress: [Epoch: {} Sync Committee: {}] | Sync in progress...", - epochs_behind, parsed_event.slot, epoch_id, sync_committee_id, latest_verified_epoch_slot, latest_verified_epoch_id, latest_verified_sync_committee_id, last_epoch_in_progress, last_sync_committee_in_progress + epochs_behind, parsed_event.slot, current_epoch_id, sync_committee_id, latest_verified_epoch_slot, latest_verified_epoch_id, latest_verified_sync_committee_id, last_epoch_in_progress, last_sync_committee_in_progress ); // Check if we have in progress all epochs that need to be processed, if no, run job - if latest_scheduled_epoch < (epoch_id - constants::TARGET_BATCH_SIZE) { + if latest_scheduled_epoch < (current_epoch_id - constants::TARGET_BATCH_SIZE) { // And chceck how many jobs are already in progress and if we fit in the limit let in_progress_jobs_count = db_manager.count_jobs_in_progress().await.unwrap(); if in_progress_jobs_count.unwrap() >= constants::MAX_CONCURRENT_JOBS_IN_PROGRESS { info!( - "Currently not starting new job, MAX_CONCURRENT_JOBS_IN_PROGRESS limit reached, jobs in progress: {}", + "Currently not starting new batch epoch job, MAX_CONCURRENT_JOBS_IN_PROGRESS limit reached, jobs in progress: {}", 
in_progress_jobs_count.unwrap() ); return; @@ -341,45 +377,60 @@ async fn handle_beacon_chain_head_event( helpers::get_sync_committee_id_by_epoch(latest_scheduled_epoch); info!( - "Currently processed sync committee epochs ranges from {} to {}", + "Currently processed sync committee epochs ranges from {} to {}. Next sync committee epochs ranges: {} to {}", helpers::get_first_epoch_for_sync_committee(currently_processed_sync_committee_id), - helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) + helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id), + helpers::get_first_epoch_for_sync_committee(currently_processed_sync_committee_id + 1), + helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id + 1) ); if helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) == latest_scheduled_epoch { // We reached end of current sync committee, need to schedule new sync committee proving - match run_sync_committee_update_job( - db_manager.clone(), - currently_processed_sync_committee_id + 1, - tx.clone(), - ) - .await - { - Ok(()) => {} - Err(e) => { - error!("Error while creating job: {}", e); - } - }; + // match run_sync_committee_update_job( + // db_manager.clone(), + // currently_processed_sync_committee_id + 1, + // tx.clone(), + // ) + // .await + // { + // Ok(()) => {} + // Err(e) => { + // error!("Error while creating sync committee update job: {}", e); + // } + // }; } let epoch_to_start_from = latest_scheduled_epoch + 1; - let mut epoch_to_end_on = latest_scheduled_epoch + 1 + constants::TARGET_BATCH_SIZE; + let mut epoch_to_end_on = latest_scheduled_epoch + 1 + constants::TARGET_BATCH_SIZE; // To create batch with size of constants::TARGET_BATCH_SIZE epochs - // Iw we follow the betch size of 32 always, this souldnt happen, but if we have not same size batches, it can be trigerred - if epoch_to_end_on + // Edge cases handling // + // Handle the edge case where 
there is only one epoch in batch left to proccess and this epoch is last epoch in sync committee, if we follow the betch size of 32 always, this souldnt happen: + if latest_scheduled_epoch + == helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) + { + warn!("edge case: only one epoch left to proccess in batch in this sync committee"); + epoch_to_end_on = epoch_to_start_from; + } + // Same, if we follow the betch size of 32 always, this souldnt happen, but if we have not same size batches, it can be trigerred also: + else if epoch_to_end_on > helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) { + warn!("edge case: batch end epoch {} overlaps with the next sync committee, truncating to the last epoch: {} of corresponding sync committee: {}", + epoch_to_end_on, helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id), currently_processed_sync_committee_id); // The end epoch is further that current sync committee // In this case we can simply assingn sync commite latest epoch as epoch_to_end_on epoch_to_end_on = helpers::get_last_epoch_for_sync_committee( currently_processed_sync_committee_id, ); } - - //info!("{} epochs let to proccess in associated sync committee term",); - + // + // info!( + // "{} epochs left to proccess in associated sync committee term", + // helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) + // - latest_scheduled_epoch + // ); match run_batch_epoch_update_job( db_manager.clone(), get_first_slot_for_epoch(epoch_to_start_from) @@ -402,8 +453,26 @@ async fn handle_beacon_chain_head_event( // This is when we are synced properly and new epoch batch needs to be inserted info!( "Starting processing next epoch batch. 
Current Beacon Chain epoch: {} Latest verified epoch: {}", - epoch_id, latest_verified_epoch_id + current_epoch_id, latest_verified_epoch_id ); + + let epoch_to_start_from = latest_scheduled_epoch + 1; + let epoch_to_end_on = latest_scheduled_epoch + 1 + constants::TARGET_BATCH_SIZE; + match run_batch_epoch_update_job( + db_manager.clone(), + get_first_slot_for_epoch(epoch_to_start_from) + + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), + epoch_to_start_from, + epoch_to_end_on, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating job: {}", e); + } + }; } else if epochs_behind < constants::TARGET_BATCH_SIZE { // When we are in sync and not yet reached the TARGET_BATCH_SIZE epochs lagging behind actual beacon chian state debug!("Target batch size not reached yet, daemon is in sync"); @@ -417,30 +486,21 @@ async fn handle_beacon_chain_head_event( // So for each batch update we takin into account effectiviely the latest slot from given batch //let db_client = db_client.clone(); +} - let _ = evaluate_jobs_statuses(db_manager.clone(), latest_verified_sync_committee_id).await; - let _ = broadcast_onchain_ready_jobs(db_manager.clone(), bankai.clone()).await; - - // We can do all circuit computations up to latest slot in advance, but the onchain broadcasts must be send in correct order - // By correct order mean that within the same sync committe the epochs are not needed to be broadcasted in order - // but the order of sync_commite_update->epoch_update must be correct, we firstly need to have correct sync committe veryfied - // before we verify epoch "belonging" to this sync committee - if parsed_event.epoch_transition { - //info!("Beacon Chain epoch transition detected. New epoch: {} | Starting processing epoch proving...", epoch_id); - info!( - "Beacon Chain epoch transition detected. 
New epoch: {}", - epoch_id - ); +// // This function will enqueue sync committee jobs in database with status CREATED up to the latest sync committee +// async fn enqueue_sync_committee_jobs( +// db_manager: Arc, +// bankai: Arc, +// ) -> Result<(), Box> { +// } - // Check also now if slot is the moment of switch to new sync committee set - if parsed_event.slot % constants::SLOTS_PER_SYNC_COMMITTEE == 0 { - info!( - "Beacon Chain sync committee rotation occured. Slot {} | Sync committee id: {}", - parsed_event.slot, sync_committee_id - ); - } - } -} +// // This function will enqueue epoch batch update jobs in database with status CREATED up to the latest able to prove epoch batch +// async fn enqueue_batch_epochs_jobs( +// db_manager: Arc, +// bankai: Arc, +// ) -> Result<(), Box> { +// } async fn run_batch_epoch_update_job( db_manager: Arc, @@ -459,13 +519,23 @@ async fn run_batch_epoch_update_job( batch_range_end_epoch: Some(batch_range_end_epoch), }; + // Check to ensure if both epochs belongs to same sync committee + if helpers::get_sync_committee_id_by_epoch(batch_range_begin_epoch) + != helpers::get_sync_committee_id_by_epoch(batch_range_end_epoch) + { + return Err( + "Batch range start epoch belongs to different committee than batch range end epoch" + .into(), + ); + } + match db_manager.create_job(job.clone()).await { // Insert new job record to DB Ok(()) => { // Handle success info!( - "[EPOCH BATCH UPDATE] Job created successfully with ID: {} Epochs range from {} to {}", - job_id, batch_range_begin_epoch, batch_range_end_epoch + "[EPOCH BATCH UPDATE] Job created successfully with ID: {} Epochs range from {} to {} | Sync committee involved: {}", + job_id, batch_range_begin_epoch, batch_range_end_epoch, helpers::get_sync_committee_id_by_epoch(batch_range_end_epoch) ); if tx.send(job).await.is_err() { return Err("Failed to send job".into()); @@ -548,8 +618,12 @@ async fn broadcast_onchain_ready_jobs( match job.job_type { JobType::EpochBatchUpdate => { let 
update = EpochUpdateBatch::from_json::( - job.batch_range_begin_epoch.try_into().unwrap(), - job.batch_range_end_epoch.try_into().unwrap(), + helpers::get_first_slot_for_epoch( + job.batch_range_begin_epoch.try_into().unwrap(), + ), + helpers::get_first_slot_for_epoch( + job.batch_range_end_epoch.try_into().unwrap(), + ), )?; info!( @@ -563,9 +637,9 @@ async fn broadcast_onchain_ready_jobs( .submit_update(update.expected_circuit_outputs, &bankai.config) .await?; - println!( + info!( "[EPOCH BATCH JOB] Successfully called batch epoch update onchain for job_uuid: {}, txhash: {}", - job.job_uuid, txhash + job.job_uuid, txhash.to_hex_string() ); db_manager @@ -703,9 +777,12 @@ async fn process_job( .await?; let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); + let new_sync_committee_id = latest_committee_id + 1; + // This should be triggered on the final stage of onchain call submission TODO?// if latest_epoch < lowest_committee_update_slot { - error!("[SYNC COMMITTEE JOB] Sync committee update requires newer epoch verified. The lowest needed slot is {}", lowest_committee_update_slot); + error!("[SYNC COMMITTEE JOB] Sync committee update to sync committee {} requires newer epoch verified. 
The lowest needed slot is {} which corresponds to epoch {} and sync committee {}", + new_sync_committee_id, lowest_committee_update_slot, helpers::slot_to_epoch_id(lowest_committee_update_slot.to_u64().unwrap()), helpers::slot_to_sync_committee_id(lowest_committee_update_slot.to_u64().unwrap())); //return Err(Error::RequiresNewerEpoch(latest_epoch)); } @@ -719,7 +796,7 @@ async fn process_job( ); info!( - "[SYNC COMMITTEE JOB] Starting Cairo execution and PIE generation for Sync Committee: {:?}...", + "[SYNC COMMITTEE JOB] Starting Cairo execution and PIE generation for Sync Committee: {}...", latest_committee_id ); diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index 2aa3dd6..dd8e00f 100644 --- a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -284,6 +284,10 @@ impl EpochUpdateBatch { where T: serde::de::DeserializeOwned, { + info!( + "Trying to read file batches/epoch_batch/{}_to_{}/input_batch_{}_to_{}.json", + first_slot, last_slot, first_slot, last_slot + ); // Pattern match for files like: batches/epoch_batch/6709248_to_6710272/input_batch_6709248_to_6710272.json let path = format!( "batches/epoch_batch/{}_to_{}/input_batch_{}_to_{}.json", diff --git a/client-rs/src/helpers.rs b/client-rs/src/helpers.rs index 6483a51..221b9e4 100644 --- a/client-rs/src/helpers.rs +++ b/client-rs/src/helpers.rs @@ -61,8 +61,8 @@ pub fn get_first_slot_for_epoch(slot: u64) -> u64 { slot * SLOTS_PER_EPOCH } -pub fn get_last_slot_for_epoch(slot: u64) -> u64 { - (slot + 1) * SLOTS_PER_EPOCH - 1 +pub fn get_last_slot_for_epoch(epoch: u64) -> u64 { + (epoch + 1) * SLOTS_PER_EPOCH - 1 } pub fn get_sync_committee_id_by_epoch(epoch: u64) -> u64 { diff --git a/client-rs/src/state.rs b/client-rs/src/state.rs index 84fad15..0b4b88e 100644 --- a/client-rs/src/state.rs +++ b/client-rs/src/state.rs @@ -31,7 +31,7 @@ pub struct AppState { #[postgres(name = "job_status")] pub enum JobStatus { #[postgres(name = "CREATED")] - Created, + Created, // Can 
act as queued and be picked up by worker to proccess #[postgres(name = "PROGRAM_INPUTS_PREPARED")] ProgramInputsPrepared, #[postgres(name = "PIE_GENERATED")] diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 1dc84e5..65fa4e5 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -19,7 +19,7 @@ pub struct JobSchema { pub batch_range_begin_epoch: i64, pub batch_range_end_epoch: i64, pub job_type: JobType, - pub updated_at: i64, + //pub updated_at: i64, } #[derive(Debug)] @@ -143,7 +143,7 @@ impl DatabaseManager { JobType::SyncCommitteeUpdate => { self.client .execute( - "INSERT INTO jobs (job_uuid, job_status, slot, type) VALUES ($1, $2, $3, $4, $5, $6)", + "INSERT INTO jobs (job_uuid, job_status, slot, type) VALUES ($1, $2, $3, $4)", &[ &job.job_id, &job.job_status.to_string(), @@ -333,7 +333,7 @@ impl DatabaseManager { .query( "SELECT * FROM jobs WHERE job_status = $1", - &[&desired_status], + &[&desired_status.to_string()], ) .await?; @@ -357,7 +357,7 @@ impl DatabaseManager { batch_range_begin_epoch: row.get("batch_range_begin_epoch"), batch_range_end_epoch: row.get("batch_range_end_epoch"), job_type, - updated_at: row.get("updated_at"), + //updated_at: row.get("updated_at"), }) }, ) @@ -388,9 +388,10 @@ impl DatabaseManager { self.client .execute( "UPDATE jobs - SET job_status = 'READY_TO_BROADCAST', updated_at = NOW() - WHERE batch_range_begin_epoch >= $1 AND batch_range_end_epoch << $2 AND job_type = 'EPOCH_UPDATE_BATCH'", - &[&first_epoch.to_string(), &last_epoch.to_string()], + SET job_status = 'READY_TO_BROADCAST_ONCHAIN', updated_at = NOW() + WHERE batch_range_begin_epoch >= $1 AND batch_range_end_epoch <= $2 AND type = 'EPOCH_BATCH_UPDATE' + AND job_status = 'OFFCHAIN_COMPUTATION_FINISHED'", + &[&first_epoch.to_i64(), &last_epoch.to_i64()], ) .await?; Ok(()) From 3e19f9bf285ed164916ff8aa8391e060d2e635da Mon Sep 17 00:00:00 2001 From: lakewik Date: Mon, 27 Jan 
2025 15:40:01 +0100 Subject: [PATCH 21/66] Fix calculation of epochs ranges, add reconnection logic to beacon chain events listener, add more detailed status tracking --- client-rs/Cargo.lock | 1 + client-rs/Cargo.toml | 1 + client-rs/src/constants.rs | 6 +- client-rs/src/daemon.rs | 389 ++++++++++-------------- client-rs/src/state.rs | 9 +- client-rs/src/utils/atlantic_client.rs | 66 +++- client-rs/src/utils/database_manager.rs | 22 +- 7 files changed, 250 insertions(+), 244 deletions(-) diff --git a/client-rs/Cargo.lock b/client-rs/Cargo.lock index e4b1bcd..3336624 100644 --- a/client-rs/Cargo.lock +++ b/client-rs/Cargo.lock @@ -838,6 +838,7 @@ dependencies = [ "tokio", "tokio-postgres", "tokio-stream", + "tokio-util", "tower", "tower-http", "tracing", diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index b60f52d..4f1cfbb 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -60,3 +60,4 @@ num-traits = "0.2.19" tower = "0.5.2" tower-http = { version = "0.6.2", features = ["trace"] } chrono = { version = "0.4.39", features = ["serde"] } +tokio-util = "0.7.13" diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index 66514c2..b614473 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -2,6 +2,6 @@ pub const SLOTS_PER_EPOCH: u64 = 32; // For mainnet pub const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet pub const TARGET_BATCH_SIZE: u64 = 32; // Defines how many epochs in one batch pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; // For mainnet -pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 4; // Define the limit of how many jobs can be in state "in progress" concurrently -pub const MAX_CONCURRENT_PIE_GENERATIONS: usize = 3; // Define how many concurrent trace (pie file) generation jobs are allowed to not exhaust resources -pub const MAX_CONCURRENT_RPC_DATA_FETCH_JOBS: usize = 4; // Define how many data fetching jobs can be performed concurrently to not overload RPC +pub const 
MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 3; // Define the limit of how many jobs can be in state "in progress" concurrently +pub const MAX_CONCURRENT_PIE_GENERATIONS: usize = 1; // Define how many concurrent trace (pie file) generation jobs are allowed to not exhaust resources +pub const MAX_CONCURRENT_RPC_DATA_FETCH_JOBS: usize = 1; // Define how many data fetching jobs can be performed concurrently to not overload RPC diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index bff50de..23b841e 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -54,7 +54,7 @@ use routes::{ }; use std::net::SocketAddr; use sync_committee::SyncCommitteeUpdate; -use tokio::time::Duration; +use tokio::time::{timeout, Duration}; use uuid::Uuid; #[tokio::main] @@ -114,6 +114,13 @@ async fn main() -> Result<(), Box> { bankai: bankai.clone(), }; + tokio::spawn(async move { + loop { + info!("[HEARTBEAT] Daemon is alive"); + tokio::time::sleep(std::time::Duration::from_secs(30)).await; + } + }); + //Spawn a background task to process jobs tokio::spawn(async move { while let Some(job) = rx.recv().await { @@ -183,75 +190,159 @@ async fn main() -> Result<(), Box> { // Create an HTTP client let http_stream_client = reqwest::Client::new(); - // Send the request to the Beacon node - let response = http_stream_client - .get(&events_endpoint) - .send() - .await - .unwrap(); - - //enqueue_sync_committee_jobs(); - //enqueue_batch_epochs_jobs(); - - if slot_listener_toggle { - task::spawn({ - async move { - // Check if response is successful; if not, bail out early - // TODO: need to implement resilience and potentialy use multiple providers (implement something like fallbackprovider functionality in ethers), handle reconnection if connection is lost for various reasons - if !response.status().is_success() { - error!("Failed to connect: HTTP {}", response.status()); - return; + tokio::spawn(async move { + loop { + let response = match http_stream_client + 
.get(&events_endpoint) + //.timeout(std::time::Duration::from_secs(30)) - cannot do this because this will give timeout after evach duration since we ussing HTTP Pooling here + .send() + .await + { + Ok(r) => r, + Err(e) => { + error!("Failed to connect: {}", e); + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + continue; // retry } + }; - info!("Listening for new slots, epochs and sync committee updates..."); - let mut stream = response.bytes_stream(); - - while let Some(chunk) = stream.next().await { - match chunk { - Ok(bytes) => { - if let Ok(event_text) = String::from_utf8(bytes.to_vec()) { - // Preprocess the event text - if let Some(json_data) = - helpers::extract_json_from_event(&event_text) - { - match serde_json::from_str::(&json_data) { - Ok(parsed_event) => { - let epoch_id = - helpers::slot_to_epoch_id(parsed_event.slot); - let sync_committee_id = - helpers::slot_to_sync_committee_id( - parsed_event.slot, - ); - info!( + if !response.status().is_success() { + error!("Got non-200: {}", response.status()); + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + continue; // retry + } + + info!("Listening for new slots..."); + + let mut stream = response.bytes_stream(); + + while let chunk_result = timeout(Duration::from_secs(30), stream.next()).await { + match chunk_result { + // Timed out; handle it locally + Err(_elapsed) => { + warn!( + "Timed out waiting for new slot beacon chain event chunk. Maybe some slots was skipped. Will reconnect..." 
+ ); + break; + } + Ok(Some(Ok(bytes))) => { + if let Ok(event_text) = String::from_utf8(bytes.to_vec()) { + // Preprocess the event text + if let Some(json_data) = helpers::extract_json_from_event(&event_text) { + match serde_json::from_str::(&json_data) { + Ok(parsed_event) => { + let epoch_id = helpers::slot_to_epoch_id(parsed_event.slot); + let sync_committee_id = + helpers::slot_to_sync_committee_id(parsed_event.slot); + info!( "[EVENT] New beacon slot detected: {} | Block: {} | Epoch: {} | Sync committee: {} | Is epoch transition: {}", parsed_event.slot, parsed_event.block, epoch_id, sync_committee_id, parsed_event.epoch_transition ); - handle_beacon_chain_head_event( - parsed_event, - bankai_for_listener.clone(), - db_manager_for_listener.clone(), - tx_for_listener.clone(), - ) - .await; - } - Err(err) => { - warn!("Failed to parse JSON data: {}", err); - } + handle_beacon_chain_head_event( + parsed_event, + bankai_for_listener.clone(), + db_manager_for_listener.clone(), + tx_for_listener.clone(), + ) + .await; + } + Err(err) => { + warn!("Failed to parse JSON data: {}", err); } - } else { - warn!("No valid JSON data found in event: {}", event_text); } + } else { + warn!("No valid JSON data found in event: {}", event_text); } } - Err(err) => { - warn!("Error reading event stream: {}", err); - } + } + Ok(Some(Err(e))) => { + warn!("Beacon chain client stream error: {}", e); + break; // break the while, then reconnect + } + Ok(None) => { + warn!("Beacon chain client stream ended"); + // Stream ended + break; } } } - }); - } + // If we got here because of `timeout` returning `Err(_)`, that means 30s + // passed without a single chunk of data arriving or + // the RPC server has closed connection or some other unknown network error occured + + // If we exit the while, we reconnect in the outer loop + info!("Reconnecting to beacon node..."); + } + }); + + // // Send the request to the Beacon node + // let response = http_stream_client + // .get(&events_endpoint) + 
// .send() + // .await + // .unwrap(); + + // //enqueue_sync_committee_jobs(); + // //enqueue_batch_epochs_jobs(); + + // if slot_listener_toggle { + // tokio::spawn(async move { + // let mut interval = tokio::time::interval(std::time::Duration::from_secs(30)); + // loop { + // interval.tick().await; + // info!("[HEARTBEAT] Slot listener is alive"); + // } + // }); + // // Check if response is successful; if not, bail out early + // // TODO: need to implement resilience and potentialy use multiple providers (implement something like fallbackprovider functionality in ethers), handle reconnection if connection is lost for various reasons + // if !response.status().is_success() { + // error!("Failed to connect: HTTP {}", response.status()); + // return; + // } + + // info!("Listening for new slots, epochs and sync committee updates..."); + // let mut stream = response.bytes_stream(); + + // while let Some(chunk) = stream.next().await { + // match chunk { + // Ok(bytes) => { + // if let Ok(event_text) = String::from_utf8(bytes.to_vec()) { + // // Preprocess the event text + // if let Some(json_data) = helpers::extract_json_from_event(&event_text) { + // match serde_json::from_str::(&json_data) { + // Ok(parsed_event) => { + // let epoch_id = helpers::slot_to_epoch_id(parsed_event.slot); + // let sync_committee_id = + // helpers::slot_to_sync_committee_id(parsed_event.slot); + // info!( + // "[EVENT] New beacon slot detected: {} | Block: {} | Epoch: {} | Sync committee: {} | Is epoch transition: {}", + // parsed_event.slot, parsed_event.block, epoch_id, sync_committee_id, parsed_event.epoch_transition + // ); + + // handle_beacon_chain_head_event( + // parsed_event, + // bankai_for_listener.clone(), + // db_manager_for_listener.clone(), + // tx_for_listener.clone(), + // ) + // .await; + // } + // Err(err) => { + // warn!("Failed to parse JSON data: {}", err); + // } + // } + // } else { + // warn!("No valid JSON data found in event: {}", event_text); + // } + // } + // 
} + // Err(err) => { + // warn!("Error reading event stream: {}", err); + // } + // } + // } + // } // Wait for the server task to finish server_task.await?; @@ -388,22 +479,22 @@ async fn handle_beacon_chain_head_event( == latest_scheduled_epoch { // We reached end of current sync committee, need to schedule new sync committee proving - // match run_sync_committee_update_job( - // db_manager.clone(), - // currently_processed_sync_committee_id + 1, - // tx.clone(), - // ) - // .await - // { - // Ok(()) => {} - // Err(e) => { - // error!("Error while creating sync committee update job: {}", e); - // } - // }; + match run_sync_committee_update_job( + db_manager.clone(), + currently_processed_sync_committee_id + 1, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating sync committee update job: {}", e); + } + }; } let epoch_to_start_from = latest_scheduled_epoch + 1; - let mut epoch_to_end_on = latest_scheduled_epoch + 1 + constants::TARGET_BATCH_SIZE; // To create batch with size of constants::TARGET_BATCH_SIZE epochs + let mut epoch_to_end_on = latest_scheduled_epoch + constants::TARGET_BATCH_SIZE; // To create batch with size of constants::TARGET_BATCH_SIZE epochs // Edge cases handling // // Handle the edge case where there is only one epoch in batch left to proccess and this epoch is last epoch in sync committee, if we follow the betch size of 32 always, this souldnt happen: @@ -457,7 +548,7 @@ async fn handle_beacon_chain_head_event( ); let epoch_to_start_from = latest_scheduled_epoch + 1; - let epoch_to_end_on = latest_scheduled_epoch + 1 + constants::TARGET_BATCH_SIZE; + let epoch_to_end_on = latest_scheduled_epoch + constants::TARGET_BATCH_SIZE; match run_batch_epoch_update_job( db_manager.clone(), get_first_slot_for_epoch(epoch_to_start_from) @@ -659,7 +750,7 @@ async fn broadcast_onchain_ready_jobs( // ) // .await?; } - JobType::EpochUpdate => {} + //JobType::EpochUpdate => {} JobType::SyncCommitteeUpdate => { let update 
= SyncCommitteeUpdate::from_json::( job.slot.to_u64().unwrap(), @@ -993,160 +1084,6 @@ async fn process_job( .update_job_status(job.job_id, JobStatus::OffchainComputationFinished) .await?; } - JobType::EpochUpdate => { - // Epoch job - info!( - "[EPOCH JOB] Started processing epoch job: {} for epoch {}", - job.job_id, job.slot - ); - - //update_job_status(&db_client, job.job_id, JobStatus::Created).await?; - - // 1) Fetch the latest on-chain verified epoch - // let latest_epoch_slot = bankai - // .starknet_client - // .get_latest_epoch_slot(&bankai.config) - // .await?; - - // info!( - // "[EPOCH JOB] Latest onchain verified epoch slot: {}", - // latest_epoch_slot - // ); - - //let latest_epoch_slot = ; - - // make sure next_epoch % 32 == 0 - // let next_epoch = (u64::try_from(job.slot).unwrap() / constants::SLOTS_PER_EPOCH) - // * constants::SLOTS_PER_EPOCH - // + constants::SLOTS_PER_EPOCH; - // info!( - // "[EPOCH JOB] Fetching Inputs for next Epoch: {}...", - // next_epoch - // ); - - // // 2) Fetch the proof - // let proof = bankai.get_epoch_proof(next_epoch).await?; - // info!( - // "[EPOCH JOB] Fetched Inputs successfully for Epoch: {}", - // next_epoch - // ); - - // db_manager - // .update_job_status(job.job_id, JobStatus::ProgramInputsPrepared) - // .await?; - - // // 3) Generate PIE - // info!( - // "[EPOCH JOB] Starting Cairo execution and PIE generation for Epoch: {}...", - // next_epoch - // ); - - // CairoRunner::generate_pie(&proof, &bankai.config).await?; - - // info!( - // "[EPOCH JOB] Pie generated successfully for Epoch: {}...", - // next_epoch - // ); - - // db_manager - // .update_job_status(job.job_id, JobStatus::PieGenerated) - // .await?; - - // // 4) Submit offchain proof-generation job to Atlantic - // info!("[EPOCH JOB] Sending proof generation query to Atlantic..."); - - // let batch_id = bankai.atlantic_client.submit_batch(proof).await?; - - // info!( - // "[EPOCH JOB] Proof generation batch submitted to Atlantic. 
QueryID: {}", - // batch_id - // ); - - // update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRequested).await?; - // set_atlantic_job_queryid( - // &db_client, - // job.job_id, - // batch_id.clone(), - // AtlanticJobType::ProofGeneration, - // ) - // .await?; - - // // Pool for Atlantic execution done - // bankai - // .atlantic_client - // .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) - // .await?; - - // info!( - // "[EPOCH JOB] Proof generation done by Atlantic. QueryID: {}", - // batch_id - // ); - - // let proof = bankai - // .atlantic_client - // .fetch_proof(batch_id.as_str()) - // .await?; - - // info!( - // "[EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", - // batch_id - // ); - - // update_job_status(&db_client, job.job_id, JobStatus::OffchainProofRetrieved).await?; - - // // 5) Submit wrapped proof request - // info!("[EPOCH JOB] Sending proof wrapping query to Atlantic.."); - // let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; - // info!( - // "[EPOCH JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", - // wrapping_batch_id - // ); - - // update_job_status(&db_client, job.job_id, JobStatus::WrapProofRequested).await?; - // set_atlantic_job_queryid( - // &db_client, - // job.job_id, - // wrapping_batch_id.clone(), - // AtlanticJobType::ProofWrapping, - // ) - // .await?; - - // // Pool for Atlantic execution done - // bankai - // .atlantic_client - // .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) - // .await?; - - // update_job_status(&db_client, job.job_id, JobStatus::WrappedProofDone).await?; - - // info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. 
Wrapping QueryID: {}", wrapping_batch_id); - - // update_job_status(&db_client, job.job_id, JobStatus::VerifiedFactRegistered).await?; - - // // 6) Submit epoch update onchain - // info!("[EPOCH JOB] Calling epoch update onchain..."); - // let update = EpochUpdate::from_json::(next_epoch)?; - - // let txhash = bankai - // .starknet_client - // .submit_update(update.expected_circuit_outputs, &bankai.config) - // .await?; - - // set_job_txhash(&db_client, job.job_id, txhash).await?; - - // info!("[EPOCH JOB] Successfully submitted epoch update..."); - - // update_job_status(&db_client, job.job_id, JobStatus::ProofDecommitmentCalled).await?; - - // Now we can get proof from contract? - // bankai.starknet_client.get_epoch_proof( - // &self, - // slot: u64, - // config: &BankaiConfig) - - // Insert data to DB after successful onchain epoch verification - // insert_verified_epoch(&db_client, job.slot / 0x2000, epoch_proof).await?; - } } Ok(()) diff --git a/client-rs/src/state.rs b/client-rs/src/state.rs index 0b4b88e..2ad320a 100644 --- a/client-rs/src/state.rs +++ b/client-rs/src/state.rs @@ -33,6 +33,8 @@ pub enum JobStatus { #[postgres(name = "CREATED")] Created, // Can act as queued and be picked up by worker to proccess #[postgres(name = "PROGRAM_INPUTS_PREPARED")] + StartedTraceGeneration, + #[postgres(name = "STARTED_TRACE_GENERATION")] ProgramInputsPrepared, #[postgres(name = "PIE_GENERATED")] PieGenerated, @@ -63,6 +65,7 @@ impl ToString for JobStatus { match self { JobStatus::Created => "CREATED".to_string(), JobStatus::ProgramInputsPrepared => "PROGRAM_INPUTS_PREPARED".to_string(), + JobStatus::StartedTraceGeneration => "STARTED_TRACE_GENERATION".to_string(), JobStatus::PieGenerated => "PIE_GENERATED".to_string(), JobStatus::OffchainProofRequested => "OFFCHAIN_PROOF_REQUESTED".to_string(), JobStatus::OffchainProofRetrieved => "OFFCHAIN_PROOF_RETRIEVED".to_string(), @@ -103,7 +106,7 @@ impl FromStr for JobStatus { #[derive(Debug, FromSql, ToSql, Clone)] pub 
enum JobType { - EpochUpdate, + //EpochUpdate, EpochBatchUpdate, SyncCommitteeUpdate, } @@ -113,7 +116,7 @@ impl FromStr for JobType { fn from_str(s: &str) -> Result { match s { - "EPOCH_UPDATE" => Ok(JobType::EpochUpdate), + //"EPOCH_UPDATE" => Ok(JobType::EpochUpdate), "EPOCH_BATCH_UPDATE" => Ok(JobType::EpochBatchUpdate), "SYNC_COMMITTEE_UPDATE" => Ok(JobType::SyncCommitteeUpdate), _ => Err(format!("Invalid job type: {}", s)), @@ -124,7 +127,7 @@ impl FromStr for JobType { impl fmt::Display for JobType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let value = match self { - JobType::EpochUpdate => "EPOCH_UPDATE", + //JobType::EpochUpdate => "EPOCH_UPDATE", JobType::EpochBatchUpdate => "EPOCH_BATCH_UPDATE", JobType::SyncCommitteeUpdate => "SYNC_COMMITTEE_UPDATE", }; diff --git a/client-rs/src/utils/atlantic_client.rs b/client-rs/src/utils/atlantic_client.rs index 6b68cf5..520cbf6 100644 --- a/client-rs/src/utils/atlantic_client.rs +++ b/client-rs/src/utils/atlantic_client.rs @@ -1,10 +1,14 @@ -use std::{env, fs}; - use crate::traits::{ProofType, Provable}; use crate::Error; +use futures::{StreamExt, TryStreamExt}; use reqwest::multipart::{Form, Part}; +use reqwest::Body; use serde::{Deserialize, Serialize}; +use std::env; +use std::path::{Path, PathBuf}; +use tokio::fs; use tokio::time::{sleep, Duration}; +use tokio_util::io::ReaderStream; use tracing::{debug, error, info, trace}; #[derive(Debug)] @@ -29,13 +33,61 @@ impl AtlanticClient { } pub async fn submit_batch(&self, batch: impl Provable) -> Result { - let pie_path = batch.pie_path(); + let pie_path: PathBuf = batch.pie_path().into(); + + let meta = fs::metadata(pie_path.clone()) + .await + .map_err(Error::IoError)?; + let total_bytes = meta.len(); + + let file = fs::File::open(pie_path.clone()) + .await + .map_err(Error::IoError)?; + + let stream = ReaderStream::new(file); + + let progress_stream = stream.scan( + (0_u64, 10_u64), + move |(uploaded, next_threshold), chunk_result| { + match 
chunk_result { + Ok(chunk) => { + *uploaded += chunk.len() as u64; + let percent = (*uploaded as f64 / total_bytes as f64) * 100.0; + + if percent >= *next_threshold as f64 && *next_threshold <= 100 { + println!( + "Uploaded {}% of the file to Atlantic API...", + *next_threshold + ); + *next_threshold += 10; + } + + // Pass the chunk further down the stream + futures::future::ready(Some(Ok(chunk))) + } + Err(e) => { + // Forward the error + futures::future::ready(Some(Err(e))) + } + } + }, + ); // Read the file as bytes - let file_bytes = fs::read(&pie_path).map_err(Error::IoError)?; - let file_part = Part::bytes(file_bytes) - .file_name(pie_path) // Provide a filename - .mime_str("application/zip") // Specify MIME type + // let file_bytes = fs::read(&pie_path).map_err(Error::IoError)?; + // let file_part = Part::bytes(file_bytes) + // .file_name(pie_path) // Provide a filename + // .mime_str("application/zip") // Specify MIME type + // .map_err(Error::AtlanticError)?; + let file_part = Part::stream(Body::wrap_stream(progress_stream)) + .file_name( + pie_path + .file_name() + .unwrap_or_default() + .to_string_lossy() + .to_string(), + ) + .mime_str("application/zip") .map_err(Error::AtlanticError)?; let external_id = format!( diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 65fa4e5..fe77f05 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -8,7 +8,7 @@ use std::str::FromStr; use chrono::NaiveDateTime; use num_traits::ToPrimitive; use tokio_postgres::{Client, Row}; -use tracing::{error, info}; +use tracing::{error, info, warn}; use uuid::Uuid; #[derive(Debug)] @@ -139,7 +139,7 @@ impl DatabaseManager { .await .map_err(|e| Error::DatabaseError(e.to_string()))?; } - JobType::EpochUpdate => {} + //JobType::EpochUpdate => {} JobType::SyncCommitteeUpdate => { self.client .execute( @@ -385,7 +385,7 @@ impl DatabaseManager { first_epoch: u64, last_epoch: u64, ) -> 
Result<(), Box> { - self.client + let rows_affected = self.client .execute( "UPDATE jobs SET job_status = 'READY_TO_BROADCAST_ONCHAIN', updated_at = NOW() @@ -394,6 +394,13 @@ impl DatabaseManager { &[&first_epoch.to_i64(), &last_epoch.to_i64()], ) .await?; + + if rows_affected > 0 { + info!( + "{} jobs changed state to READY_TO_BROADCAST_ONCHAIN", + rows_affected + ); + } Ok(()) } @@ -429,12 +436,17 @@ impl DatabaseManager { path_index: u64, path: String, ) -> Result<(), Box> { - self.client + let rows_affected =self.client .execute( - "INSERT INTO epoch_merkle_paths (epoch_id, path_index, merkle_path) VALUES ($1, $2, $3)", + "INSERT INTO epoch_merkle_paths (epoch_id, path_index, merkle_path) VALUES ($1, $2, $3) + ON CONFLICT (epoch_id, path_index) DO NOTHING", &[&epoch.to_i64(), &path_index.to_i64(), &path], ) .await?; + + if rows_affected == 0 { + warn!("Combination of epoch_id and path_index already exists, skipping insertion of epoch merkle patch for epoch {} and index {}", epoch, path_index); + } Ok(()) } From 2a69717717b451ddfc2ddbbbbbe96de23373da93 Mon Sep 17 00:00:00 2001 From: lakewik Date: Mon, 27 Jan 2025 19:45:13 +0100 Subject: [PATCH 22/66] Add ability to graceful shutdown & fix sync commite epochs counting --- client-rs/Cargo.lock | 1 + client-rs/Cargo.toml | 2 +- client-rs/src/daemon.rs | 132 +++++++++++++++++----------------------- 3 files changed, 58 insertions(+), 77 deletions(-) diff --git a/client-rs/Cargo.lock b/client-rs/Cargo.lock index 3336624..d01e5b2 100644 --- a/client-rs/Cargo.lock +++ b/client-rs/Cargo.lock @@ -4313,6 +4313,7 @@ dependencies = [ "http 1.2.0", "http-body 1.0.1", "pin-project-lite", + "tokio", "tower-layer", "tower-service", "tracing", diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index 4f1cfbb..8f97e0b 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -58,6 +58,6 @@ starknet-crypto = "0.7.3" glob = "0.3.2" num-traits = "0.2.19" tower = "0.5.2" -tower-http = { version = "0.6.2", features = 
["trace"] } +tower-http = { version = "0.6.2", features = ["trace", "timeout"] } chrono = { version = "0.4.39", features = ["serde"] } tokio-util = "0.7.13" diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 23b841e..32b5a97 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -24,7 +24,9 @@ use bankai_client::BankaiClient; use config::BankaiConfig; //use constants::SLOTS_PER_EPOCH; use dotenv::from_filename; -use helpers::{get_first_epoch_for_sync_committee, get_first_slot_for_epoch}; +use helpers::{ + get_first_epoch_for_sync_committee, get_first_slot_for_epoch, get_last_epoch_for_sync_committee, +}; use num_traits::cast::ToPrimitive; use reqwest; use starknet::core::types::Felt; @@ -34,10 +36,10 @@ use state::{AtlanticJobType, Error, JobStatus, JobType}; use std::env; use std::sync::Arc; use tokio::sync::mpsc; -use tokio::task; +use tokio::{signal, task}; use tokio_stream::StreamExt; use tower::ServiceBuilder; -use tower_http::trace::TraceLayer; +use tower_http::{timeout::TimeoutLayer, trace::TraceLayer}; use tracing::{debug, error, info, warn, Level}; use tracing_subscriber::FmtSubscriber; use utils::{cairo_runner::CairoRunner, database_manager::DatabaseManager}; @@ -57,7 +59,7 @@ use sync_committee::SyncCommitteeUpdate; use tokio::time::{timeout, Duration}; use uuid::Uuid; -#[tokio::main] +#[tokio::main(flavor = "multi_thread", worker_threads = 2)] async fn main() -> Result<(), Box> { // Load .env.sepolia file from_filename(".env.sepolia").ok(); @@ -175,6 +177,11 @@ async fn main() -> Result<(), Box> { .layer( ServiceBuilder::new().layer(TraceLayer::new_for_http()), // Example: for logging/tracing ) + .layer(( + // Graceful shutdown will wait for outstanding requests to complete + // Because of this timeourt setting, requests don't hang forever + TimeoutLayer::new(Duration::from_secs(10)), + )) .with_state(app_state); let addr = "0.0.0.0:3000".parse::()?; @@ -183,18 +190,26 @@ async fn main() -> Result<(), Box> { 
info!("Bankai RPC HTTP server is listening on http://{}", addr); let server_task = tokio::spawn(async move { - let _ = axum::serve(listener, app).await; + let _ = axum::serve(listener, app) + .with_graceful_shutdown(shutdown_signal()) + .await + .unwrap(); }); + //enqueue_sync_committee_jobs(); + //enqueue_batch_epochs_jobs(); + // + // Listen for the new slots on BeaconChain // Create an HTTP client let http_stream_client = reqwest::Client::new(); tokio::spawn(async move { loop { + // Send the request to the Beacon node let response = match http_stream_client .get(&events_endpoint) - //.timeout(std::time::Duration::from_secs(30)) - cannot do this because this will give timeout after evach duration since we ussing HTTP Pooling here + //.timeout(std::time::Duration::from_secs(30)) - cannot do this because this will give timeout after evach duration since we not using HTTP Pooling here but HTTP streaming .send() .await { @@ -212,7 +227,7 @@ async fn main() -> Result<(), Box> { continue; // retry } - info!("Listening for new slots..."); + info!("Listening for new slots, epochs and sync committee updates..."); let mut stream = response.bytes_stream(); @@ -276,74 +291,6 @@ async fn main() -> Result<(), Box> { } }); - // // Send the request to the Beacon node - // let response = http_stream_client - // .get(&events_endpoint) - // .send() - // .await - // .unwrap(); - - // //enqueue_sync_committee_jobs(); - // //enqueue_batch_epochs_jobs(); - - // if slot_listener_toggle { - // tokio::spawn(async move { - // let mut interval = tokio::time::interval(std::time::Duration::from_secs(30)); - // loop { - // interval.tick().await; - // info!("[HEARTBEAT] Slot listener is alive"); - // } - // }); - // // Check if response is successful; if not, bail out early - // // TODO: need to implement resilience and potentialy use multiple providers (implement something like fallbackprovider functionality in ethers), handle reconnection if connection is lost for various reasons - // if 
!response.status().is_success() { - // error!("Failed to connect: HTTP {}", response.status()); - // return; - // } - - // info!("Listening for new slots, epochs and sync committee updates..."); - // let mut stream = response.bytes_stream(); - - // while let Some(chunk) = stream.next().await { - // match chunk { - // Ok(bytes) => { - // if let Ok(event_text) = String::from_utf8(bytes.to_vec()) { - // // Preprocess the event text - // if let Some(json_data) = helpers::extract_json_from_event(&event_text) { - // match serde_json::from_str::(&json_data) { - // Ok(parsed_event) => { - // let epoch_id = helpers::slot_to_epoch_id(parsed_event.slot); - // let sync_committee_id = - // helpers::slot_to_sync_committee_id(parsed_event.slot); - // info!( - // "[EVENT] New beacon slot detected: {} | Block: {} | Epoch: {} | Sync committee: {} | Is epoch transition: {}", - // parsed_event.slot, parsed_event.block, epoch_id, sync_committee_id, parsed_event.epoch_transition - // ); - - // handle_beacon_chain_head_event( - // parsed_event, - // bankai_for_listener.clone(), - // db_manager_for_listener.clone(), - // tx_for_listener.clone(), - // ) - // .await; - // } - // Err(err) => { - // warn!("Failed to parse JSON data: {}", err); - // } - // } - // } else { - // warn!("No valid JSON data found in event: {}", event_text); - // } - // } - // } - // Err(err) => { - // warn!("Error reading event stream: {}", err); - // } - // } - // } - // } - // Wait for the server task to finish server_task.await?; @@ -686,7 +633,12 @@ async fn evaluate_jobs_statuses( // We calculating the start and end epoch for provided last verified sync committe // and setting READY_TO_BROADCAST status for epochs up to the last epoch belonging to provided latest_verified_sync_committee_id let first_epoch = get_first_epoch_for_sync_committee(latest_verified_sync_committee_id); - let last_epoch = get_first_epoch_for_sync_committee(latest_verified_sync_committee_id); + let last_epoch = 
get_last_epoch_for_sync_committee(latest_verified_sync_committee_id); + + info!( + "Evaluating jobs for epochs range from {} to {}, for sync committee {}", + first_epoch, last_epoch, latest_verified_sync_committee_id + ); db_manager .set_ready_to_broadcast_for_batch_epochs(first_epoch, last_epoch) // Set READY_TO_BROADCAST when OFFCHAIN_COMPUTATION_FINISHED @@ -1088,3 +1040,31 @@ async fn process_job( Ok(()) } + +async fn shutdown_signal() { + let ctrl_c = async { + signal::ctrl_c() + .await + .expect("failed to install Ctrl+C handler"); + }; + + #[cfg(unix)] + let terminate = async { + signal::unix::signal(signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; + + #[cfg(not(unix))] + let terminate = std::future::pending::<()>(); + + tokio::select! { + _ = ctrl_c => { + info!("Gracefully shutting down..."); + }, + _ = terminate => { + info!("Gracefully shutting down..."); + }, + } +} From 549e2621ca4582e207e5e8d02e7a87fd0a0cb468 Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 31 Jan 2025 16:55:50 +0100 Subject: [PATCH 23/66] Implementation of Herodotus Transactor client --- client-rs/src/bankai_client.rs | 6 + client-rs/src/config.rs | 8 +- client-rs/src/constants.rs | 2 + client-rs/src/state.rs | 3 + client-rs/src/utils/mod.rs | 1 + client-rs/src/utils/transactor_client.rs | 161 +++++++++++++++++++++++ 6 files changed, 180 insertions(+), 1 deletion(-) create mode 100644 client-rs/src/utils/transactor_client.rs diff --git a/client-rs/src/bankai_client.rs b/client-rs/src/bankai_client.rs index e308a95..5067d86 100644 --- a/client-rs/src/bankai_client.rs +++ b/client-rs/src/bankai_client.rs @@ -5,6 +5,7 @@ use crate::{ sync_committee::SyncCommitteeUpdate, utils::{ atlantic_client::AtlanticClient, rpc::BeaconRpcClient, starknet_client::StarknetClient, + transactor_client::TransactorClient, }, BankaiConfig, }; @@ -18,6 +19,7 @@ pub struct BankaiClient { pub starknet_client: StarknetClient, pub config: 
BankaiConfig, pub atlantic_client: AtlanticClient, + pub transactor_client: TransactorClient, } impl BankaiClient { @@ -37,6 +39,10 @@ impl BankaiClient { config.atlantic_endpoint.clone(), env::var("ATLANTIC_API_KEY").unwrap(), ), + transactor_client: TransactorClient::new( + config.transactor_endpoint.clone(), + env::var("TRANSACTOR_API_KEY").unwrap(), + ), config, } } diff --git a/client-rs/src/config.rs b/client-rs/src/config.rs index 06690a3..cd7978e 100644 --- a/client-rs/src/config.rs +++ b/client-rs/src/config.rs @@ -1,4 +1,6 @@ -use crate::constants::{MAX_CONCURRENT_PIE_GENERATIONS, MAX_CONCURRENT_RPC_DATA_FETCH_JOBS}; +use crate::constants::{ + MAX_CONCURRENT_PIE_GENERATIONS, MAX_CONCURRENT_RPC_DATA_FETCH_JOBS, STARKNET_SEPOLIA, +}; use starknet::core::types::Felt; use std::sync::Arc; use tokio::sync::Semaphore; @@ -15,8 +17,10 @@ pub struct BankaiConfig { pub epoch_batch_circuit_path: String, pub committee_circuit_path: String, pub atlantic_endpoint: String, + pub transactor_endpoint: String, pub pie_generation_semaphore: Arc, pub epoch_data_fetching_semaphore: Arc, + pub proof_settlement_chain_id: Felt, } impl Default for BankaiConfig { @@ -48,11 +52,13 @@ impl Default for BankaiConfig { epoch_batch_circuit_path: "../cairo/build/epoch_batch.json".to_string(), committee_circuit_path: "../cairo/build/committee_update.json".to_string(), atlantic_endpoint: "https://atlantic.api.herodotus.cloud".to_string(), + transactor_endpoint: "https://staging.api.herodotus.cloud".to_string(), // Set how many concurrent pie generation (trace generation) tasks are allowed pie_generation_semaphore: Arc::new(Semaphore::new(MAX_CONCURRENT_PIE_GENERATIONS)), // 3 at once epoch_data_fetching_semaphore: Arc::new(Semaphore::new( MAX_CONCURRENT_RPC_DATA_FETCH_JOBS, )), // 2 at once + proof_settlement_chain_id: Felt::from_hex(STARKNET_SEPOLIA).unwrap(), } } } diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index b614473..2f62a2e 100644 --- 
a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -5,3 +5,5 @@ pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; // For mainnet pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 3; // Define the limit of how many jobs can be in state "in progress" concurrently pub const MAX_CONCURRENT_PIE_GENERATIONS: usize = 1; // Define how many concurrent trace (pie file) generation jobs are allowed to not exhaust resources pub const MAX_CONCURRENT_RPC_DATA_FETCH_JOBS: usize = 1; // Define how many data fetching jobs can be performed concurrently to not overload RPC +pub const STARKNET_SEPOLIA: &str = "0x534e5f5345504f4c4941"; +pub const STARKNET_MAINNET: &str = "0x534e5f4d41494e"; diff --git a/client-rs/src/state.rs b/client-rs/src/state.rs index 2ad320a..3d61761 100644 --- a/client-rs/src/state.rs +++ b/client-rs/src/state.rs @@ -156,6 +156,7 @@ pub fn check_env_vars() -> Result<(), String> { "POSTGRESQL_DB_NAME", "RPC_LISTEN_HOST", "RPC_LISTEN_PORT", + "TRANSACTOR_API_KEY", ]; for &var in &required_vars { @@ -201,6 +202,7 @@ pub enum Error { PoolingTimeout(String), InvalidMerkleTree, DatabaseError(String), + TransactorError(reqwest::Error), } impl fmt::Display for Error { @@ -224,6 +226,7 @@ impl fmt::Display for Error { Error::PoolingTimeout(msg) => write!(f, "Pooling timeout: {}", msg), Error::InvalidMerkleTree => write!(f, "Invalid Merkle Tree"), Error::DatabaseError(msg) => write!(f, "Database error: {}", msg), + Error::TransactorError(msg) => write!(f, "Transactor error: {}", msg), } } } diff --git a/client-rs/src/utils/mod.rs b/client-rs/src/utils/mod.rs index 22fecea..ea70144 100644 --- a/client-rs/src/utils/mod.rs +++ b/client-rs/src/utils/mod.rs @@ -5,3 +5,4 @@ pub mod hashing; pub mod merkle; pub mod rpc; pub mod starknet_client; +pub mod transactor_client; diff --git a/client-rs/src/utils/transactor_client.rs b/client-rs/src/utils/transactor_client.rs new file mode 100644 index 0000000..06db491 --- /dev/null +++ 
b/client-rs/src/utils/transactor_client.rs @@ -0,0 +1,161 @@ +use crate::{config::BankaiConfig, traits::Submittable, Error}; +use reqwest::{ + header::{AUTHORIZATION, CONTENT_TYPE}, + Client, +}; +use serde::{Deserialize, Serialize}; +use tokio::time::{sleep, Duration}; +use tracing::{debug, error, info, trace}; + +#[derive(Debug)] +pub struct TransactorClient { + endpoint: String, + api_key: String, + pub client: Client, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct TransactorResponse { + pub transactor_status: String, + pub tx: TransactionDetails, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct TransactionDetails { + pub hash: Option, + pub multicall_status: Option, +} + +#[derive(Debug, Serialize)] +pub struct TransactorRequest { + pub chain_id: String, + pub contract_invocations: Vec, +} + +#[derive(Debug, Serialize)] +pub struct ContractInvocation { + pub value: String, + pub chain_id: String, + pub calldata: String, + pub method_selector: String, + pub contract_address: String, +} + +impl TransactorClient { + pub fn new(endpoint: String, api_key: String) -> Self { + Self { + endpoint, + api_key, + client: Client::new(), + } + } + + pub async fn send_transaction( + &self, + request: TransactorRequest, + ) -> Result { + let url = format!("{}/transactor", self.endpoint); + let response = self + .client + .post(&url) + .header(AUTHORIZATION, format!("Bearer {}", self.api_key)) + .header(CONTENT_TYPE, "application/json") + .json(&request) + .send() + .await + .map_err(Error::TransactorError)?; + + let response_data: TransactorResponse = + response.json().await.map_err(Error::TransactorError)?; + Ok(response_data) + } + + pub async fn check_transaction_status( + &self, + transaction_id: &str, + ) -> Result { + let url = format!("{}/transactor/{}", self.endpoint, transaction_id); + let response = self + .client + .get(&url) + .header(AUTHORIZATION, format!("Bearer {}", self.api_key)) + .header(CONTENT_TYPE, "application/json") + .send() + 
.await + .map_err(Error::TransactorError)?; + + let response_data: TransactorResponse = + response.json().await.map_err(Error::TransactorError)?; + Ok(response_data) + } + + pub async fn poll_transaction_status_until_done( + &self, + transaction_id: &str, + sleep_duration: Duration, + max_retries: usize, + ) -> Result { + for attempt in 1..=max_retries { + debug!("Polling Transactor for update... {}", transaction_id); + let status_response = self.check_transaction_status(transaction_id).await?; + let status = status_response.transactor_status; + + if status == "OK_SUCCESS" { + return Ok(true); + } + + if status == "KO_FAILED_TO_ESTIMATE_GAS" || status == "KO_WITH_ERRORS" { + return Err(Error::InvalidResponse(format!( + "Transactor processing failed for transaction {} with status: {}", + transaction_id, status + ))); + } + + trace!( + "Transaction {} not completed yet. Status: {}. Polling attempt {}/{}", + transaction_id, + status, + attempt, + max_retries + ); + sleep(sleep_duration).await; + } + + Err(Error::InvalidResponse(format!( + "Polling timeout for transaction {}", + transaction_id + ))) + } + + pub async fn submit_update( + &self, + update: impl Submittable, + config: &BankaiConfig, + ) -> Result { + let request = TransactorRequest { + chain_id: config.proof_settlement_chain_id.clone().to_hex_string(), + contract_invocations: vec![ContractInvocation { + value: "0".to_string(), + chain_id: config.proof_settlement_chain_id.clone().to_hex_string(), + calldata: update + .to_calldata() + .iter() + .map(|felt| felt.to_hex_string()) + .collect(), + method_selector: "".to_string(), + contract_address: config.contract_address.clone().to_hex_string(), + }], + }; + + let response = self.send_transaction(request).await?; + + if let Some(hash) = response.tx.hash { + println!("Transaction sent with tx_hash: {:?}", hash); + Ok(hash) + } else { + Err(Error::InvalidResponse( + "Transaction hash not found".to_string(), + )) + } + } +} From 
56c6bee64dd35c77644a6c72d64d3276efa2a1a1 Mon Sep 17 00:00:00 2001 From: lakewik Date: Mon, 3 Feb 2025 09:56:55 +0100 Subject: [PATCH 24/66] Various pipelines fixes --- client-rs/db_structure.sql | 1 + client-rs/src/constants.rs | 3 +- client-rs/src/daemon.rs | 843 +++++++++++++++--------- client-rs/src/epoch_batch.rs | 29 +- client-rs/src/epoch_update.rs | 62 +- client-rs/src/helpers.rs | 8 + client-rs/src/routes/mod.rs | 16 +- client-rs/src/state.rs | 18 +- client-rs/src/sync_committee.rs | 7 + client-rs/src/traits.rs | 3 +- client-rs/src/utils/atlantic_client.rs | 10 +- client-rs/src/utils/cairo_runner.rs | 7 +- client-rs/src/utils/database_manager.rs | 120 +++- client-rs/src/utils/starknet_client.rs | 82 ++- 14 files changed, 846 insertions(+), 363 deletions(-) diff --git a/client-rs/db_structure.sql b/client-rs/db_structure.sql index d2e77e8..b2de221 100644 --- a/client-rs/db_structure.sql +++ b/client-rs/db_structure.sql @@ -7,6 +7,7 @@ CREATE TABLE jobs ( batch_range_begin_epoch BIGINT NOT NULL, batch_range_end_epoch BIGINT NOT NULL, type TEXT NOT NULL, + tx_hash TEXT NULL, updated_at TIMESTAMP DEFAULT NOW (), created_at TIMESTAMP DEFAULT NOW () ); diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index 2f62a2e..458a52c 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -2,8 +2,9 @@ pub const SLOTS_PER_EPOCH: u64 = 32; // For mainnet pub const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet pub const TARGET_BATCH_SIZE: u64 = 32; // Defines how many epochs in one batch pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; // For mainnet -pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 3; // Define the limit of how many jobs can be in state "in progress" concurrently +pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 11; // Define the limit of how many jobs can be in state "in progress" concurrently pub const MAX_CONCURRENT_PIE_GENERATIONS: usize = 1; // Define how many concurrent trace (pie file) generation jobs are 
allowed to not exhaust resources pub const MAX_CONCURRENT_RPC_DATA_FETCH_JOBS: usize = 1; // Define how many data fetching jobs can be performed concurrently to not overload RPC pub const STARKNET_SEPOLIA: &str = "0x534e5f5345504f4c4941"; pub const STARKNET_MAINNET: &str = "0x534e5f4d41494e"; +pub const USE_TRANSACTOR: bool = false; diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 32b5a97..5ac6ac0 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -1,3 +1,5 @@ +#![allow(dead_code)] +#![allow(unused_imports)] mod bankai_client; mod config; mod constants; @@ -42,6 +44,7 @@ use tower::ServiceBuilder; use tower_http::{timeout::TimeoutLayer, trace::TraceLayer}; use tracing::{debug, error, info, warn, Level}; use tracing_subscriber::FmtSubscriber; +use traits::Provable; use utils::{cairo_runner::CairoRunner, database_manager::DatabaseManager}; //use std::error::Error as StdError; use epoch_batch::EpochUpdateBatch; @@ -64,7 +67,7 @@ async fn main() -> Result<(), Box> { // Load .env.sepolia file from_filename(".env.sepolia").ok(); - let slot_listener_toggle = true; + //let slot_listener_toggle = true; let subscriber = FmtSubscriber::builder() //.with_max_level(Level::DEBUG) @@ -123,6 +126,9 @@ async fn main() -> Result<(), Box> { } }); + // 🔄 Resume any unfinished jobs before processing new ones + resume_unfinished_jobs(db_manager.clone(), tx_for_listener.clone()).await?; + //Spawn a background task to process jobs tokio::spawn(async move { while let Some(job) = rx.recv().await { @@ -231,8 +237,8 @@ async fn main() -> Result<(), Box> { let mut stream = response.bytes_stream(); - while let chunk_result = timeout(Duration::from_secs(30), stream.next()).await { - match chunk_result { + loop { + match timeout(Duration::from_secs(30), stream.next()).await { // Timed out; handle it locally Err(_elapsed) => { warn!( @@ -287,7 +293,7 @@ async fn main() -> Result<(), Box> { // the RPC server has closed connection or some other unknown network 
error occured // If we exit the while, we reconnect in the outer loop - info!("Reconnecting to beacon node..."); + info!("Timeout waiting for next event, reconnecting to beacon node..."); } }); @@ -304,7 +310,7 @@ async fn handle_beacon_chain_head_event( tx: mpsc::Sender, ) -> () { let current_epoch_id = helpers::slot_to_epoch_id(parsed_event.slot); - let sync_committee_id = helpers::slot_to_sync_committee_id(parsed_event.slot); + let current_sync_committee_id = helpers::slot_to_sync_committee_id(parsed_event.slot); if parsed_event.epoch_transition { //info!("Beacon Chain epoch transition detected. New epoch: {} | Starting processing epoch proving...", epoch_id); @@ -317,7 +323,7 @@ async fn handle_beacon_chain_head_event( if parsed_event.slot % constants::SLOTS_PER_SYNC_COMMITTEE == 0 { info!( "Beacon Chain sync committee rotation occured. Slot {} | Sync committee id: {}", - parsed_event.slot, sync_committee_id + parsed_event.slot, current_sync_committee_id ); } } @@ -376,27 +382,39 @@ async fn handle_beacon_chain_head_event( .unwrap(); let mut latest_scheduled_epoch = last_epoch_in_progress; + let mut latest_scheduled_sync_committee = last_sync_committee_in_progress; if latest_verified_epoch_id > last_epoch_in_progress { if last_epoch_in_progress == 0 { - info!("Starting daemon on clean jobs table"); + info!("Starting daemon on clean epochs jobs table"); } else { warn!( "Something may be wrong, last verified epoch is greather than last epoch in progress" ); } - - // So we should schedule the greater epoch, which id + // So we should schedule the greater epoch, which is latest_scheduled_epoch = latest_verified_epoch_id; } + if latest_verified_sync_committee_id > last_sync_committee_in_progress { + if last_sync_committee_in_progress == 0 { + info!("Starting daemon on clean sync committees jobs table"); + } else { + warn!( + "Something may be wrong, last verified sync committee is greather than last sync committee in progress" + ); + } + + 
latest_scheduled_sync_committee = latest_verified_sync_committee_id; + } + // Decide basing on actual state if epochs_behind > constants::TARGET_BATCH_SIZE { // is_node_in_sync = true; warn!( "Bankai is out of sync now. Node is {} epochs behind network. Current Beacon Chain state: [Slot: {} Epoch: {} Sync Committee: {}] | Latest verified: [Slot: {} Epoch: {} Sync Committee: {}] | Latest in progress: [Epoch: {} Sync Committee: {}] | Sync in progress...", - epochs_behind, parsed_event.slot, current_epoch_id, sync_committee_id, latest_verified_epoch_slot, latest_verified_epoch_id, latest_verified_sync_committee_id, last_epoch_in_progress, last_sync_committee_in_progress + epochs_behind, parsed_event.slot, current_epoch_id, current_sync_committee_id, latest_verified_epoch_slot, latest_verified_epoch_id, latest_verified_sync_committee_id, last_epoch_in_progress, last_sync_committee_in_progress ); // Check if we have in progress all epochs that need to be processed, if no, run job @@ -411,8 +429,11 @@ async fn handle_beacon_chain_head_event( return; } + let epoch_to_start_from = latest_scheduled_epoch + 1; + let mut epoch_to_end_on = latest_scheduled_epoch + constants::TARGET_BATCH_SIZE; // To create batch with size of constants::TARGET_BATCH_SIZE epochs + let currently_processed_sync_committee_id = - helpers::get_sync_committee_id_by_epoch(latest_scheduled_epoch); + helpers::get_sync_committee_id_by_epoch(epoch_to_start_from); info!( "Currently processed sync committee epochs ranges from {} to {}. 
Next sync committee epochs ranges: {} to {}", @@ -422,13 +443,13 @@ async fn handle_beacon_chain_head_event( helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id + 1) ); - if helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) - == latest_scheduled_epoch + if helpers::get_sync_committee_id_by_epoch(epoch_to_start_from) + > latest_scheduled_sync_committee { // We reached end of current sync committee, need to schedule new sync committee proving match run_sync_committee_update_job( db_manager.clone(), - currently_processed_sync_committee_id + 1, + latest_scheduled_sync_committee + 1, tx.clone(), ) .await @@ -440,12 +461,13 @@ async fn handle_beacon_chain_head_event( }; } - let epoch_to_start_from = latest_scheduled_epoch + 1; - let mut epoch_to_end_on = latest_scheduled_epoch + constants::TARGET_BATCH_SIZE; // To create batch with size of constants::TARGET_BATCH_SIZE epochs + if helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) + == epoch_to_start_from + {} // Edge cases handling // // Handle the edge case where there is only one epoch in batch left to proccess and this epoch is last epoch in sync committee, if we follow the betch size of 32 always, this souldnt happen: - if latest_scheduled_epoch + if epoch_to_start_from == helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) { warn!("edge case: only one epoch left to proccess in batch in this sync committee"); @@ -552,7 +574,7 @@ async fn run_batch_epoch_update_job( job_id: job_id.clone(), job_type: JobType::EpochBatchUpdate, job_status: JobStatus::Created, - slot, + slot: Some(slot), batch_range_begin_epoch: Some(batch_range_begin_epoch), batch_range_end_epoch: Some(batch_range_end_epoch), }; @@ -590,7 +612,7 @@ async fn run_batch_epoch_update_job( async fn run_sync_committee_update_job( db_manager: Arc, - slot: u64, + sync_committee_id: u64, tx: mpsc::Sender, ) -> Result<(), Box> { let job_id = 
Uuid::new_v4(); @@ -598,7 +620,9 @@ async fn run_sync_committee_update_job( job_id: job_id.clone(), job_type: JobType::SyncCommitteeUpdate, job_status: JobStatus::Created, - slot, + slot: Some(helpers::get_first_slot_for_sync_committee( + sync_committee_id, + )), batch_range_begin_epoch: None, batch_range_end_epoch: None, }; @@ -647,6 +671,111 @@ async fn evaluate_jobs_statuses( Ok(()) } +async fn resume_unfinished_jobs( + db_manager: Arc, + tx: mpsc::Sender, +) -> Result<(), Box> { + info!("Checking for unfinished jobs at daemon start..."); + + // Fetch jobs that were in progress before shutdown + let unfinished_jobs = db_manager + .get_jobs_with_statuses(vec![ + JobStatus::Created, + JobStatus::PieGenerated, + JobStatus::AtlanticProofRequested, + JobStatus::AtlanticProofRetrieved, + JobStatus::WrapProofRequested, + JobStatus::WrappedProofDone, + ]) + .await?; + + if unfinished_jobs.is_empty() { + info!("No unfinished jobs found."); + return Ok(()); + } + + info!( + "Found {} unfinished jobs. 
Resuming processing...", + unfinished_jobs.len() + ); + + for job in unfinished_jobs { + let job_id = job.job_uuid; + let job_to_resume = Job { + job_id, + job_type: job.job_type, + job_status: job.job_status, + slot: Some(job.slot.to_u64().unwrap()), + batch_range_begin_epoch: job.batch_range_begin_epoch.to_u64(), + batch_range_end_epoch: job.batch_range_end_epoch.to_u64(), + }; + + let tx_clone = tx.clone(); + tokio::spawn(async move { + info!( + "Resuming job {} with status {}...", + job_id, + job_to_resume.job_status.to_string() + ); + if tx_clone.send(job_to_resume).await.is_err() { + // return Err("Failed to send job".into()); + error!("Error resuming job: {}", job_id); + } + }); + + tokio::time::sleep(Duration::from_secs(1)).await; + } + + Ok(()) +} + +async fn retry_failed_jobs( + db_manager: Arc, + tx: mpsc::Sender, +) -> Result<(), Box> { + info!("Checking for failed jobs at daemon start..."); + + // Fetch failed jobs + let errored_jobs = db_manager + .get_jobs_with_statuses(vec![JobStatus::Error]) + .await?; + + if errored_jobs.is_empty() { + info!("No failed jobs found."); + return Ok(()); + } + + warn!( + "Found {} failed jobs. 
Trying to retry these jobs...", + errored_jobs.len() + ); + + for job in errored_jobs { + let job_id = job.job_uuid; + let job_to_retry = Job { + job_id, + job_type: job.job_type, + job_status: JobStatus::Created, + slot: Some(job.slot.to_u64().unwrap()), + batch_range_begin_epoch: job.batch_range_begin_epoch.to_u64(), + batch_range_end_epoch: job.batch_range_end_epoch.to_u64(), + }; + + let tx_clone = tx.clone(); + tokio::spawn(async move { + info!("Retrying failed job {}...", job_id); + if tx_clone.send(job_to_retry).await.is_err() { + // return Err("Failed to send job".into()); + error!("Error retrying job: {}", job_id); + } + }); + + tokio::time::sleep(Duration::from_secs(1)).await; + } + + Ok(()) +} + async fn broadcast_onchain_ready_jobs( db_manager: Arc, bankai: Arc, @@ -660,7 +789,7 @@ async fn broadcast_onchain_ready_jobs( for job in jobs { match job.job_type { JobType::EpochBatchUpdate => { - let update = EpochUpdateBatch::from_json::( + let circuit_inputs = EpochUpdateBatch::from_json::( helpers::get_first_slot_for_epoch( job.batch_range_begin_epoch.try_into().unwrap(), ), @@ -677,7 +806,7 @@ async fn broadcast_onchain_ready_jobs( // Submit to Starknet let txhash = bankai .starknet_client - .submit_update(update.expected_circuit_outputs, &bankai.config) + .submit_update(circuit_inputs.expected_circuit_outputs, &bankai.config) .await?; info!( @@ -686,9 +815,36 @@ async fn broadcast_onchain_ready_jobs( ); db_manager - .update_job_status(job.job_uuid, JobStatus::Done) + .update_job_status(job.job_uuid, JobStatus::ProofVerifyCalledOnchain) .await?; + let send_result = db_manager.set_job_txhash(job.job_uuid, txhash).await?; + + let confirmation_result = + bankai.starknet_client.wait_for_confirmation(txhash).await; + + match confirmation_result { + Ok(_) => { + info!("[EPOCH BATCH JOB] Transaction is confirmed on-chain!"); + db_manager + .update_job_status(job.job_uuid, JobStatus::Done) + .await?; + + // Iterate over and insert epochs proofs to db + for 
(index, epoch) in + circuit_inputs.circuit_inputs.epochs.iter().enumerate() + { + println!("Epoch {}: {:?}", index, epoch.expected_circuit_outputs); + } + } + Err(e) => { + error!("[EPOCH BATCH JOB] Transaction failed or timed out: {:?}", e); + db_manager + .update_job_status(job.job_uuid, JobStatus::Error) + .await?; + } + } + // let epoch_proof = bankai // .starknet_client // .get_epoch_proof(job.slot.try_into().unwrap(), &bankai.config) @@ -704,9 +860,9 @@ async fn broadcast_onchain_ready_jobs( } //JobType::EpochUpdate => {} JobType::SyncCommitteeUpdate => { - let update = SyncCommitteeUpdate::from_json::( - job.slot.to_u64().unwrap(), - )?; + let sync_committee_update_inputs = SyncCommitteeUpdate::from_json::< + SyncCommitteeUpdate, + >(job.slot.to_u64().unwrap())?; let sync_commite_id = helpers::slot_to_sync_committee_id(job.slot.to_u64().unwrap()); @@ -718,43 +874,60 @@ async fn broadcast_onchain_ready_jobs( let txhash = bankai .starknet_client - .submit_update(update.expected_circuit_outputs, &bankai.config) + .submit_update( + sync_committee_update_inputs.expected_circuit_outputs, + &bankai.config, + ) .await?; info!("[SYNC COMMITTEE JOB] Successfully called sync committee ID {} update onchain, transaction confirmed, txhash: {}", sync_commite_id, txhash); db_manager.set_job_txhash(job.job_uuid, txhash).await?; - db_manager - .update_job_status(job.job_uuid, JobStatus::Done) - .await?; + let confirmation_result = + bankai.starknet_client.wait_for_confirmation(txhash).await; - // Insert data to DB after successful onchain sync committee verification - //let sync_committee_hash = update.expected_circuit_outputs.committee_hash; - let sync_committee_hash = match bankai - .starknet_client - .get_committee_hash(job.slot.to_u64().unwrap(), &bankai.config) - .await - { - Ok(sync_committee_hash) => sync_committee_hash, + match confirmation_result { + Ok(_) => { + info!("[EPOCH BATCH JOB] Transaction is confirmed on-chain!"); + db_manager + 
.update_job_status(job.job_uuid, JobStatus::Done) + .await?; + + // Insert data to DB after successful onchain sync committee verification + //let sync_committee_hash = update.expected_circuit_outputs.committee_hash; + let sync_committee_hash = match bankai + .starknet_client + .get_committee_hash(job.slot.to_u64().unwrap(), &bankai.config) + .await + { + Ok(sync_committee_hash) => sync_committee_hash, + Err(e) => { + // Handle the error + return Err(e.into()); + } + }; + + let sync_committee_hash_str = sync_committee_hash + .iter() + .map(|felt| felt.to_hex_string()) + .collect::>() + .join(""); + + db_manager + .insert_verified_sync_committee( + job.slot.to_u64().unwrap(), + sync_committee_hash_str, + ) + .await?; + } Err(e) => { - // Handle the error - return Err(e.into()); + eprintln!("[EPOCH BATCH JOB] Transaction failed or timed out: {:?}", e); + db_manager + .update_job_status(job.job_uuid, JobStatus::Error) + .await?; } - }; - - let sync_committee_hash_str = sync_committee_hash - .iter() - .map(|felt| felt.to_hex_string()) - .collect::>() - .join(""); - - db_manager - .insert_verified_sync_committee( - job.slot.to_u64().unwrap(), - sync_committee_hash_str, - ) - .await?; + } } } } @@ -762,282 +935,358 @@ async fn broadcast_onchain_ready_jobs( Ok(()) } -// async fn worker_task(mut rx: Receiver, db_client: Client) -> Result<(), Box> { -// while let Some(job_id) = rx.recv().await { -// println!("Worker received job {job_id}"); - -// // 4a) Check current status in DB -// if let Some(status) = fetch_job_status(&db_client, job_id).await? { -// match status { -// JobStatus::Created => { -// println!("Fetching proof for job {job_id}..."); -// // Then update status -// update_job_status(&db_client, job_id, JobStatus::FetchedProof).await?; -// println!("Job {job_id} updated to FetchedProof"); -// } -// JobStatus::FetchedProof => { -// // Already fetched, maybe do next step... 
-// println!("Job {job_id} is already FetchedProof; ignoring for now."); -// } -// _ => { -// println!("Job {job_id} in status {:?}, no action needed.", status); -// } -// } -// } else { -// eprintln!("No job found in DB for ID = {job_id}"); -// } -// } -// Ok(()) -// } - // mpsc jobs // async fn process_job( job: Job, db_manager: Arc, bankai: Arc, ) -> Result<(), Box> { - match job.job_type { - JobType::SyncCommitteeUpdate => { - // Sync committee job - info!( - "[SYNC COMMITTEE JOB] Started processing sync committee job: {} for slot {}", - job.job_id, job.slot - ); - - let latest_committee_id = bankai - .starknet_client - .get_latest_committee_id(&bankai.config) - .await?; - - info!( - "[SYNC COMMITTEE JOB] Latest onchain verified sync committee id: {}", - latest_committee_id - ); - - let latest_epoch = bankai - .starknet_client - .get_latest_epoch_slot(&bankai.config) - .await?; - - let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); - let new_sync_committee_id = latest_committee_id + 1; - - // This should be triggered on the final stage of onchain call submission TODO?// - if latest_epoch < lowest_committee_update_slot { - error!("[SYNC COMMITTEE JOB] Sync committee update to sync committee {} requires newer epoch verified. 
The lowest needed slot is {} which corresponds to epoch {} and sync committee {}", - new_sync_committee_id, lowest_committee_update_slot, helpers::slot_to_epoch_id(lowest_committee_update_slot.to_u64().unwrap()), helpers::slot_to_sync_committee_id(lowest_committee_update_slot.to_u64().unwrap())); - //return Err(Error::RequiresNewerEpoch(latest_epoch)); - } - - let update = bankai - .get_sync_committee_update(latest_epoch.try_into().unwrap()) - .await?; - - info!( - "[SYNC COMMITTEE JOB] Received sync committee update: {:?}", - update - ); - - info!( - "[SYNC COMMITTEE JOB] Starting Cairo execution and PIE generation for Sync Committee: {}...", - latest_committee_id - ); - - CairoRunner::generate_pie(&update, &bankai.config).await?; - - db_manager - .update_job_status(job.job_id, JobStatus::PieGenerated) - .await?; - - info!( - "[SYNC COMMITTEE JOB] Pie generated successfully for Sync Committee: {}...", - latest_committee_id - ); - info!("[SYNC COMMITTEE JOB] Sending proof generation query to Atlantic..."); - - let batch_id = bankai.atlantic_client.submit_batch(update).await?; - - db_manager - .update_job_status(job.job_id, JobStatus::OffchainProofRequested) - .await?; - db_manager - .set_atlantic_job_queryid( - job.job_id, - batch_id.clone(), - AtlanticJobType::ProofGeneration, - ) - .await?; - - info!( - "[SYNC COMMITTEE JOB] Proof generation batch submitted to atlantic. QueryID: {}", - batch_id - ); - - // Pool for Atlantic execution done - bankai - .atlantic_client - .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) - .await?; - - info!( - "[SYNC COMMITTEE JOB] Proof generation done by Atlantic. QueryID: {}", - batch_id - ); - - let proof = bankai - .atlantic_client - .fetch_proof(batch_id.as_str()) - .await?; - - info!( - "[SYNC COMMITTEE JOB] Proof retrieved from Atlantic. 
QueryID: {}", - batch_id - ); - - db_manager - .update_job_status(job.job_id, JobStatus::OffchainProofRetrieved) - .await?; - - // 5) Submit wrapped proof request - info!("[SYNC COMMITTEE JOB] Sending proof wrapping query to Atlantic.."); - let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; - info!( - "[SYNC COMMITTEE JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", - wrapping_batch_id - ); + let mut current_status = job.job_status.clone(); + let job_data = db_manager.get_job_by_id(job.job_id).await?.unwrap(); + let mut batch_id = job_data + .atlantic_proof_generate_batch_id + .unwrap_or("".to_string()); + let mut wrapping_batch_id = job_data + .atlantic_proof_wrapper_batch_id + .unwrap_or("".to_string()); + loop { + match job.job_type { + JobType::SyncCommitteeUpdate => { + // Sync committee job + let updated_committee_slot = job.slot.unwrap(); + let update_committee_id = + helpers::get_sync_committee_id_by_slot(updated_committee_slot); + match current_status { + JobStatus::Created => { + info!("[SYNC COMMITTEE JOB] Started processing sync committee job: {} for sync committee ID: {} (Slot: {})", + job.job_id, update_committee_id, updated_committee_slot ); + + let sync_committe_update_program_inputs = bankai + .get_sync_committee_update(updated_committee_slot.try_into().unwrap()) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Sync committee update program inputs generated: {:?}", + sync_committe_update_program_inputs + ); - db_manager - .update_job_status(job.job_id, JobStatus::WrapProofRequested) - .await?; - db_manager - .set_atlantic_job_queryid( - job.job_id, - wrapping_batch_id.clone(), - AtlanticJobType::ProofWrapping, - ) - .await?; + let input_path = sync_committe_update_program_inputs.export(); + info!( + "[SYNC COMMITTEE JOB] Circuit inputs saved at {:?}", + input_path + ); - // Pool for Atlantic execution done - bankai - .atlantic_client - .poll_batch_status_until_done(&wrapping_batch_id, 
Duration::new(10, 0), usize::MAX) - .await?; + db_manager + .update_job_status(job.job_id, JobStatus::ProgramInputsPrepared) + .await?; - db_manager - .update_job_status(job.job_id, JobStatus::WrappedProofDone) - .await?; + current_status = JobStatus::ProgramInputsPrepared; + } + JobStatus::ProgramInputsPrepared => { + let sync_committe_update_program_inputs = + SyncCommitteeUpdate::from_json::( + job.slot.unwrap(), + )?; + info!( + "[SYNC COMMITTEE JOB] Starting Cairo execution and PIE generation for Sync Committee: {}...", + update_committee_id + ); + + CairoRunner::generate_pie( + &sync_committe_update_program_inputs, + &bankai.config, + ) + .await?; + + db_manager + .update_job_status(job.job_id, JobStatus::PieGenerated) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Pie generated successfully for Sync Committee: {}...", + update_committee_id + ); + + current_status = JobStatus::PieGenerated; + } + JobStatus::PieGenerated => { + let sync_committe_update_program_inputs = + SyncCommitteeUpdate::from_json::( + job.slot.unwrap(), + )?; + + info!("[SYNC COMMITTEE JOB] Sending proof generation query to Atlantic..."); + + batch_id = bankai + .atlantic_client + .submit_batch(sync_committe_update_program_inputs) + .await?; + + db_manager + .update_job_status(job.job_id, JobStatus::AtlanticProofRequested) + .await?; + db_manager + .set_atlantic_job_queryid( + job.job_id, + batch_id.clone(), + AtlanticJobType::ProofGeneration, + ) + .await?; + + info!( "[SYNC COMMITTEE JOB] Proof generation batch submitted to atlantic. QueryID: {}", + batch_id ); + + current_status = JobStatus::AtlanticProofRequested; + } + JobStatus::AtlanticProofRequested | JobStatus::AtlanticProofRetrieved => { + // Pool for Atlantic execution done + info!( + "[SYNC COMMITTEE JOB] Waiting for completion of Atlantic job. 
QueryID: {}", + batch_id + ); + bankai + .atlantic_client + .poll_batch_status_until_done( + &batch_id, + Duration::new(10, 0), + usize::MAX, + ) + .await?; + + info!( + "[SYNC COMMITTEE JOB] Proof generation done by Atlantic. QueryID: {}", + batch_id + ); - info!("[SYNC COMMITTEE JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); + let proof = bankai + .atlantic_client + .fetch_proof(batch_id.as_str()) + .await?; - db_manager - .update_job_status(job.job_id, JobStatus::OffchainComputationFinished) - .await?; - } - JobType::EpochBatchUpdate => { - info!("[BATCH EPOCH JOB] Preparing inputs for program..."); + info!( + "[SYNC COMMITTEE JOB] Proof retrieved from Atlantic. QueryID: {}", + batch_id + ); - let proof = EpochUpdateBatch::new_by_epoch_range( - &bankai, - db_manager.clone(), - job.batch_range_begin_epoch.unwrap(), - job.batch_range_end_epoch.unwrap(), - ) - .await?; + db_manager + .update_job_status(job.job_id, JobStatus::AtlanticProofRetrieved) + .await?; + + current_status = JobStatus::AtlanticProofRetrieved; + + // Submit wrapped proof request + info!("[SYNC COMMITTEE JOB] Sending proof wrapping query to Atlantic.."); + wrapping_batch_id = + bankai.atlantic_client.submit_wrapped_proof(proof).await?; + info!( + "[SYNC COMMITTEE JOB] Proof wrapping query submitted to Atlantic. 
Wrapping QueryID: {}", + wrapping_batch_id + ); + + db_manager + .update_job_status(job.job_id, JobStatus::WrapProofRequested) + .await?; + db_manager + .set_atlantic_job_queryid( + job.job_id, + wrapping_batch_id.clone(), + AtlanticJobType::ProofWrapping, + ) + .await?; + + current_status = JobStatus::WrapProofRequested; + } + JobStatus::WrapProofRequested => { + // Pool for Atlantic execution done + bankai + .atlantic_client + .poll_batch_status_until_done( + &wrapping_batch_id, + Duration::new(10, 0), + usize::MAX, + ) + .await?; + + db_manager + .update_job_status(job.job_id, JobStatus::WrappedProofDone) + .await?; + + info!("[SYNC COMMITTEE JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); + + db_manager + .update_job_status(job.job_id, JobStatus::OffchainComputationFinished) + .await?; + break; + } + _ => { + error!("[EPOCH JOB] Unexpected behaviour"); + break; + } + } + } - db_manager - .update_job_status(job.job_id, JobStatus::ProgramInputsPrepared) - .await?; + JobType::EpochBatchUpdate => { + match current_status { + JobStatus::Created => { + info!("[BATCH EPOCH JOB] Preparing inputs for program..."); + + let circuit_inputs = EpochUpdateBatch::new_by_epoch_range( + &bankai, + db_manager.clone(), + job.batch_range_begin_epoch.unwrap(), + job.batch_range_end_epoch.unwrap(), + ) + .await?; + + let input_path = circuit_inputs.export(); + info!("[BATCH EPOCH JOB] Circuit inputs saved at {:?}", input_path); + + db_manager + .update_job_status(job.job_id, JobStatus::ProgramInputsPrepared) + .await?; + + current_status = JobStatus::ProgramInputsPrepared; + } + JobStatus::ProgramInputsPrepared => { + let circuit_inputs = EpochUpdateBatch::from_json::( + helpers::get_first_slot_for_epoch(job.batch_range_begin_epoch.unwrap()), + helpers::get_first_slot_for_epoch(job.batch_range_end_epoch.unwrap()), + )?; - info!("[BATCH EPOCH JOB] Starting trace generation..."); + info!("[BATCH EPOCH JOB] Starting trace 
generation..."); - CairoRunner::generate_pie(&proof, &bankai.config).await?; + CairoRunner::generate_pie(&circuit_inputs, &bankai.config).await?; - db_manager - .update_job_status(job.job_id, JobStatus::PieGenerated) - .await?; + db_manager + .update_job_status(job.job_id, JobStatus::PieGenerated) + .await?; - info!("[BATCH EPOCH JOB] Uploading PIE and sending proof generation request to Atlantic..."); + current_status = JobStatus::PieGenerated; + } + JobStatus::PieGenerated => { + let circuit_inputs = EpochUpdateBatch::from_json::( + helpers::get_first_slot_for_epoch(job.batch_range_begin_epoch.unwrap()), + helpers::get_first_slot_for_epoch(job.batch_range_end_epoch.unwrap()), + )?; - let batch_id = bankai.atlantic_client.submit_batch(proof).await?; + info!("[BATCH EPOCH JOB] Uploading PIE and sending proof generation request to Atlantic..."); - info!( - "[BATCH EPOCH JOB] Proof generation batch submitted to Atlantic. QueryID: {}", - batch_id - ); + batch_id = bankai.atlantic_client.submit_batch(circuit_inputs).await?; - db_manager - .update_job_status(job.job_id, JobStatus::OffchainProofRequested) - .await?; - db_manager - .set_atlantic_job_queryid( - job.job_id, - batch_id.clone(), - AtlanticJobType::ProofGeneration, - ) - .await?; + info!( + "[BATCH EPOCH JOB] Proof generation batch submitted to Atlantic. QueryID: {}", + batch_id + ); - // Pool for Atlantic execution done - bankai - .atlantic_client - .poll_batch_status_until_done(&batch_id, Duration::new(10, 0), usize::MAX) - .await?; + db_manager + .update_job_status(job.job_id, JobStatus::AtlanticProofRequested) + .await?; + db_manager + .set_atlantic_job_queryid( + job.job_id, + batch_id.clone(), + AtlanticJobType::ProofGeneration, + ) + .await?; + + current_status = JobStatus::AtlanticProofRequested; + } + JobStatus::AtlanticProofRequested | JobStatus::AtlanticProofRetrieved => { + // Pool for Atlantic execution done + info!( + "[BATCH EPOCH JOB] Waiting for completion of Atlantic proof generation job. 
QueryID: {}", + batch_id + ); - info!( - "[BATCH EPOCH JOB] Proof generation done by Atlantic. QueryID: {}", - batch_id - ); + bankai + .atlantic_client + .poll_batch_status_until_done( + &batch_id, + Duration::new(10, 0), + usize::MAX, + ) + .await?; + + info!( + "[BATCH EPOCH JOB] Proof generation done by Atlantic. QueryID: {}", + batch_id + ); - let proof = bankai - .atlantic_client - .fetch_proof(batch_id.as_str()) - .await?; + let proof = bankai + .atlantic_client + .fetch_proof(batch_id.as_str()) + .await?; - info!( - "[EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", - batch_id - ); + info!( + "[EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", + batch_id + ); - db_manager - .update_job_status(job.job_id, JobStatus::OffchainProofRetrieved) - .await?; + db_manager + .update_job_status(job.job_id, JobStatus::AtlanticProofRetrieved) + .await?; - // 5) Submit wrapped proof request - info!("[EPOCH JOB] Uploading proof and sending wrapping query to Atlantic.."); - let wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; - info!( - "[EPOCH JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", - wrapping_batch_id - ); + // 5) Submit wrapped proof request + info!( + "[EPOCH JOB] Uploading proof and sending wrapping query to Atlantic.." + ); + wrapping_batch_id = + bankai.atlantic_client.submit_wrapped_proof(proof).await?; + info!( + "[EPOCH JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", + wrapping_batch_id + ); + + db_manager + .update_job_status(job.job_id, JobStatus::WrapProofRequested) + .await?; + + db_manager + .set_atlantic_job_queryid( + job.job_id, + wrapping_batch_id.clone(), + AtlanticJobType::ProofWrapping, + ) + .await?; + + current_status = JobStatus::WrapProofRequested; + } + JobStatus::WrapProofRequested => { + // Pool for Atlantic execution done + info!( + "[SYNC COMMITTEE JOB] Waiting for completion of Atlantic proof wrapping job. 
QueryID: {}", + wrapping_batch_id + ); - db_manager - .update_job_status(job.job_id, JobStatus::WrapProofRequested) - .await?; - db_manager - .set_atlantic_job_queryid( - job.job_id, - wrapping_batch_id.clone(), - AtlanticJobType::ProofWrapping, - ) - .await?; + bankai + .atlantic_client + .poll_batch_status_until_done( + &wrapping_batch_id, + Duration::new(10, 0), + usize::MAX, + ) + .await?; - // Pool for Atlantic execution done - bankai - .atlantic_client - .poll_batch_status_until_done(&wrapping_batch_id, Duration::new(10, 0), usize::MAX) - .await?; + db_manager + .update_job_status(job.job_id, JobStatus::WrappedProofDone) + .await?; - db_manager - .update_job_status(job.job_id, JobStatus::WrappedProofDone) - .await?; + info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); - info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); + db_manager + .update_job_status(job.job_id, JobStatus::OffchainComputationFinished) + .await?; - db_manager - .update_job_status(job.job_id, JobStatus::OffchainComputationFinished) - .await?; + break; + } + _ => { + error!("[EPOCH JOB] Unexpected behaviour"); + break; + } + } + } } } - Ok(()) } diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index dd8e00f..4908d6d 100644 --- a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -20,7 +20,7 @@ use std::fs; use crate::utils::database_manager::DatabaseManager; use std::sync::Arc; -use tracing::{debug, info}; +use tracing::{debug, info, trace}; #[derive(Debug, Serialize, Deserialize)] pub struct EpochUpdateBatch { @@ -212,7 +212,7 @@ impl EpochUpdateBatch { let mut epochs = vec![]; // Fetch epochs sequentially from start_slot to end_slot, incrementing by 32 each time - let calculated_batch_size = end_epoch - start_epoch; + let calculated_batch_size = end_epoch - start_epoch + 1; let mut current_epoch = 
start_epoch; while current_epoch <= end_epoch { info!( @@ -267,7 +267,7 @@ impl EpochUpdateBatch { current_epoch += 1; } - info!("Paths {:?}", paths); + trace!("Paths for epochs {:?}", paths); let batch = EpochUpdateBatch { circuit_inputs, @@ -372,6 +372,29 @@ impl Provable for EpochUpdateBatch { first_slot, last_slot, first_slot, last_slot ) } + + fn inputs_path(&self) -> String { + let first_slot = self + .circuit_inputs + .epochs + .first() + .unwrap() + .circuit_inputs + .header + .slot; + let last_slot = self + .circuit_inputs + .epochs + .last() + .unwrap() + .circuit_inputs + .header + .slot; + format!( + "batches/epoch_batch/{}_to_{}/input_batch_{}_to_{}.json", + first_slot, last_slot, first_slot, last_slot + ) + } } impl Submittable for ExpectedEpochBatchOutputs { diff --git a/client-rs/src/epoch_update.rs b/client-rs/src/epoch_update.rs index c16daeb..4721822 100644 --- a/client-rs/src/epoch_update.rs +++ b/client-rs/src/epoch_update.rs @@ -47,37 +47,37 @@ impl EpochUpdate { } } -impl Provable for EpochUpdate { - fn id(&self) -> String { - let mut hasher = Sha256::new(); - hasher.update(b"epoch_update"); - hasher.update(self.circuit_inputs.header.tree_hash_root().as_slice()); - hex::encode(hasher.finalize().as_slice()) - } - - fn export(&self) -> Result { - let json = serde_json::to_string_pretty(&self).unwrap(); - let dir_path = format!("batches/epoch/{}", self.circuit_inputs.header.slot); - fs::create_dir_all(dir_path.clone()).map_err(Error::IoError)?; - let path = format!( - "{}/input_{}.json", - dir_path, self.circuit_inputs.header.slot - ); - fs::write(path.clone(), json).map_err(Error::IoError)?; - Ok(path) - } - - fn pie_path(&self) -> String { - format!( - "batches/epoch/{}/pie_{}.zip", - self.circuit_inputs.header.slot, self.circuit_inputs.header.slot - ) - } - - fn proof_type(&self) -> ProofType { - ProofType::Epoch - } -} +// impl Provable for EpochUpdate { +// fn id(&self) -> String { +// let mut hasher = Sha256::new(); +// 
hasher.update(b"epoch_update"); +// hasher.update(self.circuit_inputs.header.tree_hash_root().as_slice()); +// hex::encode(hasher.finalize().as_slice()) +// } + +// fn export(&self) -> Result { +// let json = serde_json::to_string_pretty(&self).unwrap(); +// let dir_path = format!("batches/epoch/{}", self.circuit_inputs.header.slot); +// fs::create_dir_all(dir_path.clone()).map_err(Error::IoError)?; +// let path = format!( +// "{}/input_{}.json", +// dir_path, self.circuit_inputs.header.slot +// ); +// fs::write(path.clone(), json).map_err(Error::IoError)?; +// Ok(path) +// } + +// fn pie_path(&self) -> String { +// format!( +// "batches/epoch/{}/pie_{}.zip", +// self.circuit_inputs.header.slot, self.circuit_inputs.header.slot +// ) +// } + +// fn proof_type(&self) -> ProofType { +// ProofType::Epoch +// } +// } /// Contains all necessary inputs for generating and verifying epoch proofs #[derive(Debug, Serialize, Deserialize)] diff --git a/client-rs/src/helpers.rs b/client-rs/src/helpers.rs index 221b9e4..412bcf3 100644 --- a/client-rs/src/helpers.rs +++ b/client-rs/src/helpers.rs @@ -69,6 +69,14 @@ pub fn get_sync_committee_id_by_epoch(epoch: u64) -> u64 { epoch / EPOCHS_PER_SYNC_COMMITTEE } +pub fn get_sync_committee_id_by_slot(epoch: u64) -> u64 { + epoch / SLOTS_PER_SYNC_COMMITTEE +} + +pub fn get_first_slot_for_sync_committee(slot: u64) -> u64 { + slot * SLOTS_PER_SYNC_COMMITTEE +} + // Since beacon chain RPCs have different response structure (quicknode responds different than nidereal) we use this event extraction logic pub fn extract_json_from_event(event_text: &str) -> Option { for line in event_text.lines() { diff --git a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs index c60f403..acf6922 100644 --- a/client-rs/src/routes/mod.rs +++ b/client-rs/src/routes/mod.rs @@ -22,10 +22,24 @@ pub async fn handle_get_status(State(state): State) -> impl IntoRespon Err(e) => 0, }; let in_progress_jobs_count = 
state.db_manager.count_jobs_in_progress().await.unwrap(); + let last_sync_committee_in_progress = state + .db_manager + .get_latest_sync_committee_in_progress() + .await + .unwrap() + .unwrap(); + + // let beacon_chain_state = state + // .db_manager + // .get_latest_known_beacon_chain_state() + // .await + // .unwrap(); Json(json!({ "success": true, "details": { "last_epoch_in_progress": last_epoch_in_progress, - "jobs_in_progress_count": in_progress_jobs_count + "last_sync_committee_in_progress": last_sync_committee_in_progress, + "jobs_in_progress_count": in_progress_jobs_count, + } })) } diff --git a/client-rs/src/state.rs b/client-rs/src/state.rs index 3d61761..b9d2cd2 100644 --- a/client-rs/src/state.rs +++ b/client-rs/src/state.rs @@ -15,7 +15,7 @@ pub struct Job { pub job_id: Uuid, pub job_type: JobType, pub job_status: JobStatus, - pub slot: u64, + pub slot: Option, pub batch_range_begin_epoch: Option, pub batch_range_end_epoch: Option, } @@ -39,9 +39,9 @@ pub enum JobStatus { #[postgres(name = "PIE_GENERATED")] PieGenerated, #[postgres(name = "OFFCHAIN_PROOF_REQUESTED")] - OffchainProofRequested, + AtlanticProofRequested, #[postgres(name = "OFFCHAIN_PROOF_RETRIEVED")] - OffchainProofRetrieved, + AtlanticProofRetrieved, #[postgres(name = "WRAP_PROOF_REQUESTED")] WrapProofRequested, #[postgres(name = "WRAPPED_PROOF_DONE")] @@ -67,8 +67,8 @@ impl ToString for JobStatus { JobStatus::ProgramInputsPrepared => "PROGRAM_INPUTS_PREPARED".to_string(), JobStatus::StartedTraceGeneration => "STARTED_TRACE_GENERATION".to_string(), JobStatus::PieGenerated => "PIE_GENERATED".to_string(), - JobStatus::OffchainProofRequested => "OFFCHAIN_PROOF_REQUESTED".to_string(), - JobStatus::OffchainProofRetrieved => "OFFCHAIN_PROOF_RETRIEVED".to_string(), + JobStatus::AtlanticProofRequested => "OFFCHAIN_PROOF_REQUESTED".to_string(), + JobStatus::AtlanticProofRetrieved => "OFFCHAIN_PROOF_RETRIEVED".to_string(), JobStatus::WrapProofRequested => "WRAP_PROOF_REQUESTED".to_string(), 
JobStatus::WrappedProofDone => "WRAPPED_PROOF_DONE".to_string(), JobStatus::OffchainComputationFinished => "OFFCHAIN_COMPUTATION_FINISHED".to_string(), @@ -89,8 +89,8 @@ impl FromStr for JobStatus { "CREATED" => Ok(JobStatus::Created), "PROGRAM_INPUTS_PREPARED" => Ok(JobStatus::ProgramInputsPrepared), "PIE_GENERATED" => Ok(JobStatus::PieGenerated), - "OFFCHAIN_PROOF_REQUESTED" => Ok(JobStatus::OffchainProofRequested), - "OFFCHAIN_PROOF_RETRIEVED" => Ok(JobStatus::OffchainProofRetrieved), + "OFFCHAIN_PROOF_REQUESTED" => Ok(JobStatus::AtlanticProofRequested), + "OFFCHAIN_PROOF_RETRIEVED" => Ok(JobStatus::AtlanticProofRetrieved), "WRAP_PROOF_REQUESTED" => Ok(JobStatus::WrapProofRequested), "WRAPPED_PROOF_DONE" => Ok(JobStatus::WrappedProofDone), "OFFCHAIN_COMPUTATION_FINISHED" => Ok(JobStatus::OffchainComputationFinished), @@ -175,6 +175,10 @@ impl std::fmt::Display for StarknetError { match self { StarknetError::ProviderError(err) => write!(f, "Provider error: {}", err), StarknetError::AccountError(msg) => write!(f, "Account error: {}", msg), + StarknetError::TransactionError(msg) => write!(f, "Transaction error: {}", msg), + StarknetError::TimeoutError => { + write!(f, "Waiting for transaction timeout error") + } } } } diff --git a/client-rs/src/sync_committee.rs b/client-rs/src/sync_committee.rs index 05949e9..997b845 100644 --- a/client-rs/src/sync_committee.rs +++ b/client-rs/src/sync_committee.rs @@ -80,6 +80,13 @@ impl Provable for SyncCommitteeUpdate { ) } + fn inputs_path(&self) -> String { + format!( + "batches/committee/{}/input_{}.json", + self.circuit_inputs.beacon_slot, self.circuit_inputs.beacon_slot, + ) + } + fn proof_type(&self) -> ProofType { ProofType::SyncCommittee } diff --git a/client-rs/src/traits.rs b/client-rs/src/traits.rs index 53668f3..ccc79aa 100644 --- a/client-rs/src/traits.rs +++ b/client-rs/src/traits.rs @@ -11,7 +11,7 @@ pub trait Submittable { } pub enum ProofType { - Epoch, + //Epoch, EpochBatch, SyncCommittee, } @@ -24,4 +24,5 @@ 
pub trait Provable: Serialize { // T: serde::de::DeserializeOwned; fn proof_type(&self) -> ProofType; fn pie_path(&self) -> String; + fn inputs_path(&self) -> String; } diff --git a/client-rs/src/utils/atlantic_client.rs b/client-rs/src/utils/atlantic_client.rs index 520cbf6..e550183 100644 --- a/client-rs/src/utils/atlantic_client.rs +++ b/client-rs/src/utils/atlantic_client.rs @@ -1,11 +1,11 @@ use crate::traits::{ProofType, Provable}; use crate::Error; -use futures::{StreamExt, TryStreamExt}; +use futures::StreamExt; use reqwest::multipart::{Form, Part}; use reqwest::Body; use serde::{Deserialize, Serialize}; use std::env; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use tokio::fs; use tokio::time::{sleep, Duration}; use tokio_util::io::ReaderStream; @@ -55,8 +55,8 @@ impl AtlanticClient { let percent = (*uploaded as f64 / total_bytes as f64) * 100.0; if percent >= *next_threshold as f64 && *next_threshold <= 100 { - println!( - "Uploaded {}% of the file to Atlantic API...", + info!( + "Uploaded {}% of the PIE file to Atlantic API...", *next_threshold ); *next_threshold += 10; @@ -93,7 +93,7 @@ impl AtlanticClient { let external_id = format!( "update_{}", match batch.proof_type() { - ProofType::Epoch => "epoch", + //ProofType::Epoch => "epoch", ProofType::SyncCommittee => "sync_committee", ProofType::EpochBatch => "epoch_batch", } diff --git a/client-rs/src/utils/cairo_runner.rs b/client-rs/src/utils/cairo_runner.rs index 819d407..40d97b3 100644 --- a/client-rs/src/utils/cairo_runner.rs +++ b/client-rs/src/utils/cairo_runner.rs @@ -3,7 +3,7 @@ use crate::BankaiConfig; use crate::{traits::Provable, Error}; use tokio::task; use tokio::task::JoinError; -use tracing::info; +use tracing::{debug, info}; pub struct CairoRunner(); @@ -18,10 +18,11 @@ impl CairoRunner { .await .map_err(|e| Error::CairoRunError(format!("Semaphore error: {}", e)))?; - let input_path = input.export()?; + let input_path = input.inputs_path(); + info!("Cairo Input path: {}", 
input_path); let program_path = match input.proof_type() { - ProofType::Epoch => config.epoch_circuit_path.clone(), + //ProofType::Epoch => config.epoch_circuit_path.clone(), ProofType::SyncCommittee => config.committee_circuit_path.clone(), ProofType::EpochBatch => config.epoch_batch_circuit_path.clone(), }; diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index fe77f05..169df6e 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -19,6 +19,8 @@ pub struct JobSchema { pub batch_range_begin_epoch: i64, pub batch_range_end_epoch: i64, pub job_type: JobType, + pub atlantic_proof_generate_batch_id: Option, + pub atlantic_proof_wrapper_batch_id: Option, //pub updated_at: i64, } @@ -130,7 +132,7 @@ impl DatabaseManager { &[ &job.job_id, &job.job_status.to_string(), - &(job.slot as i64), + &(job.slot.unwrap() as i64), &"EPOCH_BATCH_UPDATE", &(job.batch_range_begin_epoch.unwrap() as i64), &(job.batch_range_end_epoch.unwrap() as i64), @@ -147,7 +149,7 @@ impl DatabaseManager { &[ &job.job_id, &job.job_status.to_string(), - &(job.slot as i64), + &(job.slot.unwrap() as i64), &"SYNC_COMMITTEE_UPDATE", ], ) @@ -165,12 +167,48 @@ impl DatabaseManager { ) -> Result, Box> { let row_opt = self .client - .query_opt("SELECT status FROM jobs WHERE job_id = $1", &[&job_id]) + .query_opt("SELECT status FROM jobs WHERE job_uuid = $1", &[&job_id]) .await?; Ok(row_opt.map(|row| row.get("status"))) } + pub async fn get_job_by_id( + &self, + job_id: Uuid, + ) -> Result, Box> { + let row_opt = self + .client + .query_opt("SELECT * FROM jobs WHERE job_uuid = $1", &[&job_id]) + .await?; + + Ok(row_opt.map(|row| { + let job_status_str: String = row.get("job_status"); + let job_status = job_status_str + .parse::() + .expect("Unknown job status from DB"); + + let job_type_str: String = row.get("type"); + let job_type = job_type_str + .parse::() + .expect("Unknown job type from DB"); + + JobSchema { + 
job_uuid: row.get("job_uuid"), + job_status, + slot: row.get("slot"), + batch_range_begin_epoch: row + .get::<&str, Option>("batch_range_begin_epoch") + .unwrap_or(0), + batch_range_end_epoch: row + .get::<&str, Option>("batch_range_end_epoch") + .unwrap_or(0), + job_type, + atlantic_proof_generate_batch_id: row.get("atlantic_proof_generate_batch_id"), + atlantic_proof_wrapper_batch_id: row.get("atlantic_proof_wrapper_batch_id"), + } + })) + } // pub async fn get_latest_slot_id_in_progress( // &self, // ) -> Result, Box> { @@ -198,11 +236,12 @@ impl DatabaseManager { &self, ) -> Result, Box> { // Query the latest slot with job_status in ('in_progress', 'initialized') + // //, 'CANCELLED', 'ERROR' let row_opt = self .client .query_opt( "SELECT batch_range_end_epoch FROM jobs - WHERE job_status NOT IN ('DONE', 'CANCELLED', 'ERROR') + WHERE job_status NOT IN ('DONE') AND batch_range_end_epoch != 0 AND type = 'EPOCH_BATCH_UPDATE' ORDER BY batch_range_end_epoch DESC @@ -240,7 +279,7 @@ impl DatabaseManager { // Extract and return the slot ID if let Some(row) = row_opt { Ok(Some(helpers::slot_to_sync_committee_id( - row.get::<_, i64>("batch_range_end_epoch").to_u64().unwrap(), + row.get::<_, i64>("slot").to_u64().unwrap(), ))) } else { Ok(Some(0)) @@ -354,9 +393,16 @@ impl DatabaseManager { job_uuid: row.get("job_uuid"), job_status, slot: row.get("slot"), - batch_range_begin_epoch: row.get("batch_range_begin_epoch"), - batch_range_end_epoch: row.get("batch_range_end_epoch"), + batch_range_begin_epoch: row + .get::<&str, Option>("batch_range_begin_epoch") + .unwrap_or(0), + batch_range_end_epoch: row + .get::<&str, Option>("batch_range_end_epoch") + .unwrap_or(0), job_type, + atlantic_proof_generate_batch_id: row + .get("atlantic_proof_generate_batch_id"), + atlantic_proof_wrapper_batch_id: row.get("atlantic_proof_wrapper_batch_id"), //updated_at: row.get("updated_at"), }) }, @@ -450,6 +496,66 @@ impl DatabaseManager { Ok(()) } + pub async fn get_jobs_with_statuses( + 
&self, + desired_statuses: Vec, + ) -> Result, Box> { + if desired_statuses.is_empty() { + return Ok(vec![]); + } + + let status_strings: Vec = desired_statuses.iter().map(|s| s.to_string()).collect(); + + let placeholders: Vec = (1..=status_strings.len()) + .map(|i| format!("${}", i)) + .collect(); + let query = format!( + "SELECT * FROM jobs WHERE job_status IN ({})", + placeholders.join(", ") + ); + + let params: Vec<&(dyn tokio_postgres::types::ToSql + Sync)> = status_strings + .iter() + .map(|s| s as &(dyn tokio_postgres::types::ToSql + Sync)) + .collect(); + + let rows = self.client.query(&query, ¶ms).await?; + + let jobs: Vec = rows + .into_iter() + .map( + |row: Row| -> Result> { + let job_type_str: String = row.get("type"); + let job_status_str: String = row.get("job_status"); + + let job_type = JobType::from_str(&job_type_str) + .map_err(|err| format!("Failed to parse job type: {}", err))?; + let job_status = JobStatus::from_str(&job_status_str) + .map_err(|err| format!("Failed to parse job status: {}", err))?; + + Ok(JobSchema { + job_uuid: row.get("job_uuid"), + job_status, + slot: row.get("slot"), + batch_range_begin_epoch: row + .get::<&str, Option>("batch_range_begin_epoch") + .unwrap_or(0), + batch_range_end_epoch: row + .get::<&str, Option>("batch_range_end_epoch") + .unwrap_or(0), + job_type, + atlantic_proof_generate_batch_id: row + .get("atlantic_proof_generate_batch_id"), + atlantic_proof_wrapper_batch_id: row.get("atlantic_proof_wrapper_batch_id"), + //updated_at: row.get("updated_at"), + }) + }, + ) + .collect::, _>>()?; + + Ok(jobs) + } + // async fn fetch_job_by_status( // client: &Client, // status: JobStatus, diff --git a/client-rs/src/utils/starknet_client.rs b/client-rs/src/utils/starknet_client.rs index ed5d435..c464d5a 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -4,12 +4,17 @@ use starknet::accounts::{Account, ConnectedAccount}; use starknet::core::types::{Call, FunctionCall}; 
use starknet::macros::selector; use starknet::providers::{Provider, ProviderError}; +use tracing::{debug, error, info, trace}; + use starknet::{ accounts::{ExecutionEncoding, SingleOwnerAccount}, contract::ContractFactory, core::{ chain_id, - types::{contract::SierraClass, BlockId, BlockTag, Felt}, + types::{ + contract::SierraClass, BlockId, BlockTag, Felt, TransactionExecutionStatus, + TransactionStatus, + }, }, macros::felt, providers::{ @@ -19,6 +24,7 @@ use starknet::{ signers::{LocalWallet, SigningKey}, }; use std::sync::Arc; +use tokio::time::{sleep, Duration}; use crate::contract_init::ContractInitializationData; use crate::traits::Submittable; @@ -76,6 +82,8 @@ pub struct StarknetClient { pub enum StarknetError { ProviderError(ProviderError), AccountError(String), + TransactionError(String), + TimeoutError, } impl StarknetClient { @@ -149,7 +157,7 @@ impl StarknetClient { calldata: update.to_calldata(), }] ); - let result = self + let send_result = self .account .execute_v1(vec![Call { to: config.contract_address, @@ -157,13 +165,30 @@ impl StarknetClient { calldata: update.to_calldata(), }]) .send() - .await - .map_err(|e| StarknetError::AccountError(e.to_string()))?; + .await; + //.map_err(|e| StarknetError::TransactionError(e.to_string()))?; + + match send_result { + Ok(tx_response) => { + let tx_hash = tx_response.transaction_hash; + info!("Transaction sent successfully! 
Hash: {:#x}", tx_hash); + Ok(tx_hash) + } + Err(e) => { + //error!("Transaction execution error: {:#?}", e); + + // Return a more descriptive error + Err(StarknetError::TransactionError(format!( + "TransactionExecutionError: {:#?}", + e + ))) + } + } - println!("tx_hash: {:?}", result.transaction_hash); + // println!("tx_hash: {:?}", result.transaction_hash); - // Return the transaction hash - Ok(result.transaction_hash) + // // Return the transaction hash + // Ok(result.transaction_hash) } pub async fn get_committee_hash( @@ -263,4 +288,47 @@ impl StarknetClient { println!("latest_committee_id: {:?}", latest_committee_id); Ok(*latest_committee_id.first().unwrap()) } + + pub async fn wait_for_confirmation(&self, tx_hash: Felt) -> Result<(), StarknetError> { + let max_retries = 20; + let delay = Duration::from_secs(5); + + for _ in 0..max_retries { + let status = self.get_transaction_status(tx_hash).await?; + + info!("Starknet transaction status: {:?}", status); + + match status { + TransactionStatus::AcceptedOnL1(TransactionExecutionStatus::Succeeded) + | TransactionStatus::AcceptedOnL2(TransactionExecutionStatus::Succeeded) => { + info!("Starknet transaction confirmed: {:?}", tx_hash); + return Ok(()); + } + TransactionStatus::Rejected => { + return Err(StarknetError::TransactionError( + "Transaction rejected".to_string(), + )); + } + _ => { + // Still pending, wait and retry + sleep(delay).await; + } + } + } + + Err(StarknetError::TimeoutError) + } + + pub async fn get_transaction_status( + &self, + tx_hash: Felt, + ) -> Result { + let provider = self.account.provider(); + let tx_status = provider + .get_transaction_status(tx_hash) + .await + .map_err(StarknetError::ProviderError)?; + + Ok(tx_status) + } } From 85d5475b34f40f1eddf5c54fcd96cd6ec2b8d2a7 Mon Sep 17 00:00:00 2001 From: petscheit Date: Mon, 3 Feb 2025 10:00:32 +0100 Subject: [PATCH 25/66] feat: add program hash update + pausing --- contract/src/lib.cairo | 243 ++++++++++++++++++++++++++++++++++++--- 
contract/src/utils.cairo | 2 - 2 files changed, 230 insertions(+), 15 deletions(-) diff --git a/contract/src/lib.cairo b/contract/src/lib.cairo index 91e42b2..cbab87f 100644 --- a/contract/src/lib.cairo +++ b/contract/src/lib.cairo @@ -59,6 +59,17 @@ pub trait IBankaiContract { execution_hash: u256, execution_height: u64, ); + + fn propose_program_hash_update( + ref self: TContractState, + new_committee_hash: felt252, + new_epoch_hash: felt252, + new_batch_hash: felt252 + ); + fn execute_program_hash_update(ref self: TContractState); + fn pause(ref self: TContractState); + fn unpause(ref self: TContractState); + fn is_paused(self: @TContractState) -> bool; } pub mod utils; @@ -69,7 +80,7 @@ pub mod BankaiContract { Map, StorageMapReadAccess, StorageMapWriteAccess, StoragePointerReadAccess, StoragePointerWriteAccess, }; - use starknet::{ContractAddress, get_caller_address}; + use starknet::{ContractAddress, get_caller_address, get_block_timestamp}; use integrity::{ Integrity, IntegrityWithConfig, SHARP_BOOTLOADER_PROGRAM_HASH, VerifierConfiguration, }; @@ -80,7 +91,9 @@ pub mod BankaiContract { CommitteeUpdated: CommitteeUpdated, EpochUpdated: EpochUpdated, EpochBatch: EpochBatch, - EpochDecommitted: EpochDecommitted + EpochDecommitted: EpochDecommitted, + Paused: Paused, + Unpaused: Unpaused, } #[derive(Drop, starknet::Event)] @@ -118,20 +131,43 @@ pub mod BankaiContract { execution_height: u64 } + /// Emitted when the contract is paused + #[derive(Drop, starknet::Event)] + pub struct Paused {} + + /// Emitted when the contract is unpaused + #[derive(Drop, starknet::Event)] + pub struct Unpaused {} + + /// Time delay required for program hash updates (48 hours in seconds) + const UPDATE_DELAY: u64 = 172800; + #[storage] struct Storage { - committee: Map::< - u64, u256, - >, // maps committee index to committee hash (sha256(x || y)) of aggregate key - epochs: Map::, // maps beacon slot to header root and state root - batches: Map::, // Available batch roots - owner: 
ContractAddress, - latest_epoch: u64, + // Committee Management + committee: Map::, // Maps committee index to committee hash (sha256(x || y)) of aggregate key latest_committee_id: u64, initialization_committee: u64, + + // Epoch Management + epochs: Map::, // Maps beacon slot to header root and state root + latest_epoch: u64, + + // Batch Management + batches: Map::, // Tracks verified batch roots + + // Program Hash Management committee_update_program_hash: felt252, epoch_update_program_hash: felt252, epoch_batch_program_hash: felt252, + pending_committee_program_hash: felt252, + pending_epoch_program_hash: felt252, + pending_batch_program_hash: felt252, + pending_update_timestamp: u64, + + // Access Control + owner: ContractAddress, + paused: bool, } #[constructor] @@ -186,14 +222,13 @@ pub mod BankaiContract { fn verify_committee_update( ref self: ContractState, beacon_state_root: u256, committee_hash: u256, slot: u64, ) { + assert(!self.paused.read(), 'Contract is paused'); let epoch_proof = self.epochs.read(slot); assert(beacon_state_root == epoch_proof.beacon_state_root, 'Invalid State Root!'); - // for now we dont ensure the fact hash is valid let fact_hash = compute_committee_proof_fact_hash( @self, beacon_state_root, committee_hash, slot, ); - // println!("fact_hash: {:?}", fact_hash); assert(is_valid_fact_hash(fact_hash), 'Invalid Fact Hash!'); // The new committee is always assigned at the start of the previous committee @@ -221,7 +256,7 @@ pub mod BankaiContract { execution_hash: u256, execution_height: u64, ) { - + assert(!self.paused.read(), 'Contract is paused'); let signing_committee_id = (slot / 0x2000); let valid_committee_hash = self.committee.read(signing_committee_id); assert(committee_hash == valid_committee_hash, 'Invalid Committee Hash!'); @@ -258,6 +293,7 @@ pub mod BankaiContract { execution_hash: u256, execution_height: u64, ) { + assert(!self.paused.read(), 'Contract is paused'); let signing_committee_id = (slot / 0x2000); let 
valid_committee_hash = self.committee.read(signing_committee_id); assert(committee_hash == valid_committee_hash, 'Invalid Committee Hash!'); @@ -298,7 +334,7 @@ pub mod BankaiContract { execution_hash: u256, execution_height: u64, ) { - + assert(!self.paused.read(), 'Contract is paused'); let known_batch_root = self.batches.read(batch_root); assert(known_batch_root, 'Batch root not known!'); @@ -315,9 +351,58 @@ pub mod BankaiContract { batch_root: batch_root, slot: slot, execution_hash: execution_hash, execution_height: execution_height, })); } + + fn propose_program_hash_update( + ref self: ContractState, + new_committee_hash: felt252, + new_epoch_hash: felt252, + new_batch_hash: felt252 + ) { + assert(!self.paused.read(), 'Contract is paused'); + assert(get_caller_address() == self.owner.read(), 'Caller is not owner'); + + self.pending_committee_program_hash.write(new_committee_hash); + self.pending_epoch_program_hash.write(new_epoch_hash); + self.pending_batch_program_hash.write(new_batch_hash); + self.pending_update_timestamp.write(get_block_timestamp() + UPDATE_DELAY); + } + fn execute_program_hash_update(ref self: ContractState) { + assert(get_caller_address() == self.owner.read(), 'Caller is not owner'); + assert(get_block_timestamp() >= self.pending_update_timestamp.read(), 'Delay not elapsed'); + + // Update program hashes + self.committee_update_program_hash.write(self.pending_committee_program_hash.read()); + self.epoch_update_program_hash.write(self.pending_epoch_program_hash.read()); + self.epoch_batch_program_hash.write(self.pending_batch_program_hash.read()); + + // Clear pending updates + self.pending_committee_program_hash.write(0); + self.pending_epoch_program_hash.write(0); + self.pending_batch_program_hash.write(0); + self.pending_update_timestamp.write(0); + } + + fn pause(ref self: ContractState) { + assert(get_caller_address() == self.owner.read(), 'Caller is not owner'); + assert(!self.paused.read(), 'Contract is already paused'); + 
self.paused.write(true); + self.emit(Event::Paused(Paused {})); + } + + fn unpause(ref self: ContractState) { + assert(get_caller_address() == self.owner.read(), 'Caller is not owner'); + assert(self.paused.read(), 'Contract is not paused'); + self.paused.write(false); + self.emit(Event::Unpaused(Unpaused {})); + } + + fn is_paused(self: @ContractState) -> bool { + self.paused.read() + } } + /// Internal helper functions for computing fact hashes fn compute_committee_proof_fact_hash( self: @ContractState, beacon_state_root: u256, committee_hash: u256, slot: u64, ) -> felt252 { @@ -334,6 +419,7 @@ pub mod BankaiContract { return fact_hash; } + /// Computes fact hash for epoch proof verification fn compute_epoch_proof_fact_hash( self: @ContractState, header_root: u256, @@ -359,6 +445,7 @@ pub mod BankaiContract { return fact_hash; } + /// Computes fact hash for epoch batch verification fn compute_epoch_batch_fact_hash( self: @ContractState, batch_root: felt252, @@ -398,4 +485,134 @@ pub mod BankaiContract { let integrity = Integrity::new().with_config(config, SECURITY_BITS); integrity.is_fact_hash_valid(fact_hash) } +} + +#[cfg(test)] +mod tests { + use super::BankaiContract; + use super::IBankaiContract; + use starknet::contract_address_const; + use starknet::testing::set_caller_address; + use starknet::testing::set_block_timestamp; + + // Helper function to deploy the contract for testing + fn deploy_contract() -> BankaiContract::ContractState { + let mut state = BankaiContract::contract_state_for_testing(); + + // Set caller as contract deployer + set_caller_address(contract_address_const::<0x123>()); + + // Initialize with some test values + BankaiContract::constructor( + ref state, + 1, // committee_id + 1234.into(), // committee_hash + 111.into(), // committee_update_program_hash + 222.into(), // epoch_update_program_hash + 333.into() // epoch_batch_program_hash + ); + + state + } + + #[test] + fn test_constructor() { + let state = deploy_contract(); + + 
assert!(!IBankaiContract::is_paused(@state)); + assert_eq!(IBankaiContract::get_latest_epoch(@state), 0); + assert_eq!(IBankaiContract::get_latest_committee_id(@state), 1); + assert_eq!(IBankaiContract::get_committee_hash(@state, 1), 1234.into()); + assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 111); + assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 222); + } + + #[test] + fn test_pause_unpause() { + let mut state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + set_caller_address(owner); + + // Test initial state + assert!(!IBankaiContract::is_paused(@state)); + + // Test pause + IBankaiContract::pause(ref state); + assert!(IBankaiContract::is_paused(@state)); + + // Test unpause + IBankaiContract::unpause(ref state); + assert!(!IBankaiContract::is_paused(@state)); + } + + #[test] + #[should_panic(expected: ('Caller is not owner',))] + fn test_pause_unauthorized() { + let mut state = deploy_contract(); + + // Try to pause from different address + let other = contract_address_const::<0x456>(); + set_caller_address(other); + IBankaiContract::pause(ref state); + } + + #[test] + fn test_program_hash_update() { + let mut state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + set_caller_address(owner); + + // Set initial timestamp + set_block_timestamp(1000); + + // Propose update + IBankaiContract::propose_program_hash_update( + ref state, + 444.into(), // new_committee_hash + 555.into(), // new_epoch_hash + 666.into() // new_batch_hash + ); + + // Execute after delay + set_block_timestamp(1000 + 172800); // After delay + IBankaiContract::execute_program_hash_update(ref state); + + // Verify updates + assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 444); + assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 555); + } + + #[test] + #[should_panic(expected: ('Delay not elapsed',))] + fn test_program_hash_update_too_early() { + let mut state = 
deploy_contract(); + let owner = contract_address_const::<0x123>(); + set_caller_address(owner); + + // Set initial timestamp + set_block_timestamp(1000); + + // Propose update + IBankaiContract::propose_program_hash_update( + ref state, + 444.into(), // new_committee_hash + 555.into(), // new_epoch_hash + 666.into() // new_batch_hash + ); + + // Try to execute before delay + set_block_timestamp(1000 + 172799); // Just before delay + IBankaiContract::execute_program_hash_update(ref state); + } + + #[test] + fn test_getters() { + let state = deploy_contract(); + + assert_eq!(IBankaiContract::get_committee_hash(@state, 1), 1234.into()); + assert_eq!(IBankaiContract::get_latest_epoch(@state), 0); + assert_eq!(IBankaiContract::get_latest_committee_id(@state), 1); + assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 111); + assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 222); + } } \ No newline at end of file diff --git a/contract/src/utils.cairo b/contract/src/utils.cairo index 4b1fb57..105eed7 100644 --- a/contract/src/utils.cairo +++ b/contract/src/utils.cairo @@ -166,8 +166,6 @@ mod tests { index ); - println!("Computed root: {:?}", computed_root); - // Expected root is the first value in the JSON array (0x0) let expected_root = 3014209719831846118507369742452047831482182187060364606511726060971609846063; From 067be101139b9102831c166a46d8e822c40d41ae Mon Sep 17 00:00:00 2001 From: petscheit Date: Mon, 3 Feb 2025 12:49:09 +0100 Subject: [PATCH 26/66] chore: fix contract and add new config --- client-rs/src/config.rs | 6 +++--- contract/src/lib.cairo | 7 ++++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/client-rs/src/config.rs b/client-rs/src/config.rs index cd7978e..529e6d1 100644 --- a/client-rs/src/config.rs +++ b/client-rs/src/config.rs @@ -27,11 +27,11 @@ impl Default for BankaiConfig { fn default() -> Self { Self { contract_class_hash: Felt::from_hex( - 
"0x02b5b08b233132464c437cf15509338e65ae7acc20419a37a9449a1d8e927f46", + "0x01b15e6fcdaf36242fa13f1218fc4babfa59f54ca824458fabd0831f7acc4fe1", ) .unwrap(), contract_address: Felt::from_hex( - "0x440b622a97fab3f31a35e7e710a8a508f6693d61d74171b5c2304f5e37ccde8", + "0x12fc37bbf67de953814b4461c5459340ba812faad2b2d51b3865b8b793e7b3e", ) .unwrap(), committee_update_program_hash: Felt::from_hex( @@ -43,7 +43,7 @@ impl Default for BankaiConfig { ) .unwrap(), epoch_batch_program_hash: Felt::from_hex( - "0x19bc492f1036c889939a5174e8f77ffbe89676c8d5f1adef0a825d2a6cc2a2f", + "0x5f4dad2d8549e91c25694875eb02fc2910eeead0e1a13d3061464a3eaa4bd8d", ) .unwrap(), contract_path: "../contract/target/release/bankai_BankaiContract.contract_class.json" diff --git a/contract/src/lib.cairo b/contract/src/lib.cairo index 9a61c18..d41052d 100644 --- a/contract/src/lib.cairo +++ b/contract/src/lib.cairo @@ -294,6 +294,7 @@ pub mod BankaiContract { execution_height: u64, ) { assert(!self.paused.read(), 'Contract is paused'); + let signing_committee_id = (slot / 0x2000); let valid_committee_hash = self.committee.read(signing_committee_id); assert(committee_hash == valid_committee_hash, 'Invalid Committee Hash!'); @@ -317,7 +318,7 @@ pub mod BankaiContract { let latest_epoch = self.latest_epoch_slot.read(); if slot > latest_epoch { - self.latest_epoch.write(slot); + self.latest_epoch_slot.write(slot); } } @@ -521,7 +522,7 @@ mod tests { let state = deploy_contract(); assert!(!IBankaiContract::is_paused(@state)); - assert_eq!(IBankaiContract::get_latest_epoch(@state), 0); + assert_eq!(IBankaiContract::get_latest_epoch_slot(@state), 0); assert_eq!(IBankaiContract::get_latest_committee_id(@state), 1); assert_eq!(IBankaiContract::get_committee_hash(@state, 1), 1234.into()); assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 111); @@ -611,7 +612,7 @@ mod tests { let state = deploy_contract(); assert_eq!(IBankaiContract::get_committee_hash(@state, 1), 1234.into()); - 
assert_eq!(IBankaiContract::get_latest_epoch(@state), 0); + assert_eq!(IBankaiContract::get_latest_epoch_slot(@state), 0); assert_eq!(IBankaiContract::get_latest_committee_id(@state), 1); assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 111); assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 222); From 10daacaf2a7f09657e07a9730646ddc5448c7af4 Mon Sep 17 00:00:00 2001 From: petscheit Date: Mon, 3 Feb 2025 12:50:48 +0100 Subject: [PATCH 27/66] feat: restore cli functionality --- client-rs/Cargo.toml | 10 +--- client-rs/src/bankai_client.rs | 1 - client-rs/src/epoch_batch.rs | 1 - client-rs/src/epoch_update.rs | 70 ++++++++++++++------------ client-rs/src/main.rs | 14 ++++-- client-rs/src/traits.rs | 2 +- client-rs/src/utils/atlantic_client.rs | 2 +- client-rs/src/utils/cairo_runner.rs | 2 +- client-rs/src/utils/rpc.rs | 2 +- client-rs/src/utils/starknet_client.rs | 55 +++++++++----------- 10 files changed, 78 insertions(+), 81 deletions(-) diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index 8f97e0b..2015ead 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -3,19 +3,13 @@ name = "client-rs" version = "0.1.0" edition = "2021" -[features] -daemon = [] -cli = [] - [[bin]] -name = "daemon" # Binary name (used with `cargo run --bin bin1`) -path = "src/daemon.rs" # Path to the source file for this binary +name = "daemon" +path = "src/daemon.rs" [[bin]] name = "cli" path = "src/main.rs" -required-features = ["cli"] - [dependencies] alloy-primitives = "0.8.13" diff --git a/client-rs/src/bankai_client.rs b/client-rs/src/bankai_client.rs index 5067d86..f114ea1 100644 --- a/client-rs/src/bankai_client.rs +++ b/client-rs/src/bankai_client.rs @@ -83,7 +83,6 @@ impl BankaiClient { Ok(epoch_proof) } - #[cfg(feature = "cli")] pub async fn get_contract_initialization_data( &self, slot: u64, diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index 01ef4b1..7db1789 100644 --- 
a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -42,7 +42,6 @@ pub struct ExpectedEpochBatchOutputs { } impl EpochUpdateBatch { - #[cfg(feature = "cli")] pub(crate) async fn new(bankai: &BankaiClient) -> Result { let (start_slot, mut end_slot) = bankai .starknet_client diff --git a/client-rs/src/epoch_update.rs b/client-rs/src/epoch_update.rs index 4721822..bf95e39 100644 --- a/client-rs/src/epoch_update.rs +++ b/client-rs/src/epoch_update.rs @@ -47,37 +47,44 @@ impl EpochUpdate { } } -// impl Provable for EpochUpdate { -// fn id(&self) -> String { -// let mut hasher = Sha256::new(); -// hasher.update(b"epoch_update"); -// hasher.update(self.circuit_inputs.header.tree_hash_root().as_slice()); -// hex::encode(hasher.finalize().as_slice()) -// } - -// fn export(&self) -> Result { -// let json = serde_json::to_string_pretty(&self).unwrap(); -// let dir_path = format!("batches/epoch/{}", self.circuit_inputs.header.slot); -// fs::create_dir_all(dir_path.clone()).map_err(Error::IoError)?; -// let path = format!( -// "{}/input_{}.json", -// dir_path, self.circuit_inputs.header.slot -// ); -// fs::write(path.clone(), json).map_err(Error::IoError)?; -// Ok(path) -// } - -// fn pie_path(&self) -> String { -// format!( -// "batches/epoch/{}/pie_{}.zip", -// self.circuit_inputs.header.slot, self.circuit_inputs.header.slot -// ) -// } - -// fn proof_type(&self) -> ProofType { -// ProofType::Epoch -// } -// } +impl Provable for EpochUpdate { + fn id(&self) -> String { + let mut hasher = Sha256::new(); + hasher.update(b"epoch_update"); + hasher.update(self.circuit_inputs.header.tree_hash_root().as_slice()); + hex::encode(hasher.finalize().as_slice()) + } + + fn export(&self) -> Result { + let json = serde_json::to_string_pretty(&self).unwrap(); + let dir_path = format!("batches/epoch/{}", self.circuit_inputs.header.slot); + fs::create_dir_all(dir_path.clone()).map_err(Error::IoError)?; + let path = format!( + "{}/input_{}.json", + dir_path, 
self.circuit_inputs.header.slot + ); + fs::write(path.clone(), json).map_err(Error::IoError)?; + Ok(path) + } + + fn pie_path(&self) -> String { + format!( + "batches/epoch/{}/pie_{}.zip", + self.circuit_inputs.header.slot, self.circuit_inputs.header.slot + ) + } + + fn proof_type(&self) -> ProofType { + ProofType::Epoch + } + + fn inputs_path(&self) -> String { + format!( + "batches/epoch/{}/input_{}.json", + self.circuit_inputs.header.slot, self.circuit_inputs.header.slot + ) + } +} /// Contains all necessary inputs for generating and verifying epoch proofs #[derive(Debug, Serialize, Deserialize)] @@ -188,7 +195,6 @@ impl EpochCircuitInputs { let sync_agg = client.get_sync_aggregate(slot).await?; let validator_pubs = client.get_sync_committee_validator_pubs(slot).await?; - // Process the sync committee data let signature_point = Self::extract_signature_point(&sync_agg)?; let non_signers = Self::derive_non_signers(&sync_agg, &validator_pubs); diff --git a/client-rs/src/main.rs b/client-rs/src/main.rs index 4565e7e..f71eaa6 100644 --- a/client-rs/src/main.rs +++ b/client-rs/src/main.rs @@ -205,8 +205,8 @@ async fn main() -> Result<(), Error> { from_filename(".env.sepolia").ok(); let subscriber = FmtSubscriber::builder() - .with_max_level(Level::TRACE) - //.with_max_level(Level::INFO) + // .with_max_level(Level::TRACE) + .with_max_level(Level::INFO) .finish(); tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); @@ -302,6 +302,7 @@ async fn main() -> Result<(), Error> { let update = bankai .get_sync_committee_update(latest_epoch.try_into().unwrap()) .await?; + let _ = update.export()?; CairoRunner::generate_pie(&update, &bankai.config).await?; let batch_id = bankai.atlantic_client.submit_batch(update).await?; println!("Batch Submitted: {}", batch_id); @@ -315,14 +316,17 @@ async fn main() -> Result<(), Error> { // make sure next_epoch % 32 == 0 let next_epoch = (u64::try_from(latest_epoch).unwrap() / 32) * 32 + 32; 
println!("Fetching Inputs for Epoch: {}", next_epoch); - let proof = bankai.get_epoch_proof(next_epoch).await?; - CairoRunner::generate_pie(&proof, &bankai.config).await?; - let batch_id = bankai.atlantic_client.submit_batch(proof).await?; + // let proof = bankai.get_epoch_proof(next_epoch).await?; + let epoch_update = EpochUpdate::new(&bankai.client, next_epoch).await?; + let _ = epoch_update.export()?; + CairoRunner::generate_pie(&epoch_update, &bankai.config).await?; + let batch_id = bankai.atlantic_client.submit_batch(epoch_update).await?; println!("Batch Submitted: {}", batch_id); } Commands::ProveNextEpochBatch => { let epoch_update = EpochUpdateBatch::new(&bankai).await?; println!("Update contents: {:?}", epoch_update); + let _ = epoch_update.export()?; CairoRunner::generate_pie(&epoch_update, &bankai.config).await?; let batch_id = bankai.atlantic_client.submit_batch(epoch_update).await?; println!("Batch Submitted: {}", batch_id); diff --git a/client-rs/src/traits.rs b/client-rs/src/traits.rs index ccc79aa..5ae8574 100644 --- a/client-rs/src/traits.rs +++ b/client-rs/src/traits.rs @@ -11,7 +11,7 @@ pub trait Submittable { } pub enum ProofType { - //Epoch, + Epoch, EpochBatch, SyncCommittee, } diff --git a/client-rs/src/utils/atlantic_client.rs b/client-rs/src/utils/atlantic_client.rs index e550183..3ffa0f5 100644 --- a/client-rs/src/utils/atlantic_client.rs +++ b/client-rs/src/utils/atlantic_client.rs @@ -93,7 +93,7 @@ impl AtlanticClient { let external_id = format!( "update_{}", match batch.proof_type() { - //ProofType::Epoch => "epoch", + ProofType::Epoch => "epoch", ProofType::SyncCommittee => "sync_committee", ProofType::EpochBatch => "epoch_batch", } diff --git a/client-rs/src/utils/cairo_runner.rs b/client-rs/src/utils/cairo_runner.rs index 40d97b3..49b9fe4 100644 --- a/client-rs/src/utils/cairo_runner.rs +++ b/client-rs/src/utils/cairo_runner.rs @@ -22,7 +22,7 @@ impl CairoRunner { info!("Cairo Input path: {}", input_path); let program_path = match 
input.proof_type() { - //ProofType::Epoch => config.epoch_circuit_path.clone(), + ProofType::Epoch => config.epoch_circuit_path.clone(), ProofType::SyncCommittee => config.committee_circuit_path.clone(), ProofType::EpochBatch => config.epoch_batch_circuit_path.clone(), }; diff --git a/client-rs/src/utils/rpc.rs b/client-rs/src/utils/rpc.rs index fcd595b..2af5e49 100644 --- a/client-rs/src/utils/rpc.rs +++ b/client-rs/src/utils/rpc.rs @@ -104,7 +104,7 @@ impl BeaconRpcClient { let json = self .get_json(&format!("eth/v2/beacon/blocks/{}", slot)) - .await?; + .await?; serde_json::from_value(json["data"]["message"]["body"]["sync_aggregate"].clone()) .map_err(|e| Error::DeserializeError(e.to_string())) diff --git a/client-rs/src/utils/starknet_client.rs b/client-rs/src/utils/starknet_client.rs index c464d5a..0789448 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -109,7 +109,6 @@ impl StarknetClient { }) } - #[cfg(feature = "cli")] pub async fn deploy_contract( &self, init_data: ContractInitializationData, @@ -136,12 +135,19 @@ impl StarknetClient { contract_address ); - deploy_tx - .send() - .await - .map_err(|e| StarknetError::AccountError(e.to_string()))?; - - Ok(contract_address) + match deploy_tx.send().await { + Ok(_result) => { + info!("Deployment transaction sent successfully"); + Ok(contract_address) + } + Err(e) => { + error!("Deployment failed with error: {:#?}", e); + Err(StarknetError::AccountError(format!( + "Deployment failed: {:#?}", + e + ))) + } + } } pub async fn submit_update( @@ -149,24 +155,20 @@ impl StarknetClient { update: impl Submittable, config: &BankaiConfig, ) -> Result { - println!( - "{:?}", - vec![Call { - to: config.contract_address, - selector: update.get_contract_selector(), - calldata: update.to_calldata(), - }] - ); + let selector = update.get_contract_selector(); + let calldata = update.to_calldata(); + + let call = Call { + to: config.contract_address, + selector, + calldata, + 
}; + let send_result = self .account - .execute_v1(vec![Call { - to: config.contract_address, - selector: update.get_contract_selector(), - calldata: update.to_calldata(), - }]) + .execute_v1(vec![call]) .send() .await; - //.map_err(|e| StarknetError::TransactionError(e.to_string()))?; match send_result { Ok(tx_response) => { @@ -175,20 +177,13 @@ impl StarknetClient { Ok(tx_hash) } Err(e) => { - //error!("Transaction execution error: {:#?}", e); - - // Return a more descriptive error + error!("Transaction execution error: {:#?}", e); Err(StarknetError::TransactionError(format!( "TransactionExecutionError: {:#?}", e ))) } } - - // println!("tx_hash: {:?}", result.transaction_hash); - - // // Return the transaction hash - // Ok(result.transaction_hash) } pub async fn get_committee_hash( @@ -246,7 +241,7 @@ impl StarknetClient { .call( FunctionCall { contract_address: config.contract_address, - entry_point_selector: selector!("get_latest_epoch"), + entry_point_selector: selector!("get_latest_epoch_slot"), calldata: vec![], }, BlockId::Tag(BlockTag::Latest), From edebc6ac3b06acc3431a9ea870d750624ba478a2 Mon Sep 17 00:00:00 2001 From: petscheit Date: Mon, 3 Feb 2025 14:47:36 +0100 Subject: [PATCH 28/66] chore: refactor contracts, adding ownable and upgradeable + overall cleanup --- contract/Scarb.lock | 23 ++ contract/Scarb.toml | 2 + contract/src/interface.cairo | 108 +++++++ contract/src/lib.cairo | 499 +++++++++++++------------------ contract/src/types.cairo | 73 +++++ contract/src/utils.cairo | 57 ++-- contract/tests/test_bankai.cairo | 298 ++++++++++++++++++ tests/test_bankai.cairo | 1 + 8 files changed, 746 insertions(+), 315 deletions(-) create mode 100644 contract/src/interface.cairo create mode 100644 contract/src/types.cairo create mode 100644 contract/tests/test_bankai.cairo create mode 100644 tests/test_bankai.cairo diff --git a/contract/Scarb.lock b/contract/Scarb.lock index 61ffa98..09d8572 100644 --- a/contract/Scarb.lock +++ b/contract/Scarb.lock @@ 
-6,6 +6,8 @@ name = "bankai" version = "0.1.0" dependencies = [ "integrity", + "openzeppelin_access", + "openzeppelin_upgrades", ] [[package]] @@ -13,3 +15,24 @@ name = "integrity" version = "2.0.0" source = "registry+https://scarbs.xyz/" checksum = "sha256:f5e91cd5280bc0c02cfb12ce1a521e25064956dd12f3e38fca3d841c538723a0" + +[[package]] +name = "openzeppelin_access" +version = "0.20.0" +source = "registry+https://scarbs.xyz/" +checksum = "sha256:7734901a0ca7a7065e69416fea615dd1dc586c8dc9e76c032f25ee62e8b2a06c" +dependencies = [ + "openzeppelin_introspection", +] + +[[package]] +name = "openzeppelin_introspection" +version = "0.20.0" +source = "registry+https://scarbs.xyz/" +checksum = "sha256:13e04a2190684e6804229a77a6c56de7d033db8b9ef519e5e8dee400a70d8a3d" + +[[package]] +name = "openzeppelin_upgrades" +version = "0.20.0" +source = "registry+https://scarbs.xyz/" +checksum = "sha256:15fdd63f6b50a0fda7b3f8f434120aaf7637bcdfe6fd8d275ad57343d5ede5e1" diff --git a/contract/Scarb.toml b/contract/Scarb.toml index 36c3c1e..5811e3a 100644 --- a/contract/Scarb.toml +++ b/contract/Scarb.toml @@ -9,6 +9,8 @@ edition = "2024_07" starknet = "2.9.1" cairo_test = "2.9.1" integrity = "2.0.0" +openzeppelin_access = "0.20.0" +openzeppelin_upgrades = "0.20.0" [[target.starknet-contract]] sierra = true \ No newline at end of file diff --git a/contract/src/interface.cairo b/contract/src/interface.cairo new file mode 100644 index 0000000..a05ece8 --- /dev/null +++ b/contract/src/interface.cairo @@ -0,0 +1,108 @@ +/// Interface for the Bankai contract, which manages Ethereum consensus verification on StarkNet +/// This contract enables trustless bridging of Ethereum consensus data to StarkNet +use super::types::EpochProof; + +#[starknet::interface] +pub trait IBankaiContract { + /// Returns the hash of a specific validator committee + fn get_committee_hash(self: @TContractState, committee_id: u64) -> u256; + + /// Returns the slot number of the most recent verified epoch + fn 
get_latest_epoch_slot(self: @TContractState) -> u64; + + /// Returns the ID of the most recent validator committee + fn get_latest_committee_id(self: @TContractState) -> u64; + + /// Returns the SHARP program hash used for committee updates + fn get_committee_update_program_hash(self: @TContractState) -> felt252; + + /// Returns the SHARP program hash used for epoch updates + fn get_epoch_update_program_hash(self: @TContractState) -> felt252; + + /// Returns the SHARP program hash used for epoch batching + fn get_epoch_batch_program_hash(self: @TContractState) -> felt252; + + /// Retrieves the epoch proof for a given slot + fn get_epoch_proof(self: @TContractState, slot: u64) -> EpochProof; + + /// Verifies and stores a new validator committee update + /// @param beacon_state_root - The beacon chain state root containing the committee + /// @param committee_hash - Hash of the new committee's public key + /// @param slot - Slot number where this committee becomes active + fn verify_committee_update( + ref self: TContractState, beacon_state_root: u256, committee_hash: u256, slot: u64, + ); + + /// Verifies and stores a new epoch update + /// @param header_root - SSZ root of the beacon block header + /// @param beacon_state_root - Root of the beacon state + /// @param slot - Slot number of this epoch + /// @param committee_hash - Hash of the signing committee + /// @param n_signers - Number of validators that signed + /// @param execution_hash - Hash of the execution layer header + /// @param execution_height - Height of the execution block + fn verify_epoch_update( + ref self: TContractState, + header_root: u256, + beacon_state_root: u256, + slot: u64, + committee_hash: u256, + n_signers: u64, + execution_hash: u256, + execution_height: u64, + ); + + /// Verifies and stores a batch of epoch updates + /// @param batch_root - Merkle root of the batch of epochs + /// Parameters same as verify_epoch_update + fn verify_epoch_batch( + ref self: TContractState, + 
batch_root: felt252, + header_root: u256, + beacon_state_root: u256, + slot: u64, + committee_hash: u256, + n_signers: u64, + execution_hash: u256, + execution_height: u64, + ); + + /// Extracts and verifies a single epoch from a previously verified batch + /// @param batch_root - Root of the verified batch + /// @param merkle_index - Index of this epoch in the batch + /// @param merkle_path - Merkle proof path + /// Other parameters same as verify_epoch_update + fn decommit_batched_epoch( + ref self: TContractState, + batch_root: felt252, + merkle_index: u16, + merkle_path: Array, + header_root: u256, + beacon_state_root: u256, + slot: u64, + committee_hash: u256, + n_signers: u64, + execution_hash: u256, + execution_height: u64, + ); + + /// Proposes an update to the SHARP program hashes (requires owner + timelock) + fn propose_program_hash_update( + ref self: TContractState, + new_committee_hash: felt252, + new_epoch_hash: felt252, + new_batch_hash: felt252, + ); + + /// Executes a proposed program hash update after timelock expires + fn execute_program_hash_update(ref self: TContractState); + + /// Pauses the contract (owner only) + fn pause(ref self: TContractState); + + /// Unpauses the contract (owner only) + fn unpause(ref self: TContractState); + + /// Returns whether the contract is currently paused + fn is_paused(self: @TContractState) -> bool; +} diff --git a/contract/src/lib.cairo b/contract/src/lib.cairo index d41052d..07cf7cc 100644 --- a/contract/src/lib.cairo +++ b/contract/src/lib.cairo @@ -1,175 +1,119 @@ -#[derive(Drop, starknet::Store, Serde)] -pub struct EpochProof { - // Hash of the beacon header (root since ssz) - header_root: u256, - // state root at the mapped slot - beacon_state_root: u256, - // Number of signers (out of 512) - n_signers: u64, - // Hash of the execution header - execution_hash: u256, - // Height of the execution header - execution_height: u64, -} +pub mod interface; +pub mod types; -#[starknet::interface] -pub trait 
IBankaiContract { - fn get_committee_hash(self: @TContractState, committee_id: u64) -> u256; - fn get_latest_epoch_slot(self: @TContractState) -> u64; - fn get_latest_committee_id(self: @TContractState) -> u64; - fn get_committee_update_program_hash(self: @TContractState) -> felt252; - fn get_epoch_update_program_hash(self: @TContractState) -> felt252; - fn get_epoch_proof(self: @TContractState, slot: u64) -> EpochProof; - fn verify_committee_update( - ref self: TContractState, beacon_state_root: u256, committee_hash: u256, slot: u64, - ); - fn verify_epoch_update( - ref self: TContractState, - header_root: u256, - beacon_state_root: u256, - slot: u64, - committee_hash: u256, - n_signers: u64, - execution_hash: u256, - execution_height: u64, - ); - - fn verify_epoch_batch( - ref self: TContractState, - batch_root: felt252, - header_root: u256, - beacon_state_root: u256, - slot: u64, - committee_hash: u256, - n_signers: u64, - execution_hash: u256, - execution_height: u64, - ); - - fn decommit_batched_epoch( - ref self: TContractState, - batch_root: felt252, - merkle_index: u16, - merkle_path: Array, - header_root: u256, - beacon_state_root: u256, - slot: u64, - committee_hash: u256, - n_signers: u64, - execution_hash: u256, - execution_height: u64, - ); - - fn propose_program_hash_update( - ref self: TContractState, - new_committee_hash: felt252, - new_epoch_hash: felt252, - new_batch_hash: felt252 - ); - fn execute_program_hash_update(ref self: TContractState); - fn pause(ref self: TContractState); - fn unpause(ref self: TContractState); - fn is_paused(self: @TContractState) -> bool; -} +pub use interface::IBankaiContract; pub mod utils; #[starknet::contract] pub mod BankaiContract { - use super::EpochProof; + use super::types::{ + EpochProof, CommitteeUpdated, EpochUpdated, EpochBatch, EpochDecommitted, Paused, + Unpaused, + }; use starknet::storage::{ Map, StorageMapReadAccess, StorageMapWriteAccess, StoragePointerReadAccess, StoragePointerWriteAccess, }; - use 
starknet::{ContractAddress, get_caller_address, get_block_timestamp}; + use starknet::ClassHash; + + use starknet::{get_caller_address, get_block_timestamp}; use integrity::{ Integrity, IntegrityWithConfig, SHARP_BOOTLOADER_PROGRAM_HASH, VerifierConfiguration, }; - use crate::utils::{calculate_wrapped_bootloaded_fact_hash, WRAPPER_PROGRAM_HASH, hash_path, compute_leaf_hash}; + use crate::utils::{ + calculate_wrapped_bootloaded_fact_hash, WRAPPER_PROGRAM_HASH, hash_path, compute_leaf_hash, + }; + + use openzeppelin_access::ownable::OwnableComponent; + use openzeppelin_upgrades::UpgradeableComponent; + use openzeppelin_upgrades::interface::IUpgradeable; + + component!(path: OwnableComponent, storage: ownable, event: OwnableEvent); + component!(path: UpgradeableComponent, storage: upgradeable, event: UpgradeableEvent); + + + // Ownable Mixin + #[abi(embed_v0)] + impl OwnableMixinImpl = OwnableComponent::OwnableMixinImpl; + impl OwnableInternalImpl = OwnableComponent::InternalImpl; + + impl UpgradeableInternalImpl = UpgradeableComponent::InternalImpl; + + + /// Events emitted by the contract #[event] #[derive(Drop, starknet::Event)] pub enum Event { + /// Emitted when a new validator committee is verified CommitteeUpdated: CommitteeUpdated, + /// Emitted when a new epoch is verified EpochUpdated: EpochUpdated, + /// Emitted when a batch of epochs is verified EpochBatch: EpochBatch, + /// Emitted when an epoch is extracted from a batch EpochDecommitted: EpochDecommitted, + /// Emitted when the contract is paused Paused: Paused, + /// Emitted when the contract is unpaused Unpaused: Unpaused, + OwnableEvent: OwnableComponent::Event, + UpgradeableEvent: UpgradeableComponent::Event, } - #[derive(Drop, starknet::Event)] - pub struct CommitteeUpdated { - committee_id: u64, - committee_hash: u256, - } - - #[derive(Drop, starknet::Event)] - pub struct EpochUpdated { - // Hash of the beacon header (root since ssz) - beacon_root: u256, - // Slot of the beacon header - slot: u64, 
- // Hash of the execution header - execution_hash: u256, - // Height of the execution header - execution_height: u64, - } - - #[derive(Drop, starknet::Event)] - pub struct EpochBatch { - batch_root: felt252, - beacon_root: u256, - slot: u64, - execution_hash: u256, - execution_height: u64, - } - - #[derive(Drop, starknet::Event)] - pub struct EpochDecommitted { - batch_root: felt252, - slot: u64, - execution_hash: u256, - execution_height: u64 - } - - /// Emitted when the contract is paused - #[derive(Drop, starknet::Event)] - pub struct Paused {} - - /// Emitted when the contract is unpaused - #[derive(Drop, starknet::Event)] - pub struct Unpaused {} - /// Time delay required for program hash updates (48 hours in seconds) + /// This delay provides a security window for detecting malicious updates const UPDATE_DELAY: u64 = 172800; + /// Contract storage layout #[storage] struct Storage { // Committee Management - committee: Map::, // Maps committee index to committee hash (sha256(x || y)) of aggregate key + /// Maps committee index to committee hash (sha256(x || y)) of aggregate key + committee: Map::, + /// ID of the most recent committee latest_committee_id: u64, + /// ID of the initial trusted committee initialization_committee: u64, - // Epoch Management - epochs: Map::, // Maps beacon slot to header root and state root + /// Maps beacon slot to header root and state root + epochs: Map::, + /// Most recent verified epoch slot latest_epoch_slot: u64, - // Batch Management - batches: Map::, // Tracks verified batch roots - + /// Tracks verified batch roots + batches: Map::, // Program Hash Management + /// Current SHARP program hash for committee updates committee_update_program_hash: felt252, + /// Current SHARP program hash for epoch updates epoch_update_program_hash: felt252, + /// Current SHARP program hash for epoch batching epoch_batch_program_hash: felt252, + /// Proposed new committee program hash (pending timelock) pending_committee_program_hash: 
felt252, + /// Proposed new epoch program hash (pending timelock) pending_epoch_program_hash: felt252, + /// Proposed new batch program hash (pending timelock) pending_batch_program_hash: felt252, + /// Timestamp when pending program hash update can be executed pending_update_timestamp: u64, - - // Access Control - owner: ContractAddress, + // Contract Management + /// Contract pause state for emergency stops paused: bool, + /// OpenZeppelin ownable component storage + #[substorage(v0)] + pub ownable: OwnableComponent::Storage, + /// OpenZeppelin upgradeable component storage + #[substorage(v0)] + upgradeable: UpgradeableComponent::Storage, } + /// Contract constructor + /// @param committee_id - ID of the initial trusted committee + /// @param committee_hash - Hash of the initial committee's public key + /// @param committee_update_program_hash - Initial SHARP program hash for committee updates + /// @param epoch_update_program_hash - Initial SHARP program hash for epoch updates + /// @param epoch_batch_program_hash - Initial SHARP program hash for epoch batching #[constructor] pub fn constructor( ref self: ContractState, @@ -179,7 +123,8 @@ pub mod BankaiContract { epoch_update_program_hash: felt252, epoch_batch_program_hash: felt252, ) { - self.owner.write(get_caller_address()); + // Initialize owner as contract deployer + self.ownable.initializer(get_caller_address()); self.latest_epoch_slot.write(0); // Write trusted initial committee @@ -193,32 +138,68 @@ pub mod BankaiContract { self.epoch_batch_program_hash.write(epoch_batch_program_hash); } + /// Implementation of the upgradeable interface + #[abi(embed_v0)] + impl UpgradeableImpl of IUpgradeable { + /// Upgrades the contract to a new implementation + /// @param new_class_hash - The class hash of the new implementation + /// @dev Can only be called by the contract owner + fn upgrade(ref self: ContractState, new_class_hash: ClassHash) { + self.ownable.assert_only_owner(); + 
self.upgradeable.upgrade(new_class_hash); + } + } + + /// Core implementation of the Bankai contract interface #[abi(embed_v0)] impl BankaiContractImpl of super::IBankaiContract { + /// Retrieves the hash of a specific validator committee + /// @param committee_id - The unique identifier of the committee + /// @return The aggregate public key hash of the committee fn get_committee_hash(self: @ContractState, committee_id: u64) -> u256 { self.committee.read(committee_id) } + /// Returns the slot number of the most recent verified epoch fn get_latest_epoch_slot(self: @ContractState) -> u64 { self.latest_epoch_slot.read() } + /// Returns the ID of the most recent validator committee fn get_latest_committee_id(self: @ContractState) -> u64 { self.latest_committee_id.read() } + /// Returns the current SHARP program hash for committee updates fn get_committee_update_program_hash(self: @ContractState) -> felt252 { self.committee_update_program_hash.read() } + /// Returns the current SHARP program hash for epoch updates fn get_epoch_update_program_hash(self: @ContractState) -> felt252 { self.epoch_update_program_hash.read() } + /// Returns the current SHARP program hash for epoch batching + fn get_epoch_batch_program_hash(self: @ContractState) -> felt252 { + self.epoch_batch_program_hash.read() + } + + /// Retrieves the epoch proof for a given slot + /// @param slot - The slot number to query + /// @return The epoch proof containing consensus and execution data fn get_epoch_proof(self: @ContractState, slot: u64) -> EpochProof { self.epochs.read(slot) } + /// Verifies and stores a new validator committee update + /// @dev Requires a valid SHARP proof and matching beacon state root + /// @param beacon_state_root - The beacon chain state root containing the committee + /// @param committee_hash - Hash of the new committee's public key + /// @param slot - Slot number where this committee becomes active + /// @custom:throws 'Contract is paused' if contract is paused + /// 
@custom:throws 'Invalid State Root!' if beacon state root doesn't match + /// @custom:throws 'Invalid Fact Hash!' if SHARP proof is invalid fn verify_committee_update( ref self: ContractState, beacon_state_root: u256, committee_hash: u256, slot: u64, ) { @@ -246,6 +227,11 @@ pub mod BankaiContract { ); } + /// Verifies and stores a new epoch update + /// @dev Requires a valid SHARP proof and matching committee hash + /// @custom:throws 'Contract is paused' if contract is paused + /// @custom:throws 'Invalid Committee Hash!' if committee hash doesn't match + /// @custom:throws 'Invalid Fact Hash!' if SHARP proof is invalid fn verify_epoch_update( ref self: ContractState, header_root: u256, @@ -262,13 +248,20 @@ pub mod BankaiContract { assert(committee_hash == valid_committee_hash, 'Invalid Committee Hash!'); let fact_hash = compute_epoch_proof_fact_hash( - @self, header_root, beacon_state_root, slot, committee_hash, n_signers, execution_hash, execution_height, + @self, + header_root, + beacon_state_root, + slot, + committee_hash, + n_signers, + execution_hash, + execution_height, ); assert(is_valid_fact_hash(fact_hash), 'Invalid Fact Hash!'); let epoch_proof = EpochProof { - header_root: header_root, beacon_state_root: beacon_state_root, n_signers: n_signers, execution_hash: execution_hash, execution_height: execution_height, + header_root, beacon_state_root, n_signers, execution_hash, execution_height, }; self.epochs.write(slot, epoch_proof); @@ -277,11 +270,21 @@ pub mod BankaiContract { self.latest_epoch_slot.write(slot); } - self.emit(Event::EpochUpdated(EpochUpdated { - beacon_root: header_root, slot: slot, execution_hash: execution_hash, execution_height: execution_height, - })); + self + .emit( + Event::EpochUpdated( + EpochUpdated { + beacon_root: header_root, slot, execution_hash, execution_height, + }, + ), + ); } + /// Verifies and stores a batch of epoch updates + /// @dev Requires a valid SHARP proof and matching committee hash + /// @custom:throws 
'Contract is paused' if contract is paused + /// @custom:throws 'Invalid Committee Hash!' if committee hash doesn't match + /// @custom:throws 'Invalid Fact Hash!' if SHARP proof is invalid fn verify_epoch_batch( ref self: ContractState, batch_root: felt252, @@ -292,7 +295,7 @@ pub mod BankaiContract { n_signers: u64, execution_hash: u256, execution_height: u64, - ) { + ) { assert(!self.paused.read(), 'Contract is paused'); let signing_committee_id = (slot / 0x2000); @@ -300,19 +303,36 @@ pub mod BankaiContract { assert(committee_hash == valid_committee_hash, 'Invalid Committee Hash!'); let fact_hash = compute_epoch_batch_fact_hash( - @self, batch_root, header_root, beacon_state_root, slot, committee_hash, n_signers, execution_hash, execution_height, + @self, + batch_root, + header_root, + beacon_state_root, + slot, + committee_hash, + n_signers, + execution_hash, + execution_height, ); assert(is_valid_fact_hash(fact_hash), 'Invalid Fact Hash!'); let epoch_proof = EpochProof { - header_root: header_root, beacon_state_root: beacon_state_root, n_signers: n_signers, execution_hash: execution_hash, execution_height: execution_height, + header_root, beacon_state_root, n_signers, execution_hash, execution_height, }; self.epochs.write(slot, epoch_proof); - self.emit(Event::EpochBatch(EpochBatch { - batch_root: batch_root, beacon_root: header_root, slot: slot, execution_hash: execution_hash, execution_height: execution_height, - })); + self + .emit( + Event::EpochBatch( + EpochBatch { + batch_root, + beacon_root: header_root, + slot, + execution_hash, + execution_height, + }, + ), + ); self.batches.write(batch_root, true); @@ -322,6 +342,11 @@ pub mod BankaiContract { } } + /// Extracts and verifies a single epoch from a previously verified batch + /// @dev Verifies the Merkle proof against the stored batch root + /// @custom:throws 'Contract is paused' if contract is paused + /// @custom:throws 'Batch root not known!' 
if batch_root hasn't been verified + /// @custom:throws 'Invalid Batch Merkle Root!' if Merkle proof is invalid fn decommit_batched_epoch( ref self: ContractState, batch_root: felt252, @@ -339,40 +364,62 @@ pub mod BankaiContract { let known_batch_root = self.batches.read(batch_root); assert(known_batch_root, 'Batch root not known!'); - let leaf = compute_leaf_hash(header_root, beacon_state_root, slot, committee_hash, n_signers, execution_hash, execution_height); + let leaf = compute_leaf_hash( + header_root, + beacon_state_root, + slot, + committee_hash, + n_signers, + execution_hash, + execution_height, + ); let computed_root = hash_path(leaf, merkle_path, merkle_index); assert(computed_root == batch_root, 'Invalid Batch Merkle Root!'); let epoch_proof = EpochProof { - header_root: header_root, beacon_state_root: beacon_state_root, n_signers: n_signers, execution_hash: execution_hash, execution_height: execution_height, + header_root, beacon_state_root, n_signers, execution_hash, execution_height, }; self.epochs.write(slot, epoch_proof); - - self.emit(Event::EpochDecommitted(EpochDecommitted { - batch_root: batch_root, slot: slot, execution_hash: execution_hash, execution_height: execution_height, - })); + + self + .emit( + Event::EpochDecommitted( + EpochDecommitted { batch_root, slot, execution_hash, execution_height }, + ), + ); } - + + /// Proposes an update to the SHARP program hashes + /// @dev Requires owner access and initiates the timelock period + /// @param new_committee_hash - New program hash for committee verification + /// @param new_epoch_hash - New program hash for epoch verification + /// @param new_batch_hash - New program hash for batch verification + /// @custom:throws 'Contract is paused' if contract is paused fn propose_program_hash_update( ref self: ContractState, new_committee_hash: felt252, new_epoch_hash: felt252, - new_batch_hash: felt252 + new_batch_hash: felt252, ) { assert(!self.paused.read(), 'Contract is paused'); - 
assert(get_caller_address() == self.owner.read(), 'Caller is not owner'); - + self.ownable.assert_only_owner(); + self.pending_committee_program_hash.write(new_committee_hash); self.pending_epoch_program_hash.write(new_epoch_hash); self.pending_batch_program_hash.write(new_batch_hash); self.pending_update_timestamp.write(get_block_timestamp() + UPDATE_DELAY); } + /// Executes a proposed program hash update after timelock expires + /// @dev Can only be called by owner after timelock period + /// @custom:throws 'Delay not elapsed' if timelock period hasn't passed fn execute_program_hash_update(ref self: ContractState) { - assert(get_caller_address() == self.owner.read(), 'Caller is not owner'); - assert(get_block_timestamp() >= self.pending_update_timestamp.read(), 'Delay not elapsed'); - + self.ownable.assert_only_owner(); + assert( + get_block_timestamp() >= self.pending_update_timestamp.read(), 'Delay not elapsed', + ); + // Update program hashes self.committee_update_program_hash.write(self.pending_committee_program_hash.read()); self.epoch_update_program_hash.write(self.pending_epoch_program_hash.read()); @@ -385,20 +432,27 @@ pub mod BankaiContract { self.pending_update_timestamp.write(0); } + /// Pauses all contract operations + /// @dev Can only be called by owner + /// @custom:throws 'Contract is already paused' if already paused fn pause(ref self: ContractState) { - assert(get_caller_address() == self.owner.read(), 'Caller is not owner'); + self.ownable.assert_only_owner(); assert(!self.paused.read(), 'Contract is already paused'); self.paused.write(true); self.emit(Event::Paused(Paused {})); } + /// Unpauses contract operations + /// @dev Can only be called by owner + /// @custom:throws 'Contract is not paused' if not paused fn unpause(ref self: ContractState) { - assert(get_caller_address() == self.owner.read(), 'Caller is not owner'); + self.ownable.assert_only_owner(); assert(self.paused.read(), 'Contract is not paused'); self.paused.write(false); 
self.emit(Event::Unpaused(Unpaused {})); } + /// Returns whether the contract is currently paused fn is_paused(self: @ContractState) -> bool { self.paused.read() } @@ -413,8 +467,8 @@ pub mod BankaiContract { SHARP_BOOTLOADER_PROGRAM_HASH, self.committee_update_program_hash.read(), [ - beacon_state_root.low.into(), beacon_state_root.high.into(), committee_hash.low.into(), - committee_hash.high.into(), slot.into(), + beacon_state_root.low.into(), beacon_state_root.high.into(), + committee_hash.low.into(), committee_hash.high.into(), slot.into(), ] .span(), ); @@ -464,8 +518,7 @@ pub mod BankaiContract { SHARP_BOOTLOADER_PROGRAM_HASH, self.epoch_batch_program_hash.read(), [ - batch_root, header_root.low.into(), - header_root.high.into(), state_root.low.into(), + batch_root, header_root.low.into(), header_root.high.into(), state_root.low.into(), state_root.high.into(), slot.into(), committee_hash.low.into(), committee_hash.high.into(), n_signers.into(), execution_hash.low.into(), execution_hash.high.into(), execution_height.into(), @@ -488,133 +541,3 @@ pub mod BankaiContract { integrity.is_fact_hash_valid(fact_hash) } } - -#[cfg(test)] -mod tests { - use super::BankaiContract; - use super::IBankaiContract; - use starknet::contract_address_const; - use starknet::testing::set_caller_address; - use starknet::testing::set_block_timestamp; - - // Helper function to deploy the contract for testing - fn deploy_contract() -> BankaiContract::ContractState { - let mut state = BankaiContract::contract_state_for_testing(); - - // Set caller as contract deployer - set_caller_address(contract_address_const::<0x123>()); - - // Initialize with some test values - BankaiContract::constructor( - ref state, - 1, // committee_id - 1234.into(), // committee_hash - 111.into(), // committee_update_program_hash - 222.into(), // epoch_update_program_hash - 333.into() // epoch_batch_program_hash - ); - - state - } - - #[test] - fn test_constructor() { - let state = deploy_contract(); - - 
assert!(!IBankaiContract::is_paused(@state)); - assert_eq!(IBankaiContract::get_latest_epoch_slot(@state), 0); - assert_eq!(IBankaiContract::get_latest_committee_id(@state), 1); - assert_eq!(IBankaiContract::get_committee_hash(@state, 1), 1234.into()); - assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 111); - assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 222); - } - - #[test] - fn test_pause_unpause() { - let mut state = deploy_contract(); - let owner = contract_address_const::<0x123>(); - set_caller_address(owner); - - // Test initial state - assert!(!IBankaiContract::is_paused(@state)); - - // Test pause - IBankaiContract::pause(ref state); - assert!(IBankaiContract::is_paused(@state)); - - // Test unpause - IBankaiContract::unpause(ref state); - assert!(!IBankaiContract::is_paused(@state)); - } - - #[test] - #[should_panic(expected: ('Caller is not owner',))] - fn test_pause_unauthorized() { - let mut state = deploy_contract(); - - // Try to pause from different address - let other = contract_address_const::<0x456>(); - set_caller_address(other); - IBankaiContract::pause(ref state); - } - - #[test] - fn test_program_hash_update() { - let mut state = deploy_contract(); - let owner = contract_address_const::<0x123>(); - set_caller_address(owner); - - // Set initial timestamp - set_block_timestamp(1000); - - // Propose update - IBankaiContract::propose_program_hash_update( - ref state, - 444.into(), // new_committee_hash - 555.into(), // new_epoch_hash - 666.into() // new_batch_hash - ); - - // Execute after delay - set_block_timestamp(1000 + 172800); // After delay - IBankaiContract::execute_program_hash_update(ref state); - - // Verify updates - assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 444); - assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 555); - } - - #[test] - #[should_panic(expected: ('Delay not elapsed',))] - fn test_program_hash_update_too_early() { - let mut state 
= deploy_contract(); - let owner = contract_address_const::<0x123>(); - set_caller_address(owner); - - // Set initial timestamp - set_block_timestamp(1000); - - // Propose update - IBankaiContract::propose_program_hash_update( - ref state, - 444.into(), // new_committee_hash - 555.into(), // new_epoch_hash - 666.into() // new_batch_hash - ); - - // Try to execute before delay - set_block_timestamp(1000 + 172799); // Just before delay - IBankaiContract::execute_program_hash_update(ref state); - } - - #[test] - fn test_getters() { - let state = deploy_contract(); - - assert_eq!(IBankaiContract::get_committee_hash(@state, 1), 1234.into()); - assert_eq!(IBankaiContract::get_latest_epoch_slot(@state), 0); - assert_eq!(IBankaiContract::get_latest_committee_id(@state), 1); - assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 111); - assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 222); - } -} \ No newline at end of file diff --git a/contract/src/types.cairo b/contract/src/types.cairo new file mode 100644 index 0000000..555bb1d --- /dev/null +++ b/contract/src/types.cairo @@ -0,0 +1,73 @@ +/// Represents a proof of an Ethereum beacon chain epoch, containing crucial consensus and execution +/// data +#[derive(Drop, starknet::Store, Serde)] +pub struct EpochProof { + /// Hash of the beacon chain header (SSZ root) + pub header_root: u256, + /// State root of the beacon chain at the corresponding slot + pub beacon_state_root: u256, + /// Number of validators that signed (out of 512 possible) + pub n_signers: u64, + /// Hash of the execution layer (EL) header + pub execution_hash: u256, + /// Block height of the execution layer header + pub execution_height: u64, +} + +/// Event emitted when a new committee is validated and stored +#[derive(Drop, starknet::Event)] +pub struct CommitteeUpdated { + /// Unique identifier for the committee + pub committee_id: u64, + /// Aggregate public key hash of the committee + pub committee_hash: u256, +} 
+ +/// Event emitted when a new epoch is validated and stored +#[derive(Drop, starknet::Event)] +pub struct EpochUpdated { + /// Hash of the beacon header (SSZ root) + pub beacon_root: u256, + /// Slot number of the beacon header + pub slot: u64, + /// Hash of the execution layer header + pub execution_hash: u256, + /// Block height of the execution header + pub execution_height: u64, +} + +/// Event emitted when a batch of epochs is validated +#[derive(Drop, starknet::Event)] +pub struct EpochBatch { + /// Merkle root of the batch + pub batch_root: felt252, + /// Hash of the beacon header + pub beacon_root: u256, + /// Slot number + pub slot: u64, + /// Hash of the execution header + pub execution_hash: u256, + /// Block height of the execution header + pub execution_height: u64, +} + +/// Event emitted when an epoch is extracted from a verified batch +#[derive(Drop, starknet::Event)] +pub struct EpochDecommitted { + /// Root of the batch containing this epoch + pub batch_root: felt252, + /// Slot number + pub slot: u64, + /// Hash of the execution header + pub execution_hash: u256, + /// Block height of the execution header + pub execution_height: u64, +} + +/// Emitted when the contract is paused +#[derive(Drop, starknet::Event)] +pub struct Paused {} + +/// Emitted when the contract is unpaused +#[derive(Drop, starknet::Event)] +pub struct Unpaused {} diff --git a/contract/src/utils.cairo b/contract/src/utils.cairo index 105eed7..56b6303 100644 --- a/contract/src/utils.cairo +++ b/contract/src/utils.cairo @@ -45,8 +45,8 @@ pub fn hash_path(leaf: felt252, path: Array, index: u16) -> felt252 { // Get the sibling node let sibling = *path.at(i); - // Determine left and right nodes based on current_index - let (left, right) = if (current_index - 2 * (current_index / 2)) == 0 { + // Determine left and right nodes based on current_index's least significant bit + let (left, right) = if (current_index & 1_u16 == 0_u16) { (current_hash, sibling) } else { (sibling, 
current_hash) @@ -55,7 +55,7 @@ pub fn hash_path(leaf: felt252, path: Array, index: u16) -> felt252 { // Hash the pair using Poseidon let (hash, _, _) = hades_permutation(left, right, 2); current_hash = hash; - + // Update index for next level current_index = current_index / 2; i += 1; @@ -85,9 +85,9 @@ pub fn compute_leaf_hash( n_signers.into(), execution_hash.low.into(), execution_hash.high.into(), - execution_height.into() + execution_height.into(), ]; - + // Hash all values with Poseidon poseidon_hash_span(values.span()) } @@ -95,16 +95,19 @@ pub fn compute_leaf_hash( #[cfg(test)] mod tests { use super::*; - + #[test] fn test_leaf_hash_computation() { // Test values from JSON file let header_root = 0xcee6e3a29b289c3d0eb1f08f6cbf965a2f5771f54ca781fbf1f9d9a5e898d602_u256; - let beacon_state_root = 0xac1d83f6ab8c04205b698f9b5dbe93a1136000ca0162941bf129029ad402906c_u256; + let beacon_state_root = + 0xac1d83f6ab8c04205b698f9b5dbe93a1136000ca0162941bf129029ad402906c_u256; let slot = 6710272_u64; - let committee_hash = 0x3ccf068854b1612cc9537f6fd2a56fb0734722ce40b89685f84e17a6986510d3_u256; + let committee_hash = + 0x3ccf068854b1612cc9537f6fd2a56fb0734722ce40b89685f84e17a6986510d3_u256; let n_signers = 479_u64; - let execution_hash = 0xc2c133b1ea59352cef6c0434e0007cdba4bdc216afd32fdf6b40c4a135a8535e_u256; + let execution_hash = + 0xc2c133b1ea59352cef6c0434e0007cdba4bdc216afd32fdf6b40c4a135a8535e_u256; let execution_height = 7440225_u64; // Compute hash using our function @@ -117,9 +120,9 @@ mod tests { execution_hash, execution_height, ); - + let expected_hash = 0xBA8230D3714675CA5E80A257F3F2F581959A5E474E40101C52153192FD7728; - + // Assert they match assert_eq!(computed_hash, expected_hash, "Leaf hash computation mismatch"); } @@ -129,11 +132,14 @@ mod tests { fn test_hash_path_verification() { // Same leaf data as previous test let header_root = 0xcee6e3a29b289c3d0eb1f08f6cbf965a2f5771f54ca781fbf1f9d9a5e898d602_u256; - let beacon_state_root = 
0xac1d83f6ab8c04205b698f9b5dbe93a1136000ca0162941bf129029ad402906c_u256; + let beacon_state_root = + 0xac1d83f6ab8c04205b698f9b5dbe93a1136000ca0162941bf129029ad402906c_u256; let slot = 6710272_u64; - let committee_hash = 0x3ccf068854b1612cc9537f6fd2a56fb0734722ce40b89685f84e17a6986510d3_u256; + let committee_hash = + 0x3ccf068854b1612cc9537f6fd2a56fb0734722ce40b89685f84e17a6986510d3_u256; let n_signers = 479_u64; - let execution_hash = 0xc2c133b1ea59352cef6c0434e0007cdba4bdc216afd32fdf6b40c4a135a8535e_u256; + let execution_hash = + 0xc2c133b1ea59352cef6c0434e0007cdba4bdc216afd32fdf6b40c4a135a8535e_u256; let execution_height = 7440225_u64; // Compute the leaf hash @@ -150,25 +156,22 @@ mod tests { // Merkle path from JSON let path = array![ 0x0, - 0x293d3e8a80f400daaaffdd5932e2bcc8814bab8f414a75dcacf87318f8b14c5, - 0x296ec483967ad3fbe3407233db378b6284cc1fcc78d62457b97a4be6744ad0d, + 0x293d3e8a80f400daaaffdd5932e2bcc8814bab8f414a75dcacf87318f8b14c5, + 0x296ec483967ad3fbe3407233db378b6284cc1fcc78d62457b97a4be6744ad0d, 0x4127be83b42296fe28f98f8fdda29b96e22e5d90501f7d31b84e729ec2fac3f, - 0x33883305ab0df1ab7610153578a4d510b845841b84d90ed993133ce4ce8f827, - 0x6114fdf0455660a422ac813130104438c7baf332cc1eca0618957a3aeb68795 + 0x33883305ab0df1ab7610153578a4d510b845841b84d90ed993133ce4ce8f827, + 0x6114fdf0455660a422ac813130104438c7baf332cc1eca0618957a3aeb68795, ]; let index = 32_u16; - + // Compute root using hash_path - let computed_root = hash_path( - leaf_hash, - path, - index - ); + let computed_root = hash_path(leaf_hash, path, index); // Expected root is the first value in the JSON array (0x0) - let expected_root = 3014209719831846118507369742452047831482182187060364606511726060971609846063; - + let expected_root = + 3014209719831846118507369742452047831482182187060364606511726060971609846063; + assert_eq!(computed_root, expected_root, "Merkle root computation mismatch"); } -} \ No newline at end of file +} diff --git a/contract/tests/test_bankai.cairo 
b/contract/tests/test_bankai.cairo new file mode 100644 index 0000000..f0e5cc5 --- /dev/null +++ b/contract/tests/test_bankai.cairo @@ -0,0 +1,298 @@ +use bankai::BankaiContract; +use bankai::IBankaiContract; + +#[cfg(test)] +mod tests { + use super::{BankaiContract, IBankaiContract}; + use starknet::contract_address_const; + use starknet::testing::set_caller_address; + use starknet::testing::set_block_timestamp; + use starknet::ClassHash; + use openzeppelin_upgrades::interface::IUpgradeable; + use openzeppelin_access::ownable::interface::IOwnable; + + // Helper function to deploy the contract for testing + fn deploy_contract() -> BankaiContract::ContractState { + let mut state = BankaiContract::contract_state_for_testing(); + + // Set caller as contract deployer + let owner = contract_address_const::<0x123>(); + set_caller_address(owner); + + // Initialize with some test values + BankaiContract::constructor( + ref state, + 1, // committee_id + 1234.into(), // committee_hash + 111.into(), // committee_update_program_hash + 222.into(), // epoch_update_program_hash + 333.into() // epoch_batch_program_hash + ); + + state + } + + #[test] + fn test_constructor() { + let state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + + assert!(!IBankaiContract::is_paused(@state)); + assert_eq!(IBankaiContract::get_latest_epoch_slot(@state), 0); + assert_eq!(IBankaiContract::get_latest_committee_id(@state), 1); + assert_eq!(IBankaiContract::get_committee_hash(@state, 1), 1234.into()); + assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 111); + assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 222); + // Use the ownable component to check owner + assert_eq!(state.ownable.owner(), owner); + } + + #[test] + fn test_pause_unpause() { + let mut state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + set_caller_address(owner); + + // Test initial state + assert!(!IBankaiContract::is_paused(@state)); + + 
// Test pause + IBankaiContract::pause(ref state); + assert!(IBankaiContract::is_paused(@state)); + + // Test unpause + IBankaiContract::unpause(ref state); + assert!(!IBankaiContract::is_paused(@state)); + } + + #[test] + #[should_panic(expected: ('Caller is not the owner',))] + fn test_pause_unauthorized() { + let mut state = deploy_contract(); + + // Try to pause from different address + let other = contract_address_const::<0x456>(); + set_caller_address(other); + IBankaiContract::pause(ref state); + } + + #[test] + fn test_transfer_ownership() { + let mut state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + let new_owner = contract_address_const::<0x456>(); + + // Set caller as current owner + set_caller_address(owner); + + // Use the ownable component directly + state.ownable.transfer_ownership(new_owner); + + // Verify new owner + assert_eq!(state.ownable.owner(), new_owner); + } + + #[test] + #[should_panic(expected: ('Caller is not the owner',))] + fn test_transfer_ownership_unauthorized() { + let mut state = deploy_contract(); + let non_owner = contract_address_const::<0x456>(); + let new_owner = contract_address_const::<0x789>(); + + // Try to transfer ownership from non-owner address + set_caller_address(non_owner); + state.ownable.transfer_ownership(new_owner); + } + + #[test] + fn test_renounce_ownership() { + let mut state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + + // Set caller as current owner + set_caller_address(owner); + + // Renounce ownership + state.ownable.renounce_ownership(); + + // Verify owner is now zero address + assert_eq!(state.ownable.owner().into(), 0); + } + + #[test] + #[should_panic(expected: ('Caller is not the owner',))] + fn test_renounce_ownership_unauthorized() { + let mut state = deploy_contract(); + let non_owner = contract_address_const::<0x456>(); + + // Try to renounce ownership from non-owner address + set_caller_address(non_owner); + 
state.ownable.renounce_ownership(); + } + + #[test] + fn test_program_hash_update() { + let mut state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + set_caller_address(owner); + + // Set initial timestamp + set_block_timestamp(1000); + + // Propose update + IBankaiContract::propose_program_hash_update( + ref state, + 444.into(), // new_committee_hash + 555.into(), // new_epoch_hash + 666.into() // new_batch_hash + ); + + // Execute after delay + set_block_timestamp(1000 + 172800); // After 48-hour delay + IBankaiContract::execute_program_hash_update(ref state); + + // Verify updates + assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 444); + assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 555); + assert_eq!(IBankaiContract::get_epoch_batch_program_hash(@state), 666); + } + + #[test] + #[should_panic(expected: ('Delay not elapsed',))] + fn test_program_hash_update_too_early() { + let mut state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + set_caller_address(owner); + + // Set initial timestamp + set_block_timestamp(1000); + + // Propose update + IBankaiContract::propose_program_hash_update( + ref state, + 444.into(), // new_committee_hash + 555.into(), // new_epoch_hash + 666.into() // new_batch_hash + ); + + // Try to execute before delay + set_block_timestamp(1000 + 172799); // Just before 48-hour delay + IBankaiContract::execute_program_hash_update(ref state); + } + + #[test] + #[should_panic(expected: ('Caller is not the owner',))] + fn test_program_hash_update_unauthorized() { + let mut state = deploy_contract(); + let non_owner = contract_address_const::<0x456>(); + set_caller_address(non_owner); + + IBankaiContract::propose_program_hash_update(ref state, 444.into(), 555.into(), 666.into()); + } + + #[test] + fn test_getters() { + let state = deploy_contract(); + + assert_eq!(IBankaiContract::get_committee_hash(@state, 1), 1234.into()); + 
assert_eq!(IBankaiContract::get_latest_epoch_slot(@state), 0); + assert_eq!(IBankaiContract::get_latest_committee_id(@state), 1); + assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 111); + assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 222); + } + + #[test] + #[should_panic(expected: ('CLASS_HASH_NOT_FOUND',))] + fn test_upgrade() { + let mut state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + let new_class_hash: ClassHash = 123.try_into().unwrap(); + + set_caller_address(owner); + + // Attempt upgrade + IUpgradeable::upgrade(ref state, new_class_hash); + // Note: In a real test environment, you'd want to verify the upgrade + // was successful, but this requires additional test infrastructure + } + + #[test] + #[should_panic(expected: ('Caller is not the owner',))] + fn test_upgrade_unauthorized() { + let mut state = deploy_contract(); + let non_owner = contract_address_const::<0x456>(); + let new_class_hash: ClassHash = 123.try_into().unwrap(); + + set_caller_address(non_owner); + IUpgradeable::upgrade(ref state, new_class_hash); + } + + #[test] + fn test_paused_state_prevents_operations() { + let mut state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + set_caller_address(owner); + + // Pause the contract + IBankaiContract::pause(ref state); + assert!(IBankaiContract::is_paused(@state)); + // Verify that operations are prevented when paused + // Note: You might want to add more specific tests for each operation + // that should be prevented when paused + } + + #[test] + #[should_panic(expected: ('Contract is already paused',))] + fn test_double_pause() { + let mut state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + set_caller_address(owner); + + IBankaiContract::pause(ref state); + IBankaiContract::pause(ref state); // Should fail + } + + #[test] + #[should_panic(expected: ('Contract is not paused',))] + fn test_double_unpause() { + let mut state 
= deploy_contract(); + let owner = contract_address_const::<0x123>(); + set_caller_address(owner); + + IBankaiContract::unpause(ref state); // Should fail when not paused + } + + #[test] + fn test_program_hash_update_full_flow() { + let mut state = deploy_contract(); + let owner = contract_address_const::<0x123>(); + set_caller_address(owner); + + // Set initial timestamp + set_block_timestamp(1000); + + // Store initial values + let initial_committee_hash = IBankaiContract::get_committee_update_program_hash(@state); + let initial_epoch_hash = IBankaiContract::get_epoch_update_program_hash(@state); + let initial_batch_hash = IBankaiContract::get_epoch_batch_program_hash(@state); + + // Propose update + IBankaiContract::propose_program_hash_update(ref state, 444.into(), 555.into(), 666.into()); + + // Verify values haven't changed before delay + assert_eq!( + IBankaiContract::get_committee_update_program_hash(@state), initial_committee_hash, + ); + assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), initial_epoch_hash); + assert_eq!(IBankaiContract::get_epoch_batch_program_hash(@state), initial_batch_hash); + + // Execute after delay + set_block_timestamp(1000 + 172800); + IBankaiContract::execute_program_hash_update(ref state); + + // Verify all values updated + assert_eq!(IBankaiContract::get_committee_update_program_hash(@state), 444); + assert_eq!(IBankaiContract::get_epoch_update_program_hash(@state), 555); + assert_eq!(IBankaiContract::get_epoch_batch_program_hash(@state), 666); + } +} diff --git a/tests/test_bankai.cairo b/tests/test_bankai.cairo new file mode 100644 index 0000000..0519ecb --- /dev/null +++ b/tests/test_bankai.cairo @@ -0,0 +1 @@ + \ No newline at end of file From 2061633e1e5216739fed2af5a5f65a670fa5ea2d Mon Sep 17 00:00:00 2001 From: petscheit Date: Mon, 3 Feb 2025 14:54:04 +0100 Subject: [PATCH 29/66] chore: update config --- client-rs/src/config.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/client-rs/src/config.rs b/client-rs/src/config.rs index 529e6d1..80c952e 100644 --- a/client-rs/src/config.rs +++ b/client-rs/src/config.rs @@ -27,11 +27,11 @@ impl Default for BankaiConfig { fn default() -> Self { Self { contract_class_hash: Felt::from_hex( - "0x01b15e6fcdaf36242fa13f1218fc4babfa59f54ca824458fabd0831f7acc4fe1", + "0x020e0b39c5a14e0979081bde8a6670c9c8f69540f0078f0ec586ca51305bb3d0", ) .unwrap(), contract_address: Felt::from_hex( - "0x12fc37bbf67de953814b4461c5459340ba812faad2b2d51b3865b8b793e7b3e", + "0x1653a617deb89574e9328ce29e3a6893737bc38a4b5af0ec8265e0061f77c50", ) .unwrap(), committee_update_program_hash: Felt::from_hex( From b73060e4960e5f10dd05871c842d2d8beb61b02b Mon Sep 17 00:00:00 2001 From: lakewik Date: Tue, 4 Feb 2025 09:42:06 +0100 Subject: [PATCH 30/66] FIxes in retrying and changed batches file names --- client-rs/src/daemon.rs | 51 +++++++++++++++++++++---- client-rs/src/epoch_batch.rs | 24 +++++++----- client-rs/src/utils/database_manager.rs | 34 ++++++++++++++++- 3 files changed, 91 insertions(+), 18 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 5ac6ac0..313141f 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -126,6 +126,9 @@ async fn main() -> Result<(), Box> { } }); + // Retry any failed jobs before processing new ones + retry_failed_jobs(db_manager.clone(), tx_for_listener.clone()).await?; + // 🔄 Resume any unfinished jobs before processing new ones resume_unfinished_jobs(db_manager.clone(), tx_for_listener.clone()).await?; @@ -328,6 +331,10 @@ async fn handle_beacon_chain_head_event( } } + let _ = db_manager + .update_daemon_state_info(parsed_event.slot, parsed_event.block) + .await; + // We can do all circuit computations up to latest slot in advance, but the onchain broadcasts must be send in correct order // By correct order mean that within the same sync committe the epochs are not needed to be broadcasted in order // but the order of 
sync_commite_update->epoch_update must be correct, we firstly need to have correct sync committe veryfied @@ -712,18 +719,29 @@ async fn resume_unfinished_jobs( let tx_clone = tx.clone(); tokio::spawn(async move { - info!( - "Resuming job {} with status {}...", - job_id, - job_to_resume.job_status.to_string() - ); + match job_to_resume.job_type { + JobType::SyncCommitteeUpdate => { + info!( + "Resuming job {}... (sync committee update job for sync committee {})", + job_id, + helpers::slot_to_sync_committee_id(job.slot.to_u64().unwrap()) + ); + } + JobType::EpochBatchUpdate => { + info!( + "Resuming job {}... (batch epoch update job for epochs from {} to {})", + job_id, job.batch_range_begin_epoch, job.batch_range_end_epoch + ); + } + } + if tx_clone.send(job_to_resume).await.is_err() { // return Err("Failed to send job".into()); error!("Error resuming job: {}", job_id); } }); - tokio::time::sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_millis(500)).await; } Ok(()) @@ -763,14 +781,31 @@ async fn retry_failed_jobs( let tx_clone = tx.clone(); tokio::spawn(async move { - info!("Retrying failed job {}...", job_id); + match job_to_retry.job_type { + JobType::SyncCommitteeUpdate => { + info!( + "Requesting retry of failed job {}... (sync committee update job for sync committee {})", + job_id, + helpers::slot_to_sync_committee_id(job.slot.to_u64().unwrap()) + ); + } + JobType::EpochBatchUpdate => { + info!( + "Requesting retry of failed job {}... 
(batch epoch update job for epochs from {} to {})", + job_id, + job.batch_range_begin_epoch, + job.batch_range_end_epoch + ); + } + } + if tx_clone.send(job_to_retry).await.is_err() { // return Err("Failed to send job".into()); error!("Error retrying job: {}", job_id); } }); - tokio::time::sleep(Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_millis(500)).await; } Ok(()) diff --git a/client-rs/src/epoch_batch.rs b/client-rs/src/epoch_batch.rs index 7db1789..d6605bb 100644 --- a/client-rs/src/epoch_batch.rs +++ b/client-rs/src/epoch_batch.rs @@ -1,8 +1,8 @@ use crate::constants::{SLOTS_PER_EPOCH, TARGET_BATCH_SIZE}; use crate::epoch_update::{EpochUpdate, ExpectedEpochUpdateOutputs}; use crate::helpers::{ - calculate_slots_range_for_batch, get_first_slot_for_epoch, get_sync_committee_id_by_epoch, - slot_to_epoch_id, + self, calculate_slots_range_for_batch, get_first_slot_for_epoch, + get_sync_committee_id_by_epoch, slot_to_epoch_id, }; use crate::traits::{Provable, Submittable}; use crate::utils::hashing::get_committee_hash; @@ -278,18 +278,18 @@ impl EpochUpdateBatch { } impl EpochUpdateBatch { - pub fn from_json(first_slot: u64, last_slot: u64) -> Result + pub fn from_json(first_epoch: u64, last_epoch: u64) -> Result where T: serde::de::DeserializeOwned, { info!( "Trying to read file batches/epoch_batch/{}_to_{}/input_batch_{}_to_{}.json", - first_slot, last_slot, first_slot, last_slot + first_epoch, last_epoch, first_epoch, last_epoch ); // Pattern match for files like: batches/epoch_batch/6709248_to_6710272/input_batch_6709248_to_6710272.json let path = format!( "batches/epoch_batch/{}_to_{}/input_batch_{}_to_{}.json", - first_slot, last_slot, first_slot, last_slot + first_epoch, last_epoch, first_epoch, last_epoch ); debug!(path); let glob_pattern = glob::glob(&path) @@ -326,6 +326,7 @@ impl Provable for EpochUpdateBatch { .circuit_inputs .header .slot; + let first_epoch = helpers::slot_to_epoch_id(first_slot); let last_slot = self 
.circuit_inputs .epochs @@ -334,11 +335,12 @@ impl Provable for EpochUpdateBatch { .circuit_inputs .header .slot; - let dir_path = format!("batches/epoch_batch/{}_to_{}", first_slot, last_slot); + let last_epoch = helpers::slot_to_epoch_id(last_slot); + let dir_path = format!("batches/epoch_batch/{}_to_{}", first_epoch, last_epoch); fs::create_dir_all(dir_path.clone()).map_err(Error::IoError)?; let path = format!( "{}/input_batch_{}_to_{}.json", - dir_path, first_slot, last_slot + dir_path, first_epoch, last_epoch ); fs::write(path.clone(), json).map_err(Error::IoError)?; Ok(path) @@ -357,6 +359,7 @@ impl Provable for EpochUpdateBatch { .circuit_inputs .header .slot; + let first_epoch = helpers::slot_to_epoch_id(first_slot); let last_slot = self .circuit_inputs .epochs @@ -365,9 +368,10 @@ impl Provable for EpochUpdateBatch { .circuit_inputs .header .slot; + let last_epoch = helpers::slot_to_epoch_id(last_slot); format!( "batches/epoch_batch/{}_to_{}/pie_batch_{}_to_{}.zip", - first_slot, last_slot, first_slot, last_slot + first_epoch, last_epoch, first_epoch, last_epoch ) } @@ -380,6 +384,7 @@ impl Provable for EpochUpdateBatch { .circuit_inputs .header .slot; + let first_epoch = helpers::slot_to_epoch_id(first_slot); let last_slot = self .circuit_inputs .epochs @@ -388,9 +393,10 @@ impl Provable for EpochUpdateBatch { .circuit_inputs .header .slot; + let last_epoch = helpers::slot_to_epoch_id(last_slot); format!( "batches/epoch_batch/{}_to_{}/input_batch_{}_to_{}.json", - first_slot, last_slot, first_slot, last_slot + first_epoch, last_epoch, first_epoch, last_epoch ) } } diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 169df6e..ac2f59d 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -268,7 +268,7 @@ impl DatabaseManager { .client .query_opt( "SELECT slot FROM jobs - WHERE job_status NOT IN ('DONE', 'CANCELLED', 'ERROR') + WHERE job_status NOT IN ('DONE') AND 
type = 'SYNC_COMMITTEE_UPDATE' ORDER BY slot DESC LIMIT 1", @@ -604,4 +604,36 @@ impl DatabaseManager { // Ok(()) // } + // + + // pub async fn insert_job_log_entry( + // &self, + // job_id: u64, + // event_type: JobLogEntry, + // details: String, + // ) -> Result<(), Box> { + // self.client + // .execute( + // "INSERT INTO job_logs (job_id, event_type, details) + // VALUES ($1, $2, $3)", + // &[&job_id.to_string(), &event_type.to_string(), &details], + // ) + // .await?; + + // Ok(()) + // } + // + pub async fn update_daemon_state_info( + &self, + latest_known_beacon_slot: u64, + latest_known_beacon_block: FixedBytes<32>, + ) -> Result<(), Box> { + self.client + .execute( + "UPDATE daemon_state SET latest_known_beacon_slot = $1, latest_known_beacon_block = NOW()", + &[&latest_known_beacon_slot.to_string(), &latest_known_beacon_block.to_string()], + ) + .await?; + Ok(()) + } } From 79a9f2a8026f7113c6ca70db06335381bbfa2ced Mon Sep 17 00:00:00 2001 From: petscheit Date: Tue, 4 Feb 2025 10:36:31 +0100 Subject: [PATCH 31/66] fix: owner init issue --- client-rs/src/config.rs | 4 ++-- client-rs/src/utils/starknet_client.rs | 5 ++++- contract/src/lib.cairo | 5 +++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/client-rs/src/config.rs b/client-rs/src/config.rs index 80c952e..567b9d3 100644 --- a/client-rs/src/config.rs +++ b/client-rs/src/config.rs @@ -27,11 +27,11 @@ impl Default for BankaiConfig { fn default() -> Self { Self { contract_class_hash: Felt::from_hex( - "0x020e0b39c5a14e0979081bde8a6670c9c8f69540f0078f0ec586ca51305bb3d0", + "0x00034b6d1cd9858aeabcee33ef5ec5cd04be155d79ca2bbf9036700cb6c7c287", ) .unwrap(), contract_address: Felt::from_hex( - "0x1653a617deb89574e9328ce29e3a6893737bc38a4b5af0ec8265e0061f77c50", + "0x1b7b70023bc2429d4453ce75d75f3e8b01b0730ca83068a82b4d17aa88a25e3", ) .unwrap(), committee_update_program_hash: Felt::from_hex( diff --git a/client-rs/src/utils/starknet_client.rs b/client-rs/src/utils/starknet_client.rs index 
0789448..03a8a3f 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -124,8 +124,11 @@ impl StarknetClient { class_hash ); + let mut params = init_data.to_calldata(); + params.push(self.account.address()); + let contract_factory = ContractFactory::new(class_hash, self.account.clone()); - let deploy_tx = contract_factory.deploy_v1(init_data.to_calldata(), felt!("1337"), false); + let deploy_tx = contract_factory.deploy_v1(params, felt!("1337"), false); let contract_address = deploy_tx.deployed_address(); diff --git a/contract/src/lib.cairo b/contract/src/lib.cairo index 07cf7cc..e9a5b3c 100644 --- a/contract/src/lib.cairo +++ b/contract/src/lib.cairo @@ -16,7 +16,7 @@ pub mod BankaiContract { }; use starknet::ClassHash; - use starknet::{get_caller_address, get_block_timestamp}; + use starknet::{ContractAddress, get_block_timestamp}; use integrity::{ Integrity, IntegrityWithConfig, SHARP_BOOTLOADER_PROGRAM_HASH, VerifierConfiguration, }; @@ -122,9 +122,10 @@ pub mod BankaiContract { committee_update_program_hash: felt252, epoch_update_program_hash: felt252, epoch_batch_program_hash: felt252, + owner: ContractAddress, ) { // Initialize owner as contract deployer - self.ownable.initializer(get_caller_address()); + self.ownable.initializer(owner); self.latest_epoch_slot.write(0); // Write trusted initial committee From 76e1394805a8cfce4cb681d08d9742cf5d28dfcb Mon Sep 17 00:00:00 2001 From: lakewik Date: Tue, 4 Feb 2025 12:23:52 +0100 Subject: [PATCH 32/66] Epochs calculation fixes --- client-rs/Dockerfile | 39 ++++++++++++++++++++++++++ client-rs/docker-compose.dev.yml | 31 ++++++++++++++++++++ client-rs/scripts/wait-for-postgres.sh | 6 ++++ client-rs/src/constants.rs | 2 +- client-rs/src/daemon.rs | 19 +++++-------- 5 files changed, 84 insertions(+), 13 deletions(-) create mode 100644 client-rs/Dockerfile create mode 100644 client-rs/docker-compose.dev.yml create mode 100644 client-rs/scripts/wait-for-postgres.sh diff 
--git a/client-rs/Dockerfile b/client-rs/Dockerfile new file mode 100644 index 0000000..6df7fdb --- /dev/null +++ b/client-rs/Dockerfile @@ -0,0 +1,39 @@ +FROM rust:1.72 as builder + +WORKDIR /usr/src/app + +COPY Cargo.toml Cargo.lock ./ + +COPY src ./src + +RUN cargo build --release --bin daemon + +FROM debian:bullseye + +RUN apt-get update && apt-get install -y \ + libpq-dev \ + postgresql \ + && rm -rf /var/lib/apt/lists/* + +RUN mkdir -p /usr/src/app/batches + +WORKDIR /usr/src/app + +COPY --from=builder /usr/src/app/target/release/daemon /usr/src/app/ + +RUN mkdir -p /var/lib/postgresql/data && chown -R postgres:postgres /var/lib/postgresql + +COPY scripts/entrypoint.sh /usr/local/bin/entrypoint.sh +RUN chmod +x /usr/local/bin/entrypoint.sh + +EXPOSE 5432 + +USER postgres + +RUN /usr/lib/postgresql/14/bin/initdb -D /var/lib/postgresql/data + +USER root + +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] + +CMD ["/usr/src/app/daemon"] diff --git a/client-rs/docker-compose.dev.yml b/client-rs/docker-compose.dev.yml new file mode 100644 index 0000000..cec21ec --- /dev/null +++ b/client-rs/docker-compose.dev.yml @@ -0,0 +1,31 @@ +services: + postgres: + image: postgres:14 + container_name: postgres + environment: + POSTGRESQL_USER: ${POSTGRES_USER:-postgres} + POSTGRESQL_PASSWORD: ${POSTGRES_PASSWORD:-postgres} + POSTGRESQL_DB_NAME: ${POSTGRES_DB:-bankai} + volumes: + - pgdata:/var/lib/postgresql/data + ports: + - ${POSTGRES_PORT:-5432}:${POSTGRES_PORT:-5432} + + daemon: + build: + context: . + dockerfile: Dockerfile + container_name: bankai-daemon + depends_on: + - postgres + env_file: + - .env.sepolia + #environment: + # POSTGRESQL_USER + volumes: + - ./batches:/usr/src/app/batches + ports: + - "3000:3000" + +volumes: + pgdata: diff --git a/client-rs/scripts/wait-for-postgres.sh b/client-rs/scripts/wait-for-postgres.sh new file mode 100644 index 0000000..14e0584 --- /dev/null +++ b/client-rs/scripts/wait-for-postgres.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +while ! 
nc -z localhost 5434; do sleep 1; done +echo "Postgres ready" +sleep 1 +exit 0 diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index 458a52c..cd8d97e 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -2,7 +2,7 @@ pub const SLOTS_PER_EPOCH: u64 = 32; // For mainnet pub const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet pub const TARGET_BATCH_SIZE: u64 = 32; // Defines how many epochs in one batch pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; // For mainnet -pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 11; // Define the limit of how many jobs can be in state "in progress" concurrently +pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 1; // Define the limit of how many jobs can be in state "in progress" concurrently pub const MAX_CONCURRENT_PIE_GENERATIONS: usize = 1; // Define how many concurrent trace (pie file) generation jobs are allowed to not exhaust resources pub const MAX_CONCURRENT_RPC_DATA_FETCH_JOBS: usize = 1; // Define how many data fetching jobs can be performed concurrently to not overload RPC pub const STARKNET_SEPOLIA: &str = "0x534e5f5345504f4c4941"; diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 313141f..6e57ea6 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -193,7 +193,7 @@ async fn main() -> Result<(), Box> { )) .with_state(app_state); - let addr = "0.0.0.0:3000".parse::()?; + let addr = "0.0.0.0:3001".parse::()?; let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); info!("Bankai RPC HTTP server is listening on http://{}", addr); @@ -825,12 +825,8 @@ async fn broadcast_onchain_ready_jobs( match job.job_type { JobType::EpochBatchUpdate => { let circuit_inputs = EpochUpdateBatch::from_json::( - helpers::get_first_slot_for_epoch( - job.batch_range_begin_epoch.try_into().unwrap(), - ), - helpers::get_first_slot_for_epoch( - job.batch_range_end_epoch.try_into().unwrap(), - ), + job.batch_range_begin_epoch.try_into().unwrap(), + 
job.batch_range_end_epoch.try_into().unwrap(), )?; info!( @@ -1164,7 +1160,6 @@ async fn process_job( match current_status { JobStatus::Created => { info!("[BATCH EPOCH JOB] Preparing inputs for program..."); - let circuit_inputs = EpochUpdateBatch::new_by_epoch_range( &bankai, db_manager.clone(), @@ -1184,8 +1179,8 @@ async fn process_job( } JobStatus::ProgramInputsPrepared => { let circuit_inputs = EpochUpdateBatch::from_json::( - helpers::get_first_slot_for_epoch(job.batch_range_begin_epoch.unwrap()), - helpers::get_first_slot_for_epoch(job.batch_range_end_epoch.unwrap()), + job.batch_range_begin_epoch.unwrap(), + job.batch_range_end_epoch.unwrap(), )?; info!("[BATCH EPOCH JOB] Starting trace generation..."); @@ -1200,8 +1195,8 @@ async fn process_job( } JobStatus::PieGenerated => { let circuit_inputs = EpochUpdateBatch::from_json::( - helpers::get_first_slot_for_epoch(job.batch_range_begin_epoch.unwrap()), - helpers::get_first_slot_for_epoch(job.batch_range_end_epoch.unwrap()), + job.batch_range_begin_epoch.unwrap(), + job.batch_range_end_epoch.unwrap(), )?; info!("[BATCH EPOCH JOB] Uploading PIE and sending proof generation request to Atlantic..."); From 823d95646a0e9c996334ec7080c7060dd3c6bee2 Mon Sep 17 00:00:00 2001 From: lakewik Date: Tue, 4 Feb 2025 16:16:29 +0100 Subject: [PATCH 33/66] Fixes in job start and resume conditions --- client-rs/src/constants.rs | 2 +- client-rs/src/daemon.rs | 70 +++++++++++++++++++++---------------- client-rs/src/routes/mod.rs | 2 +- 3 files changed, 41 insertions(+), 33 deletions(-) diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index cd8d97e..714f36b 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -2,7 +2,7 @@ pub const SLOTS_PER_EPOCH: u64 = 32; // For mainnet pub const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet pub const TARGET_BATCH_SIZE: u64 = 32; // Defines how many epochs in one batch pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; // For mainnet -pub const 
MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 1; // Define the limit of how many jobs can be in state "in progress" concurrently +pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 7; // Define the limit of how many jobs can be in state "in progress" concurrently pub const MAX_CONCURRENT_PIE_GENERATIONS: usize = 1; // Define how many concurrent trace (pie file) generation jobs are allowed to not exhaust resources pub const MAX_CONCURRENT_RPC_DATA_FETCH_JOBS: usize = 1; // Define how many data fetching jobs can be performed concurrently to not overload RPC pub const STARKNET_SEPOLIA: &str = "0x534e5f5345504f4c4941"; diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 6e57ea6..78aded6 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -415,13 +415,18 @@ async fn handle_beacon_chain_head_event( latest_scheduled_sync_committee = latest_verified_sync_committee_id; } + info!( + "Current state: Beacon Chain: [Slot: {} Epoch: {} Sync Committee: {}] | Latest verified: [Slot: {} Epoch: {} Sync Committee: {}] | Latest in progress: [Epoch: {} Sync Committee: {}] | Sync in progress...", + parsed_event.slot, current_epoch_id, current_sync_committee_id, latest_verified_epoch_slot, latest_verified_epoch_id, latest_verified_sync_committee_id, last_epoch_in_progress, last_sync_committee_in_progress + ); + // Decide basing on actual state if epochs_behind > constants::TARGET_BATCH_SIZE { // is_node_in_sync = true; warn!( - "Bankai is out of sync now. Node is {} epochs behind network. Current Beacon Chain state: [Slot: {} Epoch: {} Sync Committee: {}] | Latest verified: [Slot: {} Epoch: {} Sync Committee: {}] | Latest in progress: [Epoch: {} Sync Committee: {}] | Sync in progress...", - epochs_behind, parsed_event.slot, current_epoch_id, current_sync_committee_id, latest_verified_epoch_slot, latest_verified_epoch_id, latest_verified_sync_committee_id, last_epoch_in_progress, last_sync_committee_in_progress + "Bankai is out of sync now. 
Node is {} epochs behind network. | Sync in progress...", + epochs_behind ); // Check if we have in progress all epochs that need to be processed, if no, run job @@ -517,32 +522,35 @@ async fn handle_beacon_chain_head_event( debug!("All reqired jobs are now queued and processing"); } } else if epochs_behind == constants::TARGET_BATCH_SIZE { - // This is when we are synced properly and new epoch batch needs to be inserted - info!( - "Starting processing next epoch batch. Current Beacon Chain epoch: {} Latest verified epoch: {}", - current_epoch_id, latest_verified_epoch_id - ); + if last_epoch_in_progress < (epochs_behind + current_epoch_id) { + // This is when we are synced properly and new epoch batch needs to be inserted + info!( + "Target batch size reached. Starting processing next epoch batch. Current Beacon Chain epoch: {} Latest verified epoch: {}", + current_epoch_id, latest_verified_epoch_id + ); - let epoch_to_start_from = latest_scheduled_epoch + 1; - let epoch_to_end_on = latest_scheduled_epoch + constants::TARGET_BATCH_SIZE; - match run_batch_epoch_update_job( - db_manager.clone(), - get_first_slot_for_epoch(epoch_to_start_from) - + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), - epoch_to_start_from, - epoch_to_end_on, - tx.clone(), - ) - .await - { - Ok(()) => {} - Err(e) => { - error!("Error while creating job: {}", e); - } - }; + let epoch_to_start_from = latest_scheduled_epoch + 1; + let epoch_to_end_on = latest_scheduled_epoch + constants::TARGET_BATCH_SIZE; + match run_batch_epoch_update_job( + db_manager.clone(), + get_first_slot_for_epoch(epoch_to_start_from) + + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), + epoch_to_start_from, + epoch_to_end_on, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating job: {}", e); + } + }; + } } else if epochs_behind < constants::TARGET_BATCH_SIZE { // When we are in sync and not yet reached the TARGET_BATCH_SIZE epochs lagging behind actual 
beacon chian state - debug!("Target batch size not reached yet, daemon is in sync"); + let eppchs_left = constants::TARGET_BATCH_SIZE - epochs_behind; + info!("Target batch size not reached yet, daemon is in sync, {} epochs left to start new batch job", eppchs_left); } // Check if sync committee update is needed @@ -688,6 +696,8 @@ async fn resume_unfinished_jobs( let unfinished_jobs = db_manager .get_jobs_with_statuses(vec![ JobStatus::Created, + JobStatus::ProgramInputsPrepared, + JobStatus::StartedTraceGeneration, JobStatus::PieGenerated, JobStatus::AtlanticProofRequested, JobStatus::AtlanticProofRetrieved, @@ -830,7 +840,7 @@ async fn broadcast_onchain_ready_jobs( )?; info!( - "[SYNC COMMITTEE JOB] Calling epoch batch update onchain for epochs range from {} to {}...", + "[EPOCH BATCH JOB] Calling epoch batch update onchain for epochs range from {} to {}...", job.batch_range_begin_epoch, job.batch_range_end_epoch ); @@ -849,7 +859,7 @@ async fn broadcast_onchain_ready_jobs( .update_job_status(job.job_uuid, JobStatus::ProofVerifyCalledOnchain) .await?; - let send_result = db_manager.set_job_txhash(job.job_uuid, txhash).await?; + let _send_result = db_manager.set_job_txhash(job.job_uuid, txhash).await?; let confirmation_result = bankai.starknet_client.wait_for_confirmation(txhash).await; @@ -1013,7 +1023,7 @@ async fn process_job( current_status = JobStatus::ProgramInputsPrepared; } - JobStatus::ProgramInputsPrepared => { + JobStatus::ProgramInputsPrepared | JobStatus::StartedTraceGeneration => { let sync_committe_update_program_inputs = SyncCommitteeUpdate::from_json::( job.slot.unwrap(), @@ -1103,8 +1113,6 @@ async fn process_job( .update_job_status(job.job_id, JobStatus::AtlanticProofRetrieved) .await?; - current_status = JobStatus::AtlanticProofRetrieved; - // Submit wrapped proof request info!("[SYNC COMMITTEE JOB] Sending proof wrapping query to Atlantic.."); wrapping_batch_id = @@ -1177,7 +1185,7 @@ async fn process_job( current_status = 
JobStatus::ProgramInputsPrepared; } - JobStatus::ProgramInputsPrepared => { + JobStatus::ProgramInputsPrepared | JobStatus::StartedTraceGeneration => { let circuit_inputs = EpochUpdateBatch::from_json::( job.batch_range_begin_epoch.unwrap(), job.batch_range_end_epoch.unwrap(), diff --git a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs index acf6922..f21938e 100644 --- a/client-rs/src/routes/mod.rs +++ b/client-rs/src/routes/mod.rs @@ -19,7 +19,7 @@ pub async fn handle_get_status(State(state): State) -> impl IntoRespon last_epoch_in_progress } Ok(None) => 0, - Err(e) => 0, + Err(_) => 0, }; let in_progress_jobs_count = state.db_manager.count_jobs_in_progress().await.unwrap(); let last_sync_committee_in_progress = state From cbd1145ea1372d54f6abc3becb583961ceff015c Mon Sep 17 00:00:00 2001 From: lakewik Date: Tue, 4 Feb 2025 18:18:36 +0100 Subject: [PATCH 34/66] Add more status info to endpoint --- client-rs/src/constants.rs | 1 + client-rs/src/daemon.rs | 21 ++++++---- client-rs/src/routes/mod.rs | 48 ++++++++++++++------- client-rs/src/state.rs | 2 +- client-rs/src/utils/database_manager.rs | 55 +++++++++++++++++++++++++ 5 files changed, 102 insertions(+), 25 deletions(-) diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index 714f36b..24817ee 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -8,3 +8,4 @@ pub const MAX_CONCURRENT_RPC_DATA_FETCH_JOBS: usize = 1; // Define how many data pub const STARKNET_SEPOLIA: &str = "0x534e5f5345504f4c4941"; pub const STARKNET_MAINNET: &str = "0x534e5f4d41494e"; pub const USE_TRANSACTOR: bool = false; +pub const MAX_JOB_RETRIES_COUNT: u64 = 10; diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 78aded6..13f9408 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -930,7 +930,7 @@ async fn broadcast_onchain_ready_jobs( match confirmation_result { Ok(_) => { - info!("[EPOCH BATCH JOB] Transaction is confirmed on-chain!"); + info!("[SYNC 
COMMITTEE JOB] Transaction is confirmed on-chain!"); db_manager .update_job_status(job.job_uuid, JobStatus::Done) .await?; @@ -963,7 +963,10 @@ async fn broadcast_onchain_ready_jobs( .await?; } Err(e) => { - eprintln!("[EPOCH BATCH JOB] Transaction failed or timed out: {:?}", e); + eprintln!( + "[SYNC COMMITTEE JOB] Transaction failed or timed out: {:?}", + e + ); db_manager .update_job_status(job.job_uuid, JobStatus::Error) .await?; @@ -1158,7 +1161,7 @@ async fn process_job( break; } _ => { - error!("[EPOCH JOB] Unexpected behaviour"); + error!("[SYNC COMMITTEE JOB] Unexpected behaviour"); break; } } @@ -1256,7 +1259,7 @@ async fn process_job( .await?; info!( - "[EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", + "[BATCH EPOCH JOB] Proof retrieved from Atlantic. QueryID: {}", batch_id ); @@ -1266,12 +1269,12 @@ async fn process_job( // 5) Submit wrapped proof request info!( - "[EPOCH JOB] Uploading proof and sending wrapping query to Atlantic.." + "[BATCH EPOCH JOB] Uploading proof and sending wrapping query to Atlantic.." ); wrapping_batch_id = bankai.atlantic_client.submit_wrapped_proof(proof).await?; info!( - "[EPOCH JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", + "[BATCH EPOCH JOB] Proof wrapping query submitted to Atlantic. Wrapping QueryID: {}", wrapping_batch_id ); @@ -1292,7 +1295,7 @@ async fn process_job( JobStatus::WrapProofRequested => { // Pool for Atlantic execution done info!( - "[SYNC COMMITTEE JOB] Waiting for completion of Atlantic proof wrapping job. QueryID: {}", + "[BATCH EPOCH JOB] Waiting for completion of Atlantic proof wrapping job. QueryID: {}", wrapping_batch_id ); @@ -1309,7 +1312,7 @@ async fn process_job( .update_job_status(job.job_id, JobStatus::WrappedProofDone) .await?; - info!("[EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. Wrapping QueryID: {}", wrapping_batch_id); + info!("[BATCH EPOCH JOB] Proof wrapping done by Atlantic. Fact registered on Integrity. 
Wrapping QueryID: {}", wrapping_batch_id); db_manager .update_job_status(job.job_id, JobStatus::OffchainComputationFinished) @@ -1318,7 +1321,7 @@ async fn process_job( break; } _ => { - error!("[EPOCH JOB] Unexpected behaviour"); + error!("[BATCH EPOCH JOB] Unexpected behaviour"); break; } } diff --git a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs index f21938e..64d49a1 100644 --- a/client-rs/src/routes/mod.rs +++ b/client-rs/src/routes/mod.rs @@ -1,4 +1,5 @@ use crate::state::AppState; +use alloy_primitives::map::HashMap; use axum::{ extract::{Path, State}, response::IntoResponse, @@ -14,20 +15,23 @@ use uuid::Uuid; // Handler for GET /status pub async fn handle_get_status(State(state): State) -> impl IntoResponse { let last_epoch_in_progress = match state.db_manager.get_latest_epoch_in_progress().await { - Ok(Some(epoch)) => { - let last_epoch_in_progress = epoch.to_u64().unwrap(); - last_epoch_in_progress - } - Ok(None) => 0, - Err(_) => 0, + Ok(Some(epoch)) => epoch.to_u64().unwrap(), + Ok(None) | Err(_) => 0, + }; + + let in_progress_jobs_count = match state.db_manager.count_jobs_in_progress().await { + Ok(Some(count)) => count, + Ok(None) | Err(_) => 0, }; - let in_progress_jobs_count = state.db_manager.count_jobs_in_progress().await.unwrap(); - let last_sync_committee_in_progress = state + + let last_sync_committee_in_progress = match state .db_manager .get_latest_sync_committee_in_progress() .await - .unwrap() - .unwrap(); + { + Ok(Some(sync_committee)) => sync_committee, + Ok(None) | Err(_) => 0, + }; // let beacon_chain_state = state // .db_manager @@ -35,12 +39,26 @@ pub async fn handle_get_status(State(state): State) -> impl IntoRespon // .await // .unwrap(); - Json(json!({ "success": true, "details": { - "last_epoch_in_progress": last_epoch_in_progress, - "last_sync_committee_in_progress": last_sync_committee_in_progress, - "jobs_in_progress_count": in_progress_jobs_count, + let jobs_status_counts = state + .db_manager + 
.get_jobs_count_by_status() + .await + .unwrap_or_default(); - } })) + let mut jobs_status_map = HashMap::new(); + for job_status_count in jobs_status_counts { + jobs_status_map.insert(job_status_count.status.to_string(), job_status_count.count); + } + + Json(json!({ + "success": true, + "details": { + "last_epoch_in_progress": last_epoch_in_progress, + "last_sync_committee_in_progress": last_sync_committee_in_progress, + "jobs_in_progress_count": in_progress_jobs_count, + "jobs_statuses": jobs_status_map, + } + })) } // // Handler for GET /epoch/:slot diff --git a/client-rs/src/state.rs b/client-rs/src/state.rs index b9d2cd2..aac2337 100644 --- a/client-rs/src/state.rs +++ b/client-rs/src/state.rs @@ -27,7 +27,7 @@ pub struct AppState { pub bankai: Arc, } -#[derive(Debug, FromSql, ToSql, Clone)] +#[derive(Debug, FromSql, ToSql, Clone, Eq, Hash, PartialEq)] #[postgres(name = "job_status")] pub enum JobStatus { #[postgres(name = "CREATED")] diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index ac2f59d..e259680 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -7,6 +7,7 @@ use std::str::FromStr; //use std::error::Error; use chrono::NaiveDateTime; use num_traits::ToPrimitive; +use std::collections::HashMap; use tokio_postgres::{Client, Row}; use tracing::{error, info, warn}; use uuid::Uuid; @@ -24,6 +25,12 @@ pub struct JobSchema { //pub updated_at: i64, } +#[derive(Debug)] +pub struct JobStatusCount { + pub status: JobStatus, + pub count: i64, +} + #[derive(Debug)] pub struct DatabaseManager { client: Client, @@ -636,4 +643,52 @@ impl DatabaseManager { .await?; Ok(()) } + + pub async fn get_jobs_count_by_status( + &self, + ) -> Result, Box> { + let rows = self + .client + .query( + "SELECT job_status, COUNT(*) AS job_count FROM jobs GROUP BY job_status", + &[], + ) + .await?; + + let mut db_counts: HashMap = HashMap::new(); + for row in rows { + let status_str: String = 
row.get("job_status"); + let status_count: i64 = row.get("job_count"); + + let job_status = JobStatus::from_str(&status_str) + .map_err(|err| format!("Failed to parse job status from DB row: {}", err))?; + + db_counts.insert(job_status, status_count); + } + + let all_possible_statuses = vec![ + JobStatus::Created, + JobStatus::StartedTraceGeneration, + JobStatus::ProgramInputsPrepared, + JobStatus::PieGenerated, + JobStatus::AtlanticProofRequested, + JobStatus::AtlanticProofRetrieved, + JobStatus::WrapProofRequested, + JobStatus::WrappedProofDone, + JobStatus::OffchainComputationFinished, + JobStatus::ReadyToBroadcastOnchain, + JobStatus::ProofVerifyCalledOnchain, + JobStatus::Done, + JobStatus::Error, + JobStatus::Cancelled, + ]; + + let mut result = Vec::with_capacity(all_possible_statuses.len()); + for status in all_possible_statuses { + let count = db_counts.get(&status).copied().unwrap_or(0); + result.push(JobStatusCount { status, count }); + } + + Ok(result) + } } From 7b2ad617497372c56abbb55d3fed1412240dc587 Mon Sep 17 00:00:00 2001 From: petscheit Date: Tue, 4 Feb 2025 19:34:31 +0100 Subject: [PATCH 35/66] feat: add dashboard --- client-rs/src/daemon.rs | 3 + client-rs/src/routes/dashboard.rs | 143 ++++++++++++++++++++++++ client-rs/src/routes/mod.rs | 19 ++++ client-rs/src/utils/database_manager.rs | 104 +++++++++++++++++ client-rs/src/utils/rpc.rs | 16 +++ 5 files changed, 285 insertions(+) create mode 100644 client-rs/src/routes/dashboard.rs diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 78aded6..6a54584 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -31,6 +31,7 @@ use helpers::{ }; use num_traits::cast::ToPrimitive; use reqwest; +use routes::dashboard::handle_get_dashboard; use starknet::core::types::Felt; use state::check_env_vars; use state::{AppState, Job}; @@ -181,6 +182,8 @@ async fn main() -> Result<(), Box> { get(handle_get_latest_verified_committee), ) .route("/debug/get_job_status", 
get(handle_get_job_status)) + // Add dashboard route here + .route("/dashboard", get(handle_get_dashboard)) // .route("/get-merkle-inclusion-proof", get(handle_get_merkle_inclusion_proof)) .layer(DefaultBodyLimit::disable()) .layer( diff --git a/client-rs/src/routes/dashboard.rs b/client-rs/src/routes/dashboard.rs new file mode 100644 index 0000000..c0f3e9f --- /dev/null +++ b/client-rs/src/routes/dashboard.rs @@ -0,0 +1,143 @@ +use crate::state::{AppState, JobStatus}; +use axum::extract::State; +use num_traits::SaturatingSub; + +pub async fn handle_get_dashboard( + State(state): State, +) -> String { + let db = state.db_manager.clone(); + let bankai = state.bankai.clone(); + + // Fetch required stats + let latest_beacon_slot = bankai.client.get_head_slot().await.unwrap_or_default(); + let latest_verified_slot = bankai + .starknet_client + .get_latest_epoch_slot(&bankai.config) + .await + .unwrap_or_default() + .to_string() + .parse::() + .unwrap_or(0); + + // Calculate success rate from database + let total_jobs = db.count_total_jobs().await.unwrap_or(0); + let successful_jobs = db.count_successful_jobs().await.unwrap_or(0); + let success_rate = if total_jobs > 0 { + ((successful_jobs as f64 / total_jobs as f64) * 100.0).round() + } else { + 0.0 + }; + + // Calculate average job duration + let avg_duration = db.get_average_job_duration().await.unwrap_or(0); + let avg_duration_str = format!("{}s", avg_duration); + + let jobs_in_progress = db.count_jobs_in_progress().await.unwrap_or(Some(0)).unwrap(); + + // Fetch last 3 batch jobs + let recent_batches = db.get_recent_batch_jobs(6).await.unwrap_or_default(); + + // Format batch information + let batch_info = recent_batches + .iter() + .map(|entry| { + format!( + "║ Batch {:}: {} -> {} [{}] {:<45} ║", + entry.job.job_uuid.to_string()[..8].to_string(), + entry.job.batch_range_begin_epoch, + entry.job.batch_range_end_epoch, + match entry.job.job_status { + JobStatus::Done => "✓", + JobStatus::Error => "✗", + _ => "⋯", 
+ }, + entry.job.job_status.to_string(), + // entry.tx_hash.as_ref().map_or( + // "-".to_string(), + // |hash| format!("0x{:x}", hash) + // ) + ) + }) + .collect::>() + .join("\n"); + + let batch_display = if recent_batches.is_empty() { + " ║ No recent batches found ║ ".to_string() + } else { + batch_info + }; + + // Update system health indicators with simpler checks + let daemon_status = "● Active"; + let db_status = if db.is_connected().await { "● Connected" } else { "○ Disconnected" }; + let beacon_status = if bankai.client.get_head_slot().await.is_ok() { "● Connected" } else { "○ Disconnected" }; + + let epoch_gap = (latest_beacon_slot.saturating_sub(latest_verified_slot) as f64 / 32.0).round() as u64; + + create_ascii_dashboard( + latest_beacon_slot, + latest_verified_slot, + epoch_gap, + success_rate, + &avg_duration_str, + jobs_in_progress, + daemon_status, + db_status, + beacon_status, + &batch_display + ) +} + +pub fn create_ascii_dashboard( + latest_beacon_slot: u64, + latest_verified_slot: u64, + epoch_gap: u64, + success_rate: f64, + avg_duration_str: &str, + jobs_in_progress: u64, + daemon_status: &str, + db_status: &str, + beacon_status: &str, + batch_display: &str, +) -> String { + format!( +r#" + ____ _ _ _ _ __ _ ___ +| __ ) / \ | \ | | |/ / / \ |_ _| +| _ \ / _ \ | \| | ' / / _ \ | | +| |_) / ___ \| |\ | . 
\ / ___ \ | | +|____/_/ \_\_| \_|_|\_/_/ \_\___| + +╔════════════════════════════════════════ DASHBOARD OVERVIEW ═════════════════════════════════════╗ +║ ║ +║ Statuses: ║ +║ • Daemon: {daemon_status:<12} • Database: {db_status:<12} • Beacon: {beacon_status:<12} ║ +║ ║ +║ Metrics: ║ +║ • Success Rate: {success_rate:<10} ║ +║ • Average Duration: {avg_duration:<10} ║ +║ • Jobs in Progress: {jobs_in_progress:<10} ║ +║ ║ +║ Beacon Info: ║ +║ • Latest Beacon Slot: {latest_beacon_slot:<12} ║ +║ • Latest Verified Slot: {latest_verified_slot:<12} ║ +║ • Epoch Gap: {epoch_gap:<12} ║ +║ ║ +╠═══════════════════════════════════════ RECENT BATCH JOBS ═══════════════════════════════════════╣ +║ UUID: FROM: TO: STATUS: TX: ║ +║ ─────────────────────────────────────────────────────────────────────────────────────────────── ║ +{batch_display_block} +╚═════════════════════════════════════════════════════════════════════════════════════════════════╝ +"#, + daemon_status = daemon_status, + db_status = db_status, + beacon_status = beacon_status, + success_rate = format!("{:.2}%", success_rate), + avg_duration = avg_duration_str, + jobs_in_progress = jobs_in_progress, + latest_beacon_slot = latest_beacon_slot, + latest_verified_slot = latest_verified_slot, + epoch_gap = epoch_gap, + batch_display_block = batch_display + ) +} \ No newline at end of file diff --git a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs index f21938e..90ed588 100644 --- a/client-rs/src/routes/mod.rs +++ b/client-rs/src/routes/mod.rs @@ -3,12 +3,16 @@ use axum::{ extract::{Path, State}, response::IntoResponse, Json, + routing::{get, post}, + Router, }; use num_traits::cast::ToPrimitive; use serde_json::{json, Value}; use tracing::error; use uuid::Uuid; +pub mod dashboard; + // RPC requests handling functions // // Handler for GET /status @@ -202,3 +206,18 @@ pub async fn handle_get_merkle_paths_for_epoch( } } } + +pub fn configure_routes(state: AppState) -> Router { + Router::new() + // Status routes + 
.route("/status", get(handle_get_status)) + .route("/epoch/:slot/proof", get(handle_get_epoch_proof)) + .route("/committee/:committee_id/hash", get(handle_get_committee_hash)) + .route("/latest/verified/slot", get(handle_get_latest_verified_slot)) + .route("/latest/verified/committee", get(handle_get_latest_verified_committee)) + .route("/job/:job_id/status", get(handle_get_job_status)) + .route("/epoch/:epoch_id/merkle_paths", get(handle_get_merkle_paths_for_epoch)) + // Dashboard routes + .nest("/dashboard", dashboard::routes()) + .with_state(state) +} diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index ac2f59d..e9da35b 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -24,6 +24,14 @@ pub struct JobSchema { //pub updated_at: i64, } +#[derive(Debug)] +pub struct JobWithTimestamps { + pub job: JobSchema, + pub created_at: String, + pub updated_at: String, + pub tx_hash: Option, +} + #[derive(Debug)] pub struct DatabaseManager { client: Client, @@ -636,4 +644,100 @@ impl DatabaseManager { .await?; Ok(()) } + + pub async fn count_total_jobs(&self) -> Result> { + let row = self.client + .query_one( + "SELECT COUNT(*) as count FROM jobs", + &[], + ) + .await?; + + Ok(row.get::<_, i64>("count").to_u64().unwrap_or(0)) + } + + pub async fn count_successful_jobs(&self) -> Result> { + let row = self.client + .query_one( + "SELECT COUNT(*) as count FROM jobs WHERE job_status = 'DONE'", + &[], + ) + .await?; + + Ok(row.get::<_, i64>("count").to_u64().unwrap_or(0)) + } + + pub async fn get_average_job_duration(&self) -> Result> { + let row = self.client + .query_one( + "SELECT EXTRACT(EPOCH FROM AVG(updated_at - created_at))::INTEGER as avg_duration + FROM jobs + WHERE job_status = 'DONE'", + &[], + ) + .await?; + + Ok(i64::from(row.get::<_, Option>("avg_duration").unwrap_or(0))) + } + + + pub async fn get_recent_batch_jobs(&self, limit: i64) -> Result, Box> { + let rows = 
self.client + .query( + "SELECT *, + to_char(created_at, 'HH24:MI:SS') as created_time, + to_char(updated_at, 'HH24:MI:SS') as updated_time + FROM jobs + WHERE type = 'EPOCH_BATCH_UPDATE' + ORDER BY batch_range_begin_epoch DESC + LIMIT $1", + &[&limit], + ) + .await?; + + let jobs = rows + .into_iter() + .map(|row| { + let job_type_str: String = row.get("type"); + let job_status_str: String = row.get("job_status"); + + let job_type = JobType::from_str(&job_type_str) + .expect("Failed to parse job type"); + let job_status = JobStatus::from_str(&job_status_str) + .expect("Failed to parse job status"); + + JobWithTimestamps { + job: JobSchema { + job_uuid: row.get("job_uuid"), + job_status, + slot: row.get("slot"), + batch_range_begin_epoch: row + .get::<&str, Option>("batch_range_begin_epoch") + .unwrap_or(0), + batch_range_end_epoch: row + .get::<&str, Option>("batch_range_end_epoch") + .unwrap_or(0), + job_type, + atlantic_proof_generate_batch_id: row.get("atlantic_proof_generate_batch_id"), + atlantic_proof_wrapper_batch_id: row.get("atlantic_proof_wrapper_batch_id"), + }, + created_at: row.get("created_time"), + updated_at: row.get("updated_time"), + tx_hash: row.get("tx_hash"), + } + }) + .collect(); + + Ok(jobs) + } + + pub async fn is_connected(&self) -> bool { + match self.client + .query_one("SELECT 1", &[]) + .await + { + Ok(_) => true, + Err(_) => false, + } + } } diff --git a/client-rs/src/utils/rpc.rs b/client-rs/src/utils/rpc.rs index 2af5e49..eabff92 100644 --- a/client-rs/src/utils/rpc.rs +++ b/client-rs/src/utils/rpc.rs @@ -200,4 +200,20 @@ impl BeaconRpcClient { let pubkeys = self.fetch_validator_pubkeys(&indexes).await?; Ok(pubkeys.into()) } + + /// Fetches the current head slot of the beacon chain. + /// + /// # Returns + /// The current slot number of the beacon chain head. 
+ pub async fn get_head_slot(&self) -> Result { + let json = self.get_json("eth/v1/beacon/headers/head").await?; + + let slot = json["data"]["header"]["message"]["slot"] + .as_str() + .ok_or(Error::DeserializeError("Missing slot field".into()))? + .parse() + .map_err(|e: std::num::ParseIntError| Error::DeserializeError(e.to_string()))?; + + Ok(slot) + } } From f367c985cd1548581058cbd85f86c60fc32e6fb8 Mon Sep 17 00:00:00 2001 From: petscheit Date: Tue, 4 Feb 2025 19:40:35 +0100 Subject: [PATCH 36/66] fix: merge error --- client-rs/src/routes/mod.rs | 63 +++++++++---------------------------- 1 file changed, 15 insertions(+), 48 deletions(-) diff --git a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs index 4e0d268..2cabd70 100644 --- a/client-rs/src/routes/mod.rs +++ b/client-rs/src/routes/mod.rs @@ -1,5 +1,4 @@ use crate::state::AppState; -use alloy_primitives::map::HashMap; use axum::{ extract::{Path, State}, response::IntoResponse, @@ -19,23 +18,20 @@ pub mod dashboard; // Handler for GET /status pub async fn handle_get_status(State(state): State) -> impl IntoResponse { let last_epoch_in_progress = match state.db_manager.get_latest_epoch_in_progress().await { - Ok(Some(epoch)) => epoch.to_u64().unwrap(), - Ok(None) | Err(_) => 0, - }; - - let in_progress_jobs_count = match state.db_manager.count_jobs_in_progress().await { - Ok(Some(count)) => count, - Ok(None) | Err(_) => 0, + Ok(Some(epoch)) => { + let last_epoch_in_progress = epoch.to_u64().unwrap(); + last_epoch_in_progress + } + Ok(None) => 0, + Err(_) => 0, }; - - let last_sync_committee_in_progress = match state + let in_progress_jobs_count = state.db_manager.count_jobs_in_progress().await.unwrap(); + let last_sync_committee_in_progress = state .db_manager .get_latest_sync_committee_in_progress() .await - { - Ok(Some(sync_committee)) => sync_committee, - Ok(None) | Err(_) => 0, - }; + .unwrap() + .unwrap(); // let beacon_chain_state = state // .db_manager @@ -43,26 +39,12 @@ pub async fn 
handle_get_status(State(state): State) -> impl IntoRespon // .await // .unwrap(); - let jobs_status_counts = state - .db_manager - .get_jobs_count_by_status() - .await - .unwrap_or_default(); - - let mut jobs_status_map = HashMap::new(); - for job_status_count in jobs_status_counts { - jobs_status_map.insert(job_status_count.status.to_string(), job_status_count.count); - } + Json(json!({ "success": true, "details": { + "last_epoch_in_progress": last_epoch_in_progress, + "last_sync_committee_in_progress": last_sync_committee_in_progress, + "jobs_in_progress_count": in_progress_jobs_count, - Json(json!({ - "success": true, - "details": { - "last_epoch_in_progress": last_epoch_in_progress, - "last_sync_committee_in_progress": last_sync_committee_in_progress, - "jobs_in_progress_count": in_progress_jobs_count, - "jobs_statuses": jobs_status_map, - } - })) + } })) } // // Handler for GET /epoch/:slot @@ -224,18 +206,3 @@ pub async fn handle_get_merkle_paths_for_epoch( } } } - -pub fn configure_routes(state: AppState) -> Router { - Router::new() - // Status routes - .route("/status", get(handle_get_status)) - .route("/epoch/:slot/proof", get(handle_get_epoch_proof)) - .route("/committee/:committee_id/hash", get(handle_get_committee_hash)) - .route("/latest/verified/slot", get(handle_get_latest_verified_slot)) - .route("/latest/verified/committee", get(handle_get_latest_verified_committee)) - .route("/job/:job_id/status", get(handle_get_job_status)) - .route("/epoch/:epoch_id/merkle_paths", get(handle_get_merkle_paths_for_epoch)) - // Dashboard routes - .nest("/dashboard", dashboard::routes()) - .with_state(state) -} From c58531d1aaeb700eece1f40c5a63d7765f43c53b Mon Sep 17 00:00:00 2001 From: lakewik Date: Tue, 4 Feb 2025 19:46:13 +0100 Subject: [PATCH 37/66] Small logic fixes --- client-rs/src/daemon.rs | 45 ++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 
13f9408..6ecd44d 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -420,6 +420,33 @@ async fn handle_beacon_chain_head_event( parsed_event.slot, current_epoch_id, current_sync_committee_id, latest_verified_epoch_slot, latest_verified_epoch_id, latest_verified_sync_committee_id, last_epoch_in_progress, last_sync_committee_in_progress ); + // Decide basing on actual state + if helpers::get_sync_committee_id_by_epoch(latest_scheduled_epoch + 1) + > latest_scheduled_sync_committee + { + // We reached end of current sync committee, need to schedule new sync committee proving + match run_sync_committee_update_job( + db_manager.clone(), + latest_scheduled_sync_committee + 1, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating sync committee update job: {}", e); + } + }; + } + + let current_sync_committee_epochs_left = helpers::get_last_epoch_for_sync_committee( + helpers::get_sync_committee_id_by_epoch(latest_scheduled_epoch + 1), + ) - current_epoch_id; + info!( + "{} epochs left in current sync committee", + current_sync_committee_epochs_left + ); + // Decide basing on actual state if epochs_behind > constants::TARGET_BATCH_SIZE { // is_node_in_sync = true; @@ -455,24 +482,6 @@ async fn handle_beacon_chain_head_event( helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id + 1) ); - if helpers::get_sync_committee_id_by_epoch(epoch_to_start_from) - > latest_scheduled_sync_committee - { - // We reached end of current sync committee, need to schedule new sync committee proving - match run_sync_committee_update_job( - db_manager.clone(), - latest_scheduled_sync_committee + 1, - tx.clone(), - ) - .await - { - Ok(()) => {} - Err(e) => { - error!("Error while creating sync committee update job: {}", e); - } - }; - } - if helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) == epoch_to_start_from {} From 75a729d97e03dedf4bda0e2de3339857abeffe2c Mon Sep 17 00:00:00 
2001 From: lakewik Date: Wed, 5 Feb 2025 10:08:52 +0100 Subject: [PATCH 38/66] Add root RPC path & update DB structure --- client-rs/db_structure.sql | 6 ++++++ client-rs/src/daemon.rs | 9 +++++---- client-rs/src/routes/mod.rs | 20 +++++++++++++++++--- 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/client-rs/db_structure.sql b/client-rs/db_structure.sql index b2de221..944aaec 100644 --- a/client-rs/db_structure.sql +++ b/client-rs/db_structure.sql @@ -32,3 +32,9 @@ CREATE TABLE verified_sync_committee ( sync_committee_id UUID PRIMARY KEY, -- Unique identifier for sync committee (slot number/0x2000) sync_committee_hash TEXT NOT NULL -- Sync committee hash that we are creating inside bankai ); + +CREATE TABLE daemon_state ( + latest_known_beacon_slot BIGINT NOT NULL, + latest_known_beacon_block BYTEA NOT NULL, + updated_at TIMESTAMP DEFAULT NOW () +); diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 2dafa85..d922930 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -57,6 +57,7 @@ use routes::{ handle_get_latest_verified_slot, handle_get_merkle_paths_for_epoch, handle_get_status, + handle_root_route, }; use std::net::SocketAddr; use sync_committee::SyncCommitteeUpdate; @@ -156,6 +157,7 @@ async fn main() -> Result<(), Box> { }); let app = Router::new() + .route("/", get(handle_root_route)) .route("/status", get(handle_get_status)) .route( "/get_verified_epoch_proof/:epoch", @@ -442,11 +444,10 @@ async fn handle_beacon_chain_head_event( }; } - let current_sync_committee_epochs_left = helpers::get_last_epoch_for_sync_committee( - helpers::get_sync_committee_id_by_epoch(latest_scheduled_epoch + 1), - ) - current_epoch_id; + let current_sync_committee_epochs_left = + helpers::get_last_epoch_for_sync_committee(current_sync_committee_id) - current_epoch_id; info!( - "{} epochs left in current sync committee", + "{} epochs left in current beacon chain sync committee", current_sync_committee_epochs_left ); diff --git 
a/client-rs/src/routes/mod.rs b/client-rs/src/routes/mod.rs index 2cabd70..50e68b2 100644 --- a/client-rs/src/routes/mod.rs +++ b/client-rs/src/routes/mod.rs @@ -1,10 +1,10 @@ use crate::state::AppState; +use alloy_primitives::map::HashMap; use axum::{ extract::{Path, State}, response::IntoResponse, - Json, routing::{get, post}, - Router, + Json, Router, }; use num_traits::cast::ToPrimitive; use serde_json::{json, Value}; @@ -14,6 +14,9 @@ use uuid::Uuid; pub mod dashboard; // RPC requests handling functions // +pub async fn handle_root_route(State(_state): State) -> impl IntoResponse { + Json(json!({ "success": true, "message": "Bankai daemon running" })) +} // Handler for GET /status pub async fn handle_get_status(State(state): State) -> impl IntoResponse { @@ -38,12 +41,23 @@ pub async fn handle_get_status(State(state): State) -> impl IntoRespon // .get_latest_known_beacon_chain_state() // .await // .unwrap(); + // + let jobs_status_counts = state + .db_manager + .get_jobs_count_by_status() + .await + .unwrap_or_default(); + + let mut jobs_status_map = HashMap::new(); + for job_status_count in jobs_status_counts { + jobs_status_map.insert(job_status_count.status.to_string(), job_status_count.count); + } Json(json!({ "success": true, "details": { "last_epoch_in_progress": last_epoch_in_progress, "last_sync_committee_in_progress": last_sync_committee_in_progress, "jobs_in_progress_count": in_progress_jobs_count, - + "jobs_statuses": jobs_status_map } })) } From c73f2194669c90d161760be8fca715db3b5ad7f1 Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 5 Feb 2025 10:17:51 +0100 Subject: [PATCH 39/66] Add missing entrypoint file --- client-rs/scripts/entrypoint.sh | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 client-rs/scripts/entrypoint.sh diff --git a/client-rs/scripts/entrypoint.sh b/client-rs/scripts/entrypoint.sh new file mode 100644 index 0000000..aa08453 --- /dev/null +++ b/client-rs/scripts/entrypoint.sh @@ -0,0 +1,15 @@ 
+#!/usr/bin/env bash +set -e + +su postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D /var/lib/postgresql/data -l logfile start" + +sleep 5 + +su postgres -c "psql -c \"CREATE USER postgres WITH SUPERUSER PASSWORD 'postgres';\"" || true +su postgres -c "psql -c \"CREATE DATABASE bankai_sepolia;\"" || true + +# We need to do migration here, create initial DB structure form DB file + +echo "PostgreSQL is running. Starting the daemon..." + +exec "$@" From e52aa8e870b671a64ff71f4121c09c5bd0e68b1e Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 5 Feb 2025 12:55:36 +0100 Subject: [PATCH 40/66] Fix bug related to sync committee and add more details --- client-rs/db_structure.sql | 2 + client-rs/src/constants.rs | 3 + client-rs/src/daemon.rs | 164 +++++++++++++----------- client-rs/src/helpers.rs | 4 + client-rs/src/utils/database_manager.rs | 121 ++++++++++++----- 5 files changed, 190 insertions(+), 104 deletions(-) diff --git a/client-rs/db_structure.sql b/client-rs/db_structure.sql index 944aaec..249239b 100644 --- a/client-rs/db_structure.sql +++ b/client-rs/db_structure.sql @@ -8,6 +8,8 @@ CREATE TABLE jobs ( batch_range_end_epoch BIGINT NOT NULL, type TEXT NOT NULL, tx_hash TEXT NULL, + errored_at_step TEXT NOT NULL, + retries_count BIGINT NOT NULL, updated_at TIMESTAMP DEFAULT NOW (), created_at TIMESTAMP DEFAULT NOW () ); diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index 24817ee..2504887 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -9,3 +9,6 @@ pub const STARKNET_SEPOLIA: &str = "0x534e5f5345504f4c4941"; pub const STARKNET_MAINNET: &str = "0x534e5f4d41494e"; pub const USE_TRANSACTOR: bool = false; pub const MAX_JOB_RETRIES_COUNT: u64 = 10; +pub const BEACON_CHAIN_LISTENER_ENABLED: bool = true; +pub const JOBS_RETRY_ENABLED: bool = false; +pub const JOBS_RESUME_ENABLED: bool = false; diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index d922930..7ffe400 100644 --- a/client-rs/src/daemon.rs +++ 
b/client-rs/src/daemon.rs @@ -69,7 +69,7 @@ async fn main() -> Result<(), Box> { // Load .env.sepolia file from_filename(".env.sepolia").ok(); - //let slot_listener_toggle = true; + let slot_listener_toggle = constants::BEACON_CHAIN_LISTENER_ENABLED; let subscriber = FmtSubscriber::builder() //.with_max_level(Level::DEBUG) @@ -128,12 +128,6 @@ async fn main() -> Result<(), Box> { } }); - // Retry any failed jobs before processing new ones - retry_failed_jobs(db_manager.clone(), tx_for_listener.clone()).await?; - - // 🔄 Resume any unfinished jobs before processing new ones - resume_unfinished_jobs(db_manager.clone(), tx_for_listener.clone()).await?; - //Spawn a background task to process jobs tokio::spawn(async move { while let Some(job) = rx.recv().await { @@ -210,6 +204,18 @@ async fn main() -> Result<(), Box> { .unwrap(); }); + // After RPC init, we do some startup checks before start listening to beacon chain: + // + // Retry any failed jobs before processing new ones + if constants::JOBS_RETRY_ENABLED { + retry_failed_jobs(db_manager_for_listener.clone(), tx_for_listener.clone()).await?; + } + + // 🔄 Resume any unfinished jobs before processing new ones + if constants::JOBS_RESUME_ENABLED { + resume_unfinished_jobs(db_manager_for_listener.clone(), tx_for_listener.clone()).await?; + } + //enqueue_sync_committee_jobs(); //enqueue_batch_epochs_jobs(); // @@ -217,93 +223,99 @@ async fn main() -> Result<(), Box> { // Listen for the new slots on BeaconChain // Create an HTTP client let http_stream_client = reqwest::Client::new(); + if slot_listener_toggle { + tokio::spawn(async move { + loop { + // Send the request to the Beacon node + let response = match http_stream_client + .get(&events_endpoint) + //.timeout(std::time::Duration::from_secs(30)) - cannot do this because this will give timeout after each duration since we not using HTTP Pooling here but HTTP streaming + .send() + .await + { + Ok(r) => r, + Err(e) => { + error!("Failed to connect: {}", e); + 
tokio::time::sleep(std::time::Duration::from_secs(5)).await; + continue; // retry + } + }; - tokio::spawn(async move { - loop { - // Send the request to the Beacon node - let response = match http_stream_client - .get(&events_endpoint) - //.timeout(std::time::Duration::from_secs(30)) - cannot do this because this will give timeout after evach duration since we not using HTTP Pooling here but HTTP streaming - .send() - .await - { - Ok(r) => r, - Err(e) => { - error!("Failed to connect: {}", e); + if !response.status().is_success() { + error!("Got non-200: {}", response.status()); tokio::time::sleep(std::time::Duration::from_secs(5)).await; continue; // retry } - }; - if !response.status().is_success() { - error!("Got non-200: {}", response.status()); - tokio::time::sleep(std::time::Duration::from_secs(5)).await; - continue; // retry - } + info!("Listening for new slots, epochs and sync committee updates..."); - info!("Listening for new slots, epochs and sync committee updates..."); + let mut stream = response.bytes_stream(); - let mut stream = response.bytes_stream(); - - loop { - match timeout(Duration::from_secs(30), stream.next()).await { - // Timed out; handle it locally - Err(_elapsed) => { - warn!( + loop { + match timeout(Duration::from_secs(30), stream.next()).await { + // Timed out; handle it locally + Err(_elapsed) => { + warn!( "Timed out waiting for new slot beacon chain event chunk. Maybe some slots was skipped. Will reconnect..." 
); - break; - } - Ok(Some(Ok(bytes))) => { - if let Ok(event_text) = String::from_utf8(bytes.to_vec()) { - // Preprocess the event text - if let Some(json_data) = helpers::extract_json_from_event(&event_text) { - match serde_json::from_str::(&json_data) { - Ok(parsed_event) => { - let epoch_id = helpers::slot_to_epoch_id(parsed_event.slot); - let sync_committee_id = - helpers::slot_to_sync_committee_id(parsed_event.slot); - info!( + break; + } + Ok(Some(Ok(bytes))) => { + if let Ok(event_text) = String::from_utf8(bytes.to_vec()) { + // Preprocess the event text + if let Some(json_data) = + helpers::extract_json_from_event(&event_text) + { + match serde_json::from_str::(&json_data) { + Ok(parsed_event) => { + let epoch_id = + helpers::slot_to_epoch_id(parsed_event.slot); + let sync_committee_id = + helpers::slot_to_sync_committee_id( + parsed_event.slot, + ); + info!( "[EVENT] New beacon slot detected: {} | Block: {} | Epoch: {} | Sync committee: {} | Is epoch transition: {}", parsed_event.slot, parsed_event.block, epoch_id, sync_committee_id, parsed_event.epoch_transition ); - handle_beacon_chain_head_event( - parsed_event, - bankai_for_listener.clone(), - db_manager_for_listener.clone(), - tx_for_listener.clone(), - ) - .await; - } - Err(err) => { - warn!("Failed to parse JSON data: {}", err); + handle_beacon_chain_head_event( + parsed_event, + bankai_for_listener.clone(), + db_manager_for_listener.clone(), + tx_for_listener.clone(), + ) + .await; + } + Err(err) => { + warn!("Failed to parse JSON data: {}", err); + } } + } else { + warn!("No valid JSON data found in event: {}", event_text); } - } else { - warn!("No valid JSON data found in event: {}", event_text); } } - } - Ok(Some(Err(e))) => { - warn!("Beacon chain client stream error: {}", e); - break; // break the while, then reconnect - } - Ok(None) => { - warn!("Beacon chain client stream ended"); - // Stream ended - break; + Ok(Some(Err(e))) => { + warn!("Beacon chain client stream error: {}", e); + break; 
// break the while, then reconnect + } + Ok(None) => { + warn!("Beacon chain client stream ended"); + // Stream ended + break; + } } } - } - // If we got here because of `timeout` returning `Err(_)`, that means 30s - // passed without a single chunk of data arriving or - // the RPC server has closed connection or some other unknown network error occured + // If we got here because of `timeout` returning `Err(_)`, that means 30s + // passed without a single chunk of data arriving or + // the RPC server has closed connection or some other unknown network error occured - // If we exit the while, we reconnect in the outer loop - info!("Timeout waiting for next event, reconnecting to beacon node..."); - } - }); + // If we exit the while, we reconnect in the outer loop + info!("Timeout waiting for next event, reconnecting to beacon node..."); + } + }); + } // Wait for the server task to finish server_task.await?; @@ -696,6 +708,10 @@ async fn evaluate_jobs_statuses( .set_ready_to_broadcast_for_batch_epochs(first_epoch, last_epoch) // Set READY_TO_BROADCAST when OFFCHAIN_COMPUTATION_FINISHED .await?; + db_manager + .set_ready_to_broadcast_for_sync_committee(latest_verified_sync_committee_id + 1) + .await?; + Ok(()) } diff --git a/client-rs/src/helpers.rs b/client-rs/src/helpers.rs index 412bcf3..55a1b02 100644 --- a/client-rs/src/helpers.rs +++ b/client-rs/src/helpers.rs @@ -77,6 +77,10 @@ pub fn get_first_slot_for_sync_committee(slot: u64) -> u64 { slot * SLOTS_PER_SYNC_COMMITTEE } +pub fn get_last_slot_for_sync_committee(slot: u64) -> u64 { + (slot + 1) * SLOTS_PER_SYNC_COMMITTEE - 1 +} + // Since beacon chain RPCs have different response structure (quicknode responds different than nidereal) we use this event extraction logic pub fn extract_json_from_event(event_text: &str) -> Option { for line in event_text.lines() { diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 7a857d2..7d17d47 100644 --- 
a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -440,6 +440,24 @@ impl DatabaseManager { Ok(()) } + pub async fn count_epoch_jobs_waiting_for_sync_committe_update( + &self, + latest_verified_sync_committee: u64, + ) -> Result> { + let epoch_to_start_check_from = + helpers::get_last_epoch_for_sync_committee(latest_verified_sync_committee) + 1; // So we getting first epoch number from latest unverified committee + let row = self + .client + .query_one( + "SELECT COUNT(*) as count FROM jobs WHERE batch_range_begin_epoch >= $1 + AND job_status = 'OFFCHAIN_COMPUTATION_FINISHED'", + &[&epoch_to_start_check_from.to_i64()], + ) + .await?; + + Ok(row.get::<_, i64>("count").to_u64().unwrap_or(0)) + } + pub async fn set_ready_to_broadcast_for_batch_epochs( &self, first_epoch: u64, @@ -457,7 +475,44 @@ impl DatabaseManager { if rows_affected > 0 { info!( - "{} jobs changed state to READY_TO_BROADCAST_ONCHAIN", + "{} EPOCH_BATCH_UPDATE jobs changed state to READY_TO_BROADCAST_ONCHAIN", + rows_affected + ); + } + Ok(()) + } + + pub async fn set_ready_to_broadcast_for_sync_committee( + &self, + sync_committee_id: u64, + ) -> Result<(), Box> { + let sync_commite_first_slot = helpers::get_first_slot_for_sync_committee(sync_committee_id); + let sync_commite_last_slot = helpers::get_last_slot_for_sync_committee(sync_committee_id); + + let rows_affected = self + .client + .execute( + "UPDATE jobs + SET job_status = 'READY_TO_BROADCAST_ONCHAIN', updated_at = NOW() + WHERE type = 'SYNC_COMMITTEE_UPDATE' + AND job_status = 'OFFCHAIN_COMPUTATION_FINISHED' + AND slot BETWEEN $1 AND $2 + ", + &[ + &sync_commite_first_slot.to_i64(), + &sync_commite_last_slot.to_i64(), + ], + ) + .await?; + + if rows_affected == 1 { + info!( + "{} SYNC_COMMITTEE_UPDATE jobs changed state to READY_TO_BROADCAST_ONCHAIN", + rows_affected + ); + } else if rows_affected > 1 { + warn!( + "{} SYNC_COMMITTEE_UPDATE jobs changed state to READY_TO_BROADCAST_ONCHAIN in one 
query, something may be wrong!", rows_affected ); } @@ -652,18 +707,19 @@ impl DatabaseManager { } pub async fn count_total_jobs(&self) -> Result> { - let row = self.client - .query_one( - "SELECT COUNT(*) as count FROM jobs", - &[], - ) + let row = self + .client + .query_one("SELECT COUNT(*) as count FROM jobs", &[]) .await?; Ok(row.get::<_, i64>("count").to_u64().unwrap_or(0)) } - pub async fn count_successful_jobs(&self) -> Result> { - let row = self.client + pub async fn count_successful_jobs( + &self, + ) -> Result> { + let row = self + .client .query_one( "SELECT COUNT(*) as count FROM jobs WHERE job_status = 'DONE'", &[], @@ -673,29 +729,37 @@ impl DatabaseManager { Ok(row.get::<_, i64>("count").to_u64().unwrap_or(0)) } - pub async fn get_average_job_duration(&self) -> Result> { - let row = self.client + pub async fn get_average_job_duration( + &self, + ) -> Result> { + let row = self + .client .query_one( - "SELECT EXTRACT(EPOCH FROM AVG(updated_at - created_at))::INTEGER as avg_duration - FROM jobs + "SELECT EXTRACT(EPOCH FROM AVG(updated_at - created_at))::INTEGER as avg_duration + FROM jobs WHERE job_status = 'DONE'", &[], ) .await?; - Ok(i64::from(row.get::<_, Option>("avg_duration").unwrap_or(0))) + Ok(i64::from( + row.get::<_, Option>("avg_duration").unwrap_or(0), + )) } - - pub async fn get_recent_batch_jobs(&self, limit: i64) -> Result, Box> { - let rows = self.client + pub async fn get_recent_batch_jobs( + &self, + limit: i64, + ) -> Result, Box> { + let rows = self + .client .query( - "SELECT *, + "SELECT *, to_char(created_at, 'HH24:MI:SS') as created_time, - to_char(updated_at, 'HH24:MI:SS') as updated_time - FROM jobs - WHERE type = 'EPOCH_BATCH_UPDATE' - ORDER BY batch_range_begin_epoch DESC + to_char(updated_at, 'HH24:MI:SS') as updated_time + FROM jobs + WHERE type = 'EPOCH_BATCH_UPDATE' + ORDER BY batch_range_begin_epoch DESC LIMIT $1", &[&limit], ) @@ -707,10 +771,9 @@ impl DatabaseManager { let job_type_str: String = row.get("type"); let 
job_status_str: String = row.get("job_status"); - let job_type = JobType::from_str(&job_type_str) - .expect("Failed to parse job type"); - let job_status = JobStatus::from_str(&job_status_str) - .expect("Failed to parse job status"); + let job_type = JobType::from_str(&job_type_str).expect("Failed to parse job type"); + let job_status = + JobStatus::from_str(&job_status_str).expect("Failed to parse job status"); JobWithTimestamps { job: JobSchema { @@ -724,7 +787,8 @@ impl DatabaseManager { .get::<&str, Option>("batch_range_end_epoch") .unwrap_or(0), job_type, - atlantic_proof_generate_batch_id: row.get("atlantic_proof_generate_batch_id"), + atlantic_proof_generate_batch_id: row + .get("atlantic_proof_generate_batch_id"), atlantic_proof_wrapper_batch_id: row.get("atlantic_proof_wrapper_batch_id"), }, created_at: row.get("created_time"), @@ -738,10 +802,7 @@ impl DatabaseManager { } pub async fn is_connected(&self) -> bool { - match self.client - .query_one("SELECT 1", &[]) - .await - { + match self.client.query_one("SELECT 1", &[]).await { Ok(_) => true, Err(_) => false, } From 02340be83bb82f545ba1e80baaabe8461696fbdf Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 5 Feb 2025 17:37:39 +0100 Subject: [PATCH 41/66] Improvements fail handling --- client-rs/db_structure.sql | 4 ++-- client-rs/src/constants.rs | 1 + client-rs/src/daemon.rs | 32 +++++++++++++++++++++---- client-rs/src/utils/database_manager.rs | 21 +++++++++++++++- client-rs/src/utils/starknet_client.rs | 8 ++----- 5 files changed, 53 insertions(+), 13 deletions(-) diff --git a/client-rs/db_structure.sql b/client-rs/db_structure.sql index 249239b..b74c388 100644 --- a/client-rs/db_structure.sql +++ b/client-rs/db_structure.sql @@ -4,8 +4,8 @@ CREATE TABLE jobs ( atlantic_proof_generate_batch_id TEXT NULL, atlantic_proof_wrapper_batch_id TEXT NULL, slot BIGINT NOT NULL, -- Slot associated with the job - batch_range_begin_epoch BIGINT NOT NULL, - batch_range_end_epoch BIGINT NOT NULL, + 
batch_range_begin_epoch BIGINT NULL, + batch_range_end_epoch BIGINT NULL, type TEXT NOT NULL, tx_hash TEXT NULL, errored_at_step TEXT NOT NULL, diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index 2504887..12973df 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -12,3 +12,4 @@ pub const MAX_JOB_RETRIES_COUNT: u64 = 10; pub const BEACON_CHAIN_LISTENER_ENABLED: bool = true; pub const JOBS_RETRY_ENABLED: bool = false; pub const JOBS_RESUME_ENABLED: bool = false; +pub const RETRY_DELAY_MS: u64 = 300_0000; diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 7ffe400..a0da36e 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -696,8 +696,10 @@ async fn evaluate_jobs_statuses( // Firstly we get all jobs with status OFFCHAIN_COMPUTATION_FINISHED // We calculating the start and end epoch for provided last verified sync committe // and setting READY_TO_BROADCAST status for epochs up to the last epoch belonging to provided latest_verified_sync_committee_id - let first_epoch = get_first_epoch_for_sync_committee(latest_verified_sync_committee_id); - let last_epoch = get_last_epoch_for_sync_committee(latest_verified_sync_committee_id); + let first_epoch = get_first_epoch_for_sync_committee(latest_verified_sync_committee_id + 1); + let last_epoch = get_last_epoch_for_sync_committee(latest_verified_sync_committee_id + 1); + + //let first_epoch = first_epoch - 32; // So we also broadcast first epoch from next sync committee info!( "Evaluating jobs for epochs range from {} to {}, for sync committee {}", @@ -709,7 +711,7 @@ async fn evaluate_jobs_statuses( .await?; db_manager - .set_ready_to_broadcast_for_sync_committee(latest_verified_sync_committee_id + 1) + .set_ready_to_broadcast_for_sync_committee(latest_verified_sync_committee_id) .await?; Ok(()) @@ -888,7 +890,29 @@ async fn broadcast_onchain_ready_jobs( .update_job_status(job.job_uuid, JobStatus::ProofVerifyCalledOnchain) .await?; - let 
_send_result = db_manager.set_job_txhash(job.job_uuid, txhash).await?; + let _ = db_manager.set_job_txhash(job.job_uuid, txhash).await; + + // match send_result { + // Ok(_) => { + // info!("[EPOCH BATCH JOB] Transaction sent"); + // db_manager + // .update_job_status(job.job_uuid, JobStatus::VerifyTransactionSend) + // .await?; + + // // Iterate over and insert epochs proofs to db + // for (index, epoch) in + // circuit_inputs.circuit_inputs.epochs.iter().enumerate() + // { + // println!("Epoch {}: {:?}", index, epoch.expected_circuit_outputs); + // } + // } + // Err(e) => { + // error!("[EPOCH BATCH JOB] Transaction sending error: {:?}", e); + // db_manager + // .update_job_status(job.job_uuid, JobStatus::Error) + // .await?; + // } + // } let confirmation_result = bankai.starknet_client.wait_for_confirmation(txhash).await; diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 7d17d47..679ac8d 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -9,7 +9,7 @@ use chrono::NaiveDateTime; use num_traits::ToPrimitive; use std::collections::HashMap; use tokio_postgres::{Client, Row}; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; use uuid::Uuid; #[derive(Debug)] @@ -440,6 +440,20 @@ impl DatabaseManager { Ok(()) } + pub async fn set_failure_info( + &self, + job_id: Uuid, + failed_at_step: JobStatus, + ) -> Result<(), Box> { + self.client + .execute( + "UPDATE jobs SET errored_at_step = $1, updated_at = NOW() WHERE job_uuid = $2", + &[&failed_at_step.to_string(), &job_id], + ) + .await?; + Ok(()) + } + pub async fn count_epoch_jobs_waiting_for_sync_committe_update( &self, latest_verified_sync_committee: u64, @@ -489,6 +503,11 @@ impl DatabaseManager { let sync_commite_first_slot = helpers::get_first_slot_for_sync_committee(sync_committee_id); let sync_commite_last_slot = helpers::get_last_slot_for_sync_committee(sync_committee_id); + debug!( + 
"Setting syn committee between slots {} and {} to READY_TO_BROADCAST_ONCHAIN", + sync_commite_first_slot, sync_commite_last_slot + ); + let rows_affected = self .client .execute( diff --git a/client-rs/src/utils/starknet_client.rs b/client-rs/src/utils/starknet_client.rs index 03a8a3f..8af150b 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -166,12 +166,8 @@ impl StarknetClient { selector, calldata, }; - - let send_result = self - .account - .execute_v1(vec![call]) - .send() - .await; + + let send_result = self.account.execute_v1(vec![call]).send().await; match send_result { Ok(tx_response) => { From 1afdb19b57f19426a507502a2bcd067345757638 Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 5 Feb 2025 17:38:51 +0100 Subject: [PATCH 42/66] Fix sync committee id --- client-rs/src/daemon.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index a0da36e..b4c5a0a 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -711,7 +711,7 @@ async fn evaluate_jobs_statuses( .await?; db_manager - .set_ready_to_broadcast_for_sync_committee(latest_verified_sync_committee_id) + .set_ready_to_broadcast_for_sync_committee(latest_verified_sync_committee_id + 1) .await?; Ok(()) From fd66ec2cdd0521c2929c393cf38f31891d552ee2 Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 5 Feb 2025 18:59:35 +0100 Subject: [PATCH 43/66] Simplify db manager & add failures reason --- client-rs/Cargo.lock | 1 + client-rs/Cargo.toml | 5 +- client-rs/db_structure.sql | 3 +- client-rs/src/daemon.rs | 8 +- client-rs/src/routes/dashboard.rs | 52 ++++--- client-rs/src/utils/database_manager.rs | 172 +++++++++--------------- 6 files changed, 106 insertions(+), 135 deletions(-) diff --git a/client-rs/Cargo.lock b/client-rs/Cargo.lock index d01e5b2..e340484 100644 --- a/client-rs/Cargo.lock +++ b/client-rs/Cargo.lock @@ -2799,6 +2799,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f66ea23a2d0e5734297357705193335e0a957696f34bed2f2faefacb2fec336f" dependencies = [ "bytes", + "chrono", "fallible-iterator", "postgres-derive", "postgres-protocol", diff --git a/client-rs/Cargo.toml b/client-rs/Cargo.toml index 2015ead..9fd96fc 100644 --- a/client-rs/Cargo.toml +++ b/client-rs/Cargo.toml @@ -32,7 +32,10 @@ starknet = "0.12.0" tree_hash_derive = "0.8.0" tree_hash = "0.8.0" dotenv = "0.15" -tokio-postgres = { version = "0.7.12", features = ["with-uuid-1"] } +tokio-postgres = { version = "0.7.12", features = [ + "with-uuid-1", + "with-chrono-0_4", +] } axum = "0.7.9" thiserror = "2.0.9" tracing = "0.1.41" diff --git a/client-rs/db_structure.sql b/client-rs/db_structure.sql index b74c388..cba57bd 100644 --- a/client-rs/db_structure.sql +++ b/client-rs/db_structure.sql @@ -8,8 +8,9 @@ CREATE TABLE jobs ( batch_range_end_epoch BIGINT NULL, type TEXT NOT NULL, tx_hash TEXT NULL, - errored_at_step TEXT NOT NULL, + failed_at_step TEXT NOT NULL, retries_count BIGINT NOT NULL, + last_failure_time TIMESTAMP NULL, updated_at TIMESTAMP DEFAULT NOW (), created_at TIMESTAMP DEFAULT NOW () ); diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index b4c5a0a..ca8c88f 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -142,6 +142,8 @@ async fn main() -> Result<(), Box> { info!("Job {} completed successfully", job_id); } Err(e) => { + let job_data = db_clone.get_job_by_id(job_id).await.unwrap().unwrap(); + let _ = db_clone.set_failure_info(job_id, job_data.job_status).await; let _ = db_clone.update_job_status(job_id, JobStatus::Error).await; error!("Error processing job {}: {}", job_id, e); } @@ -825,15 +827,17 @@ async fn retry_failed_jobs( match job_to_retry.job_type { JobType::SyncCommitteeUpdate => { info!( - "Requesting retry of failed job {}... (sync committee update job for sync committee {})", + "Requesting retry of failed job {} failed previously at step {}... 
(sync committee update job for sync committee {})", job_id, + job.failed_at_step.unwrap(), helpers::slot_to_sync_committee_id(job.slot.to_u64().unwrap()) ); } JobType::EpochBatchUpdate => { info!( - "Requesting retry of failed job {}... (batch epoch update job for epochs from {} to {})", + "Requesting retry of failed job {} failed previously at step {} ... (batch epoch update job for epochs from {} to {})", job_id, + job.failed_at_step.unwrap(), job.batch_range_begin_epoch, job.batch_range_end_epoch ); diff --git a/client-rs/src/routes/dashboard.rs b/client-rs/src/routes/dashboard.rs index c0f3e9f..034016a 100644 --- a/client-rs/src/routes/dashboard.rs +++ b/client-rs/src/routes/dashboard.rs @@ -2,9 +2,7 @@ use crate::state::{AppState, JobStatus}; use axum::extract::State; use num_traits::SaturatingSub; -pub async fn handle_get_dashboard( - State(state): State, -) -> String { +pub async fn handle_get_dashboard(State(state): State) -> String { let db = state.db_manager.clone(); let bankai = state.bankai.clone(); @@ -18,7 +16,7 @@ pub async fn handle_get_dashboard( .to_string() .parse::() .unwrap_or(0); - + // Calculate success rate from database let total_jobs = db.count_total_jobs().await.unwrap_or(0); let successful_jobs = db.count_successful_jobs().await.unwrap_or(0); @@ -32,11 +30,15 @@ pub async fn handle_get_dashboard( let avg_duration = db.get_average_job_duration().await.unwrap_or(0); let avg_duration_str = format!("{}s", avg_duration); - let jobs_in_progress = db.count_jobs_in_progress().await.unwrap_or(Some(0)).unwrap(); - - // Fetch last 3 batch jobs - let recent_batches = db.get_recent_batch_jobs(6).await.unwrap_or_default(); - + let jobs_in_progress = db + .count_jobs_in_progress() + .await + .unwrap_or(Some(0)) + .unwrap(); + + // Fetch last 12 batch jobs + let recent_batches = db.get_recent_batch_jobs(12).await.unwrap_or_default(); + // Format batch information let batch_info = recent_batches .iter() @@ -62,17 +64,27 @@ pub async fn 
handle_get_dashboard( .join("\n"); let batch_display = if recent_batches.is_empty() { - " ║ No recent batches found ║ ".to_string() + " ║ No recent batches found ║ " + .to_string() } else { batch_info }; // Update system health indicators with simpler checks - let daemon_status = "● Active"; - let db_status = if db.is_connected().await { "● Connected" } else { "○ Disconnected" }; - let beacon_status = if bankai.client.get_head_slot().await.is_ok() { "● Connected" } else { "○ Disconnected" }; + let daemon_status = "● Active"; + let db_status = if db.is_connected().await { + "● Connected" + } else { + "○ Disconnected" + }; + let beacon_status = if bankai.client.get_head_slot().await.is_ok() { + "● Connected" + } else { + "○ Disconnected" + }; - let epoch_gap = (latest_beacon_slot.saturating_sub(latest_verified_slot) as f64 / 32.0).round() as u64; + let epoch_gap = + (latest_beacon_slot.saturating_sub(latest_verified_slot) as f64 / 32.0).round() as u64; create_ascii_dashboard( latest_beacon_slot, @@ -84,7 +96,7 @@ pub async fn handle_get_dashboard( daemon_status, db_status, beacon_status, - &batch_display + &batch_display, ) } @@ -101,11 +113,11 @@ pub fn create_ascii_dashboard( batch_display: &str, ) -> String { format!( -r#" - ____ _ _ _ _ __ _ ___ + r#" + ____ _ _ _ _ __ _ ___ | __ ) / \ | \ | | |/ / / \ |_ _| -| _ \ / _ \ | \| | ' / / _ \ | | -| |_) / ___ \| |\ | . \ / ___ \ | | +| _ \ / _ \ | \| | ' / / _ \ | | +| |_) / ___ \| |\ | . 
\ / ___ \ | | |____/_/ \_\_| \_|_|\_/_/ \_\___| ╔════════════════════════════════════════ DASHBOARD OVERVIEW ═════════════════════════════════════╗ @@ -140,4 +152,4 @@ r#" epoch_gap = epoch_gap, batch_display_block = batch_display ) -} \ No newline at end of file +} diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 679ac8d..9d4ffae 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -22,7 +22,9 @@ pub struct JobSchema { pub job_type: JobType, pub atlantic_proof_generate_batch_id: Option, pub atlantic_proof_wrapper_batch_id: Option, - //pub updated_at: i64, + pub failed_at_step: Option, + pub retries_count: Option, + pub last_failure_time: Option, //pub updated_at: i64, } #[derive(Debug)] @@ -196,32 +198,12 @@ impl DatabaseManager { .query_opt("SELECT * FROM jobs WHERE job_uuid = $1", &[&job_id]) .await?; - Ok(row_opt.map(|row| { - let job_status_str: String = row.get("job_status"); - let job_status = job_status_str - .parse::() - .expect("Unknown job status from DB"); - - let job_type_str: String = row.get("type"); - let job_type = job_type_str - .parse::() - .expect("Unknown job type from DB"); - - JobSchema { - job_uuid: row.get("job_uuid"), - job_status, - slot: row.get("slot"), - batch_range_begin_epoch: row - .get::<&str, Option>("batch_range_begin_epoch") - .unwrap_or(0), - batch_range_end_epoch: row - .get::<&str, Option>("batch_range_end_epoch") - .unwrap_or(0), - job_type, - atlantic_proof_generate_batch_id: row.get("atlantic_proof_generate_batch_id"), - atlantic_proof_wrapper_batch_id: row.get("atlantic_proof_wrapper_batch_id"), - } - })) + let row_opt = self + .client + .query_opt("SELECT * FROM jobs WHERE job_uuid = $1", &[&job_id]) + .await?; + + row_opt.map(Self::map_row_to_job).transpose() } // pub async fn get_latest_slot_id_in_progress( // &self, @@ -390,37 +372,10 @@ impl DatabaseManager { ) .await?; - // Map rows into JobSchema structs - let jobs: 
Vec = rows - .into_iter() - .map( - |row: Row| -> Result> { - let job_type_str: String = row.get("type"); - let job_status_str: String = row.get("job_status"); - - let job_type = JobType::from_str(&job_type_str) - .map_err(|err| format!("Failed to parse job type: {}", err))?; - let job_status = JobStatus::from_str(&job_status_str) - .map_err(|err| format!("Failed to parse job status: {}", err))?; - - Ok(JobSchema { - job_uuid: row.get("job_uuid"), - job_status, - slot: row.get("slot"), - batch_range_begin_epoch: row - .get::<&str, Option>("batch_range_begin_epoch") - .unwrap_or(0), - batch_range_end_epoch: row - .get::<&str, Option>("batch_range_end_epoch") - .unwrap_or(0), - job_type, - atlantic_proof_generate_batch_id: row - .get("atlantic_proof_generate_batch_id"), - atlantic_proof_wrapper_batch_id: row.get("atlantic_proof_wrapper_batch_id"), - //updated_at: row.get("updated_at"), - }) - }, - ) + let jobs = rows + .iter() + .cloned() + .map(Self::map_row_to_job) .collect::, _>>()?; Ok(jobs) @@ -447,7 +402,7 @@ impl DatabaseManager { ) -> Result<(), Box> { self.client .execute( - "UPDATE jobs SET errored_at_step = $1, updated_at = NOW() WHERE job_uuid = $2", + "UPDATE jobs SET errored_at_step = $1, updated_at = NOW(), last_failure_time = NOW() WHERE job_uuid = $2", &[&failed_at_step.to_string(), &job_id], ) .await?; @@ -609,36 +564,10 @@ impl DatabaseManager { let rows = self.client.query(&query, ¶ms).await?; - let jobs: Vec = rows - .into_iter() - .map( - |row: Row| -> Result> { - let job_type_str: String = row.get("type"); - let job_status_str: String = row.get("job_status"); - - let job_type = JobType::from_str(&job_type_str) - .map_err(|err| format!("Failed to parse job type: {}", err))?; - let job_status = JobStatus::from_str(&job_status_str) - .map_err(|err| format!("Failed to parse job status: {}", err))?; - - Ok(JobSchema { - job_uuid: row.get("job_uuid"), - job_status, - slot: row.get("slot"), - batch_range_begin_epoch: row - .get::<&str, 
Option>("batch_range_begin_epoch") - .unwrap_or(0), - batch_range_end_epoch: row - .get::<&str, Option>("batch_range_end_epoch") - .unwrap_or(0), - job_type, - atlantic_proof_generate_batch_id: row - .get("atlantic_proof_generate_batch_id"), - atlantic_proof_wrapper_batch_id: row.get("atlantic_proof_wrapper_batch_id"), - //updated_at: row.get("updated_at"), - }) - }, - ) + let jobs = rows + .iter() + .cloned() + .map(Self::map_row_to_job) .collect::, _>>()?; Ok(jobs) @@ -787,29 +716,9 @@ impl DatabaseManager { let jobs = rows .into_iter() .map(|row| { - let job_type_str: String = row.get("type"); - let job_status_str: String = row.get("job_status"); - - let job_type = JobType::from_str(&job_type_str).expect("Failed to parse job type"); - let job_status = - JobStatus::from_str(&job_status_str).expect("Failed to parse job status"); - + let job = Self::map_row_to_job(row.clone()).unwrap(); JobWithTimestamps { - job: JobSchema { - job_uuid: row.get("job_uuid"), - job_status, - slot: row.get("slot"), - batch_range_begin_epoch: row - .get::<&str, Option>("batch_range_begin_epoch") - .unwrap_or(0), - batch_range_end_epoch: row - .get::<&str, Option>("batch_range_end_epoch") - .unwrap_or(0), - job_type, - atlantic_proof_generate_batch_id: row - .get("atlantic_proof_generate_batch_id"), - atlantic_proof_wrapper_batch_id: row.get("atlantic_proof_wrapper_batch_id"), - }, + job, created_at: row.get("created_time"), updated_at: row.get("updated_time"), tx_hash: row.get("tx_hash"), @@ -874,4 +783,45 @@ impl DatabaseManager { Ok(result) } + + // Helper functions + fn map_row_to_job(row: Row) -> Result> { + let job_status_str: String = row.get("job_status"); + let job_status = job_status_str + .parse::() + .map_err(|err| format!("Failed to parse job status: {}", err))?; + + let job_type_str: String = row.get("type"); + let job_type = job_type_str + .parse::() + .map_err(|err| format!("Failed to parse job type: {}", err))?; + + let failed_at_step: Option = row + .get::<_, 
Option>("failed_at_step") + .map(|step| { + step.parse::() + .map_err(|err| format!("Failed to parse job type: {}", err)) + }) + .transpose()?; + + let last_failure_time: Option = row.get("last_failure_time"); + + Ok(JobSchema { + job_uuid: row.get("job_uuid"), + job_status, + slot: row.get("slot"), + batch_range_begin_epoch: row + .get::<&str, Option>("batch_range_begin_epoch") + .unwrap_or(0), + batch_range_end_epoch: row + .get::<&str, Option>("batch_range_end_epoch") + .unwrap_or(0), + job_type, + atlantic_proof_generate_batch_id: row.get("atlantic_proof_generate_batch_id"), + atlantic_proof_wrapper_batch_id: row.get("atlantic_proof_wrapper_batch_id"), + failed_at_step, + retries_count: row.get("retries_count"), + last_failure_time, + }) + } } From 85eb4e844165f236f85c88ce5c5ec962696f5b1e Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 5 Feb 2025 19:14:29 +0100 Subject: [PATCH 44/66] Increase status display count and fix errors on etry --- client-rs/src/constants.rs | 4 ++-- client-rs/src/daemon.rs | 8 ++++++-- client-rs/src/routes/dashboard.rs | 4 ++-- client-rs/src/utils/database_manager.rs | 11 +++-------- 4 files changed, 13 insertions(+), 14 deletions(-) diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index 12973df..c943b1c 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -10,6 +10,6 @@ pub const STARKNET_MAINNET: &str = "0x534e5f4d41494e"; pub const USE_TRANSACTOR: bool = false; pub const MAX_JOB_RETRIES_COUNT: u64 = 10; pub const BEACON_CHAIN_LISTENER_ENABLED: bool = true; -pub const JOBS_RETRY_ENABLED: bool = false; -pub const JOBS_RESUME_ENABLED: bool = false; +pub const JOBS_RETRY_ENABLED: bool = true; +pub const JOBS_RESUME_ENABLED: bool = true; pub const RETRY_DELAY_MS: u64 = 300_0000; diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index ca8c88f..a9739fc 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -822,6 +822,8 @@ async fn retry_failed_jobs( 
batch_range_end_epoch: job.batch_range_end_epoch.to_u64(), }; + let failed_at_step = job.failed_at_step.unwrap_or(JobStatus::Created); + let db_clone = db_manager.clone(); let tx_clone = tx.clone(); tokio::spawn(async move { match job_to_retry.job_type { @@ -829,7 +831,7 @@ async fn retry_failed_jobs( info!( "Requesting retry of failed job {} failed previously at step {}... (sync committee update job for sync committee {})", job_id, - job.failed_at_step.unwrap(), + failed_at_step.to_string(), helpers::slot_to_sync_committee_id(job.slot.to_u64().unwrap()) ); } @@ -837,7 +839,7 @@ async fn retry_failed_jobs( info!( "Requesting retry of failed job {} failed previously at step {} ... (batch epoch update job for epochs from {} to {})", job_id, - job.failed_at_step.unwrap(), + failed_at_step.to_string(), job.batch_range_begin_epoch, job.batch_range_end_epoch ); @@ -846,6 +848,8 @@ async fn retry_failed_jobs( if tx_clone.send(job_to_retry).await.is_err() { // return Err("Failed to send job".into()); + // Update the status to status what was at the error occurene time + let _ = db_clone.update_job_status(job_id, failed_at_step).await; error!("Error retrying job: {}", job_id); } }); diff --git a/client-rs/src/routes/dashboard.rs b/client-rs/src/routes/dashboard.rs index 034016a..a35234d 100644 --- a/client-rs/src/routes/dashboard.rs +++ b/client-rs/src/routes/dashboard.rs @@ -36,8 +36,8 @@ pub async fn handle_get_dashboard(State(state): State) -> String { .unwrap_or(Some(0)) .unwrap(); - // Fetch last 12 batch jobs - let recent_batches = db.get_recent_batch_jobs(12).await.unwrap_or_default(); + // Fetch last 20 batch jobs + let recent_batches = db.get_recent_batch_jobs(20).await.unwrap_or_default(); // Format batch information let batch_info = recent_batches diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 9d4ffae..7a71c70 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ 
-22,7 +22,7 @@ pub struct JobSchema { pub job_type: JobType, pub atlantic_proof_generate_batch_id: Option, pub atlantic_proof_wrapper_batch_id: Option, - pub failed_at_step: Option, + pub failed_at_step: Option, pub retries_count: Option, pub last_failure_time: Option, //pub updated_at: i64, } @@ -198,11 +198,6 @@ impl DatabaseManager { .query_opt("SELECT * FROM jobs WHERE job_uuid = $1", &[&job_id]) .await?; - let row_opt = self - .client - .query_opt("SELECT * FROM jobs WHERE job_uuid = $1", &[&job_id]) - .await?; - row_opt.map(Self::map_row_to_job).transpose() } // pub async fn get_latest_slot_id_in_progress( @@ -796,10 +791,10 @@ impl DatabaseManager { .parse::() .map_err(|err| format!("Failed to parse job type: {}", err))?; - let failed_at_step: Option = row + let failed_at_step: Option = row .get::<_, Option>("failed_at_step") .map(|step| { - step.parse::() + step.parse::() .map_err(|err| format!("Failed to parse job type: {}", err)) }) .transpose()?; From ed92d544e4110b3224391d94711a8edb7cf5197e Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 5 Feb 2025 21:36:45 +0100 Subject: [PATCH 45/66] Fix error propagation and other minor fixes --- client-rs/src/constants.rs | 2 +- client-rs/src/daemon.rs | 4 ++-- client-rs/src/utils/database_manager.rs | 5 +++++ client-rs/src/utils/starknet_client.rs | 2 +- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index c943b1c..5f9528c 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -2,7 +2,7 @@ pub const SLOTS_PER_EPOCH: u64 = 32; // For mainnet pub const SLOTS_PER_SYNC_COMMITTEE: u64 = 8192; // For mainnet pub const TARGET_BATCH_SIZE: u64 = 32; // Defines how many epochs in one batch pub const EPOCHS_PER_SYNC_COMMITTEE: u64 = 256; // For mainnet -pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 7; // Define the limit of how many jobs can be in state "in progress" concurrently +pub const MAX_CONCURRENT_JOBS_IN_PROGRESS: u64 = 
16; // Define the limit of how many jobs can be in state "in progress" concurrently pub const MAX_CONCURRENT_PIE_GENERATIONS: usize = 1; // Define how many concurrent trace (pie file) generation jobs are allowed to not exhaust resources pub const MAX_CONCURRENT_RPC_DATA_FETCH_JOBS: usize = 1; // Define how many data fetching jobs can be performed concurrently to not overload RPC pub const STARKNET_SEPOLIA: &str = "0x534e5f5345504f4c4941"; diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index a9739fc..30c00d5 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -846,10 +846,10 @@ async fn retry_failed_jobs( } } + let _ = db_clone.update_job_status(job_id, failed_at_step).await; if tx_clone.send(job_to_retry).await.is_err() { // return Err("Failed to send job".into()); // Update the status to status what was at the error occurene time - let _ = db_clone.update_job_status(job_id, failed_at_step).await; error!("Error retrying job: {}", job_id); } }); @@ -1231,7 +1231,7 @@ async fn process_job( JobType::EpochBatchUpdate => { match current_status { JobStatus::Created => { - info!("[BATCH EPOCH JOB] Preparing inputs for program..."); + info!("[BATCH EPOCH JOB] Preparing inputs for program for epochs from {} to {}...", job.batch_range_begin_epoch.unwrap(), job.batch_range_end_epoch.unwrap()); let circuit_inputs = EpochUpdateBatch::new_by_epoch_range( &bankai, db_manager.clone(), diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 7a71c70..5d457a5 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -381,6 +381,11 @@ impl DatabaseManager { job_id: Uuid, new_status: JobStatus, ) -> Result<(), Box> { + info!( + "Job {} status changed to {}", + job_id, + new_status.to_string() + ); self.client .execute( "UPDATE jobs SET job_status = $1, updated_at = NOW() WHERE job_uuid = $2", diff --git a/client-rs/src/utils/starknet_client.rs 
b/client-rs/src/utils/starknet_client.rs index 8af150b..e307d8f 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -288,7 +288,7 @@ impl StarknetClient { let delay = Duration::from_secs(5); for _ in 0..max_retries { - let status = self.get_transaction_status(tx_hash).await?; + let status = self.get_transaction_status(tx_hash).await.unwrap(); info!("Starknet transaction status: {:?}", status); From 107ef4be41d56b9739d417a3b0991d536e19eff1 Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 5 Feb 2025 22:06:41 +0100 Subject: [PATCH 46/66] Retrying from specific point works --- client-rs/src/daemon.rs | 72 ++++++++++++++++--------- client-rs/src/utils/database_manager.rs | 2 +- client-rs/src/utils/starknet_client.rs | 4 +- 3 files changed, 49 insertions(+), 29 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 30c00d5..80a828a 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -884,10 +884,28 @@ async fn broadcast_onchain_ready_jobs( ); // Submit to Starknet - let txhash = bankai + let send_result = bankai .starknet_client .submit_update(circuit_inputs.expected_circuit_outputs, &bankai.config) - .await?; + .await; + + let txhash = match send_result { + Ok(txhash) => { + info!("[EPOCH BATCH JOB] Transaction sent: {}", txhash); + txhash + } + Err(e) => { + error!("[EPOCH BATCH JOB] Transaction sending error: {:?}", e); + let _ = db_manager + .set_failure_info(job.job_uuid, JobStatus::ReadyToBroadcastOnchain) + .await?; + db_manager + .update_job_status(job.job_uuid, JobStatus::Error) + .await?; + + continue; + } + }; info!( "[EPOCH BATCH JOB] Successfully called batch epoch update onchain for job_uuid: {}, txhash: {}", @@ -900,28 +918,6 @@ async fn broadcast_onchain_ready_jobs( let _ = db_manager.set_job_txhash(job.job_uuid, txhash).await; - // match send_result { - // Ok(_) => { - // info!("[EPOCH BATCH JOB] Transaction sent"); - // db_manager - // 
.update_job_status(job.job_uuid, JobStatus::VerifyTransactionSend) - // .await?; - - // // Iterate over and insert epochs proofs to db - // for (index, epoch) in - // circuit_inputs.circuit_inputs.epochs.iter().enumerate() - // { - // println!("Epoch {}: {:?}", index, epoch.expected_circuit_outputs); - // } - // } - // Err(e) => { - // error!("[EPOCH BATCH JOB] Transaction sending error: {:?}", e); - // db_manager - // .update_job_status(job.job_uuid, JobStatus::Error) - // .await?; - // } - // } - let confirmation_result = bankai.starknet_client.wait_for_confirmation(txhash).await; @@ -941,6 +937,9 @@ async fn broadcast_onchain_ready_jobs( } Err(e) => { error!("[EPOCH BATCH JOB] Transaction failed or timed out: {:?}", e); + let _ = db_manager + .set_failure_info(job.job_uuid, JobStatus::ReadyToBroadcastOnchain) + .await?; db_manager .update_job_status(job.job_uuid, JobStatus::Error) .await?; @@ -974,13 +973,31 @@ async fn broadcast_onchain_ready_jobs( sync_commite_id ); - let txhash = bankai + let send_result = bankai .starknet_client .submit_update( sync_committee_update_inputs.expected_circuit_outputs, &bankai.config, ) - .await?; + .await; + + let txhash = match send_result { + Ok(txhash) => { + info!("[EPOCH BATCH JOB] Transaction sent: {}", txhash); + txhash + } + Err(e) => { + error!("[EPOCH BATCH JOB] Transaction sending error: {:?}", e); + let _ = db_manager + .set_failure_info(job.job_uuid, JobStatus::ReadyToBroadcastOnchain) + .await?; + db_manager + .update_job_status(job.job_uuid, JobStatus::Error) + .await?; + + continue; + } + }; info!("[SYNC COMMITTEE JOB] Successfully called sync committee ID {} update onchain, transaction confirmed, txhash: {}", sync_commite_id, txhash); @@ -1028,6 +1045,9 @@ async fn broadcast_onchain_ready_jobs( "[SYNC COMMITTEE JOB] Transaction failed or timed out: {:?}", e ); + let _ = db_manager + .set_failure_info(job.job_uuid, JobStatus::ReadyToBroadcastOnchain) + .await?; db_manager .update_job_status(job.job_uuid, 
JobStatus::Error) .await?; diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 5d457a5..0873762 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -402,7 +402,7 @@ impl DatabaseManager { ) -> Result<(), Box> { self.client .execute( - "UPDATE jobs SET errored_at_step = $1, updated_at = NOW(), last_failure_time = NOW() WHERE job_uuid = $2", + "UPDATE jobs SET failed_at_step = $1, updated_at = NOW(), last_failure_time = NOW() WHERE job_uuid = $2", &[&failed_at_step.to_string(), &job_id], ) .await?; diff --git a/client-rs/src/utils/starknet_client.rs b/client-rs/src/utils/starknet_client.rs index e307d8f..3f946a0 100644 --- a/client-rs/src/utils/starknet_client.rs +++ b/client-rs/src/utils/starknet_client.rs @@ -177,10 +177,10 @@ impl StarknetClient { } Err(e) => { error!("Transaction execution error: {:#?}", e); - Err(StarknetError::TransactionError(format!( + return Err(StarknetError::TransactionError(format!( "TransactionExecutionError: {:#?}", e - ))) + ))); } } } From 0690eba26e38875267daf7b4d49c7101cac68d73 Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 5 Feb 2025 22:10:18 +0100 Subject: [PATCH 47/66] Retryer fix --- client-rs/src/daemon.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 80a828a..5ec3e6d 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -846,11 +846,19 @@ async fn retry_failed_jobs( } } - let _ = db_clone.update_job_status(job_id, failed_at_step).await; - if tx_clone.send(job_to_retry).await.is_err() { - // return Err("Failed to send job".into()); - // Update the status to status what was at the error occurene time - error!("Error retrying job: {}", job_id); + let _ = db_clone + .update_job_status(job_id, failed_at_step.clone()) + .await; + if failed_at_step != JobStatus::OffchainComputationFinished + && failed_at_step != 
JobStatus::ReadyToBroadcastOnchain + && failed_at_step != JobStatus::ProofVerifyCalledOnchain + // These jobs are done sequentially, not in parallel + { + if tx_clone.send(job_to_retry).await.is_err() { + // return Err("Failed to send job".into()); + // Update the status to status what was at the error occurene time + error!("Error retrying job: {}", job_id); + } } }); From b8a3a3b8d7f4707aeee89159267f21a04f7bb4ff Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 5 Feb 2025 22:13:53 +0100 Subject: [PATCH 48/66] Fix SQL --- client-rs/db_structure.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client-rs/db_structure.sql b/client-rs/db_structure.sql index cba57bd..7fb8ac0 100644 --- a/client-rs/db_structure.sql +++ b/client-rs/db_structure.sql @@ -8,8 +8,8 @@ CREATE TABLE jobs ( batch_range_end_epoch BIGINT NULL, type TEXT NOT NULL, tx_hash TEXT NULL, - failed_at_step TEXT NOT NULL, - retries_count BIGINT NOT NULL, + failed_at_step TEXT NULL, + retries_count BIGINT NULL, last_failure_time TIMESTAMP NULL, updated_at TIMESTAMP DEFAULT NOW (), created_at TIMESTAMP DEFAULT NOW () From 2d1401e6a968d95fbeb90f9c41f559fc8e2b8880 Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 5 Feb 2025 22:28:47 +0100 Subject: [PATCH 49/66] Change max skipped slots & fix issue with sync committee --- client-rs/src/bankai_client.rs | 8 +++++--- client-rs/src/constants.rs | 1 + client-rs/src/daemon.rs | 10 +++++++--- client-rs/src/epoch_update.rs | 10 ++++++---- client-rs/src/utils/database_manager.rs | 24 ++++++++++++++++++++++++ client-rs/src/utils/rpc.rs | 12 +++++++----- 6 files changed, 50 insertions(+), 15 deletions(-) diff --git a/client-rs/src/bankai_client.rs b/client-rs/src/bankai_client.rs index f114ea1..a00b67b 100644 --- a/client-rs/src/bankai_client.rs +++ b/client-rs/src/bankai_client.rs @@ -1,3 +1,4 @@ +use crate::constants; use crate::{ contract_init::ContractInitializationData, epoch_update::EpochUpdate, @@ -52,7 +53,6 @@ impl BankaiClient { mut 
slot: u64, ) -> Result { let mut attempts = 0; - const MAX_ATTEMPTS: u8 = 3; // Before we start generating the proof, we ensure the slot was not missed let _header = loop { @@ -60,13 +60,15 @@ impl BankaiClient { Ok(header) => break header, Err(Error::EmptySlotDetected(_)) => { attempts += 1; - if attempts >= MAX_ATTEMPTS { + if attempts >= constants::MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS { return Err(Error::EmptySlotDetected(slot)); } slot += 1; info!( "Empty slot detected! Attempt {}/{}. Fetching slot: {}", - attempts, MAX_ATTEMPTS, slot + attempts, + constants::MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS, + slot ); } Err(e) => return Err(e), // Propagate other errors immediately diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index 5f9528c..83e7fc3 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -13,3 +13,4 @@ pub const BEACON_CHAIN_LISTENER_ENABLED: bool = true; pub const JOBS_RETRY_ENABLED: bool = true; pub const JOBS_RESUME_ENABLED: bool = true; pub const RETRY_DELAY_MS: u64 = 300_0000; +pub const MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS: u64 = 5; diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 5ec3e6d..1b59c77 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -709,9 +709,13 @@ async fn evaluate_jobs_statuses( ); db_manager - .set_ready_to_broadcast_for_batch_epochs(first_epoch, last_epoch) // Set READY_TO_BROADCAST when OFFCHAIN_COMPUTATION_FINISHED + .set_ready_to_broadcast_for_batch_epochs_to(last_epoch) // Set READY_TO_BROADCAST when OFFCHAIN_COMPUTATION_FINISHED .await?; + // db_manager + // .set_ready_to_broadcast_for_batch_epochs(first_epoch, last_epoch) // Set READY_TO_BROADCAST when OFFCHAIN_COMPUTATION_FINISHED + // .await?; + db_manager .set_ready_to_broadcast_for_sync_committee(latest_verified_sync_committee_id + 1) .await?; @@ -991,11 +995,11 @@ async fn broadcast_onchain_ready_jobs( let txhash = match send_result { Ok(txhash) => { - info!("[EPOCH BATCH JOB] Transaction sent: {}", 
txhash); + info!("[SYNC COMMITTEE JOB] Transaction sent: {}", txhash); txhash } Err(e) => { - error!("[EPOCH BATCH JOB] Transaction sending error: {:?}", e); + error!("[SYNC COMMITTEE JOB] Transaction sending error: {:?}", e); let _ = db_manager .set_failure_info(job.job_uuid, JobStatus::ReadyToBroadcastOnchain) .await?; diff --git a/client-rs/src/epoch_update.rs b/client-rs/src/epoch_update.rs index bf95e39..a6d966b 100644 --- a/client-rs/src/epoch_update.rs +++ b/client-rs/src/epoch_update.rs @@ -1,5 +1,6 @@ use std::fs; +use crate::constants; use crate::{ execution_header::ExecutionHeaderProof, traits::{ProofType, Provable, Submittable}, @@ -77,7 +78,7 @@ impl Provable for EpochUpdate { fn proof_type(&self) -> ProofType { ProofType::Epoch } - + fn inputs_path(&self) -> String { format!( "batches/epoch/{}/input_{}.json", @@ -173,20 +174,21 @@ impl EpochCircuitInputs { mut slot: u64, ) -> Result { let mut attempts = 0; - const MAX_ATTEMPTS: u8 = 3; let header = loop { match client.get_header(slot).await { Ok(header) => break header, Err(Error::EmptySlotDetected(_)) => { attempts += 1; - if attempts >= MAX_ATTEMPTS { + if attempts >= constants::MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS { return Err(Error::EmptySlotDetected(slot)); } slot += 1; info!( "Empty slot detected! Attempt {}/{}. 
Fetching slot: {}", - attempts, MAX_ATTEMPTS, slot + attempts, + constants::MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS, + slot ); } Err(e) => return Err(e), // Propagate other errors immediately diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 0873762..c594b06 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -451,6 +451,30 @@ impl DatabaseManager { Ok(()) } + pub async fn set_ready_to_broadcast_for_batch_epochs_to( + &self, + to_epoch: u64, + ) -> Result<(), Box> { + let rows_affected = self + .client + .execute( + "UPDATE jobs + SET job_status = 'READY_TO_BROADCAST_ONCHAIN', updated_at = NOW() + WHERE batch_range_end_epoch <= $2 AND type = 'EPOCH_BATCH_UPDATE' + AND job_status = 'OFFCHAIN_COMPUTATION_FINISHED'", + &[&to_epoch.to_i64()], + ) + .await?; + + if rows_affected > 0 { + info!( + "{} EPOCH_BATCH_UPDATE jobs changed state to READY_TO_BROADCAST_ONCHAIN", + rows_affected + ); + } + Ok(()) + } + pub async fn set_ready_to_broadcast_for_sync_committee( &self, sync_committee_id: u64, diff --git a/client-rs/src/utils/rpc.rs b/client-rs/src/utils/rpc.rs index eabff92..8a3347d 100644 --- a/client-rs/src/utils/rpc.rs +++ b/client-rs/src/utils/rpc.rs @@ -1,3 +1,4 @@ +use crate::constants; use crate::epoch_update::SyncCommitteeValidatorPubs; use crate::Error; use alloy_rpc_types_beacon::events::light_client_finality::SyncAggregate; @@ -81,7 +82,6 @@ impl BeaconRpcClient { slot += 1; // signature is in the next slot let mut attempts = 0; - const MAX_ATTEMPTS: u8 = 3; // Ensure the slot is not missed and increment in case it is let _header = loop { @@ -89,13 +89,15 @@ impl BeaconRpcClient { Ok(header) => break header, Err(Error::EmptySlotDetected(_)) => { attempts += 1; - if attempts >= MAX_ATTEMPTS { + if attempts >= constants::MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS { return Err(Error::EmptySlotDetected(slot)); } slot += 1; warn!( "Empty slot detected! Attempt {}/{}. 
Fetching slot: {}", - attempts, MAX_ATTEMPTS, slot + attempts, + constants::MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS, + slot ); } Err(e) => return Err(e), // Propagate other errors immediately @@ -104,7 +106,7 @@ impl BeaconRpcClient { let json = self .get_json(&format!("eth/v2/beacon/blocks/{}", slot)) - .await?; + .await?; serde_json::from_value(json["data"]["message"]["body"]["sync_aggregate"].clone()) .map_err(|e| Error::DeserializeError(e.to_string())) @@ -207,7 +209,7 @@ impl BeaconRpcClient { /// The current slot number of the beacon chain head. pub async fn get_head_slot(&self) -> Result { let json = self.get_json("eth/v1/beacon/headers/head").await?; - + let slot = json["data"]["header"]["message"]["slot"] .as_str() .ok_or(Error::DeserializeError("Missing slot field".into()))? From 101dce335f99cce3ac77365aad1d2a5fea42f566 Mon Sep 17 00:00:00 2001 From: lakewik Date: Wed, 5 Feb 2025 22:30:31 +0100 Subject: [PATCH 50/66] Hotfix --- client-rs/src/utils/database_manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index c594b06..fd41eaa 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -460,7 +460,7 @@ impl DatabaseManager { .execute( "UPDATE jobs SET job_status = 'READY_TO_BROADCAST_ONCHAIN', updated_at = NOW() - WHERE batch_range_end_epoch <= $2 AND type = 'EPOCH_BATCH_UPDATE' + WHERE batch_range_end_epoch <= $1 AND type = 'EPOCH_BATCH_UPDATE' AND job_status = 'OFFCHAIN_COMPUTATION_FINISHED'", &[&to_epoch.to_i64()], ) From 0fce7656d483f672682afa7606854321dd3992dc Mon Sep 17 00:00:00 2001 From: lakewik Date: Thu, 6 Feb 2025 10:14:47 +0100 Subject: [PATCH 51/66] Improvements dashboard #1 --- client-rs/src/constants.rs | 2 +- client-rs/src/daemon.rs | 62 ++++++++++++++++++------- client-rs/src/helpers.rs | 16 +++---- client-rs/src/main.rs | 40 ++++++++++++---- client-rs/src/routes/dashboard.rs | 53 
+++++++++++++++++++-- client-rs/src/state.rs | 1 + client-rs/src/utils/cairo_runner.rs | 27 +++++++++-- client-rs/src/utils/database_manager.rs | 36 +++++++++++++- client-rs/src/utils/events.rs | 9 ++++ 9 files changed, 204 insertions(+), 42 deletions(-) create mode 100644 client-rs/src/utils/events.rs diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index 83e7fc3..74c9633 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -13,4 +13,4 @@ pub const BEACON_CHAIN_LISTENER_ENABLED: bool = true; pub const JOBS_RETRY_ENABLED: bool = true; pub const JOBS_RESUME_ENABLED: bool = true; pub const RETRY_DELAY_MS: u64 = 300_0000; -pub const MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS: u64 = 5; +pub const MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS: u64 = 5; // How many skipped slots in row can be on Beacon Chain before we throw error diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 1b59c77..33327dc 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -440,22 +440,42 @@ async fn handle_beacon_chain_head_event( ); // Decide basing on actual state - if helpers::get_sync_committee_id_by_epoch(latest_scheduled_epoch + 1) - > latest_scheduled_sync_committee - { - // We reached end of current sync committee, need to schedule new sync committee proving - match run_sync_committee_update_job( - db_manager.clone(), - latest_scheduled_sync_committee + 1, - tx.clone(), - ) - .await - { - Ok(()) => {} - Err(e) => { - error!("Error while creating sync committee update job: {}", e); - } - }; + // if helpers::get_sync_committee_id_by_epoch(latest_scheduled_epoch + 1) + // > latest_scheduled_sync_committee + // { + // // We reached end of current sync committee, need to schedule new sync committee proving + // match run_sync_committee_update_job( + // db_manager.clone(), + // latest_scheduled_sync_committee + 1, + // tx.clone(), + // ) + // .await + // { + // Ok(()) => {} + // Err(e) => { + // error!("Error while creating sync committee 
update job: {}", e); + // } + // }; + // } + // + + let lowest_committee_update_slot = + (latest_verified_sync_committee_id) * constants::SLOTS_PER_SYNC_COMMITTEE; + if !(latest_verified_epoch_slot < lowest_committee_update_slot) { + if last_sync_committee_in_progress < (latest_scheduled_sync_committee + 1) { + match run_sync_committee_update_job( + db_manager.clone(), + latest_scheduled_sync_committee + 1, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating sync committee update job: {}", e); + } + }; + } } let current_sync_committee_epochs_left = @@ -1132,6 +1152,8 @@ async fn process_job( CairoRunner::generate_pie( &sync_committe_update_program_inputs, &bankai.config, + Some(db_manager.clone()), + Some(job.job_id), ) .await?; @@ -1289,7 +1311,13 @@ async fn process_job( info!("[BATCH EPOCH JOB] Starting trace generation..."); - CairoRunner::generate_pie(&circuit_inputs, &bankai.config).await?; + CairoRunner::generate_pie( + &circuit_inputs, + &bankai.config, + Some(db_manager.clone()), + Some(job.job_id), + ) + .await?; db_manager .update_job_status(job.job_id, JobStatus::PieGenerated) diff --git a/client-rs/src/helpers.rs b/client-rs/src/helpers.rs index 55a1b02..986c56a 100644 --- a/client-rs/src/helpers.rs +++ b/client-rs/src/helpers.rs @@ -57,8 +57,8 @@ pub fn get_last_epoch_for_sync_committee(sync_committee_id: u64) -> u64 { (sync_committee_id + 1) * EPOCHS_PER_SYNC_COMMITTEE - 1 } -pub fn get_first_slot_for_epoch(slot: u64) -> u64 { - slot * SLOTS_PER_EPOCH +pub fn get_first_slot_for_epoch(epoch: u64) -> u64 { + epoch * SLOTS_PER_EPOCH } pub fn get_last_slot_for_epoch(epoch: u64) -> u64 { @@ -69,16 +69,16 @@ pub fn get_sync_committee_id_by_epoch(epoch: u64) -> u64 { epoch / EPOCHS_PER_SYNC_COMMITTEE } -pub fn get_sync_committee_id_by_slot(epoch: u64) -> u64 { - epoch / SLOTS_PER_SYNC_COMMITTEE +pub fn get_sync_committee_id_by_slot(slot: u64) -> u64 { + slot / SLOTS_PER_SYNC_COMMITTEE } -pub fn 
get_first_slot_for_sync_committee(slot: u64) -> u64 { - slot * SLOTS_PER_SYNC_COMMITTEE +pub fn get_first_slot_for_sync_committee(sync_committee: u64) -> u64 { + sync_committee * SLOTS_PER_SYNC_COMMITTEE } -pub fn get_last_slot_for_sync_committee(slot: u64) -> u64 { - (slot + 1) * SLOTS_PER_SYNC_COMMITTEE - 1 +pub fn get_last_slot_for_sync_committee(sync_committee: u64) -> u64 { + (sync_committee + 1) * SLOTS_PER_SYNC_COMMITTEE - 1 } // Since beacon chain RPCs have different response structure (quicknode responds different than nidereal) we use this event extraction logic diff --git a/client-rs/src/main.rs b/client-rs/src/main.rs index f71eaa6..395bfa5 100644 --- a/client-rs/src/main.rs +++ b/client-rs/src/main.rs @@ -1,3 +1,5 @@ +#![allow(dead_code)] +#![allow(unused_imports)] mod bankai_client; mod config; mod constants; @@ -150,6 +152,10 @@ enum Commands { ProveNextCommittee, ProveNextEpoch, ProveNextEpochBatch, + ProveCommitteeAtSlot { + #[arg(long, short)] + slot: u64, + }, CheckBatchStatus { #[arg(long, short)] batch_id: String, @@ -291,19 +297,19 @@ async fn main() -> Result<(), Error> { .await?; let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); println!("Min Slot Required: {}", lowest_committee_update_slot); - let latest_epoch = bankai + let latest_epoch_slot = bankai .starknet_client .get_latest_epoch_slot(&bankai.config) .await?; - println!("Latest epoch: {}", latest_epoch); - if latest_epoch < lowest_committee_update_slot { - return Err(Error::RequiresNewerEpoch(latest_epoch)); + println!("Latest epoch slot: {}", latest_epoch_slot); + if latest_epoch_slot < lowest_committee_update_slot { + return Err(Error::RequiresNewerEpoch(latest_epoch_slot)); } let update = bankai - .get_sync_committee_update(latest_epoch.try_into().unwrap()) + .get_sync_committee_update(latest_epoch_slot.try_into().unwrap()) .await?; let _ = update.export()?; - CairoRunner::generate_pie(&update, &bankai.config).await?; + 
CairoRunner::generate_pie(&update, &bankai.config, None, None).await?; let batch_id = bankai.atlantic_client.submit_batch(update).await?; println!("Batch Submitted: {}", batch_id); } @@ -319,7 +325,7 @@ async fn main() -> Result<(), Error> { // let proof = bankai.get_epoch_proof(next_epoch).await?; let epoch_update = EpochUpdate::new(&bankai.client, next_epoch).await?; let _ = epoch_update.export()?; - CairoRunner::generate_pie(&epoch_update, &bankai.config).await?; + CairoRunner::generate_pie(&epoch_update, &bankai.config, None, None).await?; let batch_id = bankai.atlantic_client.submit_batch(epoch_update).await?; println!("Batch Submitted: {}", batch_id); } @@ -327,10 +333,28 @@ async fn main() -> Result<(), Error> { let epoch_update = EpochUpdateBatch::new(&bankai).await?; println!("Update contents: {:?}", epoch_update); let _ = epoch_update.export()?; - CairoRunner::generate_pie(&epoch_update, &bankai.config).await?; + CairoRunner::generate_pie(&epoch_update, &bankai.config, None, None).await?; let batch_id = bankai.atlantic_client.submit_batch(epoch_update).await?; println!("Batch Submitted: {}", batch_id); } + Commands::ProveCommitteeAtSlot { slot } => { + let latest_committee_id = bankai + .starknet_client + .get_latest_committee_id(&bankai.config) + .await?; + let lowest_committee_update_slot = (latest_committee_id) * Felt::from(0x2000); + println!("Min Slot Required: {}", lowest_committee_update_slot); + // if slot < lowest_committee_update_slot { + // return Err(Error::RequiresNewerEpoch(slot)); + // } + let update = bankai + .get_sync_committee_update(slot.try_into().unwrap()) + .await?; + let _ = update.export()?; + CairoRunner::generate_pie(&update, &bankai.config, None, None).await?; + let batch_id = bankai.atlantic_client.submit_batch(update).await?; + println!("Batch Submitted: {}", batch_id); + } Commands::VerifyEpoch { batch_id, slot } => { let status = bankai .atlantic_client diff --git a/client-rs/src/routes/dashboard.rs 
b/client-rs/src/routes/dashboard.rs index a35234d..4212370 100644 --- a/client-rs/src/routes/dashboard.rs +++ b/client-rs/src/routes/dashboard.rs @@ -1,6 +1,9 @@ -use crate::state::{AppState, JobStatus}; +use crate::{ + helpers, + state::{AppState, JobStatus}, +}; use axum::extract::State; -use num_traits::SaturatingSub; +use num_traits::{SaturatingSub, ToPrimitive}; pub async fn handle_get_dashboard(State(state): State) -> String { let db = state.db_manager.clone(); @@ -70,6 +73,43 @@ pub async fn handle_get_dashboard(State(state): State) -> String { batch_info }; + // Fetch last 20 batch jobs + let recent_sync_committee_jobs = db + .get_recent_sync_committee_jobs(20) + .await + .unwrap_or_default(); + + // Format batch information + let sync_committee_info = recent_sync_committee_jobs + .iter() + .map(|entry| { + format!( + "║ Batch {:}: {} {} [{}] {:<45} ║", + entry.job.job_uuid.to_string()[..8].to_string(), + entry.job.slot, + helpers::get_sync_committee_id_by_slot(entry.job.slot.to_u64().unwrap()), + match entry.job.job_status { + JobStatus::Done => "✓", + JobStatus::Error => "✗", + _ => "⋯", + }, + entry.job.job_status.to_string(), + // entry.tx_hash.as_ref().map_or( + // "-".to_string(), + // |hash| format!("0x{:x}", hash) + // ) + ) + }) + .collect::>() + .join("\n"); + + let sync_committee_jobs_display = if recent_batches.is_empty() { + " ║ No recent sync committee jobs found ║ " + .to_string() + } else { + sync_committee_info + }; + // Update system health indicators with simpler checks let daemon_status = "● Active"; let db_status = if db.is_connected().await { @@ -97,6 +137,7 @@ pub async fn handle_get_dashboard(State(state): State) -> String { db_status, beacon_status, &batch_display, + &sync_committee_jobs_display, ) } @@ -111,6 +152,7 @@ pub fn create_ascii_dashboard( db_status: &str, beacon_status: &str, batch_display: &str, + sync_committee_jobs_display: &str, ) -> String { format!( r#" @@ -139,6 +181,10 @@ pub fn create_ascii_dashboard( ║ UUID: 
FROM: TO: STATUS: TX: ║ ║ ─────────────────────────────────────────────────────────────────────────────────────────────── ║ {batch_display_block} +╠══════════════════════════════ RECENT SYNC COMMITTEE JOBS ════════════════════════════════════╣ +║ UUID: SLOT: COMMITTEE: STATUS: TX: ║ +║ ─────────────────────────────────────────────────────────────────────────────────────────────── ║ +{sync_committee_jobs_display_block} ╚═════════════════════════════════════════════════════════════════════════════════════════════════╝ "#, daemon_status = daemon_status, @@ -150,6 +196,7 @@ pub fn create_ascii_dashboard( latest_beacon_slot = latest_beacon_slot, latest_verified_slot = latest_verified_slot, epoch_gap = epoch_gap, - batch_display_block = batch_display + batch_display_block = batch_display, + sync_committee_jobs_display_block = sync_committee_jobs_display ) } diff --git a/client-rs/src/state.rs b/client-rs/src/state.rs index aac2337..e264f54 100644 --- a/client-rs/src/state.rs +++ b/client-rs/src/state.rs @@ -88,6 +88,7 @@ impl FromStr for JobStatus { match s { "CREATED" => Ok(JobStatus::Created), "PROGRAM_INPUTS_PREPARED" => Ok(JobStatus::ProgramInputsPrepared), + "STARTED_TRACE_GENERATION" => Ok(JobStatus::StartedTraceGeneration), "PIE_GENERATED" => Ok(JobStatus::PieGenerated), "OFFCHAIN_PROOF_REQUESTED" => Ok(JobStatus::AtlanticProofRequested), "OFFCHAIN_PROOF_RETRIEVED" => Ok(JobStatus::AtlanticProofRetrieved), diff --git a/client-rs/src/utils/cairo_runner.rs b/client-rs/src/utils/cairo_runner.rs index 49b9fe4..a88960d 100644 --- a/client-rs/src/utils/cairo_runner.rs +++ b/client-rs/src/utils/cairo_runner.rs @@ -1,14 +1,25 @@ +use std::sync::Arc; + +use crate::state::JobStatus; use crate::traits::ProofType; use crate::BankaiConfig; use crate::{traits::Provable, Error}; use tokio::task; use tokio::task::JoinError; -use tracing::{debug, info}; +use tracing::info; +use uuid::Uuid; + +use super::database_manager::DatabaseManager; pub struct CairoRunner(); impl CairoRunner 
{ - pub async fn generate_pie(input: &impl Provable, config: &BankaiConfig) -> Result<(), Error> { + pub async fn generate_pie( + input: &impl Provable, + config: &BankaiConfig, + db_manager: Option>, + job_id: Option, + ) -> Result<(), Error> { // Acquire a permit from the semaphore. // If all permits are in use we will wait until one is available. let _permit = config @@ -18,8 +29,16 @@ impl CairoRunner { .await .map_err(|e| Error::CairoRunError(format!("Semaphore error: {}", e)))?; - let input_path = input.inputs_path(); - info!("Cairo Input path: {}", input_path); + match db_manager { + None => {} + Some(db) => { + let _ = db + .update_job_status(job_id.unwrap(), JobStatus::StartedTraceGeneration) + .await; + } + } + + let input_path = input.export()?; let program_path = match input.proof_type() { ProofType::Epoch => config.epoch_circuit_path.clone(), diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index fd41eaa..d813df6 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -525,7 +525,7 @@ impl DatabaseManager { self.client .execute( "UPDATE jobs SET tx_hash = $1, updated_at = NOW() WHERE job_uuid = $2", - &[&txhash.to_string(), &job_id], + &[&txhash.to_hex_string(), &job_id], ) .await?; Ok(()) @@ -753,6 +753,40 @@ impl DatabaseManager { Ok(jobs) } + pub async fn get_recent_sync_committee_jobs( + &self, + limit: i64, + ) -> Result, Box> { + let rows = self + .client + .query( + "SELECT *, + to_char(created_at, 'HH24:MI:SS') as created_time, + to_char(updated_at, 'HH24:MI:SS') as updated_time + FROM jobs + WHERE type = 'SYNC_COMMITTEE_UPDATE' + ORDER BY slot DESC + LIMIT $1", + &[&limit], + ) + .await?; + + let jobs = rows + .into_iter() + .map(|row| { + let job = Self::map_row_to_job(row.clone()).unwrap(); + JobWithTimestamps { + job, + created_at: row.get("created_time"), + updated_at: row.get("updated_time"), + tx_hash: row.get("tx_hash"), + } + }) + .collect(); + 
+ Ok(jobs) + } + pub async fn is_connected(&self) -> bool { match self.client.query_one("SELECT 1", &[]).await { Ok(_) => true, diff --git a/client-rs/src/utils/events.rs b/client-rs/src/utils/events.rs new file mode 100644 index 0000000..3f06f57 --- /dev/null +++ b/client-rs/src/utils/events.rs @@ -0,0 +1,9 @@ +use tokio::sync::broadcast; + +lazy_static::lazy_static! { + static ref SEMAPHORE_EVENT: broadcast::Sender = broadcast::channel(10).0; +} + +pub fn subscribe_to_semaphore_events() -> broadcast::Receiver { + SEMAPHORE_EVENT.subscribe() +} From 313fa2644ce223fe05239d3a3054de24ee092f94 Mon Sep 17 00:00:00 2001 From: lakewik Date: Thu, 6 Feb 2025 10:32:17 +0100 Subject: [PATCH 52/66] Hotfix --- client-rs/src/daemon.rs | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 33327dc..9a712da 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -459,24 +459,24 @@ async fn handle_beacon_chain_head_event( // } // - let lowest_committee_update_slot = - (latest_verified_sync_committee_id) * constants::SLOTS_PER_SYNC_COMMITTEE; - if !(latest_verified_epoch_slot < lowest_committee_update_slot) { - if last_sync_committee_in_progress < (latest_scheduled_sync_committee + 1) { - match run_sync_committee_update_job( - db_manager.clone(), - latest_scheduled_sync_committee + 1, - tx.clone(), - ) - .await - { - Ok(()) => {} - Err(e) => { - error!("Error while creating sync committee update job: {}", e); - } - }; - } - } + // let lowest_committee_update_slot = + // (latest_verified_sync_committee_id) * constants::SLOTS_PER_SYNC_COMMITTEE; + // if !(latest_verified_epoch_slot < lowest_committee_update_slot) { + // if last_sync_committee_in_progress < (latest_scheduled_sync_committee + 1) { + // match run_sync_committee_update_job( + // db_manager.clone(), + // latest_scheduled_sync_committee + 1, + // tx.clone(), + // ) + // .await + // { + // Ok(()) => {} + 
// Err(e) => { + // error!("Error while creating sync committee update job: {}", e); + // } + // }; + // } + // } let current_sync_committee_epochs_left = helpers::get_last_epoch_for_sync_committee(current_sync_committee_id) - current_epoch_id; From d013b3ea71dee25876c4e08b8a4b2a28a06ddc14 Mon Sep 17 00:00:00 2001 From: lakewik Date: Thu, 6 Feb 2025 14:53:38 +0100 Subject: [PATCH 53/66] Hotfix 2 --- client-rs/src/daemon.rs | 48 +++++++++++++--------- client-rs/src/utils/database_manager.rs | 54 +++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 18 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 9a712da..6adfa22 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -401,18 +401,26 @@ async fn handle_beacon_chain_head_event( .unwrap() .unwrap(); + let last_done_epoch = db_manager.get_latest_done_epoch().await.unwrap().unwrap(); + let last_sync_committee_in_progress = db_manager .get_latest_sync_committee_in_progress() .await .unwrap() .unwrap(); + let last_done_sync_committee = db_manager + .get_latest_done_sync_committee() + .await + .unwrap() + .unwrap(); + let mut latest_scheduled_epoch = last_epoch_in_progress; let mut latest_scheduled_sync_committee = last_sync_committee_in_progress; if latest_verified_epoch_id > last_epoch_in_progress { if last_epoch_in_progress == 0 { - info!("Starting daemon on clean epochs jobs table"); + //info!("Starting daemon on clean epochs jobs table"); } else { warn!( "Something may be wrong, last verified epoch is greather than last epoch in progress" @@ -424,7 +432,7 @@ async fn handle_beacon_chain_head_event( if latest_verified_sync_committee_id > last_sync_committee_in_progress { if last_sync_committee_in_progress == 0 { - info!("Starting daemon on clean sync committees jobs table"); + //info!("Starting daemon on clean sync committees jobs table"); } else { warn!( "Something may be wrong, last verified sync committee is greather than last sync committee in progress" @@ 
-550,26 +558,30 @@ async fn handle_beacon_chain_head_event( // helpers::get_last_epoch_for_sync_committee(currently_processed_sync_committee_id) // - latest_scheduled_epoch // ); - match run_batch_epoch_update_job( - db_manager.clone(), - get_first_slot_for_epoch(epoch_to_start_from) - + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), - epoch_to_start_from, - epoch_to_end_on, - tx.clone(), - ) - .await - { - Ok(()) => {} - Err(e) => { - error!("Error while creating job: {}", e); - } - }; + // + // Mitigate the issue when Starknet Sequencer RPC responds about last verified slot with delay + if last_done_epoch < latest_verified_epoch_id { + match run_batch_epoch_update_job( + db_manager.clone(), + get_first_slot_for_epoch(epoch_to_start_from) + + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), + epoch_to_start_from, + epoch_to_end_on, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating job: {}", e); + } + }; + } } else { debug!("All reqired jobs are now queued and processing"); } } else if epochs_behind == constants::TARGET_BATCH_SIZE { - if last_epoch_in_progress < (epochs_behind + current_epoch_id) { + if last_epoch_in_progress < current_epoch_id { // This is when we are synced properly and new epoch batch needs to be inserted info!( "Target batch size reached. Starting processing next epoch batch. 
Current Beacon Chain epoch: {} Latest verified epoch: {}", diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index d813df6..98faefa 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -251,6 +251,34 @@ impl DatabaseManager { } } + pub async fn get_latest_done_epoch( + &self, + ) -> Result, Box> { + // Query the latest slot with job_status in ('in_progress', 'initialized') + // //, 'CANCELLED', 'ERROR' + let row_opt = self + .client + .query_opt( + "SELECT batch_range_end_epoch FROM jobs + WHERE job_status = 'DONE' + AND batch_range_end_epoch != 0 + AND type = 'EPOCH_BATCH_UPDATE' + ORDER BY batch_range_end_epoch DESC + LIMIT 1", + &[], + ) + .await?; + + // Extract and return the slot ID + if let Some(row) = row_opt { + Ok(Some( + row.get::<_, i64>("batch_range_end_epoch").to_u64().unwrap(), + )) + } else { + Ok(Some(0)) + } + } + pub async fn get_latest_sync_committee_in_progress( &self, ) -> Result, Box> { @@ -277,6 +305,32 @@ impl DatabaseManager { } } + pub async fn get_latest_done_sync_committee( + &self, + ) -> Result, Box> { + // Query the latest slot with job_status in ('in_progress', 'initialized') + let row_opt = self + .client + .query_opt( + "SELECT slot FROM jobs + WHERE job_status = 'DONE' + AND type = 'SYNC_COMMITTEE_UPDATE' + ORDER BY slot DESC + LIMIT 1", + &[], + ) + .await?; + + // Extract and return the slot ID + if let Some(row) = row_opt { + Ok(Some(helpers::slot_to_sync_committee_id( + row.get::<_, i64>("slot").to_u64().unwrap(), + ))) + } else { + Ok(Some(0)) + } + } + pub async fn count_jobs_in_progress( &self, ) -> Result, Box> { From 5b3a8af6e155fa9271d8a9ed23256e9ce1ead359 Mon Sep 17 00:00:00 2001 From: lakewik Date: Thu, 6 Feb 2025 15:47:52 +0100 Subject: [PATCH 54/66] Add txhashes to dashboard --- client-rs/src/routes/dashboard.rs | 66 +++++++++++++++---------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git 
a/client-rs/src/routes/dashboard.rs b/client-rs/src/routes/dashboard.rs index 4212370..b234417 100644 --- a/client-rs/src/routes/dashboard.rs +++ b/client-rs/src/routes/dashboard.rs @@ -47,7 +47,7 @@ pub async fn handle_get_dashboard(State(state): State) -> String { .iter() .map(|entry| { format!( - "║ Batch {:}: {} -> {} [{}] {:<45} ║", + "║ Batch {:}: {} -> {} [{}] {:<32} {:<66} ║", entry.job.job_uuid.to_string()[..8].to_string(), entry.job.batch_range_begin_epoch, entry.job.batch_range_end_epoch, @@ -57,10 +57,10 @@ pub async fn handle_get_dashboard(State(state): State) -> String { _ => "⋯", }, entry.job.job_status.to_string(), - // entry.tx_hash.as_ref().map_or( - // "-".to_string(), - // |hash| format!("0x{:x}", hash) - // ) + entry + .tx_hash + .as_ref() + .map_or("-".to_string(), |s| s.clone()), ) }) .collect::>() @@ -84,7 +84,7 @@ pub async fn handle_get_dashboard(State(state): State) -> String { .iter() .map(|entry| { format!( - "║ Batch {:}: {} {} [{}] {:<45} ║", + "║ Batch {:}: {} {} [{}] {:<32} {:<66} ║", entry.job.job_uuid.to_string()[..8].to_string(), entry.job.slot, helpers::get_sync_committee_id_by_slot(entry.job.slot.to_u64().unwrap()), @@ -94,17 +94,17 @@ pub async fn handle_get_dashboard(State(state): State) -> String { _ => "⋯", }, entry.job.job_status.to_string(), - // entry.tx_hash.as_ref().map_or( - // "-".to_string(), - // |hash| format!("0x{:x}", hash) - // ) + entry + .tx_hash + .as_ref() + .map_or("-".to_string(), |s| s.clone()), ) }) .collect::>() .join("\n"); let sync_committee_jobs_display = if recent_batches.is_empty() { - " ║ No recent sync committee jobs found ║ " + " ║ No recent sync committee jobs found ║ " .to_string() } else { sync_committee_info @@ -162,30 +162,30 @@ pub fn create_ascii_dashboard( | |_) / ___ \| |\ | . 
\ / ___ \ | | |____/_/ \_\_| \_|_|\_/_/ \_\___| -╔════════════════════════════════════════ DASHBOARD OVERVIEW ═════════════════════════════════════╗ -║ ║ -║ Statuses: ║ -║ • Daemon: {daemon_status:<12} • Database: {db_status:<12} • Beacon: {beacon_status:<12} ║ -║ ║ -║ Metrics: ║ -║ • Success Rate: {success_rate:<10} ║ -║ • Average Duration: {avg_duration:<10} ║ -║ • Jobs in Progress: {jobs_in_progress:<10} ║ -║ ║ -║ Beacon Info: ║ -║ • Latest Beacon Slot: {latest_beacon_slot:<12} ║ -║ • Latest Verified Slot: {latest_verified_slot:<12} ║ -║ • Epoch Gap: {epoch_gap:<12} ║ -║ ║ -╠═══════════════════════════════════════ RECENT BATCH JOBS ═══════════════════════════════════════╣ -║ UUID: FROM: TO: STATUS: TX: ║ -║ ─────────────────────────────────────────────────────────────────────────────────────────────── ║ +╔════════════════════════════════════════ DASHBOARD OVERVIEW ════════════════════════════════════════════════════════════════════════════════╗ +║ ║ +║ Statuses: ║ +║ • Daemon: {daemon_status:<12} • Database: {db_status:<12} • Beacon: {beacon_status:<12} ║ +║ ║ +║ Metrics: ║ +║ • Success Rate: {success_rate:<10} ║ +║ • Average Duration: {avg_duration:<10} ║ +║ • Jobs in Progress: {jobs_in_progress:<10} ║ +║ ║ +║ Beacon Info: ║ +║ • Latest Beacon Slot: {latest_beacon_slot:<12} ║ +║ • Latest Verified Slot: {latest_verified_slot:<12} ║ +║ • Epoch Gap: {epoch_gap:<12} ║ +║ ║ +╠═══════════════════════════════════════ RECENT BATCH JOBS ══════════════════════════════════════════════════════════════════════════════════╣ +║ UUID: FROM: TO: STATUS: TX: ║ +║ ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ {batch_display_block} -╠══════════════════════════════ RECENT SYNC COMMITTEE JOBS ════════════════════════════════════╣ -║ UUID: SLOT: COMMITTEE: STATUS: TX: ║ -║ ─────────────────────────────────────────────────────────────────────────────────────────────── ║ 
+╠══════════════════════════════ RECENT SYNC COMMITTEE JOBS ═══════════════════════════════════════════════════════════════════════════════╣ +║ UUID: SLOT: COMMITTEE: STATUS: TX: ║ +║ ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ {sync_committee_jobs_display_block} -╚═════════════════════════════════════════════════════════════════════════════════════════════════╝ +╚════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╝ "#, daemon_status = daemon_status, db_status = db_status, From 0f7c5a66003edd421094b93805d7e00e9eab26a9 Mon Sep 17 00:00:00 2001 From: lakewik Date: Thu, 6 Feb 2025 15:58:59 +0100 Subject: [PATCH 55/66] Display fix --- client-rs/src/routes/dashboard.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client-rs/src/routes/dashboard.rs b/client-rs/src/routes/dashboard.rs index b234417..f226212 100644 --- a/client-rs/src/routes/dashboard.rs +++ b/client-rs/src/routes/dashboard.rs @@ -178,11 +178,11 @@ pub fn create_ascii_dashboard( ║ • Epoch Gap: {epoch_gap:<12} ║ ║ ║ ╠═══════════════════════════════════════ RECENT BATCH JOBS ══════════════════════════════════════════════════════════════════════════════════╣ -║ UUID: FROM: TO: STATUS: TX: ║ +║ UUID: FROM: TO: STATUS: TX: ║ ║ ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ {batch_display_block} ╠══════════════════════════════ RECENT SYNC COMMITTEE JOBS ═══════════════════════════════════════════════════════════════════════════════╣ -║ UUID: SLOT: COMMITTEE: STATUS: TX: ║ +║ UUID: SLOT: COMMITTEE: STATUS: TX: ║ ║ ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ {sync_committee_jobs_display_block} 
╚════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╝ From a87ba9cb63d95ea9c48332cd90217f8bae34bd10 Mon Sep 17 00:00:00 2001 From: lakewik Date: Thu, 6 Feb 2025 15:59:47 +0100 Subject: [PATCH 56/66] Display fix 2 --- client-rs/src/routes/dashboard.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client-rs/src/routes/dashboard.rs b/client-rs/src/routes/dashboard.rs index f226212..692e7eb 100644 --- a/client-rs/src/routes/dashboard.rs +++ b/client-rs/src/routes/dashboard.rs @@ -178,11 +178,11 @@ pub fn create_ascii_dashboard( ║ • Epoch Gap: {epoch_gap:<12} ║ ║ ║ ╠═══════════════════════════════════════ RECENT BATCH JOBS ══════════════════════════════════════════════════════════════════════════════════╣ -║ UUID: FROM: TO: STATUS: TX: ║ +║ UUID: FROM: TO: STATUS: TX: ║ ║ ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ {batch_display_block} ╠══════════════════════════════ RECENT SYNC COMMITTEE JOBS ═══════════════════════════════════════════════════════════════════════════════╣ -║ UUID: SLOT: COMMITTEE: STATUS: TX: ║ +║ UUID: SLOT: COMMITTEE: STATUS: TX: ║ ║ ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ {sync_committee_jobs_display_block} ╚════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╝ From bc8f6d86d18be559f4681c0918a2c2131696ab13 Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 7 Feb 2025 00:53:57 +0100 Subject: [PATCH 57/66] Fixes in committee update --- client-rs/src/daemon.rs | 102 ++++++++++++++++++++++------------------ 1 file changed, 56 insertions(+), 46 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 6adfa22..be52f82 100644 --- 
a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -208,16 +208,16 @@ async fn main() -> Result<(), Box> { // After RPC init, we do some startup checks before start listening to beacon chain: // - // Retry any failed jobs before processing new ones - if constants::JOBS_RETRY_ENABLED { - retry_failed_jobs(db_manager_for_listener.clone(), tx_for_listener.clone()).await?; - } - // 🔄 Resume any unfinished jobs before processing new ones if constants::JOBS_RESUME_ENABLED { resume_unfinished_jobs(db_manager_for_listener.clone(), tx_for_listener.clone()).await?; } + // Retry any failed jobs before processing new ones + if constants::JOBS_RETRY_ENABLED { + retry_failed_jobs(db_manager_for_listener.clone(), tx_for_listener.clone()).await?; + } + //enqueue_sync_committee_jobs(); //enqueue_batch_epochs_jobs(); // @@ -375,14 +375,21 @@ async fn handle_beacon_chain_head_event( .to_u64() .unwrap(); + let lowest_committee_update_slot = + (latest_verified_sync_committee_id) * constants::SLOTS_PER_SYNC_COMMITTEE; + let latest_verified_epoch_id = helpers::slot_to_epoch_id(latest_verified_epoch_slot); let epochs_behind = current_epoch_id - latest_verified_epoch_id; - let _ = evaluate_jobs_statuses(db_manager.clone(), latest_verified_sync_committee_id) - .await - .map_err(|e| { - error!("Error evaluating jobs statuses: {}", e); - }); + let _ = evaluate_jobs_statuses( + db_manager.clone(), + latest_verified_sync_committee_id, + latest_verified_epoch_slot, + ) + .await + .map_err(|e| { + error!("Error evaluating jobs statuses: {}", e); + }); let _ = broadcast_onchain_ready_jobs(db_manager.clone(), bankai.clone()) .await .map_err(|e| { @@ -467,24 +474,26 @@ async fn handle_beacon_chain_head_event( // } // - // let lowest_committee_update_slot = - // (latest_verified_sync_committee_id) * constants::SLOTS_PER_SYNC_COMMITTEE; - // if !(latest_verified_epoch_slot < lowest_committee_update_slot) { - // if last_sync_committee_in_progress < (latest_scheduled_sync_committee + 1) { - // 
match run_sync_committee_update_job( - // db_manager.clone(), - // latest_scheduled_sync_committee + 1, - // tx.clone(), - // ) - // .await - // { - // Ok(()) => {} - // Err(e) => { - // error!("Error while creating sync committee update job: {}", e); - // } - // }; - // } - // } + if !(latest_verified_epoch_slot < lowest_committee_update_slot) { + info!( + "Lowest committee update slot: {}", + lowest_committee_update_slot + ); + if last_sync_committee_in_progress < latest_scheduled_sync_committee { + match run_sync_committee_update_job( + db_manager.clone(), + latest_scheduled_sync_committee, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating sync committee update job: {}", e); + } + }; + } + } let current_sync_committee_epochs_left = helpers::get_last_epoch_for_sync_committee(current_sync_committee_id) - current_epoch_id; @@ -560,23 +569,23 @@ async fn handle_beacon_chain_head_event( // ); // // Mitigate the issue when Starknet Sequencer RPC responds about last verified slot with delay - if last_done_epoch < latest_verified_epoch_id { - match run_batch_epoch_update_job( - db_manager.clone(), - get_first_slot_for_epoch(epoch_to_start_from) - + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), - epoch_to_start_from, - epoch_to_end_on, - tx.clone(), - ) - .await - { - Ok(()) => {} - Err(e) => { - error!("Error while creating job: {}", e); - } - }; - } + // if last_done_epoch < latest_verified_epoch_id { + // match run_batch_epoch_update_job( + // db_manager.clone(), + // get_first_slot_for_epoch(epoch_to_start_from) + // + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), + // epoch_to_start_from, + // epoch_to_end_on, + // tx.clone(), + // ) + // .await + // { + // Ok(()) => {} + // Err(e) => { + // error!("Error while creating job: {}", e); + // } + // }; + // } } else { debug!("All reqired jobs are now queued and processing"); } @@ -725,6 +734,7 @@ async fn run_sync_committee_update_job( async fn 
evaluate_jobs_statuses( db_manager: Arc, latest_verified_sync_committee_id: u64, + latest_verified_epoch_slot: u64, ) -> Result<(), Box> { // The purpose of this function is to manage the sequential nature of onchain verification of epochs and sync committees // Firstly we get all jobs with status OFFCHAIN_COMPUTATION_FINISHED @@ -749,7 +759,7 @@ async fn evaluate_jobs_statuses( // .await?; db_manager - .set_ready_to_broadcast_for_sync_committee(latest_verified_sync_committee_id + 1) + .set_ready_to_broadcast_for_sync_committee(latest_verified_sync_committee_id) .await?; Ok(()) From 85f206f5278efca3be6696c908cc51f1af7e7e31 Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 7 Feb 2025 01:19:30 +0100 Subject: [PATCH 58/66] Add sync committee info in dashboard --- client-rs/src/constants.rs | 2 +- client-rs/src/daemon.rs | 34 +++++++++++++++---------------- client-rs/src/routes/dashboard.rs | 22 ++++++++++++++++++-- 3 files changed, 38 insertions(+), 20 deletions(-) diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index 74c9633..c474e2f 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -11,6 +11,6 @@ pub const USE_TRANSACTOR: bool = false; pub const MAX_JOB_RETRIES_COUNT: u64 = 10; pub const BEACON_CHAIN_LISTENER_ENABLED: bool = true; pub const JOBS_RETRY_ENABLED: bool = true; -pub const JOBS_RESUME_ENABLED: bool = true; +pub const JOBS_RESUME_ENABLED: bool = false; pub const RETRY_DELAY_MS: u64 = 300_0000; pub const MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS: u64 = 5; // How many skipped slots in row can be on Beacon Chain before we throw error diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index be52f82..3d5882e 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -569,23 +569,23 @@ async fn handle_beacon_chain_head_event( // ); // // Mitigate the issue when Starknet Sequencer RPC responds about last verified slot with delay - // if last_done_epoch < latest_verified_epoch_id { - // match 
run_batch_epoch_update_job( - // db_manager.clone(), - // get_first_slot_for_epoch(epoch_to_start_from) - // + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), - // epoch_to_start_from, - // epoch_to_end_on, - // tx.clone(), - // ) - // .await - // { - // Ok(()) => {} - // Err(e) => { - // error!("Error while creating job: {}", e); - // } - // }; - // } + if last_done_epoch < latest_verified_epoch_id { + match run_batch_epoch_update_job( + db_manager.clone(), + get_first_slot_for_epoch(epoch_to_start_from) + + (constants::SLOTS_PER_EPOCH * constants::TARGET_BATCH_SIZE), + epoch_to_start_from, + epoch_to_end_on, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating job: {}", e); + } + }; + } } else { debug!("All reqired jobs are now queued and processing"); } diff --git a/client-rs/src/routes/dashboard.rs b/client-rs/src/routes/dashboard.rs index 692e7eb..e3028a9 100644 --- a/client-rs/src/routes/dashboard.rs +++ b/client-rs/src/routes/dashboard.rs @@ -20,6 +20,18 @@ pub async fn handle_get_dashboard(State(state): State) -> String { .parse::() .unwrap_or(0); + let latest_beacon_committee = helpers::get_sync_committee_id_by_slot(latest_beacon_slot); + + let latest_verified_committee = bankai + .starknet_client + .get_latest_committee_id(&bankai.config) + .await + .unwrap_or_default() + .to_string() + .parse::() + .unwrap_or(0) + - 1; + // Calculate success rate from database let total_jobs = db.count_total_jobs().await.unwrap_or(0); let successful_jobs = db.count_successful_jobs().await.unwrap_or(0); @@ -129,6 +141,8 @@ pub async fn handle_get_dashboard(State(state): State) -> String { create_ascii_dashboard( latest_beacon_slot, latest_verified_slot, + latest_beacon_committee, + latest_verified_committee, epoch_gap, success_rate, &avg_duration_str, @@ -144,6 +158,8 @@ pub async fn handle_get_dashboard(State(state): State) -> String { pub fn create_ascii_dashboard( latest_beacon_slot: u64, latest_verified_slot: u64, + 
latest_beacon_committee: u64, + latest_verified_committee: u64, epoch_gap: u64, success_rate: f64, avg_duration_str: &str, @@ -173,8 +189,8 @@ pub fn create_ascii_dashboard( ║ • Jobs in Progress: {jobs_in_progress:<10} ║ ║ ║ ║ Beacon Info: ║ -║ • Latest Beacon Slot: {latest_beacon_slot:<12} ║ -║ • Latest Verified Slot: {latest_verified_slot:<12} ║ +║ • Latest Beacon Slot: {latest_beacon_slot:<12} • Latest Beacon Committee: {latest_beacon_committee:<12} ║ +║ • Latest Verified Slot: {latest_verified_slot:<12} • Latest Verified Committee: {latest_verified_committee:<12} ║ ║ • Epoch Gap: {epoch_gap:<12} ║ ║ ║ ╠═══════════════════════════════════════ RECENT BATCH JOBS ══════════════════════════════════════════════════════════════════════════════════╣ @@ -195,6 +211,8 @@ pub fn create_ascii_dashboard( jobs_in_progress = jobs_in_progress, latest_beacon_slot = latest_beacon_slot, latest_verified_slot = latest_verified_slot, + latest_beacon_committee = latest_beacon_committee, + latest_verified_committee = latest_verified_committee, epoch_gap = epoch_gap, batch_display_block = batch_display, sync_committee_jobs_display_block = sync_committee_jobs_display From a75dada3c34432dc0ea38f86e1a845b71eb01f9e Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 7 Feb 2025 01:20:29 +0100 Subject: [PATCH 59/66] Enable resume --- client-rs/src/constants.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index c474e2f..74c9633 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -11,6 +11,6 @@ pub const USE_TRANSACTOR: bool = false; pub const MAX_JOB_RETRIES_COUNT: u64 = 10; pub const BEACON_CHAIN_LISTENER_ENABLED: bool = true; pub const JOBS_RETRY_ENABLED: bool = true; -pub const JOBS_RESUME_ENABLED: bool = false; +pub const JOBS_RESUME_ENABLED: bool = true; pub const RETRY_DELAY_MS: u64 = 300_0000; pub const MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS: u64 = 5; // How many skipped slots in row can be on 
Beacon Chain before we throw error From ed61ff14734f3105ac49de071087b3218a86ac7d Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 7 Feb 2025 01:57:27 +0100 Subject: [PATCH 60/66] Add periodic retry, improve display details --- client-rs/src/constants.rs | 1 + client-rs/src/daemon.rs | 29 ++++++++++++++++++---- client-rs/src/utils/database_manager.rs | 32 +++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 5 deletions(-) diff --git a/client-rs/src/constants.rs b/client-rs/src/constants.rs index 74c9633..c5e0296 100644 --- a/client-rs/src/constants.rs +++ b/client-rs/src/constants.rs @@ -14,3 +14,4 @@ pub const JOBS_RETRY_ENABLED: bool = true; pub const JOBS_RESUME_ENABLED: bool = true; pub const RETRY_DELAY_MS: u64 = 300_0000; pub const MAX_SKIPPED_SLOTS_RETRY_ATTEMPTS: u64 = 5; // How many skipped slots in row can be on Beacon Chain before we throw error +pub const JOBS_RETRY_CHECK_INTERVAL: u64 = 600; // In seconds diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 3d5882e..0e7c293 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -111,9 +111,11 @@ async fn main() -> Result<(), Box> { //let events_endpoint = format!("{}/eth/v1/events?topics=head", beacon_node_url) let db_manager_for_listener = db_manager.clone(); + let db_manager_for_watcher = db_manager.clone(); let bankai_for_listener = bankai.clone(); let tx_for_listener = tx.clone(); + let tx_for_watcher = tx.clone(); let app_state: AppState = AppState { db_manager: db_manager.clone(), @@ -319,6 +321,17 @@ async fn main() -> Result<(), Box> { }); } + // Run check and retry failed jobs periodicially + tokio::spawn(async move { + loop { + retry_failed_jobs(db_manager_for_watcher.clone(), tx_for_watcher.clone()).await; + tokio::time::sleep(std::time::Duration::from_secs( + constants::JOBS_RETRY_CHECK_INTERVAL, + )) + .await; + } + }); + // Wait for the server task to finish server_task.await?; @@ -482,7 +495,7 @@ async fn handle_beacon_chain_head_event( if 
last_sync_committee_in_progress < latest_scheduled_sync_committee { match run_sync_committee_update_job( db_manager.clone(), - latest_scheduled_sync_committee, + latest_verified_epoch_slot, tx.clone(), ) .await @@ -800,26 +813,28 @@ async fn resume_unfinished_jobs( let job_to_resume = Job { job_id, job_type: job.job_type, - job_status: job.job_status, + job_status: job.job_status.clone(), slot: Some(job.slot.to_u64().unwrap()), batch_range_begin_epoch: job.batch_range_begin_epoch.to_u64(), batch_range_end_epoch: job.batch_range_end_epoch.to_u64(), }; + let resumed_from_step = job.job_status.clone(); let tx_clone = tx.clone(); tokio::spawn(async move { match job_to_resume.job_type { JobType::SyncCommitteeUpdate => { info!( - "Resuming job {}... (sync committee update job for sync committee {})", + "Resuming job {} from step {}... (sync committee update job for sync committee {})", job_id, + resumed_from_step.to_string(), helpers::slot_to_sync_committee_id(job.slot.to_u64().unwrap()) ); } JobType::EpochBatchUpdate => { info!( - "Resuming job {}... (batch epoch update job for epochs from {} to {})", - job_id, job.batch_range_begin_epoch, job.batch_range_end_epoch + "Resuming job {} from step {}... (batch epoch update job for epochs from {} to {})", + job_id, resumed_from_step.to_string(), job.batch_range_begin_epoch, job.batch_range_end_epoch ); } } @@ -1276,6 +1291,10 @@ async fn process_job( current_status = JobStatus::WrapProofRequested; } JobStatus::WrapProofRequested => { + info!( + "[SYNC COMMITTEE JOB] Waiting for completion of Atlantic proof wrappinf job. 
QueryID: {}", + wrapping_batch_id + ); // Pool for Atlantic execution done bankai .atlantic_client diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index 98faefa..c54c801 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -848,6 +848,38 @@ impl DatabaseManager { } } + pub async fn get_recent_atlantic_queries_in_progress( + &self, + limit: i64, + ) -> Result, Box> { + let rows = self + .client + .query( + "SELECT atlantic_proof_generate_batch_id, atlantic_proof_wrapper_batch_id + FROM jobs + WHERE job_status != 'DONE' + ORDER BY slot DESC + LIMIT $1", + &[&limit], + ) + .await?; + + let jobs = rows + .into_iter() + .map(|row| { + let job = Self::map_row_to_job(row.clone()).unwrap(); + JobWithTimestamps { + job, + created_at: row.get("created_time"), + updated_at: row.get("updated_time"), + tx_hash: row.get("tx_hash"), + } + }) + .collect(); + + Ok(jobs) + } + pub async fn get_jobs_count_by_status( &self, ) -> Result, Box> { From 56e9571aa3128453ca96ccf698772cfae707227f Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 7 Feb 2025 02:02:06 +0100 Subject: [PATCH 61/66] Fix argument --- client-rs/src/daemon.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 0e7c293..3e7a43b 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -708,7 +708,8 @@ async fn run_batch_epoch_update_job( async fn run_sync_committee_update_job( db_manager: Arc, - sync_committee_id: u64, + //sync_committee_id: u64, + slot: u64, tx: mpsc::Sender, ) -> Result<(), Box> { let job_id = Uuid::new_v4(); @@ -716,9 +717,10 @@ async fn run_sync_committee_update_job( job_id: job_id.clone(), job_type: JobType::SyncCommitteeUpdate, job_status: JobStatus::Created, - slot: Some(helpers::get_first_slot_for_sync_committee( - sync_committee_id, - )), + slot: Some(slot), + // : 
Some(helpers::get_first_slot_for_sync_committee( + // sync_committee_id, + // )), batch_range_begin_epoch: None, batch_range_end_epoch: None, }; From 204df110b5bf922d4a61427ac72000e62242411b Mon Sep 17 00:00:00 2001 From: lakewik Date: Fri, 7 Feb 2025 14:58:56 +0100 Subject: [PATCH 62/66] Temporary change ASCII art --- client-rs/src/daemon.rs | 8 ++++---- client-rs/src/routes/dashboard.rs | 18 +++++++++++++----- client-rs/src/state.rs | 8 ++++++-- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 3e7a43b..42192c2 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -388,7 +388,7 @@ async fn handle_beacon_chain_head_event( .to_u64() .unwrap(); - let lowest_committee_update_slot = + let lowest_required_committee_update_slot = (latest_verified_sync_committee_id) * constants::SLOTS_PER_SYNC_COMMITTEE; let latest_verified_epoch_id = helpers::slot_to_epoch_id(latest_verified_epoch_slot); @@ -487,10 +487,10 @@ async fn handle_beacon_chain_head_event( // } // - if !(latest_verified_epoch_slot < lowest_committee_update_slot) { + if !(latest_verified_epoch_slot < lowest_required_committee_update_slot) { info!( - "Lowest committee update slot: {}", - lowest_committee_update_slot + "Lowest required committee update slot: {}", + lowest_required_committee_update_slot ); if last_sync_committee_in_progress < latest_scheduled_sync_committee { match run_sync_committee_update_job( diff --git a/client-rs/src/routes/dashboard.rs b/client-rs/src/routes/dashboard.rs index e3028a9..d82148d 100644 --- a/client-rs/src/routes/dashboard.rs +++ b/client-rs/src/routes/dashboard.rs @@ -170,13 +170,21 @@ pub fn create_ascii_dashboard( batch_display: &str, sync_committee_jobs_display: &str, ) -> String { + // ____ _ _ _ _ __ _ ___ + // | __ ) / \ | \ | | |/ / / \ |_ _| + // | _ \ / _ \ | \| | ' / / _ \ | | + // | |_) / ___ \| |\ | . 
\ / ___ \ | | + // |____/_/ \_\_| \_|_|\_/_/ \_\___| format!( r#" - ____ _ _ _ _ __ _ ___ -| __ ) / \ | \ | | |/ / / \ |_ _| -| _ \ / _ \ | \| | ' / / _ \ | | -| |_) / ___ \| |\ | . \ / ___ \ | | -|____/_/ \_\_| \_|_|\_/_/ \_\___| + _______ _______ _______ ______ _______ _________ _______ +|\ /|( ____ \( ____ )( ___ )( __ \ ( ___ )\__ __/|\ /|( ____ \ +| ) ( || ( \/| ( )|| ( ) || ( \ )| ( ) | ) ( | ) ( || ( \/ +| (___) || (__ | (____)|| | | || | ) || | | | | | | | | || (_____ +| ___ || __) | __)| | | || | | || | | | | | | | | |(_____ ) +| ( ) || ( | (\ ( | | | || | ) || | | | | | | | | | ) | +| ) ( || (____/\| ) \ \__| (___) || (__/ )| (___) | | | | (___) |/\____) | +|/ \|(_______/|/ \__/(_______)(______/ (_______) )_( (_______)\_______) ╔════════════════════════════════════════ DASHBOARD OVERVIEW ════════════════════════════════════════════════════════════════════════════════╗ ║ ║ diff --git a/client-rs/src/state.rs b/client-rs/src/state.rs index e264f54..c5fc9db 100644 --- a/client-rs/src/state.rs +++ b/client-rs/src/state.rs @@ -33,9 +33,11 @@ pub enum JobStatus { #[postgres(name = "CREATED")] Created, // Can act as queued and be picked up by worker to proccess #[postgres(name = "PROGRAM_INPUTS_PREPARED")] - StartedTraceGeneration, - #[postgres(name = "STARTED_TRACE_GENERATION")] + StartedFetchingInputs, + #[postgres(name = "STARTED_FETCHING_INPUTS")] ProgramInputsPrepared, + #[postgres(name = "STARTED_TRACE_GENERATION")] + StartedTraceGeneration, #[postgres(name = "PIE_GENERATED")] PieGenerated, #[postgres(name = "OFFCHAIN_PROOF_REQUESTED")] @@ -64,6 +66,7 @@ impl ToString for JobStatus { fn to_string(&self) -> String { match self { JobStatus::Created => "CREATED".to_string(), + JobStatus::StartedFetchingInputs => "STARTED_FETCHING_INPUTS".to_string(), JobStatus::ProgramInputsPrepared => "PROGRAM_INPUTS_PREPARED".to_string(), JobStatus::StartedTraceGeneration => "STARTED_TRACE_GENERATION".to_string(), JobStatus::PieGenerated => "PIE_GENERATED".to_string(), @@ 
-87,6 +90,7 @@ impl FromStr for JobStatus { fn from_str(s: &str) -> Result { match s { "CREATED" => Ok(JobStatus::Created), + "STARTED_FETCHING_INPUTS" => Ok(JobStatus::StartedFetchingInputs), "PROGRAM_INPUTS_PREPARED" => Ok(JobStatus::ProgramInputsPrepared), "STARTED_TRACE_GENERATION" => Ok(JobStatus::StartedTraceGeneration), "PIE_GENERATED" => Ok(JobStatus::PieGenerated), From 60f746327495e5b3f7aa9e50c5d9bc9ac8093ab8 Mon Sep 17 00:00:00 2001 From: petscheit Date: Fri, 7 Feb 2025 15:44:39 +0100 Subject: [PATCH 63/66] chore: new title --- client-rs/src/routes/dashboard.rs | 53 +++++++++++++++++++++++-------- 1 file changed, 40 insertions(+), 13 deletions(-) diff --git a/client-rs/src/routes/dashboard.rs b/client-rs/src/routes/dashboard.rs index d82148d..82fc0bd 100644 --- a/client-rs/src/routes/dashboard.rs +++ b/client-rs/src/routes/dashboard.rs @@ -170,21 +170,30 @@ pub fn create_ascii_dashboard( batch_display: &str, sync_committee_jobs_display: &str, ) -> String { - // ____ _ _ _ _ __ _ ___ - // | __ ) / \ | \ | | |/ / / \ |_ _| - // | _ \ / _ \ | \| | ' / / _ \ | | - // | |_) / ___ \| |\ | . 
\ / ___ \ | | - // |____/_/ \_\_| \_|_|\_/_/ \_\___| format!( r#" - _______ _______ _______ ______ _______ _________ _______ -|\ /|( ____ \( ____ )( ___ )( __ \ ( ___ )\__ __/|\ /|( ____ \ -| ) ( || ( \/| ( )|| ( ) || ( \ )| ( ) | ) ( | ) ( || ( \/ -| (___) || (__ | (____)|| | | || | ) || | | | | | | | | || (_____ -| ___ || __) | __)| | | || | | || | | | | | | | | |(_____ ) -| ( ) || ( | (\ ( | | | || | ) || | | | | | | | | | ) | -| ) ( || (____/\| ) \ \__| (___) || (__/ )| (___) | | | | (___) |/\____) | -|/ \|(_______/|/ \__/(_______)(______/ (_______) )_( (_______)\_______) +BBBBBBBBBBBBBBBBB kkkkkkkk iiii +B::::::::::::::::B k::::::k i::::i +B::::::BBBBBB:::::B k::::::k iiii +BB:::::B B:::::B k::::::k + B::::B B:::::B aaaaaaaaaaaaa nnnn nnnnnnnn k:::::k kkkkkkk aaaaaaaaaaaaa iiiiiii + B::::B B:::::B a::::::::::::a n:::nn::::::::nn k:::::k k:::::k a::::::::::::a i:::::i + B::::BBBBBB:::::B aaaaaaaaa:::::a n::::::::::::::nn k:::::k k:::::k aaaaaaaaa:::::a i::::i + B:::::::::::::BB a::::a nn:::::::::::::::n k:::::k k:::::k a::::a i::::i + B::::BBBBBB:::::B aaaaaaa:::::a n:::::nnnn:::::n k::::::k:::::k aaaaaaa:::::a i::::i + B::::B B:::::B aa::::::::::::a n::::n n::::n k:::::::::::k aa::::::::::::a i::::i + B::::B B:::::B a::::aaaa::::::a n::::n n::::n k:::::::::::k a::::aaaa::::::a i::::i + B::::B B:::::Ba::::a a:::::a n::::n n::::n k::::::k:::::k a::::a a:::::a i::::i +BB:::::BBBBBB::::::Ba::::a a:::::a n::::n n::::nk::::::k k:::::k a::::a a:::::a i::::::i +B:::::::::::::::::B a:::::aaaa::::::a n::::n n::::nk::::::k k:::::k a:::::aaaa::::::a i::::::i +B::::::::::::::::B a::::::::::aa:::a n::::n n::::nk::::::k k:::::k a::::::::::aa:::ai::::::i +BBBBBBBBBBBBBBBBB aaaaaaaaaa aaaa nnnnnn nnnnnnkkkkkkkk kkkkkkk aaaaaaaaaa aaaaiiiiiiii + _ _ _ _ _ + | |__ _ _ | | | | ___ _ __ ___ __| | ___ | |_ _ _ ___ + | '_ \| | | | | |_| |/ _ \ '__/ _ \ / _` |/ _ \| __| | | / __| + | |_) | |_| | | _ | __/ | | (_) | (_| | (_) | |_| |_| \__ \ + |_.__/ \__, | |_| |_|\___|_| \___/ 
\__,_|\___/ \__|\__,_|___/ + |___/ ╔════════════════════════════════════════ DASHBOARD OVERVIEW ════════════════════════════════════════════════════════════════════════════════╗ ║ ║ @@ -210,6 +219,24 @@ pub fn create_ascii_dashboard( ║ ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ {sync_committee_jobs_display_block} ╚════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╝ + + ____ _ +| _ \ _____ _____ _ __ ___ __| | +| |_) / _ \ \ /\ / / _ \ '__/ _ \/ _` | +| __/ (_) \ V V / __/ | | __/ (_| | +|_| \___/ \_/\_/ \___|_| \___|\__,_| + _ + | |__ _ _ + | '_ \| | | | + | |_) | |_| | + |_.__/ \__, | + |___/ + ____ + / ___| __ _ _ __ __ _ __ _ __ _ _ _ + | | _ / _` | '__/ _` |/ _` |/ _` | ( \/ ) + | |_| | (_| | | | (_| | (_| | (_| | \ / + \____|\__,_|_| \__,_|\__, |\__,_| \/ + |___/ "#, daemon_status = daemon_status, db_status = db_status, From de3fa527a67780058f821c81e5180d9c8b20aaac Mon Sep 17 00:00:00 2001 From: lakewik Date: Sat, 8 Feb 2025 11:31:56 +0100 Subject: [PATCH 64/66] debug 1 --- client-rs/src/daemon.rs | 31 +++++++++++--------- client-rs/src/routes/dashboard.rs | 48 +++++++++++++++---------------- 2 files changed, 41 insertions(+), 38 deletions(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index 42192c2..fba85ae 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -463,8 +463,8 @@ async fn handle_beacon_chain_head_event( } info!( - "Current state: Beacon Chain: [Slot: {} Epoch: {} Sync Committee: {}] | Latest verified: [Slot: {} Epoch: {} Sync Committee: {}] | Latest in progress: [Epoch: {} Sync Committee: {}] | Sync in progress...", - parsed_event.slot, current_epoch_id, current_sync_committee_id, latest_verified_epoch_slot, latest_verified_epoch_id, latest_verified_sync_committee_id, last_epoch_in_progress, last_sync_committee_in_progress + 
"Current state: Beacon Chain: [Slot: {} Epoch: {} Sync Committee: {}] | Latest verified: [Slot: {} Epoch: {} Sync Committee: {}] | Latest in progress: [Epoch: {} Sync Committee: {}] | Latest done: [Epoch: {} Sync Committee: {}] | Sync in progress...", + parsed_event.slot, current_epoch_id, current_sync_committee_id, latest_verified_epoch_slot, latest_verified_epoch_id, latest_verified_sync_committee_id, last_epoch_in_progress, last_sync_committee_in_progress, last_done_epoch, last_done_sync_committee ); // Decide basing on actual state @@ -493,18 +493,21 @@ async fn handle_beacon_chain_head_event( lowest_required_committee_update_slot ); if last_sync_committee_in_progress < latest_scheduled_sync_committee { - match run_sync_committee_update_job( - db_manager.clone(), - latest_verified_epoch_slot, - tx.clone(), - ) - .await - { - Ok(()) => {} - Err(e) => { - error!("Error while creating sync committee update job: {}", e); - } - }; + if last_done_sync_committee < latest_scheduled_sync_committee { + // This last check because the delay of data from sequencer update after verification onchain + match run_sync_committee_update_job( + db_manager.clone(), + latest_verified_epoch_slot, + tx.clone(), + ) + .await + { + Ok(()) => {} + Err(e) => { + error!("Error while creating sync committee update job: {}", e); + } + }; + } } } diff --git a/client-rs/src/routes/dashboard.rs b/client-rs/src/routes/dashboard.rs index 82fc0bd..17fdaea 100644 --- a/client-rs/src/routes/dashboard.rs +++ b/client-rs/src/routes/dashboard.rs @@ -172,28 +172,28 @@ pub fn create_ascii_dashboard( ) -> String { format!( r#" -BBBBBBBBBBBBBBBBB kkkkkkkk iiii -B::::::::::::::::B k::::::k i::::i -B::::::BBBBBB:::::B k::::::k iiii -BB:::::B B:::::B k::::::k - B::::B B:::::B aaaaaaaaaaaaa nnnn nnnnnnnn k:::::k kkkkkkk aaaaaaaaaaaaa iiiiiii - B::::B B:::::B a::::::::::::a n:::nn::::::::nn k:::::k k:::::k a::::::::::::a i:::::i - B::::BBBBBB:::::B aaaaaaaaa:::::a n::::::::::::::nn k:::::k k:::::k 
aaaaaaaaa:::::a i::::i - B:::::::::::::BB a::::a nn:::::::::::::::n k:::::k k:::::k a::::a i::::i - B::::BBBBBB:::::B aaaaaaa:::::a n:::::nnnn:::::n k::::::k:::::k aaaaaaa:::::a i::::i - B::::B B:::::B aa::::::::::::a n::::n n::::n k:::::::::::k aa::::::::::::a i::::i - B::::B B:::::B a::::aaaa::::::a n::::n n::::n k:::::::::::k a::::aaaa::::::a i::::i - B::::B B:::::Ba::::a a:::::a n::::n n::::n k::::::k:::::k a::::a a:::::a i::::i +BBBBBBBBBBBBBBBBB kkkkkkkk iiii +B::::::::::::::::B k::::::k i::::i +B::::::BBBBBB:::::B k::::::k iiii +BB:::::B B:::::B k::::::k + B::::B B:::::B aaaaaaaaaaaaa nnnn nnnnnnnn k:::::k kkkkkkk aaaaaaaaaaaaa iiiiiii + B::::B B:::::B a::::::::::::a n:::nn::::::::nn k:::::k k:::::k a::::::::::::a i:::::i + B::::BBBBBB:::::B aaaaaaaaa:::::a n::::::::::::::nn k:::::k k:::::k aaaaaaaaa:::::a i::::i + B:::::::::::::BB a::::a nn:::::::::::::::n k:::::k k:::::k a::::a i::::i + B::::BBBBBB:::::B aaaaaaa:::::a n:::::nnnn:::::n k::::::k:::::k aaaaaaa:::::a i::::i + B::::B B:::::B aa::::::::::::a n::::n n::::n k:::::::::::k aa::::::::::::a i::::i + B::::B B:::::B a::::aaaa::::::a n::::n n::::n k:::::::::::k a::::aaaa::::::a i::::i + B::::B B:::::Ba::::a a:::::a n::::n n::::n k::::::k:::::k a::::a a:::::a i::::i BB:::::BBBBBB::::::Ba::::a a:::::a n::::n n::::nk::::::k k:::::k a::::a a:::::a i::::::i B:::::::::::::::::B a:::::aaaa::::::a n::::n n::::nk::::::k k:::::k a:::::aaaa::::::a i::::::i B::::::::::::::::B a::::::::::aa:::a n::::n n::::nk::::::k k:::::k a::::::::::aa:::ai::::::i BBBBBBBBBBBBBBBBB aaaaaaaaaa aaaa nnnnnn nnnnnnkkkkkkkk kkkkkkk aaaaaaaaaa aaaaiiiiiiii - _ _ _ _ _ - | |__ _ _ | | | | ___ _ __ ___ __| | ___ | |_ _ _ ___ + _ _ _ _ _ + | |__ _ _ | | | | ___ _ __ ___ __| | ___ | |_ _ _ ___ | '_ \| | | | | |_| |/ _ \ '__/ _ \ / _` |/ _ \| __| | | / __| | |_) | |_| | | _ | __/ | | (_) | (_| | (_) | |_| |_| \__ \ |_.__/ \__, | |_| |_|\___|_| \___/ \__,_|\___/ \__|\__,_|___/ - |___/ + |___/ ╔════════════════════════════════════════ DASHBOARD 
OVERVIEW ════════════════════════════════════════════════════════════════════════════════╗ ║ ║ @@ -220,23 +220,23 @@ BBBBBBBBBBBBBBBBB aaaaaaaaaa aaaa nnnnnn nnnnnnkkkkkkkk kkkkkkk aaa {sync_committee_jobs_display_block} ╚════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╝ - ____ _ + ____ _ | _ \ _____ _____ _ __ ___ __| | | |_) / _ \ \ /\ / / _ \ '__/ _ \/ _` | | __/ (_) \ V V / __/ | | __/ (_| | |_| \___/ \_/\_/ \___|_| \___|\__,_| - _ - | |__ _ _ + _ + | |__ _ _ | '_ \| | | | | |_) | |_| | |_.__/ \__, | |___/ - ____ - / ___| __ _ _ __ __ _ __ _ __ _ _ _ - | | _ / _` | '__/ _` |/ _` |/ _` | ( \/ ) - | |_| | (_| | | | (_| | (_| | (_| | \ / - \____|\__,_|_| \__,_|\__, |\__,_| \/ - |___/ + ____ + / ___| __ _ _ __ __ _ __ _ __ _ _ _ + | | _ / _` | '__/ _` |/ _` |/ _` | ( \/ ) + | |_| | (_| | | | (_| | (_| | (_| | \ / + \____|\__,_|_| \__,_|\__, |\__,_| \/ + |___/ "#, daemon_status = daemon_status, db_status = db_status, From dc0624b1b520813b258bb543947539105e3b5f1e Mon Sep 17 00:00:00 2001 From: lakewik Date: Sat, 8 Feb 2025 11:38:33 +0100 Subject: [PATCH 65/66] debug 2 --- client-rs/src/daemon.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client-rs/src/daemon.rs b/client-rs/src/daemon.rs index fba85ae..9c3a259 100644 --- a/client-rs/src/daemon.rs +++ b/client-rs/src/daemon.rs @@ -585,7 +585,7 @@ async fn handle_beacon_chain_head_event( // ); // // Mitigate the issue when Starknet Sequencer RPC responds about last verified slot with delay - if last_done_epoch < latest_verified_epoch_id { + if last_done_epoch < epoch_to_start_from { match run_batch_epoch_update_job( db_manager.clone(), get_first_slot_for_epoch(epoch_to_start_from) From 7466b5a4dc0780fb97d0a78c62743bf8a02fb74b Mon Sep 17 00:00:00 2001 From: lakewik Date: Sat, 8 Feb 2025 12:01:23 +0100 Subject: [PATCH 66/66] Add timestamps to dashboard --- client-rs/src/routes/dashboard.rs | 49 
+++++++++++++------------ client-rs/src/utils/database_manager.rs | 8 ++-- 2 files changed, 29 insertions(+), 28 deletions(-) diff --git a/client-rs/src/routes/dashboard.rs b/client-rs/src/routes/dashboard.rs index 17fdaea..1b04474 100644 --- a/client-rs/src/routes/dashboard.rs +++ b/client-rs/src/routes/dashboard.rs @@ -59,7 +59,7 @@ pub async fn handle_get_dashboard(State(state): State) -> String { .iter() .map(|entry| { format!( - "║ Batch {:}: {} -> {} [{}] {:<32} {:<66} ║", + "║ Batch {:}: {} -> {} [{}] {:<32} {:<66} {} ║", entry.job.job_uuid.to_string()[..8].to_string(), entry.job.batch_range_begin_epoch, entry.job.batch_range_end_epoch, @@ -73,6 +73,7 @@ pub async fn handle_get_dashboard(State(state): State) -> String { .tx_hash .as_ref() .map_or("-".to_string(), |s| s.clone()), + entry.updated_at ) }) .collect::>() @@ -96,7 +97,7 @@ pub async fn handle_get_dashboard(State(state): State) -> String { .iter() .map(|entry| { format!( - "║ Batch {:}: {} {} [{}] {:<32} {:<66} ║", + "║ Batch {:}: {} {} [{}] {:<32} {:<66} {} ║", entry.job.job_uuid.to_string()[..8].to_string(), entry.job.slot, helpers::get_sync_committee_id_by_slot(entry.job.slot.to_u64().unwrap()), @@ -110,6 +111,7 @@ pub async fn handle_get_dashboard(State(state): State) -> String { .tx_hash .as_ref() .map_or("-".to_string(), |s| s.clone()), + entry.updated_at ) }) .collect::>() @@ -195,30 +197,29 @@ BBBBBBBBBBBBBBBBB aaaaaaaaaa aaaa nnnnnn nnnnnnkkkkkkkk kkkkkkk aaa |_.__/ \__, | |_| |_|\___|_| \___/ \__,_|\___/ \__|\__,_|___/ |___/ -╔════════════════════════════════════════ DASHBOARD OVERVIEW ════════════════════════════════════════════════════════════════════════════════╗ -║ ║ -║ Statuses: ║ -║ • Daemon: {daemon_status:<12} • Database: {db_status:<12} • Beacon: {beacon_status:<12} ║ -║ ║ -║ Metrics: ║ -║ • Success Rate: {success_rate:<10} ║ -║ • Average Duration: {avg_duration:<10} ║ -║ • Jobs in Progress: {jobs_in_progress:<10} ║ -║ ║ -║ Beacon Info: ║ -║ • Latest Beacon Slot: 
{latest_beacon_slot:<12} • Latest Beacon Committee: {latest_beacon_committee:<12} ║ -║ • Latest Verified Slot: {latest_verified_slot:<12} • Latest Verified Committee: {latest_verified_committee:<12} ║ -║ • Epoch Gap: {epoch_gap:<12} ║ -║ ║ -╠═══════════════════════════════════════ RECENT BATCH JOBS ══════════════════════════════════════════════════════════════════════════════════╣ -║ UUID: FROM: TO: STATUS: TX: ║ -║ ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ +╔════════════════════════════════════════ DASHBOARD OVERVIEW ══════════════════════════════════════════════════════════════════════════════════════════════════════╗ +║ ║ +║ • Daemon: {daemon_status:<12} • Database: {db_status:<12} • Beacon: {beacon_status:<12} ║ +║ ║ +║ Metrics: ║ +║ • Success Rate: {success_rate:<10} ║ +║ • Average Duration: {avg_duration:<10} ║ +║ • Jobs in Progress: {jobs_in_progress:<10} ║ +║ ║ +║ Beacon Info: ║ +║ • Latest Beacon Slot: {latest_beacon_slot:<12} • Latest Beacon Committee: {latest_beacon_committee:<12} ║ +║ • Latest Verified Slot: {latest_verified_slot:<12} • Latest Verified Committee: {latest_verified_committee:<12} ║ +║ • Epoch Gap: {epoch_gap:<12} ║ +║ ║ +╠═══════════════════════════════════════ RECENT BATCH JOBS ════════════════════════════════════════════════════════════════════════════════════════════════════════╣ +║ UUID: FROM: TO: STATUS: TX: TIMESTAMP: ║ +║ ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ {batch_display_block} -╠══════════════════════════════ RECENT SYNC COMMITTEE JOBS ═══════════════════════════════════════════════════════════════════════════════╣ -║ UUID: SLOT: COMMITTEE: STATUS: TX: ║ -║ ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ 
+╠══════════════════════════════ RECENT SYNC COMMITTEE JOBS ═════════════════════════════════════════════════════════════════════════════════════════════════════╣ +║ UUID: SLOT: COMMITTEE: STATUS: TX: TIMESTAMP: ║ +║ ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── ║ {sync_committee_jobs_display_block} -╚════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╝ +╚══════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════════╝ ____ _ | _ \ _____ _____ _ __ ___ __| | diff --git a/client-rs/src/utils/database_manager.rs b/client-rs/src/utils/database_manager.rs index c54c801..48ca706 100644 --- a/client-rs/src/utils/database_manager.rs +++ b/client-rs/src/utils/database_manager.rs @@ -781,8 +781,8 @@ impl DatabaseManager { .client .query( "SELECT *, - to_char(created_at, 'HH24:MI:SS') as created_time, - to_char(updated_at, 'HH24:MI:SS') as updated_time + to_char(created_at, 'YYYY-MM-DD HH24:MI:SS') as created_time, + to_char(updated_at, 'YYYY-MM-DD HH24:MI:SS') as updated_time FROM jobs WHERE type = 'EPOCH_BATCH_UPDATE' ORDER BY batch_range_begin_epoch DESC @@ -815,8 +815,8 @@ impl DatabaseManager { .client .query( "SELECT *, - to_char(created_at, 'HH24:MI:SS') as created_time, - to_char(updated_at, 'HH24:MI:SS') as updated_time + to_char(created_at, 'YYYY-MM-DD HH24:MI:SS') as created_time, + to_char(updated_at, 'YYYY-MM-DD HH24:MI:SS') as updated_time FROM jobs WHERE type = 'SYNC_COMMITTEE_UPDATE' ORDER BY slot DESC