diff --git a/.config/dictionaries/project.dic b/.config/dictionaries/project.dic index 845ac0b9b..b2a8b2686 100644 --- a/.config/dictionaries/project.dic +++ b/.config/dictionaries/project.dic @@ -1,11 +1,14 @@ aarch +abcz abnf ABNF addrr adminer +alpn apskhem asyncio auditability +Autoupdate backpressure bindgen bkioshn @@ -15,6 +18,8 @@ blosc bmac bootstrapper BROTLI +bufread +bytesize cantopen cardano cbor @@ -22,8 +27,8 @@ CBOR cbork cdylib CEST -CHAINCODE chaincode +CHAINCODE chainsync chrono ciphertext @@ -36,12 +41,12 @@ crontagged cstring dalek dashmap -dashmap -Datelike Datelike DBSTATUS dbsync dcbor +ddup +Decompressor delegators dockerhub dotenv @@ -52,6 +57,7 @@ dreps Earthfile encryptor Errno +etype excalidraw fadvise fcntl @@ -62,6 +68,7 @@ filestat filestorage filesystems fkey +fmmap fmtchk fmtfix fontawesome @@ -76,36 +83,48 @@ gmtime gossipsub happ hardano +hardlink hasher +hexdigit highwater hmod +humansize +humantime ideascale idents -ipfs -ipld IFMT Intellij ioerr iohk ipfs +ipld jetbrains -jsonschema jorm jormungandr Jörmungandr +jsonschema lcov Leshiy -libsqlite -libtest libipld libp2p +libsqlite +libtest linkat lintfix localizable +logcall lookaside maindbname +mapref +Mbits mdlint mdns +memeq +memx +metadatum +Metadatum +mimalloc +minicbor miniprotocol miniprotocols mithril @@ -113,7 +132,9 @@ mitigations mkcron mkdelay mkdirat +mmap moderations +MPMC Multiaddr multiera nanos @@ -146,10 +167,15 @@ pubspec pwrite qpsg rapidoc +Rawhash readlinkat +redb redoc REMOVEDIR renameat +repr +Repr +reqwest retriggering rulelist rulename @@ -159,6 +185,7 @@ rustdoc rustdocflags rustflags rustfmt +rustls rustyline saibatizoku sandboxed @@ -166,11 +193,15 @@ scanorder scanstatus Sched seckey +SIGNDATA +skiplist slotno smac stevenj stringzilla +subdir subsec +surrealkv symlinkat syscall tacho @@ -185,25 +216,34 @@ tinygo toobig toolsets Traceback +txmonitor txns typenum unfinalized unixfs unlinkat +untar upnp +ureq utimensat vitss +vkey +VKEYS +vkeywitness 
voteplan voteplans +wallclock wasi -wasip WASI +wasip wasmtime webasm webassembly +webpki WORKDIR +wtxn xprivate -XPRV xprv +XPRV xpub yoroi diff --git a/.gitignore b/.gitignore index 5d44027bf..015d61edd 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +### Secrets +**/.secret + ### Linux ### *~ diff --git a/.secret.template b/.secret.template new file mode 100644 index 000000000..072b7c4f2 --- /dev/null +++ b/.secret.template @@ -0,0 +1 @@ +GITHUB_TOKEN=Make One at https://github.com/settings/tokens only need public repo, read packages permissions diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 9fff89180..fdc5b1d78 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -14,7 +14,6 @@ "tamasfe.even-better-toml", "rust-lang.rust-analyzer", "JScearcy.rust-doc-viewer", - "serayuzgur.crates", "bierner.markdown-checkbox", "bierner.markdown-emoji", "bierner.markdown-footnotes", @@ -24,5 +23,6 @@ "foxundermoon.shell-format", "dtsvet.vscode-wasm", "terrastruct.d2", + "fill-labs.dependi", ] } \ No newline at end of file diff --git a/hermes/Cargo.toml b/hermes/Cargo.toml index 3254e3f41..84ae44fac 100644 --- a/hermes/Cargo.toml +++ b/hermes/Cargo.toml @@ -1,17 +1,11 @@ [workspace] resolver = "2" -members = [ - "bin", - "crates/cardano-chain-follower", - "crates/hermes-ipfs", -] +members = ["bin", "crates/cardano-chain-follower", "crates/hermes-ipfs"] [workspace.package] edition = "2021" version = "0.0.1" -authors = [ - "Steven Johnson " -] +authors = ["Steven Johnson "] homepage = "https://input-output-hk.github.io/hermes" repository = "https://github.com/input-output-hk/hermes" license = "MIT OR Apache-2.0" @@ -34,7 +28,7 @@ bare_urls = "deny" unescaped_backticks = "deny" [workspace.lints.clippy] -pedantic = "deny" +pedantic = {level = "deny", priority = -1 } unwrap_used = "deny" expect_used = "deny" exit = "deny" @@ -52,61 +46,127 @@ missing_docs_in_private_items = "deny" [workspace.dependencies] # specific commit from the 
`catalyst` branch -pallas = { git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "709acb19c52c6b789279ecc4bc8793b5d8b5abe9", version = "0.25.0" } +pallas = "0.30.1" # specific commit from the `catalyst` branch -pallas-hardano = { git = "https://github.com/input-output-hk/catalyst-pallas.git", rev = "709acb19c52c6b789279ecc4bc8793b5d8b5abe9", version = "0.25.0" } - -cardano-chain-follower = { path = "crates/cardano-chain-follower", version = "0.0.1" } +pallas-hardano = "0.30.1" +pallas-crypto = "0.30.1" +cardano-chain-follower = { path = "crates/cardano-chain-follower", version = "0.2.0" } hermes-ipfs = { path = "crates/hermes-ipfs", version = "0.0.1" } -wasmtime = "20.0.2" +wasmtime = "24.0.0" rusty_ulid = "2.0.0" -anyhow = "1.0.71" +anyhow = "1.0.86" +blake2b_simd = "1.0.2" +blake3 = { version = "1.5.4", features = ["rayon", "mmap"] } hex-literal = "0.4.1" -thiserror = "1.0.56" +thiserror = "1.0.63" hex = "0.4.3" tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +tracing-log = "0.2.0" +test-log = { version = "0.2.16", default-features = false, features = [ + "trace", +] } criterion = "0.5.1" libtest-mimic = "0.7.0" -crossbeam-queue = "0.3.11" bip39 = "2.0.0" iana-time-zone = "0.1.60" rand = "0.8.5" -bip32 = "0.5.1" +bip32 = "0.5.2" ed25519-bip32 = "0.4.1" dashmap = "6.0.1" once_cell = "1.19.0" -clap = "4.5.3" -build-info = "0.0.37" -build-info-build = "0.0.37" -derive_more = "0.99.17" -chrono = "0.4.35" +clap = "4.5.16" +build-info = "0.0.38" +build-info-build = "0.0.38" +derive_more = "1.0.0" +chrono = "0.4.38" chrono-tz = "0.9.0" saffron = "0.1.0" -tokio = "1.36.0" -libsqlite3-sys = "0.29.0" -stringzilla = "3.8.4" +tokio = { version = "1.39.3", features = [ + "macros", + "rt", + "net", + "rt-multi-thread", + "process", +] } +tokio-util = { version = "0.7.11", features = ["codec"] } +tokio-stream = "0.1.15" +libsqlite3-sys = "0.30.0" +stringzilla = "3.9.3" serial_test = { version = "3.1.1", features 
= ["file_locks"] } +hdf5 = { git = "https://github.com/aldanor/hdf5-rust.git", rev = "694e900972fbf5ffbdd1a2294f57a2cc3a91c994", version = "0.8.1", features = [ + "static", + "blosc", +] } temp-dir = "0.1.13" -hdf5 = { git="https://github.com/aldanor/hdf5-rust.git", rev="694e900972fbf5ffbdd1a2294f57a2cc3a91c994", version="0.8.1", features = [ "static", "blosc" ]} # needs to enable blosc compression functionality for hdf5 crate -blosc-src = { version = "0.3.0", features = ["lz4", "zlib", "zstd"] } +blosc-src = { version = "0.3.4", features = ["lz4", "zlib", "zstd"] } num_cpus = "1.16.0" console = "0.15.8" -serde = "1.0" -serde_json = "1.0" -jsonschema = "0.18.0" +serde = "1.0.209" +serde_json = "1.0.127" +jsonschema = "0.18.1" +url = "2.5.0" +regex = "1.10.4" +humansize = { version = "^2.1.2", default-features = false, features = [ + "no_alloc", +] } +reqwest = { version = "0.12.7, >=0.0.0", default-features = false, features = [ + "rustls-tls-native-roots", + "http2", + "blocking", + "charset", + "gzip", + "brotli", + "zstd", + "deflate", + "hickory-dns", + "macos-system-configuration", +] } +mithril-client = { git = "https://github.com/input-output-hk/catalyst-mithril.git", branch = "fix/lgpl-licence", default-features = false, features = [ + "full", + "num-integer-backend" +] } +mimalloc = "0.1.43" +bytes = "1.7.1" +tar = "0.4.41" +zstd = "0.13.2" +async-trait = "0.1.81" +dirs = "5.0.1" +futures = "0.3.30" +bytesize = "1.3.0" +async-compression = { version = "0.4.11", features = ["tokio", "zstd"] } +tokio-tar = "0.3.1" +humantime = "2.1.0" +crossbeam-queue = "0.3.11" +crossbeam-skiplist = "0.1.3" +crossbeam-channel = "0.5.13" +crossbeam-epoch = "0.9.18" +strum = "0.26.3" +strum_macros = "0.26.4" +rayon = "1.10.0" +ignore = "0.4.22" hmac = "0.12.1" pbkdf2 = "0.12.2" -blake2b_simd = "1.0.2" sha2 = "0.10" ed25519-dalek = "2.1.1" x509-cert = "0.2.5" -coset = "0.3.7" +coset = "0.3.8" libipld = "0.16.0" -libp2p = "0.53.2" -rust-ipfs = "0.11.19" 
-rustyline-async = "0.4.2" -dirs = "5.0.1" +libp2p = "0.54.1" +rust-ipfs = "0.11.21" +rustyline-async = "0.4.3" +ouroboros = "0.18.4" +memx = "0.1.32" +fmmap = {version = "0.3.3", features = ["sync", "tokio-async"]} +minicbor = {version = "0.24.2", features = ["alloc", "derive", "half"]} +brotli = "6.0.0" +c509-certificate = { git = "https://github.com/input-output-hk/catalyst-voices.git", package = "c509-certificate", branch = "fix/c509-cleanup"} +num-traits = "0.2.19" +logcall = "0.1.9" +ureq = {version = "2.10.1", features=["native-certs"]} +http = "1.1.0" +hickory-resolver = { version = "0.24.1", features = ["dns-over-rustls"] } +moka = { version = "0.12.8", features = ["sync"]} diff --git a/hermes/Earthfile b/hermes/Earthfile index 23eeed4eb..843289e8b 100644 --- a/hermes/Earthfile +++ b/hermes/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.1.8 AS rust-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/faster-rust-tool-install AS rust-ci # Use when debugging cat-ci locally. # IMPORT ../../catalyst-ci/earthly/rust AS rust-ci @@ -16,7 +16,12 @@ builder: RUN mkdir /wasm COPY --dir wasm+wasi-src/wasi /wasm/wasi - # Compiled WASM component for benchmarks + +# benchmark-builder : Builder that includes built WASM Modules for benchmarks to run against. +benchmark-builder: + FROM +builder + + # Compiled WASM component for benchmarks - Should not be in the base builder. COPY stub-module+build/stub.wasm /wasm/stub-module/stub.wasm # bindings-expand : Expands `wasmtime::bindgen!` macro into the `bindings.rs` file @@ -51,7 +56,7 @@ all-hosts-check: # build : Run build using the most efficient host tooling # CI Automated Entry point. build: - FROM +builder + FROM +benchmark-builder # Directory where WASM test components go when we run wasm module integration tests. 
RUN mkdir ../wasm/test-components @@ -92,6 +97,27 @@ test-wasm-integration: RUN cargo test --release --test wasm-component-integration-tests -- --test RUN cargo test --release --test wasm-component-integration-tests -- --bench +# test-download-chain - Test on downloading a mithril snapshot from the preprod chain without syncing. +test-download-mithril-snapshot: + FROM +build + + # Install deps for compilation. + RUN apt-get update && apt-get install -y m4 + + RUN cargo build -r --package cardano-chain-follower --example follow_chains --features mimalloc + + # Install deps for the binary. + RUN apt-get update \ + && apt-get install -y time aria2 + + # Initialize a private directory to store downloaded snapshot data. + CACHE --sharing private --chmod 0666 --id mithril-dl /root/.local/share/follow_chains + + RUN RUST_LOG="error,follow_chains=debug,cardano_chain_follower=debug,mithril-client=debug" \ + time -f "\nReal Time: %E\nUser Time: %U\nSystem Time: %S" \ + ./target/release/examples/follow_chains \ + --preprod --stop-at-tip --halt-on-error \ + && echo "Mithril Snapshot download succeeded." # all-hosts-build : Test which runs check with all supported host tooling. # Needs qemu or rosetta to run. @@ -117,3 +143,7 @@ json-schemas: COPY --dir schemas . SAVE ARTIFACT schemas schemas + +show-build-dependency-tree: + LOCALLY + RUN cargo tree -e normal,build -f "{p}[{f}]" \ No newline at end of file diff --git a/hermes/Justfile b/hermes/Justfile new file mode 100644 index 000000000..9150a37e6 --- /dev/null +++ b/hermes/Justfile @@ -0,0 +1,59 @@ +# use with https://github.com/casey/just +# +# Hermes developer convenience functions + +# cspell: words prereqs, commitlog, rustls, nocapture + +default: + @just --list --unsorted + +# Show the dependency tree and all enabled feature flags of every crate. 
+cargo-tree: + cargo tree -e features,normal,build -f "{p}[{f}]" --workspace --frozen + +# Check Dependency licenses and CVE's +license-check: + cargo deny check --exclude-dev + +# Run long running developer test for mithril downloading. +test-mithril-download: + RUST_LOG="error,cardano_chain_follower=debug,turbo-downloader=debug,mithril-client=debug,pallas_hardano=error,h2=error,hickory_resolver=error,hickory_proto=error,rustls=error" \ + cargo test follow::tests::test_follow_preprod -- --show-output --ignored --nocapture + +# Format the rust code +code_format: + cargo +nightly fmtfix + +# Run long running developer test for mithril downloading. +run-mithril-download-example-preprod: code_format + cargo build -r --package cardano-chain-follower --example follow_chains --features mimalloc + RUST_LOG="error,follow_chains=debug,cardano_chain_follower=debug,mithril-client=debug" \ + ./target/release/examples/follow_chains --preprod + +run-mithril-download-example-preprod-high-dl-bandwidth: code_format + cargo build -r --package cardano-chain-follower --example follow_chains --features mimalloc + RUST_LOG="error,follow_chains=debug,cardano_chain_follower=debug,mithril-client=debug" \ + ./target/release/examples/follow_chains --preprod --mithril-sync-workers 64 --mithril-sync-chunk-size 16 --mithril-sync-queue-ahead=6 + +run-mithril-download-example-preprod-conservastive-dl-bandwidth: code_format + cargo build -r --package cardano-chain-follower --example follow_chains --features mimalloc + RUST_LOG="error,follow_chains=debug,cardano_chain_follower=debug,mithril-client=debug" \ + ./target/release/examples/follow_chains --preprod --mithril-sync-workers 8 --mithril-sync-chunk-size 1 --mithril-sync-queue-ahead=2 + +run-mithril-download-example-preview: code_format + cargo build -r --package cardano-chain-follower --example follow_chains --features mimalloc + RUST_LOG="error,follow_chains=debug,cardano_chain_follower=debug,mithril-client=debug" \ + 
./target/release/examples/follow_chains --preview + +# Run long running developer test for mithril downloading. +run-mithril-download-example-mainnet: code_format + cargo build -r --package cardano-chain-follower --example follow_chains --features mimalloc + RUST_LOG="error,follow_chains=debug,cardano_chain_follower=debug,mithril-client=debug" \ + ./target/release/examples/follow_chains --mainnet + +# Run long running developer test for mithril downloading. +debug-heap-mithril-download-example: + cargo build --package cardano-chain-follower --example follow_chains + RUST_LOG="error,follow_chains=debug,cardano_chain_follower=debug,mithril-client=debug" \ + heaptrack ./target/debug/examples/follow_chains --preprod + diff --git a/hermes/bin/src/runtime_extensions/hermes/cardano/chain_follower_task.rs b/hermes/bin/src/runtime_extensions/hermes/cardano/chain_follower_task.rs index 01f8a9edd..977c3c73b 100644 --- a/hermes/bin/src/runtime_extensions/hermes/cardano/chain_follower_task.rs +++ b/hermes/bin/src/runtime_extensions/hermes/cardano/chain_follower_task.rs @@ -4,13 +4,18 @@ use std::time::Duration; use anyhow::Context; +use cardano_chain_follower::ChainUpdate; +use pallas::ledger::traverse::{wellknown::GenesisValues, MultiEraBlock, MultiEraTx}; use tracing::{error, instrument, trace, warn}; use super::{ModuleStateKey, Result, STATE}; use crate::{ app::HermesAppName, event::{HermesEvent, TargetApp, TargetModule}, - runtime_extensions::bindings::hermes::cardano::api::{BlockSrc, CardanoBlockchainId}, + runtime_extensions::bindings::{ + hermes::cardano::api::{BlockDetail, BlockSrc, CardanoBlockchainId}, + wasi::clocks::wall_clock::Datetime, + }, wasm::module::ModuleId, }; @@ -18,8 +23,6 @@ use crate::{ struct EventSubscriptions { /// Whether the module is subscribed to block events. blocks: bool, - /// Whether the module is subscribed to rollback events. - rollbacks: bool, /// Whether the module is subscribed to transaction events. 
txns: bool, } @@ -210,21 +213,64 @@ fn process_chain_update( chain_update: cardano_chain_follower::ChainUpdate, module_state_key: &ModuleStateKey, chain_id: CardanoBlockchainId, event_subscriptions: &EventSubscriptions, ) -> anyhow::Result { - match chain_update { - cardano_chain_follower::ChainUpdate::Block(block_data) => { - process_block_chain_update(module_state_key, chain_id, block_data, event_subscriptions) - .context("Processing block chain update") + let (block_data, immutable, rollback, tip, context) = match chain_update { + ChainUpdate::ImmutableBlock(block_data) => { + ( + block_data, + true, + false, + false, // There are always live blocks in front of immutable ones. + "Processing block chain update (Immutable)", + ) }, - cardano_chain_follower::ChainUpdate::Rollback(block_data) => { - process_rollback_chain_update( - module_state_key, - chain_id, - &block_data, - event_subscriptions, + ChainUpdate::ImmutableBlockRollback(block_data) => { + ( + block_data, + true, + true, + false, // There are always live blocks in front of immutable ones. + "Processing block chain update (Immutable Rollback)", ) - .context("Processing rollback chain update") }, - } + ChainUpdate::Block(block_data) => { + ( + block_data, + false, + false, + false, + "Processing block chain update (Live Block)", + ) + }, + ChainUpdate::BlockTip(block_data) => { + ( + block_data, + false, + false, + true, + "Processing block chain update (Live Block @ Tip)", + ) + }, + ChainUpdate::Rollback(rollback_data) => { + ( + rollback_data, + false, + true, + false, // By definition there are always blocks in front of a rollback. + "Processing rollback chain update", + ) + }, + }; + + process_block_chain_update( + module_state_key, + chain_id, + &block_data, + event_subscriptions, + immutable, + rollback, + tip, + ) + .context(context) } /// Processes a block chain update. @@ -233,21 +279,46 @@ fn process_chain_update( /// Event Queue. 
fn process_block_chain_update( module_state_key: &ModuleStateKey, chain_id: CardanoBlockchainId, - block_data: cardano_chain_follower::MultiEraBlockData, - event_subscriptions: &EventSubscriptions, + block_data: &cardano_chain_follower::MultiEraBlockData, + event_subscriptions: &EventSubscriptions, immutable: bool, rollback: bool, tip: bool, ) -> anyhow::Result { - let decoded_block_data = block_data.decode().context("Decode block")?; - + let decoded_block_data = block_data.decode(); let block_number = decoded_block_data.number(); - let slot = decoded_block_data.slot(); + // We send block data first. + if event_subscriptions.blocks { + build_and_send_block_event( + module_state_key, + chain_id, + block_data, + &decoded_block_data, + immutable, + rollback, + tip, + ) + .context("Sending Cardano block event to Event Queue")?; + + trace!(block_number, "Generated Cardano block event"); + } + + // TODO(SJ): Don't send transactions until the block has been fully processed. + + // Then if requested, the individual transactions. if event_subscriptions.txns { let txs = decoded_block_data.txs(); - let tx_count = txs.len(); - build_and_send_txns_event(module_state_key, chain_id, slot, txs) - .context("Sending Cardano block transaction events to Event Queue")?; + build_and_send_txns_event( + module_state_key, + chain_id, + &decoded_block_data, + &txs, + immutable, + rollback, + tip, + ) + .context("Sending Cardano block transaction events to Event Queue")?; + let tx_count = txs.len(); trace!( block_number, tx_count, @@ -255,55 +326,67 @@ fn process_block_chain_update( ); } - if event_subscriptions.blocks { - build_and_send_block_event(module_state_key, chain_id, block_data) - .context("Sending Cardano block event to Event Queue")?; + Ok(decoded_block_data.slot()) +} - trace!(block_number, "Generated Cardano block event"); - } +/// Get summary details about a particular block. 
+fn get_details( + chain_id: CardanoBlockchainId, block_data: &MultiEraBlock, immutable: bool, rollback: bool, + tip: bool, +) -> BlockDetail { + let mut src = BlockSrc::empty(); - Ok(slot) -} + // Is the block Immutable or Live? + if immutable { + src |= BlockSrc::IMMUTABLE; + }; -/// Processes a rollback chain update. -/// -/// This means decoding the block data, building and sending the event to the -/// Event Queue. -fn process_rollback_chain_update( - module_state_key: &ModuleStateKey, chain_id: CardanoBlockchainId, - block_data: &cardano_chain_follower::MultiEraBlockData, - event_subscriptions: &EventSubscriptions, -) -> anyhow::Result { - let decoded_block_data = block_data.decode().context("Decode rollback block")?; + // Set the tip bit flag if at Tip of the chain. + if tip { + src |= BlockSrc::TIP; + }; - let slot = decoded_block_data.slot(); + // Set the rollback bit flag, if the block was from a rollback. + if rollback { + src |= BlockSrc::ROLLBACK; + }; - if event_subscriptions.rollbacks { - build_and_send_rollback_event(module_state_key, chain_id, slot) - .context("Sending Cardano rollback event to Event Queue")?; + let era = format!("{:?}", block_data.era()); + let height = block_data.number(); + let slot = block_data.slot(); + let hash = block_data.hash().to_vec(); - trace!( - block_number = decoded_block_data.number(), - "Generated Cardano rollback event" - ); - } + let wall_clock = match chain_id { + CardanoBlockchainId::Mainnet => block_data.wallclock(&GenesisValues::mainnet()), + CardanoBlockchainId::Preprod => block_data.wallclock(&GenesisValues::preprod()), + CardanoBlockchainId::Preview => block_data.wallclock(&GenesisValues::preview()), + }; - Ok(slot) + BlockDetail { + era, + src, + height, + slot: (slot, hash), + wall_clock: Datetime { + seconds: wall_clock, + nanoseconds: 0, + }, + } } /// Builds a [`super::event::OnCardanoBlockEvent`] from the block data and /// sends it to the given module through the Event Queue. 
fn build_and_send_block_event( module_state_key: &ModuleStateKey, chain_id: CardanoBlockchainId, - block_data: cardano_chain_follower::MultiEraBlockData, + block_data: &MultiEraBlockData, decoded_block: &MultiEraBlock, immutable: bool, rollback: bool, + tip: bool, ) -> anyhow::Result<()> { + let details = get_details(chain_id, decoded_block, immutable, rollback, tip); + let on_block_event = super::event::OnCardanoBlockEvent { blockchain: chain_id, - block: block_data.into_raw_data(), - // TODO(FelipeRosa): In order to implement this we need the - // cardano-chain-follower crate to give this information along - // with the chain update. - source: BlockSrc::NODE, + block: block_data.clone().into_raw_data(), + details, }; crate::event::queue::send(HermesEvent::new( @@ -316,15 +399,17 @@ fn build_and_send_block_event( /// Builds [`super::event::OnCardanoTxnEvent`] for every transaction on the block data /// and sends them to the given module through the Event Queue. fn build_and_send_txns_event( - module_state_key: &ModuleStateKey, chain_id: CardanoBlockchainId, slot: u64, - txs: Vec, + module_state_key: &ModuleStateKey, chain_id: CardanoBlockchainId, block_data: &MultiEraBlock, + txs: &[MultiEraTx], immutable: bool, rollback: bool, tip: bool, ) -> anyhow::Result<()> { - for (tx, index) in txs.into_iter().zip(0u32..) { + let details = get_details(chain_id, block_data, immutable, rollback, tip); + + for (tx, index) in txs.iter().zip(0u32..) { let on_txn_event = super::event::OnCardanoTxnEvent { blockchain: chain_id, - slot, txn_index: index, txn: tx.encode(), + details: details.clone(), }; // Stop at the first error. @@ -338,23 +423,6 @@ fn build_and_send_txns_event( Ok(()) } -/// Builds a [`super::event::OnCardanoRollback`] from the block data and -/// sends it to the given module through the Event Queue. 
-fn build_and_send_rollback_event( - module_state_key: &ModuleStateKey, chain_id: CardanoBlockchainId, slot: u64, -) -> anyhow::Result<()> { - let on_rollback_event = super::event::OnCardanoRollback { - blockchain: chain_id, - slot, - }; - - crate::event::queue::send(HermesEvent::new( - on_rollback_event, - TargetApp::List(vec![module_state_key.0.clone()]), - TargetModule::List(vec![module_state_key.1.clone()]), - )) -} - /// Gets the event subscription flags for a given module. fn get_event_subscriptions( module_state_key: &ModuleStateKey, @@ -366,7 +434,6 @@ fn get_event_subscriptions( Ok(EventSubscriptions { blocks: sub_state.subscribed_to_blocks, - rollbacks: sub_state.subscribed_to_rollbacks, txns: sub_state.subscribed_to_txns, }) } diff --git a/hermes/bin/src/runtime_extensions/hermes/cardano/event.rs b/hermes/bin/src/runtime_extensions/hermes/cardano/event.rs index b6237db3f..5da30f135 100644 --- a/hermes/bin/src/runtime_extensions/hermes/cardano/event.rs +++ b/hermes/bin/src/runtime_extensions/hermes/cardano/event.rs @@ -3,7 +3,7 @@ use crate::{ event::HermesEventPayload, runtime_extensions::bindings::hermes::cardano::api::{ - BlockSrc, CardanoBlock, CardanoBlockchainId, CardanoTxn, + BlockDetail, CardanoBlock, CardanoBlockchainId, CardanoTxn, }, }; @@ -14,7 +14,7 @@ pub(super) struct OnCardanoBlockEvent { /// This raw CBOR block data. pub(super) block: CardanoBlock, /// Source information about where the block came from, and if we are at tip or not. 
- pub(super) source: BlockSrc, + pub(super) details: BlockDetail, } impl HermesEventPayload for OnCardanoBlockEvent { @@ -26,7 +26,12 @@ impl HermesEventPayload for OnCardanoBlockEvent { module .instance .hermes_cardano_event_on_block() - .call_on_cardano_block(&mut module.store, self.blockchain, &self.block, self.source)?; + .call_on_cardano_block( + &mut module.store, + self.blockchain, + &self.block, + &self.details, + )?; Ok(()) } } @@ -35,12 +40,12 @@ impl HermesEventPayload for OnCardanoBlockEvent { pub(super) struct OnCardanoTxnEvent { /// The blockchain id the block originated from. pub(super) blockchain: CardanoBlockchainId, - /// The slot the transaction is in. - pub(super) slot: u64, - /// The offset in the block this transaction is at. + /// The transaction index with the block the transaction is in. pub(super) txn_index: u32, /// The raw transaction data itself. pub(super) txn: CardanoTxn, + /// Details about the block the transaction is in. + pub(super) details: BlockDetail, } impl HermesEventPayload for OnCardanoTxnEvent { @@ -55,33 +60,11 @@ impl HermesEventPayload for OnCardanoTxnEvent { .call_on_cardano_txn( &mut module.store, self.blockchain, - self.slot, self.txn_index, &self.txn, + &self.details, )?; Ok(()) } } - -/// On Cardano rollback event -pub(super) struct OnCardanoRollback { - /// The blockchain id the block originated from. - pub(super) blockchain: CardanoBlockchainId, - /// The slot the transaction is in. 
- pub(super) slot: u64, -} - -impl HermesEventPayload for OnCardanoRollback { - fn event_name(&self) -> &str { - "on-cardano-rollback" - } - - fn execute(&self, module: &mut crate::wasm::module::ModuleInstance) -> anyhow::Result<()> { - module - .instance - .hermes_cardano_event_on_rollback() - .call_on_cardano_rollback(&mut module.store, self.blockchain, self.slot)?; - Ok(()) - } -} diff --git a/hermes/bin/src/runtime_extensions/hermes/cardano/host.rs b/hermes/bin/src/runtime_extensions/hermes/cardano/host.rs index 60db5c5ba..6e0279c94 100644 --- a/hermes/bin/src/runtime_extensions/hermes/cardano/host.rs +++ b/hermes/bin/src/runtime_extensions/hermes/cardano/host.rs @@ -1,13 +1,27 @@ //! Cardano Blockchain host implementation for WASM runtime. +use cardano_chain_follower::PointOrTip; + use crate::{ runtime_context::HermesRuntimeContext, runtime_extensions::bindings::hermes::cardano::api::{ - CardanoBlock, CardanoBlockchainId, CardanoTxn, FetchError, Host, Slot, TxnError, - UnsubscribeOptions, + CardanoBlock, CardanoBlockchainId, CardanoTxn, FetchError, Host, Slot, SubscribeOptions, + TxnError, }, }; +/// Convert a `whence` parameter into a `Point`. +fn whence_to_point(whence: Slot) -> Option { + match whence { + Slot::Genesis => Some(cardano_chain_follower::Point::Origin.into()), + Slot::Point((slot, hash)) => { + Some(cardano_chain_follower::Point::Specific(slot, hash).into()) + }, + Slot::Tip => Some(cardano_chain_follower::PointOrTip::Tip), + Slot::Continue => None, + } +} + impl Host for HermesRuntimeContext { /// Subscribe to the Blockchain block data. /// @@ -36,31 +50,22 @@ impl Host for HermesRuntimeContext { /// /// `whence` == `stop` will prevent the blockchain syncing, and the caller will be /// unsubscribed. 
- fn subscribe_blocks( - &mut self, net: CardanoBlockchainId, whence: Slot, - ) -> wasmtime::Result> { - let sub_type = match whence { - Slot::Genesis => { - super::SubscriptionType::Blocks(cardano_chain_follower::Point::Origin.into()) - }, - Slot::Point((slot, hash)) => { - super::SubscriptionType::Blocks( - cardano_chain_follower::Point::Specific(slot, hash).into(), - ) - }, - Slot::Tip => super::SubscriptionType::Blocks(cardano_chain_follower::PointOrTip::Tip), - Slot::Continue => super::SubscriptionType::Continue, - }; + fn subscribe( + &mut self, net: CardanoBlockchainId, whence: Slot, what: SubscribeOptions, + ) -> wasmtime::Result> { + // Convert whence to an actual Point or None if we don't have one at all. + let whence = whence_to_point(whence); let res = super::subscribe( net, self.app_name().clone(), self.module_id().clone(), - sub_type, + whence, + what, ); match res { - Ok(slot) => Ok(Ok(slot)), + Ok(()) => Ok(Ok(())), Err(_) => Ok(Err(FetchError::InvalidSlot)), } } @@ -86,53 +91,12 @@ impl Host for HermesRuntimeContext { /// event twice, /// once before the `stop` and once after the `continue`. fn unsubscribe( - &mut self, net: CardanoBlockchainId, opts: UnsubscribeOptions, + &mut self, net: CardanoBlockchainId, opts: SubscribeOptions, ) -> wasmtime::Result<()> { super::unsubscribe(net, self.app_name().clone(), self.module_id().clone(), opts) // .map_err(|e| wasmtime::Error::new(e)) } - /// Subscribe to transaction data events, does not alter the blockchain sync in - /// anyway. - /// - /// **Parameters** - /// - /// - `net` : The blockchain network to subscribe to txn events from. - fn subscribe_txn(&mut self, net: CardanoBlockchainId) -> wasmtime::Result<()> { - super::subscribe( - net, - self.app_name().clone(), - self.module_id().clone(), - super::SubscriptionType::Transactions, - )?; - - Ok(()) - } - - /// Subscribe to blockchain rollback events, does not alter the blockchain sync in - /// anyway. 
- /// - /// **Parameters** - /// - /// - `net` : The blockchain network to subscribe to txn events from. - /// - /// **Notes** - /// - /// After a rollback event, the blockchain sync will AUTOMATICALLY start sending block - /// data from the rollback point. No action is required to actually follow the - /// rollback, unless the - /// default behavior is not desired. - fn subscribe_rollback(&mut self, net: CardanoBlockchainId) -> wasmtime::Result<()> { - super::subscribe( - net, - self.app_name().clone(), - self.module_id().clone(), - super::SubscriptionType::Rollbacks, - )?; - - Ok(()) - } - /// Fetch a block from the requested blockchain at the requested slot. /// /// **Parameters** @@ -156,11 +120,9 @@ impl Host for HermesRuntimeContext { fn fetch_block( &mut self, net: CardanoBlockchainId, whence: Slot, ) -> wasmtime::Result> { - let at = match whence { - Slot::Genesis => cardano_chain_follower::Point::Origin.into(), - Slot::Point((slot, hash)) => cardano_chain_follower::Point::Specific(slot, hash).into(), - Slot::Tip => cardano_chain_follower::PointOrTip::Tip, - Slot::Continue => todo!(), + // Convert whence to an actual Point or None if we don't have one at all. + let Some(at) = whence_to_point(whence) else { + return Ok(Err(FetchError::InvalidSlot)); }; match super::read_block(net, at) { @@ -192,6 +154,33 @@ impl Host for HermesRuntimeContext { Ok(block_data.txs().into_iter().map(|tx| tx.encode()).collect()) } + /// Subscribe to transaction data events, does not alter the blockchain sync in + /// anyway. + /// + /// **Parameters** + /// + /// - `net` : The blockchain network to subscribe to txn events from. + fn fetch_txn( + &mut self, net: CardanoBlockchainId, whence: Slot, offset: u16, + ) -> wasmtime::Result> { + // Convert whence to an actual Point or None if we don't have one at all. 
+ let Some(at) = whence_to_point(whence) else { + return Ok(Err(FetchError::InvalidSlot)); + }; + + match super::read_block(net, at) { + Ok(block_data) => { + let decoded_block = block_data.decode(); + let txs = decoded_block.txs(); + let Some(txn) = txs.get(offset as usize) else { + return Ok(Err(FetchError::InvalidTxn)); + }; + Ok(Ok(txn.encode())) + }, + Err(_) => Ok(Err(FetchError::InvalidSlot)), + } + } + /// Post a transactions to the blockchain. /// /// This can be used to post a pre-formed transaction to the required blockchain. diff --git a/hermes/bin/src/runtime_extensions/hermes/cardano/mod.rs b/hermes/bin/src/runtime_extensions/hermes/cardano/mod.rs index 20b82e890..1c2e49d8a 100644 --- a/hermes/bin/src/runtime_extensions/hermes/cardano/mod.rs +++ b/hermes/bin/src/runtime_extensions/hermes/cardano/mod.rs @@ -1,9 +1,11 @@ //! Cardano Blockchain runtime extension implementation. +use cardano_chain_follower::PointOrTip; use dashmap::DashMap; use crate::{ - app::HermesAppName, runtime_extensions::bindings::hermes::cardano::api::CardanoBlockchainId, + app::HermesAppName, + runtime_extensions::bindings::hermes::cardano::api::{CardanoBlockchainId, SubscribeOptions}, wasm::module::ModuleId, }; @@ -22,8 +24,6 @@ struct SubscriptionState { subscribed_to_blocks: bool, /// Whether the module is subscribed to receive transaction events. subscribed_to_txns: bool, - /// Whether the module is subscribed to receive rollback events. - subscribed_to_rollbacks: bool, /// Handle to the cardano chain follower from which the module is receiving /// events. follower_handle: Option, @@ -60,23 +60,11 @@ static STATE: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| { /// Advise Runtime Extensions of a new context pub(crate) fn new_context(_ctx: &crate::runtime_context::HermesRuntimeContext) {} -/// Available subscription types. -pub(super) enum SubscriptionType { - /// Subscribe to block events from a given point. 
- Blocks(cardano_chain_follower::PointOrTip), - /// Subscribe to rollback events. - Rollbacks, - /// Subscribe to transaction events. - Transactions, - /// Continue previously stopped subscription event generation. - Continue, -} - /// Subscribes a module or resumes the generation of subscribed events for a module. pub(super) fn subscribe( chain_id: CardanoBlockchainId, app_name: HermesAppName, module_id: ModuleId, - sub_type: SubscriptionType, -) -> Result { + whence: Option, what: SubscribeOptions, +) -> Result<()> { let network = chain_id.into(); let mut sub_state = STATE @@ -84,64 +72,64 @@ pub(super) fn subscribe( .entry((app_name.clone(), module_id.clone(), network)) .or_default(); - match sub_type { - SubscriptionType::Blocks(follow_from) => { - if let Some(handle) = sub_state.follower_handle.as_ref() { - handle.set_read_pointer_sync(follow_from)?; - } else { - let (follower_handle, starting_point) = STATE.tokio_rt_handle.spawn_follower_sync( - app_name, - module_id, - chain_id, - follow_from, - )?; - - sub_state.follower_handle = Some(follower_handle); - sub_state.current_slot = starting_point.slot_or_default(); - } + // Set what we want to subscribe to. 
+ if what.contains(SubscribeOptions::BLOCK) { + sub_state.subscribed_to_blocks = true; + } - sub_state.subscribed_to_blocks = true; - }, - SubscriptionType::Rollbacks => { - sub_state.subscribed_to_rollbacks = true; - }, - SubscriptionType::Transactions => { - sub_state.subscribed_to_txns = true; - }, - SubscriptionType::Continue => { - if let Some(handle) = sub_state.follower_handle.as_ref() { - handle.resume()?; - } - }, + if what.contains(SubscribeOptions::TRANSACTION) { + sub_state.subscribed_to_txns = true; + } + + if let Some(follow_from) = whence { + if let Some(handle) = sub_state.follower_handle.as_ref() { + handle.set_read_pointer_sync(follow_from)?; + } else { + let (follower_handle, starting_point) = STATE.tokio_rt_handle.spawn_follower_sync( + app_name, + module_id, + chain_id, + follow_from, + )?; + + sub_state.follower_handle = Some(follower_handle); + sub_state.current_slot = starting_point.slot_or_default(); + } + } else if let Some(handle) = sub_state.follower_handle.as_ref() { + handle.resume()?; } - Ok(sub_state.current_slot) + Ok(()) } /// Unsubscribes a module or stops the generation of subscribed events for a module. 
pub(super) fn unsubscribe( chain_id: CardanoBlockchainId, app_name: HermesAppName, module_id: ModuleId, - opts: crate::runtime_extensions::bindings::hermes::cardano::api::UnsubscribeOptions, + opts: SubscribeOptions, ) -> Result<()> { - use crate::runtime_extensions::bindings::hermes::cardano::api::UnsubscribeOptions; - let network = chain_id.into(); let sub_state = STATE.subscriptions.get_mut(&(app_name, module_id, network)); if let Some(mut sub_state) = sub_state { - if opts & UnsubscribeOptions::BLOCK == UnsubscribeOptions::BLOCK { + let mut block_stopped = false; + let mut txn_stopped = false; + + if opts.contains(SubscribeOptions::BLOCK) && sub_state.subscribed_to_blocks { sub_state.subscribed_to_blocks = false; + block_stopped = true; } - if opts & UnsubscribeOptions::TRANSACTION == UnsubscribeOptions::TRANSACTION { + if opts.contains(SubscribeOptions::TRANSACTION) && sub_state.subscribed_to_txns { sub_state.subscribed_to_txns = false; + txn_stopped = true; } - if opts & UnsubscribeOptions::ROLLBACK == UnsubscribeOptions::ROLLBACK { - sub_state.subscribed_to_rollbacks = false; - } - - if opts & UnsubscribeOptions::STOP == UnsubscribeOptions::STOP { + // If we changed the subscription state, and ended up subscribed to nothing, then just + // STOP. 
+ if (block_stopped || txn_stopped) + && !sub_state.subscribed_to_blocks + && !sub_state.subscribed_to_txns + { if let Some(handle) = sub_state.follower_handle.as_ref() { handle.stop()?; } @@ -164,18 +152,19 @@ impl From for cardano_chain_follower::Network { CardanoBlockchainId::Mainnet => cardano_chain_follower::Network::Mainnet, CardanoBlockchainId::Preprod => cardano_chain_follower::Network::Preprod, CardanoBlockchainId::Preview => cardano_chain_follower::Network::Preview, - CardanoBlockchainId::LocalTestBlockchain => todo!(), } } } #[cfg(test)] mod test { - use super::{read_block, subscribe, unsubscribe, SubscriptionType}; + use cardano_chain_follower::PointOrTip; + + use super::{read_block, subscribe, unsubscribe}; use crate::{ app::HermesAppName, runtime_extensions::bindings::hermes::cardano::api::{ - CardanoBlockchainId, UnsubscribeOptions, + CardanoBlockchainId, SubscribeOptions, }, }; @@ -189,7 +178,8 @@ mod test { CardanoBlockchainId::Preprod, app_name.clone(), module_id.clone(), - SubscriptionType::Rollbacks, + Some(PointOrTip::Tip), + SubscribeOptions::BLOCK, ) .expect("subscribed"); @@ -197,7 +187,8 @@ mod test { CardanoBlockchainId::Preprod, app_name.clone(), module_id.clone(), - SubscriptionType::Blocks(cardano_chain_follower::PointOrTip::Tip), + Some(PointOrTip::Tip), + SubscribeOptions::TRANSACTION, ) .expect("subscribed"); @@ -205,7 +196,17 @@ mod test { CardanoBlockchainId::Preprod, app_name.clone(), module_id.clone(), - SubscriptionType::Transactions, + Some(PointOrTip::Tip), + SubscribeOptions::all(), + ) + .expect("subscribed"); + + subscribe( + CardanoBlockchainId::Preprod, + app_name.clone(), + module_id.clone(), + Some(PointOrTip::Tip), + SubscribeOptions::empty(), ) .expect("subscribed"); @@ -215,14 +216,12 @@ mod test { CardanoBlockchainId::Preprod, app_name.clone(), module_id.clone(), - SubscriptionType::Blocks( - cardano_chain_follower::Point::Specific( - 49_075_522, - 
hex::decode("b7639b523f320643236ab0fc04b7fd381dedd42c8d6b6433b5965a5062411396") - .expect("decode hex value"), - ) - .into(), - ), + Some(PointOrTip::Point(cardano_chain_follower::Point::Specific( + 49_075_522, + hex::decode("b7639b523f320643236ab0fc04b7fd381dedd42c8d6b6433b5965a5062411396") + .expect("decode hex value"), + ))), + SubscribeOptions::empty(), ) .expect("subscribed"); @@ -232,7 +231,7 @@ mod test { CardanoBlockchainId::Preprod, app_name.clone(), module_id.clone(), - UnsubscribeOptions::BLOCK, + SubscribeOptions::BLOCK, ) .expect("subscribed"); @@ -242,17 +241,18 @@ mod test { CardanoBlockchainId::Preprod, app_name.clone(), module_id.clone(), - UnsubscribeOptions::STOP, + SubscribeOptions::all(), ) - .expect("subscribed"); + .expect("unsubscribed"); std::thread::sleep(std::time::Duration::from_secs(5)); subscribe( CardanoBlockchainId::Preprod, - app_name, - module_id, - SubscriptionType::Continue, + app_name.clone(), + module_id.clone(), + None, + SubscribeOptions::empty(), ) .expect("subscribed"); @@ -273,6 +273,6 @@ mod test { ) .expect("read"); - assert_eq!(block_data.decode().expect("valid block").slot(), 49_075_522); + assert_eq!(block_data.decode().slot(), 49_075_522); } } diff --git a/hermes/bin/src/runtime_extensions/hermes/cardano/tokio_runtime_task.rs b/hermes/bin/src/runtime_extensions/hermes/cardano/tokio_runtime_task.rs index 8b9d03191..708ad44c3 100644 --- a/hermes/bin/src/runtime_extensions/hermes/cardano/tokio_runtime_task.rs +++ b/hermes/bin/src/runtime_extensions/hermes/cardano/tokio_runtime_task.rs @@ -167,15 +167,10 @@ async fn spawn_follower( )> { trace!("Spawning chain follower executor"); - let config = cardano_chain_follower::FollowerConfigBuilder::default().build(); - let network = chain_id.into(); - - let follower = cardano_chain_follower::Follower::connect( - follower_connect_address(network), - network, - config, - ) - .await?; + let follower = cardano_chain_follower::FollowerConfigBuilder::default_for(chain_id.into()) + 
.build() + .connect() + .await?; trace!("Started chain follower"); @@ -208,29 +203,14 @@ async fn read_block( // since we'll not poll the // follower's future so the following process will // not be executed. - let cfg = cardano_chain_follower::FollowerConfigBuilder::default() + let reader = cardano_chain_follower::FollowerConfigBuilder::default_for(network) .chain_update_buffer_size(1) - .build(); - - let reader = cardano_chain_follower::Follower::connect( - follower_connect_address(network), - network, - cfg, - ) - .await?; + .build() + .connect() + .await?; let block_data = reader.read_block(at).await?; Ok(block_data) } } - -/// Returns the peer address used to connect to each Cardano network. -const fn follower_connect_address(network: cardano_chain_follower::Network) -> &'static str { - match network { - cardano_chain_follower::Network::Mainnet => "backbone.cardano-mainnet.iohk.io:3001", - cardano_chain_follower::Network::Preprod => "preprod-node.play.dev.cardano.org:3001", - cardano_chain_follower::Network::Preview => "preview-node.play.dev.cardano.org:3001", - cardano_chain_follower::Network::Testnet => todo!(), - } -} diff --git a/hermes/crates/cardano-chain-follower/Cargo.toml b/hermes/crates/cardano-chain-follower/Cargo.toml index 0c0ee4a19..14f535ac2 100644 --- a/hermes/crates/cardano-chain-follower/Cargo.toml +++ b/hermes/crates/cardano-chain-follower/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "cardano-chain-follower" edition.workspace = true -version.workspace = true +version = "0.2.0" authors.workspace = true homepage.workspace = true repository.workspace = true @@ -11,12 +11,96 @@ license.workspace = true workspace = true [dependencies] +mithril-client.workspace = true pallas.workspace = true pallas-hardano.workspace = true +pallas-crypto.workspace = true thiserror.workspace = true -tokio = { workspace = true, features = ["macros", "rt", "net", "rt-multi-thread"] } +tokio.workspace = true +tokio-util.workspace = true +tokio-stream.workspace = true 
tracing.workspace = true +dashmap.workspace = true +url.workspace = true +anyhow.workspace = true +chrono.workspace = true +reqwest.workspace = true +async-trait.workspace = true +dirs.workspace = true +futures.workspace = true +async-compression.workspace = true +tokio-tar.workspace = true +humantime.workspace = true +crossbeam-skiplist.workspace = true +strum.workspace = true +ouroboros.workspace = true +hex.workspace = true +rayon.workspace = true +serde.workspace = true +serde_json.workspace = true +mimalloc = {workspace = true, optional = true} +memx.workspace = true +fmmap.workspace = true +minicbor.workspace = true +brotli.workspace = true +zstd.workspace = true +c509-certificate.workspace = true +x509-cert.workspace = true +ed25519-dalek.workspace = true +blake2b_simd.workspace = true +num-traits.workspace = true +logcall.workspace = true +tracing-log.workspace = true +tar.workspace = true +ureq.workspace = true +http.workspace = true +crossbeam-channel.workspace = true +hickory-resolver.workspace = true +moka.workspace = true +crossbeam-epoch.workspace = true [dev-dependencies] hex.workspace = true -tracing-subscriber = { workspace = true, features = ["env-filter"] } +tracing-subscriber.workspace = true +test-log.workspace = true +clap.workspace = true + +# Note, these features are for support of features exposed by dependencies. +[features] +default = ["rustls-tls-native-roots"] + +# Enable the MiMalloc global allocator +# Only used for examples. +mimalloc = ["dep:mimalloc"] + +# These features are for support of dependent crates only. +# They do not change the operation of the main crate. 
+native-tls = [ + "reqwest/native-tls", + "mithril-client/native-tls", +] +native-tls-alpn = [ + "reqwest/native-tls-alpn", + "mithril-client/native-tls-alpn", +] +native-tls-vendored = [ + "reqwest/native-tls-vendored", + "mithril-client/native-tls-vendored", +] + +rustls-tls = [ + "reqwest/rustls-tls", + "mithril-client/rustls-tls", +] +rustls-tls-manual-roots = [ + "reqwest/rustls-tls-manual-roots", + "mithril-client/rustls-tls-manual-roots", +] +rustls-tls-webpki-roots = [ + "reqwest/rustls-tls-webpki-roots", + "mithril-client/rustls-tls-webpki-roots", +] +rustls-tls-native-roots = [ + "reqwest/rustls-tls-native-roots", + "mithril-client/rustls-tls-native-roots", +] diff --git a/hermes/crates/cardano-chain-follower/Readme.md b/hermes/crates/cardano-chain-follower/Readme.md new file mode 100644 index 000000000..9e8a4744f --- /dev/null +++ b/hermes/crates/cardano-chain-follower/Readme.md @@ -0,0 +1,59 @@ +# Things that need fixing + +The following fixes would be nice to have to: + +1. Improve sync times. +2. Decrease disk utilization. +3. Eliminate external dependencies. + +## Parallel downloading requires external tool + +We currently require an external tool `aria2c` to download the Mithril snapshot. +We should have a native version to remove this external tool dependency. + +See: +For a simple version of such we could adapt. + +The first version should just replace `Aria2c` and download to a file. + +Ideally, we would have an in-memory queue that downloads in parallel, rather than saving to disk. +This would need to use something like a skip-map to re-order the blocks, and a pool of workers to download the next blocks. +It's not trivial, but it would remove the necessity to store the actual snapshot archive on-disk. + +It's not possible to download the snapshot archive to ram because it is enormous. + +## Zstd decompress and tar extraction optimization + +Currently, an async zstd decompress and tar extraction is used. 
+This is known to be slow, and we are CPU bound doing it. + +Change this to run in a Thread outside async and use the zstd library, which links to the C zstd library directly. +And the non async tar extraction library. + +This will speed up extracting files from the archive. + +This would be better also if we had synchronous piped downloading as mentioned above. + +## Block Decode Optimization + +Currently, to enforce and validate chain integrity, we need to decode the blocks all over the place. +Decoding blocks is expensive, and this is wasteful. +As the application will almost certainly require the block to be decoded, it makes sense for it to happen once in a uniform way. +We would then pass the decoded block to the application saving it the effort of doing it, itself. + +We should Decode LIVE blocks once when we receive them from the network, +and then keep the decoded as well as raw block data in memory. + +For Immutable blocks, we should decode them ONCE when we read them from disk. + +## Immutable Queue Optimization + +The Immutable follower reads from disk, inline. +Disk IO is relatively expensive. +Decoding blocks is also expensive, it's better to do that in parallel with an application processing a previous block. + +What we should do is have a read ahead queue, where a second task is reading ahead of the application following, +reading the next blocks from disk, and decoding them. + +The main follower used by the application then reads from this red ahead queue. +This would help us better utilize disk and CPU resources, which would result in improved sync times. diff --git a/hermes/crates/cardano-chain-follower/examples/concurrent_reads.rs b/hermes/crates/cardano-chain-follower/examples/concurrent_reads.rs deleted file mode 100644 index 31daf87f5..000000000 --- a/hermes/crates/cardano-chain-follower/examples/concurrent_reads.rs +++ /dev/null @@ -1,78 +0,0 @@ -//! This example shows how to use the chain follower to download arbitrary blocks -//! 
from the chain concurrently. - -use std::error::Error; - -use cardano_chain_follower::{Follower, FollowerConfigBuilder, Network, Point}; -use tracing::level_filters::LevelFilter; -use tracing_subscriber::EnvFilter; - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::builder() - .with_default_directive(LevelFilter::INFO.into()) - .from_env_lossy(), - ) - .init(); - - let config = FollowerConfigBuilder::default().build(); - - let follower = Follower::connect( - "relays-new.cardano-mainnet.iohk.io:3001", - Network::Mainnet, - config, - ) - .await?; - - let points = vec![ - Point::Specific( - 110_908_236, - hex::decode("ad3798a1db2b6097c71f35609399e4b2ff834f0f45939803d563bf9d660df2f2")?, - ), - Point::Specific( - 110_908_582, - hex::decode("16e97a73e866280582ee1201a5e1815993978eede956af1869b0733bedc131f2")?, - ), - ]; - let mut point_count = points.len(); - - let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel(); - - for p in points { - let slot_no = p.slot_or_default(); - let r = follower.read_block(p); - let r_tx = tx.clone(); - - tokio::spawn(async move { - tracing::info!(slot_no, "Reading block"); - let result = r.await; - drop(r_tx.send(result)); - }); - } - - while let Some(result) = rx.recv().await { - let block_data = result?; - let block = block_data.decode()?; - - let total_fee = block - .txs() - .iter() - .map(|tx| tx.fee().unwrap_or_default()) - .sum::(); - - println!( - "Block {} (slot {}) => total fee: {total_fee}", - block.number(), - block.slot() - ); - - point_count -= 1; - if point_count == 0 { - break; - } - } - - Ok(()) -} diff --git a/hermes/crates/cardano-chain-follower/examples/follow_chain_updates.rs b/hermes/crates/cardano-chain-follower/examples/follow_chain_updates.rs deleted file mode 100644 index 3aa64f27a..000000000 --- a/hermes/crates/cardano-chain-follower/examples/follow_chain_updates.rs +++ /dev/null @@ -1,62 +0,0 @@ -//! 
This example shows how to use the chain follower to follow chain updates on -//! a Cardano network chain. - -use std::error::Error; - -use cardano_chain_follower::{ChainUpdate, Follower, FollowerConfigBuilder, Network}; -use tracing::level_filters::LevelFilter; -use tracing_subscriber::EnvFilter; - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::builder() - .with_default_directive(LevelFilter::INFO.into()) - .from_env_lossy(), - ) - .init(); - - // Defaults to start following from the tip. - let config = FollowerConfigBuilder::default().build(); - - let mut follower = Follower::connect( - "relays-new.cardano-mainnet.iohk.io:3001", - Network::Mainnet, - config, - ) - .await?; - - // Wait for 3 chain updates and shutdown. - for _ in 0..3 { - let chain_update = follower.next().await?; - - match chain_update { - ChainUpdate::Block(data) => { - let block = data.decode()?; - - println!( - "New block NUMBER={} SLOT={} HASH={}", - block.number(), - block.slot(), - hex::encode(block.hash()), - ); - }, - ChainUpdate::Rollback(data) => { - let block = data.decode()?; - - println!( - "Rollback block NUMBER={} SLOT={} HASH={}", - block.number(), - block.slot(), - hex::encode(block.hash()), - ); - }, - } - } - - // Waits for the follower background task to exit. - follower.close().await?; - - Ok(()) -} diff --git a/hermes/crates/cardano-chain-follower/examples/follow_chain_updates_mithril.rs b/hermes/crates/cardano-chain-follower/examples/follow_chain_updates_mithril.rs deleted file mode 100644 index ce67c86f4..000000000 --- a/hermes/crates/cardano-chain-follower/examples/follow_chain_updates_mithril.rs +++ /dev/null @@ -1,75 +0,0 @@ -//! This example shows how to use the chain follower to follow chain updates on -//! a Cardano network chain. - -// Allowing since this is example code. 
-#![allow(clippy::unwrap_used)] - -use std::{error::Error, path::PathBuf}; - -use cardano_chain_follower::{ChainUpdate, Follower, FollowerConfigBuilder, Network, Point}; -use tracing::level_filters::LevelFilter; -use tracing_subscriber::EnvFilter; - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::builder() - .with_default_directive(LevelFilter::INFO.into()) - .from_env_lossy(), - ) - .init(); - - // Create a follower config specifying the Mithril snapshot path and - // to follow from block 1794552 (preprod). - let config = FollowerConfigBuilder::default() - .follow_from(Point::Specific( - 49_075_262, - hex::decode("e929cd1bf8ec78844ec9ea450111aaf55fbf17540db4b633f27d4503eebf2218")?, - )) - .mithril_snapshot_path( - PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()) - .join("examples/snapshot_data"), - ) - .build(); - - let mut follower = Follower::connect( - "preprod-node.play.dev.cardano.org:3001", - Network::Preprod, - config, - ) - .await?; - - // Wait for some chain updates and shutdown. - for _ in 0..10 { - let chain_update = follower.next().await?; - - match chain_update { - ChainUpdate::Block(data) => { - let block = data.decode()?; - - println!( - "New block NUMBER={} SLOT={} HASH={}", - block.number(), - block.slot(), - hex::encode(block.hash()), - ); - }, - ChainUpdate::Rollback(data) => { - let block = data.decode()?; - - println!( - "Rollback block NUMBER={} SLOT={} HASH={}", - block.number(), - block.slot(), - hex::encode(block.hash()), - ); - }, - } - } - - // Waits for the follower background task to exit. - follower.close().await?; - - Ok(()) -} diff --git a/hermes/crates/cardano-chain-follower/examples/follow_chains.rs b/hermes/crates/cardano-chain-follower/examples/follow_chains.rs new file mode 100644 index 000000000..ad5ad3baf --- /dev/null +++ b/hermes/crates/cardano-chain-follower/examples/follow_chains.rs @@ -0,0 +1,378 @@ +//! 
This example shows how to use the chain follower to follow all chains, until they have +//! all reached tip. It will report on how many blocks for each chain exist between eras, +//! and also how long each chain took to reach its tip. + +// Allowing since this is example code. +//#![allow(clippy::unwrap_used)] + +#[cfg(feature = "mimalloc")] +use mimalloc::MiMalloc; + +/// Use Mimalloc for the global allocator. +#[cfg(feature = "mimalloc")] +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; + +use std::{error::Error, time::Duration}; + +use cardano_chain_follower::{ + ChainFollower, ChainSyncConfig, ChainUpdate, Kind, Metadata, Network, Statistics, ORIGIN_POINT, + TIP_POINT, +}; +use clap::{arg, ArgAction, ArgMatches, Command}; +use tokio::time::Instant; +use tracing::{error, info, level_filters::LevelFilter}; +use tracing_subscriber::EnvFilter; + +/// Process our CLI Arguments +fn process_argument() -> (Vec, ArgMatches) { + let matches = Command::new("follow_chains") + .args(&[ + arg!(--preprod "Follow Preprod network").action(ArgAction::SetTrue), + arg!(--preview "Follow Preview network").action(ArgAction::SetTrue), + arg!(--mainnet "Follow Mainnet network").action(ArgAction::SetTrue), + arg!(--all "Follow All networks").action(ArgAction::SetTrue), + arg!(--"stop-at-tip" "Stop when the tip of the blockchain is reached.") + .action(ArgAction::SetTrue), + arg!(--"all-live-blocks" "Show all live blocks.").action(ArgAction::SetTrue), + arg!(--"all-tip-blocks" "Show all blocks read from the Peer as TIP.") + .action(ArgAction::SetTrue), + arg!(--"halt-on-error" "Stop the process when an error occurs without retrying.") + .action(ArgAction::SetTrue), + arg!(--"bad-cip36" "Dump Bad Cip36 registrations detected.") + .action(ArgAction::SetTrue), + arg!(--"largest-metadata" "Dump The largest transaction metadata we find (as we find it).") + .action(ArgAction::SetTrue), + arg!(--"mithril-sync-workers" "The number of workers to use when downloading the blockchain 
snapshot.") + .value_parser(clap::value_parser!(u16).range(1..)) + .action(ArgAction::Set), + arg!(--"mithril-sync-chunk-size" "The size in MB of each chunk downloaded by a worker.") + .value_parser(clap::value_parser!(u16).range(1..)) + .action(ArgAction::Set), + arg!(--"mithril-sync-queue-ahead" "The number of chunks pre-queued per worker.") + .value_parser(clap::value_parser!(u16).range(1..)) + .action(ArgAction::Set), + arg!(--"mithril-sync-connect-timeout" "The HTTP Connection Timeout for mithril downloads, in seconds.") + .value_parser(clap::value_parser!(u64).range(1..)) + .action(ArgAction::Set), + arg!(--"mithril-sync-data-read-timeout" "The HTTP Data Read Timeout for mithril downloads, in seconds.") + .value_parser(clap::value_parser!(u64).range(1..)) + .action(ArgAction::Set), + ]) + .get_matches(); + + let mut networks = vec![]; + if matches.get_flag("preprod") || matches.get_flag("all") { + networks.push(Network::Preprod); + } + if matches.get_flag("preview") || matches.get_flag("all") { + networks.push(Network::Preview); + } + if matches.get_flag("mainnet") || matches.get_flag("all") { + networks.push(Network::Mainnet); + } + + (networks, matches) +} + +/// Start syncing a particular network +async fn start_sync_for(network: &Network, matches: ArgMatches) -> Result<(), Box> { + let mut cfg = ChainSyncConfig::default_for(*network); + + let mut mithril_dl_connect_timeout = "Not Set".to_string(); + let mut mithril_dl_data_timeout = "Not Set".to_string(); + + let mut dl_config = cfg.mithril_cfg.dl_config.clone().unwrap_or_default(); + + if let Some(workers) = matches.get_one::("mithril-sync-workers") { + dl_config = dl_config.with_workers(*workers as usize); + } + let mithril_dl_workers = format!("{}", dl_config.workers); + + if let Some(chunk_size) = matches.get_one::("mithril-sync-chunk-size") { + dl_config = dl_config.with_chunk_size(*chunk_size as usize * 1024 * 1024); + } + let mithril_dl_chunk_size = format!("{} MBytes", dl_config.chunk_size / (1024 
* 1024)); + + if let Some(queue_ahead) = matches.get_one::("mithril-sync-queue-ahead") { + dl_config = dl_config.with_queue_ahead(*queue_ahead as usize); + } + let mithril_dl_queue_ahead = format!("{}", dl_config.queue_ahead); + + if let Some(connect_timeout) = matches.get_one::("mithril-sync-connect-timeout") { + dl_config = dl_config.with_connection_timeout(Duration::from_secs(*connect_timeout)); + } + if let Some(connect_timeout) = dl_config.connection_timeout { + mithril_dl_connect_timeout = format!("{}", humantime::format_duration(connect_timeout)); + } + + if let Some(data_timeout) = matches.get_one::("mithril-sync-data-timeout") { + dl_config = dl_config.with_connection_timeout(Duration::from_secs(*data_timeout)); + } + if let Some(data_timeout) = dl_config.data_read_timeout { + mithril_dl_data_timeout = format!("{}", humantime::format_duration(data_timeout)); + } + + cfg.mithril_cfg = cfg.mithril_cfg.with_dl_config(dl_config); + + info!( + chain = cfg.chain.to_string(), + mithril_sync_dl_workers = mithril_dl_workers, + mithril_sync_dl_chunk_size = mithril_dl_chunk_size, + mithril_sync_dl_queue_ahead = mithril_dl_queue_ahead, + mithril_sync_dl_connect_timeout = mithril_dl_connect_timeout, + mithril_sync_dl_data_read_timeout = mithril_dl_data_timeout, + "Starting Sync" + ); + + if let Err(error) = cfg.run().await { + error!("Failed to start sync task for {} : {}", network, error); + Err(error)?; + } + + Ok(()) +} + +/// The interval between showing a block, even if nothing else changed. +const RUNNING_UPDATE_INTERVAL: u64 = 100_000; + +/// Try and follow a chain continuously, from Genesis until Tip. 
+#[allow(clippy::too_many_lines)] +async fn follow_for(network: Network, matches: ArgMatches) { + info!(chain = network.to_string(), "Following"); + let mut follower = ChainFollower::new(network, ORIGIN_POINT, TIP_POINT).await; + + let all_tip_blocks = matches.get_flag("all-tip-blocks"); + let all_live_blocks = matches.get_flag("all-live-blocks"); + let stop_at_tip = matches.get_flag("stop-at-tip"); + let halt_on_error = matches.get_flag("halt-on-error"); + let bad_cip36 = matches.get_flag("bad-cip36"); + let largest_metadata = matches.get_flag("largest-metadata"); + + let mut current_era = String::new(); + let mut last_update: Option = None; + let mut last_update_shown = false; + let mut prev_hash: Option> = None; + let mut last_immutable: bool = false; + let mut reached_tip = false; // After we reach TIP we show all block we process. + let mut updates: u64 = 0; + let mut last_fork = 0; + let mut follow_all = false; + + let mut last_metrics_time = Instant::now(); + + let mut biggest_aux_data: usize = 0; + + while let Some(chain_update) = follower.next().await { + updates += 1; + + if chain_update.tip { + reached_tip = true; + } + + let block = chain_update.block_data().decode(); + let this_era = block.era().to_string(); + + // When we transition between important points, show the last block as well. + if ((current_era != this_era) + || (chain_update.immutable() != last_immutable) + || (last_fork != chain_update.data.fork())) + && !last_update_shown + { + if let Some(last_update) = last_update.clone() { + info!( + chain = network.to_string(), + "Chain Update {}:{}", + updates - 1, + last_update + ); + } + } + + // If these become true, we will show all blocks from the follower. + follow_all = follow_all + || (!chain_update.immutable() && all_live_blocks) + || ((chain_update.data.fork() > 1) && all_tip_blocks); + + // Don't know if this update will show or not, so say it didn't. 
+ last_update_shown = false; + + if (current_era != this_era) + || (chain_update.immutable() != last_immutable) + || reached_tip + || follow_all + || (updates % RUNNING_UPDATE_INTERVAL == 0) + || (last_fork != chain_update.data.fork()) + { + current_era = this_era; + last_immutable = chain_update.immutable(); + last_fork = chain_update.data.fork(); + info!( + chain = network.to_string(), + "Chain Update {updates}:{}", chain_update + ); + // We already showed the last update, no need to show it again. + last_update_shown = true; + } + + let this_prev_hash = block.header().previous_hash(); + + // We have no state, so can only check consistency with block updates. + // But thats OK, the chain follower itself is also checking chain consistency. + // This is just an example. + if chain_update.kind == Kind::Block && last_update.is_some() && prev_hash != this_prev_hash + { + let display_last_update = if let Some(last_update) = last_update.clone() { + format!("{last_update}") + } else { + "This Can't Happen".to_string() + }; + error!( + chain = network.to_string(), + "Chain is broken: {chain_update} Does not follow: {display_last_update}", + ); + break; + } + + // Inspect the transactions in the block. + let mut dump_raw_aux_data = false; + for (tx_idx, _tx) in block.txs().iter().enumerate() { + if let Some(decoded_metadata) = chain_update + .data + .txn_metadata(tx_idx, Metadata::cip36::LABEL) + { + let raw_size = match chain_update + .data + .txn_raw_metadata(tx_idx, Metadata::cip36::LABEL) + { + Some(raw) => raw.len(), + None => 0, + }; + + if largest_metadata && raw_size > biggest_aux_data { + biggest_aux_data = raw_size; + dump_raw_aux_data = true; + } + + if bad_cip36 { + #[allow(irrefutable_let_patterns)] // Won't always be irrefutable. 
+ if let Metadata::DecodedMetadataValues::Cip36(cip36) = &decoded_metadata.value { + if !cip36.signed { + dump_raw_aux_data = true; + } + if !decoded_metadata.report.is_empty() { + info!( + chain = network.to_string(), + "Cip36 {tx_idx}:{:?} - {raw_size}", decoded_metadata + ); + dump_raw_aux_data = true; + } + } + } + } + } + + if dump_raw_aux_data { + if let Some(x) = block.as_alonzo() { + info!( + chain = network.to_string(), + "Raw Aux Data: {:02x?}", x.auxiliary_data_set + ); + } else if let Some(x) = block.as_babbage() { + info!( + chain = network.to_string(), + "Raw Aux Data: {:02x?}", x.auxiliary_data_set + ); + } else if let Some(x) = block.as_conway() { + info!( + chain = network.to_string(), + "Raw Aux Data: {:02x?}", x.auxiliary_data_set + ); + } + } + + prev_hash = Some(block.hash()); + last_update = Some(chain_update); + + if reached_tip && stop_at_tip { + break; + } + + let check_time = Instant::now(); + if check_time.duration_since(last_metrics_time).as_secs() >= 60 { + last_metrics_time = check_time; + + let stats = Statistics::new(network); + + info!("Json Metrics: {}", stats.as_json(true)); + + if halt_on_error + && (stats.mithril.download_or_validation_failed > 0 + || stats.mithril.failed_to_get_tip > 0 + || stats.mithril.tip_did_not_advance > 0 + || stats.mithril.tip_failed_to_send_to_updater > 0 + || stats.mithril.failed_to_activate_new_snapshot > 0) + { + break; + } + } + } + + if !last_update_shown { + if let Some(last_update) = last_update.clone() { + info!(chain = network.to_string(), "Last Update: {}", last_update); + } + } + + let stats = Statistics::new(network); + info!("Json Metrics: {}", stats.as_json(true)); + + info!(chain = network.to_string(), "Following Completed."); +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + tracing_subscriber::fmt() + .with_file(true) + .with_line_number(true) + .with_thread_names(true) + .with_thread_ids(true) + .pretty() + .with_env_filter( + EnvFilter::builder() + 
.with_default_directive(LevelFilter::INFO.into()) + .from_env_lossy(), + ) + .init(); + + let (networks, matches) = process_argument(); + let parallelism = std::thread::available_parallelism()?; + info!( + Parallelism = parallelism, + "Cardano Chain Followers Starting." + ); + + #[cfg(feature = "mimalloc")] + info!("mimalloc global allocator: enabled"); + + // First we need to actually start the underlying sync tasks for each blockchain. + for network in &networks { + start_sync_for(network, matches.clone()).await?; + } + + // Make a follower for the network. + let mut tasks = Vec::new(); + for network in &networks { + tasks.push(tokio::spawn(follow_for(*network, matches.clone()))); + } + + // Wait for all followers to finish. + for task in tasks { + task.await?; + } + + // Keep running for 1 minute after last follower reaches its tip. + tokio::time::sleep(tokio::time::Duration::from_secs(60)).await; + + Ok(()) +} diff --git a/hermes/crates/cardano-chain-follower/examples/read_block.rs b/hermes/crates/cardano-chain-follower/examples/read_block.rs deleted file mode 100644 index b9120a0d0..000000000 --- a/hermes/crates/cardano-chain-follower/examples/read_block.rs +++ /dev/null @@ -1,47 +0,0 @@ -//! This example shows how to use the chain follower to download arbitrary blocks -//! from the chain. 
- -use std::error::Error; - -use cardano_chain_follower::{Follower, FollowerConfigBuilder, Network, Point}; -use tracing::level_filters::LevelFilter; -use tracing_subscriber::EnvFilter; - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::builder() - .with_default_directive(LevelFilter::INFO.into()) - .from_env_lossy(), - ) - .init(); - - let config = FollowerConfigBuilder::default().build(); - - let follower = Follower::connect( - "relays-new.cardano-mainnet.iohk.io:3001", - Network::Mainnet, - config, - ) - .await?; - - let data = follower - .read_block(Point::Specific( - 110_908_236, - hex::decode("ad3798a1db2b6097c71f35609399e4b2ff834f0f45939803d563bf9d660df2f2")?, - )) - .await?; - - let block = data.decode()?; - - let total_fee = block - .txs() - .iter() - .map(|tx| tx.fee().unwrap_or_default()) - .sum::(); - - println!("Total fee: {total_fee}"); - - Ok(()) -} diff --git a/hermes/crates/cardano-chain-follower/examples/read_block_mithril.rs b/hermes/crates/cardano-chain-follower/examples/read_block_mithril.rs deleted file mode 100644 index f71a1cc18..000000000 --- a/hermes/crates/cardano-chain-follower/examples/read_block_mithril.rs +++ /dev/null @@ -1,57 +0,0 @@ -//! This example shows how to use the chain follower to read arbitrary blocks -//! from Mithril snapshot files. - -// Allowing since this is example code. -#![allow(clippy::unwrap_used)] - -use std::{error::Error, path::PathBuf}; - -use cardano_chain_follower::{Follower, FollowerConfigBuilder, Network, Point}; -use tracing::level_filters::LevelFilter; -use tracing_subscriber::EnvFilter; - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::builder() - .with_default_directive(LevelFilter::INFO.into()) - .from_env_lossy(), - ) - .init(); - - // Defaults to start following from the tip. 
- let config = FollowerConfigBuilder::default() - .mithril_snapshot_path( - PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()) - .join("examples/snapshot_data"), - ) - .build(); - - let follower = Follower::connect( - "preprod-node.play.dev.cardano.org:3001", - Network::Preprod, - config, - ) - .await?; - - let data = follower - .read_block(Point::Specific( - 49_075_418, - hex::decode("bdb5ce7788850c30342794f252b1d955086862e8f7cb90a32a8f560b693ca78a")?, - )) - .await?; - - let block = data.decode()?; - - let total_fee = block - .txs() - .iter() - .map(|tx| tx.fee().unwrap_or_default()) - .sum::(); - - println!("Block number: {}", block.number()); - println!("Total fee: {total_fee}"); - - Ok(()) -} diff --git a/hermes/crates/cardano-chain-follower/examples/read_block_range.rs b/hermes/crates/cardano-chain-follower/examples/read_block_range.rs deleted file mode 100644 index da41724bb..000000000 --- a/hermes/crates/cardano-chain-follower/examples/read_block_range.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! This example shows how to use the chain follower to download arbitrary blocks -//! from the chain. 
- -use std::error::Error; - -use cardano_chain_follower::{Follower, FollowerConfigBuilder, Network, Point}; -use tracing::level_filters::LevelFilter; -use tracing_subscriber::EnvFilter; - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::builder() - .with_default_directive(LevelFilter::INFO.into()) - .from_env_lossy(), - ) - .init(); - - let config = FollowerConfigBuilder::default().build(); - - let follower = Follower::connect( - "relays-new.cardano-mainnet.iohk.io:3001", - Network::Mainnet, - config, - ) - .await?; - - let data_vec = follower - .read_block_range( - Point::Specific( - 110_908_236, - hex::decode("ad3798a1db2b6097c71f35609399e4b2ff834f0f45939803d563bf9d660df2f2")?, - ), - Point::Specific( - 110_908_582, - hex::decode("16e97a73e866280582ee1201a5e1815993978eede956af1869b0733bedc131f2")?, - ), - ) - .await?; - - let mut total_txs = 0; - for data in data_vec { - let block = data.decode()?; - total_txs += block.tx_count(); - } - - println!("Total transactions: {total_txs}"); - - Ok(()) -} diff --git a/hermes/crates/cardano-chain-follower/examples/read_block_range_mithril.rs b/hermes/crates/cardano-chain-follower/examples/read_block_range_mithril.rs deleted file mode 100644 index abeaf264a..000000000 --- a/hermes/crates/cardano-chain-follower/examples/read_block_range_mithril.rs +++ /dev/null @@ -1,64 +0,0 @@ -//! This example shows how to use the chain follower to read arbitrary blocks -//! from Mithril snapshot files. - -// Allowing since this is example code. 
-#![allow(clippy::unwrap_used)] - -use std::{error::Error, path::PathBuf}; - -use cardano_chain_follower::{Follower, FollowerConfigBuilder, Network, Point}; -use tracing::level_filters::LevelFilter; -use tracing_subscriber::EnvFilter; - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::builder() - .with_default_directive(LevelFilter::INFO.into()) - .from_env_lossy(), - ) - .init(); - - // Defaults to start following from the tip. - let config = FollowerConfigBuilder::default() - .mithril_snapshot_path( - PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()) - .join("examples/snapshot_data"), - ) - .build(); - - let follower = Follower::connect( - "preprod-node.play.dev.cardano.org:3001", - Network::Preprod, - config, - ) - .await?; - - let data_vec = follower - .read_block_range( - // Block: 1794556 - Point::Specific( - 49_075_380, - hex::decode("a5d7ffbc7e61bf19e90b2b07276026d5fdd43424cc3436547b9532ca4a9f19ad")?, - ), - // Block: 1794560 - Point::Specific( - 49_075_522, - hex::decode("b7639b523f320643236ab0fc04b7fd381dedd42c8d6b6433b5965a5062411396")?, - ), - ) - .await?; - - for data in data_vec { - let block = data.decode()?; - - println!( - "Block {} has {} transactions", - block.number(), - block.tx_count() - ); - } - - Ok(()) -} diff --git a/hermes/crates/cardano-chain-follower/examples/set_read_pointer.rs b/hermes/crates/cardano-chain-follower/examples/set_read_pointer.rs deleted file mode 100644 index 8f44191ed..000000000 --- a/hermes/crates/cardano-chain-follower/examples/set_read_pointer.rs +++ /dev/null @@ -1,74 +0,0 @@ -//! This example shows how to set the follower's read pointer without stopping it. 
- -use std::error::Error; - -use cardano_chain_follower::{ChainUpdate, Follower, FollowerConfigBuilder, Network, Point}; -use tracing::level_filters::LevelFilter; -use tracing_subscriber::EnvFilter; - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::builder() - .with_default_directive(LevelFilter::INFO.into()) - .from_env_lossy(), - ) - .init(); - - // Defaults to start following from the tip. - let config = FollowerConfigBuilder::default().build(); - - let mut follower = Follower::connect( - "relays-new.cardano-mainnet.iohk.io:3001", - Network::Mainnet, - config, - ) - .await?; - - let (tx, mut rx) = tokio::sync::oneshot::channel::<()>(); - let mut pointer_set = false; - tokio::spawn(async move { - let _tx = tx; - tokio::time::sleep(std::time::Duration::from_secs(2)).await; - }); - - loop { - tokio::select! { - _ = &mut rx, if !pointer_set => { - follower.set_read_pointer(Point::Specific( - 110_908_236, - hex::decode("ad3798a1db2b6097c71f35609399e4b2ff834f0f45939803d563bf9d660df2f2")?, - )).await?; - println!("set read pointer"); - - pointer_set = true; - } - - chain_update = follower.next() => { - match chain_update? { - ChainUpdate::Block(data) => { - let block = data.decode()?; - - println!( - "New block NUMBER={} SLOT={} HASH={}", - block.number(), - block.slot(), - hex::encode(block.hash()), - ); - }, - ChainUpdate::Rollback(data) => { - let block = data.decode()?; - - println!( - "Rollback block NUMBER={} SLOT={} HASH={}", - block.number(), - block.slot(), - hex::encode(block.hash()), - ); - }, - } - } - } - } -} diff --git a/hermes/crates/cardano-chain-follower/src/chain_sync.rs b/hermes/crates/cardano-chain-follower/src/chain_sync.rs new file mode 100644 index 000000000..779efcc0f --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/chain_sync.rs @@ -0,0 +1,573 @@ +//! Sync from the chain to an in-memory buffer. +//! +//! 
All iteration of the chain is done through this buffer or a mithril snapshot. +//! Consumers of this library do not talk to the node directly. + +use std::time::Duration; + +use anyhow::Context; +use pallas::{ + ledger::traverse::MultiEraHeader, + network::{ + facades::PeerClient, + miniprotocols::chainsync::{self, HeaderContent, Tip}, + }, +}; +use tokio::{ + spawn, + sync::mpsc, + time::{sleep, timeout}, +}; +use tracing::{debug, error}; + +use crate::{ + chain_sync_live_chains::{ + get_fill_to_point, get_intersect_points, get_live_block, get_live_head_point, get_peer_tip, + live_chain_add_block_to_tip, live_chain_backfill, live_chain_length, purge_live_chain, + }, + chain_sync_ready::{ + get_chain_update_tx_queue, notify_follower, wait_for_sync_ready, SyncReadyWaiter, + }, + chain_update, + error::{Error, Result}, + mithril_snapshot_config::MithrilUpdateMessage, + mithril_snapshot_data::latest_mithril_snapshot_id, + point::{TIP_POINT, UNKNOWN_POINT}, + stats, ChainSyncConfig, MultiEraBlock, Network, Point, ORIGIN_POINT, +}; + +/// The maximum number of seconds we wait for a node to connect. +const MAX_NODE_CONNECT_TIME_SECS: u64 = 2; + +/// The maximum number of times we wait for a nodeChainUpdate to connect. +/// Currently set to never give up. +const MAX_NODE_CONNECT_RETRIES: u64 = 5; + +/// Try and connect to a node, in a robust and quick way. +/// +/// If it takes longer then 5 seconds, retry the connection. +/// Retry 5 times before giving up. 
+async fn retry_connect( + addr: &str, magic: u64, +) -> std::result::Result { + let mut retries = MAX_NODE_CONNECT_RETRIES; + loop { + match timeout( + Duration::from_secs(MAX_NODE_CONNECT_TIME_SECS), + PeerClient::connect(addr, magic), + ) + .await + { + Ok(peer) => { + match peer { + Ok(peer) => return Ok(peer), + Err(err) => { + retries -= 1; + if retries == 0 { + return Err(err); + } + debug!("retrying {retries} connect to {addr} : {err:?}"); + }, + } + }, + Err(error) => { + retries -= 1; + if retries == 0 { + return Err(pallas::network::facades::Error::ConnectFailure( + tokio::io::Error::new( + tokio::io::ErrorKind::Other, + format!("failed to connect to {addr} : {error}"), + ), + )); + } + debug!("retrying {retries} connect to {addr} : {error:?}"); + }, + } + } +} + +/// Purge the live chain, and intersect with TIP. +async fn purge_and_intersect_tip(client: &mut PeerClient, chain: Network) -> Result { + if let Err(error) = purge_live_chain(chain, &TIP_POINT) { + // Shouldn't happen. + error!("failed to purge live chain: {error}"); + } + + client + .chainsync() + .intersect_tip() + .await + .map_err(Error::Chainsync) + .map(std::convert::Into::into) +} + +/// Resynchronize to the live tip in memory. +async fn resync_live_tip(client: &mut PeerClient, chain: Network) -> Result { + let sync_points = get_intersect_points(chain); + if sync_points.is_empty() { + return purge_and_intersect_tip(client, chain).await; + } + + let sync_to_point = match client.chainsync().find_intersect(sync_points).await { + Ok((Some(point), _)) => point.into(), + Ok((None, _)) => { + // No intersection found, so purge live chain and re-sync it. + return purge_and_intersect_tip(client, chain).await; + }, + Err(error) => return Err(Error::Chainsync(error)), + }; + + Ok(sync_to_point) +} + +/// Fetch a single block from the Peer, and Decode it. 
+async fn fetch_block_from_peer( + peer: &mut PeerClient, chain: Network, point: Point, previous_point: Point, fork_count: u64, +) -> anyhow::Result { + let block_data = peer + .blockfetch() + .fetch_single(point.clone().into()) + .await + .with_context(|| "Fetching block data")?; + + debug!("{chain}, {previous_point}, {fork_count}"); + let live_block_data = MultiEraBlock::new(chain, block_data, &previous_point, fork_count)?; + + Ok(live_block_data) +} + +/// Process a rollback. +/// +/// Fetch the rollback block, and try and insert it into the live-chain. +/// If its a real rollback, it will purge the chain ahead of the block automatically. +async fn process_rollback_actual( + peer: &mut PeerClient, chain: Network, point: Point, tip: &Tip, fork_count: &mut u64, +) -> anyhow::Result { + debug!("RollBackward: {:?} {:?}", point, tip); + + // Check if the block is in the live chain, if it is, re-add it, which auto-purges the + // rest of live chain tip. And increments the fork count. + if let Some(mut block) = get_live_block(chain, &point, 0, true) { + // Even though we are re-adding the known block, increase the fork count. + block.set_fork(*fork_count); + live_chain_add_block_to_tip(chain, block, fork_count, tip.0.clone().into())?; + return Ok(point); + } + + // If the block is NOT in the chain, fetch it, and insert it, which will automatically + // find the correct place to insert it, and purge the old tip blocks. + + // We don't know what or if there is a previous block, so probe for it. + // Fizzy search for the block immediately preceding the block we will fetch. + // In case we don;t have a previous point on the live chain, it might be the tip of the + // mithril chain, so get that. 
+ let previous_block = get_live_block(chain, &point, -1, false); + let previous_point = if let Some(previous_block) = previous_block { + let previous = previous_block.previous(); + debug!("Previous block: {:?}", previous); + if previous == ORIGIN_POINT { + latest_mithril_snapshot_id(chain).tip() + } else { + previous + } + } else { + debug!("Using Mithril Tip as rollback previous point."); + latest_mithril_snapshot_id(chain).tip() + }; + debug!("Previous point: {:?}", previous_point); + let block = + fetch_block_from_peer(peer, chain, point.clone(), previous_point, *fork_count).await?; + live_chain_add_block_to_tip(chain, block, fork_count, tip.0.clone().into())?; + + // Next block we receive is a rollback. + Ok(point) +} + +/// Process a rollback detected from the peer. +async fn process_rollback( + peer: &mut PeerClient, chain: Network, point: Point, tip: &Tip, previous_point: &Point, + fork_count: &mut u64, +) -> anyhow::Result { + let rollback_slot = point.slot_or_default(); + let head_slot = previous_point.slot_or_default(); + debug!("Head slot: {}", head_slot); + debug!("Rollback slot: {}", rollback_slot); + let slot_rollback_size = if head_slot > rollback_slot { + head_slot - rollback_slot + } else { + 0 + }; + + // We actually do the work here... + let response = process_rollback_actual(peer, chain, point, tip, fork_count).await?; + + // We never really know how many blocks are rolled back when advised by the peer, but we + // can work out how many slots. This function wraps the real work, so we can properly + // record the stats when the rollback is complete. Even if it errors. + stats::rollback(chain, stats::RollbackType::Peer, slot_rollback_size); + + Ok(response) +} + +/// Process a rollback detected from the peer. +async fn process_next_block( + peer: &mut PeerClient, chain: Network, header: HeaderContent, tip: &Tip, + previous_point: &Point, fork_count: &mut u64, +) -> anyhow::Result { + // Decode the Header of the block so we know what to fetch. 
+ let decoded_header = MultiEraHeader::decode( + header.variant, + header.byron_prefix.map(|p| p.0), + &header.cbor, + ) + .with_context(|| "Decoding Block Header")?; + + let block_point = Point::new(decoded_header.slot(), decoded_header.hash().to_vec()); + + debug!("RollForward: {block_point:?} {tip:?}"); + + let block = fetch_block_from_peer( + peer, + chain, + block_point.clone(), + previous_point.clone(), + *fork_count, + ) + .await?; + + let block_point = block.point(); + + // We can't store this block because we don't know the previous one so the chain + // would break, so just use it for previous. + if *previous_point == UNKNOWN_POINT { + // Nothing else we can do with the first block when we don't know the previous + // one. Just return it's point. + debug!("Not storing the block, because we did not know the previous point."); + } else { + live_chain_add_block_to_tip(chain, block, fork_count, tip.0.clone().into())?; + } + + Ok(block_point) +} + +/// Follows the chain until there is an error. +/// If this returns it can be assumed the client is disconnected. +/// +/// We take ownership of the client because of that. +async fn follow_chain( + peer: &mut PeerClient, chain: Network, fork_count: &mut u64, +) -> anyhow::Result<()> { + let mut update_sender = get_chain_update_tx_queue(chain).await; + let mut previous_point = UNKNOWN_POINT; + + loop { + // debug!("Waiting for data from Cardano Peer Node:"); + + // We can't get an update sender UNTIL we have released the sync lock. + if update_sender.is_none() { + update_sender = get_chain_update_tx_queue(chain).await; + } + + // Check what response type we need to process. 
+ let response = match peer.chainsync().state() { + chainsync::State::CanAwait => peer.chainsync().recv_while_can_await().await, + chainsync::State::MustReply => peer.chainsync().recv_while_must_reply().await, + _ => peer.chainsync().request_next().await, + } + .with_context(|| "Error while receiving block data from peer")?; + + match response { + chainsync::NextResponse::RollForward(header, tip) => { + // Note: Tip is poorly documented. + // It is a tuple with the following structure: + // ((Slot#, BlockHash), Block# ). + // We can find if we are AT tip by comparing the current block Point with the tip + // Point. We can estimate how far behind we are (in blocks) by + // subtracting current block height and the tip block height. + // IF the TIP is <= the current block height THEN we are at tip. + previous_point = + process_next_block(peer, chain, header, &tip, &previous_point, fork_count) + .await?; + + // This update is just for followers to know to look again at their live chains for + // new data. + notify_follower(chain, &update_sender, &chain_update::Kind::Block); + }, + chainsync::NextResponse::RollBackward(point, tip) => { + previous_point = + process_rollback(peer, chain, point.into(), &tip, &previous_point, fork_count) + .await?; + // This update is just for followers to know to look again at their live chains for + // new data. + notify_follower(chain, &update_sender, &chain_update::Kind::Rollback); + }, + chainsync::NextResponse::Await => { + // debug!("Peer Node says: Await"); + }, + } + } +} + +/// How long we wait before trying to reconnect to a peer when it totally fails our +/// attempts. +const PEER_FAILURE_RECONNECT_DELAY: Duration = Duration::from_secs(10); + +/// Do not return until we have a connection to the peer. +async fn persistent_reconnect(addr: &str, chain: Network) -> PeerClient { + // Not yet connected to the peer. 
+ stats::peer_connected(chain, false, addr); + + loop { + // We never have a connection if we end up around the loop, so make a new one. + match retry_connect(addr, chain.into()).await { + Ok(peer) => { + // Successfully connected to the peer. + stats::peer_connected(chain, true, addr); + + return peer; + }, + Err(error) => { + error!( + "Chain Sync for: {} from {} : Failed to connect to relay: {}", + chain, addr, error, + ); + + // Wait a bit before trying again. + tokio::time::sleep(PEER_FAILURE_RECONNECT_DELAY).await; + }, + }; + } +} + +/// Backfill the live chain, based on the Mithril Sync updates. +/// This does NOT return until the live chain has been backfilled from the end of mithril +/// to the current synced tip blocks. +/// +/// This only needs to be done once per chain connection. +async fn live_sync_backfill( + cfg: &ChainSyncConfig, update: &MithrilUpdateMessage, +) -> anyhow::Result<()> { + stats::backfill_started(cfg.chain); + + let (fill_to, _oldest_fork) = get_fill_to_point(cfg.chain).await; + let range = (update.tip.clone().into(), fill_to.clone().into()); + let mut previous_point = update.previous.clone(); + + let range_msg = format!("{range:?}"); + + let mut peer = persistent_reconnect(&cfg.relay_address, cfg.chain).await; + + // Request the range of blocks from the Peer. + peer.blockfetch() + .request_range(range) + .await + .with_context(|| "Requesting Block Range")?; + + let mut backfill_blocks = Vec::::new(); + + while let Some(block_data) = peer.blockfetch().recv_while_streaming().await? { + // Backfilled blocks get placed in the oldest fork currently on the live-chain. + let block = + MultiEraBlock::new(cfg.chain, block_data, &previous_point, 1).with_context(|| { + format!( + "Failed to decode block data. previous: {previous_point:?}, range: {range_msg}" + ) + })?; + + // Check we get the first block in the range properly. 
+ if backfill_blocks.is_empty() && !block.point().strict_eq(&update.tip) { + return Err(Error::BackfillSync(format!( + "First Block is invalid: Block {:?} != Range Start {:?}.", + block.point(), + update.tip + )) + .into()); + } + + previous_point = block.point(); + + backfill_blocks.push(block); + } + + // Check we get the last block in the range properly. + if backfill_blocks.is_empty() || !previous_point.strict_eq(&fill_to) { + return Err(Error::BackfillSync(format!( + "Last Block is invalid. Block {previous_point:?} != Range End {fill_to:?}" + )) + .into()); + } + + // Report how many backfill blocks we received. + let backfill_size = backfill_blocks.len() as u64; + + // Try and backfill, if anything doesn't work, or the chain integrity would break, fail. + live_chain_backfill(cfg.chain, &backfill_blocks)?; + + stats::backfill_ended(cfg.chain, backfill_size); + + debug!("Backfilled Range OK: {}", range_msg); + + Ok(()) +} + +/// Backfill and Purge the live chain, based on the Mithril Sync updates. +async fn live_sync_backfill_and_purge( + cfg: ChainSyncConfig, mut rx: mpsc::Receiver, + mut sync_ready: SyncReadyWaiter, +) { + // Wait for first Mithril Update advice, which triggers a BACKFILL of the Live Data. + let Some(update) = rx.recv().await else { + error!("Mithril Sync Failed, can not continue chain sync either."); + return; + }; + + debug!( + "Before Backfill: Size of the Live Chain is: {} Blocks", + live_chain_length(cfg.chain) + ); + + let live_chain_head: Point; + + loop { + // We will re-attempt backfill, until its successful. + // Backfill is atomic, it either fully works, or none of the live-chain is changed. 
+ debug!("Mithril Tip has advanced to: {update:?} : BACKFILL"); + while let Err(error) = live_sync_backfill(&cfg, &update).await { + error!("Mithril Backfill Sync Failed: {}", error); + sleep(Duration::from_secs(10)).await; + } + + if let Some(head_point) = get_live_head_point(cfg.chain) { + live_chain_head = head_point; + break; + } + } + + stats::new_mithril_update( + cfg.chain, + update.tip.slot_or_default(), + live_chain_length(cfg.chain) as u64, + live_chain_head.slot_or_default(), + ); + + debug!( + "After Backfill: Size of the Live Chain is: {} Blocks", + live_chain_length(cfg.chain) + ); + + // Once Backfill is completed OK we can use the Blockchain data for Syncing and Querying + sync_ready.signal(); + + let mut update_sender = get_chain_update_tx_queue(cfg.chain).await; + + loop { + let Some(update) = rx.recv().await else { + error!("Mithril Sync Failed, can not continue chain sync either."); + return; + }; + + // We can't get an update sender until the sync is released. + if update_sender.is_none() { + update_sender = get_chain_update_tx_queue(cfg.chain).await; + } + + debug!("Mithril Tip has advanced to: {update:?} : PURGE NEEDED"); + + let update_point: Point = update.tip.clone(); + + if let Err(error) = purge_live_chain(cfg.chain, &update_point) { + // This should actually never happen. + error!("Mithril Purge Failed: {}", error); + } + + debug!( + "After Purge: Size of the Live Chain is: {} Blocks", + live_chain_length(cfg.chain) + ); + + notify_follower( + cfg.chain, + &update_sender, + &chain_update::Kind::ImmutableBlockRollForward, + ); + } + + // TODO: If the mithril sync dies, sleep for a bit and make sure the live chain + // doesn't grow indefinitely. + // We COULD move the spawn of mithril following into here, and if the rx dies, kill + // that task, and restart it. + // In reality, the mithril sync should never die and drop the queue. +} + +/// Handle the background downloading of Mithril snapshots for a given network. 
+/// Note: There can ONLY be at most three of these running at any one time. +/// This is because there can ONLY be one snapshot for each of the three known Cardano +/// networks. +/// # Arguments +/// +/// * `network` - The network type for the client to connect to. +/// * `aggregator_url` - A reference to the URL of an aggregator that can be used to +/// create the client. +/// * `genesis_vkey` - The genesis verification key, which is needed to authenticate with +/// the server. +/// +/// # Returns +/// +/// This does not return, it is a background task. +pub(crate) async fn chain_sync(cfg: ChainSyncConfig, rx: mpsc::Receiver) { + debug!( + "Chain Sync for: {} from {} : Starting", + cfg.chain, cfg.relay_address, + ); + + // Start the SYNC_READY unlock task. + let sync_waiter = wait_for_sync_ready(cfg.chain); + + let backfill_cfg = cfg.clone(); + + // Start the Live chain backfill task. + let _backfill_join_handle = spawn(async move { + live_sync_backfill_and_purge(backfill_cfg.clone(), rx, sync_waiter).await; + }); + + // Live Fill data starts at fork 1. + // Immutable data from a mithril snapshot is fork 0. + // Live backfill is always Fork 1. + let mut fork_count: u64 = 2; + + loop { + // We never have a connection if we end up around the loop, so make a new one. + let mut peer = persistent_reconnect(&cfg.relay_address, cfg.chain).await; + + match resync_live_tip(&mut peer, cfg.chain).await { + Ok(tip) => debug!("Tip Resynchronized to {tip}"), + Err(error) => { + error!( + "Cardano Client {} failed to resync Tip: {}", + cfg.relay_address, error + ); + continue; + }, + } + + // Note: This can ONLY return with an error, otherwise it will sync indefinitely. 
+ if let Err(error) = follow_chain(&mut peer, cfg.chain, &mut fork_count).await { + error!( + "Cardano Client {} failed to follow chain: {}: Reconnecting.", + cfg.relay_address, error + ); + continue; + } + + // If this returns, we are on a new fork (or assume we are) + fork_count += 1; + } +} + +/// Is the current point aligned with what we know as tip. +pub(crate) async fn point_at_tip(chain: Network, point: &Point) -> bool { + let tip = get_peer_tip(chain); + + // We are said to be AT TIP, if the block point is greater than or equal to the tip. + tip <= *point +} diff --git a/hermes/crates/cardano-chain-follower/src/chain_sync_config.rs b/hermes/crates/cardano-chain-follower/src/chain_sync_config.rs new file mode 100644 index 000000000..f7c0e5144 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/chain_sync_config.rs @@ -0,0 +1,165 @@ +//! Cardano chain sync configuration. +//! +//! Independent of ANY followers, we allow a maximum of 3 Chains being updated, one for +//! each network. Chain Followers use the data supplied by the Chain-Sync. +//! This module configures the chain sync processes. + +use std::sync::LazyLock; + +use dashmap::DashMap; +use strum::IntoEnumIterator; +use tokio::{sync::Mutex, task::JoinHandle}; +use tracing::{debug, error}; + +use crate::{ + chain_sync::chain_sync, + error::{Error, Result}, + mithril_snapshot_config::MithrilSnapshotConfig, + network::Network, + stats, +}; + +/// Default [`Follower`] block buffer size. +const DEFAULT_CHAIN_UPDATE_BUFFER_SIZE: usize = 32; + +/// How many slots back from TIP is considered Immutable in the absence of a mithril +/// snapshot. +const DEFAULT_IMMUTABLE_SLOT_WINDOW: u64 = 12 * 60 * 60; + +/// Type we use to manage the Sync Task handle map. +type SyncMap = DashMap>>>; +/// Handle to the mithril sync thread. One for each Network ONLY. 
+static SYNC_JOIN_HANDLE_MAP: LazyLock = LazyLock::new(|| { + let map = DashMap::new(); + for network in Network::iter() { + map.insert(network, Mutex::new(None)); + } + map +}); + +/// A Follower Connection to the Cardano Network. +#[derive(Clone, Debug)] +pub struct ChainSyncConfig { + /// Chain Network + pub chain: Network, + /// Relay Node Address + pub(crate) relay_address: String, + /// Block buffer size option. + chain_update_buffer_size: usize, + /// If we don't have immutable data, how far back from TIP is the data considered + /// Immutable (in slots). + immutable_slot_window: u64, + /// Configuration of Mithril Snapshots. + pub mithril_cfg: MithrilSnapshotConfig, +} + +impl ChainSyncConfig { + /// Sets the defaults for a given cardano network. + /// Each network has a different set of defaults, so no single "default" can apply. + /// This function is preferred to the `default()` standard function. + #[must_use] + pub fn default_for(chain: Network) -> Self { + Self { + chain, + relay_address: chain.default_relay(), + chain_update_buffer_size: DEFAULT_CHAIN_UPDATE_BUFFER_SIZE, + immutable_slot_window: DEFAULT_IMMUTABLE_SLOT_WINDOW, + mithril_cfg: MithrilSnapshotConfig::default_for(chain), + } + } + + /// Sets the relay to use for Chain Sync. + /// + /// # Arguments + /// + /// * `relay`: Address to use for the blockchain relay node. + #[must_use] + pub fn relay(mut self, address: String) -> Self { + self.relay_address = address; + self + } + + /// Sets the size of the chain updates buffer used by the [`Follower`]. + /// + /// # Arguments + /// + /// * `chain_update_buffer_size`: Size of the chain updates buffer. + #[must_use] + pub fn chain_update_buffer_size(mut self, block_buffer_size: usize) -> Self { + self.chain_update_buffer_size = block_buffer_size; + self + } + + /// Sets the size of the Immutable window used when Mithril is not available. + /// + /// # Arguments + /// + /// * `window`: Size of the Immutable window. 
+ #[must_use] + pub fn immutable_slot_window(mut self, window: u64) -> Self { + self.immutable_slot_window = window; + self + } + + /// Sets the the Mithril snapshot Config the `ChainSync` will use. + /// + /// # Arguments + /// + /// * `path`: Mithril snapshot path. + /// * `update`: Auto-update this path with the latest mithril snapshot as it changes. + #[must_use] + pub fn mithril_cfg(mut self, cfg: MithrilSnapshotConfig) -> Self { + self.mithril_cfg = cfg; + self + } + + /// Runs Chain Synchronization. + /// + /// Must be done BEFORE the chain can be followed. + /// + /// # Arguments + /// + /// * `chain`: The chain to follow. + /// + /// # Returns + /// + /// `Result<()>`: On success. + /// + /// # Errors + /// + /// `Error`: On error. + pub async fn run(self) -> Result<()> { + debug!( + chain = self.chain.to_string(), + "Chain Synchronization Starting" + ); + + stats::sync_started(self.chain); + + // Start the Chain Sync - IFF its not already running. + let lock_entry = match SYNC_JOIN_HANDLE_MAP.get(&self.chain) { + None => { + error!("Join Map improperly initialized: Missing {}!!", self.chain); + return Err(Error::Internal); // Should not get here. 
+ }, + Some(entry) => entry, + }; + let mut locked_handle = lock_entry.value().lock().await; + + if (*locked_handle).is_some() { + debug!("Chain Sync Already Running for {}", self.chain); + return Err(Error::ChainSyncAlreadyRunning(self.chain)); + } + + // Start the Mithril Snapshot Follower + let rx = self.mithril_cfg.run().await?; + + // Start Chain Sync + *locked_handle = Some(tokio::spawn(chain_sync(self.clone(), rx))); + + // sync_map.insert(chain, handle); + debug!("Chain Sync for {} : Started", self.chain); + + Ok(()) + } +} diff --git a/hermes/crates/cardano-chain-follower/src/chain_sync_live_chains.rs b/hermes/crates/cardano-chain-follower/src/chain_sync_live_chains.rs new file mode 100644 index 000000000..b5a159eba --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/chain_sync_live_chains.rs @@ -0,0 +1,516 @@ +//! Storage of each Live Chain per Blockchain. + +use std::{ + ops::Bound, + sync::{Arc, LazyLock, RwLock}, + time::Duration, +}; + +use crossbeam_skiplist::SkipMap; +use rayon::prelude::*; +use strum::IntoEnumIterator; +use tracing::{debug, error}; + +use crate::{ + error::{Error, Result}, + mithril_snapshot_data::latest_mithril_snapshot_id, + point::UNKNOWN_POINT, + stats, MultiEraBlock, Network, Point, TIP_POINT, +}; + +/// Type we use to manage the Sync Task handle map. +type LiveChainBlockList = SkipMap; + +/// Because we have multi-entry relationships in the live-chain protect it with a +/// `read/write lock`. The underlying `SkipMap` is still capable of multiple simultaneous +/// reads from multiple threads which is the most common access. +#[derive(Clone)] +struct ProtectedLiveChainBlockList(Arc>); + +/// Handle to the mithril sync thread. One for each Network ONLY. +static LIVE_CHAINS: LazyLock> = LazyLock::new(|| { + let map = SkipMap::new(); + for network in Network::iter() { + map.insert(network, ProtectedLiveChainBlockList::new()); + } + map +}); + +/// Latest TIP received from the Peer Node. 
+static PEER_TIP: LazyLock> = LazyLock::new(|| { + let map = SkipMap::new(); + for network in Network::iter() { + map.insert(network, UNKNOWN_POINT); + } + map +}); + +/// Set the last TIP received from the peer. +fn update_peer_tip(chain: Network, tip: Point) { + PEER_TIP.insert(chain, tip); +} + +/// Get the last TIP received from the peer. +pub(crate) fn get_peer_tip(chain: Network) -> Point { + (*PEER_TIP.get_or_insert(chain, UNKNOWN_POINT).value()).clone() +} + +/// Number of seconds to wait if we detect a `SyncReady` race condition. +const DATA_RACE_BACKOFF_SECS: u64 = 2; + +impl ProtectedLiveChainBlockList { + /// Create a new instance of the protected Live Chain skip map. + fn new() -> Self { + ProtectedLiveChainBlockList(Arc::new(RwLock::new(LiveChainBlockList::new()))) + } + + /// Get the `nth` Live block immediately following the specified block. + /// If the search is NOT strict, then the requested point itself is never returned. + /// 0 = The Block immediately after the requested point. + /// 1+ = The block that follows the block after the requested point + /// negative = The block before the requested point. + fn get_block(&self, point: &Point, mut advance: i64, strict: bool) -> Option { + let chain = self.0.read().ok()?; + + let mut this = if strict { + chain.get(point)? + } else if advance < 0 { + // This is a fuzzy lookup backwards. + advance += 1; + chain.upper_bound(Bound::Excluded(point))? + } else { + // This is a fuzzy lookup forwards. + chain.lower_bound(Bound::Excluded(point))? + }; + + // If we are stepping backwards, look backwards. + while advance < 0 { + advance += 1; + this = this.prev()?; + } + + // If we are stepping forwards, look forwards. + while advance > 0 { + advance -= 1; + this = this.next()?; + } + + // Return the block we found.
+ Some(this.value().clone()) + } + + /// Get the earliest block in the Live Chain + fn get_earliest_block(&self) -> Option { + let chain = self.0.read().ok()?; + let entry = chain.front()?; + Some(entry.value().clone()) + } + + /// Get the point of the first known block in the Live Chain. + fn get_first_live_point(live_chain: &LiveChainBlockList) -> Result { + let Some(check_first_live_entry) = live_chain.front() else { + return Err(Error::LiveSync( + "First Block not found in the Live Chain during Backfill".to_string(), + )); + }; + let check_first_live_block = check_first_live_entry.value(); + Ok(check_first_live_block.point()) + } + + /// Get the point of the last known block in the Live Chain. + fn get_last_live_point(live_chain: &LiveChainBlockList) -> Point { + let Some(check_last_live_entry) = live_chain.back() else { + // It's not an error if we can't get a latest block because the chain is empty, + // so report that we don't know... + return UNKNOWN_POINT; + }; + let check_last_live_block = check_last_live_entry.value(); + check_last_live_block.point() + } + + /// Atomic Backfill the chain with the given blocks + /// Blocks must be sorted in order from earliest to latest. + /// Final block MUST seamlessly link to the oldest block of the current live chain. (Enforced) + /// First block MUST seamlessly link to the Tip of the Immutable chain. (Enforced) + /// The blocks MUST be contiguous and properly self referential. + /// Note: This last condition is NOT enforced, but must be met or block chain + /// iteration will fail. + fn backfill(&self, chain: Network, blocks: &[MultiEraBlock]) -> Result<()> { + let live_chain = self.0.write().map_err(|_| Error::Internal)?; + + // Make sure our first live block == the last mithril tip. + // Ensures we are properly connected to the Mithril Chain. + let first_block_point = blocks + .first() + .ok_or(Error::LiveSync("No first block for backfill.".to_string()))?
+ .point(); + let latest_mithril_tip = latest_mithril_snapshot_id(chain).tip(); + if !first_block_point.strict_eq(&latest_mithril_tip) { + return Err(Error::LiveSync(format!( + "First Block of Live BackFill {first_block_point} MUST be last block of Mithril Snapshot {latest_mithril_tip}." + ))); + } + + // Get the current Oldest block in the live chain. + let check_first_live_point = Self::get_first_live_point(&live_chain)?; + + let last_backfill_block = blocks + .last() + .ok_or(Error::LiveSync("No last block for backfill.".to_string()))? + .clone(); + let last_backfill_point = last_backfill_block.point(); + + // Make sure the backfill will properly connect the partial Live chain to the Mithril + // chain. + if !last_backfill_point.strict_eq(&check_first_live_point) { + return Err(Error::LiveSync(format!( + "Last Block of Live BackFill {last_backfill_point} MUST be First block of current Live Chain {check_first_live_point}." + ))); + } + + // SkipMap is thread-safe, so we can parallel iterate inserting the blocks. + blocks.par_iter().for_each(|block| { + let _unused = live_chain.insert(block.point(), block.clone()); + }); + + // End of Successful backfill == Reaching TIP, because live sync is always at tip. + stats::tip_reached(chain); + + Ok(()) + } + + /// Check if the given point is strictly in the live-chain. This means the slot and + /// Hash MUST be present. + fn strict_block_lookup(live_chain: &LiveChainBlockList, point: &Point) -> bool { + if let Some(found_block) = live_chain.get(point) { + return found_block.value().point().strict_eq(point); + } + false + } + + /// Adds a block to the tip of the live chain, and automatically purges blocks that + /// would be lost due to rollback. Will REFUSE to add a block which does NOT have + /// a proper "previous" point defined. 
+ fn add_block_to_tip( + &self, chain: Network, block: MultiEraBlock, fork_count: &mut u64, tip: Point, + ) -> Result<()> { + let live_chain = self.0.write().map_err(|_| Error::Internal)?; + + // Check if the insert is the next logical block in the live chain. + // Most likely case, so check it first. + let previous_point = block.previous(); + let last_live_point = Self::get_last_live_point(&live_chain); + if !previous_point.strict_eq(&last_live_point) { + // Detected a rollback, so increase the fork count. + *fork_count += 1; + let mut rollback_size: u64 = 0; + + // We are NOT contiguous, so check if we can become contiguous with a rollback. + debug!("Detected non-contiguous block, rolling back. Fork: {fork_count}"); + + // First check if the previous is >= the earliest block in the live chain. + // This is because when we start syncing we could rollback earlier than our + // previously known earliest block. + // Also check the point we want to link to actually exists. If either are not true, + // Then we could be trying to roll back to an earlier block than our earliest known + // block. + let check_first_live_point = Self::get_first_live_point(&live_chain)?; + if (block.point() < check_first_live_point) + || !Self::strict_block_lookup(&live_chain, &previous_point) + { + debug!("Rollback before live chain, clear it."); + // We rolled back earlier than the current live chain. + // Purge the entire chain, and just add this one block as the new tip. + rollback_size = live_chain.len() as u64; + live_chain.clear(); + } else { + // If we get here we know for a fact that the previous block exists. + // Remove the latest live block, and keep removing it until we re-establish + // connection with the chain sequence. + // We search backwards because a rollback is more likely in the newest blocks than + // the oldest. 
+ while let Some(popped) = live_chain.pop_back() { + rollback_size += 1; + if previous_point.strict_eq(&popped.value().previous()) { + // We are now contiguous, so stop purging. + break; + } + } + } + + // Record a rollback statistic (We record the ACTUAL size our rollback affected our + // internal live chain, not what the node thinks.) + stats::rollback(chain, stats::RollbackType::LiveChain, rollback_size); + } + + let head_slot = block.point().slot_or_default(); + + // Add the block to the tip of the Live Chain. + let _unused = live_chain.insert(block.point(), block); + + let tip_slot = tip.slot_or_default(); + update_peer_tip(chain, tip); + + // Record the new live chain stats after we add a new block. + stats::new_live_block(chain, live_chain.len() as u64, head_slot, tip_slot); + + Ok(()) + } + + /// Checks if the point exists in the live chain. + /// If it does, removes all blocks preceding it (but not the point itself). + /// Will refuse to purge if the point is not the TIP of the mithril chain. + fn purge(&self, chain: Network, point: &Point) -> Result<()> { + // Make sure our first live block == the last mithril tip. + // Ensures we are properly connected to the Mithril Chain. + // But don't check this if we are about to purge the entire chain. + // We do this before we bother locking the chain for update. + if *point != TIP_POINT { + let latest_mithril_tip = latest_mithril_snapshot_id(chain).tip(); + if !point.strict_eq(&latest_mithril_tip) { + return Err(Error::LiveSync(format!( + "First Block of Live Purge {point} MUST be last block of Mithril Snapshot {latest_mithril_tip}." + ))); + } + } + + let live_chain = self.0.write().map_err(|_| Error::Internal)?; + + // Special Case. + // If the Purge Point == TIP_POINT, then we purge the entire chain. + if *point == TIP_POINT { + live_chain.clear(); + } else { + // The block we want to purge up to must be in the chain.
+ let Some(purge_start_block_entry) = live_chain.get(point) else { + return Err(Error::LiveSync(format!( + "The block to purge to {point} is not in the Live chain." + ))); + }; + + // Make sure the block that IS present, is the actual block, by strict equality. + if !purge_start_block_entry.value().point().strict_eq(point) { + return Err(Error::LiveSync(format!( + "The block to purge to {point} slot is in the live chain, but its hashes do not match." + ))); + } + + // Purge every block prior to the purge point. + while let Some(previous_block) = purge_start_block_entry.prev() { + let _unused = previous_block.remove(); + } + + // Try and FORCE the skip map to reclaim its memory + crossbeam_epoch::pin().flush(); + crossbeam_epoch::pin().flush(); + } + + Ok(()) + } + + /// Get the current number of blocks in the live chain + fn len(&self) -> usize { + if let Ok(chain) = self.0.read() { + chain.len() + } else { + 0 + } + } + + /// Get chain sync intersection points for communicating with peer node. + fn get_intersect_points(&self) -> Vec { + let mut intersect_points = Vec::new(); + + let Ok(chain) = self.0.read() else { + return intersect_points; + }; + + // Add the top 3 blocks as the first points to intersect. + let Some(entry) = chain.back() else { + return intersect_points; + }; + intersect_points.push(entry.value().point().into()); + for _ in 0..2 { + if let Some(entry) = entry.prev() { + intersect_points.push(entry.value().point().into()); + } else { + return intersect_points; + }; + } + + // Now find points based on an ever increasing Slot age. + let mut slot_age: u64 = 40; + let reference_slot = entry.value().point().slot_or_default(); + let mut previous_point = entry.value().point(); + + // Loop until we exhaust probe slots, OR we would step past genesis.
+ while slot_age < reference_slot { + let ref_point = Point::fuzzy(reference_slot - slot_age); + let Some(entry) = chain.lower_bound(Bound::Included(&ref_point)) else { + break; + }; + if entry.value().point() == previous_point { + break; + }; + previous_point = entry.value().point(); + intersect_points.push(previous_point.clone().into()); + slot_age *= 2; + } + + intersect_points + } + + /// Given a known point on the live chain, and a fork count, find the best block we + /// have. + fn find_best_fork_block( + &self, point: &Point, previous_point: &Point, fork: u64, + ) -> Option<(MultiEraBlock, u64)> { + let mut rollback_depth: u64 = 0; + let Ok(chain) = self.0.read() else { + return None; + }; + + // Get the block <= the current slot. + let ref_point = Point::fuzzy(point.slot_or_default()); + let mut entry = chain.upper_bound(Bound::Included(&ref_point))?; + + let mut this_block = entry.value().clone(); + // Check if the previous block is the one we previously knew, and if so, thats the best + // block. + if this_block.point().strict_eq(previous_point) { + return Some((this_block, rollback_depth)); + } + + // Search backwards for a fork smaller than or equal to the one we know. + while this_block.fork() > fork { + rollback_depth += 1; + entry = match entry.prev() { + Some(entry) => entry, + None => return None, + }; + + this_block = entry.value().clone(); + } + + Some((this_block, rollback_depth)) + } + + /// Get the point of the block at the head of the live chain. + fn get_live_head_point(&self) -> Option { + let live_chain = self.0.read().map_err(|_| Error::Internal).ok()?; + + let head_point = Self::get_last_live_point(&live_chain); + if head_point == UNKNOWN_POINT { + return None; + } + + Some(head_point) + } +} + +/// Get the `LiveChainBlockList` for a particular `Network`. +fn get_live_chain(chain: Network) -> ProtectedLiveChainBlockList { + // Get a reference to our live chain storage. + // This SHOULD always exist, because its initialized exhaustively. 
+ // If this FAILS, Recreate a blank chain, but log an error as it's a serious UNRECOVERABLE + // BUG. + let entry = if let Some(entry) = LIVE_CHAINS.get(&chain) { + entry + } else { + error!( + chain = chain.to_string(), + "Internal Error: Chain Sync Failed to find chain in LIVE_CHAINS" + ); + + // Try and correct the error. + LIVE_CHAINS.insert(chain, ProtectedLiveChainBlockList::new()); + + // This should NOT fail, because we just inserted it, its catastrophic failure if it does. + #[allow(clippy::expect_used)] + LIVE_CHAINS + .get(&chain) + .expect("Internal Error: Chain Sync Failed to find chain in LIVE_CHAINS") + }; + + let value = entry.value(); + value.clone() +} + +/// Get the head `Point` currently in the live chain. +pub(crate) fn get_live_head_point(chain: Network) -> Option { + let live_chain = get_live_chain(chain); + live_chain.get_live_head_point() +} + +/// Get the Live block relative to the specified point. +/// The starting block must exist if the search is strict. +pub(crate) fn get_live_block( + chain: Network, point: &Point, advance: i64, strict: bool, +) -> Option { + let live_chain = get_live_chain(chain); + live_chain.get_block(point, advance, strict) +} + +/// Get the fill to point for a chain. +/// +/// Returns the Point of the block we are filling up-to, and its fork count. +/// +/// Note: It MAY change between calling this function and actually backfilling. +/// This is expected and normal behavior. +pub(crate) async fn get_fill_to_point(chain: Network) -> (Point, u64) { + let live_chain = get_live_chain(chain); + + loop { + if let Some(earliest_block) = live_chain.get_earliest_block() { + return (earliest_block.point(), earliest_block.fork()); + } + // Nothing in the Live chain to sync to, so wait until there is. + tokio::time::sleep(Duration::from_secs(DATA_RACE_BACKOFF_SECS)).await; + } +} + +/// Insert a block into the live chain (in-order). +/// Can ONLY be used to add a new tip block to the live chain.
+/// `fork_count` should be set to 1 on the very first connection, after that, +/// it is maintained by this function, and MUST not be modified elsewhere. +pub(crate) fn live_chain_add_block_to_tip( + chain: Network, block: MultiEraBlock, fork_count: &mut u64, tip: Point, +) -> Result<()> { + let live_chain = get_live_chain(chain); + live_chain.add_block_to_tip(chain, block, fork_count, tip) +} + +/// Backfill the live chain with the block set provided. +pub(crate) fn live_chain_backfill(chain: Network, blocks: &[MultiEraBlock]) -> Result<()> { + let live_chain = get_live_chain(chain); + live_chain.backfill(chain, blocks) +} + +/// Get the length of the live chain. +/// Probably used by debug code only, so it's ok if this is not used. +pub(crate) fn live_chain_length(chain: Network) -> usize { + let live_chain = get_live_chain(chain); + live_chain.len() +} + +/// On an immutable update, purge the live-chain up to the new immutable tip. +/// Will error if the point is not in the Live chain. +pub(crate) fn purge_live_chain(chain: Network, point: &Point) -> Result<()> { + let live_chain = get_live_chain(chain); + live_chain.purge(chain, point) +} + +/// Get intersection points to try and find best point to connect to the node on +/// reconnect. +pub(crate) fn get_intersect_points(chain: Network) -> Vec { + let live_chain = get_live_chain(chain); + live_chain.get_intersect_points() +} + +/// Find best block from a fork relative to a point.
+pub(crate) fn find_best_fork_block( + chain: Network, point: &Point, previous_point: &Point, fork: u64, +) -> Option<(MultiEraBlock, u64)> { + let live_chain = get_live_chain(chain); + live_chain.find_best_fork_block(point, previous_point, fork) +} diff --git a/hermes/crates/cardano-chain-follower/src/chain_sync_ready.rs b/hermes/crates/cardano-chain-follower/src/chain_sync_ready.rs new file mode 100644 index 000000000..4122a56d1 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/chain_sync_ready.rs @@ -0,0 +1,170 @@ +//! Flag to control if chain sync for a blockchain is ready. +//! Can not consume the blockchain data until it is. + +use std::{sync::LazyLock, time::Duration}; + +use dashmap::DashMap; +use strum::IntoEnumIterator; +use tokio::{ + sync::{broadcast, oneshot, RwLock}, + time::sleep, +}; +use tracing::error; + +use crate::{chain_update, Network}; + +/// Data we hold related to sync being ready or not. +struct SyncReady { + /// MPMC Receive queue for Blockchain Updates + rx: broadcast::Receiver, + /// MPMC Transmit queue for Blockchain Updates + tx: broadcast::Sender, + /// Sync is ready flag. (Prevents data race conditions) + ready: bool, +} + +impl SyncReady { + /// Create a new `SyncReady` state. + fn new() -> Self { + // Can buffer up to 3 update messages before lagging. + let (tx, rx) = broadcast::channel::(3); + Self { + tx, + rx, + ready: false, + } + } +} + +/// Send a chain update to any subscribers that are listening. +pub(crate) fn notify_follower( + chain: Network, update_sender: &Option>, + kind: &chain_update::Kind, +) { + if let Some(update_sender) = update_sender { + if let Err(error) = update_sender.send(kind.clone()) { + error!( + chain = chain.to_string(), + "Failed to broadcast the Update {kind} : {error}" + ); + } + } +} + +/// Waiter for sync to become ready, use `signal` when it is. +pub(crate) struct SyncReadyWaiter { + /// The oneshot queue we use to signal ready.
+ signal: Option>, +} + +impl SyncReadyWaiter { + /// Create a new `SyncReadyWaiter` state. + pub(crate) fn signal(&mut self) { + if let Some(signaler) = self.signal.take() { + if let Err(error) = signaler.send(()) { + error!("sync ready waiter signal should not fail: {error:?}"); + } + } else { + error!("sync ready waiter signal should not be called more than once."); + } + } +} + +/// Lock to prevent using any blockchain data for a network UNTIL it is synced to TIP. +/// Pre-initialized for all possible blockchains, so it's safe to use `expect` to access a +/// value. +static SYNC_READY: LazyLock>> = LazyLock::new(|| { + let map = DashMap::new(); + for network in Network::iter() { + map.insert(network, RwLock::new(SyncReady::new())); + } + map +}); + +/// Write Lock the `SYNC_READY` lock for a network. +/// When we are signaled to be ready, set it to true and release the lock. +pub(crate) fn wait_for_sync_ready(chain: Network) -> SyncReadyWaiter { + let (tx, rx) = oneshot::channel::<()>(); + + tokio::spawn(async move { + // We are safe to use `expect` here because the SYNC_READY list is exhaustively + // initialized. Its a Serious BUG if that not True, so panic is OK. + #[allow(clippy::expect_used)] + let lock_entry = SYNC_READY.get(&chain).expect("network should exist"); + + let lock = lock_entry.value(); + + let mut status = lock.write().await; + + // If we successfully get told to unlock, we do. + if let Ok(()) = rx.await { + status.ready = true; + } + + // If the channel closes early, we can NEVER use the Blockchain data. + }); + + SyncReadyWaiter { signal: Some(tx) } +} + +/// Get a Read lock on the Sync State, and return if we are ready or not. +async fn check_sync_ready(chain: Network) -> bool { + // We are safe to use `expect` here because the SYNC_READY list is exhaustively + // initialized. Its a Serious BUG if that not True, so panic is OK. 
+ #[allow(clippy::expect_used)] + let lock_entry = SYNC_READY.get(&chain).expect("network should exist"); + let lock = lock_entry.value(); + + let status = lock.read().await; + + // If the transmitter has not been taken, we are not really ready. + status.ready +} + +/// Number of seconds to wait if we detect a `SyncReady` race condition. +const SYNC_READY_RACE_BACKOFF_SECS: u64 = 1; + +/// Block until the chain is synced to TIP. +/// This is necessary to ensure the Blockchain data is fully intact before attempting to +/// consume it. +pub(crate) async fn block_until_sync_ready(chain: Network) { + // There is a potential race where we haven't yet write locked the SYNC_READY lock when we + // check it. So, IF the ready state returns as false, sleep a while and try again. + while !check_sync_ready(chain).await { + sleep(Duration::from_secs(SYNC_READY_RACE_BACKOFF_SECS)).await; + } +} + +/// Get the Broadcast Receive queue for the given chain updates. +pub(crate) async fn get_chain_update_rx_queue( + chain: Network, +) -> broadcast::Receiver { + // We are safe to use `expect` here because the SYNC_READY list is exhaustively + // initialized. Its a Serious BUG if that not True, so panic is OK. + #[allow(clippy::expect_used)] + let lock_entry = SYNC_READY.get(&chain).expect("network should exist"); + + let lock = lock_entry.value(); + + let status = lock.read().await; + + status.rx.resubscribe() +} + +/// Get the Broadcast Transmit queue for the given chain updates. +pub(crate) async fn get_chain_update_tx_queue( + chain: Network, +) -> Option> { + // We are safe to use `expect` here because the SYNC_READY list is exhaustively + // initialized. Its a Serious BUG if that not True, so panic is OK. 
+ #[allow(clippy::expect_used)] + let lock_entry = SYNC_READY.get(&chain).expect("network should exist"); + + let lock = lock_entry.value(); + + if let Ok(status) = lock.try_read() { + return Some(status.tx.clone()); + } + + None +} diff --git a/hermes/crates/cardano-chain-follower/src/chain_update.rs b/hermes/crates/cardano-chain-follower/src/chain_update.rs new file mode 100644 index 000000000..044982b96 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/chain_update.rs @@ -0,0 +1,63 @@ +//! An update of a blockchain + +use std::fmt::Display; + +use strum::Display; + +use crate::multi_era_block_data::MultiEraBlock; + +/// Enum of chain updates received by the follower. +#[derive(Debug, Clone, Display, PartialEq)] +pub enum Kind { + /// A new part of the chain has become immutable (Roll-forward). + ImmutableBlockRollForward, + /// New block inserted on chain. + Block, + /// Chain rollback to the given block. + Rollback, +} + +/// Actual Chain Update itself. +#[derive(Clone, Debug)] +pub struct ChainUpdate { + /// What kind of update is this? + pub kind: Kind, + /// Is this the tip of the chain? + pub tip: bool, + /// What is the new data? + pub data: MultiEraBlock, +} + +impl ChainUpdate { + /// Creates a new chain update. + #[must_use] + pub fn new(kind: Kind, tip: bool, data: MultiEraBlock) -> Self { + Self { kind, tip, data } + } + + /// Gets the chain update's block data. + #[must_use] + pub fn block_data(&self) -> &MultiEraBlock { + &self.data + } + + /// Gets the chain update's block data. 
+ #[must_use] + pub fn immutable(&self) -> bool { + self.data.immutable() + } +} + +impl Display for ChainUpdate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let block_type = self.kind.to_string(); + let mut tip: String = String::new(); + if self.tip { + tip = " @ Tip".to_string(); + } + + write!(f, "{block_type}{tip} : {}", self.data)?; + + Ok(()) + } +} diff --git a/hermes/crates/cardano-chain-follower/src/data/Readme.md b/hermes/crates/cardano-chain-follower/src/data/Readme.md new file mode 100644 index 000000000..f2012dfaa --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/data/Readme.md @@ -0,0 +1,15 @@ +# Data files used by the Follower Crate + +## Mithril signature genesis keys + +These keys are required to validate mithril signatures for each respective Cardano network. + +| File | Network | Source | +| --- | --- | --- | +| `mainnet-genesis.vkey` | Main network. | [mainnet-genesis.vkey] | +| `preprod-genesis.vkey` | Pre-production network. | [preprod-genesis.vkey] | +| `preview-genesis.vkey` | Preview network. 
| [preview-genesis.vkey] | + +[mainnet-genesis.vkey]: https://raw.githubusercontent.com/input-output-hk/mithril/main/mithril-infra/configuration/release-mainnet/genesis.vkey +[preprod-genesis.vkey]: https://raw.githubusercontent.com/input-output-hk/mithril/main/mithril-infra/configuration/release-preprod/genesis.vkey +[preview-genesis.vkey]: https://raw.githubusercontent.com/input-output-hk/mithril/main/mithril-infra/configuration/pre-release-preview/genesis.vkey diff --git a/hermes/crates/cardano-chain-follower/src/data/mainnet-genesis.vkey b/hermes/crates/cardano-chain-follower/src/data/mainnet-genesis.vkey new file mode 100644 index 000000000..4bbe653ff --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/data/mainnet-genesis.vkey @@ -0,0 +1 @@ +5b3139312c36362c3134302c3138352c3133382c31312c3233372c3230372c3235302c3134342c32372c322c3138382c33302c31322c38312c3135352c3230342c31302c3137392c37352c32332c3133382c3139362c3231372c352c31342c32302c35372c37392c33392c3137365d \ No newline at end of file diff --git a/hermes/crates/cardano-chain-follower/src/data/preprod-genesis.vkey b/hermes/crates/cardano-chain-follower/src/data/preprod-genesis.vkey new file mode 100644 index 000000000..575154ce7 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/data/preprod-genesis.vkey @@ -0,0 +1 @@ +5b3132372c37332c3132342c3136312c362c3133372c3133312c3231332c3230372c3131372c3139382c38352c3137362c3139392c3136322c3234312c36382c3132332c3131392c3134352c31332c3233322c3234332c34392c3232392c322c3234392c3230352c3230352c33392c3233352c34345d \ No newline at end of file diff --git a/hermes/crates/cardano-chain-follower/src/data/preview-genesis.vkey b/hermes/crates/cardano-chain-follower/src/data/preview-genesis.vkey new file mode 100644 index 000000000..575154ce7 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/data/preview-genesis.vkey @@ -0,0 +1 @@ 
+5b3132372c37332c3132342c3136312c362c3133372c3133312c3231332c3230372c3131372c3139382c38352c3137362c3139392c3136322c3234312c36382c3132332c3131392c3134352c31332c3233322c3234332c34392c3232392c322c3234392c3230352c3230352c33392c3233352c34345d \ No newline at end of file diff --git a/hermes/crates/cardano-chain-follower/src/error.rs b/hermes/crates/cardano-chain-follower/src/error.rs new file mode 100644 index 000000000..6cb982cdf --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/error.rs @@ -0,0 +1,110 @@ +//! Library Crates Defined Errors + +use std::{io, path::PathBuf}; + +use pallas::network::miniprotocols::chainsync; +use thiserror::Error; + +use crate::network::Network; + +/// Crate error type. +#[derive(Debug, Error)] +pub enum Error { + /// Data encoding/decoding error. + #[error("Codec error: {0:?}")] + Codec(String), + /// Client connection error. + #[error("Client error: {0:?}")] + Client(pallas::network::facades::Error), + /// Blockfetch protocol error. + #[error("Blockfetch error: {0:?}")] + Blockfetch(pallas::network::miniprotocols::blockfetch::ClientError), + /// Chainsync protocol error. + #[error("Chainsync error: {0:?}")] + Chainsync(chainsync::ClientError), + /// Backfill Synch error. + #[error("Backfill Sync error: {0}")] + BackfillSync(String), + /// Live Sync error. + #[error("Live Sync error: {0:?}")] + LiveSync(String), + /// Follower failed to set its read pointer. + #[error("Failed to set follower read pointer")] + SetReadPointer, + /// Follower background follow task has stopped. + #[error("Follower follow task is not running")] + FollowTaskNotRunning, + /// Chain Sync already running error. + #[error("Chain Sync already running for network: {0}")] + ChainSyncAlreadyRunning(Network), + /// Mithril snapshot already running error. + #[error("Mithril Snapshot Sync already running for network: {0}")] + MithrilSnapshotSyncAlreadyRunning(Network), + /// Mithril snapshot error. 
+ #[error("Failed to read block(s) from Mithril snapshot")] + MithrilSnapshot(Option), + /// Mithril snapshot chunk error. + #[error("Failed to read block(s) from Mithril snapshot")] + MithrilSnapshotChunk(pallas_hardano::storage::immutable::chunk::Error), + /// Mithril snapshot traversal error. + #[error("Failed to traverse block(s) from Mithril snapshot")] + MithrilSnapshotTraverse(pallas::ledger::traverse::Error), + /// Failed to parse + #[error("Failed to parse network")] + ParseNetwork, + /// Mithril Snapshot path is not a directory + #[error("Mithril Snapshot path `{0}` is not a directory")] + MithrilSnapshotDirectoryNotFound(String), + /// Mithril Snapshot path is already configured for another network + #[error("Mithril Snapshot path `{0}` is already configured for network `{1}`")] + MithrilSnapshotDirectoryAlreadyConfiguredForNetwork(PathBuf, Network), + /// Mithril Snapshot path is already configured for this network + #[error("Mithril Snapshot path `{0}` is already configured as `{1}`")] + MithrilSnapshotDirectoryAlreadyConfigured(PathBuf, PathBuf), + /// Mithril Snapshot path not configured, trying to start auto-update + #[error("Mithril Snapshot path is not configured. Can not start Auto Snapshot Update.")] + MithrilSnapshotDirectoryNotConfigured, + /// Mithril snapshot directory failed to be created. + #[error("Mithril Snapshot path `{0}` does not exist, and could not be created. `{1}`")] + MithrilSnapshotDirectoryCreation(PathBuf, io::Error), + /// Mithril snapshot directory is not writable and we need to be able to update the + /// snapshot data. + #[error("Mithril Snapshot path `{0}` is not writable, or contains read-only files.")] + MithrilSnapshotDirectoryNotWritable(PathBuf), + /// Mithril aggregator URL is already defined for a network. + #[error("Mithril Aggregator URL `{0}` is already configured as `{1}`")] + MithrilAggregatorURLAlreadyConfigured(String, String), + /// Mithril aggregator URL is already defined for a network. 
+ #[error("Mithril Aggregator URL `{0}` is already configured for network `{1}`")] + MithrilAggregatorURLAlreadyConfiguredForNetwork(String, Network), + /// Mithril aggregator URL is not a valid URL + #[error("Mithril Aggregator URL `{0}` is not a valid URL: `{1}`")] + MithrilAggregatorURLParse(String, url::ParseError), + /// General Mithril Client Error + #[error("Mithril Client Error for {0} @ {1}: {2}")] + MithrilClient(Network, String, anyhow::Error), + /// General Mithril Index DB Error + #[error("Mithril Index DB Error for {0}: {1}")] + MithrilIndexDB(Network, anyhow::Error), + /// Mithril Aggregator has no Snapshots + #[error("Mithril Aggregator does not list any Mithril Snapshots for {0} @ {1}")] + MithrilClientNoSnapshots(Network, String), + /// Mithril Aggregator mismatch + #[error("Mithril Aggregator network mismatch. Wanted {0} Got {1}")] + MithrilClientNetworkMismatch(Network, String), + /// Mithril genesis VKEY Mismatch + #[error("Mithril Genesis VKEY for Network {0} is already set, and can not be changed to a different value.")] + MithrilGenesisVKeyMismatch(Network), + /// Mithril genesis VKEY is not properly HEX Encoded + #[error("Mithril Genesis VKEY for Network {0} is not hex encoded. Needs to be only HEX Ascii characters, and even length.")] + MithrilGenesisVKeyNotHex(Network), + /// Mithril Autoupdate requires an Aggregator and a VKEY and a Path + #[error("Mithril Auto Update Network {0} failed to start. No Aggregator and/or Genesis VKEY and/or Path are configured.")] + MithrilUpdateRequiresAggregatorAndVkeyAndPath(Network), + /// Internal Error + #[error("Internal error")] + Internal, +} + +/// Crate result type. +pub type Result = std::result::Result; diff --git a/hermes/crates/cardano-chain-follower/src/follow.rs b/hermes/crates/cardano-chain-follower/src/follow.rs index b92a79ee7..dd4099dbf 100644 --- a/hermes/crates/cardano-chain-follower/src/follow.rs +++ b/hermes/crates/cardano-chain-follower/src/follow.rs @@ -1,771 +1,428 @@ //! 
Cardano chain follow module. -use std::{future::Future, path::PathBuf}; - -use pallas::network::{facades::PeerClient, miniprotocols::Point}; -use tokio::{ - sync::{mpsc, oneshot}, - task::JoinHandle, -}; +use pallas::network::miniprotocols::txmonitor::{TxBody, TxId}; +use tokio::sync::broadcast::{self}; +use tracing::{debug, error}; use crate::{ - mithril_snapshot::MithrilSnapshot, Error, MultiEraBlockData, Network, PointOrTip, Result, + chain_sync::point_at_tip, + chain_sync_live_chains::{find_best_fork_block, get_live_block, live_chain_length}, + chain_sync_ready::{block_until_sync_ready, get_chain_update_rx_queue}, + chain_update::{self, ChainUpdate}, + mithril_snapshot::MithrilSnapshot, + mithril_snapshot_data::latest_mithril_snapshot_id, + mithril_snapshot_iterator::MithrilSnapshotIterator, + network::Network, + point::{TIP_POINT, UNKNOWN_POINT}, + stats::{self, rollback}, + MultiEraBlock, Point, Statistics, }; -/// Default [`Follower`] block buffer size. -const DEFAULT_CHAIN_UPDATE_BUFFER_SIZE: usize = 32; - -/// Enum of chain updates received by the follower. -pub enum ChainUpdate { - /// New block inserted on chain. - Block(MultiEraBlockData), - /// Chain rollback to the given block. - Rollback(MultiEraBlockData), -} - -impl ChainUpdate { - /// Gets the chain update's block data. - #[must_use] - pub fn block_data(&self) -> &MultiEraBlockData { - match self { - ChainUpdate::Block(block_data) | ChainUpdate::Rollback(block_data) => block_data, - } - } -} - -/// Builder used to create [`FollowerConfig`]s. -pub struct FollowerConfigBuilder { - /// Block buffer size option. - chain_update_buffer_size: usize, - /// Where to start following from. - follow_from: PointOrTip, - /// Path to the Mithril snapshot the follower should use. 
- mithril_snapshot_path: Option, -} - -impl Default for FollowerConfigBuilder { - fn default() -> Self { - Self { - chain_update_buffer_size: DEFAULT_CHAIN_UPDATE_BUFFER_SIZE, - follow_from: PointOrTip::Tip, - mithril_snapshot_path: None, - } - } +/// The Chain Follower +pub struct ChainFollower { + /// The Blockchain network we are following. + chain: Network, + /// Where we end following. + end: Point, + /// Block we processed most recently. + previous: Point, + /// Where we are currently in the following process. + current: Point, + /// What fork were we last on + fork: u64, + /// Mithril Snapshot + snapshot: MithrilSnapshot, + /// Mithril Snapshot Follower + mithril_follower: Option, + /// Mithril TIP Reached + mithril_tip: Option, + /// Live Block Updates + sync_updates: broadcast::Receiver, } -impl FollowerConfigBuilder { - /// Sets the size of the chain updates buffer used by the [`Follower`]. +impl ChainFollower { + /// Follow a blockchain. /// /// # Arguments /// - /// * `chain_update_buffer_size`: Size of the chain updates buffer. - #[must_use] - pub fn chain_update_buffer_size(mut self, block_buffer_size: usize) -> Self { - self.chain_update_buffer_size = block_buffer_size; - self - } - - /// Sets the point at which the follower will start following from. + /// * `chain` - The blockchain network to follow. + /// * `start` - The point or tip to start following from (inclusive). + /// * `end` - The point or tip to stop following from (inclusive). /// - /// # Arguments + /// # Returns /// - /// * `from`: Sync starting point. - #[must_use] - pub fn follow_from

(mut self, from: P) -> Self - where P: Into { - self.follow_from = from.into(); - self - } - - /// Sets the path of the Mithril snapshot the [`Follower`] will use. + /// The Chain Follower that will return blocks in the requested range. /// - /// # Arguments + /// # Notes /// - /// * `path`: Mithril snapshot path. - #[must_use] - pub fn mithril_snapshot_path(mut self, path: PathBuf) -> Self { - self.mithril_snapshot_path = Some(path); - self - } - - /// Builds a [`FollowerConfig`]. + /// IF end < start, the follower will immediately yield no blocks. + /// IF end is TIP, then the follower will continue to follow even when TIP is reached. + /// Otherwise only blocks in the request range will be returned. + /// + /// Also, UNLIKE the blockchain itself, the only relevant information is the Slot#. + /// The Block hash is not considered. + /// If start is not an exact Slot#, then the NEXT Slot immediately following will be + /// the first block returned. + /// If the end is also not an exact Slot# with a block, then the last block will be + /// the one immediately proceeding it. + /// + /// To ONLY follow from TIP, set BOTH start and end to TIP. #[must_use] - pub fn build(self) -> FollowerConfig { - FollowerConfig { - chain_update_buffer_size: self.chain_update_buffer_size, - follow_from: self.follow_from, - mithril_snapshot_path: self.mithril_snapshot_path, + pub async fn new(chain: Network, start: Point, end: Point) -> Self { + let rx = get_chain_update_rx_queue(chain).await; + + ChainFollower { + chain, + end, + previous: UNKNOWN_POINT, + current: start, + fork: 1, // This is correct, because Mithril is Fork 0. + snapshot: MithrilSnapshot::new(chain), + mithril_follower: None, + mithril_tip: None, + sync_updates: rx, } } -} -/// Configuration for the Cardano chain follower. -#[derive(Clone)] -pub struct FollowerConfig { - /// Configured chain update buffer size. - pub chain_update_buffer_size: usize, - /// Where to start following from. 
- pub follow_from: PointOrTip, - /// Path to the Mithril snapshot the follower should use. - pub mithril_snapshot_path: Option, -} + /// If we can, get the next update from the mithril snapshot. + async fn next_from_mithril(&mut self) -> Option { + let current_mithril_tip = latest_mithril_snapshot_id(self.chain).tip(); -/// Information used to connect to a client. -#[derive(Clone)] -struct ClientConnectInfo { - /// Node's address - address: String, - /// Network magic - network: Network, -} + if current_mithril_tip > self.current { + if self.mithril_follower.is_none() { + self.mithril_follower = self + .snapshot + .try_read_blocks_from_point(&self.current) + .await; + } -/// Handler for receiving the read block response from the client. -pub struct ReadBlock(tokio::task::JoinHandle>); - -impl Future for ReadBlock { - type Output = Result; - - fn poll( - mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, - ) -> std::task::Poll { - let p = &mut self.0; - // Using tokio pin instead of, e.g., pin-project because we use tokio as the async runtime - // lib for this crate. - tokio::pin!(p); - - match p.poll(cx) { - std::task::Poll::Ready(res) => { - match res { - Ok(res) => std::task::Poll::Ready(res), - Err(_) => std::task::Poll::Ready(Err(Error::InternalError)), + if let Some(follower) = self.mithril_follower.as_mut() { + if let Some(next) = follower.next().await { + // debug!("Pre Previous update 3 : {:?}", self.previous); + self.previous = self.current.clone(); + // debug!("Post Previous update 3 : {:?}", self.previous); + self.current = next.point(); + self.fork = 0; // Mithril Immutable data is always Fork 0. + let update = ChainUpdate::new(chain_update::Kind::Block, false, next); + return Some(update); } - }, - std::task::Poll::Pending => std::task::Poll::Pending, + } } - } -} -/// Handler for receiving the read block range response from the client. 
-pub struct ReadBlockRange(tokio::task::JoinHandle>>); - -impl Future for ReadBlockRange { - type Output = Result>; - - fn poll( - mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, - ) -> std::task::Poll { - let p = &mut self.0; - // Using tokio pin instead of, e.g., pin-project because we use tokio as the async runtime - // lib for this crate. - tokio::pin!(p); - - match p.poll(cx) { - std::task::Poll::Ready(res) => { - match res { - Ok(res) => std::task::Poll::Ready(res), - Err(_) => std::task::Poll::Ready(Err(Error::InternalError)), - } - }, - std::task::Poll::Pending => std::task::Poll::Pending, + if (self.mithril_tip.is_none() || current_mithril_tip > self.mithril_tip) + && self.current < self.mithril_tip + { + let snapshot = MithrilSnapshot::new(self.chain); + if let Some(block) = snapshot.read_block_at(¤t_mithril_tip).await { + // The Mithril Tip has moved forwards. + self.mithril_tip = Some(current_mithril_tip); + // Get the mithril tip block. + let update = + ChainUpdate::new(chain_update::Kind::ImmutableBlockRollForward, false, block); + return Some(update); + } + error!( + tip = ?self.mithril_tip, + current = ?current_mithril_tip, + "Mithril Tip Block is not in snapshot. Should not happen." + ); } + + None } -} -/// Cardano chain follower. -pub struct Follower { - /// Client connection information. - /// - /// This is used to open more connections when needed. - client_connect_info: ClientConnectInfo, - /// Chain update receiver. - chain_update_rx: mpsc::Receiver>, - /// Follow task request sender. - follow_task_request_tx: mpsc::Sender, - /// Follow task thread join handle. - follow_task_join_handle: JoinHandle<()>, - /// Optional Mithril snapshot information. - mithril_snapshot: Option, -} + /// If we can, get the next update from the mithril snapshot. 
+ async fn next_from_live_chain(&mut self) -> Option { + let mut next_block: Option = None; + let mut update_type = chain_update::Kind::Block; + let mut rollback_depth: u64 = 0; + + // Special Case: point = TIP_POINT. Just return the latest block in the live chain. + if self.current == TIP_POINT { + next_block = { + let block = get_live_block(self.chain, &self.current, -1, false)?; + Some(block) + }; + } -impl Follower { - /// Connects the follower to a producer using the node-to-node protocol. - /// - /// # Arguments - /// - /// * `address`: Address of the node to connect to. - /// * `network`: The [Network] the client is assuming it's connecting to. - /// * `config`: Follower's configuration (see [`FollowerConfigBuilder`]). - /// - /// # Errors - /// - /// Returns Err if the connection could not be established. - pub async fn connect(address: &str, network: Network, config: FollowerConfig) -> Result { - let mut client = PeerClient::connect(address, network.into()) - .await - .map_err(Error::Client)?; - - let Some(follow_from) = set_client_read_pointer(&mut client, config.follow_from).await? - else { - return Err(Error::SetReadPointer); - }; - - let mithril_snapshot = if let Some(path) = config.mithril_snapshot_path { - Some(MithrilSnapshot::from_path(path)?) - } else { - None - }; - - let connect_info = ClientConnectInfo { - address: address.to_string(), - network, - }; - - let (task_request_tx, chain_update_rx, task_join_handle) = task::FollowTask::spawn( - client, - connect_info, - mithril_snapshot.clone(), - config.chain_update_buffer_size, - follow_from, - ); + // In most cases we will be able to get the next block. + if next_block.is_none() { + // If we don't know the previous block, get the block requested. + let advance = if self.previous.is_unknown() { 0 } else { 1 }; + next_block = get_live_block(self.chain, &self.current, advance, true); + } + + // If we can't get the next consecutive block, then + // Get the best previous block. 
+ if next_block.is_none() { + debug!("No blocks left in live chain."); + + // IF this is an update still, and not us having caught up, then it WILL be a rollback. + update_type = chain_update::Kind::Rollback; + next_block = if let Some((block, depth)) = + find_best_fork_block(self.chain, &self.current, &self.previous, self.fork) + { + debug!("Found fork block: {block}"); + // IF the block is the same as our current previous, there has been no chain + // advancement, so just return None. + if block.point().strict_eq(&self.current) { + None + } else { + rollback_depth = depth; + Some(block) + } + } else { + debug!("No block to find, rewinding to latest mithril tip."); + let latest_mithril_point = latest_mithril_snapshot_id(self.chain).tip(); + if let Some(block) = MithrilSnapshot::new(self.chain) + .read_block_at(&latest_mithril_point) + .await + { + rollback_depth = live_chain_length(self.chain) as u64; + Some(block) + } else { + return None; + } + } + } - let client_connect_info = ClientConnectInfo { - address: address.to_string(), - network, - }; - - Ok(Self { - client_connect_info, - chain_update_rx, - follow_task_request_tx: task_request_tx, - follow_task_join_handle: task_join_handle, - mithril_snapshot, - }) + if let Some(next_block) = next_block { + // Update rollback stats for the follower if one is reported. + if update_type == chain_update::Kind::Rollback { + rollback(self.chain, stats::RollbackType::Follower, rollback_depth); + } + // debug!("Pre Previous update 4 : {:?}", self.previous); + self.previous = self.current.clone(); + // debug!("Post Previous update 4 : {:?}", self.previous); + self.current = next_block.point().clone(); + self.fork = next_block.fork(); + + let tip = point_at_tip(self.chain, &self.current).await; + let update = ChainUpdate::new(update_type, tip, next_block); + return Some(update); + } + + None } - /// Set the follower's chain read-pointer. Returns None if the point was - /// not found on the chain. 
- /// - /// # Arguments - /// - /// * `at`: Point at which to set the read-pointer. - /// - /// # Errors - /// - /// Returns Err if something went wrong while communicating with the producer. - pub async fn set_read_pointer

(&self, at: P) -> Result> - where P: Into { - let (response_tx, response_rx) = oneshot::channel(); - - let req = task::SetReadPointerRequest { - at: at.into(), - response_tx, - }; - - self.follow_task_request_tx - .send(req) - .await - .map_err(|_| Error::FollowTaskNotRunning)?; - - response_rx.await.map_err(|_| Error::FollowTaskNotRunning)? + /// Update the current Point, and return `false` if this fails. + fn update_current(&mut self, update: &Option) -> bool { + if let Some(update) = update { + let decoded = update.block_data().decode(); + self.current = Point::new(decoded.slot(), decoded.hash().to_vec()); + return true; + } + false } - /// Requests the client to read a block. + /// This is an unprotected version of `next()` which can ONLY be used within this + /// crate. Its purpose is to allow the chain data to be inspected/validate prior + /// to unlocking it for general access. /// - /// # Arguments + /// This function can NOT return None, but that state is used to help process data. /// - /// * `at`: Point at which to read the block. - #[must_use] - pub fn read_block

(&self, at: P) -> ReadBlock - where P: Into { - let connect_info = self.client_connect_info.clone(); - let mithril_snapshot = self.mithril_snapshot.clone(); - let at = at.into(); - - let join_handle = tokio::spawn(async move { - let mut client = PeerClient::connect(connect_info.address, connect_info.network.into()) - .await - .map_err(Error::Client)?; - - match at { - PointOrTip::Tip => { - let point = resolve_tip(&mut client).await?; - read_block_from_network(&mut client, point).await - }, - - PointOrTip::Point(point) => { - let snapshot_res = mithril_snapshot - .as_ref() - .and_then(|snapshot| snapshot.try_read_block(point.clone()).ok()) - .flatten(); - - match snapshot_res { - Some(block_data) => { - tracing::trace!("Read block from Mithril snapshot"); - Ok(block_data) - }, - None => read_block_from_network(&mut client, point).await, - } - }, + /// This function must not be exposed for general use. + #[allow(clippy::unused_async)] + pub(crate) async fn unprotected_next(&mut self) -> Option { + let mut update; + + // We will loop here until we can successfully return a new block + loop { + // Check if Immutable TIP has advanced, and if so, send a ChainUpdate about it. + // Should only happen once every ~6hrs. + // TODO. + + // Try and get the next update from the mithril chain, and return it if we are + // successful. + update = self.next_from_mithril().await; + if update.is_some() { + break; } - }); - ReadBlock(join_handle) - } + // No update from Mithril Data, so try and get one from the live chain. + update = self.next_from_live_chain().await; + if update.is_some() { + break; + } - /// Request the client to read a block range. - /// - /// # Arguments - /// - /// * `from`: Block range start. - /// * `to`: Block range end. - #[must_use] - pub fn read_block_range

(&self, from: Point, to: P) -> ReadBlockRange - where P: Into { - let connect_info = self.client_connect_info.clone(); - let mithril_snapshot = self.mithril_snapshot.clone(); - let to = to.into(); - - let join_handle = tokio::spawn(async move { - let mut client = PeerClient::connect(connect_info.address, connect_info.network.into()) - .await - .map_err(Error::Client)?; - - match to { - PointOrTip::Tip => { - let to_point = resolve_tip(&mut client).await?; - read_block_range_from_network(&mut client, from, to_point).await + // IF we can't get a new block directly from the mithril data, or the live chain, then + // wait for something to change which might mean we can get the next block. + let update = self.sync_updates.recv().await; + match update { + Ok(kind) => { + debug!("Update kind: {kind}"); + }, + Err(tokio::sync::broadcast::error::RecvError::Lagged(distance)) => { + debug!("Lagged by {} updates", distance); }, - PointOrTip::Point(to) => { - let snapshot_res = mithril_snapshot - .as_ref() - .and_then(|snapshot| { - snapshot.try_read_block_range(from.clone(), to.clone()).ok() - }) - .flatten(); - - match snapshot_res { - Some((last_point_read, mut block_data_vec)) => { - // If we couldn't get all the blocks from the snapshot, - // try fetching the remaining ones from the network. - if last_point_read.slot_or_default() < to.slot_or_default() { - let network_blocks = - read_block_range_from_network(&mut client, last_point_read, to) - .await?; - - // Discard 1st point as it's already been read from - // the snapshot - let mut network_blocks_iter = network_blocks.into_iter(); - drop(network_blocks_iter.next()); - - block_data_vec.extend(network_blocks_iter); - } - - Ok(block_data_vec) - }, - None => read_block_range_from_network(&mut client, from, to).await, - } + Err(tokio::sync::broadcast::error::RecvError::Closed) => { + // We are closed, so we need to wait for the next update. + // This is not an error. 
+ return None; }, } - }); + } + + // Update the current block, so we know which one to get next. + if !self.update_current(&update) { + return None; + } + + update + } + + /// Get the next block from the follower. + /// Returns NONE is there is no block left to return. + pub async fn next(&mut self) -> Option { + // If we aren't syncing TIP, and Current >= End, then return None + if self.end != TIP_POINT && self.current >= self.end { + return None; + } + + // Can't follow if SYNC is not ready. + block_until_sync_ready(self.chain).await; - ReadBlockRange(join_handle) + // Get next block from the iteration. + self.unprotected_next().await } - /// Receive the next chain update from the producer. + /// Get a single block from the chain by its point. /// - /// # Errors + /// If the Point does not point exactly at a block, it will return the next + /// consecutive block. /// - /// Returns Err if any producer communication errors occurred. - pub async fn next(&mut self) -> Result { - self.chain_update_rx - .recv() - .await - .ok_or(Error::FollowTaskNotRunning)? + /// This is a convenience function which just used `ChainFollower` to fetch a single + /// block. + pub async fn get_block(chain: Network, point: Point) -> Option { + // Get the block from the chain. + // This function suppose to run only once, so the end point + // can be set to `TIP_POINT` + let mut follower = Self::new(chain, point, TIP_POINT).await; + follower.next().await } - /// Closes the follower connection and stops its background task. + /// Get the current Immutable and live tips. /// - /// # Errors - /// - /// Returns Err if some error occurred in the background task. - pub async fn close(self) -> std::result::Result<(), tokio::task::JoinError> { - // NOTE(FelipeRosa): For now just abort the task since it needs no cancellation - self.follow_task_join_handle.abort(); + /// Note, this will block until the chain is synced, ready to be followed. 
+ pub async fn get_tips(chain: Network) -> (Point, Point) { + // Can't follow if SYNC is not ready. + block_until_sync_ready(chain).await; + + let tips = Statistics::tips(chain); - self.follow_task_join_handle.await + let mithril_tip = Point::fuzzy(tips.0); + let live_tip = Point::fuzzy(tips.1); + + (mithril_tip, live_tip) } -} -/// Contains functions related to the Follower's background task. -mod task { - use pallas::{ - ledger::traverse::MultiEraHeader, - network::{ - facades::PeerClient, - miniprotocols::{chainsync, Point}, - }, - }; - use tokio::sync::{mpsc, oneshot}; - - use super::{set_client_read_pointer, ChainUpdate, ClientConnectInfo}; - use crate::{mithril_snapshot::MithrilSnapshot, Error, MultiEraBlockData, PointOrTip, Result}; - - /// Request the task to set the read pointer to the given point or to the - /// tip. - pub(super) struct SetReadPointerRequest { - /// Point at which to set the read pointer. - pub(super) at: PointOrTip, - /// The channel that will be used to send the request's response. - pub(super) response_tx: oneshot::Sender>>, + /// Schedule a transaction to be posted to the blockchain. + /// + /// # Arguments + /// + /// * `chain` - The blockchain to post the transaction on. + /// * `txn` - The transaction to be posted. + /// + /// # Returns + /// + /// `TxId` - The ID of the transaction that was queued. + #[allow(clippy::unused_async)] + pub async fn post_txn(chain: Network, txn: TxBody) -> TxId { + #[allow(clippy::no_effect_underscore_binding)] + let _unused = chain; + #[allow(clippy::no_effect_underscore_binding)] + let _unused = txn; + + "unimplemented".to_string() } - /// Holds state for a follow task. - pub(super) struct FollowTask { - /// Client connection info. - connect_info: ClientConnectInfo, - /// Optional Mithril Snapshot that will be used by the follow task when fetching - /// chain updates. - mithril_snapshot: Option, - /// Request receiver. - request_rx: mpsc::Receiver, - /// Chain update sender. 
- chain_update_tx: mpsc::Sender>, + /// Check if a transaction, known by its `TxId`, has been sent to the Peer Node. + /// + /// Note, the `TxId` can ONLY be checked for ~6 hrs after it was posted. + /// After which, it should be on the blockchain, and its the applications job to track + /// if a transaction made it on-chain or not. + #[allow(clippy::unused_async)] + pub async fn txn_sent(chain: Network, id: TxId) -> bool { + #[allow(clippy::no_effect_underscore_binding)] + let _unused = chain; + #[allow(clippy::no_effect_underscore_binding)] + let _unused = id; + + false } +} - impl FollowTask { - /// Spawn a follow task. - pub(super) fn spawn( - client: PeerClient, connect_info: ClientConnectInfo, - mithril_snapshot: Option, buffer_size: usize, follow_from: Point, - ) -> ( - mpsc::Sender, - mpsc::Receiver>, - tokio::task::JoinHandle<()>, - ) { - let (request_tx, request_rx) = mpsc::channel(1); - let (chain_update_tx, chain_update_rx) = mpsc::channel(buffer_size); - - let this = Self { - connect_info, - mithril_snapshot, - request_rx, - chain_update_tx, - }; +// TODO(SJ) - Add a function to check if a transaction is pending, or has been sent to the +// chain. - ( - request_tx, - chain_update_rx, - tokio::spawn(this.run(client, follow_from)), - ) - } +#[cfg(test)] +mod tests { + use super::*; - /// Runs the follow task. - /// - /// It keeps asking the connected node for new chain updates. Every update and - /// communication errors are sent through the channel to the follower. - /// - /// Backpressure is achieved with the chain update channel's limited size. - async fn run(mut self, client: PeerClient, from: Point) { - let fetch_chain_updates_fut = Self::fetch_chain_updates( - client, - self.mithril_snapshot.as_ref(), - self.chain_update_tx.clone(), - from, - ); - tokio::pin!(fetch_chain_updates_fut); - - loop { - tokio::select! 
{ - Some(SetReadPointerRequest { at, response_tx }) = self.request_rx.recv() => { - let res = PeerClient::connect(&self.connect_info.address, self.connect_info.network.into()) - .await; - - let Ok(mut client) = res else { - drop(response_tx.send(Err(crate::Error::SetReadPointer))); - continue; - }; - - match set_client_read_pointer(&mut client, at).await { - Ok(Some(from)) => { - fetch_chain_updates_fut.set(Self::fetch_chain_updates( - client, - self.mithril_snapshot.as_ref(), - self.chain_update_tx.clone(), - from.clone(), - )); - - drop(response_tx.send(Ok(Some(from)))); - } - Ok(None) => { - drop(response_tx.send(Ok(None))); - } - Err(_) => { - drop(response_tx.send(Err(crate::Error::SetReadPointer))); - continue; - } - } - } - - () = &mut fetch_chain_updates_fut => {} - } - } - } + fn mock_block() -> MultiEraBlock { + let raw_block = hex::decode(include_str!("./../test_data/shelley.block")) + .expect("Failed to decode hex block."); - /// Sends the next chain update to the follower. - /// This can be either read from the Mithril snapshot (if configured) or - /// from the N2N remote client. 
- async fn fetch_chain_updates( - mut client: PeerClient, mithril_snapshot: Option<&MithrilSnapshot>, - chain_update_tx: mpsc::Sender>, from: Point, - ) { - let mut current_point = from; - - let set_to_snapshot = mithril_snapshot - .and_then(|snapshot| snapshot.try_read_blocks_from_point(current_point.clone())); - - if let Some(iter) = set_to_snapshot { - let mut last_recv_from_snapshot = false; - - for result in iter { - let mut fallback = false; - - if let Ok(raw_block_data) = result { - let block_data = MultiEraBlockData(raw_block_data); - - match block_data.decode() { - Ok(block) => { - current_point = - Point::Specific(block.slot(), block.hash().to_vec()); - - if chain_update_tx - .send(Ok(ChainUpdate::Block(block_data))) - .await - .is_err() - { - return; - } - - last_recv_from_snapshot = true; - }, - Err(_) => { - fallback = true; - }, - } - } else { - fallback = true; - } - - // If we, for any reason, we failed to get the block from the - // Mithril snapshot, fallback to the getting it from the client. 
- if fallback { - let res = set_client_read_pointer( - &mut client, - PointOrTip::Point(current_point.clone()), - ) - .await; - - match res { - Ok(Some(p)) => { - current_point = p; - - if !Self::send_next_chain_update( - &mut client, - chain_update_tx.clone(), - ) - .await - { - return; - } - }, - Ok(None) | Err(_) => { - drop( - chain_update_tx - .send(Err(crate::Error::SetReadPointer)) - .await, - ); - return; - }, - } - } - } + let pallas_block = pallas::ledger::traverse::MultiEraBlock::decode(raw_block.as_slice()) + .expect("cannot decode block"); - if last_recv_from_snapshot { - let res = set_client_read_pointer( - &mut client, - PointOrTip::Point(current_point.clone()), - ) - .await; + let previous_point = Point::new( + pallas_block.slot() - 1, + pallas_block + .header() + .previous_hash() + .expect("cannot get previous hash") + .to_vec(), + ); - if let Err(e) = res { - drop(chain_update_tx.send(Err(e)).await); - return; - } + MultiEraBlock::new(Network::Preprod, raw_block.clone(), &previous_point, 1) + .expect("cannot create block") + } - // Skip the next update from the client since we've already - // read it the Mithril snapshot. 
- drop(Self::next_from_client(&mut client).await); - } - } + #[tokio::test] + async fn test_chain_follower_new() { + let chain = Network::Mainnet; + let start = Point::new(100u64, vec![]); + let end = Point::fuzzy(999u64); + + let follower = ChainFollower::new(chain, start.clone(), end.clone()).await; + + assert_eq!(follower.chain, chain); + assert_eq!(follower.end, end); + assert_eq!(follower.previous, UNKNOWN_POINT); + assert_eq!(follower.current, start); + assert_eq!(follower.fork, 1); + assert!(follower.mithril_follower.is_none()); + assert!(follower.mithril_tip.is_none()); + } - while Self::send_next_chain_update(&mut client, chain_update_tx.clone()).await {} - } + #[tokio::test] + async fn test_chain_follower_update_current_none() { + let chain = Network::Mainnet; + let start = Point::new(100u64, vec![]); + let end = Point::fuzzy(999u64); - /// Waits for the next update from the node the client is connected to. - /// - /// Is cancelled by closing the `chain_update_tx` receiver end (explicitly or by - /// dropping it). - async fn next_from_client(client: &mut PeerClient) -> crate::Result> { - tracing::trace!("Requesting next chain update"); - let res = { - match client.chainsync().state() { - chainsync::State::CanAwait => client.chainsync().recv_while_can_await().await, - chainsync::State::MustReply => client.chainsync().recv_while_must_reply().await, - _ => client.chainsync().request_next().await, - } - .map_err(Error::Chainsync)? 
- }; + let mut follower = ChainFollower::new(chain, start.clone(), end.clone()).await; - tracing::trace!("Received block data from client"); - - match res { - chainsync::NextResponse::RollForward(header, _tip) => { - let decoded_header = MultiEraHeader::decode( - header.variant, - header.byron_prefix.map(|p| p.0), - &header.cbor, - ) - .map_err(Error::Codec)?; - - let point = - Point::Specific(decoded_header.slot(), decoded_header.hash().to_vec()); - tracing::trace!(point = ?point, "Fetching roll forward block data"); - let block_data = client - .blockfetch() - .fetch_single(point) - .await - .map_err(Error::Blockfetch)?; - - Ok(Some(ChainUpdate::Block(MultiEraBlockData(block_data)))) - }, - chainsync::NextResponse::RollBackward(point, _tip) => { - tracing::trace!(point = ?point, "Fetching roll backward block data"); - let block_data = client - .blockfetch() - .fetch_single(point) - .await - .map_err(Error::Blockfetch)?; - - Ok(Some(ChainUpdate::Rollback(MultiEraBlockData(block_data)))) - }, - chainsync::NextResponse::Await => Ok(None), - } - } + let result = follower.update_current(&None); - /// Sends the next chain update through the follower's chain update channel. - async fn send_next_chain_update( - client: &mut PeerClient, chain_update_tx: mpsc::Sender>, - ) -> bool { - loop { - let res = Self::next_from_client(client).await; - - match res { - Err(err) => { - if chain_update_tx.send(Err(err)).await.is_err() { - return false; - } - }, - Ok(next_response) => { - if let Some(chain_update) = next_response { - if chain_update_tx.send(Ok(chain_update)).await.is_err() { - return false; - } - - return true; - } - }, - } - } - } + assert!(!result); } -} -/// Sets the N2N remote client's read pointer. 
-async fn set_client_read_pointer(client: &mut PeerClient, at: PointOrTip) -> Result> { - match at { - PointOrTip::Point(Point::Origin) => { - client - .chainsync() - .intersect_origin() - .await - .map(Some) - .map_err(Error::Chainsync) - }, - PointOrTip::Point(p @ Point::Specific(..)) => { - client - .chainsync() - .find_intersect(vec![p]) - .await - .map(|(point, _)| point) - .map_err(Error::Chainsync) - }, - PointOrTip::Tip => { - client - .chainsync() - .intersect_tip() - .await - .map(Some) - .map_err(Error::Chainsync) - }, - } -} + #[tokio::test] + async fn test_chain_follower_update_current() { + let chain = Network::Mainnet; + let start = Point::new(100u64, vec![]); + let end = Point::fuzzy(999u64); -/// Finds the tip point. -/// -/// NOTE: This changes the client's read pointer position. -#[inline] -async fn resolve_tip(client: &mut PeerClient) -> Result { - client - .chainsync() - .intersect_tip() - .await - .map_err(Error::Chainsync) -} + let mut follower = ChainFollower::new(chain, start.clone(), end.clone()).await; -/// Reads a block from the network using the N2N client. -async fn read_block_from_network( - blockfetch_client: &mut PeerClient, point: Point, -) -> Result { - // Used in tracing - let slot = point.slot_or_default(); - - let block_data = blockfetch_client - .blockfetch() - .fetch_single(point) - .await - .map_err(Error::Blockfetch)?; - - tracing::trace!(slot, "Block read from n2n"); - Ok(MultiEraBlockData(block_data)) -} + let block_data = mock_block(); + let update = ChainUpdate::new(chain_update::Kind::Block, false, block_data); + + let result = follower.update_current(&Some(update.clone())); -/// Reads a range of blocks from the network using the N2N client. 
-async fn read_block_range_from_network( - blockfetch_client: &mut PeerClient, from: Point, to: Point, -) -> Result> { - // Used in tracing - let from_slot = from.slot_or_default(); - let to_slot = to.slot_or_default(); - - let data_vec = blockfetch_client - .blockfetch() - .fetch_range((from, to)) - .await - .map_err(Error::Blockfetch)? - .into_iter() - .map(MultiEraBlockData) - .collect(); - - tracing::trace!(from_slot, to_slot, "Block range read from n2n"); - - Ok(data_vec) + assert!(result); + assert_eq!(follower.current, update.block_data().point()); + } } diff --git a/hermes/crates/cardano-chain-follower/src/lib.rs b/hermes/crates/cardano-chain-follower/src/lib.rs index 215d6bf74..692ac6e34 100644 --- a/hermes/crates/cardano-chain-follower/src/lib.rs +++ b/hermes/crates/cardano-chain-follower/src/lib.rs @@ -1,175 +1,35 @@ //! Cardano chain follower. +mod chain_sync; +mod chain_sync_config; +mod chain_sync_live_chains; +mod chain_sync_ready; +mod chain_update; +mod error; mod follow; +pub mod metadata; +mod mithril_query; mod mithril_snapshot; - -use std::str::FromStr; - -pub use follow::*; -pub use pallas::network::miniprotocols::Point; -use pallas::{ - ledger::traverse::{wellknown::GenesisValues, MultiEraBlock}, - network::miniprotocols::{ - chainsync, MAINNET_MAGIC, PREVIEW_MAGIC, PRE_PRODUCTION_MAGIC, TESTNET_MAGIC, - }, -}; -use thiserror::Error; - -/// Crate error type. -#[derive(Debug, Error)] -pub enum Error { - /// Data encoding/decoding error. - #[error("Codec error: {0:?}")] - Codec(pallas::ledger::traverse::Error), - /// Client connection error. - #[error("Client error: {0:?}")] - Client(pallas::network::facades::Error), - /// Blockfetch protocol error. - #[error("Blockfetch error: {0:?}")] - Blockfetch(pallas::network::miniprotocols::blockfetch::ClientError), - /// Chainsync protocol error. - #[error("Chainsync error: {0:?}")] - Chainsync(chainsync::ClientError), - /// Follower failed to set its read pointer. 
- #[error("Failed to set follower read pointer")] - SetReadPointer, - /// Follower background follow task has stopped. - #[error("Follower follow task is not running")] - FollowTaskNotRunning, - /// Mithril snapshot error. - #[error("Failed to read block(s) from Mithril snapshot")] - MithrilSnapshot, - /// Failed to parse - #[error("Failed to parse network")] - ParseNetwork, - /// Internal Error - #[error("Internal error")] - InternalError, -} - -/// Crate result type. -pub type Result = std::result::Result; - -/// A point in the chain or the tip. -#[derive(Clone, PartialEq, Eq, Hash)] -pub enum PointOrTip { - /// Represents a specific point of the chain. - Point(Point), - /// Represents the tip of the chain. - Tip, -} - -impl From for PointOrTip { - fn from(point: Point) -> Self { - Self::Point(point) - } -} - -/// CBOR encoded data of a multi-era block. -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct MultiEraBlockData(Vec); - -impl MultiEraBlockData { - /// Decodes the data into a multi-era block. - /// - /// # Errors - /// - /// Returns Err if the block's era couldn't be decided or if the encoded data is - /// invalid. - pub fn decode(&self) -> Result { - let block = MultiEraBlock::decode(&self.0).map_err(Error::Codec)?; - - Ok(block) - } - - /// Consumes the [`MultiEraBlockData`] returning the block data raw bytes. - #[must_use] - pub fn into_raw_data(self) -> Vec { - self.0 - } -} - -impl AsRef<[u8]> for MultiEraBlockData { - fn as_ref(&self) -> &[u8] { - &self.0 - } -} - -/// Enum of possible Cardano networks. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum Network { - /// Cardano mainnet network. - Mainnet, - /// Cardano pre-production network. - Preprod, - /// Cardano preview network. - Preview, - /// Cardano testnet network. - Testnet, -} - -/// The human readable name of the Cardano mainnet network. -const MAINNET_NAME: &str = "mainnet"; -/// The human readable name of the Cardano pre-production network. 
-const PREPROD_NAME: &str = "preprod"; -/// The human readable name of the Cardano preview network. -const PREVIEW_NAME: &str = "preview"; -/// The human readable name of a Cardano local testnet network. -const TESTNET_NAME: &str = "testnet"; - -impl FromStr for Network { - type Err = Error; - - fn from_str(input: &str) -> std::result::Result { - match input { - MAINNET_NAME => Ok(Network::Mainnet), - PREPROD_NAME => Ok(Network::Preprod), - PREVIEW_NAME => Ok(Network::Preview), - TESTNET_NAME => Ok(Network::Testnet), - _ => Err(Error::ParseNetwork), - } - } -} - -impl std::fmt::Display for Network { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Network::Mainnet => write!(f, "{MAINNET_NAME}"), - Network::Preprod => write!(f, "{PREPROD_NAME}"), - Network::Preview => write!(f, "{PREVIEW_NAME}"), - Network::Testnet => write!(f, "{TESTNET_NAME}"), - } - } -} - -impl From for u64 { - fn from(network: Network) -> Self { - match network { - Network::Mainnet => MAINNET_MAGIC, - Network::Preprod => PRE_PRODUCTION_MAGIC, - Network::Preview => PREVIEW_MAGIC, - Network::Testnet => TESTNET_MAGIC, - } - } -} - -/// Return genesis values for given network -#[must_use] -pub fn network_genesis_values(network: &Network) -> Option { - match network { - Network::Mainnet => GenesisValues::from_magic(MAINNET_MAGIC), - Network::Preprod => GenesisValues::from_magic(PRE_PRODUCTION_MAGIC), - Network::Preview => GenesisValues::from_magic(PREVIEW_MAGIC), - Network::Testnet => GenesisValues::from_magic(TESTNET_MAGIC), - } -} - -/// Validate a multi-era block. -/// -/// This does not execute Plutus scripts nor validates ledger state. -/// It only checks that the block is correctly formatted for its era. -#[allow(dead_code)] -fn validate_multiera_block(_block: &MultiEraBlock) { - // (fsgr): Not sure about hwo the validation will be done in here yet. 
- todo!() -} +mod mithril_snapshot_config; +mod mithril_snapshot_data; +mod mithril_snapshot_iterator; +mod mithril_snapshot_sync; +mod mithril_turbo_downloader; +mod multi_era_block_data; +mod network; +mod point; +mod snapshot_id; +mod stats; +pub mod turbo_downloader; +mod utils; +mod witness; + +pub use chain_sync_config::ChainSyncConfig; +pub use chain_update::{ChainUpdate, Kind}; +pub use error::Result; +pub use follow::ChainFollower; +pub use metadata as Metadata; +pub use multi_era_block_data::MultiEraBlock; +pub use network::Network; +pub use point::{Point, ORIGIN_POINT, TIP_POINT}; +pub use stats::Statistics; diff --git a/hermes/crates/cardano-chain-follower/src/metadata/cip36.rs b/hermes/crates/cardano-chain-follower/src/metadata/cip36.rs new file mode 100644 index 000000000..7d831967c --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/metadata/cip36.rs @@ -0,0 +1,1102 @@ +//! Decoder and Validator for CIP36 Metadata + +use std::sync::Arc; + +use ed25519_dalek::Verifier; +use minicbor::Decoder; +use pallas::ledger::traverse::MultiEraTx; +use tracing::debug; + +use super::{ + DecodedMetadata, DecodedMetadataItem, DecodedMetadataValues, RawAuxData, ValidationReport, +}; +use crate::Network; + +/// CIP36 Metadata Label +pub const LABEL: u64 = 61284; +/// CIP36 Metadata Signature label +pub const SIG_LABEL: u64 = 61285; + +/// Project Catalyst Purpose +pub const PROJECT_CATALYST_PURPOSE: u64 = 0; + +/// Signdata Preamble = `{ 61284: ?? }` +/// CBOR Decoded = +/// A1 # map(1) +/// 19 EF64 # unsigned(61284) +pub const SIGNDATA_PREAMBLE: [u8; 4] = [0xA1, 0x19, 0xEF, 0x64]; + +/// Ed25519 Public Key +type Ed25519PubKey = ed25519_dalek::VerifyingKey; + +/// Voting Public Key - Also known as Delegation in the CIP36 Specification +#[derive(Clone, Debug)] +pub struct VotingPubKey { + /// Ed25519 Public Key + pub voting_pk: Ed25519PubKey, + /// Weight of the Voting Public Key + pub weight: u32, +} + +/// CIP 36 Registration Data. 
+#[derive(Clone, Debug, Default)] +pub struct Cip36 { + /// Is this CIP36 or CIP15 format. + #[allow(clippy::struct_field_names)] + pub cip36: Option, + /// Voting Keys (Called Delegations in the CIP-36 Spec) + /// If No Voting Keys could be decoded, this will be an empty array. + pub voting_keys: Vec, + /// Stake Address to associate with the Voting Keys + pub stake_pk: Option, + /// Payment Address to associate with the Voting Keys + /// No Payment key decoded will be an empty vec. + pub payment_addr: Vec, + /// Is the address able to be paid to? (Can't be a script or Stake address) + pub payable: bool, + /// Raw Nonce (Nonce that has not had slot correction applied) + pub raw_nonce: u64, + /// Nonce (Nonce that has been slot corrected) + pub nonce: u64, + /// Registration Purpose (Always 0 for Catalyst) + pub purpose: u64, + /// Signature Validates + pub signed: bool, + /// Strict Catalyst Validated + pub strict_catalyst: bool, +} + +impl Cip36 { + /// Decode and validate CIP36/15 Metadata + /// + /// CIP15 is a subset of CIP36. + /// + /// See: + /// * + /// * + /// + /// # Parameters + /// * `decoded_metadata` - Decoded Metadata - Will be updated only if CIP36 Metadata + /// is found. + /// * `slot` - Current Slot + /// * `txn` - Transaction Aux data was attached to and to be validated/decoded + /// against. Not used for CIP36 Metadata. + /// * `raw_aux_data` - Raw Auxiliary Data for the transaction. + /// * `catalyst_strict` - Strict Catalyst Validation - otherwise Catalyst Specific + /// rules/workarounds are not applied. + /// + /// # Returns + /// + /// Nothing. IF CIP36 Metadata is found it will be updated in `decoded_metadata`. 
+ #[allow(clippy::too_many_lines)] + pub(crate) fn decode_and_validate( + decoded_metadata: &DecodedMetadata, slot: u64, txn: &MultiEraTx, raw_aux_data: &RawAuxData, + catalyst_strict: bool, chain: Network, + ) { + let k61284 = raw_aux_data.get_metadata(LABEL); + let k61285 = raw_aux_data.get_metadata(SIG_LABEL); + + let mut cip36 = Cip36 { + strict_catalyst: catalyst_strict, + ..Default::default() + }; + + // If there is NO Cip36/Cip15 Metadata then nothing to decode or validate, so quickly + // exit. + if k61284.is_none() && k61285.is_none() { + return; + } + + // if let Some(reg) = k61284.as_ref() { + // debug!("CIP36 Metadata Detected: {slot}, {reg:02x?}"); + //} + // if let Some(sig) = k61285.as_ref() { + // debug!("CIP36 Signature Detected: {slot}, {sig:02x?}"); + //} + + // Any Decode/Validation errors go here. + let mut validation_report = ValidationReport::new(); + + // Check if we actually have metadata to decode for the CIP36 Registration. + let Some(raw_cip36) = k61284 else { + cip36.decoding_failed( + "No CIP36 Metadata found, but CIP36 Signature Metadata found.", + &mut validation_report, + decoded_metadata, + ); + debug!("decoded 1: {decoded_metadata:?}"); + return; + }; + + let cip36_slice = raw_cip36.as_slice(); + + let mut decoder = Decoder::new(cip36_slice); + + // It should be a definite map, get the number of entries in the map. 
+ let Some(cip36_map_entries) = + cip36.decode_map_entries(&mut decoder, &mut validation_report, decoded_metadata) + else { + debug!("decoded 2: {decoded_metadata:?}"); + return; + }; + + let mut found_keys: Vec = Vec::new(); + + for _entry in 0..cip36_map_entries { + let Some(key) = + cip36.decode_map_key(&mut decoder, &mut validation_report, decoded_metadata) + else { + debug!("decoded 3: {decoded_metadata:?} : {raw_cip36:02x?}"); + return; + }; + + if found_keys.contains(&key) { + validation_report.push(format!("Duplicate key found in CIP36 Metadata: {key}")); + } else { + found_keys.push(key); + match key { + 1 => { + if cip36 + .decode_voting_key( + &mut decoder, + &mut validation_report, + decoded_metadata, + ) + .is_none() + { + // debug!("decoded 4: {decoded_metadata:?} : {validation_report:?} : + // {raw_cip36:02x?}"); + return; + } + }, + 2 => { + if cip36 + .decode_stake_pub( + &mut decoder, + &mut validation_report, + decoded_metadata, + ) + .is_none() + { + // debug!("decoded 5: {decoded_metadata:?} : {validation_report:?} : + // {raw_cip36:02x?}"); + return; + } + }, + 3 => { + if cip36 + .decode_payment_address( + &mut decoder, + &mut validation_report, + decoded_metadata, + txn, + chain, + ) + .is_none() + { + debug!("decoded 6: {decoded_metadata:?} : {validation_report:?} : {raw_cip36:02x?}"); + return; + } + }, + 4 => { + if cip36 + .decode_nonce( + &mut decoder, + &mut validation_report, + decoded_metadata, + slot, + ) + .is_none() + { + debug!("decoded 7: {decoded_metadata:?} : {validation_report:?} : {raw_cip36:02x?}"); + return; + } + }, + 5 => { + if cip36 + .decode_purpose(&mut decoder, &mut validation_report, decoded_metadata) + .is_none() + { + debug!("decoded 8: {decoded_metadata:?} : {validation_report:?} : {raw_cip36:02x?}"); + return; + } + }, + _ => { + validation_report + .push(format!("Invalid key found in CIP36 Metadata: {key}")); + }, + } + } + } + + // Validate that all keys required to be present in the CIP36 Metadata are 
present. + if !found_keys.contains(&1) { + validation_report.push( + "The CIP36 Metadata Voting Key/Delegation is missing from the data.".to_string(), + ); + } + if !found_keys.contains(&2) { + validation_report + .push("The CIP36 Metadata Stake Address is missing from the data.".to_string()); + } + if !found_keys.contains(&3) { + validation_report + .push("The CIP36 Metadata Payment Address is missing from the data.".to_string()); + } + if !found_keys.contains(&4) { + validation_report + .push("The CIP36 Metadata Nonce is missing from the data.".to_string()); + } + + if !decoded_metadata.0.is_empty() { + debug!("decoded 9: {decoded_metadata:?}"); + } + // If we get this far, decode the signature, and verify it. + cip36.validate_signature(&raw_cip36, k61285, &mut validation_report, decoded_metadata); + } + + /// Decoding of the CIP36 metadata failed, and can not continue. + fn decoding_failed( + &self, reason: &str, validation_report: &mut ValidationReport, + decoded_metadata: &DecodedMetadata, + ) { + validation_report.push(reason.into()); + decoded_metadata.0.insert( + LABEL, + Arc::new(DecodedMetadataItem { + value: DecodedMetadataValues::Cip36(Arc::new(self.clone()).clone()), + report: validation_report.clone(), + }), + ); + } + + /// Decode number of entries in the CIP36 metadata map. + fn decode_map_entries( + &self, decoder: &mut Decoder, validation_report: &mut ValidationReport, + decoded_metadata: &DecodedMetadata, + ) -> Option { + let cip36_map_entries = match decoder.map() { + Ok(None) => { + self.decoding_failed( + "CIP36 Metadata was Indefinite Map, Invalid Encoding.", + validation_report, + decoded_metadata, + ); + return None; + }, + Ok(Some(entries)) => entries, + Err(error) => { + self.decoding_failed( + format!("CIP36 Metadata was error decoding Map: {error}").as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + }; + + Some(cip36_map_entries) + } + + /// Decode the Key of an entry in the CIP36 Metadata map. 
+ fn decode_map_key( + &self, decoder: &mut Decoder, validation_report: &mut ValidationReport, + decoded_metadata: &DecodedMetadata, + ) -> Option { + let key = match decoder.u64() { + Ok(key) => key, + Err(err) => { + self.decoding_failed( + format!("CIP36 Metadata was error decoding Map Entry Key: {err}").as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + }; + + Some(key) + } + + /// Decode the Registration Purpose in the CIP36 Metadata map. + fn decode_purpose( + &mut self, decoder: &mut Decoder, validation_report: &mut ValidationReport, + decoded_metadata: &DecodedMetadata, + ) -> Option { + let purpose = match decoder.u64() { + Ok(key) => key, + Err(err) => { + self.decoding_failed( + format!("Error decoding Purpose Value: {err}").as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + }; + + if self.strict_catalyst && purpose != PROJECT_CATALYST_PURPOSE { + validation_report.push(format!("Registration contains unknown purpose: {purpose}")); + } + + self.purpose = purpose; + + Some(purpose) + } + + /// Decode the Registration Nonce in the CIP36 Metadata map. + fn decode_nonce( + &mut self, decoder: &mut Decoder, validation_report: &mut ValidationReport, + decoded_metadata: &DecodedMetadata, slot: u64, + ) -> Option { + let raw_nonce = match decoder.u64() { + Ok(key) => key, + Err(err) => { + self.decoding_failed( + format!("Error decoding Purpose Value: {err}").as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + }; + + let nonce = if self.strict_catalyst && raw_nonce > slot { + slot + } else { + raw_nonce + }; + + self.raw_nonce = raw_nonce; + self.nonce = nonce; + + Some(nonce) + } + + /// Decode the Payment Address Metadata in the CIP36 Metadata map. 
+ fn decode_payment_address( + &mut self, decoder: &mut Decoder, validation_report: &mut ValidationReport, + decoded_metadata: &DecodedMetadata, _txn: &MultiEraTx, chain: Network, + ) -> Option { + let raw_address = match decoder.bytes() { + Ok(address) => address, + Err(err) => { + self.decoding_failed( + format!("Error decoding Payment Address: {err}").as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + }; + + let Some(header_byte) = raw_address.first() else { + self.decoding_failed( + "Error decoding Payment Address: Empty", + validation_report, + decoded_metadata, + ); + return None; + }; + + // See: https://cips.cardano.org/cip/CIP-19 for details on address decoding. + let network_tag = header_byte & 0x0F; + let header_type = header_byte >> 4; + match header_type { + 0..=3 => { + if raw_address.len() != 57 { + validation_report.push(format!("Address Length {} != 57", raw_address.len())); + } + }, + 4 | 5 => { + if raw_address.len() < 29 { + validation_report + .push(format!("Pointer Address Length {} < 29", raw_address.len())); + } + }, + 6 | 7 | 14 | 15 => { + if raw_address.len() != 29 { + validation_report.push(format!( + "Pointer Address Length {} != 29", + raw_address.len() + )); + } + }, + _ => { + validation_report.push(format!( + "Address Type {header_type} is invalid and unsupported" + )); + }, + } + + // Check address is for the correct network of the transaction. + if header_type == 8 { + validation_report.push("Byron Addresses are unsupported".to_string()); + } else { + let valid = match chain { + Network::Mainnet => network_tag == 1, + Network::Preprod | Network::Preview => network_tag == 0, + }; + if !valid { + validation_report.push(format!( + "Network Tag {network_tag} does not match transactions Network ID" + )); + } + } + + // Addresses are only payable if they are a normal payment address and not a script + // address. 
+ self.payable = header_type <= 7 && (header_type & 0x1 == 0); + self.payment_addr = raw_address.to_vec(); + + Some(self.payment_addr.len()) + } + + /// Decode the Payment Address Metadata in the CIP36 Metadata map. + fn decode_ed25519_pub_key( + &mut self, decoder: &mut Decoder, validation_report: &mut ValidationReport, + decoded_metadata: &DecodedMetadata, key_type: &str, + ) -> Option { + let pub_key = match decoder.bytes() { + Ok(pub_key) => pub_key, + Err(err) => { + self.decoding_failed( + format!("Error decoding {key_type}: {err}").as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + }; + + if pub_key.len() == ed25519_dalek::PUBLIC_KEY_LENGTH { + // Safe to use `unwrap()` here because the length is fixed and we know it's 32 bytes + // long. + #[allow(clippy::unwrap_used)] + let pub_key: [u8; ed25519_dalek::PUBLIC_KEY_LENGTH] = pub_key.try_into().unwrap(); + match ed25519_dalek::VerifyingKey::from_bytes(&pub_key) { + Ok(pk) => return Some(pk), + Err(error) => { + validation_report.push(format!("{key_type} not valid Ed25519: {error}")); + }, + } + } else { + validation_report.push(format!( + "{key_type} Length {} != {}", + pub_key.len(), + ed25519_dalek::PUBLIC_KEY_LENGTH + )); + } + + None + } + + /// Decode the Staking Public Key in the CIP36 Metadata map. + fn decode_stake_pub( + &mut self, decoder: &mut Decoder, validation_report: &mut ValidationReport, + decoded_metadata: &DecodedMetadata, + ) -> Option { + let pk = self.decode_ed25519_pub_key( + decoder, + validation_report, + decoded_metadata, + "Stake Public Key", + )?; + self.stake_pk = Some(pk); + + Some(self.stake_pk.as_slice().len()) + } + + /// Decode an individual delegation entry from the CIP36 Metadata map. 
+ fn decode_delegation( + &mut self, decoder: &mut Decoder, validation_report: &mut ValidationReport, + decoded_metadata: &DecodedMetadata, + ) -> Option { + match decoder.array() { + Ok(Some(2)) => { + let vk = self.decode_ed25519_pub_key( + decoder, + validation_report, + decoded_metadata, + "Delegation Public Key", + )?; + let weight = match decoder.u32() { + Ok(weight) => weight, + Err(err) => { + self.decoding_failed( + format!("Error Decoding CIP36 Delegations Entry Weight: {err}.") + .as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + }; + + self.voting_keys.push(VotingPubKey { + voting_pk: vk, + weight, + }); + }, + Ok(Some(entries)) => { + self.decoding_failed( + format!("Error Decoding CIP36 Delegations Entry Array: Must have exactly 2 elements, had {entries}.").as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + Ok(None) => { + self.decoding_failed( + "Error Decoding CIP36 Delegations Entry Array: Indefinite Array is invalid encoding.", + validation_report, + decoded_metadata, + ); + return None; + }, + Err(err) => { + self.decoding_failed( + format!("Error Decoding CIP36 Delegations Entry Array: {err}").as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + } + + Some(self.voting_keys.len()) + } + + /// Decode the Voting Key(s) in the CIP36 Metadata map. + fn decode_voting_key( + &mut self, decoder: &mut Decoder, validation_report: &mut ValidationReport, + decoded_metadata: &DecodedMetadata, + ) -> Option { + match decoder.datatype() { + Ok(key_type) => { + match key_type { + minicbor::data::Type::Bytes => { + // CIP 15 type registration (single voting key). + self.cip36 = Some(false); + let vk = self.decode_ed25519_pub_key( + decoder, + validation_report, + decoded_metadata, + "Voting Public Key", + )?; + self.voting_keys.push(VotingPubKey { + voting_pk: vk, + weight: 1, + }); + }, + minicbor::data::Type::Array => { + // CIP 36 type registration (multiple voting keys). 
+ self.cip36 = Some(true); + match decoder.array() { + Ok(Some(entries)) => { + for _entry in 0..entries { + self.decode_delegation( + decoder, + validation_report, + decoded_metadata, + )?; + } + }, + Ok(None) => { + self.decoding_failed( + "Error Decoding CIP36 Delegations Array: Indefinite Array is invalid encoding.", + validation_report, + decoded_metadata, + ); + }, + Err(err) => { + self.decoding_failed( + format!("Error Decoding CIP36 Delegations Array: {err}") + .as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + } + }, + _ => { + self.decoding_failed( + format!( + "Error inspecting Voting Key type: Unexpected CBOR Type {key_type}" + ) + .as_str(), + validation_report, + decoded_metadata, + ); + }, + } + }, + Err(error) => { + self.decoding_failed( + format!("Error inspecting Voting Key type: {error}").as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + } + + if self.strict_catalyst && self.voting_keys.len() != 1 { + validation_report.push(format!( + "Catalyst Supports only a single Voting Key per registration. Found {}", + self.voting_keys.len() + )); + } + + Some(self.voting_keys.len()) + } + + /// Decode a signature from the Signature metadata in 61285 + /// Also checks that the signature is valid against the public key. + #[allow(clippy::too_many_lines)] + fn validate_signature( + &mut self, metadata: &Arc>, sig_metadata: Option>>, + validation_report: &mut ValidationReport, decoded_metadata: &DecodedMetadata, + ) -> Option { + // Check if we actually have metadata to decode for the CIP36 Registration. 
+ let Some(raw_cip36) = sig_metadata else { + self.decoding_failed( + "No CIP36 Signature found, but CIP36 Metadata found.", + validation_report, + decoded_metadata, + ); + return None; + }; + + let cip36_slice = raw_cip36.as_slice(); + + let mut decoder = Decoder::new(cip36_slice); + + match decoder.map() { + Ok(Some(1)) => (), // Ok + Ok(Some(x)) => { + self.decoding_failed( + format!("CIP36 Signature Map decoding failed: Has {x} entries, should have 1.") + .as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + Ok(None) => { + self.decoding_failed( + "CIP36 Signature Map is Indefinite. Decoding failed.", + validation_report, + decoded_metadata, + ); + return None; + }, + Err(err) => { + self.decoding_failed( + format!("CIP36 Signature Map decoding failed: {err}").as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + } + + match decoder.u64() { + Ok(1) => (), // Ok + Ok(x) => { + self.decoding_failed( + format!("CIP36 Signature Map decoding failed: Map entry was {x} MUST BE 1.") + .as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + Err(err) => { + self.decoding_failed( + format!("CIP36 Signature Map Key decoding failed: {err}").as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + } + + let sig: ed25519_dalek::Signature = match decoder.bytes() { + Ok(sig) => { + match ed25519_dalek::Signature::from_slice(sig) { + Ok(sig) => sig, + Err(err) => { + self.decoding_failed( + format!("CIP36 Signature Decoding failed: {err}",).as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + } + }, + Err(error) => { + self.decoding_failed( + format!("CIP36 Signature Decode error: {error}.",).as_str(), + validation_report, + decoded_metadata, + ); + return None; + }, + }; + + // Ok, if we get this far then we have a valid CIP36 Signature. 
+ let Some(pk) = self.stake_pk else { + self.decoding_failed( + "CIP36 Signature Verification Failed, no Staking Public Key.", + validation_report, + decoded_metadata, + ); + return None; + }; + + // Now we have both the Public Key and the signature. So calculate the hash of the + // metadata. + let hash = blake2b_simd::Params::new() + .hash_length(32) + .to_state() + .update(&SIGNDATA_PREAMBLE) + .update(metadata) + .finalize(); + + // debug!( + // "Hash = {:02x?}, pk = {:02x?}, sig = {:02x?}", + // hash.as_bytes(), + // pk.as_ref(), + // sig.to_bytes() + //); + + if let Err(error) = pk.verify(hash.as_bytes(), &sig) { + self.signed = false; + self.decoding_failed( + format!("CIP36 Signature Verification Failed: {error}").as_str(), + validation_report, + decoded_metadata, + ); + return None; + }; + + // If we get this far then we have a valid CIP36 Signature (Doesn't mean there aren't + // other issues). + self.signed = true; + + // Record the fully validated Cip36 metadata + decoded_metadata.0.insert( + LABEL, + Arc::new(DecodedMetadataItem { + value: DecodedMetadataValues::Cip36(Arc::new(self.clone()).clone()), + report: validation_report.clone(), + }), + ); + + Some(true) + } +} + +#[cfg(test)] +mod tests { + use crossbeam_skiplist::SkipMap; + + use super::*; + + fn create_empty_cip36(strict: bool) -> Cip36 { + Cip36 { + cip36: None, + voting_keys: vec![], + stake_pk: None, + payment_addr: vec![], + payable: false, + raw_nonce: 0, + nonce: 0, + purpose: 0, + signed: false, + strict_catalyst: strict, + } + } + + #[test] + fn test_decode_purpose_1() { + let decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(true); + let mut decoder = Decoder::new(&[0x00]); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_purpose(&mut decoder, &mut report, &decoded_metadata); + + assert_eq!(report.len(), 0); + assert_eq!(cip36.purpose, 0); + assert_eq!(rc, Some(0)); + } + + #[test] + fn test_decode_purpose_2() { + let 
decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(true); + let mut decoder = Decoder::new(&[0x19, 0x30, 0x39]); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_purpose(&mut decoder, &mut report, &decoded_metadata); + + assert_eq!(report.len(), 1); + assert_eq!(cip36.purpose, 12345); + assert_eq!(rc, Some(12345)); + } + + #[test] + fn test_decode_purpose_3() { + let decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(false); + let mut decoder = Decoder::new(&[0x19, 0x30, 0x39]); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_purpose(&mut decoder, &mut report, &decoded_metadata); + + assert_eq!(report.len(), 0); + assert_eq!(cip36.purpose, 12345); + assert_eq!(rc, Some(12345)); + } + + #[test] + fn test_decode_purpose_4() { + let bytes_cases: &[&[u8]] = &[ + &[0x80], // array(0) + &[0xA0], // map(0) + &[0x21], // negative(1) + &[0xF9, 0x3C, 0x00], // primitive(15360) - 1.0 + ]; + + for bytes in bytes_cases { + let decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(false); + let mut decoder = Decoder::new(bytes); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_purpose(&mut decoder, &mut report, &decoded_metadata); + + assert_eq!(report.len(), 1); + assert_eq!(cip36.purpose, 0); + assert_eq!(rc, None); + } + } + + #[test] + // valid `nonce`, strict = false, raw_nonce > slot + fn test_decode_nonce_1() { + let decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(false); + let mut decoder = Decoder::new(&[0x1B, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_nonce(&mut decoder, &mut report, &decoded_metadata, 0); + + assert_eq!(report.len(), 0); + assert_eq!(cip36.raw_nonce, u64::MAX); + assert_eq!(cip36.nonce, u64::MAX); + assert_eq!(rc, Some(u64::MAX)); + } + + #[test] + // valid 
`nonce`, strict = false, raw_nonce < slot + fn test_decode_nonce_2() { + let decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(false); + let mut decoder = Decoder::new(&[0x01]); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_nonce(&mut decoder, &mut report, &decoded_metadata, 99); + + assert_eq!(report.len(), 0); + assert_eq!(cip36.raw_nonce, 1); + assert_eq!(cip36.nonce, 1); + assert_eq!(rc, Some(1)); + } + + #[test] + // valid `nonce`, strict = true, raw_nonce > slot + fn test_decode_nonce_3() { + let decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(true); + let mut decoder = Decoder::new(&[0x10]); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_nonce(&mut decoder, &mut report, &decoded_metadata, 1); + + assert_eq!(report.len(), 0); + assert_eq!(cip36.raw_nonce, 16); + assert_eq!(cip36.nonce, 1); + assert_eq!(rc, Some(1)); + } + + #[test] + fn test_decode_nonce_4() { + let bytes_cases: &[&[u8]] = &[ + &[0x80], // array(0) + &[0xA0], // map(0) + &[0x21], // negative(1) + &[0xF9, 0x3C, 0x00], // primitive(15360) - 1.0 + ]; + + for bytes in bytes_cases { + let decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(false); + let mut decoder = Decoder::new(bytes); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_nonce(&mut decoder, &mut report, &decoded_metadata, 0); + + assert_eq!(report.len(), 1); + assert_eq!(cip36.raw_nonce, 0); + assert_eq!(cip36.nonce, 0); + assert_eq!(rc, None); + } + } + + #[test] + fn test_decode_payment_address_1() { + let hex_data = hex::decode( + // 0x004777561e7d9ec112ec307572faec1aff61ff0cfed68df4cd5c847f1872b617657881e30ad17c46e4010c9cb3ebb2440653a34d32219c83e9 + "5839004777561E7D9EC112EC307572FAEC1AFF61FF0CFED68DF4CD5C847F1872B617657881E30AD17C46E4010C9CB3EBB2440653A34D32219C83E9" + ).expect("cannot decode hex"); + let decoded_metadata = 
DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(false); + let mut decoder = Decoder::new(&hex_data); + let mut report = ValidationReport::new(); + let multi_era_tx: *const MultiEraTx = std::ptr::null(); + let multi_era_tx = unsafe { &*multi_era_tx }; + + let rc = cip36.decode_payment_address( + &mut decoder, + &mut report, + &decoded_metadata, + multi_era_tx, + Network::Preprod, + ); + + assert_eq!(report.len(), 0); + assert_eq!(cip36.payable, true); + assert_eq!(cip36.payment_addr.len(), 57); + assert_eq!(rc, Some(57)); + } + + #[test] + fn test_decode_stake_pub_1() { + let hex_data = hex::decode( + // 0xe3cd2404c84de65f96918f18d5b445bcb933a7cda18eeded7945dd191e432369 + "5820E3CD2404C84DE65F96918F18D5B445BCB933A7CDA18EEDED7945DD191E432369", + ) + .expect("cannot decode hex"); + let decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(false); + let mut decoder = Decoder::new(&hex_data); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_stake_pub(&mut decoder, &mut report, &decoded_metadata); + + assert_eq!(report.len(), 0); + assert!(cip36.stake_pk.is_some()); + assert_eq!(rc, Some(1)); + } + + #[test] + fn test_decode_stake_pub_2() { + let bytes_cases: &[Vec] = &[ + vec![], + hex::decode( + // 0xe3cd2404c84de65f96918f18d5b445bcb933a7cda18eeded7945dd19 (28 bytes) + "581CE3CD2404C84DE65F96918F18D5B445BCB933A7CDA18EEDED7945DD19", + ) + .expect("cannot decode hex"), + ]; + + for bytes in bytes_cases { + let decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(false); + let mut decoder = Decoder::new(&bytes); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_stake_pub(&mut decoder, &mut report, &decoded_metadata); + + assert_eq!(report.len(), 1); + assert_eq!(rc, None); + } + } + + #[test] + // cip-36 version + fn test_decode_voting_key_1() { + let hex_data = hex::decode( + // 
[["0x0036ef3e1f0d3f5989e2d155ea54bdb2a72c4c456ccb959af4c94868f473f5a0", 1]] + "818258200036EF3E1F0D3F5989E2D155EA54BDB2A72C4C456CCB959AF4C94868F473F5A001", + ) + .expect("cannot decode hex"); + let decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(false); + let mut decoder = Decoder::new(&hex_data); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_voting_key(&mut decoder, &mut report, &decoded_metadata); + + assert_eq!(report.len(), 0); + assert_eq!(cip36.cip36, Some(true)); + assert_eq!(cip36.voting_keys.len(), 1); + assert_eq!(rc, Some(1)); + } + + #[test] + // cip-15 version + fn test_decode_voting_key_2() { + let hex_data = hex::decode( + // 0x0036ef3e1f0d3f5989e2d155ea54bdb2a72c4c456ccb959af4c94868f473f5a0 + "58200036EF3E1F0D3F5989E2D155EA54BDB2A72C4C456CCB959AF4C94868F473F5A0", + ) + .expect("cannot decode hex"); + let decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(false); + let mut decoder = Decoder::new(&hex_data); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_voting_key(&mut decoder, &mut report, &decoded_metadata); + + assert_eq!(report.len(), 0); + assert_eq!(cip36.cip36, Some(false)); + assert_eq!(cip36.voting_keys.len(), 1); + assert_eq!(rc, Some(1)); + } + + #[test] + fn test_decode_voting_key_3() { + let bytes_cases: &[Vec] = &[ + vec![], + hex::decode( + // [[]] (empty) + "8180", + ) + .expect("cannot decode hex"), + hex::decode( + // [["0x0036ef3e1f0d3f5989e2d155ea54bdb2a72c4c456ccb959af4c94868f473f5a0"]] + // (without weight) + "818158200036EF3E1F0D3F5989E2D155EA54BDB2A72C4C456CCB959AF4C94868F473F5A0", + ) + .expect("cannot decode hex"), + ]; + + for bytes in bytes_cases { + let decoded_metadata = DecodedMetadata(SkipMap::new()); + let mut cip36 = create_empty_cip36(false); + let mut decoder = Decoder::new(&bytes); + let mut report = ValidationReport::new(); + + let rc = cip36.decode_voting_key(&mut decoder, &mut report, 
&decoded_metadata); + + assert_eq!(report.len(), 1); + assert_eq!(rc, None); + } + } +} diff --git a/hermes/crates/cardano-chain-follower/src/metadata/cip509.rs b/hermes/crates/cardano-chain-follower/src/metadata/cip509.rs new file mode 100644 index 000000000..023d973fa --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/metadata/cip509.rs @@ -0,0 +1,82 @@ +//! Cardano metadata module + +mod x509; + +use minicbor::{decode, Decode, Decoder}; +use strum::EnumDiscriminants; +use x509::X509Metadatum; + +/// Enum of metadata currently supported +#[derive(Debug, PartialEq, EnumDiscriminants)] +#[strum_discriminants(name(MetadataListType))] +enum MetadataList { + /// x509 metadatum + X509Metadatum(X509Metadatum), +} + +impl MetadataListType { + /// Convert associated label unsigned integer to enum. + fn from_u16(value: u16) -> Option { + match value { + 509 => Some(MetadataListType::X509Metadatum), + _ => None, + } + } +} + +/// Struct of metadata +#[derive(Debug, PartialEq)] +struct Metadata { + /// A label of the metadata + label: u16, + /// A possible list of metadata currently supported + tx_metadata: MetadataList, +} + +impl Decode<'_, ()> for Metadata { + fn decode(d: &mut Decoder, _ctx: &mut ()) -> Result { + // Map of label + d.map()?; + let label = d.u16()?; + let metadata = + MetadataListType::from_u16(label).ok_or(decode::Error::message("Invalid label"))?; + match metadata { + MetadataListType::X509Metadatum => { + let x509_metadatum = X509Metadatum::decode(d, &mut ())?; + Ok(Self { + label, + tx_metadata: MetadataList::X509Metadatum(x509_metadatum), + }) + }, + } + } +} + +#[cfg(test)] +mod test_metadata { + use super::*; + + #[test] + fn test_raw_metadata() { + let data = 
"a11901fda50050ca7a1457ef9f4c7f9c747f8c4a4cfa6c015008b5fcdd1e7487bc8b8db6e6c52f2b9b0258204d3f576f26db29139981a69443c2325daa812cc353a31b5a4db794a5bcbb06c20a8d5840a50a81590238308202343082019da00302010202145afc371daf301793cf0b1835a118c2f90363d5d9300d06092a864886f70d01010b05003045310b300906035840550406130241553113301106035504080c0a536f6d652d53746174653121301f060355040a0c18496e7465726e6574205769646769747320507479204c74643058401e170d3234303731313038353733365a170d3235303731313038353733365a3045310b30090603550406130241553113301106035504080c0a536f6d652d537458406174653121301f060355040a0c18496e7465726e6574205769646769747320507479204c746430819f300d06092a864886f70d010101050003818d00308189025840818100cd28e20b157ca70c85433c1689b1d5890ec479bdd1ffdcc5647ae12be9badf4af20764cd24bd64130831a57506dfbbdd3e924c96b259c6ccedf24d6a255840618f0819643c739f145b733c3c94333e5937b499ada9a4ffc127457c7cb557f2f5623dcadea1e06f09129db9584b0aee949244b3252b52afde5d385c65e563a65840efb07f0203010001a321301f301d0603551d0e0416041492eb169818b833588321957a846077aa239cf3a0300d06092a864886f70d01010b0500038181002e5f584073333ce667e4172b252416eaa1d2e9681f59943724b4f366a8b930443ca6b69b12dd9debee9c8a6307695ee1884da4b00136195d1d8223d1c253ff408edfc8ed584003af1819244c35d3843855fb9af86e84fb7636fa3f4a0fc396f6fb6fd16d3bcebde68a8bd81be61e8ee7d77e9f7f9804e03ebc31b4581313c955a667658b14815840588b004301f50d6b52464320746573742043411a63b0cd001a6955b90047010123456789ab01582102b1216ab96e5b3b3340f5bdf02e693f16213a04525ed444584050b1019c2dfd3838ab010058406fc903015259a38c0800a3d0b2969ca21977e8ed6ec344964d4e1c6b37c8fb541274c3bb81b2f53073c5f101a5ac2a928865835840b6a2679b6e682d2a26945ed0b2181e81d9800558203b6a27bcceb6a42d62a3a8d02a6f0d73653215771de243a63ac048a18b59da2918288250667e69bd56a0fb583ed2d4db363e3bb017a150fdec9a8c902152433c25668aa3883cc9186481a5000001820a000250667e69bd56a0fbd2d4db363e3bb017a103000a64546573741863584091af2d6092350d47cf0b412cec3df7810004c8261271f719f81a33f414dd363b0a865864d9cae3bca22e03ef46e0b1b9740dc09633b6e86ec37b31a0f83
d9e0a"; + let hex_data = hex::decode(data).expect("Failed to decode hex data"); + let mut decoder = Decoder::new(&hex_data); + let _metadata = Metadata::decode(&mut decoder, &mut ()).expect("Failed to decode metadata"); + } + + #[test] + fn test_brotli_metadata() { + let data = "a11901fda50050ca7a1457ef9f4c7f9c747f8c4a4cfa6c015008b5fcdd1e7487bc8b8db6e6c52f2b9b0258204d3f576f26db29139981a69443c2325daa812cc353a31b5a4db794a5bcbb06c20b8c58401b3d030866084fcb259de07496d3197e913a39fd628a3db0a4ed6839261a00c51cb0a5b9c16194064132ace375ea23c75c60659400cba304d0d689c00086195d5840ff28714da02c35e7295815ba58b77f227e576fa254c464e2f9c6f9dfa900a0208250033c054a468c38e08819601d073c034a4727a524ff39995477443c1fca235840839c927599b253887f50487c1caf757c0aaf79bc3fcacd42252b8f2ae1f1a8b282929ca22bb5c2885cc23a66005c0cc1ca20142b82310c3a137d44c1943e40995840a7a7ce5c3475b5887a3765ede2ff3b7bfea90f255e2edf37fd44e27f26b8e6cf408aef4b20bebf7257b3dabc7eda65fff4ed278b50219f0a52367ff5b80e46b758403875f55a394d17a5d9a6b1a1deff5b2206e9e9734e9fbefa6a1cdfeb7a104546dfb6e46c46feaeb65a7f4648c276e29e87b27bc053bffef79359300220d0c3875840f2a05cc4880317358e19c758fd9ab9917551ce3987af2e35d73b6958a0f5732784621b0c92f68a93537f16f48445424890f955d7a597c13c2eb54a82b39f0307584097507df5fef916fabb6dafdfb516fb9184783e2cb4e89d048a6c1e5c04818bdb76ffb5cbef1fbe452658d904cd152ee72a3bfc6efe1199fb3b51f1979629cd4e5840fdb7df511723d4cead3d2b2eb9c1f18cbbfcf9f5cc8eac46dc03cd55fcac3303c391437f50400923e65c02e981af5461b6867a47fb25ebe9b0fb4d9e41ec210e58404b9011000206414523c0990f9ee20b5d8a745393d3febaf6413a448b994f1567eb7945df7a0ab44afd55561e0190b376d411026c5d7a4a49a19e0bd3f5addd6c5840492fde46eee8d75b587286291dfeb6a78fdf59c1a6bfa2717b1f41dfa878756140ce7c77504b64b094b870ade78569566eec66369133af5aa8c8eab9f95e29df58409ec10be251547101b24c495c8ff4fa55378dbb4a5c6e89b18a12ac033343d61c3b7f5fba725b51536d92a5cbfaef9be6d24a3e5b3d75a1c0e29e42f523567fac4d0f8200811c822d2210b97f570818635840711ac54a3ab2dad900ce14028b0c05da5a1aa9a50c7ef5d14b378b9e8ca4fde8e026410b0c56
0fcae6f11e314208d677d692be0953fab2c96aa8f303b8561104"; + let hex_data = hex::decode(data).expect("Failed to decode hex data"); + let mut decoder = Decoder::new(&hex_data); + let _metadata = Metadata::decode(&mut decoder, &mut ()).expect("Failed to decode metadata"); + } + + #[test] + fn test_zstd_metadata() { + let data = "a11901fda50050ca7a1457ef9f4c7f9c747f8c4a4cfa6c015008b5fcdd1e7487bc8b8db6e6c52f2b9b0258204d3f576f26db29139981a69443c2325daa812cc353a31b5a4db794a5bcbb06c20c8c584028b52ffd605002251700942ca50a81590238308202343082019da00302010202145afc371daf301793cf0b1835a118c2f90363d5d9300d06092a864886f70d015840010b05003045310b300906035504061302415531133011080c0a536f6d652d53746174653121301f0a0c18496e7465726e65742057696467697473205074792058404c7464301e170d3234303731313038353733365a170d3235819f01050003818d0030818902818100cd28e20b157ca70c85433c1689b1d5890ec479bdd1ffdcc55840647ae12be9badf4af20764cd24bd64130831a57506dfbbdd3e924c96b259c6ccedf24d6a25618f0819643c739f145b733c3c94333e5937b499ada9a4ffc1274558407c7cb557f2f5623dcadea1e06f09129db9584b0aee949244b3252b52afde5d385c65e563a6efb07f0203010001a321301f301d0603551d0e0416041492eb1698584018b833588321957a846077aa239cf3a00b81002e5f73333ce667e4172b252416eaa1d2e9681f59943724b4f366a8b930443ca6b69b12dd9debee9c8a6307695e5840e1884da4b00136195d1d8223d1c253ff408edfc8ed03af1819244c35d3843855fb9af86e84fb7636fa3f4a0fc396f6fb6fd16d3bcebde68a8bd81be61e8ee7d758407e9f7f9804e03ebc31b4581313c955a667658b1481588b004301f50d6b52464320746573742043411a63b0cd001a6955b90047010123456789ab01582102b12158406ab96e5b3b3340f5bdf02e693f16213a04525ed44450b1019c2dfd3838ab010058406fc903015259a38c0800a3d0b2969ca21977e8ed6ec344964d4e1c6b37c85840fb541274c3bb81b2f53073c5f101a5ac2a92886583b6a2679b6e682d2a26945ed0b2181e81d9800558203b6a27bcceb6a42d62a3a8d02a6f0d73653215771de2584043a63ac048a18b59da2918288250667e69bd56a0fbd2d4db363e3bb017a150fdec9a8c902152433c25668aa3883cc9186481a5000001820a000250667e69bd56582ea0fbd2d4db363e3bb017a103000a64546573740000080084391c0898ad681c1
a1ad7a506644166c038791758a7191863584023441d5f1348cd213bb5502838c439ae8fea22b3295c73d34df29cb56887f35cb3e0ba04cc9609ec9f17d2360b1a5f876098dfa5e60ba549d3b6ea2a6026b507"; + let hex_data = hex::decode(data).expect("Failed to decode hex data"); + let mut decoder = Decoder::new(&hex_data); + let _metadata = Metadata::decode(&mut decoder, &mut ()).expect("Failed to decode metadata"); + } +} diff --git a/hermes/crates/cardano-chain-follower/src/metadata/mod.rs b/hermes/crates/cardano-chain-follower/src/metadata/mod.rs new file mode 100644 index 000000000..5c191c07c --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/metadata/mod.rs @@ -0,0 +1,175 @@ +//! Metadata decoding and validating. + +use std::{fmt::Debug, sync::Arc}; + +use cip36::Cip36; +use dashmap::DashMap; +use pallas::ledger::traverse::{MultiEraBlock, MultiEraTx}; +use raw_aux_data::RawAuxData; +use tracing::error; + +use crate::{utils::usize_from_saturating, Network}; + +pub mod cip36; +mod raw_aux_data; + +/// List of all validation errors (as strings) Metadata is considered Valid if this list +/// is empty. +pub type ValidationReport = Vec; + +/// Possible Decoded Metadata Values. +/// Must match the key they relate too, but the consumer needs to check this. +#[derive(Debug)] +pub enum DecodedMetadataValues { + // Json Metadata // TODO + // Json(serde_json::Value), // TODO + /// CIP-36/CIP-15 Catalyst Registration metadata. + Cip36(Arc), +} + +/// An individual decoded metadata item. +#[derive(Debug)] +pub struct DecodedMetadataItem { + /// The decoded metadata itself. + pub value: DecodedMetadataValues, + /// Validation report for this metadata item. + pub report: ValidationReport, +} + +/// Decoded Metadata for a single transaction. +/// The key is the Primary Label of the Metadata. +/// For example, CIP15/36 uses labels 61284 & 61285, +/// 61284 is the primary label, so decoded metadata +/// will be under that label. 
+pub(crate) struct DecodedMetadata(DashMap>); + +impl DecodedMetadata { + /// Create new decoded metadata for a transaction. + fn new(chain: Network, slot: u64, txn: &MultiEraTx, raw_aux_data: &RawAuxData) -> Self { + let decoded_metadata = Self(DashMap::new()); + + // Process each known type of metadata here, and record the decoded result. + Cip36::decode_and_validate(&decoded_metadata, slot, txn, raw_aux_data, true, chain); + + // if !decoded_metadata.0.is_empty() { + // debug!("Decoded Metadata final: {decoded_metadata:?}"); + //} + decoded_metadata + } + + /// Get the decoded metadata item at the given slot, or None if it doesn't exist. + pub fn get(&self, primary_label: u64) -> Option> { + let entry = self.0.get(&primary_label)?; + let value = entry.value(); + Some(value.clone()) + } +} + +impl Debug for DecodedMetadata { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("DecodedMetadata {")?; + for kv in &self.0 { + let k = kv.key(); + let v = kv.value().clone(); + f.write_fmt(format_args!("{k:?}:{v:?} "))?; + } + f.write_str("}") + } +} + +/// Decoded Metadata for a all transactions in a block. +/// The Key for both entries is the Transaction offset in the block. +#[derive(Debug)] +pub struct DecodedTransaction { + /// The Raw Auxiliary Data for each transaction in the block. + raw: DashMap, + /// The Decoded Metadata for each transaction in the block. 
+ decoded: DashMap, +} + +impl DecodedTransaction { + /// Insert another transaction worth of data into the Decoded Aux Data + fn insert( + &mut self, chain: Network, slot: u64, txn_idx: u32, cbor_data: &[u8], + transactions: &[MultiEraTx], + ) { + let txn_idx = usize_from_saturating(txn_idx); + + let Some(txn) = transactions.get(txn_idx) else { + error!("No transaction at index {txn_idx} trying to decode metadata."); + return; + }; + + let txn_raw_aux_data = RawAuxData::new(cbor_data); + let txn_metadata = DecodedMetadata::new(chain, slot, txn, &txn_raw_aux_data); + + self.raw.insert(txn_idx, txn_raw_aux_data); + self.decoded.insert(txn_idx, txn_metadata); + } + + /// Create a new `DecodedTransaction`. + pub(crate) fn new(chain: Network, block: &MultiEraBlock) -> Self { + let mut decoded_aux_data = DecodedTransaction { + raw: DashMap::new(), + decoded: DashMap::new(), + }; + + if block.has_aux_data() { + let transactions = block.txs(); + let slot = block.slot(); + + if let Some(_metadata) = block.as_byron() { + // Nothing to do here. + } else if let Some(alonzo_block) = block.as_alonzo() { + for (txn_idx, metadata) in alonzo_block.auxiliary_data_set.iter() { + decoded_aux_data.insert( + chain, + slot, + *txn_idx, + metadata.raw_cbor(), + &transactions, + ); + } + } else if let Some(babbage_block) = block.as_babbage() { + for (txn_idx, metadata) in babbage_block.auxiliary_data_set.iter() { + decoded_aux_data.insert( + chain, + slot, + *txn_idx, + metadata.raw_cbor(), + &transactions, + ); + } + } else if let Some(conway_block) = block.as_conway() { + for (txn_idx, metadata) in conway_block.auxiliary_data_set.iter() { + decoded_aux_data.insert( + chain, + slot, + *txn_idx, + metadata.raw_cbor(), + &transactions, + ); + } + } else { + error!("Undecodable metadata, unknown Era"); + }; + } + decoded_aux_data + } + + /// Get metadata for a given label in a transaction if it exists. 
+ #[must_use] + pub fn get_metadata(&self, txn_idx: usize, label: u64) -> Option> { + let txn_metadata = self.decoded.get(&txn_idx)?; + let txn_metadata = txn_metadata.value(); + txn_metadata.get(label) + } + + /// Get raw metadata for a given label in a transaction if it exists. + #[must_use] + pub fn get_raw_metadata(&self, txn_idx: usize, label: u64) -> Option>> { + let txn_metadata = self.raw.get(&txn_idx)?; + let txn_metadata = txn_metadata.value(); + txn_metadata.get_metadata(label) + } +} diff --git a/hermes/crates/cardano-chain-follower/src/metadata/raw_aux_data.rs b/hermes/crates/cardano-chain-follower/src/metadata/raw_aux_data.rs new file mode 100644 index 000000000..f8d39ed57 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/metadata/raw_aux_data.rs @@ -0,0 +1,269 @@ +//! Raw Auxiliary Data Decoding + +use std::sync::Arc; + +use anyhow::bail; +use dashmap::DashMap; +use minicbor::{data::Type, Decoder}; +use tracing::{error, warn}; + +/// What type of smart contract is this list. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, strum::Display, Hash)] +pub enum SmartContractType { + /// Native smart contracts + Native, + /// Plutus smart contracts (with version number 1-x) + Plutus(u64), +} + +// We CAN NOT use the Pallas library metadata decoding because it does not preserve raw +// metadata values which are critical for performing operations like signature checks on +// data. So we have a bespoke metadata decoder here. +#[derive(Debug)] +pub(crate) struct RawAuxData { + /// Metadata: key = label, value = raw metadata bytes + metadata: DashMap>>, + /// Scripts: 1 = Native, 2 = Plutus V1, 3 = Plutus V2, 4 = Plutus V3 + scripts: DashMap>>>, +} + +impl RawAuxData { + /// Create a new `RawDecodedMetadata`. 
+ pub(crate) fn new(aux_data: &[u8]) -> Self { + let mut raw_decoded_data = Self { + metadata: DashMap::new(), + scripts: DashMap::new(), + }; + + let mut decoder = Decoder::new(aux_data); + + match decoder.datatype() { + Ok(minicbor::data::Type::Map) => { + if let Err(error) = Self::decode_shelley_map(&mut raw_decoded_data, &mut decoder) { + error!("Failed to Deserialize Shelley Metadata: {error}: {aux_data:02x?}"); + } + }, + Ok(minicbor::data::Type::Array) => { + if let Err(error) = + Self::decode_shelley_ma_array(&mut raw_decoded_data, &mut decoder) + { + error!("Failed to Deserialize Shelley-MA Metadata: {error}: {aux_data:02x?}"); + } + }, + Ok(minicbor::data::Type::Tag) => { + if let Err(error) = + Self::decode_alonzo_plus_map(&mut raw_decoded_data, &mut decoder) + { + error!("Failed to Deserialize Alonzo+ Metadata: {error}: {aux_data:02x?}"); + } + }, + Ok(unexpected) => { + error!("Unexpected datatype for Aux data: {unexpected}: {aux_data:02x?}"); + }, + Err(error) => { + error!("Error decoding metadata: {error}: {aux_data:02x?}"); + }, + } + + raw_decoded_data + } + + /// Decode the Shelley map of metadata. + fn decode_shelley_map( + raw_decoded_data: &mut Self, decoder: &mut minicbor::Decoder, + ) -> anyhow::Result<()> { + let entries = match decoder.map() { + Ok(Some(entries)) => entries, + Ok(None) => { + // Sadly... Indefinite Maps are allowed in Cardano CBOR Encoding. 
+ u64::MAX + }, + Err(error) => { + bail!("Error decoding metadata: {error}"); + }, + }; + + // debug!("Decoding shelley metadata map with {} entries", entries); + + let raw_metadata = decoder.input(); + + for _ in 0..entries { + let key = match decoder.u64() { + Ok(key) => key, + Err(error) => { + bail!("Error decoding metadata key: {error}"); + }, + }; + let value_start = decoder.position(); + if let Err(error) = decoder.skip() { + bail!("Error decoding metadata value: {error}"); + } + let value_end = decoder.position(); + let Some(value_slice) = raw_metadata.get(value_start..value_end) else { + bail!("Invalid metadata value found. Unable to extract raw value slice."); + }; + let value = value_slice.to_vec(); + + // debug!("Decoded metadata key: {key}, value: {value:?}"); + + let _unused = raw_decoded_data.metadata.insert(key, Arc::new(value)); + + // Look for End Sentinel IF its an indefinite MAP (which we know because entries is + // u64::MAX). + if entries == u64::MAX { + match decoder.datatype() { + Ok(Type::Break) => { + // Skip over the break token. + let _unused = decoder.skip(); + break; + }, + Ok(_) => (), // Not break, so do next loop, should be the next key. + Err(error) => { + bail!("Error checking indefinite metadata map end sentinel: {error}"); + }, + } + } + } + + Ok(()) + } + + /// Decode a Shelley-MA Auxiliary Data Array + fn decode_shelley_ma_array( + raw_decoded_data: &mut Self, decoder: &mut minicbor::Decoder, + ) -> anyhow::Result<()> { + match decoder.array() { + Ok(Some(entries)) => { + if entries != 2 { + bail!( + "Invalid number of entries in Metadata Array. Expected 2, found {entries}." + ); + } + }, + Ok(None) => { + bail!("Indefinite Array found decoding Metadata. Invalid."); + }, + Err(error) => { + bail!("Error decoding metadata: {error}"); + }, + }; + + // First entry is the metadata map, so just decode that now. + Self::decode_shelley_map(raw_decoded_data, decoder)?; + // Second entry is an array of native scripts. 
+ Self::decode_script_array(raw_decoded_data, decoder, SmartContractType::Native)?; + + Ok(()) + } + + /// Decode a Shelley-MA Auxiliary Data Array + fn decode_alonzo_plus_map( + raw_decoded_data: &mut Self, decoder: &mut minicbor::Decoder, + ) -> anyhow::Result<()> { + match decoder.tag() { + Ok(tag) => { + if tag.as_u64() != 259 { + bail!("Invalid tag for alonzo+ aux data. Expected 259, found {tag}."); + } + }, + Err(error) => { + bail!("Error decoding tag for alonzo+ aux data: {error}"); + }, + } + + let entries = match decoder.map() { + Ok(Some(entries)) => entries, + Ok(None) => bail!("Indefinite Map found decoding Alonzo+ Metadata. Invalid."), + Err(error) => bail!("Error decoding Alonzo+ Metadata: {error}"), + }; + + // iterate the map + for _ in 0..entries { + let aux_type_key = match decoder.u64() { + Ok(key) => key, + Err(error) => { + bail!("Error decoding Alonzo+ Metadata Aux Data Type Key: {error}"); + }, + }; + + let contract_type = match aux_type_key { + 0 => { + if raw_decoded_data.metadata.is_empty() { + Self::decode_shelley_map(raw_decoded_data, decoder)?; + continue; + } + bail!("Multiple Alonzo+ Metadata entries found. Invalid."); + }, + 1 => SmartContractType::Native, + _ => { + if aux_type_key > 4 { + warn!( + "Auxiliary Type Key > 4 detected, assuming its a plutus script > V3." + ); + } + SmartContractType::Plutus(aux_type_key - 1) + }, + }; + + if raw_decoded_data.scripts.contains_key(&contract_type) { + bail!("Multiple Alonzo+ Scripts of type {contract_type} found. 
Invalid."); + } + + Self::decode_script_array(raw_decoded_data, decoder, contract_type)?; + } + Ok(()) + } + + /// Decode an array of smart contract scripts + fn decode_script_array( + raw_decoded_data: &mut Self, decoder: &mut minicbor::Decoder, + contract_type: SmartContractType, + ) -> anyhow::Result<()> { + let mut scripts: Vec> = Vec::new(); + + let entries = match decoder.array() { + Ok(Some(entries)) => entries, + Ok(None) => { + bail!("Indefinite Script Array found decoding Metadata. Invalid."); + }, + Err(error) => { + bail!("Error decoding metadata: {error}"); + }, + }; + + let raw_metadata = decoder.input(); + + for _entry in 0..entries { + if contract_type == SmartContractType::Native { + // Native Scripts are actually CBOR arrays, so capture their data as bytes for + // later processing. + let value_start = decoder.position(); + if let Err(error) = decoder.skip() { + bail!("Error decoding native script value: {error}"); + } + let value_end = decoder.position(); + let Some(value_slice) = raw_metadata.get(value_start..value_end) else { + bail!("Invalid metadata value found. Unable to extract native script slice."); + }; + scripts.push(value_slice.to_vec()); + } else { + let script = match decoder.bytes() { + Ok(script) => script, + Err(error) => bail!("Error decoding script data from metadata: {error}"), + }; + scripts.push(script.to_vec()); + } + } + + let _unused = raw_decoded_data + .scripts + .insert(contract_type, Arc::new(scripts)); + + Ok(()) + } + + /// Get Raw metadata for a given metadata label, if it exists. + pub(crate) fn get_metadata(&self, label: u64) -> Option>> { + self.metadata.get(&label).map(|v| v.value().clone()) + } +} diff --git a/hermes/crates/cardano-chain-follower/src/metadata/x509/mod.rs b/hermes/crates/cardano-chain-follower/src/metadata/x509/mod.rs new file mode 100644 index 000000000..cb34eccc6 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/metadata/x509/mod.rs @@ -0,0 +1,243 @@ +//! x509 metadata +//! 
Doc Reference: +//! CDDL Reference: + +mod rbac; + +use std::io::Read; + +use minicbor::{decode, Decode, Decoder}; +use rbac::X509RbacMetadata; +use strum::FromRepr; + +/// Enum of compression algorithms used to compress chunks. +#[derive(FromRepr, Debug, PartialEq)] +#[repr(u8)] +pub enum CompressionAlgorithm { + /// Raw data, no compression. + Raw = 10, + /// Brotli compression. + Brotli = 11, + /// Zstd compression. + Zstd = 12, +} + +/// Struct of x509 chunks. +#[derive(Debug, PartialEq)] +struct X509Chunks { + /// The compression algorithm used to compress the data. + chunk_type: CompressionAlgorithm, + /// The decompressed data. + chunk_data: X509RbacMetadata, +} + +impl X509Chunks { + /// Create new instance of `X509Chunks`. + fn new(chunk_type: CompressionAlgorithm, chunk_data: X509RbacMetadata) -> Self { + Self { + chunk_type, + chunk_data, + } + } +} + +impl Decode<'_, ()> for X509Chunks { + fn decode(d: &mut Decoder, _ctx: &mut ()) -> Result { + // Determine the algorithm + let algorithm = CompressionAlgorithm::from_repr(d.u8()?) + .ok_or(decode::Error::message("Invalid chunk data type"))?; + + // Decompress the data + let decompressed = decompress(d, &algorithm) + .map_err(|e| decode::Error::message(format!("Failed to decompress {e}")))?; + + // Decode the decompressed data. + let mut decoder = Decoder::new(&decompressed); + let chunk_data = X509RbacMetadata::decode(&mut decoder, &mut ()) + .map_err(|e| decode::Error::message(format!("Failed to decode {e}")))?; + + Ok(X509Chunks { + chunk_type: algorithm, + chunk_data, + }) + } +} + +/// Decompress the data using the given algorithm. +fn decompress(d: &mut Decoder, algorithm: &CompressionAlgorithm) -> anyhow::Result> { + let chunk_len = d + .array() + .map_err(|e| anyhow::anyhow!(e.to_string()))? 
+ .ok_or(anyhow::anyhow!("Error indefinite array in X509Chunks"))?; + // Vector containing the concatenated chunks + let mut concat_chunk = vec![]; + for _ in 0..chunk_len { + let chunk_data = d.bytes().map_err(|e| anyhow::anyhow!(e.to_string()))?; + concat_chunk.extend_from_slice(chunk_data); + } + + let mut buffer = vec![]; + + match algorithm { + CompressionAlgorithm::Raw => { + buffer.extend_from_slice(concat_chunk.as_slice()); + }, + CompressionAlgorithm::Zstd => { + zstd::stream::copy_decode(concat_chunk.as_slice(), &mut buffer)?; + }, + CompressionAlgorithm::Brotli => { + let mut decoder = brotli::Decompressor::new(concat_chunk.as_slice(), 4096); + decoder + .read_to_end(&mut buffer) + .map_err(|_| anyhow::anyhow!("Failed to decompress using Brotli algorithm"))?; + }, + } + Ok(buffer) +} + +/// x509 metadatum. +#[derive(Debug, PartialEq)] +pub(crate) struct X509Metadatum { + /// `UUIDv4` Purpose . + purpose: [u8; 16], // (bytes .size 16) + /// Transaction inputs hash. + txn_inputs_hash: [u8; 16], // bytes .size 16 + /// Optional previous transaction ID. + prv_tx_id: Option<[u8; 32]>, // bytes .size 32 + /// x509 chunks. + x509_chunks: X509Chunks, // chunk_type => [ + x509_chunk ] + /// Validation signature. + validation_signature: Vec, // bytes size (1..64) +} + +#[allow(clippy::module_name_repetitions)] +impl X509Metadatum { + /// Create a new instance of `X509Metadatum`. + fn new() -> Self { + Self { + purpose: [0; 16], + txn_inputs_hash: [0; 16], + prv_tx_id: None, + x509_chunks: X509Chunks::new(CompressionAlgorithm::Raw, X509RbacMetadata::new()), + validation_signature: vec![], + } + } + + /// Set the purpose. + fn set_purpose(&mut self, purpose: [u8; 16]) { + self.purpose = purpose; + } + + /// Set the transaction inputs hash. + fn set_txn_inputs_hash(&mut self, txn_inputs_hash: [u8; 16]) { + self.txn_inputs_hash = txn_inputs_hash; + } + + /// Set the previous transaction ID. 
+ fn set_prv_tx_id(&mut self, prv_tx_id: [u8; 32]) { + self.prv_tx_id = Some(prv_tx_id); + } + + /// Set the x509 chunks. + fn set_x509_chunks(&mut self, x509_chunks: X509Chunks) { + self.x509_chunks = x509_chunks; + } + + /// Set the validation signature. + fn set_validation_signature(&mut self, validation_signature: Vec) { + self.validation_signature = validation_signature; + } +} + +/// Enum of x509 metadatum with its associated unsigned integer value. +#[allow(clippy::module_name_repetitions)] +#[derive(FromRepr, Debug, PartialEq)] +#[repr(u8)] +pub enum X509MetadatumInt { + /// Purpose. + Purpose = 0, + /// Transaction inputs hash. + TxInputsHash = 1, + /// Previous transaction ID. + PreviousTxId = 2, + /// Validation signature. + ValidationSignature = 99, +} + +impl Decode<'_, ()> for X509Metadatum { + fn decode(d: &mut Decoder, ctx: &mut ()) -> Result { + let map_len = d.map()?.ok_or(decode::Error::message( + "Error indefinite array in X509Metadatum", + ))?; + let mut x509_metadatum = X509Metadatum::new(); + for _ in 0..map_len { + // Use probe to peak + let key = d.probe().u8()?; + + if let Some(key) = X509MetadatumInt::from_repr(key) { + // Consuming the int + d.u8()?; + match key { + X509MetadatumInt::Purpose => { + x509_metadatum.set_purpose( + d.bytes()?.try_into().map_err(|_| { + decode::Error::message("Invalid data size of Purpose") + })?, + ); + }, + X509MetadatumInt::TxInputsHash => { + x509_metadatum.set_txn_inputs_hash(d.bytes()?.try_into().map_err( + |_| decode::Error::message("Invalid data size of TxInputsHash"), + )?); + }, + X509MetadatumInt::PreviousTxId => { + x509_metadatum.set_prv_tx_id(d.bytes()?.try_into().map_err(|_| { + decode::Error::message("Invalid data size of PreviousTxId") + })?); + }, + X509MetadatumInt::ValidationSignature => { + let validation_signature = d.bytes()?; + if validation_signature.is_empty() || validation_signature.len() > 64 { + return Err(decode::Error::message( + "Invalid data size of ValidationSignature", + )); 
+ } + x509_metadatum.set_validation_signature(validation_signature.to_vec()); + }, + } + } else { + // Handle the x509 chunks 10 11 12 + let x509_chunks = X509Chunks::decode(d, ctx)?; + x509_metadatum.set_x509_chunks(x509_chunks); + } + } + Ok(x509_metadatum) + } +} + +/// Decode any in CDDL, only support basic datatype +pub(crate) fn decode_any(d: &mut Decoder) -> Result, decode::Error> { + match d.datatype()? { + minicbor::data::Type::Bytes => Ok(d.bytes()?.to_vec()), + minicbor::data::Type::String => Ok(d.str()?.as_bytes().to_vec()), + minicbor::data::Type::Array => { + let arr_len = d.array()?.ok_or(decode::Error::message( + "Error indefinite length in decoding any", + ))?; + let mut buffer = vec![]; + for _ in 0..arr_len { + buffer.extend_from_slice(&decode_any(d)?); + } + Ok(buffer) + }, + minicbor::data::Type::U8 + | minicbor::data::Type::U16 + | minicbor::data::Type::U32 + | minicbor::data::Type::U64 => Ok(d.u64()?.to_be_bytes().to_vec()), + minicbor::data::Type::I8 + | minicbor::data::Type::I16 + | minicbor::data::Type::I32 + | minicbor::data::Type::I64 => Ok(d.i64()?.to_be_bytes().to_vec()), + _ => Err(decode::Error::message("Data type not supported")), + } +} diff --git a/hermes/crates/cardano-chain-follower/src/metadata/x509/rbac/certs.rs b/hermes/crates/cardano-chain-follower/src/metadata/x509/rbac/certs.rs new file mode 100644 index 000000000..ae8b95463 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/metadata/x509/rbac/certs.rs @@ -0,0 +1,93 @@ +//! Certificates for the RBAC metadata. + +use c509_certificate::c509::C509; +use minicbor::{decode, Decode, Decoder}; +use x509_cert::{der::Decode as x509Decode, Certificate}; + +// ------------------x509------------------------ + +/// A struct of X509 certificate. 
+#[derive(Debug, PartialEq)] +pub(crate) struct X509DerCert(Vec); + +impl Decode<'_, ()> for X509DerCert { + fn decode(d: &mut Decoder, _ctx: &mut ()) -> Result { + let data = d.bytes()?; + Certificate::from_der(data) + .map_err(|_| decode::Error::message("Invalid x509 certificate"))?; + Ok(Self(data.to_vec())) + } +} + +// ------------------c509----------------------- + +/// Enum of possible c509 certificate. +#[derive(Debug, PartialEq)] +pub(crate) enum C509Cert { + /// A c509 certificate in metadatum reference. + C509CertInMetadatumReference(C509CertInMetadatumReference), + /// A c509 certificate. + C509Certificate(Box), +} + +impl Decode<'_, ()> for C509Cert { + fn decode(d: &mut Decoder, ctx: &mut ()) -> Result { + if d.datatype()? == minicbor::data::Type::Array { + let arr_len = d + .array()? + .ok_or(decode::Error::message("Error indefinite array in C509Cert"))?; + // C509CertInMetadatumReference must have 3 items + if arr_len == 3 { + Ok(Self::C509CertInMetadatumReference( + C509CertInMetadatumReference::decode(d, ctx)?, + )) + } else { + Err(decode::Error::message( + "Invalid length C509CertInMetadatumReference, expected 3", + )) + } + } else { + // Consuming the c509 bytes + let c509 = d.bytes()?; + let mut c509_d = Decoder::new(c509); + Ok(Self::C509Certificate(Box::new(C509::decode( + &mut c509_d, + ctx, + )?))) + } + } +} + +/// A struct of c509 certificate in metadatum reference. +#[derive(Debug, PartialEq)] +pub(crate) struct C509CertInMetadatumReference { + /// Transaction output field. + txn_output_field: u8, + /// Transaction output index. + txn_output_index: u64, + /// Optional certificate reference. + cert_ref: Option>, +} + +impl Decode<'_, ()> for C509CertInMetadatumReference { + fn decode(d: &mut Decoder, _ctx: &mut ()) -> Result { + let txn_output_field = d.u8()?; + let txn_output_index = d.u64()?; + let cert_ref = match d.datatype()? 
{ + minicbor::data::Type::Array => { + let len = d.array()?.ok_or(decode::Error::message( + "Error indefinite array in C509CertInMetadatumReference", + ))?; + let arr: Result, _> = (0..len).map(|_| d.u64()).collect(); + arr.map(Some) + }, + minicbor::data::Type::Null => Ok(None), + _ => Ok(Some(vec![d.u64()?])), + }?; + Ok(Self { + txn_output_field, + txn_output_index, + cert_ref, + }) + } +} diff --git a/hermes/crates/cardano-chain-follower/src/metadata/x509/rbac/mod.rs b/hermes/crates/cardano-chain-follower/src/metadata/x509/rbac/mod.rs new file mode 100644 index 000000000..2075abb6a --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/metadata/x509/rbac/mod.rs @@ -0,0 +1,170 @@ +//! Role Based Access Control (RBAC) metadata for X509 certificates. +//! Doc Reference: +//! CDDL Reference: + +mod certs; +mod pub_key; +mod role_data; + +use std::collections::HashMap; + +use certs::{C509Cert, X509DerCert}; +use minicbor::{decode, Decode, Decoder}; +use pub_key::SimplePublicKeyType; +use role_data::RoleData; +use strum::FromRepr; + +use super::decode_any; + +/// Struct of x509 RBAC metadata. +#[derive(Debug, PartialEq)] +pub(crate) struct X509RbacMetadata { + /// Optional list of x509 certificates. + x509_certs: Option>, + /// Optional list of c509 certificates. + /// The value can be either the c509 certificate or c509 metadatum reference. + c509_certs: Option>, + /// Optional list of Public keys. + pub_keys: Option>, + /// Optional list of revocation list. + revocation_list: Option>, + /// Optional list of role data. + role_set: Option>, + /// Optional map of purpose key data. + /// Empty map if no purpose key data is present. + purpose_key_data: HashMap>, +} + +/// The first valid purpose key. +const FIRST_PURPOSE_KEY: u16 = 200; +/// The last valid purpose key. +const LAST_PURPOSE_KEY: u16 = 299; + +/// Enum of x509 RBAC metadata with its associated unsigned integer value. 
+#[derive(FromRepr, Debug, PartialEq)] +#[repr(u16)] +pub enum X509RbacMetadataInt { + /// x509 certificates. + X509Certs = 10, + /// c509 certificates. + C509Certs = 20, + /// Public keys. + PubKeys = 30, + /// Revocation list. + RevocationList = 40, + /// Role data set. + RoleSet = 100, +} + +impl X509RbacMetadata { + /// Create a new instance of `X509RbacMetadata`. + pub(crate) fn new() -> Self { + Self { + x509_certs: None, + c509_certs: None, + pub_keys: None, + revocation_list: None, + role_set: None, + purpose_key_data: HashMap::new(), + } + } + + /// Set the x509 certificates. + fn set_x509_certs(&mut self, x509_certs: Vec) { + self.x509_certs = Some(x509_certs); + } + + /// Set the c509 certificates. + fn set_c509_certs(&mut self, c509_certs: Vec) { + self.c509_certs = Some(c509_certs); + } + + /// Set the public keys. + fn set_pub_keys(&mut self, pub_keys: Vec) { + self.pub_keys = Some(pub_keys); + } + + /// Set the revocation list. + fn set_revocation_list(&mut self, revocation_list: Vec<[u8; 16]>) { + self.revocation_list = Some(revocation_list); + } + + /// Set the role data set. 
+ fn set_role_set(&mut self, role_set: Vec) { + self.role_set = Some(role_set); + } +} + +impl Decode<'_, ()> for X509RbacMetadata { + fn decode(d: &mut Decoder, _ctx: &mut ()) -> Result { + let map_len = d.map()?.ok_or(decode::Error::message( + "Error indefinite map in X509RbacMetadata", + ))?; + let mut x509_rbac_metadata = X509RbacMetadata::new(); + for _ in 0..map_len { + let key = d.u16()?; + if let Some(key) = X509RbacMetadataInt::from_repr(key) { + match key { + X509RbacMetadataInt::X509Certs => { + let x509_certs = decode_array(d)?; + x509_rbac_metadata.set_x509_certs(x509_certs); + }, + X509RbacMetadataInt::C509Certs => { + let c509_certs = decode_array(d)?; + x509_rbac_metadata.set_c509_certs(c509_certs); + }, + X509RbacMetadataInt::PubKeys => { + let pub_keys = decode_array(d)?; + x509_rbac_metadata.set_pub_keys(pub_keys); + }, + X509RbacMetadataInt::RevocationList => { + let revocation_list = decode_revocation_list(d)?; + x509_rbac_metadata.set_revocation_list(revocation_list); + }, + X509RbacMetadataInt::RoleSet => { + let role_set = decode_array(d)?; + x509_rbac_metadata.set_role_set(role_set); + }, + } + } else { + if !(FIRST_PURPOSE_KEY..=LAST_PURPOSE_KEY).contains(&key) { + return Err(decode::Error::message(format!("Invalid purpose key set, should be with the range {FIRST_PURPOSE_KEY} - {LAST_PURPOSE_KEY}"))); + } + x509_rbac_metadata + .purpose_key_data + .insert(key, decode_any(d)?); + } + } + Ok(x509_rbac_metadata) + } +} + +/// Decode an array of type T. +fn decode_array<'b, T>(d: &mut Decoder<'b>) -> Result, decode::Error> +where T: Decode<'b, ()> { + let len = d.array()?.ok_or(decode::Error::message( + "Error indefinite array in X509RbacMetadata", + ))?; + let mut vec = Vec::with_capacity(usize::try_from(len).map_err(decode::Error::message)?); + for _ in 0..len { + vec.push(T::decode(d, &mut ())?); + } + Ok(vec) +} + +/// Decode an array of revocation list. 
+fn decode_revocation_list(d: &mut Decoder) -> Result, decode::Error> { + let len = d.array()?.ok_or(decode::Error::message( + "Error indefinite array in X509RbacMetadata revocation list", + ))?; + let mut revocation_list = + Vec::with_capacity(usize::try_from(len).map_err(decode::Error::message)?); + for _ in 0..len { + let arr: [u8; 16] = d + .bytes()? + .try_into() + .map_err(|_| decode::Error::message("Invalid revocation list size"))?; + revocation_list.push(arr); + } + Ok(revocation_list) +} diff --git a/hermes/crates/cardano-chain-follower/src/metadata/x509/rbac/pub_key.rs b/hermes/crates/cardano-chain-follower/src/metadata/x509/rbac/pub_key.rs new file mode 100644 index 000000000..f801bf8ef --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/metadata/x509/rbac/pub_key.rs @@ -0,0 +1,66 @@ +//! Public key type for RBAC metadata + +use minicbor::{data::Tag, decode, Decode, Decoder}; + +/// Enum of possible public key type. +#[derive(Debug, PartialEq)] +pub(crate) enum SimplePublicKeyType { + /// Undefined indicates skipped element. + Undefined, + /// Deleted indicates the key is deleted. + Deleted, + /// Ed25519 key. + Ed25519([u8; 32]), +} + +/// Enum of possible public key tag. +enum PublicKeyTag { + /// Deleted Key tag 31. + Deleted, + /// Ed25519 Key tag 32773. + Ed25519, +} + +impl PublicKeyTag { + /// Get the tag value. + fn tag(self) -> Tag { + match self { + PublicKeyTag::Deleted => Tag::new(0x31), + PublicKeyTag::Ed25519 => Tag::new(0x8005), + } + } +} + +impl Decode<'_, ()> for SimplePublicKeyType { + fn decode(d: &mut Decoder, _ctx: &mut ()) -> Result { + match d.datatype()? 
{ + minicbor::data::Type::Tag => { + let tag = d.tag()?; + match tag { + t if t == PublicKeyTag::Deleted.tag() => Ok(SimplePublicKeyType::Deleted), + t if t == PublicKeyTag::Ed25519.tag() => { + let bytes = d.bytes()?; + let mut ed25519 = [0u8; 32]; + if bytes.len() == 32 { + ed25519.copy_from_slice(bytes); + Ok(SimplePublicKeyType::Ed25519(ed25519)) + } else { + Err(decode::Error::message("Invalid length for Ed25519 key")) + } + }, + _ => { + Err(decode::Error::message( + "Unknown tag for SimplePublicKeyType", + )) + }, + } + }, + minicbor::data::Type::Undefined => Ok(SimplePublicKeyType::Undefined), + _ => { + Err(decode::Error::message( + "Invalid datatype for SimplePublicKeyType", + )) + }, + } + } +} diff --git a/hermes/crates/cardano-chain-follower/src/metadata/x509/rbac/role_data.rs b/hermes/crates/cardano-chain-follower/src/metadata/x509/rbac/role_data.rs new file mode 100644 index 000000000..630909804 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/metadata/x509/rbac/role_data.rs @@ -0,0 +1,168 @@ +//! Role data for RBAC metadata. + +use std::collections::HashMap; + +use minicbor::{decode, Decode, Decoder}; +use strum::FromRepr; + +use super::X509RbacMetadataInt; +use crate::metadata::x509::decode_any; + +/// Struct of role data. +#[derive(Debug, PartialEq)] +pub(crate) struct RoleData { + /// Role number. + role_number: u8, + /// Optional role signing key. + role_signing_key: Option, + /// Optional role encryption key. + role_encryption_key: Option, + /// Optional payment key. + payment_key: Option, + /// Optional role extended data keys. + /// Empty map if no role extended data keys. + role_extended_data_keys: HashMap>, +} + +/// The first valid role extended data key. +const FIRST_ROLE_EXT_KEY: u8 = 10; +/// The last valid role extended data key. +const LAST_ROLE_EXT_KEY: u8 = 99; + +/// Enum of role data with its associated unsigned integer value. 
+#[allow(clippy::module_name_repetitions)] +#[derive(FromRepr, Debug, PartialEq)] +#[repr(u8)] +pub enum RoleDataInt { + /// Role number. + RoleNumber = 0, + /// Role signing key. + RoleSigningKey = 1, + /// Role encryption key. + RoleEncryptionKey = 2, + /// Payment key. + PaymentKey = 3, +} + +#[allow(clippy::module_name_repetitions)] +impl RoleData { + /// Create a new instance of `RoleData`. + fn new() -> Self { + Self { + role_number: 0, + role_signing_key: None, + role_encryption_key: None, + payment_key: None, + role_extended_data_keys: HashMap::new(), + } + } + + /// Set the role number. + fn set_role_number(&mut self, role_number: u8) { + self.role_number = role_number; + } + + /// Set the role signing key. + fn set_role_signing_key(&mut self, key: KeyReference) { + self.role_signing_key = Some(key); + } + + /// Set the role encryption key. + fn set_role_encryption_key(&mut self, key: KeyReference) { + self.role_encryption_key = Some(key); + } + + /// Set the payment key. + fn set_payment_key(&mut self, key: u64) { + self.payment_key = Some(key); + } +} + +impl Decode<'_, ()> for RoleData { + fn decode(d: &mut Decoder, ctx: &mut ()) -> Result { + let map_len = d + .map()? 
+ .ok_or(decode::Error::message("role set has indefinite length"))?; + let mut role_data = RoleData::new(); + for _ in 0..map_len { + let key = d.u8()?; + if let Some(key) = RoleDataInt::from_repr(key) { + match key { + RoleDataInt::RoleNumber => { + role_data.set_role_number(d.u8()?); + }, + RoleDataInt::RoleSigningKey => { + role_data.set_role_signing_key(KeyReference::decode(d, ctx)?); + }, + RoleDataInt::RoleEncryptionKey => { + role_data.set_role_encryption_key(KeyReference::decode(d, ctx)?); + }, + RoleDataInt::PaymentKey => { + role_data.set_payment_key(d.u64()?); + }, + } + } else { + if !(FIRST_ROLE_EXT_KEY..=LAST_ROLE_EXT_KEY).contains(&key) { + return Err(decode::Error::message(format!("Invalid role extended data key, should be with the range {FIRST_ROLE_EXT_KEY} - {LAST_ROLE_EXT_KEY}"))); + } + role_data + .role_extended_data_keys + .insert(key, decode_any(d)?); + } + } + Ok(role_data) + } +} + +/// Enum of key reference. +#[derive(Debug, PartialEq)] +enum KeyReference { + /// Key local reference. + KeyLocalRef(KeyLocalRef), + /// Key hash. + KeyHash(Vec), +} + +impl Decode<'_, ()> for KeyReference { + fn decode(d: &mut Decoder, ctx: &mut ()) -> Result { + match d.datatype()? { + minicbor::data::Type::Array => Ok(Self::KeyLocalRef(KeyLocalRef::decode(d, ctx)?)), + minicbor::data::Type::Bytes => Ok(Self::KeyHash(d.bytes()?.to_vec())), + _ => Err(decode::Error::message("Invalid data type for KeyReference")), + } + } +} + +/// Struct of key local reference. +#[derive(Debug, PartialEq)] +struct KeyLocalRef { + /// Local reference. + local_ref: LocalRefInt, + /// Key offset. + key_offset: u64, +} + +/// Enum of local reference with its associated unsigned integer value. +#[derive(FromRepr, Debug, PartialEq)] +#[repr(u8)] +enum LocalRefInt { + /// x509 certificates. + X509Certs = X509RbacMetadataInt::X509Certs as u8, // 10 + /// c509 certificates. + C509Certs = X509RbacMetadataInt::C509Certs as u8, // 20 + /// Public keys. 
+ PubKeys = X509RbacMetadataInt::PubKeys as u8, // 30 +} + +impl Decode<'_, ()> for KeyLocalRef { + fn decode(d: &mut Decoder, _ctx: &mut ()) -> Result { + d.array()?; + let local_ref = LocalRefInt::from_repr(d.u8()?) + .ok_or(decode::Error::message("Invalid local reference"))?; + let key_offset = d.u64()?; + Ok(Self { + local_ref, + key_offset, + }) + } +} diff --git a/hermes/crates/cardano-chain-follower/src/mithril_query.rs b/hermes/crates/cardano-chain-follower/src/mithril_query.rs new file mode 100644 index 000000000..7c6e38a13 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/mithril_query.rs @@ -0,0 +1,47 @@ +//! Functions which query or interact with the Immutable blockchain on disk. + +use std::path::Path; + +use pallas_hardano::storage::immutable::FallibleBlock; +use tokio::task; + +use crate::{ + error::{Error, Result}, + Point, +}; + +/// Synchronous Immutable block iterator. +pub(crate) type ImmutableBlockIterator = Box + Send + Sync>; + +/// Get a mithril snapshot iterator. +pub(crate) async fn make_mithril_iterator( + path: &Path, start: &Point, +) -> Result { + let path = path.to_path_buf(); + let start = start.clone(); + // Initial input + let res = task::spawn_blocking(move || { + pallas_hardano::storage::immutable::read_blocks_from_point(&path, start.clone().into()) + .map_err(|error| Error::MithrilSnapshot(Some(error))) + }) + .await; + + match res { + Ok(res) => res, + Err(_error) => Err(Error::MithrilSnapshot(None)), + } +} + +/// Get latest TIP of the Mithril Immutable Chain. 
+pub(crate) async fn get_mithril_tip_point(path: &Path) -> Result { + let path = path.to_path_buf(); + let res = + task::spawn_blocking(move || pallas_hardano::storage::immutable::get_tip(&path)).await; + + match res { + Ok(Ok(Some(res))) => Ok(res.into()), + Ok(Ok(None)) => Err(Error::MithrilSnapshot(None)), + Ok(Err(error)) => Err(Error::MithrilSnapshot(Some(error))), + Err(_error) => Err(Error::MithrilSnapshot(None)), + } +} diff --git a/hermes/crates/cardano-chain-follower/src/mithril_snapshot.rs b/hermes/crates/cardano-chain-follower/src/mithril_snapshot.rs index 94c511216..4c59cb51d 100644 --- a/hermes/crates/cardano-chain-follower/src/mithril_snapshot.rs +++ b/hermes/crates/cardano-chain-follower/src/mithril_snapshot.rs @@ -1,137 +1,47 @@ //! Internal Mithril snapshot functions. -use std::path::PathBuf; - -use pallas::network::miniprotocols::Point; -use pallas_hardano::storage::immutable::FallibleBlock; - -use crate::{Error, MultiEraBlockData, Result}; - -/// Wraps the iterator type returned by Pallas. -pub(crate) struct MithrilSnapshotIterator { - /// Inner iterator. - inner: Box + Send + Sync>, -} - -impl Iterator for MithrilSnapshotIterator { - type Item = FallibleBlock; - - fn next(&mut self) -> Option { - self.inner.next() - } -} +use logcall::logcall; +use tracing_log::log; + +use crate::{ + mithril_snapshot_data::latest_mithril_snapshot_id, + mithril_snapshot_iterator::MithrilSnapshotIterator, network::Network, MultiEraBlock, Point, +}; + +// Any single program using this crate can have EXACTLY THREE Mithril snapshots. +// One, for each of the known networks. +// If more mithril snapshots are configured, then the crate will error. +// It IS possible to redundantly configure mithril snapshots, provided they are +// identically configured. The only config option that can change, is if the snapshot is +// auto-updated, ANY follower which sets this enables this function and it can not be +// disabled once started without stopping the program. 
/// Holds information about a Mithril snapshot. #[derive(Clone)] pub(crate) struct MithrilSnapshot { - /// Path to the Mithril snapshot. - pub path: PathBuf, - /// Snapshot's tip. - pub tip: Point, + /// Network that this snapshot is configured for + chain: Network, } impl MithrilSnapshot { - /// Gets information about the snapshot at the given path. - /// - /// # Arguments - /// - /// * `path`: Mithril snapshot path. - /// - /// # Errors - /// - /// Returns Err if it can't read where the tip is at in the snapshot or - /// if reading the snapshot files fails. - pub fn from_path(path: PathBuf) -> Result { - let tip = pallas_hardano::storage::immutable::get_tip(&path) - .map_err(|_| Error::MithrilSnapshot)? - .ok_or(Error::MithrilSnapshot)?; - - Ok(Self { path, tip }) + /// Create a new Mithril Snapshot handler + pub(crate) fn new(chain: Network) -> Self { + Self { chain } } - /// Tries reading a block from the Mithril snapshot. Returns None if the point - /// is not contained in the snapshot. + /// Checks if the snapshot contains a given point. /// /// # Arguments + /// * `network`: The network that this function should check against. + /// * `point`: The point to be checked for existence within the specified Mithril + /// snapshot. /// - /// * `point`: Point at which to read the block. - /// - /// # Errors - /// - /// Returns Err if anything fails while reading the block data. - pub fn try_read_block(&self, point: Point) -> Result> { - if !self.contains_point(&point) { - return Ok(None); - } - - let mut block_data_iter = - pallas_hardano::storage::immutable::read_blocks_from_point(&self.path, point) - .map_err(|_| Error::MithrilSnapshot)?; - - match block_data_iter.next() { - Some(res) => { - let block_data = res.map_err(|_| Error::MithrilSnapshot)?; + /// Returns true if the point exists within the Mithril snapshot for the specified + /// network, false otherwise. 
+ pub(crate) fn contains_point(&self, point: &Point) -> bool { + let latest_id = latest_mithril_snapshot_id(self.chain); - Ok(Some(MultiEraBlockData(block_data))) - }, - None => Ok(None), - } - } - - /// Tries reading a range of blocks from the Mithril snapshot. - /// Returns None if the range is not contained in the snapshot. - /// - /// This returns the last point that was read. This is useful to check - /// if the range was partially read. - /// - /// # Arguments - /// - /// * `from`: Start point. - /// * `to`: End point. - /// - /// # Errors - /// - /// Returns Err if anything fails while reading any block's data. - #[allow(clippy::needless_pass_by_value)] - pub fn try_read_block_range( - &self, from: Point, to: Point, - ) -> Result)>> { - if !self.contains_point(&from) { - return Ok(None); - } - - let blocks_iter = - pallas_hardano::storage::immutable::read_blocks_from_point(&self.path, from) - .map_err(|_| Error::MithrilSnapshot)?; - - let mut block_data_vec = Vec::new(); - for result in blocks_iter { - let block_data = MultiEraBlockData(result.map_err(|_| Error::MithrilSnapshot)?); - - // TODO(fsgr): Should we check the hash as well? - // Maybe throw an error if we don't get the block we were expecting at that - // slot? - if block_data.decode()?.slot() > to.slot_or_default() { - break; - } - - block_data_vec.push(block_data); - } - - // Get the point from last block read. - // Pop here to get an owned value (we'll insert it back later). 
- match block_data_vec.pop() { - Some(last_block_data) => { - let last_block = last_block_data.decode()?; - let last_block_point = Point::new(last_block.slot(), last_block.hash().to_vec()); - - // Push the last block data back - block_data_vec.push(last_block_data); - - Ok(Some((last_block_point, block_data_vec))) - }, - None => Ok(None), - } + point.slot_or_default() <= latest_id.tip().slot_or_default() } /// Tries get an iterator that reads blocks from the Mithril snapshot from a given @@ -143,26 +53,38 @@ impl MithrilSnapshot { /// /// # Errors /// - /// Returns Err if anything fails while trying to find the starting point in the - /// snapshot. - pub fn try_read_blocks_from_point(&self, point: Point) -> Option { - if !self.contains_point(&point) { + /// Returns None if its not possible to iterate a mithril snapshot from the requested + /// point for ANY reason. + #[allow(clippy::indexing_slicing)] + #[logcall("debug")] + pub(crate) async fn try_read_blocks_from_point( + &self, point: &Point, + ) -> Option { + let snapshot_id = latest_mithril_snapshot_id(self.chain); + let snapshot_path = snapshot_id.immutable_path(); + + // Quick check if the block can be within the immutable data. + if !self.contains_point(point) { return None; } - let iter = pallas_hardano::storage::immutable::read_blocks_from_point(&self.path, point) - .map_err(|_| Error::MithrilSnapshot) - .ok()?; - - Some(MithrilSnapshotIterator { inner: iter }) + // We don't know the previous block, so we need to find it. + MithrilSnapshotIterator::new(self.chain, &snapshot_path, point, None) + .await + .ok() } - /// Naively checks if the snapshot contains a point. - /// - /// # Arguments - /// - /// * `point`: Point to check. - pub fn contains_point(&self, point: &Point) -> bool { - point.slot_or_default() <= self.tip.slot_or_default() + /// Read a single block from a known point. 
+ #[allow(clippy::indexing_slicing)] + #[logcall("debug")] + pub(crate) async fn read_block_at(&self, point: &Point) -> Option { + if let Some(iterator) = self.try_read_blocks_from_point(point).await { + let block = iterator.next().await; + return block; + } + None } } + +#[cfg(test)] +mod tests {} diff --git a/hermes/crates/cardano-chain-follower/src/mithril_snapshot_config.rs b/hermes/crates/cardano-chain-follower/src/mithril_snapshot_config.rs new file mode 100644 index 000000000..de88efec4 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/mithril_snapshot_config.rs @@ -0,0 +1,528 @@ +//! Configuration for the Mithril Snapshot used by the follower. + +use std::{ + path::{Path, PathBuf}, + str::FromStr, + sync::LazyLock, +}; + +use anyhow::bail; +use dashmap::DashMap; +use futures::future::join_all; +use strum::IntoEnumIterator; +use tokio::{ + fs::{self}, + io::{self}, + sync::{mpsc, Mutex}, + task::JoinHandle, +}; +use tracing::{debug, error}; + +use crate::{ + error::{Error, Result}, + mithril_snapshot_data::{latest_mithril_snapshot_id, SnapshotData}, + mithril_snapshot_sync::background_mithril_update, + network::Network, + point::ORIGIN_POINT, + snapshot_id::SnapshotId, + turbo_downloader::DlConfig, + Point, +}; + +/// Type we use to manage the Sync Task handle map. +type SyncMap = DashMap>>>; +/// Handle to the mithril sync thread. One for each Network ONLY. +static SYNC_JOIN_HANDLE_MAP: LazyLock = LazyLock::new(|| { + let map = DashMap::new(); + for network in Network::iter() { + map.insert(network, Mutex::new(None)); + } + map +}); + +/// Subdirectory where we unpack archives temporarily. +const TMP_SUBDIR: &str = "tmp"; + +/// Message we send when Mithril Snapshot updates +#[derive(Debug)] +pub(crate) struct MithrilUpdateMessage { + /// The largest block on the mithril snapshot. + pub tip: Point, + /// The block immediately before it. + pub previous: Point, +} + +/// Configuration used for the Mithril Snapshot downloader. 
+#[derive(Clone, Debug)] +pub struct MithrilSnapshotConfig { + /// What Blockchain network are we configured for. + pub chain: Network, + /// Path to the Mithril snapshot the follower should use. + /// Note: this is a base directory. The Actual data will be stored under here. + /// archive downloads -> `/dl` + /// unpacked snapshots -> `/` + /// extracting snapshots -> `/tmp` + pub path: PathBuf, + /// Address of the Mithril Aggregator to use to find the latest snapshot data to + /// download. + pub aggregator_url: String, + /// The Genesis Key needed for a network to do Mithril snapshot validation. + pub genesis_key: String, + /// Downloader configuration. + pub dl_config: Option, +} + +impl MithrilSnapshotConfig { + /// Sets the defaults for a given cardano network. + /// Each network has a different set of defaults, so no single "default" can apply. + /// This function is preferred to the `default()` standard function. + #[must_use] + pub fn default_for(chain: Network) -> Self { + Self { + chain, + path: chain.default_mithril_path(), + aggregator_url: chain.default_mithril_aggregator(), + genesis_key: chain.default_mithril_genesis_key(), + dl_config: None, + } + } + + /// Set a custom downloader configuration. + pub fn with_dl_config(mut self, config: DlConfig) -> Self { + self.dl_config = Some(config); + self + } + + /// Try and recover the latest snapshot id from the files on disk. + #[must_use] + pub(crate) async fn recover_latest_snapshot_id(&self) -> Option { + // Can we read directory entries from the base path, if not then there is no latest + // snapshot. + let path = self.path.clone(); + debug!("Recovering latest snapshot id from {:?}", &path); + + let Ok(mut entries) = fs::read_dir(&self.path).await else { + error!( + "Getting latest snapshot failed: Can't read entries from {}", + self.path.to_string_lossy() + ); + return None; + }; + + let mut latest_immutable_file: u64 = 0; // Can't have a 0 file. 
+ let mut latest_path = PathBuf::new(); + + loop { + // Get the next entry, stop on any error, or no entries left. + let Ok(Some(entry)) = entries.next_entry().await else { + break; + }; + + if let Some(immutable_file) = SnapshotId::parse_path(&entry.path()) { + if immutable_file > latest_immutable_file { + latest_immutable_file = immutable_file; + latest_path = entry.path(); + } + } + } + + if latest_immutable_file > 0 { + return SnapshotId::try_new(self.chain, &latest_path).await; + } + + None + } + + /// Activate the tmp mithril path to a numbered snapshot path. + /// And then remove any left over files in download or the tmp path, or old snapshots. + pub(crate) async fn activate(&self, snapshot_number: u64) -> io::Result { + let new_path = self.mithril_path(snapshot_number); + let latest_id = latest_mithril_snapshot_id(self.chain); + + debug!( + "Activating snapshot: {} {} {:?}", + snapshot_number, + new_path.to_string_lossy(), + latest_id + ); + + // Can't activate anything if the tmp directory does not exist. + if !self.tmp_path().is_dir() { + error!("No tmp path found to activate."); + return Err(io::Error::new(io::ErrorKind::NotFound, "No tmp path found")); + } + + // Check if we would actually be making a newer snapshot active. (Should never fail, but + // check anyway.) + if latest_id >= snapshot_number { + error!("Latest snapshot {latest_id:?} is >= than requested snapshot {snapshot_number}"); + return Err(io::Error::new( + io::ErrorKind::NotFound, + "Latest snapshot is newer or equal", + )); + } + + // Rename the tmp path to the new numbered path. + fs::rename(self.tmp_path(), &new_path).await?; + + Ok(new_path) + } + + /// Cleanup the tmp mithril path, all old mithril paths and the dl path. + /// Removes those directories if they exist and all the files they contain. + pub(crate) async fn cleanup(&self) -> io::Result<()> { + let mut cleanup_tasks = Vec::new(); + + // Cleanup up the tmp path. 
(Shouldn't normally exist, but clean it anyway) + let tmp = self.tmp_path(); + if tmp.exists() { + debug!("Cleaning up TMP @ {}", tmp.display()); + cleanup_tasks.push(fs::remove_dir_all(tmp.clone())); + } + + // Cleanup all numbered paths which are not this latest path + match fs::read_dir(&self.path).await { + Err(err) => { + error!( + "Unexpected failure reading entries in the mithril path {} : {}", + self.path.to_string_lossy(), + err + ); + }, + Ok(mut entries) => { + // Get latest mithril snapshot path and number. + let latest_snapshot = latest_mithril_snapshot_id(self.chain); + + loop { + // Get the next entry, stop on any error, or no entries left. + let Ok(Some(entry)) = entries.next_entry().await else { + break; + }; + + // If None, its not a snapshot path, so continue. + if let Some(this_snapshot) = SnapshotId::new(&entry.path(), ORIGIN_POINT) { + // Don't do anything with the latest snapshot. + // Comparison does NOT use `tip` so we construct a temporary ID without it. + if this_snapshot != latest_snapshot { + debug!( + "Cleaning up non-latest snapshot @ {}", + entry.path().display() + ); + cleanup_tasks.push(fs::remove_dir_all(entry.path())); + } + }; + } + }, + } + + for result in join_all(cleanup_tasks).await { + match result { + Ok(()) => (), + Err(err) => { + error!("Failed to cleanup snapshot: {err:?}"); + }, + } + } + + Ok(()) + } + + /// Deduplicate a file in the tmp directory vs its equivalent in the current snapshot. + /// + /// This does not check if they SHOULD be de-duped, only de-dupes the files specified. + pub(crate) fn dedup_tmp( + &self, tmp_file: &Path, latest_snapshot: &SnapshotData, + ) -> anyhow::Result<()> { + // Get the matching src file in the latest mithril snapshot to compare against. 
+ let snapshot_path = latest_snapshot.id().as_ref(); + let tmp_path = self.tmp_path(); + + let Ok(relative_file) = tmp_file.strip_prefix(&tmp_path) else { + error!("Failed to get relative path of file."); + bail!("Failed to strip prefix: {tmp_path:?}"); + }; + + // IF we make it here, the files are identical, so we can de-dup them safely. + // Remove the tmp file momentarily. + if tmp_file.exists() { + if let Err(error) = std::fs::remove_file(tmp_file) { + error!( + "Error removing tmp file {} : {}", + tmp_file.to_string_lossy(), + error + ); + bail!("Failed to remove tmp file: {tmp_file:?}"); + } + } + + let src_file = snapshot_path.join(relative_file); + let src_file = src_file.as_path(); + // Hardlink the src file to the tmp file. + if let Some(parent) = tmp_file.parent() { + if let Err(error) = std::fs::create_dir_all(parent) { + error!("Error creating parent dir {parent:?} for tmp file {tmp_file:?}: {error}"); + } + } + if let Err(error) = std::fs::hard_link(src_file, tmp_file) { + error!( + "Error linking src file {} to tmp file {} : {}", + src_file.to_string_lossy(), + tmp_file.to_string_lossy(), + error + ); + bail!("Failed to link src file: {src_file:?}"); + } + + // And if we made it here, file was successfully de-duped. YAY. + debug!("DeDup OK: {tmp_file:?}"); + Ok(()) + } + + /// Returns the path to Latest Tmp Snapshot Data. + /// Will use a path relative to mithril data path. + #[must_use] + pub(crate) fn tmp_path(&self) -> PathBuf { + let mut snapshot_path = self.path.clone(); + snapshot_path.push(TMP_SUBDIR); + snapshot_path + } + + /// Returns the path to the Numbered Snapshot Data. + /// Will use a path relative to mithril data path. + #[must_use] + pub(crate) fn mithril_path(&self, snapshot_number: u64) -> PathBuf { + let mut snapshot_path = self.path.clone(); + snapshot_path.push(snapshot_number.to_string()); + snapshot_path + } + + /// Check if the Mithril Snapshot Path is valid an usable. 
+ async fn validate_path(&self) -> Result<()> { + let path = self.path.clone(); + debug!( + path = path.to_string_lossy().to_string(), + "Validating Mithril Snapshot Path" + ); + + // If the path does not exist, try and make it. + if !path.exists() { + // Try and make the directory. + fs::create_dir_all(&path) + .await + .map_err(|e| Error::MithrilSnapshotDirectoryCreation(path.clone(), e))?; + } + + // If the path is NOT a directory, then we can't use it. + if !path.is_dir() { + return Err(Error::MithrilSnapshotDirectoryNotFound( + path.display().to_string(), + )); + } + + // If the directory is not writable then we can't use + if !check_writable(&path) { + return Err(Error::MithrilSnapshotDirectoryNotWritable(path.clone())); + } + + Ok(()) + } + + /// Validate the Genesis VKEY is at least the correct kind of data. + fn validate_genesis_vkey(&self) -> Result<()> { + // First sanitize the vkey by removing all whitespace and make sure its actually valid + // hex. + let vkey = remove_whitespace(&self.genesis_key); + if !is_hex(&vkey) { + return Err(Error::MithrilGenesisVKeyNotHex(self.chain)); + } + + Ok(()) + } + + /// Validate the Aggregator is resolvable and responsive. + async fn validate_aggregator_url(&self) -> Result<()> { + let url = self.aggregator_url.clone(); + let key = self.genesis_key.clone(); + + debug!(url = url, "Validating Aggregator URL"); + + // Not configured already, and not already in use, so make sure its valid. + // We do this by trying to use it to get a list of snapshots. + let client = mithril_client::ClientBuilder::aggregator(&url, &key) + .build() + .map_err(|e| Error::MithrilClient(self.chain, url.clone(), e))?; + + let snapshots = client + .snapshot() + .list() + .await + .map_err(|e| Error::MithrilClient(self.chain, url.clone(), e))?; + + // Check we have a snapshot, and its for our network. 
+ match snapshots.first() { + Some(snapshot) => { + let _aggregator_network = + Network::from_str(&snapshot.beacon.network).map_err(|_err| { + Error::MithrilClientNetworkMismatch( + self.chain, + snapshot.beacon.network.clone(), + ) + })?; + }, + None => return Err(Error::MithrilClientNoSnapshots(self.chain, url)), + } + + Ok(()) + } + + /// Validate the mithril sync configuration is correct. + pub(crate) async fn validate(&self) -> Result<()> { + // Validate the path exists and is a directory, and is writable. + self.validate_path().await?; + // Validate the genesis vkey is valid. + self.validate_genesis_vkey()?; + // Validate the Aggregator is valid and responsive. + self.validate_aggregator_url().await?; + + Ok(()) + } + + /// Run a Mithril Follower for the given network and configuration. + pub(crate) async fn run(&self) -> Result> { + debug!( + chain = self.chain.to_string(), + "Mithril Autoupdate : Starting" + ); + + // Start the Mithril Sync - IFF its not already running. + let lock_entry = match SYNC_JOIN_HANDLE_MAP.get(&self.chain) { + None => { + error!("Join Map improperly initialized: Missing {}!!", self.chain); + return Err(Error::Internal); // Should not get here. + }, + Some(entry) => entry, + }; + let mut locked_handle = lock_entry.value().lock().await; + + if (*locked_handle).is_some() { + debug!("Mithril Already Running for {}", self.chain); + return Err(Error::MithrilSnapshotSyncAlreadyRunning(self.chain)); + } + + self.validate().await?; + + // Create a Queue we use to signal the Live Blockchain Follower that the Mithril Snapshot + // TIP has changed. + // Given how long even the smallest blockchains take to download, a queue depth of 2 is + // plenty. 
+ let (tx, rx) = mpsc::channel::(2); + + // let handle = tokio::spawn(background_mithril_update(chain, self.clone(), tx)); + *locked_handle = Some(tokio::spawn(background_mithril_update(self.clone(), tx))); + + // sync_map.insert(chain, handle); + debug!( + chain = self.chain.to_string(), + "Mithril Autoupdate : Started" + ); + + Ok(rx) + } +} + +/// Check that a given mithril snapshot path and everything in it is writable. +/// We don't care why its NOT writable, just that it is either all writable, or not. +/// Will return false on the first detection of a read only file or directory. +fn check_writable(path: &Path) -> bool { + // Check the permissions of the current path + if let Ok(metadata) = path.metadata() { + if metadata.permissions().readonly() { + return false; + } + } + + // Can't read the directory for any reason, so can't write to the directory. + let path_iterator = match path.read_dir() { + Err(_) => return false, + Ok(entries) => entries, + }; + + // Recursively check the contents of the directory + for entry in path_iterator { + let Ok(entry) = entry else { return false }; + + // If the entry is a directory, recursively check its permissions + // otherwise just check we could re-write it. + if let Ok(metadata) = entry.metadata() { + if metadata.is_dir() { + // This can NOT be combined with the `if` above. + // Doing so will cause the `else` to run on non-writable directories. + // Which is wrong. + if !check_writable(&entry.path()) { + return false; + } + } else { + // If its not a directory then it must be a file. + if metadata.permissions().readonly() { + return false; + } + } + } else { + // Can't identify the file type, so we can't dedup it. + return false; + } + } + // Otherwise we could write everything we scanned. 
+ true +} + +/// Remove whitespace from a string and return the new string +fn remove_whitespace(s: &str) -> String { + s.chars() + .filter(|&c| !c.is_ascii_whitespace()) + .collect::() +} + +/// Check if a string is an even number of hex digits. +fn is_hex(s: &str) -> bool { + s.chars().count() % 2 == 0 && s.chars().all(|c| c.is_ascii_hexdigit()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_default_for() { + let network = Network::Preprod; + let config = MithrilSnapshotConfig::default_for(network); + + assert_eq!(config.chain, network); + assert_eq!(config.path, network.default_mithril_path()); + assert_eq!(config.aggregator_url, network.default_mithril_aggregator()); + assert_eq!(config.genesis_key, network.default_mithril_genesis_key()); + } + + #[tokio::test] + async fn test_validate_genesis_vkey() { + let config = MithrilSnapshotConfig { + chain: Network::Preprod, + path: PathBuf::new(), + aggregator_url: String::new(), + genesis_key: "1234abcd".to_string(), + dl_config: None, + }; + + assert!(config.validate_genesis_vkey().is_ok()); + + let invalid_config = MithrilSnapshotConfig { + chain: Network::Preprod, + path: PathBuf::new(), + aggregator_url: String::new(), + genesis_key: "1234abcz".to_string(), + dl_config: None, + }; + + assert!(invalid_config.validate_genesis_vkey().is_err()); + } +} diff --git a/hermes/crates/cardano-chain-follower/src/mithril_snapshot_data.rs b/hermes/crates/cardano-chain-follower/src/mithril_snapshot_data.rs new file mode 100644 index 000000000..8c3f146ac --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/mithril_snapshot_data.rs @@ -0,0 +1,62 @@ +//! Data about the current Mithril Snapshot +use std::{default, sync::LazyLock}; + +use dashmap::DashMap; + +use crate::{network::Network, snapshot_id::SnapshotId}; + +/// Current Mithril Snapshot Data for a network. 
+#[derive(Debug, Clone)] +pub(crate) struct SnapshotData { + /// Snapshot ID the data represents + id: SnapshotId, +} + +impl SnapshotData { + /// Create a new Snapshot Data. + pub(crate) fn new(id: SnapshotId) -> Self { + SnapshotData { id } + } + + /// Get the snapshot ID of this Snapshot Data. + pub(crate) fn id(&self) -> &SnapshotId { + &self.id + } +} + +impl default::Default for SnapshotData { + /// The default snapshot data represents there is no latest snapshot. + fn default() -> Self { + SnapshotData { + id: SnapshotId::default(), + } + } +} + +/// Current Mithril Snapshot for a network. +static CURRENT_MITHRIL_SNAPSHOT: LazyLock> = + LazyLock::new(DashMap::new); + +/// Get the current latest snapshot data we have recorded. +pub(crate) fn latest_mithril_snapshot_data(chain: Network) -> SnapshotData { + // There should ALWAYS be a snapshot for the chain if this is called. + + match CURRENT_MITHRIL_SNAPSHOT.get(&chain) { + Some(snapshot_data) => snapshot_data.value().clone(), + None => SnapshotData::default(), + } +} + +/// Get the latest Mithril Snapshot for a network. +pub(crate) fn latest_mithril_snapshot_id(chain: Network) -> SnapshotId { + // There should ALWAYS be a snapshot for the chain if this is called. + latest_mithril_snapshot_data(chain).id +} + +/// Update the latest snapshot data. +pub(crate) fn update_latest_mithril_snapshot(chain: Network, snapshot_id: SnapshotId) { + let snapshot_data = SnapshotData::new(snapshot_id); + + // Save the current snapshot + CURRENT_MITHRIL_SNAPSHOT.insert(chain, snapshot_data); +} diff --git a/hermes/crates/cardano-chain-follower/src/mithril_snapshot_iterator.rs b/hermes/crates/cardano-chain-follower/src/mithril_snapshot_iterator.rs new file mode 100644 index 000000000..7c23210dc --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/mithril_snapshot_iterator.rs @@ -0,0 +1,262 @@ +//! Internal Mithril snapshot iterator functions. 
+ +use std::{ + fmt::Debug, + path::Path, + sync::{Arc, Mutex}, +}; + +use logcall::logcall; +use tokio::task; +use tracing::{debug, error}; +use tracing_log::log; + +use crate::{ + error::{Error, Result}, + mithril_query::{make_mithril_iterator, ImmutableBlockIterator}, + network::Network, + point::ORIGIN_POINT, + MultiEraBlock, Point, +}; + +/// Search backwards by 60 slots (seconds) looking for a previous block. +/// This search window is doubled until the search succeeds. +const BACKWARD_SEARCH_SLOT_INTERVAL: u64 = 60; + +/// Synchronous Inner Iterator state +struct MithrilSnapshotIteratorInner { + /// The chain being iterated + chain: Network, + /// Where we really want to start iterating from + start: Point, + /// Previous iteration point. + previous: Point, + /// Inner iterator. + inner: ImmutableBlockIterator, +} + +impl Debug for MithrilSnapshotIteratorInner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "MithrilSnapshotIteratorInner {{ chain: {:?}, start: {:?}, previous: {:?} }}", + self.chain, self.start, self.previous + ) + } +} + +/// Wraps the iterator type returned by Pallas. +#[derive(Debug)] +pub(crate) struct MithrilSnapshotIterator { + /// Inner Mutable Synchronous Iterator State + inner: Arc>, +} + +/// Create a probe point used in iterations to find the start when its not exactly known. +pub(crate) fn probe_point(point: &Point, distance: u64) -> Point { + // Now that we have the tip, step back about 4 block intervals from tip, and do a fuzzy + // iteration to find the exact two blocks at the end of the immutable chain. + let step_back_search = point.slot_or_default().saturating_sub(distance); + + // We stepped back to the origin, so just return Origin + if step_back_search == 0 { + return ORIGIN_POINT; + } + + // Create a fuzzy search probe by making the hash zero length. 
+ Point::fuzzy(step_back_search) +} + +impl MithrilSnapshotIterator { + /// Try and probe to establish the iterator from the desired point. + async fn try_fuzzy_iterator( + chain: Network, path: &Path, from: &Point, search_interval: u64, + ) -> Option { + let point = probe_point(from, search_interval); + let Ok(mut iterator) = make_mithril_iterator(path, &point).await else { + return None; + }; + + let mut previous = None; + let mut this = None; + + loop { + let next = iterator.next(); + + match next { + Some(Ok(raw_block)) => { + let Ok(block) = pallas::ledger::traverse::MultiEraBlock::decode(&raw_block) + else { + return None; + }; + + let point = Point::new(block.slot(), block.hash().to_vec()); + previous = this; + this = Some(point.clone()); + + debug!("Searching for {from}. {this:?} > {previous:?}"); + + // Stop as soon as we find the point, or exceed it. + if point >= *from { + break; + } + }, + Some(Err(err)) => { + error!("Error while iterating fuzzy mithril data: {}", err); + return None; + }, + None => break, + }; + } + + debug!("Best Found for {from}. {this:?} > {previous:?}"); + + // Fail if we didn't find the destination block, or its immediate predecessor. + previous.as_ref()?; + let this = this?; + + // Remake the iterator, based on the new known point. + let Ok(iterator) = make_mithril_iterator(path, &this).await else { + return None; + }; + + Some(MithrilSnapshotIterator { + inner: Arc::new(Mutex::new(MithrilSnapshotIteratorInner { + chain, + start: this, + previous: previous?, + inner: iterator, + })), + }) + } + + /// Do a fuzzy search to establish the iterator. + /// We use this when we don't know the previous point, and need to find it. 
+ #[allow(clippy::indexing_slicing)] + #[logcall("debug")] + async fn fuzzy_iterator(chain: Network, path: &Path, from: &Point) -> MithrilSnapshotIterator { + let mut backwards_search = BACKWARD_SEARCH_SLOT_INTERVAL; + loop { + if let Some(iterator) = + Self::try_fuzzy_iterator(chain, path, from, backwards_search).await + { + return iterator; + } + + backwards_search *= 2; + } + } + + /// Create a mithril iterator, optionally where we know the previous point. + /// + /// # Arguments + /// + /// `chain`: The blockchain network to iterate. + /// `from`: The point to start iterating from. If the `Point` does not contain a + /// hash, the iteration start is fuzzy. `previous`: The previous point we are + /// iterating, if known. If the previous is NOT known, then the first block + /// yielded by the iterator is discarded and becomes the known previous. + #[allow(clippy::indexing_slicing)] + #[logcall(ok = "debug", err = "error")] + pub(crate) async fn new( + chain: Network, path: &Path, from: &Point, previous_point: Option, + ) -> Result { + if from.is_fuzzy() || (!from.is_origin() && previous_point.is_none()) { + return Ok(Self::fuzzy_iterator(chain, path, from).await); + } + + let previous = if from.is_origin() { + ORIGIN_POINT + } else { + let Some(previous) = previous_point else { + return Err(Error::Internal); + }; + previous + }; + + debug!("Actual Mithril Iterator Start: {}", from); + + let iterator = make_mithril_iterator(path, from).await?; + + Ok(MithrilSnapshotIterator { + inner: Arc::new(Mutex::new(MithrilSnapshotIteratorInner { + chain, + start: from.clone(), + previous, + inner: iterator, + })), + }) + } + + /// Get the next block, in a way that is Async friendly. + /// Returns the next block, or None if there are no more blocks. + pub(crate) async fn next(&self) -> Option { + let inner = self.inner.clone(); + + let res = task::spawn_blocking(move || { + #[allow(clippy::unwrap_used)] // Unwrap is safe here because the lock can't be poisoned. 
+ let mut inner_iterator = inner.lock().unwrap(); + inner_iterator.next() + }) + .await; + + match res { + Ok(res) => res, + Err(_error) => None, + } + } +} + +impl Iterator for MithrilSnapshotIteratorInner { + type Item = MultiEraBlock; + + fn next(&mut self) -> Option { + for maybe_block in self.inner.by_ref() { + if let Ok(block) = maybe_block { + if !self.previous.is_unknown() { + // We can safely fully decode this block. + match MultiEraBlock::new(self.chain, block, &self.previous, 0) { + Ok(block_data) => { + // Update the previous point + // debug!("Pre Previous update 1 : {:?}", self.previous); + self.previous = block_data.point(); + // debug!("Post Previous update 1 : {:?}", self.previous); + + // Make sure we got to the start, otherwise this could be a block + // artifact from a discover previous point + // search. + if block_data < self.start { + continue; + } + + return Some(block_data); + }, + Err(error) => { + error!(previous=%self.previous, error=%error, "Error decoding a block from the snapshot"); + break; + }, + } + } + + // We cannot fully decode this block because we don't know its previous point, + // So this MUST be the first block in iteration, so use it as the previous. 
+ if let Ok(raw_decoded_block) = + pallas::ledger::traverse::MultiEraBlock::decode(&block) + { + // debug!("Pre Previous update 2 : {:?}", self.previous); + self.previous = + Point::new(raw_decoded_block.slot(), raw_decoded_block.hash().to_vec()); + // debug!("Post Previous update 2 : {:?}", self.previous); + continue; + } + error!("Error decoding block to use for previous from the snapshot."); + break; + } + + error!("Error while fetching a block from the snapshot"); + break; + } + + None + } +} diff --git a/hermes/crates/cardano-chain-follower/src/mithril_snapshot_sync.rs b/hermes/crates/cardano-chain-follower/src/mithril_snapshot_sync.rs new file mode 100644 index 000000000..76dfd061a --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/mithril_snapshot_sync.rs @@ -0,0 +1,812 @@ +//! Internal Mithril snapshot downloader task. +//! +//! This task is responsible for downloading Mithril snapshot files. It downloads the +//! latest snapshot file and then sleeps until the next snapshot is available. +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; + +use chrono::{TimeDelta, Utc}; +use dashmap::DashSet; +use humantime::format_duration; +use logcall::logcall; +use mithril_client::{Client, MessageBuilder, MithrilCertificate, Snapshot, SnapshotListItem}; +use tokio::{ + fs::remove_dir_all, + sync::mpsc::Sender, + time::{sleep, Duration}, +}; +use tracing::{debug, error}; +use tracing_log::log; + +use crate::{ + error::{Error, Result}, + mithril_query::get_mithril_tip_point, + mithril_snapshot_config::{MithrilSnapshotConfig, MithrilUpdateMessage}, + mithril_snapshot_data::update_latest_mithril_snapshot, + mithril_snapshot_iterator::MithrilSnapshotIterator, + mithril_turbo_downloader::MithrilTurboDownloader, + network::Network, + snapshot_id::SnapshotId, + stats::{self, mithril_sync_failure, mithril_validation_state}, + MultiEraBlock, +}; + +/// The minimum duration between checks for a new Mithril Snapshot. 
(Must be same as +/// `MINIMUM_MITHRIL_UPDATE_CHECK_DURATION`) +const MINIMUM_MITHRIL_UPDATE_CHECK_INTERVAL: TimeDelta = TimeDelta::minutes(10); // 10 Minutes +/// The minimum duration between checks for a new Mithril Snapshot. (Must be same as +/// `MINIMUM_MITHRIL_UPDATE_CHECK_INTERVAL`) +const MINIMUM_MITHRIL_UPDATE_CHECK_DURATION: Duration = Duration::from_secs(10 * 60); // 10 Minutes +/// Average Mithril Update is 6 Hrs, so don't wait longer than 7. +const MAXIMUM_MITHRIL_UPDATE_CHECK_INTERVAL: TimeDelta = TimeDelta::hours(7); // 7 Hours +/// Average Mithril Update is 6 Hrs, so don't wait longer than 7. +const EXPECTED_MITHRIL_UPDATE_CHECK_INTERVAL: TimeDelta = TimeDelta::hours(6); // 6 Hours +/// We shouldn't get errors that need to wait for this, but if we do wait this long. +/// These errors should be transient if they occur. +const DOWNLOAD_ERROR_RETRY_DURATION: Duration = Duration::from_secs(2 * 60); // 2 Minutes + +/// Returns the Latest and chronologically previous snapshots data from the Aggregator. +/// Will return None if it can not get the Snapshot list, or there are no entries in it. +/// If there is only a single entry then the latest and chronologically next will be +/// identical. +async fn get_latest_snapshots( + client: &Client, network: Network, +) -> Option<(SnapshotListItem, SnapshotListItem)> { + // Get current latest snapshot from the aggregator + let snapshots = match client.snapshot().list().await { + Ok(s) => s, + Err(e) => { + error!("Unexpected Error [{}]: Unable to get Snapshot List from Aggregator for {}. Mithril Snapshots can not update. Sleeping.", network, e); + return None; + }, + }; + + // Get the current latest snapshot. + let Some(latest_snapshot) = snapshots.first() else { + error!("Unexpected Error: Empty Snapshot List from Aggregator for {}. Mithril Snapshots can not update. 
Sleeping", network); + return None; + }; + + let chronologically_previous = snapshots.get(1).unwrap_or(latest_snapshot); + + Some((latest_snapshot.clone(), chronologically_previous.clone())) +} + +/// Given a particular snapshot ID, find the Actual Snapshot for it. +async fn get_snapshot_by_id( + client: &Client, network: Network, snapshot_id: &SnapshotId, +) -> Option { + let snapshots = match client.snapshot().list().await { + Ok(s) => s, + Err(e) => { + error!("Unexpected Error [{}]: Unable to get Snapshot List from Aggregator for {}. Mithril Snapshots can not update. Sleeping.", network, e); + return None; + }, + }; + + // Try and find the current snapshot in the list of available snapshots. + for snapshot in snapshots { + // debug!("Checking Snapshot : {:?}", snapshot); + if *snapshot_id == snapshot.beacon.immutable_file_number { + return Some(snapshot); + } + } + + None +} + +/// Create a client, should never fail, but return None if it does, because we can't +/// continue. +fn create_client(cfg: &MithrilSnapshotConfig) -> Option<(Client, Arc)> { + let downloader = Arc::new(MithrilTurboDownloader::new(cfg.clone())); + + // This can't fail, because we already tested it works. But just in case... + let client = match mithril_client::ClientBuilder::aggregator( + &cfg.aggregator_url, + &cfg.genesis_key, + ) + //.add_feedback_receiver(receiver) + .with_snapshot_downloader(downloader.clone()) + .build() + { + Ok(c) => c, + Err(err) => { + error!(chain=cfg.chain.to_string(),"Unexpected Error [{}]: Unable to create Mithril Client. Mithril Snapshots can not update.", err); + return None; + }, + }; + + Some((client, downloader)) +} + +/// Calculate how long we should wait before we check for another Mithril snapshot. +fn calculate_sleep_duration( + latest_snapshot: &SnapshotListItem, previous_snapshot: &SnapshotListItem, +) -> Duration { + // All times are relative to UTC. 
+ let now = Utc::now(); + let mut next_sleep = MINIMUM_MITHRIL_UPDATE_CHECK_INTERVAL; + + // How long between snapshots, + let mut snapshot_interval = (latest_snapshot.created_at - previous_snapshot.created_at) + .max(MAXIMUM_MITHRIL_UPDATE_CHECK_INTERVAL); + + // We should never be negative, but we CAN be zero if there was no chronologically + // previous snapshot. In this case GUESS how long the interval should be based on + // experience. + if snapshot_interval <= TimeDelta::seconds(0) { + snapshot_interval = EXPECTED_MITHRIL_UPDATE_CHECK_INTERVAL; + } + + let next_expected_snapshot = latest_snapshot.created_at + snapshot_interval; + + if next_expected_snapshot > now { + // We are behind schedule. Sleep until the next expected snapshot should be published. + next_sleep = next_expected_snapshot - now; + } + + next_sleep + .to_std() + .unwrap_or(MINIMUM_MITHRIL_UPDATE_CHECK_DURATION) +} + +/// Get the actual snapshot from the specified `snapshot_item` from the list of snapshots. +/// Returns None if there are any issues doing this, otherwise the Snapshot. +/// The only issues should be transient communications errors. +async fn get_snapshot( + client: &Client, snapshot_item: &SnapshotListItem, network: Network, +) -> Option { + let latest_digest = snapshot_item.digest.as_ref(); + let snapshot = match client.snapshot().get(latest_digest).await { + Ok(snapshot) => { + if let Some(snapshot) = snapshot { + snapshot + } else { + // Some kind of communications error has ocurred. + error!("No snapshot returned for {} ???", network); + return None; + } + }, + Err(err) => { + // Some kind of communications error has ocurred. 
+ error!( + "Failure to get the latest snapshot for {} with error: {}", + network, err + ); + return None; + }, + }; + + Some(snapshot) +} + +/// Download and Verify the Snapshots certificate +async fn download_and_verify_snapshot_certificate( + client: &Client, snapshot: &Snapshot, network: Network, +) -> Option { + let certificate = match client + .certificate() + .verify_chain(&snapshot.certificate_hash) + .await + { + Ok(certificate) => certificate, + Err(err) => { + // The certificate is invalid. + error!("The certificate for {} is invalid: {}", network, err); + return None; + }, + }; + + Some(certificate) +} + +/// This function creates a client based on the given network and genesis vkey. +/// +/// # Arguments +/// +/// * `network` - The network type for the client to connect to. +/// * `aggregator_url` - A reference to the URL of an aggregator that can be used to +/// create the client. +/// * `genesis_vkey` - The genesis verification key, which is needed to authenticate with +/// the server. +/// +/// # Returns +/// +/// This function returns a `Client` object if it successfully connects to the specified +/// URL and creates a client. If it fails, it waits for `DOWNLOAD_ERROR_RETRY_DURATION` +/// before attempting again. This never times out, as we can not attempt this if the +/// aggregator was not contactable when the parameters were defined. +async fn connect_client(cfg: &MithrilSnapshotConfig) -> (Client, Arc) { + // Note: we pre-validated connection before we ran, so failure here should be transient. + // Just wait if we fail, and try again later. + loop { + if let Some(client) = create_client(cfg) { + return client; + } + + // If we couldn't create a client, then we don' t need to do anything. + // Error already logged in create_client, no need to print anything here. + sleep(DOWNLOAD_ERROR_RETRY_DURATION).await; + } +} + +/// Relative Directory for Immutable data within a full mithril snapshot. 
+pub(crate) const MITHRIL_IMMUTABLE_SUB_DIRECTORY: &str = "immutable"; + +/// Get the tip block from the Immutable chain, and the block immediately proceeding it. +/// +/// # Arguments +/// +/// * `path` - The path where the immutable chain is stored. +/// +/// # Returns +/// +/// This function returns the tip block point, and the block point immediately proceeding +/// it in a tuple. +#[allow(clippy::indexing_slicing)] +#[logcall(ok = "debug", err = "error")] +pub(crate) async fn get_mithril_tip(chain: Network, path: &Path) -> Result { + let mut path = path.to_path_buf(); + path.push(MITHRIL_IMMUTABLE_SUB_DIRECTORY); + + debug!( + "Calculating TIP from Immutable storage @ {}", + path.to_string_lossy() + ); + + // Read the Tip (fuzzy), and if we don't get one, or we error, its an error. + // has to be Fuzzy, because we intend to iterate and don't know the previous. + // Nor is there a subsequent block we can use as next. + let tip = get_mithril_tip_point(&path).await?.as_fuzzy(); + debug!("Mithril Tip: {tip}"); + + // Decode and read the tip from the Immutable chain. + let tip_iterator = MithrilSnapshotIterator::new(chain, &path, &tip, None).await?; + let Some(tip_block) = tip_iterator.next().await else { + error!("Failed to fetch the TIP block from the immutable chain."); + + return Err(Error::MithrilSnapshot(None)); + }; + + // Yay, we got a tip, so return it. + Ok(tip_block) +} + +/// Get the Snapshot Data itself from the Aggregator, and a validate Certificate. +async fn get_mithril_snapshot_and_certificate( + chain: Network, client: &Client, item: &SnapshotListItem, +) -> Option<(Snapshot, MithrilCertificate)> { + debug!("Mithril Snapshot background updater for: {chain} : Download snapshot from aggregator."); + + // Download the snapshot from the aggregator. + let Some(snapshot) = get_snapshot(client, item, chain).await else { + // If we couldn't get the snapshot then we don't need to do anything else, transient + // error. 
+ return None; + }; + + debug!("Mithril Snapshot background updater for: {chain} : Download/Verify certificate."); + + // Download and Verify the certificate. + let certificate = download_and_verify_snapshot_certificate(client, &snapshot, chain).await?; + + Some((snapshot, certificate)) +} + +/// Validate that a Mithril Snapshot downloaded matches its certificate. +async fn validate_mithril_snapshot( + chain: Network, certificate: &MithrilCertificate, path: &Path, +) -> bool { + let cert = certificate.clone(); + let mithril_path = path.to_path_buf(); + match tokio::spawn(async move { + // This can be long running and CPU Intensive. + // So we spawn it off to a background task. + MessageBuilder::new() + .compute_snapshot_message(&cert, &mithril_path) + .await + }) + .await + { + Ok(Ok(result)) => { + if certificate.match_message(&result) { + true + } else { + // If we couldn't match then assume its a transient error. + error!("Failed to Match Certificate and Computed Snapshot Message for {chain}!"); + false + } + }, + Ok(Err(error)) => { + // If we got an error then it must be false. + error!("Failed to Compute Snapshot Message: {error}"); + false + }, + Err(error) => { + error!("Snapshot Certificate computation failed: {error}"); + false + }, + } +} + +/// See if we have a latest snapshot already, and if so, validate it. +/// +/// For a existing mithril snapshot to be valid it has to be: +/// 1. The actual latest mithril snapshot; AND +/// 2. It must +async fn get_latest_validated_mithril_snapshot( + chain: Network, client: &Client, cfg: &MithrilSnapshotConfig, +) -> Option { + /// Purge a bad mithril snapshot from disk. + async fn purge_bad_mithril_snapshot(chain: Network, latest_mithril: &SnapshotId) { + debug!("Purging Bad Mithril Snapshot: {latest_mithril}"); + if let Err(error) = remove_dir_all(&latest_mithril).await { + // This should NOT happen because we already checked the Mithril path is fully writable. 
+ error!("Mithril Snapshot background updater for: {chain}: Failed to remove old snapshot {latest_mithril}: {error}"); + } + } + + // Check if we already have a Mithril snapshot downloaded, and IF we do validate it is + // intact. + let latest_mithril = cfg.recover_latest_snapshot_id().await?; + + debug!("Latest Recovered Mithril ID = {latest_mithril}"); + + // Get the actual latest snapshot, shouldn't fail, but say the current is invalid if it + // does. + let (actual_latest, _) = get_latest_snapshots(client, chain).await?; + + // IF the mithril data we have is NOT the current latest (or the immediately previous), it + // may as well be invalid. + if latest_mithril < actual_latest.beacon.immutable_file_number - 1 { + return None; + } + + let Some(snapshot) = get_snapshot_by_id(client, chain, &latest_mithril).await else { + // We have a latest snapshot, but the Aggregator does not know it. + error!("Mithril Snapshot background updater for: {chain}: Latest snapshot {latest_mithril} does not exist on the Aggregator."); + purge_bad_mithril_snapshot(chain, &latest_mithril).await; + return None; + }; + + // Download the snapshot/certificate from the aggregator. + let Some((_, certificate)) = + get_mithril_snapshot_and_certificate(chain, client, &snapshot).await + else { + error!("Mithril Snapshot : Failed to get Snapshot and certificate (Transient Error)."); + + // If we couldn't get the snapshot then we don't need to do anything else, transient + // error. + // purge_bad_mithril_snapshot(chain, &latest_mithril).await; + return None; + }; + + let path = latest_mithril.as_ref(); + let valid = validate_mithril_snapshot(chain, &certificate, path).await; + + if !valid { + error!("Mithril Snapshot : Snapshot fails to validate, can not be recovered."); + purge_bad_mithril_snapshot(chain, &latest_mithril).await; + return None; + } + + Some(latest_mithril) +} + +/// Get the Mithril client and recover our existing mithril snapshot data, if any. 
+async fn recover_existing_snapshot( + cfg: &MithrilSnapshotConfig, tx: &Sender, +) -> Option { + // This is a Mithril Validation, so record it. + mithril_validation_state(cfg.chain, stats::MithrilValidationState::Start); + + // Note: we pre-validated connection before we ran, so failure here should be transient. + // Just wait if we fail, and try again later. + let (client, downloader) = connect_client(cfg).await; + + debug!( + "Mithril Snapshot background updater for: {} : Client connected.", + cfg.chain + ); + + let mut current_snapshot = None; + + // Check if we already have a Mithril snapshot downloaded, and IF we do validate it is + // intact. + if let Some(active_snapshot) = + get_latest_validated_mithril_snapshot(cfg.chain, &client, cfg).await + { + // Read the actual TIP block from the Mithril chain. + match get_mithril_tip(cfg.chain, &active_snapshot.path()).await { + Ok(tip_block) => { + // Validate the Snapshot ID matches the true TIP. + if active_snapshot.tip() == tip_block.point() { + current_snapshot = Some(active_snapshot.clone()); + update_latest_mithril_snapshot(cfg.chain, active_snapshot); + + // Tell the live sync service the current Mithril TIP. + let update = MithrilUpdateMessage { + tip: tip_block.point(), + previous: tip_block.previous(), + }; + if let Err(error) = tx.send(update).await { + error!( + "Failed to send new tip to the live updater for: {}: {error}", + cfg.chain + ); + }; + } else { + error!( + "Actual Tip Block and Active SnapshotID Point Mismatch. {:?} != {:?}", + active_snapshot.tip(), + tip_block.point() + ); + } + }, + Err(error) => { + error!("Mithril snapshot validation failed for: {}. 
Could not read the TIP Block : {}.", cfg.chain, error); + }, + } + } else { + debug!("No latest validated snapshot for: {}", cfg.chain); + } + + if current_snapshot.is_none() { + mithril_validation_state(cfg.chain, stats::MithrilValidationState::Failed); + } else { + mithril_validation_state(cfg.chain, stats::MithrilValidationState::Finish); + } + + // Explicitly free the resources claimed by the Mithril Client and Downloader. + drop(client); + drop(downloader); + + current_snapshot +} + +/// Status of checking if we have a new snapshot to get or not. +enum SnapshotStatus { + /// No update, sleep for this long before checking again + Sleep(Duration), + /// Snapshot has updated, here are the details. + Updated((Snapshot, MithrilCertificate)), +} + +/// Check if we have a new snapshot to download, and if so, return its details. +async fn check_snapshot_to_download( + chain: Network, client: &Client, current_snapshot: &Option, +) -> SnapshotStatus { + debug!("Mithril Snapshot background updater for: {chain} : Getting Latest Snapshot."); + + // This should only fail if the Aggregator is offline. + // Because we check we can talk to the aggregator before we create the downloader task. + let Some((latest_snapshot, chronologically_previous_snapshot)) = + get_latest_snapshots(client, chain).await + else { + return SnapshotStatus::Sleep(DOWNLOAD_ERROR_RETRY_DURATION); + }; + + debug!("Mithril Snapshot background updater for: {chain} : Checking if we are up-to-date {current_snapshot:?}."); + + // Check if the latest snapshot is different from our actual previous one. 
+    if let Some(current_mithril_snapshot) = &current_snapshot {
+        let latest_immutable_file_number = latest_snapshot.beacon.immutable_file_number;
+        debug!("We have a current snapshot: {current_mithril_snapshot} == {latest_immutable_file_number} ??");
+        if *current_mithril_snapshot == latest_immutable_file_number {
+            debug!("Current Snapshot and latest are the same, so wait for it to likely to have changed.");
+            let next_sleep =
+                calculate_sleep_duration(&latest_snapshot, &chronologically_previous_snapshot);
+            return SnapshotStatus::Sleep(next_sleep);
+        }
+    }
+
+    // Download the snapshot/certificate from the aggregator.
+    let Some((snapshot, certificate)) =
+        get_mithril_snapshot_and_certificate(chain, client, &latest_snapshot).await
+    else {
+        // If we couldn't get the snapshot then we don't need to do anything else, transient
+        // error.
+        debug!("Failed to retrieve the snapshot and certificate from aggregator.");
+        return SnapshotStatus::Sleep(DOWNLOAD_ERROR_RETRY_DURATION);
+    };
+
+    SnapshotStatus::Updated((snapshot, certificate))
+}
+
+/// Start Mithril Validation in the background, and return a handle so we can check when
+/// it finishes.
+fn background_validate_mithril_snapshot(
+    chain: Network, certificate: MithrilCertificate, tmp_path: PathBuf,
+) -> tokio::task::JoinHandle<bool> {
+    tokio::spawn(async move {
+        debug!(
+            "Mithril Snapshot background updater for: {} : Check Certificate.",
+            chain
+        );
+
+        stats::mithril_validation_state(chain, stats::MithrilValidationState::Start);
+
+        if !validate_mithril_snapshot(chain, &certificate, &tmp_path).await {
+            stats::mithril_validation_state(chain, stats::MithrilValidationState::Failed);
+            // If we couldn't build the message then assume its a transient error.
+ error!( + chain = %chain, + "Failed to Compute Snapshot Message" + ); + return false; + } + stats::mithril_validation_state(chain, stats::MithrilValidationState::Finish); + + debug!( + "Mithril Snapshot background updater for: {} : Certificate Validated OK.", + chain + ); + + true + }) +} + +/// Convert a chunk filename into its numeric equivalent. +fn chunk_filename_to_chunk_number(chunk: &Path) -> Option { + if let Some(stem) = chunk.file_stem().map(Path::new) { + if let Some(base) = stem.file_name().map(|s| s.to_string_lossy().to_string()) { + if let Ok(num) = base.parse::() { + return Some(num); + } + } + } + None +} + +/// Remove any chunks from the chunk list which exceed the `max_chunk`. +fn trim_chunk_list(chunk_list: &Arc>, max_chunks: u64) { + chunk_list.retain(|entry| { + if let Some(chunk_index) = chunk_filename_to_chunk_number(entry) { + if chunk_index > max_chunks { + debug!("Removing Non immutable Chunk: {:?}", entry); + false + } else { + true + } + } else { + // Huh, not a valid filename, so purge it. + error!("Found an invalid chunk name: {:?}", entry); + false + } + }); +} + +/// Downloads and validates a snapshot from the aggregator. +async fn download_and_validate_snapshot( + client: &Client, downloader: Arc, cfg: &MithrilSnapshotConfig, + snapshot: &Snapshot, certificate: MithrilCertificate, +) -> bool { + debug!( + "Mithril Snapshot background updater for: {} : Download and unpack the Mithril snapshot.", + cfg.chain + ); + + // Download and unpack the actual snapshot archive. 
+ if let Err(error) = client + .snapshot() + .download_unpack(snapshot, &cfg.tmp_path()) + .await + { + // If we couldn't download and unpack, assume its a transient error, + error!("Failed to Download and Unpack snapshot: {error}"); + return false; + } + + debug!( + "Mithril Snapshot background updater for: {} : Add statistics for download.", + cfg.chain + ); + + if let Err(error) = client.snapshot().add_statistics(snapshot).await { + // Just log not fatal to anything. + error!( + "Could not increment snapshot download statistics for {}: {error}", + cfg.chain + ); + // We can process the download even after this fails. + } + + debug!( + "Mithril Snapshot background updater for: {} : Index and Check Certificate.", + cfg.chain + ); + + let chunk_list = downloader.get_new_chunks(); + // Remove the last chunks from the list, if they are > the max_chunk thats immutable. + let max_chunk = snapshot.beacon.immutable_file_number; + trim_chunk_list(&chunk_list, max_chunk); + + let validate_handle = + background_validate_mithril_snapshot(cfg.chain, certificate, cfg.tmp_path()); + + if !validate_handle.await.unwrap_or(false) { + error!("Failed to validate for {}", cfg.chain); + return false; + } + + true +} + +/// We can accumulate junk depending on errors or when we terminate, make sure we are +/// always clean. +async fn cleanup(cfg: &MithrilSnapshotConfig) { + if let Err(error) = cfg.cleanup().await { + error!( + "Mithril Snapshot background updater for: {} : Error cleaning up: {:?}", + cfg.chain, error + ); + } +} + +/// Sleep until its likely there has been another mithril snapshot update. +async fn sleep_until_next_probable_update( + cfg: &MithrilSnapshotConfig, next_sleep: &Duration, +) -> Duration { + debug!( + "Mithril Snapshot background updater for: {} : Sleeping for {}.", + cfg.chain, + format_duration(*next_sleep) + ); + // Wait until its likely we have a new snapshot ready to download. 
+    sleep(*next_sleep).await;
+
+    // Default sleep if we end up back at the top of this loop because of an error.
+    DOWNLOAD_ERROR_RETRY_DURATION
+}
+
+/// Cleanup the client explicitly and do a new iteration of the loop.
+macro_rules! next_iteration {
+    ($client:ident, $downloader:ident) => {
+        drop($client);
+        drop($downloader);
+
+        continue;
+    };
+}
+
+/// Handle the background downloading of Mithril snapshots for a given network.
+/// Note: There can ONLY be at most three of these running at any one time.
+/// This is because there can ONLY be one snapshot for each of the three known Cardano
+/// networks.
+/// # Arguments
+///
+/// * `network` - The network type for the client to connect to.
+/// * `aggregator_url` - A reference to the URL of an aggregator that can be used to
+///   create the client.
+/// * `genesis_vkey` - The genesis verification key, which is needed to authenticate with
+///   the server.
+///
+/// # Returns
+///
+/// This does not return, it is a background task.
+#[allow(clippy::too_many_lines)]
+pub(crate) async fn background_mithril_update(
+    cfg: MithrilSnapshotConfig, tx: Sender,
+) {
+    debug!(
+        "Mithril Snapshot background updater for: {} from {} to {} : Starting",
+        cfg.chain,
+        cfg.aggregator_url,
+        cfg.path.to_string_lossy()
+    );
+    let mut next_sleep = Duration::from_secs(0);
+
+    let mut current_snapshot = recover_existing_snapshot(&cfg, &tx).await;
+
+    loop {
+        debug!("Background Mithril Updater - New Loop");
+
+        cleanup(&cfg).await;
+
+        next_sleep = sleep_until_next_probable_update(&cfg, &next_sleep).await;
+
+        let (client, downloader) = connect_client(&cfg).await;
+
+        let (snapshot, certificate) =
+            match check_snapshot_to_download(cfg.chain, &client, &current_snapshot).await {
+                SnapshotStatus::Sleep(sleep) => {
+                    next_sleep = sleep;
+                    next_iteration!(client, downloader);
+                },
+                SnapshotStatus::Updated(update) => update,
+            };
+
+        if !download_and_validate_snapshot(
+            &client,
+            downloader.clone(),
+            &cfg,
+            &snapshot,
+            certificate,
+        )
+ .await + { + error!("Failed to Download or Validate a snapshot."); + mithril_sync_failure(cfg.chain, stats::MithrilSyncFailures::DownloadOrValidation); + + next_iteration!(client, downloader); + } + + // Download was A-OK - Update the new immutable tip. + let tip = match get_mithril_tip(cfg.chain, &cfg.tmp_path()).await { + Ok(tip) => tip, + Err(error) => { + // If we couldn't get the tip then assume its a transient error. + error!( + "Failed to Get Tip from Snapshot for {}: {error}", + cfg.chain + ); + mithril_sync_failure(cfg.chain, stats::MithrilSyncFailures::FailedToGetTip); + + next_iteration!(client, downloader); + }, + }; + + debug!("New Immutable TIP = {}", tip); + + // Check that the new tip is more advanced than the OLD tip. + if let Some(active_snapshot) = current_snapshot.clone() { + if tip <= active_snapshot.tip() { + error!( + "New Tip is not more advanced than the old tip for: {}", + cfg.chain + ); + mithril_sync_failure(cfg.chain, stats::MithrilSyncFailures::TipDidNotAdvance); + next_iteration!(client, downloader); + } + } + + // Got a good new tip, so switch to the new mithril image. + match cfg.activate(snapshot.beacon.immutable_file_number).await { + Ok(new_path) => { + debug!( + "Mithril Snapshot background updater for: {} : Updated TIP.", + cfg.chain + ); + current_snapshot = SnapshotId::new(&new_path, tip.point()); + + if let Some(latest_snapshot) = current_snapshot.clone() { + // Update the latest snapshot data record + update_latest_mithril_snapshot(cfg.chain, latest_snapshot); + + // Tell the live updater that the Immutable TIP has updated. 
+ if let Err(error) = tx + .send(MithrilUpdateMessage { + tip: tip.point(), + previous: tip.previous(), + }) + .await + { + error!( + "Failed to send new tip to the live updater for: {}: {error}", + cfg.chain + ); + mithril_sync_failure( + cfg.chain, + stats::MithrilSyncFailures::TipFailedToSendToUpdater, + ); + next_iteration!(client, downloader); + }; + } + }, + Err(err) => { + error!( + chain = cfg.chain.to_string(), + "Failed to activate new snapshot : {err}" + ); + mithril_sync_failure( + cfg.chain, + stats::MithrilSyncFailures::FailedToActivateNewSnapshot, + ); + next_iteration!(client, downloader); + }, + } + next_iteration!(client, downloader); + } +} diff --git a/hermes/crates/cardano-chain-follower/src/mithril_turbo_downloader.rs b/hermes/crates/cardano-chain-follower/src/mithril_turbo_downloader.rs new file mode 100644 index 000000000..86927e4ff --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/mithril_turbo_downloader.rs @@ -0,0 +1,397 @@ +//! Turbo Downloads for Mithril Snapshots. 
+ +use std::{ + cmp, + ffi::OsStr, + io::{BufReader, Read}, + path::{Path, PathBuf}, + // process::Stdio, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, OnceLock, + }, +}; + +use anyhow::{anyhow, bail}; +// use async_compression::tokio::bufread::ZstdDecoder; +use async_trait::async_trait; +use dashmap::DashSet; +use fmmap::{ + // tokio::{AsyncMmapFile, AsyncMmapFileExt, AsyncOptions}, + MmapFileExt, +}; +use memx::memcmp; +use mithril_client::{ + common::CompressionAlgorithm, snapshot_downloader::SnapshotDownloader, MithrilResult, +}; +use tar::{Archive, EntryType}; +use tokio::{ + // fs::{create_dir_all, symlink}, + fs::create_dir_all, + // process::Command, + // sync::mpsc::{self, UnboundedSender}, + task::spawn_blocking, + // task::{spawn_blocking, JoinHandle}, +}; +// use tokio_stream::StreamExt; +// use tokio_util::codec::{FramedRead, LinesCodec}; +use tracing::{debug, error}; +use zstd::Decoder; + +use crate::{ + mithril_snapshot_config::MithrilSnapshotConfig, + mithril_snapshot_data::latest_mithril_snapshot_data, + stats::{self}, + turbo_downloader::ParallelDownloadProcessor, + utils::usize_from_saturating, +}; + +/// A snapshot downloader that accelerates Download using `aria2`. +pub struct Inner { + /// Configuration for the snapshot sync. + cfg: MithrilSnapshotConfig, + /// Last hashmap/list of changed chunks from the previous download + new_chunks: Arc>, + + /// The number of files that were new in this download. + new_files: AtomicU64, + /// The number of files that changed in this download. + chg_files: AtomicU64, + /// The total number of files in the download. + tot_files: AtomicU64, + /// The total size of the files extracted in the download. + ext_size: AtomicU64, + /// The total size of the files we deduplicated. + ddup_size: AtomicU64, + + /// The download processor for the current file download. + dl_handler: std::sync::OnceLock, +} + +/// This macro is what happens every time the file is different from previous. +macro_rules! 
changed_file { + ($self:ident, $rel_file:ident, $abs_file:ident, $new_size:ident) => { + $self.chg_files.fetch_add(1, Ordering::SeqCst); + if $abs_file.extension() == Some(OsStr::new("chunk")) { + $self.new_chunks.insert($abs_file); + } + }; +} + +/// This macro is what happens every time we decide the file can't be deduplicated. +macro_rules! new_file { + ($self:ident, $rel_file:ident, $abs_file:ident, $new_size:ident) => { + $self.new_files.fetch_add(1, Ordering::SeqCst); + if $abs_file.extension() == Some(OsStr::new("chunk")) { + $self.new_chunks.insert($abs_file); + } + }; +} + +impl Inner { + /// Synchronous Download and Dedup archive. + /// + /// Stream Downloads and Decompresses files, and deduplicates them as they are + /// extracted from the embedded tar archive. + /// + /// Per Entry: + /// If the file is NOT to be deduplicated, OR A previous file with the same name and + /// size does not exist, then just extract it where its supposed to go. + /// + /// To Dedup, the original file is mam-mapped. + /// The new file is extracted to an in-memory buffer. + /// If they compare the same, the original file is `HardLinked` to the new file name. + /// Otherwise the new file buffer is saved to disk with the new file name. + fn dl_and_dedup(&self, _location: &str, _target_dir: &Path) -> MithrilResult<()> { + let mut archive = self.create_archive_extractor()?; + + // Iterate the files in the archive. 
+ let entries = match archive.entries() { + Ok(entries) => entries, + Err(error) => bail!("Failed to get entries from the archive: {error}"), + }; + + let tmp_dir = self.cfg.tmp_path(); + let latest_snapshot = latest_mithril_snapshot_data(self.cfg.chain); + + for entry in entries { + let mut entry = match entry { + Ok(entry) => entry, + Err(error) => bail!("Failed to get an entry from the archive: {error}"), + }; + let rel_file = entry.path()?.to_path_buf(); + let entry_size = entry.size(); + + // debug!(chain = %self.cfg.chain, "DeDup : Extracting {}:{} loc {location} target {}", + // rel_file.to_string_lossy(), entry_size, target_dir.to_string_lossy()); + + // Check if we need to extract this path or not. + if !self.check_for_extract(&rel_file, entry.header().entry_type()) { + continue; + } + + // Count total files processed. + self.tot_files.fetch_add(1, Ordering::SeqCst); + + let mut abs_file = tmp_dir.clone(); + abs_file.push(rel_file.clone()); + + let mut prev_file = latest_snapshot.id().path_if_exists(); + if let Some(prev_file) = &mut prev_file { + prev_file.push(rel_file.clone()); + } + + // debug!(chain = %self.cfg.chain, "DeDup : tmp_dir {} abs_file {} prev_file + // {prev_file:?}", tmp_dir.to_string_lossy(), abs_file.to_string_lossy() ); + + self.ext_size.fetch_add(entry_size, Ordering::SeqCst); + + // Try and deduplicate the file if we can, otherwise just extract it. + if let Ok((prev_mmap, _)) = Self::can_deduplicate(&rel_file, entry_size, &prev_file) { + let expected_file_size = usize_from_saturating(entry_size); + let mut buf: Vec = Vec::with_capacity(expected_file_size); + if entry.read_to_end(&mut buf)? != expected_file_size { + bail!( + "Failed to read file {} of size {} got {}", + rel_file.display(), + entry_size, + buf.len() + ); + } + // Got the full file and its the expected size. Is it different? + if memcmp(prev_mmap.as_slice(), buf.as_slice()) == cmp::Ordering::Equal { + // Same so lets Hardlink it, and throw away the temp buffer. 
+ + // Make sure our big mmap get dropped. + drop(prev_mmap); + + // File is the same, so dedup it. + if self.cfg.dedup_tmp(&abs_file, &latest_snapshot).is_ok() { + self.ddup_size.fetch_add(entry_size, Ordering::SeqCst); + changed_file!(self, rel_file, abs_file, entry_size); + drop(buf); + continue; + } + } + + if let Err(error) = std::fs::write(&abs_file, buf) { + error!(chain = %self.cfg.chain, "Failed to write file {} got {}", abs_file.display(), error); + bail!("Failed to write file {} got {}", abs_file.display(), error); + } + } else { + // No dedup, just extract it into the tmp directory as-is. + entry.unpack_in(&tmp_dir)?; + debug!(chain = %self.cfg.chain, "DeDup: Extracted file {rel_file:?}:{entry_size}"); + } + new_file!(self, rel_file, abs_file, entry_size); + } + + let Some(dl_handler) = self.dl_handler.get() else { + bail!("Failed to get the Parallel Download processor!"); + }; + + debug!(chain = %self.cfg.chain, "Download {} bytes", dl_handler.dl_size()); + + stats::mithril_dl_finished(self.cfg.chain, Some(dl_handler.dl_size())); + + Ok(()) + } + + /// Create a TAR archive extractor from the downloading file and a zstd decompressor. + fn create_archive_extractor( + &self, + ) -> MithrilResult>>>> + { + let Some(dl_handler) = self.dl_handler.get() else { + bail!("Failed to get the Parallel Download processor!"); + }; + let buf_reader = BufReader::new(dl_handler.clone()); + let decoder = match zstd::Decoder::new(buf_reader) { + Ok(decoder) => decoder, + Err(error) => bail!("Failed to create ZSTD decoder: {error}"), + }; + Ok(tar::Archive::new(decoder)) + } + + /// Check if we are supposed to extract this file from the archive or not. + fn check_for_extract(&self, path: &Path, etype: EntryType) -> bool { + if path.is_absolute() { + error!(chain = %self.cfg.chain, "DeDup : Cannot extract an absolute path: {:?}", path); + return false; + } + + if etype.is_dir() { + // We don't do anything with just a path, so skip it. 
+ return false; + } + + if !etype.is_file() { + error!(chain = %self.cfg.chain, "DeDup : Cannot extract a non-file: {:?}:{:?}", path, etype); + return false; + } + + true + } + + /// Check if a given path from the archive is able to be deduplicated. + fn can_deduplicate( + rel_file: &Path, file_size: u64, prev_file: &Option, + ) -> MithrilResult<(fmmap::MmapFile, u64)> { + // Can't dedup if the current file is not de-dupable (must be immutable) + if rel_file.starts_with("immutable") { + // Can't dedup if we don't have a previous file to dedup against. + if let Some(prev_file) = prev_file { + if let Some(current_size) = get_file_size_sync(prev_file) { + // If the current file is not exactly the same as the previous file size, we + // can't dedup. + if file_size == current_size { + if let Ok(pref_file_loaded) = mmap_open_sync(prev_file) { + if pref_file_loaded.1 == file_size { + return Ok(pref_file_loaded); + } + } + } + } + } + } + bail!("Can not deduplicate."); + } +} + +/// A snapshot downloader that accelerates Download using `aria2`. +pub struct MithrilTurboDownloader { + /// inner arc wrapped configuration + inner: Arc, +} + +impl MithrilTurboDownloader { + /// Constructs a new `HttpSnapshotDownloader`. + pub fn new(cfg: MithrilSnapshotConfig) -> Self { + // Test if the HTTP Client can properly be created. + let dl_config = cfg.dl_config.clone().unwrap_or_default(); + + let cfg = cfg.with_dl_config(dl_config); + + Self { + inner: Arc::new(Inner { + cfg, + new_chunks: Arc::new(DashSet::new()), + new_files: AtomicU64::new(0), + chg_files: AtomicU64::new(0), + tot_files: AtomicU64::new(0), + ext_size: AtomicU64::new(0), + ddup_size: AtomicU64::new(0), + dl_handler: OnceLock::new(), + }), + } + } + + /// Take the hashmap for the previous download. + pub fn get_new_chunks(&self) -> Arc> { + self.inner.new_chunks.clone() + } + + /// Create directories required to exist for download to succeed. 
+ async fn create_directories(&self, target_dir: &Path) -> MithrilResult<()> { + if let Err(error) = create_dir_all(target_dir).await { + let msg = format!( + "Target directory {} could not be created: {}", + target_dir.to_string_lossy(), + error + ); + Err(anyhow!(msg.clone()).context(msg))?; + } + + Ok(()) + } + + /// Parallel Download, Extract and Dedup the Mithril Archive. + async fn dl_and_dedup(&self, location: &str, target_dir: &Path) -> MithrilResult<()> { + // Get a copy of the inner data to use in the sync download task. + let inner = self.inner.clone(); + let location = location.to_owned(); + let target_dir = target_dir.to_owned(); + + // This is fully synchronous IO, so do it on a sync thread. + let result = spawn_blocking(move || inner.dl_and_dedup(&location, &target_dir)).await; + + if let Ok(result) = result { + return result; + } + + stats::mithril_dl_finished(self.inner.cfg.chain, None); + bail!("Download and Dedup task failed"); + } +} + +/// Get the size of a particular file. None = failed to get size (doesn't matter why). +fn get_file_size_sync(file: &Path) -> Option { + let Ok(metadata) = file.metadata() else { + return None; + }; + Some(metadata.len()) +} + +/// Open a file using mmap for performance. +fn mmap_open_sync(path: &Path) -> MithrilResult<(fmmap::MmapFile, u64)> { + match fmmap::MmapFile::open_with_options(path, fmmap::Options::new().read(true).populate()) { + Ok(file) => { + let len = file.len() as u64; + Ok((file, len)) + }, + Err(error) => { + error!(error=%error, file=%path.to_string_lossy(), "Failed to open file"); + Err(error.into()) + }, + } +} + +#[async_trait] +impl SnapshotDownloader for MithrilTurboDownloader { + async fn download_unpack( + &self, location: &str, target_dir: &Path, _compression_algorithm: CompressionAlgorithm, + _download_id: &str, _snapshot_size: u64, + ) -> MithrilResult<()> { + self.create_directories(target_dir).await?; + + // DL Start stats set after DL actually started inside the probe call. 
+ self.dl_and_dedup(location, target_dir).await?; + + let tot_files = self.inner.tot_files.load(Ordering::SeqCst); + let chg_files = self.inner.chg_files.load(Ordering::SeqCst); + let new_files = self.inner.new_files.load(Ordering::SeqCst); + + stats::mithril_extract_finished( + self.inner.cfg.chain, + Some(self.inner.ext_size.load(Ordering::SeqCst)), + self.inner.ddup_size.load(Ordering::SeqCst), + tot_files - (chg_files + new_files), + chg_files, + new_files, + ); + + debug!("Download and Unpack finished='{location}' to '{target_dir:?}'."); + + Ok(()) + } + + async fn probe(&self, location: &str) -> MithrilResult<()> { + debug!("Probe Snapshot location='{location}'."); + + let dl_config = self.inner.cfg.dl_config.clone().unwrap_or_default(); + let dl_processor = ParallelDownloadProcessor::new(location, dl_config).await?; + + // Decompress and extract and de-dupe each file in the archive. + stats::mithril_extract_started(self.inner.cfg.chain); + + // We also immediately start downloading now. + stats::mithril_dl_started(self.inner.cfg.chain); + + // Save the DownloadProcessor in the inner struct for use to process the downloaded data. + if let Err(_error) = self.inner.dl_handler.set(dl_processor) { + bail!("Failed to set the inner dl_handler. Must already be set?"); + } + + Ok(()) + } +} diff --git a/hermes/crates/cardano-chain-follower/src/multi_era_block_data.rs b/hermes/crates/cardano-chain-follower/src/multi_era_block_data.rs new file mode 100644 index 000000000..c8c21ea41 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/multi_era_block_data.rs @@ -0,0 +1,693 @@ +//! Multi Era CBOR Encoded Block Data +//! +//! Data about how the block/transactions can be encoded is found here: +//! +//! +//! DO NOT USE the documentation/cddl definitions from the head of this repo because it +//! currently lacks most of the documentation needed to understand the format and is also +//! incorrectly generated and contains errors that will be difficult to discern. 
+ +use std::{cmp::Ordering, fmt::Display, sync::Arc}; + +use ouroboros::self_referencing; +use tracing::debug; + +use crate::{ + error::Error, metadata, stats::stats_invalid_block, witness::TxWitness, Network, Point, +}; + +/// Self-referencing CBOR encoded data of a multi-era block. +/// Note: The fields in the original struct can not be accessed directly +/// The builder creates accessor methods which are called +/// `borrow_raw_data()` and `borrow_block()` +#[self_referencing] +#[derive(Debug)] +pub(crate) struct SelfReferencedMultiEraBlock { + /// The CBOR encoded data of a multi-era block. + raw_data: Vec, + + /// The decoded multi-era block. + /// References the `raw_data` field. + #[borrows(raw_data)] + #[covariant] + block: pallas::ledger::traverse::MultiEraBlock<'this>, +} + +/// Multi-era block - inner. +#[derive(Debug)] +pub struct MultiEraBlockInner { + /// What blockchain was the block produced on. + //#[allow(dead_code)] + pub chain: Network, + /// The Point on the blockchain this block can be found. + point: Point, + /// The previous point on the blockchain before this block. + /// When the current point is Genesis, so is the previous. + previous: Point, + /// The decoded multi-era block. + data: SelfReferencedMultiEraBlock, + /// Decoded Metadata in the transactions in the block. + metadata: metadata::DecodedTransaction, + /// A map of public key hashes to the public key and transaction numbers they are in. + #[allow(dead_code)] + witness_map: Option, +} + +/// Multi-era block. +#[derive(Clone, Debug)] +pub struct MultiEraBlock { + /// What fork is the block on? + /// This is NOT part of the inner block, because it is not to be protected by the Arc. + /// It can change at any time due to rollbacks detected on the live-chain. 
+ /// This means that any holder of a `MultiEraBlock` will have the actual fork their + /// block was on when they read it, the live-chain code can modify the actual fork + /// count at any time without that impacting consumers processing the data. + /// The fork count itself is used so an asynchronous follower can properly work out + /// how far to roll back on the live-chain in order to resynchronize, without + /// keeping a full state of processed blocks. + /// Followers, simply need to step backwards on the live chain until they find the + /// previous block they followed, or reach a fork that is <= the fork of the + /// previous block they followed. They can then safely re-follow from that earlier + /// point, with full integrity. fork is 0 on any immutable block. + /// It starts at 1 for live blocks, and is only incremented if the live-chain tip is + /// purged because of a detected fork based on data received from the peer node. + /// It does NOT count the strict number of forks reported by the peer node. + fork: u64, + /// The Immutable decoded data about the block itself. + inner: Arc, +} + +impl MultiEraBlock { + /// Creates a new `MultiEraBlockData` from the given bytes. + /// + /// # Errors + /// + /// If the given bytes cannot be decoded as a multi-era block, an error is returned. 
+ fn new_block( + chain: Network, raw_data: Vec, previous: &Point, fork: u64, + ) -> anyhow::Result { + let builder = SelfReferencedMultiEraBlockTryBuilder { + raw_data, + block_builder: |raw_data| -> Result<_, Error> { + pallas::ledger::traverse::MultiEraBlock::decode(raw_data) + .map_err(|err| Error::Codec(err.to_string())) + }, + }; + let self_ref_block = builder.try_build()?; + let decoded_block = self_ref_block.borrow_block(); + + let witness_map = TxWitness::new(&decoded_block.txs()).ok(); + + let slot = decoded_block.slot(); + + let point = Point::new(slot, decoded_block.hash().to_vec()); + + let byron_block = matches!( + decoded_block, + pallas::ledger::traverse::MultiEraBlock::Byron(_) + ); + + // debug!("New Block: {slot} {point} {}", *previous); + + // Dump the early mainnet blocks because somethings funny in there. + // if slot == 0 || slot == 21600 { + // debug!("Block of interest {slot} {:?}", decoded_block); + //} + + // Validate that the Block point is valid. + if !previous.is_origin() { + // Every 21600 Blocks, Byron Era has duplicated sequential slot#'s. + // So this filters them out from the sequential point check. + // The Hash chain is still checked. + if (!byron_block || ((slot % 21600) != 0)) && *previous >= slot { + return Err(Error::Codec(format!( + "Previous slot is not less than current slot:{slot}" + ))); + } + + // Special case, when the previous block is actually UNKNOWN, we can't check it. 
+ if !previous.is_unknown() + // Otherwise, we make sure the hash chain is intact + && !previous.cmp_hash(&decoded_block.header().previous_hash()) + { + debug!("{}, {:?}", previous, decoded_block.header().previous_hash()); + + return Err(Error::Codec( + "Previous Block Hash mismatch with block".to_string(), + )); + } + } + + let metadata = metadata::DecodedTransaction::new(chain, decoded_block); + + Ok(Self { + fork, + inner: Arc::new(MultiEraBlockInner { + chain, + point, + previous: previous.clone(), + data: self_ref_block, + metadata, + witness_map, + }), + }) + } + + /// Creates a new `MultiEraBlockData` from the given bytes. + /// + /// # Errors + /// + /// If the given bytes cannot be decoded as a multi-era block, an error is returned. + pub fn new( + chain: Network, raw_data: Vec, previous: &Point, fork: u64, + ) -> anyhow::Result { + // This lets us reliably count any bad block arising from deserialization. + let block = MultiEraBlock::new_block(chain, raw_data, previous, fork); + if block.is_err() { + stats_invalid_block(chain, fork == 0); + } + block + } + + /// Remake the block on a new fork. + pub fn set_fork(&mut self, fork: u64) { + self.fork = fork; + } + + /// Decodes the data into a multi-era block. + #[must_use] + #[allow(clippy::missing_panics_doc)] + pub fn decode(&self) -> &pallas::ledger::traverse::MultiEraBlock { + self.inner.data.borrow_block() + } + + /// Decodes the data into a multi-era block. + #[must_use] + #[allow(clippy::missing_panics_doc)] + pub fn raw(&self) -> &Vec { + self.inner.data.borrow_raw_data() + } + + /// Returns the block point of this block. + #[must_use] + pub fn point(&self) -> Point { + self.inner.point.clone() + } + + /// Returns the block point of the previous block. + #[must_use] + pub fn previous(&self) -> Point { + self.inner.previous.clone() + } + + /// Is the block data immutable on-chain. + #[must_use] + pub fn immutable(&self) -> bool { + self.fork == 0 + } + + /// Is the block data immutable on-chain. 
+ #[must_use] + pub fn fork(&self) -> u64 { + self.fork + } + + /// What chain was the block from + #[must_use] + pub fn chain(&self) -> Network { + self.inner.chain + } + + /// Get The Decoded Metadata fora a transaction and known label from the block + #[must_use] + pub fn txn_metadata( + &self, txn_idx: usize, label: u64, + ) -> Option> { + self.inner.metadata.get_metadata(txn_idx, label) + } + + /// Get The Raw Metadata fora a transaction and known label from the block + #[must_use] + pub fn txn_raw_metadata(&self, txn_idx: usize, label: u64) -> Option>> { + self.inner.metadata.get_raw_metadata(txn_idx, label) + } + + /// Returns the witness map for the block. + #[allow(dead_code)] + pub(crate) fn witness_map(&self) -> Option<&TxWitness> { + self.inner.witness_map.as_ref() + } +} + +impl Display for MultiEraBlock { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let fork = self.fork; + let block_data = &self.inner.data; + let block = block_data.borrow_block(); + let block_number = block.number(); + let slot = block.slot(); + let size = block.size(); + let txns = block.tx_count(); + let aux_data = block.has_aux_data(); + + let fork = if self.immutable() { + "Immutable".to_string() + } else { + format!("Fork: {fork}") + }; + + let block_era = match block { + pallas::ledger::traverse::MultiEraBlock::EpochBoundary(_) => { + "Byron Epoch Boundary".to_string() + }, + pallas::ledger::traverse::MultiEraBlock::AlonzoCompatible(_, era) => { + format!("{era}") + }, + pallas::ledger::traverse::MultiEraBlock::Babbage(_) => "Babbage".to_string(), + pallas::ledger::traverse::MultiEraBlock::Byron(_) => "Byron".to_string(), + pallas::ledger::traverse::MultiEraBlock::Conway(_) => "Conway".to_string(), + _ => "Unknown".to_string(), + }; + write!(f, "{block_era} block : {}, Previous {} : Slot# {slot} : {fork} : Block# {block_number} : Size {size} : Txns {txns} : AuxData? 
{aux_data}", + self.point(), self.previous())?; + Ok(()) + } +} + +impl PartialEq for MultiEraBlock { + /// Compare two `MultiEraBlock` by their current points. + /// Ignores the Hash, we only check for equality of the Slot#. + fn eq(&self, other: &Self) -> bool { + self.partial_cmp(other) == Some(Ordering::Equal) + } +} + +impl Eq for MultiEraBlock {} + +impl PartialOrd for MultiEraBlock { + /// Compare two `MultiEraBlock` by their points. + /// Only checks the Slot#. + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for MultiEraBlock { + /// Compare two `LiveBlocks` by their points. + /// Only checks the Slot#. + fn cmp(&self, other: &Self) -> Ordering { + self.inner.point.cmp(&other.inner.point) + } +} + +// Allows us to compare a MultiEraBlock against a Point directly (Just the slot#). +impl PartialEq for MultiEraBlock { + // Equality ONLY checks the Slot# + fn eq(&self, other: &Point) -> bool { + Some(Ordering::Equal) == self.partial_cmp(other) + } +} + +impl PartialOrd for MultiEraBlock { + /// Compare a `MultiEraBlock` to a `Point` by their points. + /// Only checks the Slot#. 
+ fn partial_cmp(&self, other: &Point) -> Option { + Some(self.inner.point.cmp(other)) + } +} + +#[cfg(test)] +pub(crate) mod tests { + use std::ops::Add; + + use anyhow::Ok; + + use crate::{point::ORIGIN_POINT, MultiEraBlock, Network, Point}; + + struct TestRecord { + raw: Vec, + previous: Point, + } + + /// Byron Test Block data + fn byron_block() -> Vec { + hex::decode(include_str!("./../test_data/byron.block")) + .expect("Failed to decode hex block.") + } + + /// Shelley Test Block data + fn shelley_block() -> Vec { + hex::decode(include_str!("./../test_data/shelley.block")) + .expect("Failed to decode hex block.") + } + + /// Mary Test Block data + fn mary_block() -> Vec { + hex::decode(include_str!("./../test_data/mary.block")).expect("Failed to decode hex block.") + } + + /// Allegra Test Block data + fn allegra_block() -> Vec { + hex::decode(include_str!("./../test_data/allegra.block")) + .expect("Failed to decode hex block.") + } + + /// Alonzo Test Block data + pub(crate) fn alonzo_block() -> Vec { + hex::decode(include_str!("./../test_data/allegra.block")) + .expect("Failed to decode hex block.") + } + + /// Babbage Test Block data + pub(crate) fn babbage_block() -> Vec { + hex::decode(include_str!("./../test_data/babbage.block")) + .expect("Failed to decode hex block.") + } + + /// An array of test blocks + fn test_blocks() -> Vec { + vec![ + TestRecord { + raw: byron_block(), + previous: ORIGIN_POINT, + }, + TestRecord { + raw: shelley_block(), + previous: ORIGIN_POINT, + }, + TestRecord { + raw: mary_block(), + previous: ORIGIN_POINT, + }, + TestRecord { + raw: allegra_block(), + previous: ORIGIN_POINT, + }, + TestRecord { + raw: alonzo_block(), + previous: ORIGIN_POINT, + }, + ] + } + + // Gets sorted by slot number from highest to lowest + fn sorted_test_blocks() -> Vec> { + vec![ + mary_block(), // 27388606 + allegra_block(), // 18748707 + alonzo_block(), // 18748707 + shelley_block(), // 7948610 + byron_block(), // 3241381 + ] + } + + /// Previous 
Point slot is >= blocks point, but hash is correct (should fail) + #[test] + fn test_multi_era_block_point_compare_1() -> anyhow::Result<()> { + for (i, test_block) in test_blocks().into_iter().enumerate() { + let pallas_block = + pallas::ledger::traverse::MultiEraBlock::decode(test_block.raw.as_slice())?; + + let previous_point = Point::new( + pallas_block.slot().add(i as u64), + pallas_block + .header() + .previous_hash() + .expect("cannot get previous hash") + .to_vec(), + ); + + let block = + MultiEraBlock::new(Network::Preprod, test_block.raw.clone(), &previous_point, 1); + + assert!(block.is_err()); + } + + Ok(()) + } + + /// Previous Point slot is < blocks point, but hash is different. (should fail). + #[test] + fn test_multi_era_block_point_compare_2() -> anyhow::Result<()> { + for test_block in test_blocks() { + let pallas_block = + pallas::ledger::traverse::MultiEraBlock::decode(test_block.raw.as_slice())?; + + let previous_point = Point::new(pallas_block.slot() - 1, vec![0; 32]); + + let block = + MultiEraBlock::new(Network::Preprod, test_block.raw.clone(), &previous_point, 1); + + assert!(block.is_err()); + } + + Ok(()) + } + + /// Previous Point slot is < blocks point, and hash is also correct. (should pass). 
+ #[test] + fn test_multi_era_block_point_compare_3() -> anyhow::Result<()> { + for test_block in test_blocks() { + let pallas_block = + pallas::ledger::traverse::MultiEraBlock::decode(test_block.raw.as_slice())?; + + let previous_point = Point::new( + pallas_block.slot() - 1, + pallas_block + .header() + .previous_hash() + .expect("cannot get previous hash") + .to_vec(), + ); + + let block = + MultiEraBlock::new(Network::Preprod, test_block.raw.clone(), &previous_point, 1)?; + + assert_eq!(block.decode().hash(), pallas_block.hash()); + } + + Ok(()) + } + + fn mk_test_blocks() -> Vec { + let raw_blocks = sorted_test_blocks(); + raw_blocks + .iter() + .map(|block| { + let prev_point = pallas::ledger::traverse::MultiEraBlock::decode(block.as_slice()) + .map(|block| { + Point::new( + block.slot() - 1, + block + .header() + .previous_hash() + .expect("cannot get previous hash") + .to_vec(), + ) + }) + .expect("cannot create point"); + + MultiEraBlock::new(Network::Preprod, block.clone(), &prev_point, 1) + .expect("cannot create multi-era block") + }) + .collect() + } + + fn mk_test_points() -> Vec { + let raw_blocks = sorted_test_blocks(); + raw_blocks + .iter() + .map(|block| { + pallas::ledger::traverse::MultiEraBlock::decode(block.as_slice()) + .map(|block| { + Point::new( + block.slot(), + block + .header() + .previous_hash() + .expect("cannot get previous hash") + .to_vec(), + ) + }) + .expect("cannot create point") + }) + .collect() + } + + /// Compares between blocks using comparison operators + #[test] + fn test_multi_era_block_point_compare_4() -> anyhow::Result<()> { + let multi_era_blocks = mk_test_blocks(); + + let mary_block = multi_era_blocks.first().expect("cannot get block"); + let allegra_block = multi_era_blocks.get(1).expect("cannot get block"); + let alonzo_block = multi_era_blocks.get(2).expect("cannot get block"); + let shelley_block = multi_era_blocks.get(3).expect("cannot get block"); + let byron_block = multi_era_blocks.get(4).expect("cannot 
get block"); + + assert!(mary_block > allegra_block); + assert!(mary_block >= allegra_block); + assert!(mary_block != allegra_block); + assert!(mary_block > alonzo_block); + assert!(mary_block >= alonzo_block); + assert!(mary_block != alonzo_block); + assert!(mary_block > shelley_block); + assert!(mary_block >= shelley_block); + assert!(mary_block != shelley_block); + assert!(mary_block > byron_block); + assert!(mary_block >= byron_block); + + assert!(allegra_block < mary_block); + assert!(allegra_block <= mary_block); + assert!(allegra_block != mary_block); + assert!(allegra_block == alonzo_block); + assert!(allegra_block >= alonzo_block); + assert!(allegra_block <= alonzo_block); + assert!(allegra_block > shelley_block); + assert!(allegra_block >= shelley_block); + assert!(allegra_block != shelley_block); + assert!(allegra_block > byron_block); + assert!(allegra_block >= byron_block); + assert!(allegra_block != byron_block); + + assert!(alonzo_block < mary_block); + assert!(alonzo_block <= mary_block); + assert!(alonzo_block != mary_block); + assert!(alonzo_block == allegra_block); + assert!(alonzo_block >= allegra_block); + assert!(alonzo_block <= allegra_block); + assert!(alonzo_block > shelley_block); + assert!(alonzo_block >= shelley_block); + assert!(alonzo_block != shelley_block); + assert!(alonzo_block > byron_block); + assert!(alonzo_block >= byron_block); + assert!(alonzo_block != byron_block); + + assert!(shelley_block < mary_block); + assert!(shelley_block <= mary_block); + assert!(shelley_block != mary_block); + assert!(shelley_block < allegra_block); + assert!(shelley_block <= allegra_block); + assert!(shelley_block != allegra_block); + assert!(shelley_block < alonzo_block); + assert!(shelley_block <= alonzo_block); + assert!(shelley_block != alonzo_block); + assert!(shelley_block > byron_block); + assert!(shelley_block >= byron_block); + assert!(shelley_block != byron_block); + + assert!(byron_block < mary_block); + assert!(byron_block <= 
mary_block); + assert!(byron_block != mary_block); + assert!(byron_block < allegra_block); + assert!(byron_block <= allegra_block); + assert!(byron_block != allegra_block); + assert!(byron_block < alonzo_block); + assert!(byron_block <= alonzo_block); + assert!(byron_block != alonzo_block); + assert!(byron_block < shelley_block); + assert!(byron_block <= shelley_block); + assert!(byron_block != shelley_block); + + Ok(()) + } + + /// Compares between blocks and points using comparison operators + #[test] + fn test_multi_era_block_point_compare_5() -> anyhow::Result<()> { + let points = mk_test_points(); + let blocks = mk_test_blocks(); + + let mary_block = blocks.first().expect("cannot get block"); + let allegra_block = blocks.get(1).expect("cannot get block"); + let alonzo_block = blocks.get(2).expect("cannot get block"); + let shelley_block = blocks.get(3).expect("cannot get block"); + let byron_block = blocks.get(4).expect("cannot get block"); + + let mary_point = points.first().expect("cannot get point"); + let allegra_point = points.get(1).expect("cannot get point"); + let alonzo_point = points.get(2).expect("cannot get point"); + let shelley_point = points.get(3).expect("cannot get point"); + let byron_point = points.get(4).expect("cannot get point"); + + assert!(mary_block > allegra_point); + assert!(mary_block >= allegra_point); + assert!(mary_block != allegra_point); + assert!(mary_block > alonzo_point); + assert!(mary_block >= alonzo_point); + assert!(mary_block != alonzo_point); + assert!(mary_block > shelley_point); + assert!(mary_block >= shelley_point); + assert!(mary_block != shelley_point); + assert!(mary_block > byron_point); + assert!(mary_block >= byron_point); + + assert!(allegra_block < mary_point); + assert!(allegra_block <= mary_point); + assert!(allegra_block != mary_point); + assert!(allegra_block == alonzo_point); + assert!(allegra_block >= alonzo_point); + assert!(allegra_block <= alonzo_point); + assert!(allegra_block > shelley_point); + 
assert!(allegra_block >= shelley_point); + assert!(allegra_block != shelley_point); + assert!(allegra_block > byron_point); + assert!(allegra_block >= byron_point); + assert!(allegra_block != byron_point); + + assert!(alonzo_block < mary_point); + assert!(alonzo_block <= mary_point); + assert!(alonzo_block != mary_point); + assert!(alonzo_block == allegra_point); + assert!(alonzo_block >= allegra_point); + assert!(alonzo_block <= allegra_point); + assert!(alonzo_block > shelley_point); + assert!(alonzo_block >= shelley_point); + assert!(alonzo_block != shelley_point); + assert!(alonzo_block > byron_point); + assert!(alonzo_block >= byron_point); + assert!(alonzo_block != byron_point); + + assert!(shelley_block < mary_point); + assert!(shelley_block <= mary_point); + assert!(shelley_block != mary_point); + assert!(shelley_block < allegra_point); + assert!(shelley_block <= allegra_point); + assert!(shelley_block != allegra_point); + assert!(shelley_block < alonzo_point); + assert!(shelley_block <= alonzo_point); + assert!(shelley_block != alonzo_point); + assert!(shelley_block > byron_point); + assert!(shelley_block >= byron_point); + assert!(shelley_block != byron_point); + + assert!(byron_block < mary_point); + assert!(byron_block <= mary_point); + assert!(byron_block != mary_point); + assert!(byron_block < allegra_point); + assert!(byron_block <= allegra_point); + assert!(byron_block != allegra_point); + assert!(byron_block < alonzo_point); + assert!(byron_block <= alonzo_point); + assert!(byron_block != alonzo_point); + assert!(byron_block < shelley_point); + assert!(byron_block <= shelley_point); + assert!(byron_block != shelley_point); + + Ok(()) + } + + #[test] + fn test_multi_era_block_with_origin_point() { + for test_block in test_blocks() { + let block = MultiEraBlock::new( + Network::Preprod, + test_block.raw.clone(), + &test_block.previous, + 1, + ); + + assert!(block.is_ok()); + } + } +} diff --git a/hermes/crates/cardano-chain-follower/src/network.rs 
b/hermes/crates/cardano-chain-follower/src/network.rs new file mode 100644 index 000000000..7e36c8d05 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/network.rs @@ -0,0 +1,217 @@ +//! Enum of possible Cardano networks. + +use std::{ffi::OsStr, path::PathBuf}; + +use chrono::{DateTime, Utc}; +use pallas::{ + ledger::traverse::wellknown::GenesisValues, + network::miniprotocols::{MAINNET_MAGIC, PREVIEW_MAGIC, PRE_PRODUCTION_MAGIC}, +}; +// use strum::IntoEnumIterator; +// use strum_macros; +use tracing::debug; + +/// Default name of the executable if we can't derive it. +pub(crate) const DEFAULT_EXE_NAME: &str = "cardano_chain_follower"; +/// ENV VAR name for the data path. +pub(crate) const ENVVAR_MITHRIL_DATA_PATH: &str = "MITHRIL_DATA_PATH"; +/// ENV VAR name for the executable name. +pub(crate) const ENVVAR_MITHRIL_EXE_NAME: &str = "MITHRIL_EXE_NAME"; + +/// Enum of possible Cardano networks. +#[derive( + Debug, + Copy, + Clone, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + strum::EnumIter, + strum::VariantNames, + strum::EnumString, + strum::Display, +)] +#[strum(ascii_case_insensitive)] +pub enum Network { + /// Cardano mainnet network. + Mainnet, + /// Cardano pre-production network. + Preprod, + /// Cardano preview network. + Preview, +} + +// Mainnet Defaults. +/// Mainnet Default Public Cardano Relay. +const DEFAULT_MAINNET_RELAY: &str = "backbone.cardano.iog.io:3001"; +/// Main-net Mithril Signature genesis vkey. +const DEFAULT_MAINNET_MITHRIL_GENESIS_KEY: &str = include_str!("data/mainnet-genesis.vkey"); +/// Default Mithril Aggregator to use. +const DEFAULT_MAINNET_MITHRIL_AGGREGATOR: &str = + "https://aggregator.release-mainnet.api.mithril.network/aggregator"; + +// Preprod Defaults +/// Preprod Default Public Cardano Relay. +const DEFAULT_PREPROD_RELAY: &str = "preprod-node.play.dev.cardano.org:3001"; +/// Preprod network Mithril Signature genesis vkey. 
+const DEFAULT_PREPROD_MITHRIL_GENESIS_KEY: &str = include_str!("data/preprod-genesis.vkey"); +/// Default Mithril Aggregator to use. +const DEFAULT_PREPROD_MITHRIL_AGGREGATOR: &str = + "https://aggregator.release-preprod.api.mithril.network/aggregator"; + +// Preview Defaults +/// Preview Default Public Cardano Relay. +const DEFAULT_PREVIEW_RELAY: &str = "preview-node.play.dev.cardano.org:3001"; +/// Preview network Mithril Signature genesis vkey. +const DEFAULT_PREVIEW_MITHRIL_GENESIS_KEY: &str = include_str!("data/preview-genesis.vkey"); +/// Default Mithril Aggregator to use. +const DEFAULT_PREVIEW_MITHRIL_AGGREGATOR: &str = + "https://aggregator.pre-release-preview.api.mithril.network/aggregator"; + +impl Network { + /// Get the default Relay for a blockchain network. + #[must_use] + pub fn default_relay(self) -> String { + match self { + Network::Mainnet => DEFAULT_MAINNET_RELAY.to_string(), + Network::Preprod => DEFAULT_PREPROD_RELAY.to_string(), + Network::Preview => DEFAULT_PREVIEW_RELAY.to_string(), + } + } + + /// Get the default aggregator for a blockchain. + #[must_use] + pub fn default_mithril_aggregator(self) -> String { + match self { + Network::Mainnet => DEFAULT_MAINNET_MITHRIL_AGGREGATOR.to_string(), + Network::Preprod => DEFAULT_PREPROD_MITHRIL_AGGREGATOR.to_string(), + Network::Preview => DEFAULT_PREVIEW_MITHRIL_AGGREGATOR.to_string(), + } + } + + /// Get the default Mithril Signature genesis key for a blockchain. + #[must_use] + pub fn default_mithril_genesis_key(self) -> String { + match self { + Network::Mainnet => DEFAULT_MAINNET_MITHRIL_GENESIS_KEY.to_string(), + Network::Preprod => DEFAULT_PREPROD_MITHRIL_GENESIS_KEY.to_string(), + Network::Preview => DEFAULT_PREVIEW_MITHRIL_GENESIS_KEY.to_string(), + } + } + + /// Get the default storage location for mithril snapshots. + /// Defaults to: //mithril/ + pub fn default_mithril_path(self) -> PathBuf { + // Get the base path for storing Data. + // IF the ENV var is set, use that. 
+ // Otherwise use the system default data path for an application. + // All else fails default to "/var/lib" + let mut base_path = std::env::var(ENVVAR_MITHRIL_DATA_PATH).map_or_else( + |_| dirs::data_local_dir().unwrap_or("/var/lib".into()), + PathBuf::from, + ); + + // Get the Executable name for the data path. + // IF the ENV var is set, use it, otherwise try and get it from the exe itself. + // Fallback to using a default exe name if all else fails. + let exe_name = std::env::var(ENVVAR_MITHRIL_EXE_NAME).unwrap_or( + std::env::current_exe() + .unwrap_or(DEFAULT_EXE_NAME.into()) + .file_name() + .unwrap_or(OsStr::new(DEFAULT_EXE_NAME)) + .to_string_lossy() + .to_string(), + ); + + // / + base_path.push(exe_name); + + // Put everything in a `mithril` sub directory. + base_path.push("mithril"); + + // // + base_path.push(self.to_string()); + + debug!( + chain = self.to_string(), + path = base_path.to_string_lossy().to_string(), + "DEFAULT Mithril Data Path", + ); + + // Return the final path + base_path + } + + /// Return genesis values for given network + #[must_use] + pub fn genesis_values(self) -> GenesisValues { + match self { + Network::Mainnet => GenesisValues::mainnet(), + Network::Preprod => GenesisValues::preprod(), + Network::Preview => GenesisValues::preview(), + } + } + + /// Convert a given slot# to its Wall Time for a Blockchain network. + #[must_use] + pub fn slot_to_time(&self, slot: u64) -> DateTime { + let genesis = self.genesis_values(); + let wall_clock = genesis.slot_to_wallclock(slot); + + let raw_time: i64 = wall_clock.try_into().unwrap_or(i64::MAX); + DateTime::from_timestamp(raw_time, 0).unwrap_or(DateTime::::MAX_UTC) + } + + /// Convert an arbitrary time to a slot. + /// + /// If the given time predates the blockchain, will return None. + /// + /// The Slot does not have to be a valid slot present in the blockchain. 
+ #[must_use] + pub fn time_to_slot(&self, _time: DateTime) -> Option { + // TODO: Implement this, for now just return None. + None + } +} + +impl From for u64 { + fn from(network: Network) -> Self { + match network { + Network::Mainnet => MAINNET_MAGIC, + Network::Preprod => PRE_PRODUCTION_MAGIC, + Network::Preview => PREVIEW_MAGIC, + } + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use anyhow::Ok; + + use super::*; + + #[test] + fn test_from_str() -> anyhow::Result<()> { + let mainnet = Network::from_str("mainnet")?; + let preprod = Network::from_str("preprod")?; + let preview = Network::from_str("preview")?; + + assert_eq!(mainnet, Network::Mainnet); + assert_eq!(preprod, Network::Preprod); + assert_eq!(preview, Network::Preview); + + let mainnet = Network::from_str("Mainnet")?; + let preprod = Network::from_str("Preprod")?; + let preview = Network::from_str("Preview")?; + + assert_eq!(mainnet, Network::Mainnet); + assert_eq!(preprod, Network::Preprod); + assert_eq!(preview, Network::Preview); + + Ok(()) + } +} diff --git a/hermes/crates/cardano-chain-follower/src/point.rs b/hermes/crates/cardano-chain-follower/src/point.rs new file mode 100644 index 000000000..21396144b --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/point.rs @@ -0,0 +1,590 @@ +//! A Cardano Point on the Blockchain. +//! +//! Wrapped version of the Pallas primitive. +//! We only use this version unless talking to Pallas. + +use std::{ + cmp::Ordering, + fmt::{Debug, Display, Formatter}, +}; + +use pallas::crypto::hash::Hash; + +/// A specific point in the blockchain. It can be used to +/// identify a particular location within the blockchain, such as the tip (the +/// most recent block) or any other block. It has special kinds of `Point`, +/// available as constants: `TIP_POINT`, and `ORIGIN_POINT`. +/// +/// # Attributes +/// +/// * `Point` - The inner type is a `Point` from the `pallas::network::miniprotocols` +/// module. 
This inner `Point` type encapsulates the specific details required to +/// identify a point in the blockchain. +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct Point(pallas::network::miniprotocols::Point); + +/// A truly unknown point in the blockchain. It is used +/// when the previous point is completely unknown and does not correspond to the +/// origin of the blockchain. +/// +/// # Usage +/// +/// `UNKNOWN_POINT` can be used in scenarios where the previous point in the blockchain +/// is not known and should not be assumed to be the origin. It serves as a marker +/// for an indeterminate or unspecified point. +/// +/// The inner `Point` is created with `u64::MIN` and an empty `Vec`, indicating +/// that this is a special marker for an unknown point, rather than a specific +/// point in the blockchain. +pub(crate) const UNKNOWN_POINT: Point = Point(pallas::network::miniprotocols::Point::Specific( + u64::MIN, + Vec::new(), +)); + +/// The tip of the blockchain at the current moment. +/// It is used when the specific point in the blockchain is not known, but the +/// interest is in the most recent block (the tip). The tip is the point where +/// new blocks are being added. +/// +/// # Usage +/// +/// `TIP_POINT` can be used in scenarios where the most up-to-date point in the +/// blockchain is required. It signifies that the exact point is not important +/// as long as it is the latest available point in the chain. +/// +/// The inner `Point` is created with `u64::MAX` and an empty `Vec`, indicating +/// that this is a special marker rather than a specific point in the blockchain. +pub const TIP_POINT: Point = Point(pallas::network::miniprotocols::Point::Specific( + u64::MAX, + Vec::new(), +)); + +/// The origin of the blockchain. It is used when the +/// interest is in the very first point of the blockchain, regardless of its +/// specific details. 
+/// +/// # Usage +/// +/// `ORIGIN_POINT` can be used in scenarios where the starting point of the +/// blockchain is required. It signifies the genesis block or the initial state +/// of the blockchain. +/// +/// The inner `Point` is created with the `Origin` variant from +/// `pallas::network::miniprotocols::Point`, indicating that this is a marker +/// for the blockchain's origin. +pub const ORIGIN_POINT: Point = Point(pallas::network::miniprotocols::Point::Origin); + +impl Point { + /// Creates a new `Point` instance representing a specific + /// point in the blockchain, identified by a given slot and hash. + /// + /// # Parameters + /// + /// * `slot` - A `u64` value representing the slot number in the blockchain. + /// * `hash` - A `Vec` containing the hash of the block at the specified slot. + /// + /// # Returns + /// + /// A new `Point` instance encapsulating the given slot and hash. + /// + /// # Examples + /// + /// ```rs + /// use cardano_chain_follower::Point; + /// + /// let slot = 42; + /// let hash = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + /// let point = Point::new(slot, hash); + /// ``` + #[must_use] + pub fn new(slot: u64, hash: Vec) -> Self { + Self(pallas::network::miniprotocols::Point::Specific(slot, hash)) + } + + /// Creates a new `Point` instance representing a specific + /// point in the blockchain, identified by a given slot, but with an + /// unknown hash. This can be useful in scenarios where the slot is known + /// but the hash is either unavailable or irrelevant. + /// + /// # Parameters + /// + /// * `slot` - A `u64` value representing the slot number in the blockchain. + /// + /// # Returns + /// + /// A new `Point` instance encapsulating the given slot with an empty hash. 
+ /// + /// # Examples + /// + /// ```rs + /// use cardano_chain_follower::Point; + /// + /// let slot = 42; + /// let point = Point::fuzzy(slot); + /// ``` + #[must_use] + pub fn fuzzy(slot: u64) -> Self { + Self(pallas::network::miniprotocols::Point::Specific( + slot, + Vec::new(), + )) + } + + /// Creates a new Fuzzy `Point` from a concrete point. + /// + /// Will not alter either TIP or ORIGIN points. + #[must_use] + pub fn as_fuzzy(&self) -> Self { + if *self == TIP_POINT { + TIP_POINT + } else { + match self.0 { + pallas::network::miniprotocols::Point::Specific(slot, _) => Self::fuzzy(slot), + pallas::network::miniprotocols::Point::Origin => ORIGIN_POINT, + } + } + } + + /// Check if a Point is Fuzzy. + /// + /// Even though we don't know the hash for TIP or Origin, neither of these points + /// are considered to be fuzzy. + /// + /// # Examples + /// + /// ```rs + /// use cardano_chain_follower::Point; + /// + /// let slot = 42; + /// let point = Point::fuzzy(slot); + /// + /// assert!(point.is_fuzzy()); + /// ``` + #[must_use] + pub fn is_fuzzy(&self) -> bool { + if *self == TIP_POINT { + false + } else { + match self.0 { + pallas::network::miniprotocols::Point::Specific(_, ref hash) => hash.is_empty(), + pallas::network::miniprotocols::Point::Origin => false, + } + } + } + + /// Check if a Point is the origin. + /// + /// Origin is the synthetic Origin point, and ALSO any point that's at slot zero with a + /// hash. + /// + /// # Examples + /// + /// ```rs + /// use cardano_chain_follower::Point; + /// + /// let slot = 42; + /// let point = Point::fuzzy(slot); + /// + /// assert!(!point.is_origin()); + /// ``` + #[must_use] + pub fn is_origin(&self) -> bool { + match self.0 { + pallas::network::miniprotocols::Point::Specific(slot, ref hash) => { + slot == 0 && !hash.is_empty() + }, + pallas::network::miniprotocols::Point::Origin => true, + } + } + + /// Check if a Point is actually unknown. 
+ /// + /// # Examples + /// + /// ```rs + /// use cardano_chain_follower::Point; + /// + /// let point = Point::fuzzy(0); + /// + /// assert!(point.is_unknown()); + /// ``` + #[must_use] + pub fn is_unknown(&self) -> bool { + match self.0 { + pallas::network::miniprotocols::Point::Specific(slot, ref hash) => { + slot == 0 && hash.is_empty() + }, + pallas::network::miniprotocols::Point::Origin => false, + } + } + + /// Check if a Point is the tip of the blockchain. + /// + /// # Examples + /// + /// ```rs + /// use cardano_chain_follower::TIP_POINT; + /// + /// assert!(TIP_POINT.is_tip()); + /// ``` + #[must_use] + pub fn is_tip(&self) -> bool { + match self.0 { + pallas::network::miniprotocols::Point::Specific(slot, ref hash) => { + slot == u64::MAX && hash.is_empty() + }, + pallas::network::miniprotocols::Point::Origin => false, + } + } + + /// Compares the hash stored in the `Point` with a known hash. + /// It returns `true` if the hashes match and `false` otherwise. If the + /// provided hash is `None`, the function checks if the `Point` has an + /// empty hash. + /// + /// # Parameters + /// + /// * `hash` - An `Option>` containing the hash to compare against. If + /// `Some`, the contained hash is compared with the `Point`'s hash. If `None`, the + /// function checks if the `Point`'s hash is empty. + /// + /// # Returns + /// + /// A `bool` indicating whether the hashes match. 
+ /// + /// # Examples + /// + /// ```rs + /// use cardano_chain_follower::Point; + /// + /// use pallas::crypto::hash::Hash; + /// + /// let point = Point::new(42, vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]); + /// let hash = Some(Hash::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])); + /// assert!(point.cmp_hash(&hash)); + /// + /// let empty_point = Point::fuzzy(42); + /// assert!(empty_point.cmp_hash(&None)); + /// ``` + #[must_use] + pub fn cmp_hash(&self, hash: &Option>) -> bool { + match hash { + Some(cmp_hash) => { + match self.0 { + pallas::network::miniprotocols::Point::Specific(_, ref hash) => { + **hash == **cmp_hash + }, + pallas::network::miniprotocols::Point::Origin => false, + } + }, + None => { + match self.0 { + pallas::network::miniprotocols::Point::Specific(_, ref hash) => hash.is_empty(), + pallas::network::miniprotocols::Point::Origin => true, + } + }, + } + } + + /// Retrieves the slot number from the `Point`. If the `Point` + /// is the origin, it returns a default slot number. + /// + /// # Returns + /// + /// A `u64` representing the slot number. If the `Point` is the origin, + /// it returns a default slot value, typically `0`. + /// + /// # Examples + /// + /// ```rs + /// use cardano_chain_follower::{Point, ORIGIN_POINT}; + /// + /// let specific_point = Point::new(42, vec![1, 2, 3]); + /// assert_eq!(specific_point.slot_or_default(), 42); + /// + /// let origin_point = ORIGIN_POINT; + /// assert_eq!(origin_point.slot_or_default(), 0); // assuming 0 is the default + /// ``` + #[must_use] + pub fn slot_or_default(&self) -> u64 { + self.0.slot_or_default() + } + + /// Retrieves the hash from the `Point`. If the `Point` is + /// the origin, it returns a default hash value, which is an empty `Vec`. + /// + /// # Returns + /// + /// A `Vec` representing the hash. If the `Point` is the `Origin`, it + /// returns an empty vector. 
+ /// + /// # Examples + /// + /// ```rs + /// use cardano_chain_follower::{Point, ORIGIN_POINT}; + /// + /// let specific_point = Point::new(42, vec![1, 2, 3]); + /// assert_eq!(specific_point.hash_or_default(), vec![1, 2, 3]); + /// + /// let origin_point = ORIGIN_POINT; + /// assert_eq!(origin_point.hash_or_default(), Vec::new()); + /// ``` + #[must_use] + pub fn hash_or_default(&self) -> Vec { + match &self.0 { + pallas::network::miniprotocols::Point::Specific(_, hash) => hash.clone(), + pallas::network::miniprotocols::Point::Origin => Vec::new(), + } + } + + /// Checks if two `Point` instances are strictly equal. + /// Strict equality means both the slot number and hash must be identical. + /// + /// # Parameters + /// + /// * `b` - Another `Point` instance to compare against. + /// + /// # Returns + /// + /// A `bool` indicating whether the two `Point` instances are strictly equal. + /// + /// # Examples + /// + /// ```rs + /// use cardano_chain_follower::Point; + /// + /// let point1 = Point::new(42, vec![1, 2, 3]); + /// let point2 = Point::new(42, vec![1, 2, 3]); + /// assert!(point1.strict_eq(&point2)); + /// + /// let point3 = Point::new(42, vec![1, 2, 3]); + /// let point4 = Point::new(43, vec![1, 2, 3]); + /// assert!(!point3.strict_eq(&point4)); + /// ``` + #[must_use] + pub fn strict_eq(&self, b: &Self) -> bool { + self.0 == b.0 + } +} + +impl Display for Point { + fn fmt(&self, f: &mut Formatter<'_>) -> std::result::Result<(), std::fmt::Error> { + if *self == ORIGIN_POINT { + return write!(f, "Point @ Origin"); + } else if *self == TIP_POINT { + return write!(f, "Point @ Tip"); + } else if *self == UNKNOWN_POINT { + return write!(f, "Point @ Unknown"); + } + + let slot = self.slot_or_default(); + let hash = self.hash_or_default(); + if hash.is_empty() { + return write!(f, "Point @ Probe:{slot}"); + } + write!(f, "Point @ {slot}:{}", hex::encode(hash)) + } +} + +impl From for Point { + fn from(point: pallas::network::miniprotocols::Point) -> Self { + 
 Self(point) + } +} + +impl From for pallas::network::miniprotocols::Point { + fn from(point: Point) -> pallas::network::miniprotocols::Point { + point.0 + } +} + +impl PartialOrd for Point { + /// Implements a partial ordering based on the slot number + /// of two `Point` instances. It only checks the slot number for ordering. + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Point { + /// Implements a total ordering based on the slot number + /// of two `Point` instances. It only checks the slot number for ordering. + fn cmp(&self, other: &Self) -> Ordering { + cmp_point(&self.0, &other.0) + } +} + +impl PartialEq for Point { + /// Allows to compare a `Point` against a raw `u64` slot number. + /// + /// Equality ONLY checks the Slot#, not the hash. + fn eq(&self, other: &u64) -> bool { + self.0.slot_or_default() == *other + } +} + +impl PartialOrd for Point { + /// Allows to compare a `Point` against a raw `u64` slot number. + /// + /// Ordering ONLY checks the Slot#, not the hash. + fn partial_cmp(&self, other: &u64) -> Option { + self.0.slot_or_default().partial_cmp(other) + } +} + +impl PartialEq> for Point { + /// Allows to compare a `Point` against an optional `u64` slot number. + /// + /// Equality ONLY checks the Slot#, not the hash. + /// A `None` slot number never equals any `Point`. + fn eq(&self, other: &Option) -> bool { + if let Some(other) = other { + *self == *other + } else { + false + } + } +} + +impl PartialOrd> for Point { + /// Allows to compare a `Point` against a `u64` (Just the Immutable File Number). + /// + /// Equality ONLY checks the Immutable File Number, not the path. 
+ /// This is because the Filename is already the Immutable File Number. + /// Any point is greater than None. + fn partial_cmp(&self, other: &Option) -> Option { + if let Some(other) = other { + self.partial_cmp(other) + } else { + Some(Ordering::Greater) + } + } +} + +impl Default for Point { + /// Returns the default value for `Point`, which is `UNKNOWN_POINT`. + fn default() -> Self { + UNKNOWN_POINT + } +} + +/// Compare Points, because Pallas does not impl `Ord` for Point. +pub(crate) fn cmp_point( + a: &pallas::network::miniprotocols::Point, b: &pallas::network::miniprotocols::Point, +) -> Ordering { + match a { + pallas::network::miniprotocols::Point::Origin => { + match b { + pallas::network::miniprotocols::Point::Origin => Ordering::Equal, + pallas::network::miniprotocols::Point::Specific(..) => Ordering::Less, + } + }, + pallas::network::miniprotocols::Point::Specific(slot, _) => { + match b { + pallas::network::miniprotocols::Point::Origin => Ordering::Greater, + pallas::network::miniprotocols::Point::Specific(other_slot, _) => { + slot.cmp(other_slot) + }, + } + }, + } +} + +#[cfg(test)] +mod tests { + use pallas::crypto::hash::Hash; + + use crate::*; + + #[test] + fn test_create_points() { + let point1 = Point::new(100u64, vec![]); + let fuzzy1 = Point::fuzzy(100u64); + + assert!(point1 == fuzzy1); + } + + #[test] + fn test_cmp_hash_simple() { + let origin1 = ORIGIN_POINT; + let point1 = Point::new(100u64, vec![8; 32]); + + assert!(!origin1.cmp_hash(&Some(Hash::new([0; 32])))); + assert!(origin1.cmp_hash(&None)); + + assert!(point1.cmp_hash(&Some(Hash::new([8; 32])))); + assert!(!point1.cmp_hash(&None)); + } + + #[test] + fn test_get_hash_simple() { + let point1 = Point::new(100u64, vec![8; 32]); + + assert_eq!(point1.hash_or_default(), vec![8; 32]); + } + + #[test] + fn test_identical_compare() { + let point1 = Point::new(100u64, vec![8; 32]); + let point2 = Point::new(100u64, vec![8; 32]); + let point3 = Point::new(999u64, vec![8; 32]); + + 
assert!(point1.strict_eq(&point2)); + assert!(!point1.strict_eq(&point3)); + } + + #[test] + fn test_comparisons() { + let origin1 = ORIGIN_POINT; + let origin2 = ORIGIN_POINT; + let tip1 = TIP_POINT; + let tip2 = TIP_POINT; + let early_block = Point::new(100u64, vec![]); + let late_block1 = Point::new(5000u64, vec![]); + let late_block2 = Point::new(5000u64, vec![]); + + assert!(origin1 == origin2); + assert!(origin1 < early_block); + assert!(origin1 <= early_block); + assert!(origin1 != early_block); + assert!(origin1 < late_block1); + assert!(origin1 <= late_block1); + assert!(origin1 != late_block1); + assert!(origin1 < tip1); + assert!(origin1 <= tip1); + assert!(origin1 != tip1); + + assert!(tip1 > origin1); + assert!(tip1 >= origin1); + assert!(tip1 != origin1); + assert!(tip1 > early_block); + assert!(tip1 >= late_block1); + assert!(tip1 != late_block1); + assert!(tip1 == tip2); + + assert!(early_block > origin1); + assert!(early_block >= origin1); + assert!(early_block != origin1); + assert!(early_block < late_block1); + assert!(early_block <= late_block1); + assert!(early_block != late_block1); + assert!(early_block < tip1); + assert!(early_block <= tip1); + assert!(early_block != tip1); + + assert!(late_block1 == late_block2); + assert!(late_block1 > origin1); + assert!(late_block1 >= origin1); + assert!(late_block1 != origin1); + assert!(late_block1 > early_block); + assert!(late_block1 >= early_block); + assert!(late_block1 != early_block); + assert!(late_block1 < tip1); + assert!(late_block1 <= tip1); + assert!(late_block1 != tip1); + } +} diff --git a/hermes/crates/cardano-chain-follower/src/snapshot_id.rs b/hermes/crates/cardano-chain-follower/src/snapshot_id.rs new file mode 100644 index 000000000..4a5489b2f --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/snapshot_id.rs @@ -0,0 +1,295 @@ +//! 
Simple ID for a mithril snapshot path known by its largest immutable file number + +use std::{ + cmp::Ordering, + default, + fmt::Display, + path::{Path, PathBuf}, +}; + +use tracing::debug; + +use crate::{ + mithril_snapshot_sync::{get_mithril_tip, MITHRIL_IMMUTABLE_SUB_DIRECTORY}, + point::UNKNOWN_POINT, + Network, Point, +}; +/// A Representation of a Snapshot Path and its represented Immutable File Number. +#[derive(Clone, Debug)] +pub(crate) struct SnapshotId { + /// The Snapshot Path + path: PathBuf, + /// The largest Immutable File Number + file: u64, + /// The Tip of the Snapshot + tip: Point, +} + +impl SnapshotId { + /// See if we can Parse the path into an immutable file number. + pub(crate) fn parse_path(path: &Path) -> Option { + // Path must actually exist, and be a directory. + if !path.is_dir() { + None + } else if let Ok(numeric_name) = path + .file_name() + .unwrap_or_default() + .to_string_lossy() + .parse::() + { + Some(numeric_name) + } else { + // If we couldn't parse the file name as a number, then it's not an immutable file. + None + } + } + + /// Try and create a new `SnapshotID` from a given path. + /// Immutable TIP must be provided. + pub(crate) fn new(path: &Path, tip: Point) -> Option { + debug!("Trying to Get SnapshotID of: {}", path.to_string_lossy()); + let immutable_file = SnapshotId::parse_path(path)?; + debug!("Immutable File#: {}", immutable_file); + + Some(SnapshotId { + path: path.to_path_buf(), + file: immutable_file, + tip, + }) + } + + /// Try and create a new `SnapshotID` from a given path. + /// Includes properly getting the Immutable TIP. 
+ pub(crate) async fn try_new(chain: Network, path: &Path) -> Option { + let Ok(tip) = get_mithril_tip(chain, path).await else { + return None; + }; + + SnapshotId::new(path, tip.point()) + } + + /// Get the Immutable Blockchain path from this `SnapshotId` + pub(crate) fn immutable_path(&self) -> PathBuf { + let mut immutable = self.path.clone(); + immutable.push(MITHRIL_IMMUTABLE_SUB_DIRECTORY); + + immutable + } + + /// Get the Blockchain path from this `SnapshotId` + pub(crate) fn path(&self) -> PathBuf { + self.path.clone() + } + + /// Get the Blockchain path from this `SnapshotId` only if it actually exists. + pub(crate) fn path_if_exists(&self) -> Option { + if self.tip.is_unknown() { + return None; + } + Some(self.path.clone()) + } + + /// Get the Tip of the Immutable Blockchain from this `SnapshotId` + pub(crate) fn tip(&self) -> Point { + self.tip.clone() + } +} + +impl default::Default for SnapshotId { + /// Create an empty `SnapshotID`. + fn default() -> Self { + SnapshotId { + path: PathBuf::new(), + file: 0, + tip: UNKNOWN_POINT, + } + } +} + +impl std::convert::AsRef for SnapshotId { + fn as_ref(&self) -> &std::path::Path { + self.path.as_ref() + } +} + +impl Display for SnapshotId { + /// Convert this `SnapshotID` to a `String`. + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!( + f, + "{} @ Tip [{} / {:?}]", + self.path.display(), + self.file, + self.tip + ) + } +} + +// Normal Comparisons to simplify code. +impl PartialEq for SnapshotId { + // Equality ONLY checks the Immutable File Number, not the path. + // This is because the Filename is already the ImmutableFileNumber + fn eq(&self, other: &Self) -> bool { + self.file == other.file + } +} + +impl PartialOrd for SnapshotId { + // Equality ONLY checks the Immutable File Number, not the path. 
+ // This is because the Filename is already the ImmutableFileNumber + fn partial_cmp(&self, other: &Self) -> Option { + self.file.partial_cmp(&other.file) + } +} + +// Allows us to compare a SnapshotID against Some(SnapshotID). +impl PartialEq> for SnapshotId { + // Equality ONLY checks the Immutable File Number, not the path. + // This is because the Filename is already the ImmutableFileNumber + fn eq(&self, other: &Option) -> bool { + match other { + None => false, + Some(other) => self == other, + } + } +} + +impl PartialOrd> for SnapshotId { + // Equality ONLY checks the Immutable File Number, not the path. + // This is because the Filename is already the ImmutableFileNumber + fn partial_cmp(&self, other: &Option) -> Option { + match other { + None => Some(Ordering::Greater), // Anything is always greater than None. + Some(other) => self.partial_cmp(other), + } + } +} + +// Allows us to compare a SnapshotID against u64 (Just the Immutable File Number). +impl PartialEq for SnapshotId { + // Equality ONLY checks the Immutable File Number, not the path. + // This is because the Filename is already the ImmutableFileNumber + fn eq(&self, other: &u64) -> bool { + self.file == *other + } +} + +impl PartialOrd for SnapshotId { + // Equality ONLY checks the Immutable File Number, not the path. 
+ // This is because the Filename is already the ImmutableFileNumber + fn partial_cmp(&self, other: &u64) -> Option { + self.file.partial_cmp(other) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::point::*; + + const TEST_DIR: &str = "test_data/test_snapshot_id"; + + #[test] + fn test_parse_path() { + let dir_path_1 = &[TEST_DIR, "12345"].join("/"); + let dir_path_2 = &[TEST_DIR, "12346"].join("/"); + let dir_path_3 = &[TEST_DIR, "12347"].join("/"); + let dir_path_4 = &[TEST_DIR, "not_found"].join("/"); + let dir_path_5 = &[TEST_DIR, "123abc"].join("/"); + + assert_eq!( + SnapshotId::parse_path(&PathBuf::from(dir_path_1)), + Some(12345) + ); + assert_eq!( + SnapshotId::parse_path(&PathBuf::from(dir_path_2)), + Some(12346) + ); + assert_eq!( + SnapshotId::parse_path(&PathBuf::from(dir_path_3)), + Some(12347) + ); + assert_eq!(SnapshotId::parse_path(&PathBuf::from(dir_path_4)), None); + assert_eq!(SnapshotId::parse_path(&PathBuf::from(dir_path_5)), None); + } + + #[test] + fn test_new() { + let dir_path_1 = &[TEST_DIR, "12345"].join("/"); + let dir_path_2 = &[TEST_DIR, "12346"].join("/"); + let dir_path_3 = &[TEST_DIR, "12347"].join("/"); + + let point_1 = Point::fuzzy(999); + let point_2 = Point::new(999, vec![0; 32]); + let point_3 = Point::new(12345, vec![8; 32]); + + assert!(SnapshotId::new(&PathBuf::from(dir_path_1), point_1).is_some()); + assert!(SnapshotId::new(&PathBuf::from(dir_path_2), point_2).is_some()); + assert!(SnapshotId::new(&PathBuf::from(dir_path_3), point_3).is_some()); + } + + #[tokio::test] + async fn test_try_new() { + let dir_path_1 = &[TEST_DIR, "12345"].join("/"); + let dir_path_1 = PathBuf::from(dir_path_1) + .canonicalize() + .expect("cannot get absolute path"); + + assert_eq!( + SnapshotId::try_new(Network::Preprod, &dir_path_1).await, + None + ); + } + + #[test] + fn test_immutable_path() { + let dir_path_1 = &[TEST_DIR, "12345"].join("/"); + + let point_1 = Point::fuzzy(999); + + let snapshot_id_1 = 
SnapshotId::new(&PathBuf::from(dir_path_1), point_1) + .expect("cannot create snapshot id"); + + assert_eq!( + snapshot_id_1.immutable_path(), + PathBuf::from([dir_path_1.as_str(), "immutable"].join("/")) + ); + } + + #[test] + fn test_compare() { + let dir_path_1 = &[TEST_DIR, "12345"].join("/"); + let dir_path_2 = &[TEST_DIR, "12345"].join("/"); + let dir_path_3 = &[TEST_DIR, "12346"].join("/"); + let dir_path_4 = &[TEST_DIR, "12347"].join("/"); + + let point_1 = Point::fuzzy(999); + let point_2 = Point::new(999, vec![0; 32]); + let point_3 = Point::new(12345, vec![8; 32]); + + let snapshot_id_1 = SnapshotId::new(&PathBuf::from(dir_path_1), point_1.clone()); + let snapshot_id_2 = SnapshotId::new(&PathBuf::from(dir_path_2), point_1); + let snapshot_id_3 = SnapshotId::new(&PathBuf::from(dir_path_3), point_2); + let snapshot_id_4 = SnapshotId::new(&PathBuf::from(dir_path_4), point_3); + + assert!(snapshot_id_1 == snapshot_id_1); + assert!(snapshot_id_1 == snapshot_id_2); + assert!(snapshot_id_1 != snapshot_id_3); + assert!(snapshot_id_1 < snapshot_id_3); + assert!(snapshot_id_1 != snapshot_id_4); + assert!(snapshot_id_1 < snapshot_id_4); + + assert!(snapshot_id_2 == snapshot_id_1); + assert!(snapshot_id_2 != snapshot_id_3); + assert!(snapshot_id_2 < snapshot_id_3); + assert!(snapshot_id_2 != snapshot_id_4); + assert!(snapshot_id_2 < snapshot_id_4); + + assert!(snapshot_id_4 != snapshot_id_1); + assert!(snapshot_id_4 > snapshot_id_1); + assert!(snapshot_id_4 != snapshot_id_2); + assert!(snapshot_id_4 > snapshot_id_2); + assert!(snapshot_id_4 != snapshot_id_3); + assert!(snapshot_id_4 > snapshot_id_3); + } +} diff --git a/hermes/crates/cardano-chain-follower/src/stats.rs b/hermes/crates/cardano-chain-follower/src/stats.rs new file mode 100644 index 000000000..fadd26f1b --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/stats.rs @@ -0,0 +1,810 @@ +//! 
Cardano Chain Follower Statistics + +use std::sync::{Arc, LazyLock, RwLock}; + +use chrono::{DateTime, Utc}; +use dashmap::DashMap; +use serde::Serialize; +use strum::{EnumIter, IntoEnumIterator}; +use tracing::error; + +use crate::Network; + +// -------- GENERAL STATISTIC TRACKING + +/// Statistics related to Mithril Snapshots +#[derive(Debug, Default, Clone, Serialize)] +pub struct Mithril { + /// Number of Mithril Snapshots that have downloaded successfully. + pub updates: u64, + /// The Immutable TIP Slot# - Origin = No downloaded snapshot + pub tip: u64, + /// Time we started downloading the current snapshot. 1/1/1970-00:00:00 UTC = Never + /// downloaded. + pub dl_start: DateTime, + /// Time we finished downloading the current snapshot. if < `dl_start` its the + /// previous time we finished. + pub dl_end: DateTime, + /// Number of times download failed (bad server connection) + pub dl_failures: u64, + /// The time the last download took, in seconds. + pub last_dl_duration: u64, + /// The size of the download archive, in bytes. (If not started and not ended, current + /// partial download size). + pub dl_size: u64, + /// Extraction start time. 1/1/1970-00:00:00 UTC = Never extracted. + pub extract_start: DateTime, + /// Extraction end time. if `extract_end` < `extract_start` its the previous time we + /// finished extracting. + pub extract_end: DateTime, + /// Number of times extraction failed (bad archive) + pub extract_failures: u64, + /// Size of last extracted snapshot, in bytes. + pub extract_size: u64, + /// Deduplicated Size vs previous snapshot. + pub deduplicated_size: u64, + /// Number of identical files deduplicated from previous snapshot. + pub deduplicated: u64, + /// Number of changed files from previous snapshot. + pub changed: u64, + /// Number of new files from previous snapshot. + pub new: u64, + /// Mithril Certificate Validation Start Time. 1/1/1970-00:00:00 UTC = Never + /// validated. 
+ pub validate_start: DateTime, + /// Mithril Certificate Validation End Time. if validate end < validate start its the + /// previous time we finished validating. + pub validate_end: DateTime, + /// Number of times validation failed (bad snapshot) + pub validate_failures: u64, + /// Blocks that failed to deserialize from the mithril immutable chain. + pub invalid_blocks: u64, + /// Download Or Validation Failed + pub download_or_validation_failed: u64, + /// Failed to get tip from mithril snapshot. + pub failed_to_get_tip: u64, + /// Tip failed to advance + pub tip_did_not_advance: u64, + /// Failed to send new tip to updater. + pub tip_failed_to_send_to_updater: u64, + /// Failed to activate new snapshot + pub failed_to_activate_new_snapshot: u64, +} + +impl Mithril { + /// Reset incremental counters in the mithril statistics. + fn reset(&mut self) { + self.updates = 0; + self.dl_failures = 0; + self.extract_failures = 0; + self.validate_failures = 0; + self.invalid_blocks = 0; + } +} + +/// Statistics related to a single depth of rollback +#[derive(Debug, Default, Clone, Serialize)] +pub struct Rollback { + /// How deep was the rollback from tip. + pub depth: u64, + /// How many times has a rollback been this deep. + pub count: u64, +} + +/// Statistics for all our known rollback types +/// Rollback Vec is sorted by depth, ascending. +#[derive(Debug, Default, Clone, Serialize)] +pub struct Rollbacks { + /// These are the ACTUAL rollbacks we did on our live-chain in memory. + pub live: Vec, + /// These are the rollbacks reported by the Peer Node, which may not == an actual + /// rollback on our internal live chain. + pub peer: Vec, + /// These are the rollbacks synthesized for followers, based on their reading of the + /// chain tip. 
+ pub follower: Vec, +} + +/// Individual Follower stats +#[derive(Debug, Default, Clone, Serialize)] +pub struct Follower { + /// Synthetic follower connection ID + pub id: u64, + /// Starting slot for this follower (0 = Start at Genesis Block for the chain). + pub start: u64, + /// Current slot for this follower. + pub current: u64, + /// Target slot for this follower (MAX U64 == Follow Tip Forever). + pub end: u64, + /// Current Sync Time. + pub sync_start: DateTime, + /// When this follower reached TIP or its destination slot. + pub sync_end: Option>, +} + +/// Statistics related to the live blockchain +#[derive(Debug, Default, Clone, Serialize)] +pub struct Live { + /// The Time that synchronization to this blockchain started + pub sync_start: DateTime, + /// The Time that synchronization to this blockchain was complete up-to-tip. None = + /// Not yet synchronized. + pub sync_end: Option>, + /// When backfill started + pub backfill_start: Option>, + /// Backfill size to achieve synchronization. (0 before sync completed) + pub backfill_size: u64, + /// When backfill ended + pub backfill_end: Option>, + /// Backfill Failures + pub backfill_failures: u64, + /// The time of the last backfill failure + pub backfill_failure_time: Option>, + /// Current Number of Live Blocks + pub blocks: u64, + /// The current head of the live chain slot# + pub head_slot: u64, + /// The current live tip slot# as reported by the peer. + pub tip: u64, + /// Number of times we connected/re-connected to the Node. + pub reconnects: u64, + /// Last reconnect time, + pub last_connect: DateTime, + /// Last reconnect time, + pub last_connected_peer: String, + /// Last disconnect time, + pub last_disconnect: DateTime, + /// Last disconnect time, + pub last_disconnected_peer: String, + /// Is there an active connection to the node + pub connected: bool, + /// Rollback statistics. + pub rollbacks: Rollbacks, + /// New blocks read from blockchain. 
+ pub new_blocks: u64, + /// Blocks that failed to deserialize from the blockchain. + pub invalid_blocks: u64, + /// Active Followers (range and current depth) + pub follower: Vec, +} + +impl Live { + /// Reset incremental counters in the live statistics. + fn reset(&mut self) { + self.new_blocks = 0; + self.reconnects = 0; + self.invalid_blocks = 0; + } +} + +/// Statistics for a single follower network. +#[derive(Debug, Default, Clone, Serialize)] +pub struct Statistics { + /// Statistics related to the live connection to the blockchain. + pub live: Live, + /// Statistics related to the mithril certified blockchain archive. + pub mithril: Mithril, +} + +/// Type we use to manage the Sync Task handle map. +type StatsMap = DashMap>>; +/// The statistics being maintained per chain. +static STATS_MAP: LazyLock = LazyLock::new(|| { + let map = StatsMap::default(); + + for network in Network::iter() { + let stats = Statistics::default(); + map.insert(network, Arc::new(RwLock::new(stats))); + } + map +}); + +/// Get the stats for a particular chain. +fn lookup_stats(chain: Network) -> Option>> { + let Some(chain_entry) = STATS_MAP.get(&chain) else { + error!("Stats MUST BE exhaustively pre-allocated."); + return None; + }; + + let chain_stats = chain_entry.value(); + + Some(chain_stats.clone()) +} + +impl Statistics { + /// Get a new statistics struct for a given blockchain network. + #[must_use] + pub fn new(chain: Network) -> Self { + let Some(stats) = lookup_stats(chain) else { + return Statistics::default(); + }; + + let Ok(chain_stats) = stats.read() else { + return Statistics::default(); + }; + + let mut this_stats = chain_stats.clone(); + // Set the current rollback stats. 
+ this_stats.live.rollbacks.live = rollbacks(chain, RollbackType::LiveChain); + this_stats.live.rollbacks.peer = rollbacks(chain, RollbackType::Peer); + this_stats.live.rollbacks.follower = rollbacks(chain, RollbackType::Follower); + + this_stats + } + + /// Reset the incremental counters in a stats record. + fn reset_stats(&mut self) { + self.live.reset(); + self.mithril.reset(); + } + + /// Get the current tips of the immutable chain and live chain. + pub(crate) fn tips(chain: Network) -> (u64, u64) { + let Some(stats) = lookup_stats(chain) else { + return (0, 0); + }; + + let Ok(chain_stats) = stats.read() else { + return (0, 0); + }; + + (chain_stats.mithril.tip, chain_stats.live.head_slot) + } + + /// Reset amd return cumulative counters contained in the statistics. + #[must_use] + pub fn reset(chain: Network) -> Self { + let Some(stats) = lookup_stats(chain) else { + return Statistics::default(); + }; + + let Ok(mut chain_stats) = stats.write() else { + return Statistics::default(); + }; + + chain_stats.reset_stats(); + + let mut this_stats = chain_stats.clone(); + // Reset the current rollback stats. + this_stats.live.rollbacks.live = rollbacks_reset(chain, RollbackType::LiveChain); + this_stats.live.rollbacks.peer = rollbacks_reset(chain, RollbackType::Peer); + this_stats.live.rollbacks.follower = rollbacks_reset(chain, RollbackType::Follower); + + this_stats + } + + /// Return the statistics formatted as JSON + #[must_use] + pub fn as_json(&self, pretty: bool) -> String { + let json = if pretty { + serde_json::to_string_pretty(self) + } else { + serde_json::to_string(self) + }; + match json { + Ok(json) => json, + Err(error) => { + error!("{:?}", error); + String::new() + }, + } + } +} + +/// Count the invalidly deserialized blocks +pub(crate) fn stats_invalid_block(chain: Network, immutable: bool) { + // This will actually always succeed. 
+ let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. + error!("Stats RwLock should never be able to error."); + return; + }; + + if immutable { + chain_stats.mithril.invalid_blocks += 1; + } else { + chain_stats.live.invalid_blocks += 1; + } +} + +/// Count the validly deserialized blocks +pub(crate) fn new_live_block( + chain: Network, total_live_blocks: u64, head_slot: u64, tip_slot: u64, +) { + // This will actually always succeed. + let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. + error!("Stats RwLock should never be able to error."); + return; + }; + + chain_stats.live.new_blocks += 1; + chain_stats.live.blocks = total_live_blocks; + chain_stats.live.head_slot = head_slot; + chain_stats.live.tip = tip_slot; +} + +/// Track the end of the current mithril update +pub(crate) fn new_mithril_update( + chain: Network, mithril_tip: u64, total_live_blocks: u64, tip_slot: u64, +) { + // This will actually always succeed. + let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. + error!("Stats RwLock should never be able to error."); + return; + }; + + chain_stats.mithril.updates += 1; + chain_stats.mithril.tip = mithril_tip; + chain_stats.live.blocks = total_live_blocks; + chain_stats.live.tip = tip_slot; +} + +/// When did we start the backfill. +pub(crate) fn backfill_started(chain: Network) { + // This will actually always succeed. + let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. 
+ error!("Stats RwLock should never be able to error."); + return; + }; + + // If we start another backfill, then that means the previous backfill failed, so record + // it. + if chain_stats.live.backfill_start.is_some() { + chain_stats.live.backfill_failures += 1; + chain_stats.live.backfill_failure_time = chain_stats.live.backfill_start; + } + + chain_stats.live.backfill_start = Some(Utc::now()); +} + +/// When did we start the backfill. +pub(crate) fn backfill_ended(chain: Network, backfill_size: u64) { + // This will actually always succeed. + let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. + error!("Stats RwLock should never be able to error."); + return; + }; + + chain_stats.live.backfill_size = backfill_size; + chain_stats.live.backfill_end = Some(Utc::now()); +} + +/// Track statistics about connections to the cardano peer node. +pub(crate) fn peer_connected(chain: Network, active: bool, peer_address: &str) { + // This will actually always succeed. + let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. + error!("Stats RwLock should never be able to error."); + return; + }; + + if active { + chain_stats.live.reconnects += 1; + chain_stats.live.last_connect = Utc::now(); + chain_stats.live.last_connected_peer = peer_address.to_string(); + } else { + chain_stats.live.last_disconnect = Utc::now(); + chain_stats.live.last_disconnected_peer = peer_address.to_string(); + } + + chain_stats.live.connected = active; +} + +/// Record when we started syncing +pub(crate) fn sync_started(chain: Network) { + // This will actually always succeed. 
+ let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. + error!("Stats RwLock should never be able to error."); + return; + }; + + chain_stats.live.sync_start = Utc::now(); +} + +/// Record when we first reached tip. This can safely be called multiple times. +/// Except for overhead, only the first call will actually record the time. +pub(crate) fn tip_reached(chain: Network) { + // This will actually always succeed. + let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. + error!("Stats RwLock should never be able to error."); + return; + }; + + if chain_stats.live.sync_end.is_none() { + chain_stats.live.sync_end = Some(Utc::now()); + } +} + +/// Record that a Mithril snapshot Download has started. +pub(crate) fn mithril_dl_started(chain: Network) { + // This will actually always succeed. + let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. + error!("Stats RwLock should never be able to error."); + return; + }; + + chain_stats.mithril.dl_start = Utc::now(); +} + +/// Record when DL finished, if it fails, set size to None, otherwise the size of the +/// downloaded file. +pub(crate) fn mithril_dl_finished(chain: Network, dl_size: Option) { + // This will actually always succeed. + let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. + error!("Stats RwLock should never be able to error."); + return; + }; + + #[allow(clippy::cast_sign_loss)] // Its OK to cast the i64 to u64 because we clamped it. 
+ if let Some(dl_size) = dl_size { + chain_stats.mithril.dl_end = Utc::now(); + chain_stats.mithril.dl_size = dl_size; + let last_dl_duration = chain_stats.mithril.dl_end - chain_stats.mithril.dl_start; + chain_stats.mithril.last_dl_duration = + last_dl_duration.num_seconds().clamp(0, i64::MAX) as u64; + } else { + chain_stats.mithril.dl_failures += 1; + } +} + +/// Record that extracting the mithril snapshot archive has started. +pub(crate) fn mithril_extract_started(chain: Network) { + // This will actually always succeed. + let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. + error!("Stats RwLock should never be able to error."); + return; + }; + + chain_stats.mithril.extract_start = Utc::now(); +} + +/// Record when DL finished, if it fails, set size to None, otherwise the size of the +/// downloaded file. +pub(crate) fn mithril_extract_finished( + chain: Network, extract_size: Option, deduplicated_size: u64, deduplicated_files: u64, + changed_files: u64, new_files: u64, +) { + // This will actually always succeed. + let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. + error!("Stats RwLock should never be able to error."); + return; + }; + + if let Some(extract_size) = extract_size { + chain_stats.mithril.extract_end = Utc::now(); + chain_stats.mithril.extract_size = extract_size; + chain_stats.mithril.deduplicated_size = deduplicated_size; + chain_stats.mithril.deduplicated = deduplicated_files; + chain_stats.mithril.changed = changed_files; + chain_stats.mithril.new = new_files; + } else { + chain_stats.mithril.extract_failures += 1; + } +} + +/// State of the Mithril cert validation. 
+#[derive(Copy, Clone)] +pub(crate) enum MithrilValidationState { + /// Validation Started + Start, + /// Validation Failed + Failed, + /// Validation Finished + Finish, +} + +/// Record when Mithril Cert validation starts, ends or fails). +pub(crate) fn mithril_validation_state(chain: Network, mithril_state: MithrilValidationState) { + // This will actually always succeed. + let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. + error!("Stats RwLock should never be able to error."); + return; + }; + + match mithril_state { + MithrilValidationState::Start => chain_stats.mithril.validate_start = Utc::now(), + MithrilValidationState::Failed => chain_stats.mithril.validate_failures += 1, + MithrilValidationState::Finish => chain_stats.mithril.validate_end = Utc::now(), + } +} + +/// Mithril Sync Failures. +#[derive(Copy, Clone)] +pub(crate) enum MithrilSyncFailures { + /// Download Or Validation Failed + DownloadOrValidation, + /// Failed to get tip from mithril snapshot. + FailedToGetTip, + /// Tip failed to advance + TipDidNotAdvance, + /// Failed to send new tip to updater. + TipFailedToSendToUpdater, + /// Failed to activate new snapshot + FailedToActivateNewSnapshot, +} + +/// Record when Mithril Cert validation starts, ends or fails). +pub(crate) fn mithril_sync_failure(chain: Network, failure: MithrilSyncFailures) { + // This will actually always succeed. + let Some(stats) = lookup_stats(chain) else { + return; + }; + + let Ok(mut chain_stats) = stats.write() else { + // Worst case if this fails (it never should) is we stop updating stats. 
+ error!("Stats RwLock should never be able to error."); + return; + }; + + match failure { + MithrilSyncFailures::DownloadOrValidation => { + chain_stats.mithril.download_or_validation_failed += 1; + }, + MithrilSyncFailures::FailedToGetTip => chain_stats.mithril.failed_to_get_tip += 1, + MithrilSyncFailures::TipDidNotAdvance => chain_stats.mithril.tip_did_not_advance += 1, + MithrilSyncFailures::TipFailedToSendToUpdater => { + chain_stats.mithril.tip_failed_to_send_to_updater += 1; + }, + MithrilSyncFailures::FailedToActivateNewSnapshot => { + chain_stats.mithril.failed_to_activate_new_snapshot += 1; + }, + } +} + +// -------- ROLLBACK STATISTIC TRACKING +// ---------------------------------------------------------- + +/// The types of rollbacks we track for a chain. +#[derive(EnumIter, Eq, Ord, PartialEq, PartialOrd, Copy, Clone, Hash)] +pub enum RollbackType { + /// Rollback on the in-memory live chain. + LiveChain, + /// Rollback signaled by the peer. + Peer, + /// Rollback synthesized for the Follower. + Follower, +} + +/// Individual rollback records. +type RollbackRecords = DashMap; +/// Rollback Records per rollback type. +type RollbackTypeMap = DashMap>>; +/// Record of rollbacks. +type RollbackMap = DashMap; +/// Statistics of rollbacks detected per chain. +static ROLLBACKS_MAP: LazyLock = LazyLock::new(|| { + let map = RollbackMap::new(); + for network in Network::iter() { + let type_map = RollbackTypeMap::new(); + for rollback in RollbackType::iter() { + type_map.insert(rollback, Arc::new(RwLock::new(RollbackRecords::new()))); + } + map.insert(network, type_map); + } + map +}); + +/// Get the actual rollback map for a chain. 
+fn lookup_rollback_map( + chain: Network, rollback: RollbackType, +) -> Option>> { + let Some(chain_rollback_map) = ROLLBACKS_MAP.get(&chain) else { + error!("Rollback stats SHOULD BE exhaustively pre-allocated."); + return None; + }; + let chain_rollback_map = chain_rollback_map.value(); + + let Some(rollback_map) = chain_rollback_map.get(&rollback) else { + error!("Rollback stats SHOULD BE exhaustively pre-allocated."); + return None; + }; + let rollback_map = rollback_map.value(); + + Some(rollback_map.clone()) +} + +/// Extract the current rollback stats as a vec. +fn rollbacks(chain: Network, rollback: RollbackType) -> Vec { + let Some(rollback_map) = lookup_rollback_map(chain, rollback) else { + return Vec::new(); + }; + + let Ok(rollback_values) = rollback_map.read() else { + error!("Rollback stats LOCK Poisoned, should not happen."); + return vec![]; + }; + + let mut rollbacks = Vec::new(); + + // Get all the rollback stats. + for stat in rollback_values.iter() { + rollbacks.push(stat.value().clone()); + } + + rollbacks +} + +/// Reset ALL the rollback stats for a given blockchain. 
+fn rollbacks_reset(chain: Network, rollback: RollbackType) -> Vec { + let Some(rollback_map) = lookup_rollback_map(chain, rollback) else { + return Vec::new(); + }; + + let Ok(rollbacks) = rollback_map.write() else { + error!("Rollback stats LOCK Poisoned, should not happen."); + return vec![]; + }; + + rollbacks.clear(); + + Vec::new() +} + +/// Count a rollback +pub(crate) fn rollback(chain: Network, rollback: RollbackType, depth: u64) { + let Some(rollback_map) = lookup_rollback_map(chain, rollback) else { + return; + }; + + let Ok(rollbacks) = rollback_map.write() else { + error!("Rollback stats LOCK Poisoned, should not happen."); + return; + }; + + let mut value = match rollbacks.get(&depth) { + Some(value_entry) => (*value_entry.value()).clone(), + None => Rollback { depth, count: 0 }, + }; + + value.count += 1; + + let _unused = rollbacks.insert(depth, value); +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use chrono::Utc; + + use super::*; + + #[test] + fn test_mithril_reset() { + let mut mithril = Mithril { + updates: 10, + dl_failures: 5, + extract_failures: 3, + validate_failures: 2, + invalid_blocks: 1, + ..Default::default() + }; + mithril.reset(); + assert_eq!(mithril.updates, 0); + assert_eq!(mithril.dl_failures, 0); + assert_eq!(mithril.extract_failures, 0); + assert_eq!(mithril.validate_failures, 0); + assert_eq!(mithril.invalid_blocks, 0); + } + + #[test] + fn test_live_reset() { + let mut live = Live { + new_blocks: 10, + reconnects: 5, + invalid_blocks: 3, + ..Default::default() + }; + live.reset(); + assert_eq!(live.new_blocks, 0); + assert_eq!(live.reconnects, 0); + assert_eq!(live.invalid_blocks, 0); + } + + #[test] + fn test_statistics_reset_stats() { + let mut stats = Statistics::default(); + stats.live.new_blocks = 10; + stats.mithril.updates = 5; + stats.reset_stats(); + assert_eq!(stats.live.new_blocks, 0); + assert_eq!(stats.mithril.updates, 0); + } + + #[test] + fn test_statistics_as_json() { + let stats = 
Statistics::default(); + let json = stats.as_json(true); + assert!(json.contains("\"blocks\": 0")); + assert!(json.contains("\"updates\": 0")); + } + + #[test] + fn test_lookup_stats() { + let network = Network::Preprod; + let stats = lookup_stats(network); + assert!(stats.is_some()); + } + + #[test] + fn test_new_live_block() { + let network = Network::Preprod; + new_live_block(network, 100, 50, 200); + let stats = lookup_stats(network).unwrap(); + let stats = stats.read().unwrap(); + assert_eq!(stats.live.blocks, 100); + assert_eq!(stats.live.head_slot, 50); + assert_eq!(stats.live.tip, 200); + } + + #[test] + fn test_mithril_dl_started() { + let network = Network::Preprod; + mithril_dl_started(network); + let stats = lookup_stats(network).unwrap(); + let stats = stats.read().unwrap(); + assert!(stats.mithril.dl_start <= Utc::now()); + } +} diff --git a/hermes/crates/cardano-chain-follower/src/turbo_downloader/mod.rs b/hermes/crates/cardano-chain-follower/src/turbo_downloader/mod.rs new file mode 100644 index 000000000..5ae5495d4 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/turbo_downloader/mod.rs @@ -0,0 +1,728 @@ +//! Serializable Parallel Download Processor +//! +//! Provides the capability to quickly download a large file using parallel connections, +//! but still process the data sequentially, without requiring the entire file to be +//! downloaded at once. +//! +//! NOTE: This uses synchronous threading and HTTP Gets because Async proved to be highly +//! variable in its performance. 
+ +use std::{ + io::Read, + net::SocketAddr, + sync::{ + atomic::{AtomicU64, AtomicUsize, Ordering}, + Arc, Mutex, OnceLock, + }, + thread, + time::Duration, +}; + +use anyhow::{bail, Context, Result}; +use dashmap::DashMap; +use http::{ + header::{ACCEPT_RANGES, CONTENT_LENGTH, RANGE}, + StatusCode, +}; +use tracing::{debug, error}; + +use crate::utils::u64_from_saturating; + +/// A Simple DNS Balancing Resolver +struct BalancingResolver { + /// The actual resolver + resolver: hickory_resolver::Resolver, + /// A Cache of the Sockets we already resolved for a URL. + cache: moka::sync::Cache>>, +} + +/// We only have one resolver. +static RESOLVER: OnceLock = OnceLock::new(); + +impl BalancingResolver { + /// Initialize the resolver, only does something once, but safe to call multiple + /// times. + fn init(_cfg: &DlConfig) -> Result<()> { + // Can ONLY init the Resolver once, just return if we try and do it multiple times. + if RESOLVER.get().is_none() { + // Construct a new Resolver with default configuration options + let resolver = match hickory_resolver::Resolver::from_system_conf() { + Ok(r) => r, + Err(e) => { + error!("Failed to initialize DNS Balancing Resolver from system configuration, using Google DNS as fallback: {}", e); + hickory_resolver::Resolver::new( + hickory_resolver::config::ResolverConfig::default(), + hickory_resolver::config::ResolverOpts::default(), + )? + }, + }; + + let cache = moka::sync::Cache::builder() + // We should not be caching lots of different URLs + .max_capacity(10) + // Time to live (TTL): 60 minutes + .time_to_live(Duration::from_secs(60 * 60)) + // Time to idle (TTI): 5 minutes + .time_to_idle(Duration::from_secs(5 * 60)) + // Create the cache. + .build(); + + // We don't really care if this is already set. + let _unused = RESOLVER.set(BalancingResolver { resolver, cache }); + } + Ok(()) + } + + /// Resolve the given URL with the configured resolver.
+ fn resolve(&self, url: &str, worker: usize) -> std::io::Result> { + // debug!("Resolving: {url} for {worker}"); + let addresses = if let Some(addresses) = self.cache.get(url) { + addresses + } else { + let Some((host, port_str)) = url.split_once(':') else { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Could not parse URL", + )); + }; + + let port: u16 = port_str.parse().map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Could not parse port number", + ) + })?; + + let mut all_addresses: Vec = Vec::new(); + for addr in self.resolver.lookup_ip(host.to_string())?.iter() { + all_addresses.push(std::net::SocketAddr::new(addr, port)); + } + + let addresses = Arc::new(all_addresses); + self.cache.insert(url.to_string(), addresses.clone()); + addresses + }; + let worker_addresses = worker % addresses.len(); + // Safe because we bound the index with the length of `addresses`. + #[allow(clippy::indexing_slicing)] + Ok(vec![addresses[worker_addresses]]) + } +} + +// Timeout if connection can not be made in 10 seconds. +// const CONNECTION_TIMEOUT: Duration = Duration::from_secs(10); + +// Timeout if no data received for 5 seconds. +// const DATA_READ_TIMEOUT: Duration = Duration::from_secs(5); + +/// Minimum rational size of a chunk in bytes. +const MIN_CHUNK_SIZE: usize = 1024 * 4; // 4 KB + +/// Parallel Downloader Tuning parameters +#[derive(Clone, Debug)] +#[allow(clippy::struct_excessive_bools)] +pub struct DlConfig { + /// Maximum number of parallel connections to use. + pub workers: usize, + /// Size of a chunk in bytes (except the last). + pub chunk_size: usize, + /// Maximum number of chunks queued ahead to workers. + pub queue_ahead: usize, + /// Timeout for each connection. + pub connection_timeout: Option, + /// Timeout for each data read. 
+ pub data_read_timeout: Option, +} + +impl DlConfig { + /// Create a new `DlConfig` + #[must_use] + pub fn new() -> Self { + DlConfig::default() + } + + /// Change the number of workers + #[must_use] + pub fn with_workers(mut self, workers: usize) -> Self { + self.workers = workers; + self + } + + /// Change the chunk size + #[must_use] + pub fn with_chunk_size(mut self, chunk_size: usize) -> Self { + self.chunk_size = chunk_size; + self + } + + /// Change the number of chunks queued ahead to workers + #[must_use] + pub fn with_queue_ahead(mut self, queue_ahead: usize) -> Self { + self.queue_ahead = queue_ahead; + self + } + + /// Change the connection timeout + #[must_use] + pub fn with_connection_timeout(mut self, connection_timeout: Duration) -> Self { + self.connection_timeout = Some(connection_timeout); + self + } + + /// Change the data read timeout + #[must_use] + pub fn with_data_read_timeout(mut self, data_read_timeout: Duration) -> Self { + self.data_read_timeout = Some(data_read_timeout); + self + } + + /// Resolve DNS addresses using Hickory Resolver + fn resolve(url: &str, worker: usize) -> std::io::Result> { + let Some(resolver) = RESOLVER.get() else { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Resolver not initialized.", + )); + }; + + resolver.resolve(url, worker) + } + + /// Builds a `UReq` Agent. + /// + /// Because we need multiple clients to prevent all traffic being forced onto a single + /// connection when HTTP2 is used, the client can NOT be supplied by the user. + /// Instead we create a new one here based on their configuration. 
+ pub(crate) fn make_http_agent(&self, worker: usize) -> ureq::Agent { + let mut agent = ureq::AgentBuilder::new(); + + if let Some(timeout) = self.connection_timeout { + agent = agent.timeout_connect(timeout); + } + + if let Some(timeout) = self.data_read_timeout { + agent = agent.timeout_read(timeout); + } + + let agent = agent.resolver(move |url: &str| Self::resolve(url, worker)); + + agent.build() + } +} + +impl Default for DlConfig { + fn default() -> Self { + DlConfig { + workers: 16, + chunk_size: 2 * 1024 * 1024, + queue_ahead: 3, + connection_timeout: None, + data_read_timeout: None, + } + } +} + +/// An Individual Downloaded block of data. +/// Wrapped in an ARC so its cheap to clone and pass between threads. +type DlBlock = Arc>; + +/// Downloaded Chunk (or error if it fails). +#[derive(Clone)] +struct DlChunk { + /// Index of the worker that fetched the chunk. + worker: usize, + /// Index of the chunk in the file. + chunk_num: usize, + /// The data from the chunk. (None == failed) + chunk: Option, +} + +/// Download Chunk Work Order. +/// This is simply the number of the chunk next to fetch. +/// When finished, the queue is just closed. +type DlWorkOrder = usize; + +/// Parallel Download Processor Inner struct. +/// +/// Note: Maximum Potential Working set in memory will == `dl_chunk` * ((`workers` * +/// `queue_ahead`) + 1) +struct ParallelDownloadProcessorInner { + /// URL to download from. + url: String, + /// Configuration + cfg: DlConfig, + /// Size of the file we expect to download. + file_size: usize, + /// The last chunk we can request + last_chunk: usize, + /// Skip map used to reorder incoming chunks back into sequential order. + reorder_queue: DashMap, + /// A queue for each worker to send them new work orders. + work_queue: DashMap>, + /// New Chunk Queue - Just says we added a new chunk to the reorder queue. + new_chunk_queue_tx: crossbeam_channel::Sender>, + /// New Chunk Queue - Just says we added a new chunk to the reorder queue. 
+ new_chunk_queue_rx: crossbeam_channel::Receiver>, + /// Statistic tracking number of bytes downloaded per worker. + bytes_downloaded: Vec, + /// Left Over Bytes (from the reader) + left_over_bytes: Mutex>, usize)>>, + /// Next Expected Chunk + next_expected_chunk: AtomicUsize, + /// Next Chunk to Request + next_requested_chunk: AtomicUsize, +} + +impl Drop for ParallelDownloadProcessorInner { + /// Cleanup the channel and workers. + fn drop(&mut self) { + debug!("Drop ParallelDownloadProcessorInner"); + self.reorder_queue.clear(); + self.reorder_queue.shrink_to_fit(); + self.work_queue.clear(); + self.work_queue.shrink_to_fit(); + } +} + +impl ParallelDownloadProcessorInner { + /// Get how many bytes were downloaded, total. + pub(crate) fn total_bytes(&self) -> u64 { + self.bytes_downloaded + .iter() + .map(|x| x.load(Ordering::SeqCst)) + .sum::() + } + + /// Get start offset of a chunk. + fn chunk_start(&self, chunk: usize) -> usize { + self.cfg.chunk_size * chunk + } + + /// Get inclusive end offset of a chunk. 
+ fn chunk_end(&self, chunk: usize) -> usize { + let start = self.chunk_start(chunk); + if start + self.cfg.chunk_size >= self.file_size { + self.file_size - 1 + } else { + start + self.cfg.chunk_size - 1 + } + } + + /// Sends a GET request to download a chunk of the file at the specified range + fn get_range(&self, agent: &ureq::Agent, chunk: usize) -> anyhow::Result>> { + let range_start = self.chunk_start(chunk); + let range_end_inclusive = self.chunk_end(chunk); + let range_header = format!("bytes={range_start}-{range_end_inclusive}"); + let get_range_response = agent + .get(&self.url) + .set(RANGE.as_str(), &range_header) + .call() + .context("GET ranged request failed")?; + // let addr = get_range_response.remote_addr(); + // debug!("Chunk {chunk} from {addr:?}"); + if get_range_response.status() != StatusCode::PARTIAL_CONTENT { + bail!( + "Response to range request has an unexpected status code (expected {}, found {})", + StatusCode::PARTIAL_CONTENT, + get_range_response.status() + ) + } + + let range_size = range_end_inclusive - range_start + 1; + let mut bytes: Vec = Vec::with_capacity(range_size); + + let bytes_read = get_range_response + .into_reader() + .take(u64_from_saturating(range_size)) + .read_to_end(&mut bytes)?; + + if bytes_read != range_size { + bail!("Expected {range_size} bytes in response, but only read {bytes_read}") + } + + Ok(Arc::new(bytes)) + } + + /// Queue Chunk to processor. + /// + /// Reorders chunks and sends to the consumer. + fn reorder_queue(&self, chunk: DlChunk) -> anyhow::Result<()> { + self.reorder_queue.insert(chunk.chunk_num, chunk); + self.new_chunk_queue_tx.send(Some(()))?; + Ok(()) + } +} + +/// Parallel Download Processor. +/// +/// Uses multiple connection to speed up downloads, but returns data sequentially +/// so it can be processed without needing to store the whole file in memory or disk. 
+#[derive(Clone)] +pub(crate) struct ParallelDownloadProcessor(Arc); + +impl ParallelDownloadProcessor { + /// Creates a new instance of the Parallel Download Processor. + /// + /// Can Fail IF there is no HTTP client provided or the URL does not support getting + /// the content length. + pub(crate) async fn new(url: &str, mut cfg: DlConfig) -> anyhow::Result { + if cfg.chunk_size < MIN_CHUNK_SIZE { + bail!( + "Download chunk size must be at least {} bytes", + MIN_CHUNK_SIZE + ); + } + let file_size = get_content_length_async(url).await?; + + // Get the minimum number of workers we need, just in case the chunk size is bigger than + // the requested workers can process. + cfg.workers = file_size.div_ceil(cfg.chunk_size).min(cfg.workers); + + let last_chunk = file_size.div_ceil(cfg.chunk_size); + + // Initialize the download statistics + let mut bytes_downloaded = Vec::with_capacity(cfg.workers); + for _ in 0..cfg.workers { + bytes_downloaded.push(AtomicU64::new(0)); + } + + let new_chunk_queue = crossbeam_channel::unbounded(); + + let processor = ParallelDownloadProcessor(Arc::new(ParallelDownloadProcessorInner { + url: String::from(url), + cfg: cfg.clone(), + file_size, + last_chunk, + reorder_queue: DashMap::with_capacity((cfg.workers * cfg.queue_ahead) + 1), + work_queue: DashMap::with_capacity(cfg.workers + 1), + new_chunk_queue_rx: new_chunk_queue.1, + new_chunk_queue_tx: new_chunk_queue.0, + bytes_downloaded, + left_over_bytes: Mutex::new(None), + next_expected_chunk: AtomicUsize::new(0), + next_requested_chunk: AtomicUsize::new(0), + })); + + processor.start_workers()?; + + Ok(processor) + } + + /// Starts the worker tasks, they will not start doing any work until `download` is + /// called, which happens immediately after they are started. + fn start_workers(&self) -> anyhow::Result<()> { + for worker in 0..self.0.cfg.workers { + // The channel is unbounded, because work distribution is controlled to be at most + // `work_queue` deep per worker. 
And we don't want anything unexpected to + // cause the processor to block. + let (work_queue_tx, work_queue_rx) = crossbeam_channel::unbounded::(); + let params = self.0.clone(); + thread::spawn(move || { + Self::worker(¶ms, worker, &work_queue_rx); + }); + + let _unused = self.0.work_queue.insert(worker, work_queue_tx); + } + + self.download() + } + + /// The worker task - It is running in parallel and downloads chunks of the file as + /// requested. + fn worker( + params: &Arc, worker_id: usize, + work_queue: &crossbeam_channel::Receiver, + ) { + debug!("Worker {worker_id} started"); + + // Each worker has its own http_client, so there is no cross worker pathology + // Each worker should be expected to make multiple requests to the same host. + // Resolver should never fail to initialize. However, if it does, we can;t start the + // worker. + if let Err(error) = BalancingResolver::init(¶ms.cfg) { + error!("Failed to initialize DNS resolver for worker {worker_id}: {error:?}"); + return; + } + let http_agent = params.cfg.make_http_agent(worker_id); + + while let Ok(next_chunk) = work_queue.recv() { + // Add a small delay to the first chunks for each worker. + // So that the leading chunks are more likely to finish downloading first. + if next_chunk > 0 && next_chunk < params.cfg.workers { + let delay = Duration::from_millis(next_chunk as u64 * 2); + thread::sleep(delay); + } + let mut retries = 0; + let mut block; + // debug!("Worker {worker_id} DL chunk {next_chunk}"); + loop { + block = match params.get_range(&http_agent, next_chunk) { + Ok(block) => Some(block), + Err(error) => { + error!("Error getting chunk: {:?}, error: {:?}", next_chunk, error); + None + }, + }; + + // Quickly retry on error, in case its transient. 
+ if block.is_some() || retries > 3 { + break; + } + retries += 1; + } + // debug!("Worker {worker_id} DL chunk done {next_chunk}: {retries}"); + + if let Some(ref block) = block { + if let Some(dl_stat) = params.bytes_downloaded.get(worker_id) { + let this_bytes_downloaded = u64_from_saturating(block.len()); + let _last_bytes_downloaded = dl_stat + .fetch_add(this_bytes_downloaded, std::sync::atomic::Ordering::SeqCst); + // debug!("Worker {worker_id} DL chunk {next_chunk}: + // {last_bytes_downloaded} + {this_bytes_downloaded} = {}", + // last_bytes_downloaded+this_bytes_downloaded); + } else { + error!("Failed to get bytes downloaded for worker {worker_id}"); + } + } + + if let Err(error) = params.reorder_queue(DlChunk { + worker: worker_id, + chunk_num: next_chunk, + chunk: block, + }) { + error!("Error sending chunk: {:?}, error: {:?}", next_chunk, error); + break; + }; + // debug!("Worker {worker_id} DL chunk queued {next_chunk}"); + } + debug!("Worker {worker_id} ended"); + } + + /// Send a work order to a worker. + fn send_work_order(&self, this_worker: usize, order: DlWorkOrder) -> Result { + let next_worker = (this_worker + 1) % self.0.cfg.workers; + if order < self.0.last_chunk { + // let params = self.0.clone(); + if let Some(worker_queue) = self.0.work_queue.get(&this_worker) { + let queue = worker_queue.value(); + queue.send(order)?; + } else { + bail!("Expected a work queue for worker: {:?}", this_worker); + } + } else { + // No more work, so remove the work queue from the map. + if let Some((_, work_queue)) = self.0.work_queue.remove(&this_worker) { + // Close the work queue, which should terminate the worker. + drop(work_queue); + } + } + Ok(next_worker) + } + + /// Starts Downloading the file using parallel connections. + /// + /// Should only be called once on self. + fn download(&self) -> anyhow::Result<()> { + let params = self.0.clone(); + // Pre fill the work queue with orders. 
+ let max_pre_orders = params.cfg.queue_ahead * params.cfg.workers; + let pre_orders = max_pre_orders.min(params.last_chunk); + + let mut this_worker: usize = 0; + + // Fill up the pre-orders into the workers queues. + for pre_order in 0..pre_orders { + this_worker = self.send_work_order(this_worker, pre_order)?; + } + + params + .next_requested_chunk + .store(pre_orders, Ordering::SeqCst); + + Ok(()) + } + + /// Get current size of data we downloaded. + pub(crate) fn dl_size(&self) -> u64 { + self.0.total_bytes() + } + + /// Actual Read function, done like this so we can have a single cleanup on error or + /// EOF. + fn inner_read(&mut self, buf: &mut [u8]) -> std::io::Result { + // There should only ever be one reader, the purpose of this mutex is to give us + // mutability it should never actually block. + let mut left_over_buffer = self + .0 + .left_over_bytes + .lock() + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("{e:?}")))?; + + let (left_over_bytes, offset) = + if let Some((left_over_bytes, offset)) = left_over_buffer.take() { + (left_over_bytes, offset) + } else { + // Get the next chunk and inc the one we would want next. + let next_chunk = self.0.next_expected_chunk.fetch_add(1, Ordering::SeqCst); + + // Wait here until we actually have the next chunk in the reorder queue. 
+ while !self.0.reorder_queue.contains_key(&next_chunk) { + if let Err(error) = self.0.new_chunk_queue_rx.recv() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("Next Chunk Queue Error: {error:?}"), + )); + } + } + + let Some((_, chunk)) = self.0.reorder_queue.remove(&next_chunk) else { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("Expected Chunk {next_chunk} Didn't get any"), + )); + }; + + if chunk.chunk_num != next_chunk { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("Expected Chunk {next_chunk} Got {}", chunk.chunk_num), + )); + } + let Some(ref block) = chunk.chunk else { + return Ok(0); // EOF + }; + + // Got a chunk so lets queue more work from the worker that gave us this block. + // Because we are pre-incrementing here, its possible for this to be > maximum + // chunks and thats OK. + let next_work_order = self.0.next_requested_chunk.fetch_add(1, Ordering::SeqCst); + + // Send more work to the worker that just finished a work order. + // Or Stop the worker if there is no more work they can do. + if let Err(error) = self.send_work_order(chunk.worker, next_work_order) { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + format!("Failed to send work order to {} : {error:?}", chunk.worker), + )); + } + + // If this was the last chunk, we can stop all the workers and cleanup. + if next_chunk == self.0.last_chunk { + debug!("Last Chunk read from workers. Cleaning Up."); + self.cleanup(); + } + + (block.to_owned(), 0) + }; + + // Send whats leftover or new. 
+ let bytes_left = left_over_bytes.len() - offset; + let bytes_to_copy = bytes_left.min(buf.len()); + let Some(sub_buf) = left_over_bytes.get(offset..offset + bytes_to_copy) else { + error!("Slicing Sub Buffer failed"); + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Slicing Sub Buffer failed", + )); + }; + if let Err(error) = memx::memcpy(buf, sub_buf) { + error!(error=?error, "memx::memcpy failed"); + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "memx::memcpy failed", + )); + } + + // Save whats leftover back inside the mutex, if there is anything. + if offset + bytes_to_copy != left_over_bytes.len() { + *left_over_buffer = Some((left_over_bytes, offset + bytes_to_copy)); + } + + Ok(bytes_to_copy) + } + + /// Cleanup workers and queues when done. + fn cleanup(&self) { + // Close all the workers left running. + for x in &self.0.work_queue { + let worker = x.key(); + if let Some((_, queue)) = self.0.work_queue.remove(worker) { + debug!("Force Closing worker {}", worker); + drop(queue); + } + } + } +} + +impl Drop for ParallelDownloadProcessor { + fn drop(&mut self) { + debug!("ParallelDownloadProcessor::drop"); + self.cleanup(); + } +} + +impl std::io::Read for ParallelDownloadProcessor { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + let result = self.inner_read(buf); + match result { + Ok(0) | Err(_) => { + debug!("Read finished with Error or EOF - Cleaning up."); + self.cleanup(); + }, + _ => {}, + } + + result + } +} + +/// Send a HEAD request to obtain the length of the file we want to download (necessary +/// for calculating the offsets of the chunks) +/// +/// This exists because the `Probe` call made by Mithril is Async, and this makes +/// interfacing to that easier. 
+async fn get_content_length_async(url: &str) -> anyhow::Result { + let url = url.to_owned(); + match tokio::task::spawn_blocking(move || get_content_length(&url)).await { + Ok(result) => result, + Err(error) => { + error!("get_content_length failed"); + Err(anyhow::anyhow!("get_content_length failed: {}", error)) + }, + } +} + +/// Send a HEAD request to obtain the length of the file we want to download (necessary +/// for calculating the offsets of the chunks) +fn get_content_length(url: &str) -> anyhow::Result { + let response = ureq::head(url).call()?; + + if response.status() != StatusCode::OK { + bail!( + "HEAD request did not return a successful response: {}", + response.status_text() + ); + } + + if let Some(accept_ranges) = response.header(ACCEPT_RANGES.as_str()) { + if accept_ranges != "bytes" { + bail!( + "Server doesn't support HTTP range byte requests (Accept-Ranges = {})", + accept_ranges + ); + } + } else { + bail!("Server doesn't support HTTP range requests (missing ACCEPT_RANGES header)"); + }; + + let content_length = if let Some(content_length) = response.header(CONTENT_LENGTH.as_str()) { + let content_length: usize = content_length + .parse() + .context("Content-Length was not a valid unsigned integer")?; + content_length + } else { + bail!("HEAD response did not contain a Content-Length header"); + }; + + Ok(content_length) +} diff --git a/hermes/crates/cardano-chain-follower/src/utils.rs b/hermes/crates/cardano-chain-follower/src/utils.rs new file mode 100644 index 000000000..07c3959d0 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/utils.rs @@ -0,0 +1,93 @@ +//! Simple general purpose utility functions. + +use blake2b_simd::{self, Params}; + +/// Convert T to an i16. (saturate if out of range.) +#[allow(dead_code)] // Its OK if we don't use this general utility function. +pub(crate) fn i16_from_saturating>(value: T) -> i16 { + match value.try_into() { + Ok(value) => value, + Err(_) => i16::MAX, + } +} + +/// Convert an to usize. 
(saturate if out of range.) +#[allow(dead_code)] // Its OK if we don't use this general utility function. +pub(crate) fn usize_from_saturating< + T: Copy + + TryInto + + std::ops::Sub + + std::cmp::PartialOrd + + num_traits::identities::Zero, +>( + value: T, +) -> usize { + if value < T::zero() { + usize::MIN + } else { + match value.try_into() { + Ok(value) => value, + Err(_) => usize::MAX, + } + } +} + +/// Convert an to u32. (saturate if out of range.) +#[allow(dead_code)] // Its OK if we don't use this general utility function. +pub(crate) fn u32_from_saturating< + T: Copy + + TryInto + + std::ops::Sub + + std::cmp::PartialOrd + + num_traits::identities::Zero, +>( + value: T, +) -> u32 { + if value < T::zero() { + u32::MIN + } else { + match value.try_into() { + Ok(converted) => converted, + Err(_) => u32::MAX, + } + } +} + +/// Convert an to u64. (saturate if out of range.) +#[allow(dead_code)] // Its OK if we don't use this general utility function. +pub(crate) fn u64_from_saturating< + T: Copy + + TryInto + + std::ops::Sub + + std::cmp::PartialOrd + + num_traits::identities::Zero, +>( + value: T, +) -> u64 { + if value < T::zero() { + u64::MIN + } else { + match value.try_into() { + Ok(converted) => converted, + Err(_) => u64::MAX, + } + } +} + +/// Convert the given value to `blake2b_244` array. +#[allow(dead_code)] // Its OK if we don't use this general utility function. +pub(crate) fn blake2b_244(value: &[u8]) -> anyhow::Result<[u8; 28]> { + let h = Params::new().hash_length(28).hash(value); + let b = h.as_bytes(); + b.try_into() + .map_err(|_| anyhow::anyhow!("Invalid length of blake2b_244, expected 28 got {}", b.len())) +} + +/// Convert the given value to `blake2b_256` array. +#[allow(dead_code)] // Its OK if we don't use this general utility function. 
+pub(crate) fn blake2b_256(value: &[u8]) -> anyhow::Result<[u8; 32]> { + let h = Params::new().hash_length(32).hash(value); + let b = h.as_bytes(); + b.try_into() + .map_err(|_| anyhow::anyhow!("Invalid length of blake2b_256, expected 32 got {}", b.len())) +} diff --git a/hermes/crates/cardano-chain-follower/src/witness.rs b/hermes/crates/cardano-chain-follower/src/witness.rs new file mode 100644 index 000000000..9b640a571 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/src/witness.rs @@ -0,0 +1,136 @@ +//! Transaction Witness +use std::fmt::{Display, Formatter}; + +use dashmap::DashMap; +use pallas::{codec::utils::Bytes, ledger::traverse::MultiEraTx}; + +use crate::utils::blake2b_244; + +/// `WitnessMap` type of `DashMap` with +/// key as [u8; 28] = (`blake2b_244` hash of the public key) +/// value as (Bytes, Vec) = (public key, tx index within the block) +#[allow(dead_code)] +pub(crate) type WitnessMap = DashMap<[u8; 28], (Bytes, Vec)>; + +#[derive(Debug)] +#[allow(dead_code)] +/// `TxWitness` struct to store the witness data. +pub(crate) struct TxWitness(WitnessMap); + +#[allow(dead_code)] +impl TxWitness { + /// Create a new `TxWitness` from a list of `MultiEraTx`. 
+ pub(crate) fn new(txs: &[MultiEraTx]) -> anyhow::Result { + let map: WitnessMap = DashMap::new(); + for (i, tx) in txs.iter().enumerate() { + match tx { + MultiEraTx::AlonzoCompatible(tx, _) => { + let witness_set = &tx.transaction_witness_set; + if let Some(vkey_witness_set) = witness_set.vkeywitness.clone() { + for vkey_witness in vkey_witness_set { + let vkey_hash = blake2b_244(&vkey_witness.vkey)?; + let tx_num = u8::try_from(i)?; + map.entry(vkey_hash) + .and_modify(|entry: &mut (_, Vec)| entry.1.push(tx_num)) + .or_insert((vkey_witness.vkey.clone(), vec![tx_num])); + } + }; + }, + MultiEraTx::Babbage(tx) => { + let witness_set = &tx.transaction_witness_set; + if let Some(vkey_witness_set) = witness_set.vkeywitness.clone() { + for vkey_witness in vkey_witness_set { + let vkey_hash = blake2b_244(&vkey_witness.vkey)?; + let tx_num = u8::try_from(i)?; + map.entry(vkey_hash) + .and_modify(|entry: &mut (_, Vec)| entry.1.push(tx_num)) + .or_insert((vkey_witness.vkey.clone(), vec![tx_num])); + } + } + }, + MultiEraTx::Conway(tx) => { + let witness_set = &tx.transaction_witness_set; + if let Some(vkey_witness_set) = &witness_set.vkeywitness.clone() { + for vkey_witness in vkey_witness_set { + let vkey_hash = blake2b_244(&vkey_witness.vkey)?; + let tx_num = u8::try_from(i)?; + map.entry(vkey_hash) + .and_modify(|entry: &mut (_, Vec)| entry.1.push(tx_num)) + .or_insert((vkey_witness.vkey.clone(), vec![tx_num])); + } + } + }, + _ => {}, + }; + } + Ok(Self(map)) + } + + /// Check whether the public key hash is in the given transaction number. + pub(crate) fn check_witness_in_tx(&self, vkey_hash: &[u8; 28], tx_num: u8) -> bool { + self.0 + .get(vkey_hash) + .map_or(false, |entry| entry.1.contains(&tx_num)) + } + + /// Get the actual address from the given public key hash. 
+ pub(crate) fn get_witness_pk_addr(&self, vkey_hash: &[u8; 28]) -> Option { + self.0.get(vkey_hash).map(|entry| entry.0.clone()) + } +} + +impl Display for TxWitness { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + for data in &self.0 { + let vkey_hash = hex::encode(data.key()); + let vkey: Vec = data.0.clone().into(); + let vkey_encoded = hex::encode(&vkey); + writeln!( + f, + "Key Hash: {}, PublicKey: {}, Tx: {:?}", + vkey_hash, vkey_encoded, data.1 + )?; + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + + use super::*; + use crate::multi_era_block_data::tests::{alonzo_block, babbage_block}; + + #[test] + fn tx_witness() { + let alonzo = alonzo_block(); + let alonzo_block = pallas::ledger::traverse::MultiEraBlock::decode(&alonzo) + .expect("Failed to decode MultiEraBlock"); + let txs_alonzo = alonzo_block.txs(); + let tx_witness_alonzo = TxWitness::new(&txs_alonzo).expect("Failed to create TxWitness"); + let vkey1_hash: [u8; 28] = + hex::decode("6082eb618d161a704207a0b3a9609e820111570d94d1e711b005386c") + .expect("Failed to decode vkey1_hash") + .try_into() + .expect("Invalid length of vkey1_hash"); + println!("{tx_witness_alonzo}"); + assert!(tx_witness_alonzo.get_witness_pk_addr(&vkey1_hash).is_some()); + assert!(tx_witness_alonzo.check_witness_in_tx(&vkey1_hash, 0)); + + let babbage = babbage_block(); + let babbage_block = pallas::ledger::traverse::MultiEraBlock::decode(&babbage) + .expect("Failed to decode MultiEraBlock"); + let txs_babbage = babbage_block.txs(); + let tx_witness_babbage = TxWitness::new(&txs_babbage).expect("Failed to create TxWitness"); + let vkey2_hash: [u8; 28] = + hex::decode("ba4ab50bdecca85162f3b8114739bc5ba3aaa6490e2b1d15ad0f9c66") + .expect("Failed to decode vkey2_hash") + .try_into() + .expect("Invalid length of vkey2_hash"); + println!("{tx_witness_babbage}"); + assert!(tx_witness_babbage + .get_witness_pk_addr(&vkey2_hash) + .is_some()); + assert!(tx_witness_babbage.check_witness_in_tx(&vkey2_hash, 0)); + } +} 
diff --git a/hermes/crates/cardano-chain-follower/test_data/allegra.block b/hermes/crates/cardano-chain-follower/test_data/allegra.block new file mode 100644 index 000000000..a276b1f3a --- /dev/null +++ b/hermes/crates/cardano-chain-follower/test_data/allegra.block @@ -0,0 +1 @@ +820384828f1a004f3c641a011e1523582008d4d3e0caf55d66233e1e3421f97ea7423c2a82666e22ae216e464ff431a85058203593d74edc6c343c9e9b9d329f8356aedda414992f2f97c2bef93cf606b7b87a58205cd4a875fd1317f8176a910c6705cfdc80308d031c9a3fa6b1bc2b4b4d5621468258407b7d4c4bb23bf66c8aca7ce4830b9f1cadaee5061f89fa57995a8d07ec59f126730368e2b03c754b143a38a31c7a39ca830fb6de287129a27496a48602d39df058500c8cdf0ccba508d39bb915018c420dd057cd5d94fd2cf17b1c3a5f4522581dbdecc54e9de4d1e61d12b4d09db5a5be92b145d1b63e563ee8039af378b69b2e81669daca047d5e34750c34542e3503f0c825840000607436ed04d36164ca57f0f70bfe10ddc3f276540750dff3c0ca399047292c3355e4a488d0f13e548134661056f434f4c966c52f20c6580252c8a25601ded5850cb50509a5659569e8a1881d77dc335a781b245a14a499a7c04cdb9f6ee04be3ab3bf096c42a6c817198477bd2f10a426f7a7cf70d579d2238fecc3ca4597f8793ca02b02574ceb45fe9892997509ed0f1908ae5820dc6af41ca2a4eeb3d2969e559ddb65d2672f1b9aaf901fe4af425407952e290958209d0d3abaefdf5a2ccad0832a80759b8f8eb3c0629e9dc8bda3d44ebf658ab97601188d584051977ef95fbb458fa81ddc7e1ca6345e3f5fc70ae5b80b47d95fd39ed2940d1751ca7ce6dd24901265c299bf1994b3830ca10e995d7fde4beab807cbce03470604005901c0750da95fc1de2debd8cbde31d7f658bb41170695319a9468cde8cf21a93124f7db032571299748da724cbfda88d5f79fd89ac2b3fbd0748e25129ec2e74da20f96364f691ca551f8a7e71305d225a810783947759bf28d580a51244029c290e7c27e1d249f52ed39ac65f66529240ab76e36a4001df5a24b971557475379b24a695a9529646eeb7f1681a3d215fd6fe550654a1e7b06089b68f525b0aba6d91adf229eef5af4a3d602c1d29ba5f18c8a93bd6c950134f37ccf0ebf1b78fa2909f98ed560220de39302f56453b979da7843f6b0cacbe1a33739aa272f193bf9de69d083c2c885c1e226c0e0a35a2400059cead716cfa8064fb98f20162b2b3586c5dd29ac557ad542c610dd6576669823c757353fd03208313d8715268b32ef8ab62e02edaad3ea9c96de888
d5933ccfc13e757f92bba98b23a5deb1cb860550f272f685a5bb9b88e2f6bbf227c049f90aea4540347bdac041b09bb75c83352fcea02f91c648156aeada5877eb3adba690c073a6fc1372a48ce8115ba3537243938ce366078d07c2f9dfdd92db07e0047b8d78d32fb4b55fec8810fd9d128c88df3c5d435dc82e163f9275dac718d06f680a3be8db4dc8dd512b059b1631191f283a500818258201e8f5d41ab9915c4cb0fcf1c4127e18585d957ac95b2b4879c3e02b02205cdcf00018182583901495a5a4dd65a483a5eaf6630f59a2c33e02e2c15d1d1bc39d1708ac457fd410378da5864e6b313db03488a085ca56376af0ab5e9854376401a3b76264b021a00029eb9031a011e324b05a1581de157fd410378da5864e6b313db03488a085ca56376af0ab5e9854376401a0068d5dba400848258201f15d61fa4462dffdb330d625b1fcbec930ea633b7bff5fce23e969660702ef500825820cc59f188f8e4027e98fa33404a7ce69b76e0c6b211f11ad71b4daacf67906ec900825820da82abf561c83cffd8657c05d27244081739eacb880b3b5b4db03b1a411911fa0d825820e125166ebe2e0b7bc781b9e7aa886a032ec87cb40e0bdc1acb5058a0555a8f82000188825839015fa4a029126ea6679f6cf29d15369dde7206472bf0f9d63e12a88f575fa4a029126ea6679f6cf29d15369dde7206472bf0f9d63e12a88f571b000000019fbcb30c82583901785eb7eb6c8286b8011a0adbf3ca1b4d37589b80cc6e42fdb4679adc39bc6cf63c0f9dd4f03df80accce7246bea60d649de4a8c4d5c0dca41a16d9fbfb82584c82d818584283581cacf4df690f13b4eaf82b816f399b2e1d8ab86976da93009b813d7758a101581e581cca3e553c9c63c5c2eef54d43a2e23449490865698bb72b3b1c039082001afa0dcc441b00000067e78e120582582b82d818582183581c62d2399c601895246274b8068266fa3138fd09f79fe636aa432db929a0001a5d1abc701a1263562082584c82d818584283581c3cc42d469376e7a5cfba6823160d341b5867c66d009124bd000bb109a101581e581cca3e553c9c63c5e89765f8436cd338c66da66d119ab3c27738826333001a65307a281b00000004dc9ffb9982584c82d818584283581c05bd51fb87d91b5290ecf18958de205badec391e4d643740edb2f0dfa101581e581c735437123ef5c61d0591c67ef0c94a5dc5c18653d6c629949595edef001adf8508d81a2c932a0382584c82d818584283581c5acbd13c916ce291ac4f41b1df2c4c75b8e917622040b54e3376e42da101581e581cca3e553c9c63c5dd0255704321b81423e0ffb4018d237e8e9a96ce3e001af6cde4f81b000000095b4f070082584c82d818584283581c8c8
a6f436c8b04fe1c682aa81d417f2e82b41ebb06bf9aeb8a65a198a101581e581cca3e553c9c63c5b7f725d5432632ff88fecdf5936a00291efb732202001a1ec13f971b0000003452024d88021a00036a91031a011e30d9a5008182582026571cc2e731014dfee8536a0be0b816cc38b10ce38501db0e1a5eec4f04ae860001818258390192da16a658a4ecdb2c18b01e464f4ce2924f8830a304648674c1c2756162cdee35822637a84e896c6a8b05214cf21947f91b69122eb985891a00495250021a00061a1c031a011e3123048183028200581c6162cdee35822637a84e896c6a8b05214cf21947f91b69122eb98589581cae66e56ab11ccb39e882669f220a37956c683e4ce84fefd910012d7a83a100828258205f6f43cb12b59a3e5bf242528c8ab85e4feeb2acb89aa08e4948968f7bf2d9e0584010d19b492320347d5b6adf2137ec89f9c13d3fc07cc9b32f86a71fe001fcb75f36e3958949ef2b7e37377d89246b4a39463846402fa870f81d1845ef7c5c1d04825820f49ae225837477663687484ce73ac607ab7fe78600ca08d61db09e2e35d8e1b6584096c80781bd34262ab6f7b12f6592f0b943021e97e9c70ffc23635272c742932a51e4a252341c47a79f4e698235ab22beb3a75256b6476fd46749a68c6e86f60fa102848458200915adaebbd25d0f9a3242bb83c14bf5b012967ab8462acdee1912062a76117f58407a9ebc40eefdbcb1586e9805043ccc3285ee3864686fb1fb0d6d8075a41abd026864d0e92224982acfef061da2765effb26c75faa62c5ef3b656386782987c065820aa35c675609419a21a79ed83318278cbe669d4a4dadbfdcd6234e1315ba7c9685822a101581e581cca3e553c9c63c5a59118f7438117a8d6603837bc193303b5001b095b8458202f84330afbe6ab81c368ab0a850e8b6fa02daacc91709faf6bbdc811b4b5f82e5840bb878822cb198b1d245e2c46c20c930996067fbcb3469a8d36b68d6531add3e3e5081200890d906c70b1bd9e4333115a7c17df8da18eb19148a88a431993900f5820e91eab0e2dae5b26ff019c4b1d56183bfd8f5abd1fd258e4f58b13d55cba36f55822a101581e581cca3e553c9c63c5c0e20ac0433519070b0d32e51dbf04b191a57d44b3845820894b8d96727c3d68a30a7165c67c9bff346260628ee624268dc35d660e55794b584062b68793ff855c0b11a0c0e147de91a9f0631c565d28d270fdc146571d0c23839c970996188bf4e86b7a2d529e694b4301de1692366349913b4966f06be9df01582033aacbefeda60713a90163575735e391b5a1a00fe8629e52875657ed5c93ecba5822a101581e581cca3e553c9c63c58681fbd14353af0a213e5ab85001e4e2cbad09ec518458208e2c6fc
08234b2a3437e5bae310f6957c69a62143318c562bc1b72c85141df6d58402f02c6375f5074b287aa72e940a3280f39d6fd2c24240b087f208fb92cb2abaf911657eb06b6e24756dea333628972c611da12aad9cc8dfa6b15a41f1e4bb70258201d6ed1791e5bb28ead7bcc582d7e5e70d264cf5293222352597bf0c2a773e9925822a101581e581cca3e553c9c63c5f777eddd432f2063462cf96a64e586148173444979a10082825820036dc0c351864d35c3d8fb72a0acbd0efb343e005f457b0a845e3a82a48bdaee5840ef6eaf7713acfa2623a589e6a5764f93fff7bed4d60d549a0e47d985bcccdba34e79b6954768b423db879e99be2ea205a151974072734fa996d240dd876bf600825820f2acaa3d79a96db3f8cfbf87792b62e39fa74a4d9ba1bcb82ba1562ca524c0a158406da704b6ca04547fd8fda632d1acdc1f15c5efaaee4e1d92f35c714b59148511828870dce8602de968da15263e938c6f71b37e6db9b47fa217661efc1c117b01a0 \ No newline at end of file diff --git a/hermes/crates/cardano-chain-follower/test_data/alonzo.block b/hermes/crates/cardano-chain-follower/test_data/alonzo.block new file mode 100644 index 000000000..bc6359292 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/test_data/alonzo.block @@ -0,0 +1 @@ 
+820585828f1a002f48941a0295f18a58206fe2f80eb8cf6ad02d3a3857391b05aef41e575486168d7c36ab693be24c76f25820e7c3155586042372b19c1fe0491b771bfb2eb04f24af76f3870cda983551f4e75820d8ae2a59f1ff6ec33d0df8161fd89d820533b9580a4e43f4e9f6a628582b10ae8258402b498e5bd3f73130e1b7e5ac199fac1a688d948d73d71ec7a951913a6543131c44dcdba5215341b1dc2581c096e99fcf5885f42a9cdcf476322b38cc837111b858508f08dc2bc1e3c6c8a3e6f7f5f1d7af8d2ad33c4d31022f4777641a946938ba0c3af0f78076cae00ab741f4d39dec2be431710acfe55d2b3f5868b24847506b77fb42d639e95bdd025f5f406abec9240f8258400022f7e10e560aad60a6f16b743ff04b4abcd4ba9b572e02c67bde6defd29290a9345b3c1b28ee1dfdadf631a3887bc21807ec5bcadeb6a495f3cde7cfc0b1085850e6db5933067747401ea665a7d8fdb5a0ae131bfa757aa07e2b3fa619e0d94245233806ebf7340826f076f6ff62fe4600e427643ed77e3f02be7c39370c7d46f266e4f5a23c2cee05735e2152b1a73c0e1906965820c48e87eaae5983daca6d2611e5b45a09c4a8300ed2e36f747058761b976c2303582060ffa1e3c1ab6d03a5447d2f40ab023dbce45b13f0e372d63a964d31c7ee60790619014858405915c6868aa7c19b007464392dd4878f710c033e83d1421e188a993dc13a66c3bd60454228eb8105a3d37509ef0302633e42d4b20f86003a83b25a87b8b1ae0c06005901c0eac3f1484e8d6278c81251ce80767faa39153ae2c509795065f6859d87e5ca60356f2e0528e26441091d5fe855b430659f8c113b8e090ac7b5dc2a3f55811b09ff956db28c653766bcd95ca7ed09a8e0c744b75c4cac7b48561ad922978a866bb9014ce731cf098a346d58fd9602b5c712c587cb4ad2d31fc3c869b1d68fa3eb53c94453e0fa42c15686ace90df691b14f1372eb7e86897c0f22d26ae043b105978b6652d4144c7a3c5ef9b61e7d46403acbbf158075e31e4d45969ec968b62a27c05f4c2c8448da438d9b2b0f98a6df9245326476310ac26b164ff0b40e3b7e1f05c38d227f5b9ab87d82c7b64af3351a636ccdb951b6445f7909e56507f301b267d9780335863b3ed7d3ba16ebca3e9b77bc3ddee15436279b3a33eca8dc66a87b2864a550b003dc43622aab8183e891780ab8cd56fcd4ae28775ad6b69a786e19fca6362905d65d92f3b59f8259c1e1f52a8fe125d29c3dda7fcd45c7c71eff039986ce812e9a1f66f6795f53857ac57d32901f6a1992e42e7c8d7a942d25f77f46beb8e4cf801c80372c958face6b1a9dfacc38182310d66ad4816a08b329451c088889a2cb62fedac1944addffaaa0a8dc5cb54b
2974b70411226918b8285a40081825820bf1f12a83095ac6738ecce5e3e540ad2cff160c46af9137eb6dc0b971f0ac5de000181825839009493315cd92eb5d8c4304e67b7e16ae36d61d34502694657811a2c8e32c728d3861e164cab28cb8f006448139c8f1740ffb8e7aa9e5232dc1b0000000450b745b6021a0002aeb5075820f607381cf971f3ab1119ad680f73bcc66c8d8d30136afbf82fe05f44f7924487a40081825820df4ebe9ac3ad31a55a06f3e51ca0dbaa947aaf25857ab3a12fe9315cabec11d30001818258390076b0d16f5d09ac02dd1786981066f6fedf7ac165a08b3b6f0fb33f039bf76872ce2fc9debc7c431ce4700ea060a5aefdec0a173d8ed6b4261a02fe430f021a00029d590758200013eb4278b47fcc6298f02bd42b31c919cef864dd20a9929d5eee315e13a557a60081825820087138a5596168650835c8c00f488e167e869bd991ef0683d2dbf3696b0e6650010d80018282581d6095ca37fc71b3b73f2d0e57258ac66857661f33a509436c10917aaf6b1a0022e4be825839005790dbdd97b76eb273e290122d6edb7504ed392c7ea7b011bd25d936719b6905b4122f96283e50a0dad9a9e577553ad49a17bbec3cb715181a2deefb98021a00028f6d031a02960d5d0e80a80081825820cc9f28625de0b5b9bbe8f61c9332bfda2c987162f85d2e42e437666c27826573000d80018182583900be8bb38da7b499acdb1eac0c05dd2649c8de5d791ac87969903df470244805c91110d844e9f4a776a5f201c71313358c13caaa3ee7b488e6821a00bade29a1581c6d566ad1e649b6e86a2f4fa16a4cdf99616230b78742332cbabc5fa4a140183b021a0002c959031a0296183b08000e8009a1581c6d566ad1e649b6e86a2f4fa16a4cdf99616230b78742332cbabc5fa4a14020a30081825820d0965859ce9b3025ccbe64f24e3cb30f7400252eb3e235c3604986c2fdd755db010182825839000e87d178321157275dd6a10f9c40ead38a78ae4703a23a23be57f1cad723da7bf8787357a74b3fa486b13462578ecf6ab6774dd4f15043681aa0eebb0082581d607c4a71a51d0c7400a15a748fa2338f20c8386f1ed4ebc56c2ffec4b01a02f59afe021a0002ad2985a1008182582073fea80d424276ad0978d4fe5310e8bc2d485f5f6bb3bf87612989f112ad5a7d5840abe07f7afebbcfd1816e2f70b66cbe2a0be6a46db86fa783949e4319e203e8faed12930489841f287dde09d00c5d1ed15bc4d47ba1e1deba2bf59350fcadec0fa100818258209dd11e64a93710854fedf85f1b4b23192079b812b67ac3ee0b97bdb11113806158404cf3a0c85491d39ef8ef91dc5b34107da610376301b53ebb2a7ce7c2b8397f6871890f1bf875e8f964146dd
7107ddb89b08c343e0d4561481ff5b9e9b7f3ec09a100818258200530deb7fd4edb6b32e312806c7231d3c507f53fc27d5894c14a454936b16a435840369d46b7c720cbd253327c45861c0be0c741babd4b82a29c42c531fbc87562e40496934689289162082eeefe583e3dcd89aeeda671457d30bc21e5f9ba2b8e07a200818258208b5a5251b8a3f1f1b76e38377b3cf857d38284cfa48a2eee46c93df89221e0275840cfc689b9ab1ebc2dcec3f27fba6448b610b4bbbe46ec430c7e9e86ab0b454ca9b3b08efb6e843783f860512b8f2aaa142073766a7fb43ab6ca7ac5ac426f680f018182018282051a02b5a9468200581cbe8bb38da7b499acdb1eac0c05dd2649c8de5d791ac87969903df470a10081825820a87e24f3f590c98c9bc1f34e11607db0e04446afb3b02f3201b8a6972e50512d584018f04cd87d600d8fbd03d1292e4e16cd3ee4f8fc3cc6e504216373e1a8bf8826e17e365931e988e025b2345a390e9e6bbd78f0b0abcb24e0d238df7592570b07a200a11907c1a56641444142544381a266736f7572636569636f696e4765636b6f6576616c75656a302e30303030323937326641444145555281a266736f7572636569636f696e4765636b6f6576616c756563312e35664144414a505981a266736f7572636569636f696e4765636b6f6576616c7565663139332e39386641444155534481a266736f7572636569636f696e4765636b6f6576616c756564312e3639674254434449464681a266736f75726365667472657a6f726576616c75657132323637343134383233333435332e313101a11907c1a363636f3281a266736f75726365664d485a3031396576616c7565633438306868756d696469747981a266736f757263656553485433316576616c75656439362e396b74656d706572617475726581a266736f757263656553485433316576616c756563392e3480 \ No newline at end of file diff --git a/hermes/crates/cardano-chain-follower/test_data/babbage.block b/hermes/crates/cardano-chain-follower/test_data/babbage.block new file mode 100644 index 000000000..aeb4df9c1 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/test_data/babbage.block @@ -0,0 +1 @@ 
+820685828a1a002751831a0409c74758208f359d2ca9a3e513b574cc88c93346506e2520f9939eadfc2e02e6c251a94769582069b5785508a9898605833a7c346c9fa294eb9e6793e0887a0230a1e026ebd1fa5820e9fff58586a4a588f15f371b4d7bfc04344b6a281bce39a7ab81fe5b6751cbac825840df4ca9e1e98212e3bfa0356274e63890304710237e92973de89d8dfba015f3eb8e925557cadcb70031cd644fe84b043f260e81342bf865d811409ae2aefb528458509aab92e40a5c75252e190d6860fbe0274ee6f7de16d12256df1dbd1f08b0f835fc0f333395d8d94464e5181e72c3fd935573005f03656191e9e95855f8fbcfd3456a6c268df905c805c9e510dff0df0f190c505820e6aebff5e8b2049914014d88821ef94b873b219ea32b44a50161b7bd2442deb5845820356e1f77552adeab6d94c2383cb4f7a5fb1b644858a6639cf9f71da339ad1e17081901ff5840fc25623ba50cf9f2858463dca7a3b1c703b97bd49d54e965d2df9602424986bf26e29ab4dac9b82f185cb825cd4a0b6dbc90374defd9ca20206cd25f47dc01058209015901c07d47377a4d51a9ed20b203308e73c500db49947189f6c92e951ab0d24b131d20d0f157fdf10a90aad4c8c6173dcd8d16b834ff47f1914556ae14b8015d7edc0ea0c33c70c7867e06a2952efe9066ceae6e1b9b23c59c76148f01917099657153af53c0cb5bb0a601d6e7e504033c2522fe069dda3a488d189032fd4ff59273526b263cabf7966326054aa26bb3d3e24010aa1edec3a545c80a1e1e357b0bea164c6663240e865cff3e29d30862b99f1ce52ca042f5d7d065bc29cc7d4ccce45acce31cc0da588c4ddec1fd4e3444ace1bf583df247ba168e0375862090fa4f49fa95fd28df6ce88f72e303d1f9cd4353abdd2d1a4472164d15220142e922b03eb6657152f340cf460514bbe57e892b81fe633d395773c00b91025237ed84ef5c6541d6bccb5a56f736b294fc6500aaa9e958c7b41bd422200545efbf0e5eb0ff95fc2394ab22cb543854aab426f60ab5dc04fa9ca3d9cc71a2abcf00c1ef8287f9bd9f7508fc5cc6a4513faac33b05b06304883ec50acb9c1ee3033604ce4eebe75beba7e97a731f92c6e779ecf700c3220ec992752b49708cab9d9a079803a01832cb14da7c59d069c4538cadd38c90b6502ce3b311424eaba7676f3dbc720b84ab008382582022d427bf2262a44dc3a3a2c9555696a741d779eb89afecf624e91a1ebdf3335400825820533e9251fd0ed7ac8613fe3d05813ce3bde7d96a6e446019a93421a0c6b785a50282582090b06350ad57403e8dcdb49d4f9fad5de1b244668fd5a0c98cd08b78d5f343b3010183a300581d704e3b75eb68829ca9e63b541f3645f78afe8d1
19932a7a2528171e74901821a001e8480a1581c5cf48c225e057f8da0787eae1e2bc62a1cf1d2ee34c7e2a9498b19b5a1484e6f64654665656401028201d8185840d87a9fd8799f581cba4ab50bdecca85162f3b8114739bc5ba3aaa6490e2b1d15ad0f9c66d8799fd8799f1b0000000da0e77c201b00000191449a86f5ffffffff82581d60ba4ab50bdecca85162f3b8114739bc5ba3aaa6490e2b1d15ad0f9c661a0089544082581d60ba4ab50bdecca85162f3b8114739bc5ba3aaa6490e2b1d15ad0f9c661a00ac398c021a00044c8c031a0409c766081a0409c6ee0b5820dbf505c29f869273f15ea579723e9ca9bcf31fdb8c4cb4e4085268f3714b99b90d818258206f8ebcf2a2b04f5cb5f977aa1e41001301634e64a532a8de1e98147cc84e6502010e81581cba4ab50bdecca85162f3b8114739bc5ba3aaa6490e2b1d15ad0f9c661082581d60ba4ab50bdecca85162f3b8114739bc5ba3aaa6490e2b1d15ad0f9c661a00524801111a00370c3f1281825820efd8a48f69e5270cb73b438067e1b566d39758329a45e5d6d36755f2c732618400ab00838258201df5c710d3251bea8457e05813101bd7f8defaba978114939ffa38035d42672a02825820e04bce69a5659295c235502d5fdb62b29e43f773e698c97e533e394d92d9228001825820ec4b6e2455e84a8aaa365cdc4c41ee446314ea74c209e698f1785c94c9952910000183a300581d704e3b75eb68829ca9e63b541f3645f78afe8d119932a7a2528171e74901821a001e8480a1581c5cf48c225e057f8da0787eae1e2bc62a1cf1d2ee34c7e2a9498b19b5a1484e6f64654665656401028201d8185840d87a9fd8799f581c3f2728ec78ef8b0f356e91a5662ff3124add324a7b7f5aeed69362f4d8799fd8799f1b0000000da0cc2bf01b00000191449a8523ffffffff82581d603f2728ec78ef8b0f356e91a5662ff3124add324a7b7f5aeed69362f41a0089544082581d603f2728ec78ef8b0f356e91a5662ff3124add324a7b7f5aeed69362f41a00a1cf51021a0004529a031a0409c766081a0409c6ee0b5820ed60fdd0e4e9cc43b5fa9a601a3d013c6a8f26916ff80e9b5c5a41813493f04e0d8182582040d412944923070bbdeca4887e30983b497e455e62a150209d522c4f2eec82f5020e81581c3f2728ec78ef8b0f356e91a5662ff3124add324a7b7f5aeed69362f41082581d603f2728ec78ef8b0f356e91a5662ff3124add324a7b7f5aeed69362f41a00455948111a00370c3f1281825820efd8a48f69e5270cb73b438067e1b566d39758329a45e5d6d36755f2c732618400ab00838258203efd5491b62cec43b8421eeb9978d9544a451cf938b454278dabfe508a867175008258204d1deb722
bdab493e69840eca2fbb2af5eed36a82d2cd196754ce4af2618ecb902825820f1883a3368b6baa87f5d051d3e7fedb0c571519333f38860b54ec7b4dec3253e010183a300581d704e3b75eb68829ca9e63b541f3645f78afe8d119932a7a2528171e74901821a001e8480a1581c5cf48c225e057f8da0787eae1e2bc62a1cf1d2ee34c7e2a9498b19b5a1484e6f64654665656401028201d8185840d87a9fd8799f581c17942ff3849b623d24e31ec709c1c94c53b9240311820a9601ad4af0d8799fd8799f1b0000000da0cc2bf01b00000191449a8522ffffffff82581d6017942ff3849b623d24e31ec709c1c94c53b9240311820a9601ad4af01a0089544082581d6017942ff3849b623d24e31ec709c1c94c53b9240311820a9601ad4af01a0073c968021a00044c8c031a0409c766081a0409c6ee0b5820dbf505c29f869273f15ea579723e9ca9bcf31fdb8c4cb4e4085268f3714b99b90d818258204c6d1cb7cba6122de41fb91be0f78b61316399096d6f54bd7e3f2f76dd63b92e020e81581c17942ff3849b623d24e31ec709c1c94c53b9240311820a9601ad4af01082581d6017942ff3849b623d24e31ec709c1c94c53b9240311820a9601ad4af01a004df567111a00370c3f1281825820efd8a48f69e5270cb73b438067e1b566d39758329a45e5d6d36755f2c732618400a50081825820aa7bbea2a5101b867d969ad7e6ff7d95d440199ef4ba6621751d25521566101400018182583900eb21979c03eed7207020b2a0b47565b5aafb1a2c3849b59a1fa8e6c5e075be10ec5c575caffb68b08c31470666d4fe1aeea07c16d64739031b00000002525569af021a00032471075820420e72fc38474ce192fc9ba39380a11ddf9e5ac96a814d3fdb68c24cbf81e4600f0084a200818258209a12ec259e59bd8fc60244062f294f11555539f5fd45e77d8a57f16b5987dc4458401944ed87d70eb6646aa840b9914c462b1e4e3513831d1f5ec817a6966d00c0b6f12bf24b0576ea9593ed4f9c9dac2f0d9e15473088e64f688f3dea4416dbe90d0581840000d87980821a0012a1311a15d0fa8ba20081825820499300f082db931c561f42b9ca7d5ea4b288ae74cf1969d1fbd7bf41e7b02e8e5840504c78c1fe246730ca7c2469ba6731cf7e0aff42bd027091283568fe25d2a9f35da1593fb89e5bfd2f7d2db58316ee86ed1c8380e8fb77d61362c78f64374a010581840002d87980821a0012eff61a1622a0d4a20081825820ae01a85ba5eb8e67830cf6b422cb819cac1eaf0c7f62dbc496f12d9735b170d7584004dd05c7ea8b1ab964f67bfe4515e18451b23ee7950c8224f9b355aa1005ba1c7051951e82883c9cb25be7ada388fe2b708eaa6d9f1590516f2782f8b5f
f20050581840000d87980821a0012a1311a15d0fa8ba100818258208469288efa6f9cb49040b43dcff93f40969d72433f751afde50235012c01602058401d84bfa159ac070a11834e2a41b38f205ce1628ccdc693fe2000e21c680e6ead1a23a23043bb99ac1de8460fee2f5b40d3a736566b9434d1b551b115956b5004a103a11901fda50050ca7a1457ef9f4c7f9c747f8c4a4cfa6c0150152a368b96d95f61ae69513dd5f6e92a0258204d3f576f26db29139981a69443c2325daa812cc353a31b5a4db794a5bcbb06c20b8c58401b3d030866084fcb259de07496d3197e913a39fd628a3db0a4ed6839261a00c51cb0a5b9c16194064132ace375ea23c75c60659400cba304d0d689c00086195d5840ff28714da02c35e7295815ba58b77f227e576fa254c464e2f9c6f9dfa900a0208250033c054a468c38e08819601d073c034a4727a524ff39995477443c1fca235840839c927599b253887f50487c1caf757c0aaf79bc3fcacd42252b8f2ae1f1a8b282929ca22bb5c2885cc23a66005c0cc1ca20142b82310c3a137d44c1943e40995840a7a7ce5c3475b5887a3765ede2ff3b7bfea90f255e2edf37fd44e27f26b8e6cf408aef4b20bebf7257b3dabc7eda65fff4ed278b50219f0a52367ff5b80e46b758403875f55a394d17a5d9a6b1a1deff5b2206e9e9734e9fbefa6a1cdfeb7a104546dfb6e46c46feaeb65a7f4648c276e29e87b27bc053bffef79359300220d0c3875840f2a05cc4880317358e19c758fd9ab9917551ce3987af2e35d73b6958a0f5732784621b0c92f68a93537f16f48445424890f955d7a597c13c2eb54a82b39f0307584097507df5fef916fabb6dafdfb516fb9184783e2cb4e89d048a6c1e5c04818bdb76ffb5cbef1fbe452658d904cd152ee72a3bfc6efe1199fb3b51f1979629cd4e5840fdb7df511723d4cead3d2b2eb9c1f18cbbfcf9f5cc8eac46dc03cd55fcac3303c391437f50400923e65c02e981af5461b6867a47fb25ebe9b0fb4d9e41ec210e58404b9011000206414523c0990f9ee20b5d8a745393d3febaf6413a448b994f1567eb7945df7a0ab44afd55561e0190b376d411026c5d7a4a49a19e0bd3f5addd6c5840492fde46eee8d75b587286291dfeb6a78fdf59c1a6bfa2717b1f41dfa878756140ce7c77504b64b094b870ade78569566eec66369133af5aa8c8eab9f95e29df58409ec10be251547101b24c495c8ff4fa55378dbb4a5c6e89b18a12ac033343d61c3b7f5fba725b51536d92a5cbfaef9be6d24a3e5b3d75a1c0e29e42f523567fac4d0f8200811c822d2210b97f5708186358407b322d37df11460b98e13f6c3c4d5d4985ad984768d09f77516e8e0f61ed24e646c466a995c2bf2b547302b96d4582be9b65f
8d52f9fbc4857e7bef79948860180 \ No newline at end of file diff --git a/hermes/crates/cardano-chain-follower/test_data/byron.block b/hermes/crates/cardano-chain-follower/test_data/byron.block new file mode 100644 index 000000000..2c26a9a3a --- /dev/null +++ b/hermes/crates/cardano-chain-follower/test_data/byron.block @@ -0,0 +1 @@ +820183851a2d964a0958208b5add21497fa1749292318f1ac96a252ee58ac9bf60d321d1273f5135e72f2f8483025820eba8b3720ab760171e4900b88cc0379bf6c78c4dac55d1dc3d646b00c13515405820187817e2ba9ff807dd905e3a4e0d061630979da49071495b2b38201f7ffbc97b8300582025777aca9e4a73d48fc73b4f961d345b06d4a6f349cb7916570d35537d53479f5820d36a2619a672494604e11bb447cbcf5231e9f2ba25c2169177edc941bd50ad6c5820afc0da64183bf2664f3d4eec7238d524ba607faeeab24fc100eb861dba69971b58204e66280cd94d591072349bec0a3090a53aa945562efb6d08d56e53654b0e40988482189619056558400bdb1f5ef3d994037593f2266255f134a564658bb2df814b3b9cefb96da34fa9c888591c85b770fd36726d5f3d991c668828affc7bbe0872fd699136e664d9d8811a00316fa2820282840058400bdb1f5ef3d994037593f2266255f134a564658bb2df814b3b9cefb96da34fa9c888591c85b770fd36726d5f3d991c668828affc7bbe0872fd699136e664d9d858405fddeedade2714d6db2f9e1104743d2d8d818ecddc306e176108db14caadd441b457d5840c60f8840b99c8f78c290ae229d4f8431e678ba7a545c35607b94ddb5840552741f728196e62f218047b944b24ce4d374300d04b9b281426f55aa000d53ded66989ad5ea0908e6ff6492001ff18ece6c7040a934060759e9ae09863bf20358400e663202ff860e5a1cc84f32ad8ceffb0adb6cf476b07829e922312e038ba23573728e02f3775f6714b3b731f8b8084d92b3f38f51a41ba859e7e700feeeab038483000200826a63617264616e6f2d736c01a058204ba92aa320c60acc9ad7b9a64f2eda55c4d2ec28e604faf186708b4f0c4e8edf849f82839f8200d8185824825820da832fb5ef57df5b91817e9a7448d26e92552afb34f8ee5adb491b24bbe990d50eff9f8282d818584283581cdac5d9464c2140aeb0e3b6d69f0657e61f51e0c259fe19681ed268e8a101581e581c2b5a44277e3543c08eae5d9d9d1146f43ba009fea6e285334f2549be001ae69c4d201b0000000172a84e408282d818584283581c2b8e5e0cb6495ec275872d1340b0581613b04a49a3c6f2f760ecaf95a101581e581cca3e5
53c9c63c5b66689e943ce7dad7d560ae84d7c2eaf21611c024c001ad27c159a1b00000003355d95efffa0818200d8185885825840888cdf85991d85f2023423ba4c80d41570ebf1fc878c9f5731df1d20c64aecf3e8aa2bbafc9beba8ef33acb4d7e199b445229085718fba83b7f86ab6a3bcf782584063e34cf5fa6d8c0288630437fa5e151d93907e826e66ba273145e3ee712930b6f446ff81cb91d7f0cb4ceccd0466ba9ab14448d7eab9fc480a122324bd80170e82839f8200d8185824825820e059de2179400cd7e81ddb6683c0136c9d68119ff3a27a472ad2d98e2f1fbc9c038200d8185824825820adeb5745e6dba2c05a98f0ad9162b947f1484e998b8b3335f98213e0c67f426e008200d8185824825820f0fb258a6e741a02ae91b8dc7fe340b9e5b601a6048bf2a0c205f9cc6f51768d018200d8185824825820c2e4e1f1d8217724b76d979166b16cb0cf5cd6506f70f48c618a085b10460c44028200d8185824825820aaca2f41f4a17fe464481c69f1220a7bfd93b1a6854f52006094271204e7df7c008200d818582482582089185f2daf9ea3bdfdb5d1fef7eced7e890cb89b8821275c0bf0973be08c4ee901ff9f8282d818582183581cb8340f839cc48449e2ee6085bf1ab6f152fb20d2e071f429085bbe13a0001a5bf6bee11b00000011972734888282d818584283581c7ed455da1e6e026b35204699d4fa39b3bc9dc47ed27d36d2b47ec3eca101581e581c082dcccc3e51655ddc77c01953f7b64c7cf6aca758c686d9c51a32fe001ab116b6251b00000005bd6bbf308282d818582183581cc2a3e4bb135f14d44b5659fd3cac5f950e7515f8ce243f264dfe1befa0001ad2e633181a84dbd7408282d818584283581ca492c5ad6316f8e7e84177673acf32f53ec25b2baee5a52ff50964f9a101581e581c1e9a0361bdc37dd4b96893a363943b3fa8e2e89b8761c3f0ff7568bc001a600064881b0000008cc4df8c118282d818584283581cf73d2b9c66ae7c8d479151c7e43e2fae59fba02e7108ac4d643a4909a101581e581c1e9a0361bdc37de543987ea3cadcec1c18fa6cf784794fec37146f34001a7103195d1b00000004f92e889b8282d818584283581cbd765f3097754bda870653533855012a82a22cedf508afc97a05f912a101581e581c1e9a0361bdc37d9d5470dca3256d2b36da565a1be6fd6c319321d8c9001a743764aa1b00000011b52f1056ffa0868200d8185885825840b43ac73cca84eb7baa30f0ad3c4d427ea43e8d8831eb8de34ae9994bc6f00313d81f35d51d695e30f88b267572f96f056ddff1b39acf011b8227c14df833371258408a0c1d59791fd04e6329adcf532fe2749a8da282ca4bdba9942a6acea07b3e4d9d30cc0dc
d9a602e151694a417d069fdaa8a3bbbb2e1724b9c9671850498430a8200d81858858258408a3c3f38a7a598bd4b62fa4b56f92b96be3d1b7f1df1a713a3cb4978f835396f935c09b245ab312efda634d3ac4af1f843fb0577725a27579c9fd665d14c24fa5840a27f622daaf26f1047845723f20a13247ce866005f5263397082657a80d60095d81856e127e27e2fe36391c1631170ac630d3e340aed71c9cb8c323eec148a058200d81858858258409322bcf07526e852e5a0830d7c24d4a2a0dcf8a894f176e0a8fe3f742fa485906206283cdab4fdba8280fdc51e818ccdd1e9e6cd530381b18152f38dc598b6cc5840d0edd562447c433cc7870913fb8d90a2169d4a1f9aae8695eedf07e0eff67213fd7c339f03860ecd83ceed1a324aa10067671e4ee1d3f762f355678dd4d0ff008200d8185885825840ac4a10ea86a183dfc8ab8f9ccf7a61eb9b11e471b53321725bafe403bd101abdfac85680f85d72f5462b4abf234759f3d833f7fe5372884f8a84475f4989edbd5840181183c6c147ba34c9f39269ab36949d0b1ec0a7dd950afa1ea2dd27b2d691412948de68f93471935905edf07934a7965926f52f8162841299a55246a5a97d0b8200d818588582584004bff1f0caaf06b7942f52218c1b8a1583f425014ec78ff2c4030f90dfe191ec861f537833d47898185d34746877e9d9d31cb827c145729d1200c1463625b22058401ffcb034120b9b20d84af9baec86bb0b2593d804e8b544d7dcde662b0e2a3da4b4afda77007a4133a9e0ea5306a92edff4fd82a14db371407e6a67bbc644250b8200d81858858258408a3c3f38a7a598bd4b62fa4b56f92b96be3d1b7f1df1a713a3cb4978f835396f935c09b245ab312efda634d3ac4af1f843fb0577725a27579c9fd665d14c24fa5840a27f622daaf26f1047845723f20a13247ce866005f5263397082657a80d60095d81856e127e27e2fe36391c1631170ac630d3e340aed71c9cb8c323eec148a05ff8300d9010280d90102809fff82809fff81a0 \ No newline at end of file diff --git a/hermes/crates/cardano-chain-follower/test_data/mary.block b/hermes/crates/cardano-chain-follower/test_data/mary.block new file mode 100644 index 000000000..7d71682e3 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/test_data/mary.block @@ -0,0 +1 @@ 
+820484828f1a0055b4ac1a01a1eabe5820e9aac7ed907b246dda5548cf8d78269ddc5006368eac3d6d953800e4531ba7855820cca475e401893077582e74e5f694e741e5ec78df16f5ca4f979a2f297d02c54e58202a20b39a975102c19ea1d145df120579a2595ce6d696753424b13eeceb157fdf825840ec7d0f21df996a302d64f5700d24c11595d95cf30e755295575d9a33bdc9e0a49f777c501e0018161595b8d417e2ef3643f0315dc5f1ff63a2806a7c5c635eb85850aeba5ff94a58858ba358c540ec6fc7cccf4e55af1b45ea0b6f67a20d5def696d3d42cfb1571fe6b282bba6480d407b933d90228f4d9a5a1aa1a7da41e4464e5a9163c8a4ea5b1c95f41dffd9b905680982584000063af76ceade8a17009a28ae666fc90d59b42e0c0a37d4f4537755a66bf2b76d8b7dac1b8bba2b09a5784ca6878d95cee62b6e9987cf3c8d7d22f71526313f5850e584c25e328ce17b61038b33295e1317bf1b0b6b193a9e6f9d9ab455047dd6b5163db4d0a54de492e61a5fc36757880556369522326b2439e14ccc7bdf78eee3f0edae0d2900c38910a0d64d8f8d6509194c4958207cdc18ac07604d453229464652123b6b51c137abe9279e2eef9c0621d504423d58206dc1e500ee69ee4f02d67254ca3fb8ede4c592f4dc145ba4a9a001225913ad5d0418af58407d82457cde034555f832f7a2e01623b0b5f519d54b643e87add08cd6192a161a05dd0da2979d9b547ffcff442372d30e1df19e3c2e5179e4a04de4931fc1430204005901c0b37b5e3b093ef3082bdd548bd5f931f6621a2eca32a6358b38a42a296f2bd4c5600839e7e504fa340812e45337065007fcd5e1e41e1c4ecbec52c33895d187034b970683ab069478692cf6b98fcac79530c2c9bfe2295203e228a8809ba3f9208084a984e5588274ed0a26c6942ef729df8faaf644f7173c68ab986bfdc20f6108d7b3586c7bca66c5e9caedb0b021a23f2131b8f37a030be68e56d52761d8be70493151d0c6c1905e33701b8e7d1ada6a08e4fb410c8c5fda62fccba93d5da1d01477969f12ed67e981961b1a3cbd6cf981bdd593ff28fdb753a5f11ceb62621dc2e87472def58d90742c8a26b5b64573cbbfd32d317b3d77e08c3883f75b148af2784e0c1b5cc84b5b3266cab7bda13c3a893e05ef85d8b063ad239aca2c6018c10ce411aa8bd96abb0be37dd3b629d4aa4ddcded309a6a4cf8e1771aa97b943ffe352719145859c78e07409ff8893802bc1e492486566892c5c48b508423aee0890b940bab42014f9f6d29165a1fb8b53098f995a45e112a0bc66d3d9ce11ddb31739878295fe07db1e66b425cc52a5d73e9088ac1cedf6f3cb832b5fba8ef878cfb5e61813d2d56c6219ef54e2ce0f29c6ffae6f4a7
76a2ddc70a08be9d58ea50081825820790feaca64576990e3b2ed9cafdb8d2b874c561019f767827e685164577974b30001818258390111aca4f4fe84f5d89a30e8bd1697b52d0620c6b8e2a243c07e226f58f2971be702006ff49954fb5c064347451746ffb7e619bd2774caf59c1a3b7ca69f021a0002ac21031a01a2068e048282008200581cf2971be702006ff49954fb5c064347451746ffb7e619bd2774caf59c83028200581cf2971be702006ff49954fb5c064347451746ffb7e619bd2774caf59c581c024dcb42f0aa6d81a7e26ccdd525a2ed3e9665d126b38ba0f8b77b50a400828258205ed7d5280841f21b89d7d4885e0f4a3fec1d0d05856b68735ed5982677414a9603825820792c27f11f603e2a580847d42f4efc35c23492ae44ae3fd4a5f0deb7bcd8143803018482583901105d5846c4d6054a032092ab231ced9d9ec1cd381c685ecdb15a18eb105d5846c4d6054a032092ab231ced9d9ec1cd381c685ecdb15a18eb1a05ffd451825839015a06e23444ea5af6480d86f01d593cd2c061a1002db515a36df3fbf05a06e23444ea5af6480d86f01d593cd2c061a1002db515a36df3fbf01a126cab0282584c82d818584283581c4bb5beb1813d875ef62a2d8fc47674812a361f1834994e9cd5cbf0eda101581e581c9b1771bd305e4a22633e79a9257cf2458a580a65a4647d834268213d001ae8f33c161a0c66eca582584c82d818584283581c2a224ea8eb307e51dc1c9d3287dd5b73fff36ed2f318d34e4fe73204a101581e581c9b1771bd305e4a615d497aa98e87cc5b8bc918de283f0d9fc265d865001a4954b2701a25f6df28021a0002e715031a01a206c4a40082825820b34f463a8a91f91c207cf3148cfb253b2795eea8643b89281fb1bd043bf35cc700825820911ee5b314df7fa744ea47231bc42ddf3bcee2c910e19fdc3ecfa087912e78a101018282581d613dc0992a980257fbf7e6ae1e9b43af62932885f1e119279f1ce43f511a1a18ff2782581d61d91ef01b73f3010bb173945cf5417257c00c002715a13052015ab54f1a08655352021a0002bf35031a01a1ee84a40082825820861d31b11b125f78a88aea42a81a5463cec8bba58f6671227436822cf79506970082582017949570550c96e1d58abb5a4f07861ebde9472de865e56f0bdf04ba5165e86d00018282584c82d818584283581cbdcd97d67a94fc9e1af08ba34fd47405631a62362482b5c63aefaedda101581e581c7111304fa69dc398fa2055a1b012921e540374ca44f739beb09131c8001ab1e823421a03ea796c82581d610237be10f5ec0ccb6cbd226b112f0940fed44ae0466d9b53962ba8b11a2381e19a021a0002bf35031a01a1ee84a50082825820f74dccfef00ea
5dd7a5360f4870de9f664bcbe8c0bf4611ba4d8f5dec687cc9f00825820f74dccfef00ea5dd7a5360f4870de9f664bcbe8c0bf4611ba4d8f5dec687cc9f010182825839012250f08ab10f7bf12f49291e78527f35a4f66ebd03e66524ed9ac8dd2250f08ab10f7bf12f49291e78527f35a4f66ebd03e66524ed9ac8dd1a000f4240825839012250f08ab10f7bf12f49291e78527f35a4f66ebd03e66524ed9ac8dd2250f08ab10f7bf12f49291e78527f35a4f66ebd03e66524ed9ac8dd1a85e84858021a0003096c031a05f5e100048182018200581c2250f08ab10f7bf12f49291e78527f35a4f66ebd03e66524ed9ac8dda40081825820900c833fd9754d4f65d7a1493ce62aaa943c887dc39921d0975976896bbd4e8a00018282581d61dfb778ad19859d22c8909f541b570dd4c386efb78d72173e4af2d2481a04c4b40082583901e683340376e5d57ab9af32493fc8f40c21682621432ebc85ffb4af4eee0df6c2ebfe5d3648def953480b811698b54c9a38164dcf0bf6c0eb1a2ee6c16d021a00028c81031a01a20663a40083825820750a3eab714b04bc4718f43407fee903b996700aa3b0e171f840602b608ce52e00825820d57e3d99a481cdc928a82d2a39b689f49a54044c6cd12239a8e5b2376536659b008258204316395f1f1421863c5faa3590d5cfa91eab9f8c5737afd0e6df8914b8ca5b5700018282583901b46bdb5502435a418fcc35273339c218300e0d633e4c532d98133907cb970842e16c849f65ffb4c0d88243289cf7519aa90bc57b3c3597a0821a00160a5ba1581c73ed4b685261a41c66ba62193572c3ce78652793df0a2d4a75c127cda144574545440182583901306fc9574d0f8c774fa8fde9b5d540e2d4f22c67e1ccdb862f8bc6d1f68419a027bf56bf179f07acdf872cd78e0d6ce129772028dd2d1a81821a002a804fa2581c36e8f59542d444e3afca00cecca881cbdf8257c1b07a2fd84ed25730a1574570737465696e4469646e744b696c6c48696d73656c661a000dbba0581c73ed4b685261a41c66ba62193572c3ce78652793df0a2d4a75c127cda14457454544195d5b021a0002b5c1031a01a206b7a400828258206c027d63a875e6ab52abc975594f6060dc8d8d9342a42899ff64e4b5a8041cc4018258207c1cc2472c9808612e77b75d2c253c9aa05d0a83cf1d72b929fb41cc2b8054ff00018282583901a9274ec0157902ffe776bca9a7b24aac3373044a3c71e0c27ec8f741a9274ec0157902ffe776bca9a7b24aac3373044a3c71e0c27ec8f7411a0e99550c82584c82d818584283581cfa4589909bc695cb0c7fbf23c37b5206fb180f4c5216a6d86c5caa40a101581e581c2b0b011ba3683d5a9b32802aa60b6f471a2bdab01
598f0fa5d3ee332001ae706cac71b00000004d4f8ee05021a0002cb3d031a01a206c8a500818258207b9c5b2a8da20bdedb61f836c802ddf361fcc363faa7f64ecebcd3a5fcf8936105018182583901d3b46b29180c65567b03fcbdf0b897f4d3e58ee5ac9c7eb7b7a128e368b887d2e2b58bbb0c238406b77270089d0f370d9e28ab87c57d29f01a0049a28f021a0002a8b1031a01a20672048282008200581c68b887d2e2b58bbb0c238406b77270089d0f370d9e28ab87c57d29f083028200581c68b887d2e2b58bbb0c238406b77270089d0f370d9e28ab87c57d29f0581ce811a4b2f8ef3ec84143e3026d706564bc1cc98dc199a305e0fbb8e3a400818258200cda8d6a05ed90f3618b9a1d69cbaa112d7f508df405b3f7da7e4e348011219a01018282584c82d818584283581c9db8ac183b28b32e9fc70bb83345ff3f02cb6822086bccf9b272a56ea101581e581c6c62a9e3a4d9cc731469d72e5e097859162fe94cfb1149f980d46e36001a2c0ef42d1a12d1332082581d61d91ef01b73f3010bb173945cf5417257c00c002715a13052015ab54f1a166f4db4021a00029cd5031a01a1ee84a500818258204254c7128386731cdb4c709cb10c15080f0bb25a2e00ebaa7c4ba808811a5ece000181825839017c8c932fe58987c54752b4592ae73a9fccafa15553174ca7397eac61193e0d9a2f810bec4a2632006bba910de6dafb246ff3f6829fe3c8f81a755bc688021a00029e09031a01a206cb05a1581de1193e0d9a2f810bec4a2632006bba910de6dafb246ff3f6829fe3c8f81a0058a159a60081825820cd0bd822e080672736f6e10af4053eb7f5b36120fa154dfe647ada2a2a68e08e00018182581d71a5f1baef9bf194a48068c2545d5dd58e93da01a5c395a2f595c955ea821a08e513dba1581c5d2c310ba30ee79a9139defb690af87a110444492c9caf4b2038e0f1bf5443617264616e6f4b69647a303033384643323236015443617264616e6f4b69647a303033384643323237015443617264616e6f4b69647a303033384643323238015443617264616e6f4b69647a303033384643323239015443617264616e6f4b69647a303033384643323330015443617264616e6f4b69647a303033384643323331015443617264616e6f4b69647a303033384643323332015443617264616e6f4b69647a303033384643323333015443617264616e6f4b69647a303033384643323334015443617264616e6f4b69647a303033384643323335015443617264616e6f4b69647a303033384643323336015443617264616e6f4b69647a303033384643323337015443617264616e6f4b69647a303033384643323338015443617264616e6f4b69647a30303338464332333
9015443617264616e6f4b69647a303033384643323430015443617264616e6f4b69647a303033384643323431015443617264616e6f4b69647a303033384643323432015443617264616e6f4b69647a303033384643323433015443617264616e6f4b69647a303033384643323434015443617264616e6f4b69647a303033384643323435015443617264616e6f4b69647a303033384643323436015443617264616e6f4b69647a303033384643323437015443617264616e6f4b69647a303033384643323438015443617264616e6f4b69647a303033384643323439015443617264616e6f4b69647a303033384643323530015443617264616e6f4b69647a303033384643323531015443617264616e6f4b69647a303033384643323532015443617264616e6f4b69647a303033384643323533015443617264616e6f4b69647a303033384643323534015443617264616e6f4b69647a303033384643323535015443617264616e6f4b69647a303033384643323536015443617264616e6f4b69647a303033384643323537015443617264616e6f4b69647a303033384643323538015443617264616e6f4b69647a303033384643323539015443617264616e6f4b69647a303033384643323630015443617264616e6f4b69647a303033384643323631015443617264616e6f4b69647a303033384643323632015443617264616e6f4b69647a303033384643323633015443617264616e6f4b69647a303033384643323634015443617264616e6f4b69647a303033384643323635015443617264616e6f4b69647a303033384643323636015443617264616e6f4b69647a303033384643323637015443617264616e6f4b69647a303033384643323638015443617264616e6f4b69647a303033384643323639015443617264616e6f4b69647a303033384643323730015443617264616e6f4b69647a303033384643323731015443617264616e6f4b69647a303033384643323732015443617264616e6f4b69647a303033384643323733015443617264616e6f4b69647a303033384643323734015443617264616e6f4b69647a303033384643323735015443617264616e6f4b69647a303033384643323736015443617264616e6f4b69647a303033384643323737015443617264616e6f4b69647a303033384643323738015443617264616e6f4b69647a303033384643323739015443617264616e6f4b69647a303033384643323830015443617264616e6f4b69647a303033384643323831015443617264616e6f4b69647a303033384643323832015443617264616e6f4b69647a303033384643323833015443617264616e6f4b69647a303033384643323834015443617264616e6f4
b69647a303033384643323835015443617264616e6f4b69647a303033384643323836015443617264616e6f4b69647a303033384643323837015443617264616e6f4b69647a303033384643323838015443617264616e6f4b69647a303033384643323839015443617264616e6f4b69647a303033384643323930015443617264616e6f4b69647a303033384643323931015443617264616e6f4b69647a303033384643323932015443617264616e6f4b69647a303033384643323933015443617264616e6f4b69647a303033384643323934015443617264616e6f4b69647a303033384643323935015443617264616e6f4b69647a303033384643323936015443617264616e6f4b69647a303033384643323937015443617264616e6f4b69647a303033384643323938015443617264616e6f4b69647a303033384643323939015443617264616e6f4b69647a303033384643333030015443617264616e6f4b69647a303033384643333031015443617264616e6f4b69647a30303338464333303201ff021a000bbda5031a01a4b0cb07582020203a2d332f99480ab1a242d20ab11fe4a3fe1deb7be47dd65d0605e260633709a1581c5d2c310ba30ee79a9139defb690af87a110444492c9caf4b2038e0f1bf5443617264616e6f4b69647a303033384643323236015443617264616e6f4b69647a303033384643323237015443617264616e6f4b69647a303033384643323238015443617264616e6f4b69647a303033384643323239015443617264616e6f4b69647a303033384643323330015443617264616e6f4b69647a303033384643323331015443617264616e6f4b69647a303033384643323332015443617264616e6f4b69647a303033384643323333015443617264616e6f4b69647a303033384643323334015443617264616e6f4b69647a303033384643323335015443617264616e6f4b69647a303033384643323336015443617264616e6f4b69647a303033384643323337015443617264616e6f4b69647a303033384643323338015443617264616e6f4b69647a303033384643323339015443617264616e6f4b69647a303033384643323430015443617264616e6f4b69647a303033384643323431015443617264616e6f4b69647a303033384643323432015443617264616e6f4b69647a303033384643323433015443617264616e6f4b69647a303033384643323434015443617264616e6f4b69647a303033384643323435015443617264616e6f4b69647a303033384643323436015443617264616e6f4b69647a303033384643323437015443617264616e6f4b69647a303033384643323438015443617264616e6f4b69647a303033384643323439015443617
264616e6f4b69647a303033384643323530015443617264616e6f4b69647a303033384643323531015443617264616e6f4b69647a303033384643323532015443617264616e6f4b69647a303033384643323533015443617264616e6f4b69647a303033384643323534015443617264616e6f4b69647a303033384643323535015443617264616e6f4b69647a303033384643323536015443617264616e6f4b69647a303033384643323537015443617264616e6f4b69647a303033384643323538015443617264616e6f4b69647a303033384643323539015443617264616e6f4b69647a303033384643323630015443617264616e6f4b69647a303033384643323631015443617264616e6f4b69647a303033384643323632015443617264616e6f4b69647a303033384643323633015443617264616e6f4b69647a303033384643323634015443617264616e6f4b69647a303033384643323635015443617264616e6f4b69647a303033384643323636015443617264616e6f4b69647a303033384643323637015443617264616e6f4b69647a303033384643323638015443617264616e6f4b69647a303033384643323639015443617264616e6f4b69647a303033384643323730015443617264616e6f4b69647a303033384643323731015443617264616e6f4b69647a303033384643323732015443617264616e6f4b69647a303033384643323733015443617264616e6f4b69647a303033384643323734015443617264616e6f4b69647a303033384643323735015443617264616e6f4b69647a303033384643323736015443617264616e6f4b69647a303033384643323737015443617264616e6f4b69647a303033384643323738015443617264616e6f4b69647a303033384643323739015443617264616e6f4b69647a303033384643323830015443617264616e6f4b69647a303033384643323831015443617264616e6f4b69647a303033384643323832015443617264616e6f4b69647a303033384643323833015443617264616e6f4b69647a303033384643323834015443617264616e6f4b69647a303033384643323835015443617264616e6f4b69647a303033384643323836015443617264616e6f4b69647a303033384643323837015443617264616e6f4b69647a303033384643323838015443617264616e6f4b69647a303033384643323839015443617264616e6f4b69647a303033384643323930015443617264616e6f4b69647a303033384643323931015443617264616e6f4b69647a303033384643323932015443617264616e6f4b69647a303033384643323933015443617264616e6f4b69647a303033384643323934015443617264616e6f4b69647a303
033384643323935015443617264616e6f4b69647a303033384643323936015443617264616e6f4b69647a303033384643323937015443617264616e6f4b69647a303033384643323938015443617264616e6f4b69647a303033384643323939015443617264616e6f4b69647a303033384643333030015443617264616e6f4b69647a303033384643333031015443617264616e6f4b69647a30303338464333303201ffa400818258206ee70506daa57d2de6544d9c0790beafe592a4daf124f8046ce22859fdd3c1a901018282584c82d818584283581cde35879ae16e897627eb20baf26be7e7bd325583bd17aa9c67713b0aa101581e581ce378ee30d5681449869d426d33e11d95dc7896854fefac8feea15d66001a7d335c381a08583b0082581d6179e67550b2ff311da1883ad0ccc6fb2bb7c75e5489acff735fcc68781a53a6fb32021a00029cd5031a01a1ee84a40083825820a396e38605b4b3fee941a69ca1736e17516be67d6b596ef7e854557cf918738900825820c9f12ee2577895339a74d286bf77134dbcca5f806dc955c3fa94567a0118cfdf0b8258207807540ef1577df8d3282fb18e157d1480535dd6f39648207bea759a28f3cdf217018282584c82d818584283581cdcfe05e2cb99a7e14c0142d453115ebc1ce1d00e68149eb762c47fc5a101581e581c7111304fa69dc3f1644da7a1db41603fa288f170bb5dc225f80da24b001aa9adf1101b00000002363e7f0082583901f765168cd378460d748f14d32cc462c62ef882f74bee0cc3fb38ecfd55ecd0372bb6dad0892a5154802adfe36be613d2c1690498059a8f631a813a2d36021a0002c45d031a01a206878ea10082825820e120d1e68b27660a9346947d2a3f79f5f702b66e58dfd542edc1443923ca89e058409c4e5759292092a773a2a14623f9a83799bd7297291f29e82cb552544765ba8c961dc12872067671e47fa141d2048a237407def2f6756cabfa682cca1c96960e825820b186a878bc2f6aae02a288abcbe1da7ebf6bd9125dc5a5e8e2addaf461c01a1e5840367d70340c36a58756fad5f2a25aef4317444b90cc96d5cbc7aab7c872c204ed7d3273f35fd75606a4b57124e2943e0ee87dedcd1f2c259e3d2a72684556e506a102828458201a66c53814b9b716280d25e928e28281984cecfa644a83cdabb5f2d9fdcdd5d258407ebe78a4009731b6ad5803147b086d7269c2727b4e21685de04f14b0b0a33b93159b167f180414d1696b779057ead145eb053f1bdd4a9a38948c5139aa4374055820deef140087f9cca73b50452ff9afe31fa179959cf7da3b341f7248a5d03d0fff5822a101581e581c9b1771bd305e4a4407f82ca9ce993f8be1c0f19a30307d6de3b5391c845820063
34624d8da907649c765188d197b23e561c122c4b42835f26a91412c3e24865840f58e48db026fb30c3f2ac65cfb169ca31276eb73897c5ef495d1582cec422bf6e60efb69690199a9edaf424be7161bc24bd8a53a004662e990a11212f4d1e8025820dbd18cf22cb5c42e86e1dcc1a5d16fe24f2422e57be7a45f1b83e8c4ef18221f5822a101581e581c9b1771bd305e4a339d86c8a93fe0e379c4c7a24c04024e34b9a6659ca10082825820be12a95cfcd1011bfb59bd760ef683e040de4cb0355843683dfac00312b3535d58401ece9f0d7ea618e840bb8eeb2657baf19139d9b1c467e91345930ecf167a42e5ccd92ec95bf26038dc0a8b8cb404f5074990dffeda4d76bc61d9c7eac23965048258200d197b7eeef1b961962a2911d4b553f0d9c3114b8d3e61de3beb4f39dc348eb05840d3fc852714c1c741ab5effeaf50a47b449291d0613d4dd538aea58870a13819f05491726ccea72fc76407ee253f4aff23229a45604523fd5ced2c85dabfaf10ea10082825820d85a190eb4f193ae18c5395bf9d5493ceef6cffde1923fc3d41a3549f68f314b58400847fc53c36acf1f68f4de7b99afcea8706984958c4093d6db7d8e53c86e4daec7cb3ea313acc48b171ed6c3bf0c88951c4e62b1c80f43d72af54dbbeef0ff05825820c2aad03f77eb131a808e04efde662e7fb84034e811810c866cece5462b6dfc7b5840ec86663c28c1494d8ef004202c31747b038e36f7f0a446803bcf0c09f1b98f7ac3b539234b925292c6b88c96529501d3f614c92f83fb3d3eed0abef870cfe208a10083825820a080d8e4a44af1c986d93975807cf10f5d3670b7892f4c34cf91d7f12bbb4e6d5840156631a0c031bcc40e281b2f5405670cd8f87a2b04218a1d577337d1695d20319b6bd6490797b1be5abbc9d2c120469ce1f55fdffb6959bea43d1bb336bb1603825820a080d8e4a44af1c986d93975807cf10f5d3670b7892f4c34cf91d7f12bbb4e6d5840156631a0c031bcc40e281b2f5405670cd8f87a2b04218a1d577337d1695d20319b6bd6490797b1be5abbc9d2c120469ce1f55fdffb6959bea43d1bb336bb1603825820a080d8e4a44af1c986d93975807cf10f5d3670b7892f4c34cf91d7f12bbb4e6d5840156631a0c031bcc40e281b2f5405670cd8f87a2b04218a1d577337d1695d20319b6bd6490797b1be5abbc9d2c120469ce1f55fdffb6959bea43d1bb336bb1603a10081825820dbaa64dac9db1e42c8e7a65cb0a19b0d5bcd9d7492fbbdffa4aa1c6eeefef6055840134ddda8c219f15fcfc5a5760c51dd192d6e6c5765373b49c1f61b03e281ae65c702f53976f2e3ddf604e66dd5601edfa9b76cf7b7744bd56e99eb21ec3ac106a10081825820fa10a8aad3525ba
e5e6fc86f002a29fdeacf7e2c3adbbd42fb18ce37f46d6da758400b9c109eee0f20b17c2182c4b4356fe27ce61585a7b87c4454133ab9b4eeefa822290eb02210a6faa466113788e74f0b6f15dbd7564ad146b341b5d89aa34800a10282845820ca29c439ebe4e46749b5cd58ccd4a80c3e63590532b5e3e47929dcffadfe6f915840234f256bdaebabc73069dae65682f2bfd642854f37a901532d542537716f14445e4516cb7116d8fcf62b8dfe8d358ef76b3a8877b9e33f8e0ec49cc151b49d025820b5fa70b1f92b6f51f40a1214c83d072ee3463d37dd5a4ab93fd3880c1c2b78705822a101581e581c2b0b011ba3683d3b39e26d2a2a7538c41e52733b9a31fd976489dceb84582090f9faab0162b29b4ae704ec6b34b907ff4b460d95067416847c9de869fcffbd58402cc2a20369497238a6eafc0a73a9b20d0616a9b37769b1ca1f4b85fc0c787d0bb360255bd5008adc6a731b712886e13631f05ca293b1a0c7f0d6402e0e35800e582042273fb0f6b6eeae752bfa1efa18d3a52fe906cad33d9789f42875dccf122b3d5822a101581e581c2b0b011ba3683d6727811e2a7055eba994d24d6d5dbe938c685b65b1a100828258201bf61889f47fb0c57f000efc792c108058abd09ded1b92c8e321d5157dcc0b03584031ef0ade7e1c6aef0020f252650cc1e4278378d8292f68fef83c3cecb7d147fcb11dd12eddd16140ce3bc074f8ef96514558c7c031d7096da5eb9a8bb81c190282582068afabddeff38f0274543bdeca22982ba317c80534e2087b6e9ebb5bcbee2d255840dfc59eb00d76baed7333f8cf737fa9906b28bca9002a5484f0fb982d5458989536d6b5267d2d43cb7a032e166bb4df86a042c44f1c5a9c8fafd88dade88b8002a100818258200d197b7eeef1b961962a2911d4b553f0d9c3114b8d3e61de3beb4f39dc348eb05840d6863f876b33fb84a0f5ae757495136c1d38776a3ce3b7ce45ec4d19d51463fce731ce1d266e35c354ccee3e23b8f539c3122a6185594c2bca8530839f63620da10082825820c936c5031dfe7302a9577d20f39121a05186da762040ea7930ba40b4e75117095840dff4972a083973d9e4393520ab2263d47a8e9fd081e77215ba059eeaf188e3bbcd54f3efd20d6d711d9d29106b2d0ab484e37c24b72f82feb6adfe91eadc5901825820f9b39400c618265f9ba86e002b8df2eb114f2c5129a6c8cad4c749d024bcf73c584098147dd4e497e9aefcca6229e5ca1a28d43e2c6291d0ff627e9f4939341ebd80494fab7bad689f187c03b3d7fcbbf24fa7a04aabcd0c77455756f6ee4708560ca20081825820b58007aa837902c5f9deb45b0720ce9edb90231ab51d8d8a04223abbe91660c15840c592e5a6f8cc80c589768
0120a23744aff93bdda1c39008090f5103d758dbe5284125ff5a2b8c060a4fceab1b2a86f5ececfc8ae016c7b352a0fe18d5778cc0e01828200581c026786de9ea6d7de570eb20d7bcb3c815bf88a46cdd9bcf7b5b357158201828200581c026786de9ea6d7de570eb20d7bcb3c815bf88a46cdd9bcf7b5b3571582051a01a4b0cba100818258206687a36f0a4b8abf2671f8affb7febeb8940e9ee83548d2a2924dbc74679c46658403523bc8f616d9e7e7035fbade81505c88d1ea80e1c6e47412b1024c79f20a1221ffbadc68fbc8bca68ba1818d5f5c0446691e409958e43e64993cba32b8d170aa100838258201e148f63439d8482ac734d737dd4bf1d0f2c7053ed9810beea3151f3f4545356584063945f682dd36acc2ba795588b6bd501a1809869d9f7f6106fd12e4bf2ed0b891e875915938c8cca59da16cfe8fe07f414a98923cc3a29e0495193c6a1aa6c008258201cb6da608e12fbed48e93574274838bd855d965661874e734eba54ab138517b15840da0fee21f01f79738d37dc995e9bb2b009e037e652a48742a16d8af9598ddaf800273d35d65a0ac110644dd6e06798592b43025f1a0c2802790c27374599b7028258207b42a33d3b1cc39f54db7273d589c66146f55d2fc3ec784ed596b5b737f8ee7d584094948408e251b18f1ffe93eff189b3bdfb033dacbd1fa8ca3dc8c76e19b6a44598db9968a77480feddd34dabdc8d41e18872e4f085840c5a8cba14dfa9765c03a10b82a11902d1a478383564326333313062613330656537396139313339646566623639306166383761313130343434343932633963616634623230333865306631b84d7443617264616e6f4b69647a303033384643323236a362696418e265696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3232365d7443617264616e6f4b69647a303033384643323237a362696418e365696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3232375d7443617264616e6f4b69647a303033384643323238a362696418e465696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3232385d7443617264616e6f4b69647
a303033384643323239a362696418e565696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3232395d7443617264616e6f4b69647a303033384643323330a362696418e665696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3233305d7443617264616e6f4b69647a303033384643323331a362696418e765696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3233315d7443617264616e6f4b69647a303033384643323332a362696418e865696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3233325d7443617264616e6f4b69647a303033384643323333a362696418e965696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3233335d7443617264616e6f4b69647a303033384643323334a362696418ea65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3233345d7443617264616e6f4b69647a303033384643323335a362696418eb65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3233355d7443617264616e6f4b69647a303033384643323336a362696418ec65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205
b3233365d7443617264616e6f4b69647a303033384643323337a362696418ed65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3233375d7443617264616e6f4b69647a303033384643323338a362696418ee65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3233385d7443617264616e6f4b69647a303033384643323339a362696418ef65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3233395d7443617264616e6f4b69647a303033384643323430a362696418f065696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3234305d7443617264616e6f4b69647a303033384643323431a362696418f165696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3234315d7443617264616e6f4b69647a303033384643323432a362696418f265696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3234325d7443617264616e6f4b69647a303033384643323433a362696418f365696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3234335d7443617264616e6f4b69647a303033384643323434a362696418f465696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6
f204b69647a204e46542030303338205b3234345d7443617264616e6f4b69647a303033384643323435a362696418f565696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3234355d7443617264616e6f4b69647a303033384643323436a362696418f665696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3234365d7443617264616e6f4b69647a303033384643323437a362696418f765696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3234375d7443617264616e6f4b69647a303033384643323438a362696418f865696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3234385d7443617264616e6f4b69647a303033384643323439a362696418f965696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3234395d7443617264616e6f4b69647a303033384643323530a362696418fa65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3235305d7443617264616e6f4b69647a303033384643323531a362696418fb65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3235315d7443617264616e6f4b69647a303033384643323532a362696418fc65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f556
27752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3235325d7443617264616e6f4b69647a303033384643323533a362696418fd65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3235335d7443617264616e6f4b69647a303033384643323534a362696418fe65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3235345d7443617264616e6f4b69647a303033384643323535a362696418ff65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3235355d7443617264616e6f4b69647a303033384643323536a362696419010065696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3235365d7443617264616e6f4b69647a303033384643323537a362696419010165696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3235375d7443617264616e6f4b69647a303033384643323538a362696419010265696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3235385d7443617264616e6f4b69647a303033384643323539a362696419010365696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3235395d7443617264616e6f4b69647a303033384643323630a362696419010465696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f595343634
44b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3236305d7443617264616e6f4b69647a303033384643323631a362696419010565696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3236315d7443617264616e6f4b69647a303033384643323632a362696419010665696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3236325d7443617264616e6f4b69647a303033384643323633a362696419010765696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3236335d7443617264616e6f4b69647a303033384643323634a362696419010865696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3236345d7443617264616e6f4b69647a303033384643323635a362696419010965696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3236355d7443617264616e6f4b69647a303033384643323636a362696419010a65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3236365d7443617264616e6f4b69647a303033384643323637a362696419010b65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3236375d7443617264616e6f4b69647a303033384643323638a362696419010c65696d616765783a697066733a2f2f6970667
32f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3236385d7443617264616e6f4b69647a303033384643323639a362696419010d65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3236395d7443617264616e6f4b69647a303033384643323730a362696419010e65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3237305d7443617264616e6f4b69647a303033384643323731a362696419010f65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3237315d7443617264616e6f4b69647a303033384643323732a362696419011065696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3237325d7443617264616e6f4b69647a303033384643323733a362696419011165696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3237335d7443617264616e6f4b69647a303033384643323734a362696419011265696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3237345d7443617264616e6f4b69647a303033384643323735a362696419011365696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3237355d7443617264616e6f4b69647a303033384643323736a36
2696419011465696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3237365d7443617264616e6f4b69647a303033384643323737a362696419011565696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3237375d7443617264616e6f4b69647a303033384643323738a362696419011665696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3237385d7443617264616e6f4b69647a303033384643323739a362696419011765696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3237395d7443617264616e6f4b69647a303033384643323830a362696419011865696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3238305d7443617264616e6f4b69647a303033384643323831a362696419011965696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3238315d7443617264616e6f4b69647a303033384643323832a362696419011a65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3238325d7443617264616e6f4b69647a303033384643323833a362696419011b65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b32383
35d7443617264616e6f4b69647a303033384643323834a362696419011c65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3238345d7443617264616e6f4b69647a303033384643323835a362696419011d65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3238355d7443617264616e6f4b69647a303033384643323836a362696419011e65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3238365d7443617264616e6f4b69647a303033384643323837a362696419011f65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3238375d7443617264616e6f4b69647a303033384643323838a362696419012065696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3238385d7443617264616e6f4b69647a303033384643323839a362696419012165696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3238395d7443617264616e6f4b69647a303033384643323930a362696419012265696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3239305d7443617264616e6f4b69647a303033384643323931a362696419012365696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b436
17264616e6f204b69647a204e46542030303338205b3239315d7443617264616e6f4b69647a303033384643323932a362696419012465696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3239325d7443617264616e6f4b69647a303033384643323933a362696419012565696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3239335d7443617264616e6f4b69647a303033384643323934a362696419012665696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3239345d7443617264616e6f4b69647a303033384643323935a362696419012765696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3239355d7443617264616e6f4b69647a303033384643323936a362696419012865696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3239365d7443617264616e6f4b69647a303033384643323937a362696419012965696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3239375d7443617264616e6f4b69647a303033384643323938a362696419012a65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3239385d7443617264616e6f4b69647a303033384643323939a362696419012b65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b3579365938333
44c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3239395d7443617264616e6f4b69647a303033384643333030a362696419012c65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3330305d7443617264616e6f4b69647a303033384643333031a362696419012d65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3330315d7443617264616e6f4b69647a303033384643333032a362696419012e65696d616765783a697066733a2f2f697066732f516d6232704e3454536f4363333476687a6f59534363444b357936593833344c44794e6843715837556f55627752646e616d65781b43617264616e6f204b69647a204e46542030303338205b3330325d69636f70797269676874782f436f707972696768742043617264616e6f4b69647a20323032313b20616c6c20726967687473207265736572766564697075626c69736865728267636e66742e696f6f63617264616e6f6b69647a2e636f6d6776657273696f6e0180 \ No newline at end of file diff --git a/hermes/crates/cardano-chain-follower/test_data/shelley.block b/hermes/crates/cardano-chain-follower/test_data/shelley.block new file mode 100644 index 000000000..66da341e9 --- /dev/null +++ b/hermes/crates/cardano-chain-follower/test_data/shelley.block @@ -0,0 +1 @@ 
+820284828f1a004723dd1a007949425820c175f470d30216341423a98a6087175642250acec7d9f53a311cf2e0a1c9c7b258208b53207629f9a30e4b2015044f337c01735abe67243c19470c9dae8c7b732798582090561cf5fb4eada778f0564060b9b5138fbfa50c0e74fc496956c8c3507301a6825840d266d923d59fc8a1b7e964dab2b6db804b494c202586eae8e2db929ca2361d9f01154c4a78b95a2e6bf19ebe98e775f894ad53971bd1ceeee125ee8473747d60585011614e11e284d28aa303da9ca3a37bfde35f931d308ae3da36e381ac42910d36dc26d91bfa726d7b4a7ae1fb263e037e8f9e80e3411a8754863b8b5601047b9e04d0f72f00206ea616c6cffc75fc48018258405620f9239d562aed34442b72c8bc840bb9a5ef897b470132430a02cd0ce69052a6ebb17896177180c1d88afed3d7614878549c5573c0f281d5dad2f29bda5a6d5850f349597045cc5f65a9724770f971e6964e09fd85db8e36ef789f390afd4629a3f5e96b4e5ee8280ec26236a6323cbc16867a1868645566e0607d7a474fd7d06b44c3afbcd85a41098a80ba6faeb7400b190596582000ef8e1bebe7d404a910c7c467fb5aafbc7dee7fcaac94cb9693e08ea9dd7d2a5820674617ebe299bcba144026e4342e9f54c861165c1dde1373fd1206e654f985b800183758405befdeffa73bc8b4a1cd22aa2c896f189a698175e5bfc4a562e3a15b5f6580953e6fcc72a37386816031e36fdf19718351417f01af02c7314fbe9f2792b29e0c02005901c0c597caba74923b7901d5b8162f27338413b12941e411e9371b06a01375b8690aab067a42dde22db909bf77db373ca8645b751711256ba5f360e2935f64d14104cfdfb6c7865c6f4219e67af060cdcf4dc3d874ede00c394e9ecac7ba1f663b367e0f482e1bfaff08808d6567590cd6bf43c849ebacfb5fb185f4592ff3bf0a479d5a1f3f19f819b59f662cad2b6ff2187ec94b4c5fac6b8375b02d6b52d229ae24b389ff2d72b584f47f77cbc62a43f1880e486fda30ac1600f475dc4857e66090fe7399f4e3bff4929ea1c1929371846a34391473c79f9409f05f65fe8d2acb6f5eceb84474555d163db96d809aa77b9c2f80156d0356e75204ab5032e833bbacecf407038c8a28da4900c1c63a5bb32672dd345c37e1c866b15da5d2c41ab76c214bc8e3efd9e34cf092f1166edc2de2b03ccaca01b2c0261bfaf3f166f3937c21128c3ebb96ceefab6c80897da9f096a7cc113c4b9c0cd8b97fe3d29f6a2c9960005d2f1ce2e8bfeca9b1f8ebe80637f59133692e11ad9f9c557c10102472aef7f472d72920bbff7a7f4e344988c2d5f98482fbbae7a081d7f10b55b33a7c4dea90483223bea2093cb068b2db39973dd06700ea4eb65
fa3210d7d53430c9c84a5008182582050eba65e73c8c5f7b09f4ea28cf15dce169f3d1c322ca3deff03725f51518bb200018182583901f53fd6f6b96f74cc90fd995afad1bfdbd49ff7d04fc9e7a2f81285b75c465cbf8c5536970e8a29bb7adcda0d663b20007d481813694c64ef1b0000021f05a734bb021a0002a389031a0079652c048183028200581c5c465cbf8c5536970e8a29bb7adcda0d663b20007d481813694c64ef581c6b5180a258275c671690c94c704f074190e90ea900ed565b4c29abe8a50081825820368f24c09763ee846f68c60f987eeca4d9a7e9cb537cea2cc593e77dd9c0ac67000181825839019a5d1ab9ca1eb592973a0d3f5ffdd006e7e4c0be6a7b43211325168932dd8b472ba9876597a01259896cdb4650a62402aa9737175d539fe81b00000045d92911df021a0002a389031a0079652c048183028200581c32dd8b472ba9876597a01259896cdb4650a62402aa9737175d539fe8581c76e80e1b3f622c7051f222453497b0667e12892f5d94ee565d17dc52a40081825820cffac1e93f583d1f272142df5b793666e613c66cea4e500b18437aded85febbc00018282581d614497e18047e5c891ead6a215b3f5dce026921a8b76c7fe27a2ba9f681a000f424082583901a76b7122a3773792b6275c9a0fb0bf4ae0c17c52363de66062eca50c13ce7fae17ab762c4390c4d77bd74f7d4651af480508ea237b1dd0b51a0086a7f3021a0002ac4d031a007950a4a400828258201c3b9c2016d9ae6b3e86e1888296e6bc180e6bcfd344711b7305651ae90f87e60182582038d2f2cf6a2ce8563cc3b84b4b0e003404363a352fe1ac7ef5a6b5bb014c3d6c07018282582b82d818582183581c5f6712df165e03b5eb5e72e50058a181777696b222c54d844944da14a0001add85ea5a1a22dad5bd82581d614db3b10bedda3b9dd3d3a8886ca9d8a581162b874a7460edaab406fb1a032fcbef021a0003447f031a0098968084a10082825820e3968f45134a774f66b011c2c463ad64a5ba7e17f4774be460d08477a07e31f458402c551fc904b8e55a58d437e744566d0307a186af8f08d51a637456e0eb8247a0b639441a4fb25807b46edd60766a9e766b19e7c33caf5eea1752fc017b840100825820590a7c4229d9aede28c43f2626fcd36bc383cc3e622a654ad962e4c49570938c584042f3ed22755c5523a66864cb644330b82ace0b7cd483f4ea4e4c4d3ff88e62785811bf6a7da08e386f218301ff50bf8d000ca3df07dd831b9160b469ac95110ca10082825820feef653d2ba57531b9a442258cc0ad12feda29141b3e2a05e4a037262c4bb34d5840def966f7d8ea6e9ceb519606a9b7076b85677b6edcebd386f42f71c5c942c6ef90cbfad22
65634cf5ebae00f8b86760151c6559bc82b52c130547eed5414120b825820d91dc0521946003bc7ded1a54938a04bee80a860382c51ef7c9e4563346979345840ab999c3cbb8edd48da9b884a4dcb86a8e7b7110ee08f312264a018fa6d50e4a0635f124916be7773c38c8bec8a0fa7f2ac5058a776932ebfd557a8a263552409a100818258205ee8a66efac254382955ec4d643a9f24015d0ac1ed32480b1b7235f10e87a0e458400fc033349c1a9ecf966902e05f35b7459a642f7b3818c80003b08179631521c810a8a52e30c5211a38a88732369fdf07d96c83963564f11b14048c060a2d410ea10082825820a1ecd258e4f23d09803873e6b8a5ed21107738e12004ed7b1fe1bf89c9c49e5a5840789070b47a7d4bf6ee4d56773ace2faaed2f76905848d65e5c35bfb75339060ec9c6e904b9a8fc54cf861a6da4aa3d8f607cb325b1f1cc266e49fb8a6dc3ef01825820a1ecd258e4f23d09803873e6b8a5ed21107738e12004ed7b1fe1bf89c9c49e5a5840789070b47a7d4bf6ee4d56773ace2faaed2f76905848d65e5c35bfb75339060ec9c6e904b9a8fc54cf861a6da4aa3d8f607cb325b1f1cc266e49fb8a6dc3ef01a0 \ No newline at end of file diff --git a/hermes/crates/cardano-chain-follower/test_data/test_snapshot_id/12345/immutable/12345.chunk b/hermes/crates/cardano-chain-follower/test_data/test_snapshot_id/12345/immutable/12345.chunk new file mode 100644 index 000000000..e69de29bb diff --git a/hermes/crates/cardano-chain-follower/test_data/test_snapshot_id/12346/immutable/12346.chunk b/hermes/crates/cardano-chain-follower/test_data/test_snapshot_id/12346/immutable/12346.chunk new file mode 100644 index 000000000..e69de29bb diff --git a/hermes/crates/cardano-chain-follower/test_data/test_snapshot_id/12347/immutable/12347.chunk b/hermes/crates/cardano-chain-follower/test_data/test_snapshot_id/12347/immutable/12347.chunk new file mode 100644 index 000000000..e69de29bb diff --git a/hermes/crates/cardano-chain-follower/test_data/test_snapshot_id/123abc/immutable/123abc.chunk b/hermes/crates/cardano-chain-follower/test_data/test_snapshot_id/123abc/immutable/123abc.chunk new file mode 100644 index 000000000..e69de29bb diff --git 
a/hermes/crates/cardano-chain-follower/testbed/overhead_benchmark/src/benchs/cardano_chain_follower.rs b/hermes/crates/cardano-chain-follower/testbed/overhead_benchmark/src/benchs/cardano_chain_follower.rs index 351547139..f0e850d33 100644 --- a/hermes/crates/cardano-chain-follower/testbed/overhead_benchmark/src/benchs/cardano_chain_follower.rs +++ b/hermes/crates/cardano-chain-follower/testbed/overhead_benchmark/src/benchs/cardano_chain_follower.rs @@ -32,7 +32,10 @@ pub async fn run(params: BenchmarkParams) -> anyhow::Result<()> { let update = follower.next().await?; match update { - ChainUpdate::Block(raw_block_data) => { + ChainUpdate::ImmutableBlockRollback(data) + | ChainUpdate::BlockTip(data) + | ChainUpdate::ImmutableBlock(data) + | ChainUpdate::Block(raw_block_data) => { let block_data = raw_block_data.decode()?; monitor_task_handle.send_update(monitor::BenchmarkStats { diff --git a/hermes/crates/cardano-chain-follower/testbed/rust-toolchain.toml b/hermes/crates/cardano-chain-follower/testbed/rust-toolchain.toml index 2c1a03c1a..21c73f28f 100644 --- a/hermes/crates/cardano-chain-follower/testbed/rust-toolchain.toml +++ b/hermes/crates/cardano-chain-follower/testbed/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.78" +channel = "1.80" profile = "default" diff --git a/hermes/crates/cbork/Earthfile b/hermes/crates/cbork/Earthfile index a56cd8b78..dd3415104 100644 --- a/hermes/crates/cbork/Earthfile +++ b/hermes/crates/cbork/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.1.8 AS rust-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/faster-rust-tool-install AS rust-ci # Use when debugging cat-ci locally. 
# IMPORT ../../catalyst-ci/earthly/rust AS rust-ci diff --git a/hermes/crates/cbork/rust-toolchain.toml b/hermes/crates/cbork/rust-toolchain.toml index 08feed89d..20a42f2a9 100644 --- a/hermes/crates/cbork/rust-toolchain.toml +++ b/hermes/crates/cbork/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.78" +channel = "1.80" profile = "default" \ No newline at end of file diff --git a/hermes/deny.toml b/hermes/deny.toml index 921f1eb3c..43412dba9 100644 --- a/hermes/deny.toml +++ b/hermes/deny.toml @@ -17,8 +17,8 @@ targets = [ [advisories] version = 2 ignore = [ - { id = "RUSTSEC-2020-0168", reason = "`mach` is used by wasmtime and we have no control over that." }, - { id = "RUSTSEC-2021-0145", reason = "we don't target windows, and don;t use a custom global allocator." }, + # { id = "RUSTSEC-2020-0168", reason = "`mach` is used by wasmtime and we have no control over that." }, + # { id = "RUSTSEC-2021-0145", reason = "we don't target windows, and don;t use a custom global allocator." }, ] [bans] @@ -53,6 +53,7 @@ allow-git = [ "https://github.com/input-output-hk/catalyst-pallas.git", "https://github.com/bytecodealliance/wasmtime", "https://github.com/aldanor/hdf5-rust", + "https://github.com/input-output-hk/catalyst-mithril.git" ] [licenses] @@ -71,13 +72,12 @@ allow = [ "Apache-2.0 WITH LLVM-exception", "CC0-1.0", "ISC", + "Zlib", "Unicode-3.0", "MPL-2.0", ] exceptions = [ - #{ allow = ["Zlib"], crate = "tinyvec" }, - #{ allow = ["Unicode-DFS-2016"], crate = "unicode-ident" }, - #{ allow = ["OpenSSL"], crate = "ring" }, + { allow = ["OpenSSL"], crate = "ring" }, # OpenSSL license is permissive, but be explicit about where we accept it. 
] [[licenses.clarify]] @@ -92,7 +92,7 @@ license-files = [{ path = "../LICENSE-MIT", hash = 0x001c7e6c }] [[licenses.clarify]] crate = "ring" -expression = "MIT" +expression = "ISC AND MIT AND OpenSSL" license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] # SPDX considers OpenSSL to encompass both the OpenSSL and SSLeay licenses diff --git a/hermes/rust-toolchain.toml b/hermes/rust-toolchain.toml index f0ad0a7e0..ff6c11e3f 100644 --- a/hermes/rust-toolchain.toml +++ b/hermes/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "1.78" +channel = "1.80" profile = "default" targets = ["x86_64-unknown-linux-musl"] diff --git a/wasm/integration-test/cardano/Earthfile b/wasm/integration-test/cardano/Earthfile index 6e33ff0b4..5a6637fcb 100644 --- a/wasm/integration-test/cardano/Earthfile +++ b/wasm/integration-test/cardano/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.1.8 AS rust-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/faster-rust-tool-install AS rust-ci # Use when debugging cat-ci locally. # IMPORT ../../catalyst-ci/earthly/rust AS rust-ci diff --git a/wasm/integration-test/cardano/rust-toolchain.toml b/wasm/integration-test/cardano/rust-toolchain.toml index 3820d414e..270039a93 100644 --- a/wasm/integration-test/cardano/rust-toolchain.toml +++ b/wasm/integration-test/cardano/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.78" +channel = "1.80" targets = ["wasm32-wasip1"] diff --git a/wasm/integration-test/ipfs/Earthfile b/wasm/integration-test/ipfs/Earthfile index 61f3359c5..633ef160a 100644 --- a/wasm/integration-test/ipfs/Earthfile +++ b/wasm/integration-test/ipfs/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.1.8 AS rust-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/faster-rust-tool-install AS rust-ci # Use when debugging cat-ci locally. 
# IMPORT ../../catalyst-ci/earthly/rust AS rust-ci diff --git a/wasm/integration-test/ipfs/rust-toolchain.toml b/wasm/integration-test/ipfs/rust-toolchain.toml index dcb74252a..e3f2aeb57 100644 --- a/wasm/integration-test/ipfs/rust-toolchain.toml +++ b/wasm/integration-test/ipfs/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.78" +channel = "1.80" targets = ["wasm32-unknown-unknown"] diff --git a/wasm/integration-test/sqlite/Earthfile b/wasm/integration-test/sqlite/Earthfile index 0f7f4f6f5..43cd796d9 100644 --- a/wasm/integration-test/sqlite/Earthfile +++ b/wasm/integration-test/sqlite/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.1.8 AS rust-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/faster-rust-tool-install AS rust-ci # Use when debugging cat-ci locally. # IMPORT ../../catalyst-ci/earthly/rust AS rust-ci diff --git a/wasm/integration-test/sqlite/rust-toolchain.toml b/wasm/integration-test/sqlite/rust-toolchain.toml index 3820d414e..270039a93 100644 --- a/wasm/integration-test/sqlite/rust-toolchain.toml +++ b/wasm/integration-test/sqlite/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.78" +channel = "1.80" targets = ["wasm32-wasip1"] diff --git a/wasm/wasi-hermes-component-adapter/Earthfile b/wasm/wasi-hermes-component-adapter/Earthfile index 0b6ac0f0a..6e95829e3 100644 --- a/wasm/wasi-hermes-component-adapter/Earthfile +++ b/wasm/wasi-hermes-component-adapter/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.1.8 AS rust-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:feat/faster-rust-tool-install AS rust-ci # Use when debugging cat-ci locally. 
# IMPORT ../../catalyst-ci/earthly/rust AS rust-ci diff --git a/wasm/wasi-hermes-component-adapter/rust-toolchain.toml b/wasm/wasi-hermes-component-adapter/rust-toolchain.toml index 1170a92c3..7f02acd2c 100644 --- a/wasm/wasi-hermes-component-adapter/rust-toolchain.toml +++ b/wasm/wasi-hermes-component-adapter/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "1.78" +channel = "1.80" profile = "default" targets = ["wasm32-unknown-unknown"] diff --git a/wasm/wasi/wit/deps/hermes-cardano/api.wit b/wasm/wasi/wit/deps/hermes-cardano/api.wit index e02c0be6a..3cbdb9019 100644 --- a/wasm/wasi/wit/deps/hermes-cardano/api.wit +++ b/wasm/wasi/wit/deps/hermes-cardano/api.wit @@ -10,6 +10,7 @@ interface api { use hermes:binary/api.{bstr}; use hermes:cbor/api.{cbor}; + use wasi:clocks/wall-clock@0.2.0.{datetime}; /// Cardano Blocks are CBOR Data type cardano-block = cbor; @@ -22,20 +23,52 @@ interface api { mainnet, // Cardano Mainnet preprod, // Cardano Preprod Network preview, // Cardano Preview Network - local-test-blockchain // A local isolated test blockchain. } /// Source information about where the block came from, and if we are at tip or not. flags block-src { + /// Is this block live or immutable. + /// true = Block is known to be immutable, and is not subject to rollbacks. + /// false = Block is NOT known to be immutable and could be subject to rollbacks + immutable, + /// Is this block the current TIP of the blockchain tip, - node, - mithril + /// Is this block caused by a rollback + rollback + } + + + /// A Known Block point on the blockchain. + /// Identified by slot and block hash. + /// The two are required because rollbacks may yield the same slot, but a different hash. + type block-point = tuple; + + + /// The era of a particular block. + /// This is a string which identifies how the block data is encoded and its internal contents. + /// TODO: Document the known eras. 
+ type era-id = string; + + /// Details of the block + record block-detail { + /// The era of the block. + /// This is a string which identifies how the block data is encoded. + /// TODO: Document the known eras. + era: era-id, + /// The source of this block (Immutable or Live data, tip and/or rollback). + src: block-src, + /// Block Height + height: u64, + /// Block Slot#/Hash + slot: block-point, + /// Wall Clock of the slot calculated from the Slot# and Genesis parameters. + wall-clock: datetime, } /// The Slot number to interact with variant slot { genesis, // The very start of the blockchain. - point(tuple), // A particular slot number. + point(block-point), // A particular slot/block number. tip, // The TIP of the blockchain. continue, // From wherever its currently pointing. } @@ -44,6 +77,7 @@ interface api { enum fetch-error { blockchain-not-available, // The blockchain requested is not available. invalid-slot, // The slot requested is not a valid slot for the blockchain. + invalid-txn, // The Transaction does not exist in the Block. } /// Errors that can occur when posting transactions. @@ -54,14 +88,11 @@ interface api { } /// Options used to unsubscribe from the blockchain data flow. - flags unsubscribe-options { - block, // Stop receiving block data - transaction, // Stop receiving txn data - rollback, // Stop receiving rollback data - stop // stop the blockchain fetching process altogether. + flags subscribe-options { + block, // Subscribe/Unsubscribe this module receiving block data + transaction, // Subscribe/Unsubscribe this module receiving txn data } - /// Subscribe to the Blockchain block data. /// /// **Parameters** @@ -84,48 +115,19 @@ interface api { /// When `whence` == `continue` the blockchain will keep syncing from where it is at, and this module /// will be subscribed to block updates. /// - /// `whence` == `stop` will prevent the blockchain syncing, and the caller will be unsubscribed. 
- /// - /// - subscribe-blocks: func (net: cardano-blockchain-id, whence: slot) -> result; + subscribe: func (net: cardano-blockchain-id, whence: slot, what: subscribe-options) -> result<_, fetch-error>; /// Unsubscribe from the blockchain events listed. /// /// **Parameters** /// - /// - `opts` : The events to unsubscribe from (and optionally stop the blockchain follower). - /// - /// **Notes** - /// - /// This only unsubscribes from the events. - /// The option `stop` MUST be set to actually stop fetching data from the blockchain once started. - /// - /// `stop` can be set without unsubscribing, and this will interrupt the flow of blockchain data. - /// After `stop`, `subscribe-blocks(?, continue)` would cause blockchain sync to continue from - /// the last block received. This would result in the last block being sent as an event twice, - /// once before the `stop` and once after the `continue`. - unsubscribe: func(net: cardano-blockchain-id, opts: unsubscribe-options); - - /// Subscribe to transaction data events, does not alter the blockchain sync in anyway. - /// - /// **Parameters** - /// - /// - `net` : The blockchain network to subscribe to txn events from. - /// - subscribe-txn: func (net: cardano-blockchain-id); - - /// Subscribe to blockchain rollback events, does not alter the blockchain sync in anyway. - /// - /// **Parameters** - /// - /// - `net` : The blockchain network to subscribe to txn events from. + /// - `what` : The events to unsubscribe from (and optionally stop the blockchain follower). /// /// **Notes** /// - /// After a rollback event, the blockchain sync will AUTOMATICALLY start sending block - /// data from the rollback point. No action is required to actually follow the rollback, unless the - /// default behavior is not desired. - subscribe-rollback: func (net: cardano-blockchain-id); + /// This unsubscribes from the events. + /// When all events in all listeners are unsubscribed, the blockchain will stop being followed. 
+ unsubscribe: func(net: cardano-blockchain-id, what: subscribe-options); /// Fetch a block from the requested blockchain at the requested slot. /// @@ -145,10 +147,9 @@ interface api { /// being followed and generating events. /// It also will not alter the automatic fetching of blocks in any way, and happens in parallel /// to automated block fetch. - /// fetch-block: func (net: cardano-blockchain-id, whence: slot) -> result; - /// Get transactions from a block. + /// Get all transactions from a block. /// /// This can be used to easily extract all transactions from a complete block. /// @@ -167,6 +168,28 @@ interface api { /// get-txns: func (block: cardano-block) -> list; + /// Fetch a single transaction from the requested blockchain at the requested slot/offset. + /// + /// **Parameters** + /// + /// - `net` : The blockchain network to get a block from. + /// - `whence` : Which block to get. + /// - `offset` : Which transaction in that block to fetch. + /// + /// **Returns** + /// + /// - `cardano-txn` : The block requested. + /// - `fetch-error` : An error if the block can not be fetched. + /// + /// **Notes** + /// + /// Fetching a transaction does not require the blockchain to be subscribed, or for blocks to be + /// being followed and generating events. + /// It also will not alter the automatic fetching of blocks in any way, and happens in parallel + /// to automated block fetch. + /// + fetch-txn: func (net: cardano-blockchain-id, whence: slot, offset: u16) -> result; + /// Post a transactions to the blockchain. /// /// This can be used to post a pre-formed transaction to the required blockchain. 
diff --git a/wasm/wasi/wit/deps/hermes-cardano/event.wit b/wasm/wasi/wit/deps/hermes-cardano/event.wit index 5cbee8775..9fa204831 100644 --- a/wasm/wasi/wit/deps/hermes-cardano/event.wit +++ b/wasm/wasi/wit/deps/hermes-cardano/event.wit @@ -9,20 +9,15 @@ /// /// **Guarantee**: Transaction events will be sent and could arrive in any order, /// BUT only after all event processing of the block they come from is complete. -/// Further block or rollback events will not occur until all transaction events +/// Further block events will not occur until all transaction events /// from a block are fully processed. -/// -/// **Guarantee**: Rollback events will be fully processed before the next block -/// event will be sent. The block event sent immediately after a rollback event -/// will be the target of the rollback. This means that rollback processing does -/// not need to reset or re-subscribe the blockchain follower. /// /// **Warning**: Events from different blockchains are not synchronized between /// each other. /// Cardano API Interface - Export ONLY interface event-on-block { - use api.{cardano-blockchain-id, cardano-block, block-src}; + use api.{cardano-blockchain-id, cardano-block, block-detail}; /// Triggered when a cardano block event fires. /// @@ -32,17 +27,17 @@ interface event-on-block { /// /// - `blockchain` : The blockchain id the block originated from. /// - `block` : This raw CBOR block data. - /// - `source` : Source information about where the block came from, and if we are at tip or not. + /// - `detail` : Details about the block, which can help process it. /// /// Returns: /// Nothing. 
/// - on-cardano-block: func(blockchain: cardano-blockchain-id, block: cardano-block, source: block-src); + on-cardano-block: func(blockchain: cardano-blockchain-id, block: cardano-block, detail: block-detail); } /// Cardano API Interface - Export ONLY interface event-on-txn { - use api.{cardano-blockchain-id, cardano-txn}; + use api.{cardano-blockchain-id, cardano-txn, block-detail}; /// Triggered when a cardano transaction event fires. /// @@ -51,38 +46,18 @@ interface event-on-txn { /// ## Parameters /// /// - `blockchain` : The blockchain id the block originated from. - /// - `slot` : The slot the transaction is in. /// - `txn-index` : The offset in the block this transaction is at. /// - `txn` : The raw transaction data itself. + /// - `detail` : Details about the block this transaction is found in. /// /// Returns: /// Nothing. /// - on-cardano-txn: func(blockchain: cardano-blockchain-id, slot: u64, txn-index: u32, txn: cardano-txn); -} - -/// Cardano API Interface - Export ONLY -interface event-on-rollback { - use api.{cardano-blockchain-id}; - - /// Triggered when a cardano rollback event fires. - /// - /// The module must export this interface to use it. - /// - /// ## Parameters - /// - /// - `blockchain` : The blockchain id the rollback originated from. - /// - `slot` : The slot the rollback is targeting. (The next block event will be from this slot.) - /// - /// Returns: - /// Nothing. 
- /// - on-cardano-rollback: func(blockchain: cardano-blockchain-id, slot: u64); + on-cardano-txn: func(blockchain: cardano-blockchain-id, txn-index: u32, txn: cardano-txn, detail: block-detail); } world cardano-events { export event-on-block; export event-on-txn; - export event-on-rollback; } \ No newline at end of file diff --git a/wasm/wasi/wit/deps/hermes-cardano/world.wit b/wasm/wasi/wit/deps/hermes-cardano/world.wit index 84d105c92..63907644f 100644 --- a/wasm/wasi/wit/deps/hermes-cardano/world.wit +++ b/wasm/wasi/wit/deps/hermes-cardano/world.wit @@ -6,5 +6,4 @@ world all { export event-on-block; export event-on-txn; - export event-on-rollback; }