From 38e1e5336562e12414edc3f08c1377c776bf2563 Mon Sep 17 00:00:00 2001 From: Krzysztof Lis Date: Fri, 20 May 2022 11:05:47 +0200 Subject: [PATCH 1/9] fix: L2 reorg error context --- crates/pathfinder/src/state/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/pathfinder/src/state/sync.rs b/crates/pathfinder/src/state/sync.rs index bcac4a3236..bed67ab970 100644 --- a/crates/pathfinder/src/state/sync.rs +++ b/crates/pathfinder/src/state/sync.rs @@ -534,7 +534,7 @@ async fn l2_reorg( // TODO: clean up state tree's as well... StarknetBlocksTable::reorg(&transaction, reorg_tail) - .context("Delete L1 state from database")?; + .context("Delete L2 state from database")?; // Track combined L1 and L2 state. let l1_l2_head = RefsTable::get_l1_l2_head(&transaction).context("Query L1-L2 head")?; From 518651055d5f52dd6f88e1300f5d177977a47d90 Mon Sep 17 00:00:00 2001 From: Krzysztof Lis Date: Fri, 20 May 2022 21:54:51 +0200 Subject: [PATCH 2/9] feat: storage revision 10 --- crates/pathfinder/src/storage.rs | 3 +- crates/pathfinder/src/storage/schema.rs | 1 + .../src/storage/schema/revision_0007.rs | 135 +++++++++--------- .../src/storage/schema/revision_0010.rs | 124 ++++++++++++++++ 4 files changed, 198 insertions(+), 65 deletions(-) create mode 100644 crates/pathfinder/src/storage/schema/revision_0010.rs diff --git a/crates/pathfinder/src/storage.rs b/crates/pathfinder/src/storage.rs index 38008a6e81..5168bc622c 100644 --- a/crates/pathfinder/src/storage.rs +++ b/crates/pathfinder/src/storage.rs @@ -27,7 +27,7 @@ use tracing::info; /// Indicates database is non-existant. const DB_VERSION_EMPTY: u32 = 0; /// Current database version. -const DB_VERSION_CURRENT: u32 = 9; +const DB_VERSION_CURRENT: u32 = 10; /// Sqlite key used for the PRAGMA user version. const VERSION_KEY: &str = "user_version"; @@ -147,6 +147,7 @@ fn migrate_database(connection: &mut Connection) -> anyhow::Result<()> { 6 => schema::revision_0007::migrate(&transaction)?, 7 => schema::revision_0008::migrate(&transaction)?, 8 => schema::revision_0009::migrate(&transaction)?, + 9 => schema::revision_0010::migrate(&transaction)?, _ => unreachable!("Database version constraint was already checked!"), }; // If any migration action requires vacuuming, we should vacuum. diff --git a/crates/pathfinder/src/storage/schema.rs b/crates/pathfinder/src/storage/schema.rs index 2045dfc577..99f9482fb7 100644 --- a/crates/pathfinder/src/storage/schema.rs +++ b/crates/pathfinder/src/storage/schema.rs @@ -7,6 +7,7 @@ pub(crate) mod revision_0006; pub(crate) mod revision_0007; pub(crate) mod revision_0008; pub(crate) mod revision_0009; +pub(crate) mod revision_0010; /// Used to indicate which action the caller should perform after a schema migration. 
#[derive(Debug, Clone, Copy, PartialEq)]
diff --git a/crates/pathfinder/src/storage/schema/revision_0007.rs b/crates/pathfinder/src/storage/schema/revision_0007.rs
index 4a162e51ee..b43d90cafb 100644
--- a/crates/pathfinder/src/storage/schema/revision_0007.rs
+++ b/crates/pathfinder/src/storage/schema/revision_0007.rs
@@ -179,73 +179,80 @@ mod transaction {
     }
 }
 
+const STARKNET_EVENTS_CREATE_STMT: &str = r"CREATE TABLE starknet_events (
+        block_number INTEGER NOT NULL,
+        idx INTEGER NOT NULL,
+        transaction_hash BLOB NOT NULL,
+        from_address BLOB NOT NULL,
+        -- Keys are represented as base64 encoded strings separated by space
+        keys TEXT,
+        data BLOB,
+        FOREIGN KEY(block_number) REFERENCES starknet_blocks(number)
+        ON DELETE CASCADE
+    );
+
+    -- Event filters can specify ranges of blocks
+    CREATE INDEX starknet_events_block_number ON starknet_events(block_number);
+
+    -- Event filter can specify a contract address
+    CREATE INDEX starknet_events_from_address ON starknet_events(from_address);
+
+    CREATE VIRTUAL TABLE starknet_events_keys
+    USING fts5(
+        keys,
+        content='starknet_events',
+        content_rowid='rowid',
+        tokenize='ascii'
+    );
+
+    CREATE TRIGGER starknet_events_ai
+    AFTER INSERT ON starknet_events
+    BEGIN
+        INSERT INTO starknet_events_keys(rowid, keys)
+        VALUES (
+            new.rowid,
+            new.keys
+        );
+    END;
+
+    CREATE TRIGGER starknet_events_ad
+    AFTER DELETE ON starknet_events
+    BEGIN
+        INSERT INTO starknet_events_keys(starknet_events_keys, rowid, keys)
+        VALUES (
+            'delete',
+            old.rowid,
+            old.keys
+        );
+    END;
+
+    CREATE TRIGGER starknet_events_au
+    AFTER UPDATE ON starknet_events
+    BEGIN
+        INSERT INTO starknet_events_keys(starknet_events_keys, rowid, keys)
+        VALUES (
+            'delete',
+            old.rowid,
+            old.keys
+        );
+        INSERT INTO starknet_events_keys(rowid, keys)
+        VALUES (
+            new.rowid,
+            new.keys
+        );
+    END;";
+
+pub(crate) fn migrate(transaction: &Transaction) -> anyhow::Result<PostMigrationAction> {
+    migrate_with(transaction, STARKNET_EVENTS_CREATE_STMT)
+}
+
+pub(crate) fn migrate_with(
+    transaction: &Transaction,
+    starknet_events_create_stmt: &'static str,
+) -> anyhow::Result<PostMigrationAction> {
     // Create the new events table.
     transaction
-        .execute_batch(
-            r"CREATE TABLE starknet_events (
-                block_number INTEGER NOT NULL,
-                idx INTEGER NOT NULL,
-                transaction_hash BLOB NOT NULL,
-                from_address BLOB NOT NULL,
-                -- Keys are represented as base64 encoded strings separated by space
-                keys TEXT,
-                data BLOB,
-                FOREIGN KEY(block_number) REFERENCES starknet_blocks(number)
-                ON DELETE CASCADE
-            );
-
-            -- Event filters can specify ranges of blocks
-            CREATE INDEX starknet_events_block_number ON starknet_events(block_number);
-
-            -- Event filter can specify a contract address
-            CREATE INDEX starknet_events_from_address ON starknet_events(from_address);
-
-            CREATE VIRTUAL TABLE starknet_events_keys
-            USING fts5(
-                keys,
-                content='starknet_events',
-                content_rowid='rowid',
-                tokenize='ascii'
-            );
-
-            CREATE TRIGGER starknet_events_ai
-            AFTER INSERT ON starknet_events
-            BEGIN
-                INSERT INTO starknet_events_keys(rowid, keys)
-                VALUES (
-                    new.rowid,
-                    new.keys
-                );
-            END;
-
-            CREATE TRIGGER starknet_events_ad
-            AFTER DELETE ON starknet_events
-            BEGIN
-                INSERT INTO starknet_events_keys(starknet_events_keys, rowid, keys)
-                VALUES (
-                    'delete',
-                    old.rowid,
-                    old.keys
-                );
-            END;
-
-            CREATE TRIGGER starknet_events_au
-            AFTER UPDATE ON starknet_events
-            BEGIN
-                INSERT INTO starknet_events_keys(starknet_events_keys, rowid, keys)
-                VALUES (
-                    'delete',
-                    old.rowid,
-                    old.keys
-                );
-                INSERT INTO starknet_events_keys(rowid, keys)
-                VALUES (
-                    new.rowid,
-                    new.keys
-                );
-            END;",
-        )
+        .execute_batch(starknet_events_create_stmt)
         .context("Create starknet events tables and indexes")?;
 
     // Create an index on starknet_blocks(hash) so that we can look up block numbers based
diff --git a/crates/pathfinder/src/storage/schema/revision_0010.rs b/crates/pathfinder/src/storage/schema/revision_0010.rs
new file mode 100644
index 0000000000..80ee31dac6
--- /dev/null
+++ b/crates/pathfinder/src/storage/schema/revision_0010.rs
@@ -0,0 +1,124 @@
+use crate::storage::schema::PostMigrationAction;
+
+use anyhow::Context;
+use rusqlite::Transaction;
+
+pub(crate) fn migrate(transaction: &Transaction) -> anyhow::Result<PostMigrationAction> {
+    // We need to check if this db needs fixing at all
+    let update_is_not_required = {
+        let mut stmt = transaction
+            .prepare("SELECT sql FROM sqlite_schema where tbl_name = 'starknet_events'")
+            .context("Preparing statement")?;
+        let mut rows = stmt.query([]).context("Executing query")?;
+        // Unwrap is safe because the schema for this table obviously contains more than
+        // zero SQL statements, as can be seen in the revision 7 migration.
+        // The first statement of the schema for this table is the creation of the table,
+        // which could be missing the crucial action, which is ON DELETE CASCADE.
+        rows.next()?
+            .unwrap()
+            .get_ref_unwrap("sql")
+            .as_str()?
+            .contains("ON DELETE CASCADE")
+    };
+
+    if update_is_not_required {
+        return Ok(PostMigrationAction::None);
+    }
+
+    // When altering a table in a way that requires recreating it through copying and deletion
+    // it is [recommended](https://www.sqlite.org/lang_altertable.html) to:
+    // 1. create the new table with some temporary name
+    // 2. copy the data from the old table
+    // 3. drop the old table
+    // 4. rename the new table
+    // Instead of the opposite:
+    // 1. rename the old table
+    // 2. create the new table with the final name
+    // 3. copy the data from the old table
+    // 4. drop the old table
+    //
+    // Triggers and indexes are dropped with the old table, so they need to be recreated
+    // The virtual table remains unchanged
+    transaction
+        .execute_batch(
+            r"
+            CREATE TABLE starknet_events_v2 (
+                block_number INTEGER NOT NULL,
+                idx INTEGER NOT NULL,
+                transaction_hash BLOB NOT NULL,
+                from_address BLOB NOT NULL,
+                -- Keys are represented as base64 encoded strings separated by space
+                keys TEXT,
+                data BLOB,
+                FOREIGN KEY(block_number) REFERENCES starknet_blocks(number)
+                ON DELETE CASCADE
+            );
+
+            INSERT INTO starknet_events_v2 (
+                block_number,
+                idx,
+                transaction_hash,
+                from_address,
+                keys,
+                data)
+
+            SELECT starknet_events.block_number,
+                   starknet_events.idx,
+                   starknet_events.transaction_hash,
+                   starknet_events.from_address,
+                   starknet_events.keys,
+                   starknet_events.data
+
+            FROM starknet_events;
+
+            DROP TABLE starknet_events;
+
+            ALTER TABLE starknet_events_v2 RENAME TO starknet_events;
+
+            -- Event filters can specify ranges of blocks
+            CREATE INDEX starknet_events_block_number ON starknet_events(block_number);
+
+            -- Event filter can specify a contract address
+            CREATE INDEX starknet_events_from_address ON starknet_events(from_address);
+
+            CREATE TRIGGER starknet_events_ai
+            AFTER INSERT ON starknet_events
+            BEGIN
+                INSERT INTO starknet_events_keys(rowid, keys)
+                VALUES (
+                    new.rowid,
+                    new.keys
+                );
+            END;
+
+            CREATE TRIGGER starknet_events_ad
+            AFTER DELETE ON starknet_events
+            BEGIN
+                INSERT INTO starknet_events_keys(starknet_events_keys, rowid, keys)
+                VALUES (
+                    'delete',
+                    old.rowid,
+                    old.keys
+                );
+            END;
+
+            CREATE TRIGGER starknet_events_au
+            AFTER UPDATE ON starknet_events
+            BEGIN
+                INSERT INTO starknet_events_keys(starknet_events_keys, rowid, keys)
+                VALUES (
+                    'delete',
+                    old.rowid,
+                    old.keys
+                );
+                INSERT INTO starknet_events_keys(rowid, keys)
+                VALUES (
+                    new.rowid,
+                    new.keys
+                );
+            END;",
+        )
+        .context("Recreating the starknet_events table, related triggers and indexes")?;
+
+    Ok(PostMigrationAction::None)
+}

From f6e4e7c8e8fbf2b5f451b1f485b57fbe02df75cf Mon Sep 17 00:00:00 2001
From: Krzysztof Lis
Date: Fri, 20 May 2022 21:55:36 +0200
Subject: [PATCH 3/9] test: revision 10, including the regression

---
 .../src/storage/schema/revision_0010.rs | 328 ++++++++++++++++++
 1 file changed, 328 insertions(+)

diff --git a/crates/pathfinder/src/storage/schema/revision_0010.rs b/crates/pathfinder/src/storage/schema/revision_0010.rs
index 80ee31dac6..a15720795a 100644
--- a/crates/pathfinder/src/storage/schema/revision_0010.rs
+++ b/crates/pathfinder/src/storage/schema/revision_0010.rs
@@ -122,3 +122,331 @@ pub(crate) fn migrate(transaction: &Transaction) -> anyhow::Result<PostMigrationAction> {
+            fn insert(connection: &Connection, block: &StarknetBlock) -> anyhow::Result<()> {
+                connection.execute(
+                    r"INSERT INTO starknet_blocks ( number, hash, root, timestamp)
+                                           VALUES (:number, :hash, :root, :timestamp)",
+                    named_params! {
+                        ":number": block.number.0,
+                        ":hash": block.hash.0.as_be_bytes(),
+                        ":root": block.root.0.as_be_bytes(),
+                        ":timestamp": block.timestamp.0,
+                    },
+                )?;
+
+                Ok(())
+            }
+        }
+    }
+
+    /// This is a test helper function which runs a stateful scenario of the migration,
+    /// with the revision 7 migration being customisable via a closure provided by the caller.
+    fn run_stateful_scenario<Fn: FnOnce(&rusqlite::Transaction<'_>)>(revision_0007_migrate_fn: Fn) {
+        let mut connection = Connection::open_in_memory().unwrap();
+        let transaction = connection.transaction().unwrap();
+
+        // 1.
Migrate the db up to rev7 + schema::revision_0001::migrate(&transaction).unwrap(); + schema::revision_0002::migrate(&transaction).unwrap(); + schema::revision_0003::migrate(&transaction).unwrap(); + schema::revision_0004::migrate(&transaction).unwrap(); + schema::revision_0005::migrate(&transaction).unwrap(); + schema::revision_0006::migrate(&transaction).unwrap(); + revision_0007_migrate_fn(&transaction); + + // 2. Insert some data that would cause the regression + let block0_number = StarknetBlockNumber(0); + let block1_number = StarknetBlockNumber(1); + let block0_hash = StarknetBlockHash(StarkHash::from_be_slice(b"block 1 hash").unwrap()); + let block0 = storage_rev7::StarknetBlock { + hash: block0_hash, + number: block0_number, + root: GlobalRoot(StarkHash::from_be_slice(b"root 0").unwrap()), + timestamp: StarknetBlockTimestamp(0), + }; + let block1 = storage_rev7::StarknetBlock { + hash: StarknetBlockHash(StarkHash::from_be_slice(b"block 1 hash").unwrap()), + number: block1_number, + root: GlobalRoot(StarkHash::from_be_slice(b"root 1").unwrap()), + timestamp: StarknetBlockTimestamp(1), + }; + let contract0_address = + ContractAddress(StarkHash::from_be_slice(b"contract 0 address").unwrap()); + let contract1_address = + ContractAddress(StarkHash::from_be_slice(b"contract 1 address").unwrap()); + let transaction0_hash = + StarknetTransactionHash(StarkHash::from_be_slice(b"transaction 0 hash").unwrap()); + let transaction0 = Transaction { + calldata: None, + class_hash: None, + constructor_calldata: None, + contract_address: contract0_address, + contract_address_salt: None, + entry_point_selector: None, + entry_point_type: None, + max_fee: None, + signature: None, + transaction_hash: transaction0_hash, + r#type: transaction::Type::Deploy, + }; + let mut transaction1 = transaction0.clone(); + transaction1.transaction_hash = + StarknetTransactionHash(StarkHash::from_be_slice(b"transaction 1 hash").unwrap()); + let event0_key = EventKey(StarkHash::from_be_slice(b"event 0 key").unwrap()); + let event1_key = EventKey(StarkHash::from_be_slice(b"event 1 key").unwrap()); + let event0_data = EventData(StarkHash::from_be_slice(b"event 0 data").unwrap()); + let event0 = Event { + data: vec![event0_data], + from_address: contract0_address, + keys: vec![event0_key], + }; + let event1 = Event { + data: vec![EventData( + StarkHash::from_be_slice(b"event 1 data").unwrap(), + )], + from_address: contract1_address, + keys: vec![event1_key], + }; + + storage_rev7::StarknetBlocksTable::insert(&transaction, &block0).unwrap(); + StarknetEventsTable::insert_events( + &transaction, + block0_number, + &transaction0, + &[event0], + ) + .unwrap(); + storage_rev7::StarknetBlocksTable::insert(&transaction, &block1).unwrap(); + StarknetEventsTable::insert_events( + &transaction, + block1_number, + &transaction1, + &[event1], + ) + .unwrap(); + + // 3. Migrate up to rev9 + schema::revision_0008::migrate(&transaction).unwrap(); + schema::revision_0009::migrate(&transaction).unwrap(); + + // 4. Migration to rev10 should fix the problem + let action = super::super::migrate(&transaction).unwrap(); + assert_eq!(action, PostMigrationAction::None); + + // 5. 
Perform the operation that used to trigger the failure and make sure it does not occur now + StarknetBlocksTable::reorg(&transaction, block1_number).unwrap(); + + assert_eq!( + StarknetBlocksTable::get_latest_number(&transaction) + .unwrap() + .unwrap(), + block0_number + ); + let filter0 = StarknetEventFilter { + contract_address: None, + from_block: None, + to_block: None, + keys: vec![event0_key], + page_size: 10, + page_number: 0, + }; + let filter1 = StarknetEventFilter { + contract_address: None, + from_block: None, + to_block: None, + keys: vec![event1_key], + page_size: 10, + page_number: 0, + }; + assert_eq!( + StarknetEventsTable::get_events(&transaction, &filter0).unwrap(), + PageOfEvents { + events: vec![StarknetEmittedEvent { + block_hash: block0_hash, + block_number: block0_number, + data: vec![event0_data], + from_address: contract0_address, + keys: vec![event0_key], + transaction_hash: transaction0_hash, + }], + is_last_page: true + } + ); + assert!(StarknetEventsTable::get_events(&transaction, &filter1) + .unwrap() + .events + .is_empty()); + } + + #[test] + fn correct_schema_in_rev7() { + run_stateful_scenario(|tx| { + schema::revision_0007::migrate(tx).unwrap(); + }); + } + + #[test] + fn buggy_schema_in_rev7() { + run_stateful_scenario(|tx| { + schema::revision_0007::migrate_with(tx, super::BUGGY_STARKNET_EVENTS_CREATE_STMT) + .unwrap(); + }); + } + } +} From ed760650f31bfee1ca73de72fa7377c5ba8a80f2 Mon Sep 17 00:00:00 2001 From: Krzysztof Lis Date: Fri, 20 May 2022 21:56:03 +0200 Subject: [PATCH 4/9] chore: bump schema revision in py --- py/src/call.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/src/call.py b/py/src/call.py index 2e460b5983..1dcf56e744 100644 --- a/py/src/call.py +++ b/py/src/call.py @@ -7,7 +7,7 @@ from starkware.storage.storage import Storage # used from tests, and the query which asserts that the schema is of expected version. 
-EXPECTED_SCHEMA_REVISION = 9 +EXPECTED_SCHEMA_REVISION = 10 EXPECTED_CAIRO_VERSION = "0.8.2.1" From 51c883264daa8a902365d698f1ff1b1fa9a9755c Mon Sep 17 00:00:00 2001 From: Krzysztof Lis Date: Fri, 20 May 2022 21:58:32 +0200 Subject: [PATCH 5/9] test: remove redundant test --- crates/pathfinder/src/storage/state.rs | 123 +------------------------ 1 file changed, 1 insertion(+), 122 deletions(-) diff --git a/crates/pathfinder/src/storage/state.rs b/crates/pathfinder/src/storage/state.rs index f59d62e12a..12ff12c3a8 100644 --- a/crates/pathfinder/src/storage/state.rs +++ b/crates/pathfinder/src/storage/state.rs @@ -977,10 +977,7 @@ impl ContractsStateTable { #[cfg(test)] mod tests { use super::*; - use crate::{ - sequencer::reply::transaction::{Event, Transaction}, - storage::Storage, - }; + use crate::storage::Storage; mod contracts { use super::*; @@ -2080,122 +2077,4 @@ mod tests { ); } } - - #[test] - fn revision7_l2_reorg_regression() { - let storage = Storage::in_memory().unwrap(); - let connection = storage.connection().unwrap(); - - let block0_number = StarknetBlockNumber(0); - let block1_number = StarknetBlockNumber(1); - let block0_hash = StarknetBlockHash(StarkHash::from_be_slice(b"block 1 hash").unwrap()); - let block0 = StarknetBlock { - hash: block0_hash, - number: block0_number, - root: GlobalRoot(StarkHash::from_be_slice(b"root 0").unwrap()), - timestamp: StarknetBlockTimestamp(0), - gas_price: GasPrice::from(0), - sequencer_address: SequencerAddress( - StarkHash::from_be_slice(b"sequencer_address 0").unwrap(), - ), - }; - let block1 = StarknetBlock { - hash: StarknetBlockHash(StarkHash::from_be_slice(b"block 1 hash").unwrap()), - number: block1_number, - root: GlobalRoot(StarkHash::from_be_slice(b"root 1").unwrap()), - timestamp: StarknetBlockTimestamp(1), - gas_price: GasPrice::from(1), - sequencer_address: SequencerAddress( - StarkHash::from_be_slice(b"sequencer_address 1").unwrap(), - ), - }; - let contract0_address = - ContractAddress(StarkHash::from_be_slice(b"contract 0 address").unwrap()); - let contract1_address = - ContractAddress(StarkHash::from_be_slice(b"contract 1 address").unwrap()); - let transaction0_hash = - StarknetTransactionHash(StarkHash::from_be_slice(b"transaction 0 hash").unwrap()); - let transaction0 = Transaction { - calldata: None, - class_hash: None, - constructor_calldata: None, - contract_address: contract0_address, - contract_address_salt: None, - entry_point_selector: None, - entry_point_type: None, - max_fee: None, - signature: None, - transaction_hash: transaction0_hash, - r#type: transaction::Type::Deploy, - }; - let mut transaction1 = transaction0.clone(); - transaction1.transaction_hash = - StarknetTransactionHash(StarkHash::from_be_slice(b"transaction 1 hash").unwrap()); - let event0_key = EventKey(StarkHash::from_be_slice(b"event 0 key").unwrap()); - let event1_key = EventKey(StarkHash::from_be_slice(b"event 1 key").unwrap()); - let event0_data = EventData(StarkHash::from_be_slice(b"event 0 data").unwrap()); - let event0 = Event { - data: vec![event0_data], - from_address: contract0_address, - keys: vec![event0_key], - }; - let event1 = Event { - data: vec![EventData( - StarkHash::from_be_slice(b"event 1 data").unwrap(), - )], - from_address: contract1_address, - keys: vec![event1_key], - }; - - StarknetBlocksTable::insert(&connection, &block0).unwrap(); - StarknetEventsTable::insert_events(&connection, block0_number, &transaction0, &[event0]) - .unwrap(); - StarknetBlocksTable::insert(&connection, &block1).unwrap(); - 
StarknetEventsTable::insert_events(&connection, block1_number, &transaction1, &[event1])
-            .unwrap();
-
-        // UUT
-        StarknetBlocksTable::reorg(&connection, block1_number).unwrap();
-
-        assert_eq!(
-            StarknetBlocksTable::get_latest_number(&connection)
-                .unwrap()
-                .unwrap(),
-            block0_number
-        );
-        let filter0 = StarknetEventFilter {
-            contract_address: None,
-            from_block: None,
-            to_block: None,
-            keys: vec![event0_key],
-            page_size: 10,
-            page_number: 0,
-        };
-        let filter1 = StarknetEventFilter {
-            contract_address: None,
-            from_block: None,
-            to_block: None,
-            keys: vec![event1_key],
-            page_size: 10,
-            page_number: 0,
-        };
-        assert_eq!(
-            StarknetEventsTable::get_events(&connection, &filter0).unwrap(),
-            PageOfEvents {
-                events: vec![StarknetEmittedEvent {
-                    block_hash: block0_hash,
-                    block_number: block0_number,
-                    data: vec![event0_data],
-                    from_address: contract0_address,
-                    keys: vec![event0_key],
-                    transaction_hash: transaction0_hash,
-                }],
-                is_last_page: true
-            }
-        );
-        assert!(StarknetEventsTable::get_events(&connection, &filter1)
-            .unwrap()
-            .events
-            .is_empty());
-    }
 }

From 2a99b07c3871ecf520144fd829663860e88285f5 Mon Sep 17 00:00:00 2001
From: Krzysztof Lis
Date: Fri, 20 May 2022 22:08:19 +0200
Subject: [PATCH 6/9] test: make the retry timeout test faster

---
 crates/pathfinder/Cargo.toml       | 2 +-
 crates/pathfinder/src/sequencer.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/pathfinder/Cargo.toml b/crates/pathfinder/Cargo.toml
index 4e88e0e732..6e7d3a38a5 100644
--- a/crates/pathfinder/Cargo.toml
+++ b/crates/pathfinder/Cargo.toml
@@ -40,7 +40,7 @@ serde_with = "1.9.4"
 sha3 = "0.9"
 tempfile = "3"
 thiserror = "1.0.30"
-tokio = "1.11.0"
+tokio = { version = "1.11.0", features = ["test-util"] }
 tokio-retry = "0.3.0"
 toml = "0.5.8"
 tracing = "0.1.31"
diff --git a/crates/pathfinder/src/sequencer.rs b/crates/pathfinder/src/sequencer.rs
index c1de49d27b..ec490d9447 100644
--- a/crates/pathfinder/src/sequencer.rs
+++ b/crates/pathfinder/src/sequencer.rs
@@ -1603,7 +1603,7 @@ mod tests {
         );
     }
 
-    #[test_log::test(tokio::test)]
+    #[tokio::test(flavor = "current_thread", start_paused = true)]
     async fn request_timeout() {
         use std::sync::atomic::{AtomicUsize, Ordering};

From fa7a6e295dfc68241ed9383a99e51d9fb53811df Mon Sep 17 00:00:00 2001
From: Krzysztof Lis
Date: Mon, 23 May 2022 16:38:09 +0200
Subject: [PATCH 7/9] fix: make sure starknet_events rowids remain the same

- the virtual table `starknet_events_keys` remains unchanged
- but as it references rowids from the old `starknet_events` table
- we need to make sure that the recreated `starknet_events` table contains the same rowids
- otherwise `starknet_events_keys` could contain stale data and `starknet_getEvents` could cease to return valid replies
---
 crates/pathfinder/src/storage.rs              |  64 ++++++++-
 .../src/storage/schema/revision_0010.rs       | 127 +++++++++++++++++-
 crates/pathfinder/src/storage/state.rs        |  56 +-------
 3 files changed, 189 insertions(+), 58 deletions(-)

diff --git a/crates/pathfinder/src/storage.rs b/crates/pathfinder/src/storage.rs
index 5168bc622c..01a337f711 100644
--- a/crates/pathfinder/src/storage.rs
+++ b/crates/pathfinder/src/storage.rs
@@ -199,9 +199,13 @@ fn enable_foreign_keys(connection: &Connection) -> anyhow::Result<()> {
 pub(crate) mod test_utils {
     use super::StarknetBlock;
-    use crate::core::{
-        GasPrice, GlobalRoot, SequencerAddress, StarknetBlockHash, StarknetBlockNumber,
-        StarknetBlockTimestamp,
+    use crate::{
+        core::{
+            ContractAddress, EventData, EventKey, GasPrice, GlobalRoot, SequencerAddress,
+            StarknetBlockHash, StarknetBlockNumber, StarknetBlockTimestamp,
+            StarknetTransactionHash, StarknetTransactionIndex,
+        },
+        sequencer::reply::transaction,
     };
     use pedersen::StarkHash;
@@ -222,6 +226,60 @@ pub(crate) mod test_utils {
         .try_into()
         .unwrap()
     }
+
+    /// Creates a set of test transactions and receipts.
+    pub(crate) fn create_transactions_and_receipts<const N: usize>(
+    ) -> [(transaction::Transaction, transaction::Receipt); N] {
+        let transactions = (0..N).map(|i| transaction::Transaction {
+            calldata: None,
+            class_hash: None,
+            constructor_calldata: None,
+            contract_address: ContractAddress(StarkHash::from_hex_str(&"2".repeat(i + 3)).unwrap()),
+            contract_address_salt: None,
+            entry_point_type: None,
+            entry_point_selector: None,
+            signature: None,
+            transaction_hash: StarknetTransactionHash(
+                StarkHash::from_hex_str(&"f".repeat(i + 3)).unwrap(),
+            ),
+            r#type: transaction::Type::InvokeFunction,
+            max_fee: None,
+        });
+        let receipts = (0..N).map(|i| transaction::Receipt {
+            actual_fee: None,
+            events: vec![transaction::Event {
+                from_address: ContractAddress(StarkHash::from_hex_str(&"2".repeat(i + 3)).unwrap()),
+                data: vec![EventData(
+                    StarkHash::from_hex_str(&"c".repeat(i + 3)).unwrap(),
+                )],
+                keys: vec![
+                    EventKey(StarkHash::from_hex_str(&"d".repeat(i + 3)).unwrap()),
+                    EventKey(StarkHash::from_hex_str("deadbeef").unwrap()),
+                ],
+            }],
+            execution_resources: transaction::ExecutionResources {
+                builtin_instance_counter:
+                    transaction::execution_resources::BuiltinInstanceCounter::Empty(
+                        transaction::execution_resources::EmptyBuiltinInstanceCounter {},
+                    ),
+                n_steps: i as u64 + 987,
+                n_memory_holes: i as u64 + 1177,
+            },
+            l1_to_l2_consumed_message: None,
+            l2_to_l1_messages: Vec::new(),
+            transaction_hash: StarknetTransactionHash(
+                StarkHash::from_hex_str(&"e".repeat(i + 3)).unwrap(),
+            ),
+            transaction_index: StarknetTransactionIndex(i as u64 + 2311),
+        });
+
+        transactions
+            .into_iter()
+            .zip(receipts)
+            .collect::<Vec<_>>()
+            .try_into()
+            .unwrap()
+    }
 }
 
 #[cfg(test)]
diff --git a/crates/pathfinder/src/storage/schema/revision_0010.rs b/crates/pathfinder/src/storage/schema/revision_0010.rs
index a15720795a..4e72cef842 100644
--- a/crates/pathfinder/src/storage/schema/revision_0010.rs
+++ b/crates/pathfinder/src/storage/schema/revision_0010.rs
@@ -55,6 +55,7 @@ pub(crate) fn migrate(transaction: &Transaction) -> anyhow::Result<PostMigrationAction> {
             INSERT INTO starknet_events_v2 (
+                rowid,
                 block_number,
                 idx,
                 transaction_hash,
                 from_address,
@@ -66,6 +67,7 @@ pub(crate) fn migrate(transaction: &Transaction) -> anyhow::Result<PostMigrationAction> {
                 data)
 
             SELECT starknet_events.rowid,
                    starknet_events.block_number,
                    starknet_events.idx,
                    starknet_events.transaction_hash,
@@ -108,7 +109,129 @@ pub(crate) fn migrate(transaction: &Transaction) -> anyhow::Result<PostMigrationAction> {
+    fn setup(connection: &Connection) -> Vec<StarknetEmittedEvent> {
+        let blocks = crate::storage::test_utils::create_blocks::<NUM_BLOCKS>();
+        let transactions_and_receipts =
+            crate::storage::test_utils::create_transactions_and_receipts::<NUM_TXNS>();
+
+        for (i, block) in blocks.iter().enumerate() {
+            connection
+                .execute(
+                    r"INSERT INTO starknet_blocks ( number, hash, root, timestamp)
+                                           VALUES (:number, :hash, :root, :timestamp)",
+                    rusqlite::named_params!
{ + ":number": block.number.0, + ":hash": block.hash.0.as_be_bytes(), + ":root": block.root.0.as_be_bytes(), + ":timestamp": block.timestamp.0, + }, + ) + .unwrap(); + + crate::storage::StarknetTransactionsTable::upsert( + connection, + block.hash, + block.number, + &transactions_and_receipts[i * TXNS_PER_BLOCK..(i + 1) * TXNS_PER_BLOCK], + ) + .unwrap(); + } + + transactions_and_receipts + .iter() + .enumerate() + .map(|(i, (txn, receipt))| { + let event = &receipt.events[0]; + let block = &blocks[i / 10]; + + StarknetEmittedEvent { + data: event.data.clone(), + from_address: event.from_address, + keys: event.keys.clone(), + block_hash: block.hash, + block_number: block.number, + transaction_hash: txn.transaction_hash, + } + }) + .collect() + } + + #[test] + fn virtual_table_still_references_valid_data() { + use crate::storage::schema; + use anyhow::Context; + + let mut connection = Connection::open_in_memory().unwrap(); + let transaction = connection.transaction().unwrap(); + + // 0. Initial migrations happen + schema::revision_0001::migrate(&transaction).unwrap(); + schema::revision_0002::migrate(&transaction).unwrap(); + schema::revision_0003::migrate(&transaction).unwrap(); + schema::revision_0004::migrate(&transaction).unwrap(); + schema::revision_0005::migrate(&transaction).unwrap(); + schema::revision_0006::migrate(&transaction).unwrap(); + + // 1. There is a buggy schema in rev7 + schema::revision_0007::migrate_with( + &transaction, + super::BUGGY_STARKNET_EVENTS_CREATE_STMT, + ) + .unwrap(); + + // 2. Simulate rowids of the old `starknet_events` table to be different from + // the new, migrated `starknet_events` table + let emitted_events = setup(&transaction); + let changed = transaction + .execute(r"UPDATE starknet_events SET rowid = rowid + 1000000", []) + .context("Force arbitrary rowids") + .unwrap(); + assert_eq!(changed, NUM_TXNS); + + let expected_event = &emitted_events[1]; + let filter = StarknetEventFilter { + from_block: Some(expected_event.block_number), + to_block: Some(expected_event.block_number), + contract_address: Some(expected_event.from_address), + // we're using a key which is present in _all_ events + keys: vec![EventKey(StarkHash::from_hex_str("deadbeef").unwrap())], + page_size: NUM_TXNS, + page_number: 0, + }; + + // 3. Getting events works just fine, the result relies on the data in `starknet_events_keys` virtual table + let events = StarknetEventsTable::get_events(&transaction, &filter).unwrap(); + assert_eq!( + events, + PageOfEvents { + events: vec![expected_event.clone()], + is_last_page: true + } + ); + + // 4. More migrations happen + schema::revision_0008::migrate(&transaction).unwrap(); + schema::revision_0009::migrate(&transaction).unwrap(); + + // 5. 
Eventually the schema from rev7 gets fixed, but we need to make sure that the virtual
+        // table `starknet_events_keys` still contains data which references valid rowids
+        // in the new `starknet_events` table
+        schema::revision_0010::migrate(&transaction).unwrap();
+
+        let events = StarknetEventsTable::get_events(&transaction, &filter).unwrap();
+        assert_eq!(
+            events,
+            PageOfEvents {
+                events: vec![expected_event.clone()],
+                is_last_page: true
+            }
+        );
+    }
 }
diff --git a/crates/pathfinder/src/storage/state.rs b/crates/pathfinder/src/storage/state.rs
index 12ff12c3a8..1e75e97d72 100644
--- a/crates/pathfinder/src/storage/state.rs
+++ b/crates/pathfinder/src/storage/state.rs
@@ -1554,7 +1554,7 @@ mod tests {
 
     mod starknet_events {
         use super::*;
-        use crate::core::{EventData, StarknetTransactionIndex};
+        use crate::core::EventData;
         use crate::sequencer::reply::transaction;
 
         #[test]
@@ -1608,59 +1608,7 @@ mod tests {
 
         fn create_transactions_and_receipts(
         ) -> [(transaction::Transaction, transaction::Receipt); NUM_TRANSACTIONS] {
-            let transactions = (0..NUM_TRANSACTIONS).map(|i| transaction::Transaction {
-                calldata: None,
-                class_hash: None,
-                constructor_calldata: None,
-                contract_address: ContractAddress(
-                    StarkHash::from_hex_str(&"2".repeat(i + 3)).unwrap(),
-                ),
-                contract_address_salt: None,
-                entry_point_type: None,
-                entry_point_selector: None,
-                signature: None,
-                transaction_hash: StarknetTransactionHash(
-                    StarkHash::from_hex_str(&"f".repeat(i + 3)).unwrap(),
-                ),
-                r#type: transaction::Type::InvokeFunction,
-                max_fee: None,
-            });
-            let receipts = (0..NUM_TRANSACTIONS).map(|i| transaction::Receipt {
-                actual_fee: None,
-                events: vec![transaction::Event {
-                    from_address: ContractAddress(
-                        StarkHash::from_hex_str(&"2".repeat(i + 3)).unwrap(),
-                    ),
-                    data: vec![EventData(
-                        StarkHash::from_hex_str(&"c".repeat(i + 3)).unwrap(),
-                    )],
-                    keys: vec![
-                        EventKey(StarkHash::from_hex_str(&"d".repeat(i + 3)).unwrap()),
-                        EventKey(StarkHash::from_hex_str("deadbeef").unwrap()),
-                    ],
-                }],
-                execution_resources: transaction::ExecutionResources {
-                    builtin_instance_counter:
-                        transaction::execution_resources::BuiltinInstanceCounter::Empty(
-                            transaction::execution_resources::EmptyBuiltinInstanceCounter {},
-                        ),
-                    n_steps: i as u64 + 987,
-                    n_memory_holes: i as u64 + 1177,
-                },
-                l1_to_l2_consumed_message: None,
-                l2_to_l1_messages: Vec::new(),
-                transaction_hash: StarknetTransactionHash(
-                    StarkHash::from_hex_str(&"e".repeat(i + 3)).unwrap(),
-                ),
-                transaction_index: StarknetTransactionIndex(i as u64 + 2311),
-            });
-
-            transactions
-                .into_iter()
-                .zip(receipts)
-                .collect::<Vec<_>>()
-                .try_into()
-                .unwrap()
+            crate::storage::test_utils::create_transactions_and_receipts::<NUM_TRANSACTIONS>()
         }
 
         fn setup(connection: &Connection) -> Vec<StarknetEmittedEvent> {

From c2cccbaa5a91e81281ce1dc24bf18aa084c9f00f Mon Sep 17 00:00:00 2001
From: Krzysztof Lis
Date: Tue, 24 May 2022 09:49:27 +0200
Subject: [PATCH 8/9] fixup! doc: enhance comment about why rowids are copied

---
 .../pathfinder/src/storage/schema/revision_0010.rs | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/crates/pathfinder/src/storage/schema/revision_0010.rs b/crates/pathfinder/src/storage/schema/revision_0010.rs
index 4e72cef842..28484b9cd6 100644
--- a/crates/pathfinder/src/storage/schema/revision_0010.rs
+++ b/crates/pathfinder/src/storage/schema/revision_0010.rs
@@ -37,8 +37,15 @@ pub(crate) fn migrate(transaction: &Transaction) -> anyhow::Result<PostMigrationAction> {

From: Krzysztof Lis
Date: Tue, 24 May 2022 10:12:59 +0200
Subject: [PATCH 9/9] revert: "test: make the retry timeout test faster"

This reverts commit 2a99b07c3871ecf520144fd829663860e88285f5.
---
 crates/pathfinder/Cargo.toml       | 2 +-
 crates/pathfinder/src/sequencer.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/pathfinder/Cargo.toml b/crates/pathfinder/Cargo.toml
index 6e7d3a38a5..4e88e0e732 100644
--- a/crates/pathfinder/Cargo.toml
+++ b/crates/pathfinder/Cargo.toml
@@ -40,7 +40,7 @@ serde_with = "1.9.4"
 sha3 = "0.9"
 tempfile = "3"
 thiserror = "1.0.30"
-tokio = { version = "1.11.0", features = ["test-util"] }
+tokio = "1.11.0"
 tokio-retry = "0.3.0"
 toml = "0.5.8"
 tracing = "0.1.31"
diff --git a/crates/pathfinder/src/sequencer.rs b/crates/pathfinder/src/sequencer.rs
index ec490d9447..c1de49d27b 100644
--- a/crates/pathfinder/src/sequencer.rs
+++ b/crates/pathfinder/src/sequencer.rs
@@ -1603,7 +1603,7 @@ mod tests {
         );
     }
 
-    #[tokio::test(flavor = "current_thread", start_paused = true)]
+    #[test_log::test(tokio::test)]
     async fn request_timeout() {
         use std::sync::atomic::{AtomicUsize, Ordering};
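
A closing note on the central fix in PATCH 7 (an illustrative sketch only, not part of the series; the table names `t` and `t_idx` below are hypothetical stand-ins for `starknet_events` and `starknet_events_keys`): an FTS5 table declared with `content='...'` is an external-content index. It stores no copy of the indexed rows and resolves every match through the base table's rowid, which is why the recreated table must keep the old rowids.

    -- Minimal reproduction of the pitfall PATCH 7 guards against:
    CREATE TABLE t (msg TEXT);
    CREATE VIRTUAL TABLE t_idx USING fts5(msg, content='t', content_rowid='rowid');
    INSERT INTO t (rowid, msg) VALUES (42, 'hello');
    INSERT INTO t_idx (rowid, msg) VALUES (42, 'hello');

    -- Naive recreation: the copied row silently gets a fresh rowid (1, not 42),
    -- so t_idx keeps mapping 'hello' to a rowid that no longer matches any row.
    CREATE TABLE t_v2 (msg TEXT);
    INSERT INTO t_v2 (msg) SELECT msg FROM t;
    DROP TABLE t;
    ALTER TABLE t_v2 RENAME TO t;

    -- Revision 10's approach: copy the rowid explicitly so the index stays valid.
    -- INSERT INTO t_v2 (rowid, msg) SELECT rowid, msg FROM t;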