Skip to content

Commit

Permalink
Merge pull request #299 from eqlabs/CHr15F0x/fix_schema
Browse files Browse the repository at this point in the history
Migrate the db if it is based on a "broken" version of rev7
  • Loading branch information
CHr15F0x authored May 24, 2022
2 parents 00396c1 + d06bb26 commit d3e2866
Show file tree
Hide file tree
Showing 7 changed files with 725 additions and 246 deletions.
2 changes: 1 addition & 1 deletion crates/pathfinder/src/state/sync.rs
Original file line number Diff line number Diff line change
Expand Up @@ -533,7 +533,7 @@ async fn l2_reorg(
// TODO: clean up state tree's as well...

StarknetBlocksTable::reorg(&transaction, reorg_tail)
.context("Delete L1 state from database")?;
.context("Delete L2 state from database")?;

// Track combined L1 and L2 state.
let l1_l2_head = RefsTable::get_l1_l2_head(&transaction).context("Query L1-L2 head")?;
Expand Down
67 changes: 63 additions & 4 deletions crates/pathfinder/src/storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ use tracing::info;
/// Indicates database is non-existent.
const DB_VERSION_EMPTY: u32 = 0;
/// Current database version.
const DB_VERSION_CURRENT: u32 = 9;
const DB_VERSION_CURRENT: u32 = 10;
/// Sqlite key used for the PRAGMA user version.
const VERSION_KEY: &str = "user_version";

Expand Down Expand Up @@ -147,6 +147,7 @@ fn migrate_database(connection: &mut Connection) -> anyhow::Result<()> {
6 => schema::revision_0007::migrate(&transaction)?,
7 => schema::revision_0008::migrate(&transaction)?,
8 => schema::revision_0009::migrate(&transaction)?,
9 => schema::revision_0010::migrate(&transaction)?,
_ => unreachable!("Database version constraint was already checked!"),
};
// If any migration action requires vacuuming, we should vacuum.
Expand Down Expand Up @@ -198,9 +199,13 @@ fn enable_foreign_keys(connection: &Connection) -> anyhow::Result<()> {
pub(crate) mod test_utils {
use super::StarknetBlock;

use crate::core::{
GasPrice, GlobalRoot, SequencerAddress, StarknetBlockHash, StarknetBlockNumber,
StarknetBlockTimestamp,
use crate::{
core::{
ContractAddress, EventData, EventKey, GasPrice, GlobalRoot, SequencerAddress,
StarknetBlockHash, StarknetBlockNumber, StarknetBlockTimestamp,
StarknetTransactionHash, StarknetTransactionIndex,
},
sequencer::reply::transaction,
};

use pedersen::StarkHash;
Expand All @@ -221,6 +226,60 @@ pub(crate) mod test_utils {
.try_into()
.unwrap()
}

/// Builds `N` deterministic (transaction, receipt) pairs for tests.
///
/// Every hash-like field is derived from the pair's index, so elements are
/// unique within the array and reproducible across runs.
pub(crate) fn create_transactions_and_receipts<const N: usize>(
) -> [(transaction::Transaction, transaction::Receipt); N] {
    // A StarkHash made of `idx + 3` repetitions of a single hex digit.
    let repeated_hash =
        |digit: &str, idx: usize| StarkHash::from_hex_str(&digit.repeat(idx + 3)).unwrap();

    let txns = (0..N).map(|idx| transaction::Transaction {
        calldata: None,
        class_hash: None,
        constructor_calldata: None,
        contract_address: ContractAddress(repeated_hash("2", idx)),
        contract_address_salt: None,
        entry_point_type: None,
        entry_point_selector: None,
        signature: None,
        transaction_hash: StarknetTransactionHash(repeated_hash("f", idx)),
        r#type: transaction::Type::InvokeFunction,
        max_fee: None,
    });

    let rcpts = (0..N).map(|idx| transaction::Receipt {
        actual_fee: None,
        events: vec![transaction::Event {
            // Same address as the matching transaction's contract.
            from_address: ContractAddress(repeated_hash("2", idx)),
            data: vec![EventData(repeated_hash("c", idx))],
            keys: vec![
                EventKey(repeated_hash("d", idx)),
                // A constant key shared by every generated event.
                EventKey(StarkHash::from_hex_str("deadbeef").unwrap()),
            ],
        }],
        execution_resources: transaction::ExecutionResources {
            builtin_instance_counter:
                transaction::execution_resources::BuiltinInstanceCounter::Empty(
                    transaction::execution_resources::EmptyBuiltinInstanceCounter {},
                ),
            n_steps: idx as u64 + 987,
            n_memory_holes: idx as u64 + 1177,
        },
        l1_to_l2_consumed_message: None,
        l2_to_l1_messages: Vec::new(),
        transaction_hash: StarknetTransactionHash(repeated_hash("e", idx)),
        transaction_index: StarknetTransactionIndex(idx as u64 + 2311),
    });

    // Pair them up and convert the Vec into a fixed-size array.
    txns.zip(rcpts).collect::<Vec<_>>().try_into().unwrap()
}
}

#[cfg(test)]
Expand Down
1 change: 1 addition & 0 deletions crates/pathfinder/src/storage/schema.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ pub(crate) mod revision_0006;
pub(crate) mod revision_0007;
pub(crate) mod revision_0008;
pub(crate) mod revision_0009;
pub(crate) mod revision_0010;

/// Used to indicate which action the caller should perform after a schema migration.
#[derive(Debug, Clone, Copy, PartialEq)]
Expand Down
135 changes: 71 additions & 64 deletions crates/pathfinder/src/storage/schema/revision_0007.rs
Original file line number Diff line number Diff line change
Expand Up @@ -179,73 +179,80 @@ mod transaction {
}
}

/// DDL for the revision-7 events store: the `starknet_events` table, its
/// block-number and from-address indexes, the `starknet_events_keys` FTS5
/// virtual table (external-content, backed by `starknet_events`) used for
/// key filtering, and the insert/delete/update triggers that keep the FTS
/// index in sync with the base table.
///
/// NOTE(review): exposed as a named constant so `migrate_with` can be
/// invoked with an alternative statement — presumably by the revision-10
/// migration that repairs a "broken" rev7 schema; confirm against callers.
const STARKNET_EVENTS_CREATE_STMT: &str = r"CREATE TABLE starknet_events (
block_number INTEGER NOT NULL,
idx INTEGER NOT NULL,
transaction_hash BLOB NOT NULL,
from_address BLOB NOT NULL,
-- Keys are represented as base64 encoded strings separated by space
keys TEXT,
data BLOB,
FOREIGN KEY(block_number) REFERENCES starknet_blocks(number)
ON DELETE CASCADE
);
-- Event filters can specify ranges of blocks
CREATE INDEX starknet_events_block_number ON starknet_events(block_number);
-- Event filter can specify a contract address
CREATE INDEX starknet_events_from_address ON starknet_events(from_address);
CREATE VIRTUAL TABLE starknet_events_keys
USING fts5(
keys,
content='starknet_events',
content_rowid='rowid',
tokenize='ascii'
);
CREATE TRIGGER starknet_events_ai
AFTER INSERT ON starknet_events
BEGIN
INSERT INTO starknet_events_keys(rowid, keys)
VALUES (
new.rowid,
new.keys
);
END;
CREATE TRIGGER starknet_events_ad
AFTER DELETE ON starknet_events
BEGIN
INSERT INTO starknet_events_keys(starknet_events_keys, rowid, keys)
VALUES (
'delete',
old.rowid,
old.keys
);
END;
CREATE TRIGGER starknet_events_au
AFTER UPDATE ON starknet_events
BEGIN
INSERT INTO starknet_events_keys(starknet_events_keys, rowid, keys)
VALUES (
'delete',
old.rowid,
old.keys
);
INSERT INTO starknet_events_keys(rowid, keys)
VALUES (
new.rowid,
new.keys
);
END;";

/// Runs the revision-7 schema migration with the default
/// `starknet_events` DDL (`STARKNET_EVENTS_CREATE_STMT`).
///
/// Thin wrapper over `migrate_with`, which takes the DDL as a parameter
/// so other callers can substitute an alternative statement.
pub(crate) fn migrate(transaction: &Transaction) -> anyhow::Result<PostMigrationAction> {
migrate_with(transaction, STARKNET_EVENTS_CREATE_STMT)
}

pub(crate) fn migrate_with(
transaction: &Transaction,
starknet_events_create_stmt: &'static str,
) -> anyhow::Result<PostMigrationAction> {
// Create the new events table.
transaction
.execute_batch(
r"CREATE TABLE starknet_events (
block_number INTEGER NOT NULL,
idx INTEGER NOT NULL,
transaction_hash BLOB NOT NULL,
from_address BLOB NOT NULL,
-- Keys are represented as base64 encoded strings separated by space
keys TEXT,
data BLOB,
FOREIGN KEY(block_number) REFERENCES starknet_blocks(number)
ON DELETE CASCADE
);
-- Event filters can specify ranges of blocks
CREATE INDEX starknet_events_block_number ON starknet_events(block_number);
-- Event filter can specify a contract address
CREATE INDEX starknet_events_from_address ON starknet_events(from_address);
CREATE VIRTUAL TABLE starknet_events_keys
USING fts5(
keys,
content='starknet_events',
content_rowid='rowid',
tokenize='ascii'
);
CREATE TRIGGER starknet_events_ai
AFTER INSERT ON starknet_events
BEGIN
INSERT INTO starknet_events_keys(rowid, keys)
VALUES (
new.rowid,
new.keys
);
END;
CREATE TRIGGER starknet_events_ad
AFTER DELETE ON starknet_events
BEGIN
INSERT INTO starknet_events_keys(starknet_events_keys, rowid, keys)
VALUES (
'delete',
old.rowid,
old.keys
);
END;
CREATE TRIGGER starknet_events_au
AFTER UPDATE ON starknet_events
BEGIN
INSERT INTO starknet_events_keys(starknet_events_keys, rowid, keys)
VALUES (
'delete',
old.rowid,
old.keys
);
INSERT INTO starknet_events_keys(rowid, keys)
VALUES (
new.rowid,
new.keys
);
END;",
)
.execute_batch(starknet_events_create_stmt)
.context("Create starknet events tables and indexes")?;

// Create an index on starknet_blocks(hash) so that we can look up block numbers based
Expand Down
Loading

0 comments on commit d3e2866

Please sign in to comment.