From b82ada5b1c3004d35b60be05dec8241b88094647 Mon Sep 17 00:00:00 2001 From: Andrew Schran Date: Thu, 23 Jan 2025 13:20:11 -0500 Subject: [PATCH] Add new `object_per_epoch_marker_table` that includes consensus start versions (#20822) This introduces the concept of a "FullObjectID" and "FullObjectKey", which for consensus objects includes the initial shared version/start version. Migrates to these new types across the codebase where relevant. --- ## Release notes Check each box that your changes affect. If none of the boxes relate to your changes, release notes aren't required. For each box you select, include information after the relevant heading that describes the impact of your changes that a user might notice and any actions they must take to implement updates. - [ ] Protocol: - [ ] Nodes (Validators and Full nodes): - [ ] gRPC: - [ ] JSON-RPC: - [ ] GraphQL: - [ ] CLI: - [ ] Rust SDK: --- crates/simulacrum/src/store/in_mem_store.rs | 2 + crates/sui-core/src/authority.rs | 25 +++- .../authority/authority_per_epoch_store.rs | 11 +- .../sui-core/src/authority/authority_store.rs | 118 +++++++++++++----- .../src/authority/authority_store_tables.rs | 4 +- .../src/authority/test_authority_builder.rs | 9 +- .../checkpoints/checkpoint_executor/mod.rs | 18 ++- crates/sui-core/src/execution_cache.rs | 73 +++++++---- .../unit_tests/writeback_cache_tests.rs | 19 +-- .../src/execution_cache/writeback_cache.rs | 78 +++++++----- .../sui-core/src/transaction_input_loader.rs | 51 +++++--- crates/sui-core/src/transaction_manager.rs | 35 ++++-- crates/sui-core/src/transaction_outputs.rs | 44 ++++--- .../src/unit_tests/authority_tests.rs | 4 + .../shared_object_deletion_tests.rs | 40 ++++-- .../src/unit_tests/transaction_deny_tests.rs | 1 + .../unit_tests/transaction_manager_tests.rs | 10 +- .../unit_tests/transfer_to_object_tests.rs | 14 +-- crates/sui-open-rpc/spec/openrpc.json | 1 + crates/sui-protocol-config/src/lib.rs | 9 ++ ...ocol_config__test__Mainnet_version_72.snap | 1 + ...ocol_config__test__Testnet_version_72.snap | 1 + ...sui_protocol_config__test__version_72.snap | 1 + crates/sui-replay/src/replay.rs | 2 + .../src/benchmark_context.rs | 3 +- .../src/mock_storage.rs | 2 + .../src/simulator_persisted_store.rs | 2 + crates/sui-types/src/base_types.rs | 30 ++++- crates/sui-types/src/in_memory_storage.rs | 2 + crates/sui-types/src/inner_temporary_store.rs | 26 ++-- crates/sui-types/src/object.rs | 23 +++- crates/sui-types/src/storage/mod.rs | 107 +++++++++++++++- crates/sui-types/src/transaction.rs | 19 +++ .../latest/sui-adapter/src/temporary_store.rs | 7 ++ .../src/object_runtime/object_store.rs | 10 +- .../sui-move-natives/src/test_scenario.rs | 3 + .../v0/sui-adapter/src/temporary_store.rs | 7 ++ .../v1/sui-adapter/src/temporary_store.rs | 7 ++ .../src/object_runtime/mod.rs | 5 + .../src/object_runtime/object_store.rs | 8 +- .../v2/sui-adapter/src/temporary_store.rs | 7 ++ .../src/object_runtime/object_store.rs | 10 +- 42 files changed, 663 insertions(+), 186 deletions(-) diff --git a/crates/simulacrum/src/store/in_mem_store.rs b/crates/simulacrum/src/store/in_mem_store.rs index 6e9de3db8ffb6..0bc2efc3de946 100644 --- a/crates/simulacrum/src/store/in_mem_store.rs +++ b/crates/simulacrum/src/store/in_mem_store.rs @@ -269,6 +269,8 @@ impl ChildObjectResolver for InMemoryStore { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, _epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. 
+ _use_object_per_epoch_marker_table_v2: bool, ) -> sui_types::error::SuiResult> { let recv_object = match crate::store::SimulatorStore::get_object(self, receiving_object_id) { diff --git a/crates/sui-core/src/authority.rs b/crates/sui-core/src/authority.rs index 1a2e0bc2fd260..833b0ed91d43c 100644 --- a/crates/sui-core/src/authority.rs +++ b/crates/sui-core/src/authority.rs @@ -890,6 +890,10 @@ impl AuthorityState { &input_object_kinds, &receiving_objects_refs, epoch_store.epoch(), + epoch_store + .protocol_config() + .use_object_per_epoch_marker_table_v2_as_option() + .unwrap_or(false), )?; let (_gas_status, checked_input_objects) = sui_transaction_checks::check_transaction_input( @@ -1557,7 +1561,14 @@ impl AuthorityState { inner_temporary_store, ); self.get_cache_writer() - .write_transaction_outputs(epoch_store.epoch(), transaction_outputs.into()) + .write_transaction_outputs( + epoch_store.epoch(), + transaction_outputs.into(), + epoch_store + .protocol_config() + .use_object_per_epoch_marker_table_v2_as_option() + .unwrap_or(false), + ) .await; if certificate.transaction_data().is_end_of_epoch_tx() { @@ -1797,6 +1808,10 @@ impl AuthorityState { &input_object_kinds, &receiving_object_refs, epoch_store.epoch(), + epoch_store + .protocol_config() + .use_object_per_epoch_marker_table_v2_as_option() + .unwrap_or(false), )?; // make a gas object if one was not provided @@ -1983,6 +1998,10 @@ impl AuthorityState { &input_object_kinds, &receiving_object_refs, epoch_store.epoch(), + epoch_store + .protocol_config() + .use_object_per_epoch_marker_table_v2_as_option() + .unwrap_or(false), )?; // make a gas object if one was not provided @@ -2145,6 +2164,10 @@ impl AuthorityState { &input_object_kinds, &receiving_object_refs, epoch_store.epoch(), + epoch_store + .protocol_config() + .use_object_per_epoch_marker_table_v2_as_option() + .unwrap_or(false), )?; // Create and use a dummy gas object if there is no gas object provided. diff --git a/crates/sui-core/src/authority/authority_per_epoch_store.rs b/crates/sui-core/src/authority/authority_per_epoch_store.rs index 35b9affa1d88e..639155d67fc37 100644 --- a/crates/sui-core/src/authority/authority_per_epoch_store.rs +++ b/crates/sui-core/src/authority/authority_per_epoch_store.rs @@ -34,7 +34,8 @@ use sui_storage::mutex_table::{MutexGuard, MutexTable}; use sui_types::accumulator::Accumulator; use sui_types::authenticator_state::{get_authenticator_state, ActiveJwk}; use sui_types::base_types::{ - AuthorityName, ConsensusObjectSequenceKey, EpochId, ObjectID, SequenceNumber, TransactionDigest, + AuthorityName, ConsensusObjectSequenceKey, EpochId, FullObjectID, ObjectID, SequenceNumber, + TransactionDigest, }; use sui_types::base_types::{ConciseableName, ObjectRef}; use sui_types::committee::Committee; @@ -1437,7 +1438,7 @@ impl AuthorityPerEpochStore { error: "no assigned shared versions".to_string(), })?; - let initial_shared_version = + let modified_initial_shared_version = if self.epoch_start_config().use_version_assignment_tables_v3() { *initial_shared_version } else { @@ -1447,7 +1448,7 @@ impl AuthorityPerEpochStore { }; // If we found assigned versions, but they are missing the assignment for // this object, it indicates a serious inconsistency! - let Some(version) = assigned_shared_versions.get(&(*id, initial_shared_version)) else { + let Some(version) = assigned_shared_versions.get(&(*id, modified_initial_shared_version)) else { panic!( "Shared object version should have been assigned. 
key: {key:?}, \ obj id: {id:?}, initial_shared_version: {initial_shared_version:?}, \ @@ -1455,13 +1456,13 @@ impl AuthorityPerEpochStore { ) }; InputKey::VersionedObject { - id: *id, + id: FullObjectID::new(*id, Some(*initial_shared_version)), version: *version, } } InputObjectKind::MovePackage(id) => InputKey::Package { id: *id }, InputObjectKind::ImmOrOwnedMoveObject(objref) => InputKey::VersionedObject { - id: objref.0, + id: FullObjectID::new(objref.0, None), version: objref.1, }, }) diff --git a/crates/sui-core/src/authority/authority_store.rs b/crates/sui-core/src/authority/authority_store.rs index 795f238fee257..638b706b6835e 100644 --- a/crates/sui-core/src/authority/authority_store.rs +++ b/crates/sui-core/src/authority/authority_store.rs @@ -31,7 +31,8 @@ use sui_types::error::UserInputError; use sui_types::execution::TypeLayoutStore; use sui_types::message_envelope::Message; use sui_types::storage::{ - get_module, BackingPackageStore, MarkerValue, ObjectKey, ObjectOrTombstone, ObjectStore, + get_module, BackingPackageStore, FullObjectKey, MarkerValue, ObjectKey, ObjectOrTombstone, + ObjectStore, }; use sui_types::sui_system_state::get_sui_system_state; use sui_types::{base_types::SequenceNumber, fp_bail, fp_ensure}; @@ -212,9 +213,12 @@ impl AuthorityStore { // We can safely delete all entries in the per epoch marker table since this is only called // at epoch boundaries (during reconfiguration). Therefore any entries that currently // exist can be removed. Because of this we can use the `schedule_delete_all` method. + self.perpetual_tables + .object_per_epoch_marker_table + .schedule_delete_all()?; Ok(self .perpetual_tables - .object_per_epoch_marker_table + .object_per_epoch_marker_table_v2 .schedule_delete_all()?) } @@ -412,40 +416,70 @@ impl AuthorityStore { pub fn get_marker_value( &self, - object_id: &ObjectID, - version: &SequenceNumber, + object_key: FullObjectKey, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { - let object_key = (epoch_id, ObjectKey(*object_id, *version)); - Ok(self - .perpetual_tables - .object_per_epoch_marker_table - .get(&object_key)?) + if use_object_per_epoch_marker_table_v2 { + Ok(self + .perpetual_tables + .object_per_epoch_marker_table_v2 + .get(&(epoch_id, object_key))?) + } else { + Ok(self + .perpetual_tables + .object_per_epoch_marker_table + .get(&(epoch_id, object_key.into_object_key()))?) + } } pub fn get_latest_marker( &self, - object_id: &ObjectID, + object_id: FullObjectID, epoch_id: EpochId, + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { - let min_key = (epoch_id, ObjectKey::min_for_id(object_id)); - let max_key = (epoch_id, ObjectKey::max_for_id(object_id)); + if use_object_per_epoch_marker_table_v2 { + let min_key = (epoch_id, FullObjectKey::min_for_id(&object_id)); + let max_key = (epoch_id, FullObjectKey::max_for_id(&object_id)); - let marker_entry = self - .perpetual_tables - .object_per_epoch_marker_table - .safe_iter_with_bounds(Some(min_key), Some(max_key)) - .skip_prior_to(&max_key)? - .next(); - match marker_entry { - Some(Ok(((epoch, key), marker))) => { - // because of the iterator bounds these cannot fail - assert_eq!(epoch, epoch_id); - assert_eq!(key.0, *object_id); - Ok(Some((key.1, marker))) + let marker_entry = self + .perpetual_tables + .object_per_epoch_marker_table_v2 + .safe_iter_with_bounds(Some(min_key), Some(max_key)) + .skip_prior_to(&max_key)? 
+ .next(); + match marker_entry { + Some(Ok(((epoch, key), marker))) => { + // because of the iterator bounds these cannot fail + assert_eq!(epoch, epoch_id); + assert_eq!(key.id(), object_id); + Ok(Some((key.version(), marker))) + } + Some(Err(e)) => Err(e.into()), + None => Ok(None), + } + } else { + let min_key = (epoch_id, ObjectKey::min_for_id(&object_id.id())); + let max_key = (epoch_id, ObjectKey::max_for_id(&object_id.id())); + + let marker_entry = self + .perpetual_tables + .object_per_epoch_marker_table + .safe_iter_with_bounds(Some(min_key), Some(max_key)) + .skip_prior_to(&max_key)? + .next(); + match marker_entry { + Some(Ok(((epoch, key), marker))) => { + // because of the iterator bounds these cannot fail + assert_eq!(epoch, epoch_id); + assert_eq!(key.0, object_id.id()); + Ok(Some((key.1, marker))) + } + Some(Err(e)) => Err(e.into()), + None => Ok(None), } - Some(Err(e)) => Err(e.into()), - None => Ok(None), } } @@ -824,6 +858,8 @@ impl AuthorityStore { &self, epoch_id: EpochId, tx_outputs: &[Arc], + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult { let mut written = Vec::with_capacity(tx_outputs.len()); for outputs in tx_outputs { @@ -834,7 +870,12 @@ impl AuthorityStore { let mut write_batch = self.perpetual_tables.transactions.batch(); for outputs in tx_outputs { - self.write_one_transaction_outputs(&mut write_batch, epoch_id, outputs)?; + self.write_one_transaction_outputs( + &mut write_batch, + epoch_id, + outputs, + use_object_per_epoch_marker_table_v2, + )?; } // test crashing before writing the batch fail_point_async!("crash"); @@ -859,6 +900,8 @@ impl AuthorityStore { write_batch: &mut DBBatch, epoch_id: EpochId, tx_outputs: &TransactionOutputs, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult { let TransactionOutputs { transaction, @@ -883,12 +926,21 @@ impl AuthorityStore { // Add batched writes for objects and locks. 
let effects_digest = effects.digest(); - write_batch.insert_batch( - &self.perpetual_tables.object_per_epoch_marker_table, - markers - .iter() - .map(|(key, marker_value)| ((epoch_id, *key), *marker_value)), - )?; + if use_object_per_epoch_marker_table_v2 { + write_batch.insert_batch( + &self.perpetual_tables.object_per_epoch_marker_table_v2, + markers + .iter() + .map(|(key, marker_value)| ((epoch_id, *key), *marker_value)), + )?; + } else { + write_batch.insert_batch( + &self.perpetual_tables.object_per_epoch_marker_table, + markers + .iter() + .map(|(key, marker_value)| ((epoch_id, key.into_object_key()), *marker_value)), + )?; + } write_batch.insert_batch( &self.perpetual_tables.objects, diff --git a/crates/sui-core/src/authority/authority_store_tables.rs b/crates/sui-core/src/authority/authority_store_tables.rs index 4c5b3f3f28c70..5bd9f7810bb8d 100644 --- a/crates/sui-core/src/authority/authority_store_tables.rs +++ b/crates/sui-core/src/authority/authority_store_tables.rs @@ -9,7 +9,7 @@ use sui_types::accumulator::Accumulator; use sui_types::base_types::SequenceNumber; use sui_types::digests::TransactionEventsDigest; use sui_types::effects::TransactionEffects; -use sui_types::storage::MarkerValue; +use sui_types::storage::{FullObjectKey, MarkerValue}; use typed_store::metrics::SamplingInterval; use typed_store::rocks::util::{empty_compaction_filter, reference_count_merge_operator}; use typed_store::rocks::{ @@ -136,6 +136,7 @@ pub struct AuthorityPerpetualTables { /// objects that have been deleted. This table is meant to be pruned per-epoch, and all /// previous epochs other than the current epoch may be pruned safely. pub(crate) object_per_epoch_marker_table: DBMap<(EpochId, ObjectKey), MarkerValue>, + pub(crate) object_per_epoch_marker_table_v2: DBMap<(EpochId, FullObjectKey), MarkerValue>, } impl AuthorityPerpetualTables { @@ -459,6 +460,7 @@ impl AuthorityPerpetualTables { self.expected_network_sui_amount.unsafe_clear()?; self.expected_storage_fund_imbalance.unsafe_clear()?; self.object_per_epoch_marker_table.unsafe_clear()?; + self.object_per_epoch_marker_table_v2.unsafe_clear()?; self.objects.rocksdb.flush()?; Ok(()) } diff --git a/crates/sui-core/src/authority/test_authority_builder.rs b/crates/sui-core/src/authority/test_authority_builder.rs index 256ef7b4980de..1fbc4baee27ee 100644 --- a/crates/sui-core/src/authority/test_authority_builder.rs +++ b/crates/sui-core/src/authority/test_authority_builder.rs @@ -377,7 +377,14 @@ impl<'a> TestAuthorityBuilder<'a> { state .get_cache_commit() - .commit_transaction_outputs(epoch_store.epoch(), &[*genesis.transaction().digest()]) + .commit_transaction_outputs( + epoch_store.epoch(), + &[*genesis.transaction().digest()], + epoch_store + .protocol_config() + .use_object_per_epoch_marker_table_v2_as_option() + .unwrap_or(false), + ) .await; // We want to insert these objects directly instead of relying on genesis because diff --git a/crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs b/crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs index 780e32904bf6d..1b3acd7f3a489 100644 --- a/crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs +++ b/crates/sui-core/src/checkpoints/checkpoint_executor/mod.rs @@ -442,7 +442,14 @@ impl CheckpointExecutor { let cache_commit = self.state.get_cache_commit(); debug!(seq = ?checkpoint.sequence_number, "committing checkpoint transactions to disk"); cache_commit - .commit_transaction_outputs(epoch_store.epoch(), all_tx_digests) + .commit_transaction_outputs( + epoch_store.epoch(), 
+ all_tx_digests, + epoch_store + .protocol_config() + .use_object_per_epoch_marker_table_v2_as_option() + .unwrap_or(false), + ) .await; epoch_store @@ -657,7 +664,14 @@ impl CheckpointExecutor { let cache_commit = self.state.get_cache_commit(); cache_commit - .commit_transaction_outputs(cur_epoch, &[change_epoch_tx_digest]) + .commit_transaction_outputs( + cur_epoch, + &[change_epoch_tx_digest], + epoch_store + .protocol_config() + .use_object_per_epoch_marker_table_v2_as_option() + .unwrap_or(false), + ) .await; fail_point_async!("prune-and-compact"); diff --git a/crates/sui-core/src/execution_cache.rs b/crates/sui-core/src/execution_cache.rs index 0a66cbfc34dc4..1fe5cef45ef7f 100644 --- a/crates/sui-core/src/execution_cache.rs +++ b/crates/sui-core/src/execution_cache.rs @@ -19,14 +19,14 @@ use std::path::Path; use std::sync::Arc; use sui_config::ExecutionCacheConfig; use sui_protocol_config::ProtocolVersion; -use sui_types::base_types::VerifiedExecutionData; +use sui_types::base_types::{FullObjectID, VerifiedExecutionData}; use sui_types::digests::{TransactionDigest, TransactionEffectsDigest, TransactionEventsDigest}; use sui_types::effects::{TransactionEffects, TransactionEvents}; use sui_types::error::{SuiError, SuiResult, UserInputError}; use sui_types::messages_checkpoint::CheckpointSequenceNumber; use sui_types::object::Object; use sui_types::storage::{ - BackingPackageStore, BackingStore, ChildObjectResolver, MarkerValue, ObjectKey, + BackingPackageStore, BackingStore, ChildObjectResolver, FullObjectKey, MarkerValue, ObjectKey, ObjectOrTombstone, ObjectStore, PackageObject, ParentSync, }; use sui_types::sui_system_state::SuiSystemState; @@ -173,6 +173,8 @@ pub trait ExecutionCacheCommit: Send + Sync { &'a self, epoch: EpochId, digests: &'a [TransactionDigest], + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> BoxFuture<'a, ()>; /// Durably commit transactions (but not their outputs) to the database. @@ -268,6 +270,8 @@ pub trait ObjectCacheRead: Send + Sync { keys: &[InputKey], receiving_objects: HashSet, epoch: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> Vec { let (keys_with_version, keys_without_version): (Vec<_>, Vec<_>) = keys .iter() @@ -279,7 +283,7 @@ pub trait ObjectCacheRead: Send + Sync { self.multi_object_exists_by_key( &keys_with_version .iter() - .map(|(_, k)| ObjectKey(k.id(), k.version().unwrap())) + .map(|(_, k)| ObjectKey(k.id().id(), k.version().unwrap())) .collect::>(), ) .into_iter(), @@ -301,20 +305,21 @@ pub trait ObjectCacheRead: Send + Sync { // specified version exists or was deleted. We will then let mark it as available // to let the transaction through so it can fail at execution. 
let is_available = self - .get_object(&input_key.id()) + .get_object(&input_key.id().id()) .map(|obj| obj.version() >= input_key.version().unwrap()) .unwrap_or(false) - || self.have_deleted_owned_object_at_version_or_after( - &input_key.id(), + || self.have_deleted_fastpath_object_at_version_or_after( + input_key.id().id(), input_key.version().unwrap(), epoch, + use_object_per_epoch_marker_table_v2, ); versioned_results.push((*idx, is_available)); } else if self .get_deleted_shared_object_previous_tx_digest( - &input_key.id(), - input_key.version().unwrap(), + FullObjectKey::new(input_key.id(), input_key.version().unwrap()), epoch, + use_object_per_epoch_marker_table_v2, ) .is_some() { @@ -329,7 +334,7 @@ pub trait ObjectCacheRead: Send + Sync { let unversioned_results = keys_without_version.into_iter().map(|(idx, key)| { ( idx, - match self.get_latest_object_ref_or_tombstone(key.id()) { + match self.get_latest_object_ref_or_tombstone(key.id().id()) { None => false, Some(entry) => entry.2.is_alive(), }, @@ -372,25 +377,30 @@ pub trait ObjectCacheRead: Send + Sync { /// Get the marker at a specific version fn get_marker_value( &self, - object_id: &ObjectID, - version: SequenceNumber, + object_key: FullObjectKey, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> Option; /// Get the latest marker for a given object. fn get_latest_marker( &self, - object_id: &ObjectID, + object_id: FullObjectID, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> Option<(SequenceNumber, MarkerValue)>; /// If the shared object was deleted, return deletion info for the current live version fn get_last_shared_object_deletion_info( &self, - object_id: &ObjectID, + object_id: FullObjectID, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> Option<(SequenceNumber, TransactionDigest)> { - match self.get_latest_marker(object_id, epoch_id) { + match self.get_latest_marker(object_id, epoch_id, use_object_per_epoch_marker_table_v2) { Some((version, MarkerValue::SharedDeleted(digest))) => Some((version, digest)), _ => None, } @@ -399,11 +409,12 @@ pub trait ObjectCacheRead: Send + Sync { /// If the shared object was deleted, return deletion info for the specified version. fn get_deleted_shared_object_previous_tx_digest( &self, - object_id: &ObjectID, - version: SequenceNumber, + object_key: FullObjectKey, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> Option { - match self.get_marker_value(object_id, version, epoch_id) { + match self.get_marker_value(object_key, epoch_id, use_object_per_epoch_marker_table_v2) { Some(MarkerValue::SharedDeleted(digest)) => Some(digest), _ => None, } @@ -411,24 +422,28 @@ pub trait ObjectCacheRead: Send + Sync { fn have_received_object_at_version( &self, - object_id: &ObjectID, - version: SequenceNumber, + object_key: FullObjectKey, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. 
+ use_object_per_epoch_marker_table_v2: bool, ) -> bool { matches!( - self.get_marker_value(object_id, version, epoch_id), + self.get_marker_value(object_key, epoch_id, use_object_per_epoch_marker_table_v2), Some(MarkerValue::Received) ) } - fn have_deleted_owned_object_at_version_or_after( + fn have_deleted_fastpath_object_at_version_or_after( &self, - object_id: &ObjectID, + object_id: ObjectID, version: SequenceNumber, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> bool { + let full_id = FullObjectID::Fastpath(object_id); // function explicilty assumes "fastpath" matches!( - self.get_latest_marker(object_id, epoch_id), + self.get_latest_marker(full_id, epoch_id, use_object_per_epoch_marker_table_v2), Some((marker_version, MarkerValue::OwnedDeleted)) if marker_version >= version ) } @@ -591,6 +606,8 @@ pub trait ExecutionCacheWrite: Send + Sync { &self, epoch_id: EpochId, tx_outputs: Arc, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> BoxFuture<'_, ()>; /// Attempt to acquire object locks for all of the owned input locks. @@ -727,6 +744,8 @@ macro_rules! implement_storage_traits { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { let Some(recv_object) = ObjectCacheRead::get_object_by_key( self, @@ -743,9 +762,13 @@ macro_rules! implement_storage_traits { // transaction replay due to possible reordering of transactions during replay. if recv_object.owner != Owner::AddressOwner((*owner).into()) || self.have_received_object_at_version( - receiving_object_id, - receive_object_at_version, + // TODO: Add support for receiving ConsensusV2 objects. For now this assumes fastpath. 
+ FullObjectKey::new( + FullObjectID::new(*receiving_object_id, None), + receive_object_at_version, + ), epoch_id, + use_object_per_epoch_marker_table_v2, ) { return Ok(None); diff --git a/crates/sui-core/src/execution_cache/unit_tests/writeback_cache_tests.rs b/crates/sui-core/src/execution_cache/unit_tests/writeback_cache_tests.rs index af402369dfe96..f90537f423983 100644 --- a/crates/sui-core/src/execution_cache/unit_tests/writeback_cache_tests.rs +++ b/crates/sui-core/src/execution_cache/unit_tests/writeback_cache_tests.rs @@ -325,7 +325,7 @@ impl Scenario { .find(|o| **o == object.compute_object_reference()) .expect("received object must have new lock"); self.outputs.markers.push(( - object.compute_object_reference().into(), + object.compute_full_object_reference().into(), MarkerValue::Received, )); } @@ -348,7 +348,7 @@ impl Scenario { assert!(self.transactions.insert(tx), "transaction is not unique"); self.cache() - .write_transaction_outputs(1 /* epoch */, outputs.clone()) + .write_transaction_outputs(1 /* epoch */, outputs.clone(), true) .await; self.count_action(); @@ -357,7 +357,10 @@ impl Scenario { // commit a transaction to the database pub async fn commit(&mut self, tx: TransactionDigest) -> SuiResult { - let res = self.cache().commit_transaction_outputs(1, &[tx]).await; + let res = self + .cache() + .commit_transaction_outputs(1, &[tx], true) + .await; self.count_action(); Ok(res) } @@ -488,9 +491,11 @@ impl Scenario { .unwrap(), *object ); - assert!(self - .cache() - .have_received_object_at_version(id, object.version(), 1)); + assert!(self.cache().have_received_object_at_version( + FullObjectKey::new(object.full_id(), object.version()), + 1, + true + )); } } @@ -553,7 +558,7 @@ async fn test_committed() { s.assert_live(&[1, 2]); s.assert_dirty(&[1, 2]); - s.cache().commit_transaction_outputs(1, &[tx]).await; + s.cache().commit_transaction_outputs(1, &[tx], true).await; s.assert_not_dirty(&[1, 2]); s.assert_cached(&[1, 2]); diff --git a/crates/sui-core/src/execution_cache/writeback_cache.rs b/crates/sui-core/src/execution_cache/writeback_cache.rs index 120fcc257c97d..82880b6a75342 100644 --- a/crates/sui-core/src/execution_cache/writeback_cache.rs +++ b/crates/sui-core/src/execution_cache/writeback_cache.rs @@ -63,7 +63,9 @@ use sui_config::ExecutionCacheConfig; use sui_macros::fail_point_async; use sui_protocol_config::ProtocolVersion; use sui_types::accumulator::Accumulator; -use sui_types::base_types::{EpochId, ObjectID, ObjectRef, SequenceNumber, VerifiedExecutionData}; +use sui_types::base_types::{ + EpochId, FullObjectID, ObjectID, ObjectRef, SequenceNumber, VerifiedExecutionData, +}; use sui_types::bridge::{get_bridge, Bridge}; use sui_types::digests::{ ObjectDigest, TransactionDigest, TransactionEffectsDigest, TransactionEventsDigest, @@ -73,7 +75,9 @@ use sui_types::error::{SuiError, SuiResult, UserInputError}; use sui_types::message_envelope::Message; use sui_types::messages_checkpoint::CheckpointSequenceNumber; use sui_types::object::Object; -use sui_types::storage::{MarkerValue, ObjectKey, ObjectOrTombstone, ObjectStore, PackageObject}; +use sui_types::storage::{ + FullObjectKey, MarkerValue, ObjectKey, ObjectOrTombstone, ObjectStore, PackageObject, +}; use sui_types::sui_system_state::{get_sui_system_state, SuiSystemState}; use sui_types::transaction::{VerifiedSignedTransaction, VerifiedTransaction}; use tap::TapOptional; @@ -180,7 +184,7 @@ impl IsNewer for LatestObjectCacheEntry { } } -type MarkerKey = (EpochId, ObjectID); +type MarkerKey = (EpochId, 
FullObjectID); enum CacheResult { /// Entry is in the cache @@ -547,22 +551,18 @@ impl WritebackCache { async fn write_marker_value( &self, epoch_id: EpochId, - object_key: &ObjectKey, + object_key: FullObjectKey, marker_value: MarkerValue, ) { - tracing::trace!( - "inserting marker value {:?}: {:?}", - object_key, - marker_value - ); + tracing::trace!("inserting marker value {object_key:?}: {marker_value:?}",); fail_point_async!("write_marker_entry"); self.metrics.record_cache_write("marker"); self.dirty .markers - .entry((epoch_id, object_key.0)) + .entry((epoch_id, object_key.id())) .or_default() .value_mut() - .insert(object_key.1, marker_value); + .insert(object_key.version(), marker_value); } // lock both the dirty and committed sides of the cache, and then pass the entries to @@ -734,28 +734,27 @@ impl WritebackCache { fn get_marker_value_cache_only( &self, - object_id: &ObjectID, - version: SequenceNumber, + object_key: FullObjectKey, epoch_id: EpochId, ) -> CacheResult { Self::with_locked_cache_entries( &self.dirty.markers, &self.cached.marker_cache, - &(epoch_id, *object_id), + &(epoch_id, object_key.id()), |dirty_entry, cached_entry| { check_cache_entry_by_version!( self, "marker_by_version", "uncommitted", dirty_entry, - version + object_key.version() ); check_cache_entry_by_version!( self, "marker_by_version", "committed", cached_entry, - version + object_key.version() ); CacheResult::Miss }, @@ -764,13 +763,13 @@ impl WritebackCache { fn get_latest_marker_value_cache_only( &self, - object_id: &ObjectID, + object_id: FullObjectID, epoch_id: EpochId, ) -> CacheResult<(SequenceNumber, MarkerValue)> { Self::with_locked_cache_entries( &self.dirty.markers, &self.cached.marker_cache, - &(epoch_id, *object_id), + &(epoch_id, object_id), |dirty_entry, cached_entry| { check_cache_entry_by_latest!(self, "marker_latest", "uncommitted", dirty_entry); check_cache_entry_by_latest!(self, "marker_latest", "committed", cached_entry); @@ -846,7 +845,7 @@ impl WritebackCache { // Update all markers for (object_key, marker_value) in markers.iter() { - self.write_marker_value(epoch_id, object_key, *marker_value) + self.write_marker_value(epoch_id, *object_key, *marker_value) .await; } @@ -928,7 +927,13 @@ impl WritebackCache { // Commits dirty data for the given TransactionDigest to the db. #[instrument(level = "debug", skip_all)] - async fn commit_transaction_outputs(&self, epoch: EpochId, digests: &[TransactionDigest]) { + async fn commit_transaction_outputs( + &self, + epoch: EpochId, + digests: &[TransactionDigest], + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, + ) { fail_point_async!("writeback-cache-commit"); trace!(?digests); @@ -955,7 +960,7 @@ impl WritebackCache { // a cache eviction could cause a value to disappear briefly, even if we insert to the // cache before removing from the dirty set. self.store - .write_transaction_outputs(epoch, &all_outputs) + .write_transaction_outputs(epoch, &all_outputs, use_object_per_epoch_marker_table_v2) .await .expect("db error"); @@ -1093,8 +1098,8 @@ impl WritebackCache { Self::move_version_from_dirty_to_cache( &self.dirty.markers, &self.cached.marker_cache, - (epoch, object_key.0), - object_key.1, + (epoch, object_key.id()), + object_key.version(), marker_value, ); } @@ -1297,8 +1302,16 @@ impl ExecutionCacheCommit for WritebackCache { &'a self, epoch: EpochId, digests: &'a [TransactionDigest], + // TODO: Delete this parameter once table migration is complete. 
+ use_object_per_epoch_marker_table_v2: bool, ) -> BoxFuture<'a, ()> { - WritebackCache::commit_transaction_outputs(self, epoch, digests).boxed() + WritebackCache::commit_transaction_outputs( + self, + epoch, + digests, + use_object_per_epoch_marker_table_v2, + ) + .boxed() } fn persist_transactions<'a>(&'a self, digests: &'a [TransactionDigest]) -> BoxFuture<'a, ()> { @@ -1643,24 +1656,27 @@ impl ObjectCacheRead for WritebackCache { fn get_marker_value( &self, - object_id: &ObjectID, - version: SequenceNumber, + object_key: FullObjectKey, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> Option { - match self.get_marker_value_cache_only(object_id, version, epoch_id) { + match self.get_marker_value_cache_only(object_key, epoch_id) { CacheResult::Hit(marker) => Some(marker), CacheResult::NegativeHit => None, CacheResult::Miss => self .record_db_get("marker_by_version") - .get_marker_value(object_id, &version, epoch_id) + .get_marker_value(object_key, epoch_id, use_object_per_epoch_marker_table_v2) .expect("db error"), } } fn get_latest_marker( &self, - object_id: &ObjectID, + object_id: FullObjectID, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> Option<(SequenceNumber, MarkerValue)> { match self.get_latest_marker_value_cache_only(object_id, epoch_id) { CacheResult::Hit((v, marker)) => Some((v, marker)), @@ -1669,7 +1685,7 @@ impl ObjectCacheRead for WritebackCache { } CacheResult::Miss => self .record_db_get("marker_latest") - .get_latest_marker(object_id, epoch_id) + .get_latest_marker(object_id, epoch_id, use_object_per_epoch_marker_table_v2) .expect("db error"), } } @@ -2064,6 +2080,8 @@ impl ExecutionCacheWrite for WritebackCache { &self, epoch_id: EpochId, tx_outputs: Arc, + // TODO: Delete this parameter once table migration is complete. + _use_object_per_epoch_marker_table_v2: bool, ) -> BoxFuture<'_, ()> { WritebackCache::write_transaction_outputs(self, epoch_id, tx_outputs).boxed() } diff --git a/crates/sui-core/src/transaction_input_loader.rs b/crates/sui-core/src/transaction_input_loader.rs index 016e8b956cf97..f9ff6a59500d2 100644 --- a/crates/sui-core/src/transaction_input_loader.rs +++ b/crates/sui-core/src/transaction_input_loader.rs @@ -14,9 +14,9 @@ use once_cell::unsync::OnceCell; use std::collections::HashMap; use std::sync::Arc; use sui_types::{ - base_types::{EpochId, ObjectRef, SequenceNumber, TransactionDigest}, + base_types::{EpochId, FullObjectID, ObjectRef, SequenceNumber, TransactionDigest}, error::{SuiError, SuiResult, UserInputError}, - storage::ObjectKey, + storage::{FullObjectKey, ObjectKey}, transaction::{ InputObjectKind, InputObjects, ObjectReadResult, ObjectReadResultKind, ReceivingObjectReadResult, ReceivingObjectReadResultKind, ReceivingObjects, TransactionKey, @@ -47,6 +47,8 @@ impl TransactionInputLoader { input_object_kinds: &[InputObjectKind], receiving_objects: &[ObjectRef], epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult<(InputObjects, ReceivingObjects)> { // Length of input_object_kinds have been checked via validity_check() for ProgrammableTransaction. let mut input_results = vec![None; input_object_kinds.len()]; @@ -65,14 +67,21 @@ impl TransactionInputLoader { object: ObjectReadResultKind::Object(package), }); } - InputObjectKind::SharedMoveObject { id, .. 
} => match self.cache.get_object(id) { + InputObjectKind::SharedMoveObject { + id, + initial_shared_version, + .. + } => match self.cache.get_object(id) { Some(object) => { input_results[i] = Some(ObjectReadResult::new(*kind, object.into())) } None => { - if let Some((version, digest)) = self - .cache - .get_last_shared_object_deletion_info(id, epoch_id) + if let Some((version, digest)) = + self.cache.get_last_shared_object_deletion_info( + FullObjectID::new(*id, Some(*initial_shared_version)), + epoch_id, + use_object_per_epoch_marker_table_v2, + ) { input_results[i] = Some(ObjectReadResult { input_object_kind: *kind, @@ -101,8 +110,11 @@ impl TransactionInputLoader { }); } - let receiving_results = - self.read_receiving_objects_for_signing(receiving_objects, epoch_id)?; + let receiving_results = self.read_receiving_objects_for_signing( + receiving_objects, + epoch_id, + use_object_per_epoch_marker_table_v2, + )?; Ok(( input_results @@ -228,11 +240,18 @@ impl TransactionInputLoader { input_object_kind: *input_object_kind, object: obj.into(), }, - (None, InputObjectKind::SharedMoveObject { id, .. }) => { + (None, InputObjectKind::SharedMoveObject { id, initial_shared_version, .. }) => { assert!(key.1.is_valid()); // Check if the object was deleted by a concurrently certified tx let version = key.1; - if let Some(dependency) = self.cache.get_deleted_shared_object_previous_tx_digest(id, version, epoch_id) { + if let Some(dependency) = self.cache.get_deleted_shared_object_previous_tx_digest( + FullObjectKey::new( + FullObjectID::new(*id, Some(*initial_shared_version)), + version, + ), + epoch_id, + epoch_store.protocol_config().use_object_per_epoch_marker_table_v2_as_option().unwrap_or(false), + ) { ObjectReadResult { input_object_kind: *input, object: ObjectReadResultKind::DeletedSharedObject(version, dependency), @@ -259,16 +278,20 @@ impl TransactionInputLoader { &self, receiving_objects: &[ObjectRef], epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult { let mut receiving_results = Vec::with_capacity(receiving_objects.len()); for objref in receiving_objects { // Note: the digest is checked later in check_transaction_input let (object_id, version, _) = objref; - if self - .cache - .have_received_object_at_version(object_id, *version, epoch_id) - { + // TODO: Add support for receiving ConsensusV2 objects. For now this assumes fastpath. 
+ if self.cache.have_received_object_at_version( + FullObjectKey::new(FullObjectID::new(*object_id, None), *version), + epoch_id, + use_object_per_epoch_marker_table_v2, + ) { receiving_results.push(ReceivingObjectReadResult::new( *objref, ReceivingObjectReadResultKind::PreviouslyReceivedObject, diff --git a/crates/sui-core/src/transaction_manager.rs b/crates/sui-core/src/transaction_manager.rs index a81f5cb9e0b34..e44b4fa7e6586 100644 --- a/crates/sui-core/src/transaction_manager.rs +++ b/crates/sui-core/src/transaction_manager.rs @@ -13,7 +13,7 @@ use mysten_common::fatal; use mysten_metrics::monitored_scope; use parking_lot::RwLock; use sui_types::{ - base_types::{ObjectID, SequenceNumber, TransactionDigest}, + base_types::{FullObjectID, SequenceNumber, TransactionDigest}, committee::EpochId, digests::TransactionEffectsDigest, error::{SuiError, SuiResult}, @@ -83,10 +83,10 @@ pub struct PendingCertificate { } struct CacheInner { - versioned_cache: LruCache, + versioned_cache: LruCache, // we cache packages separately, because they are more expensive to look up in the db, so we // don't want to evict packages in favor of mutable objects. - unversioned_cache: LruCache, + unversioned_cache: LruCache, max_size: usize, metrics: Arc, @@ -228,7 +228,7 @@ struct Inner { // Stores age info for all transactions depending on each object. // Used for throttling signing and submitting transactions depending on hot objects. // An `IndexMap` is used to ensure that the insertion order is preserved. - input_objects: HashMap, + input_objects: HashMap, // Maps object IDs to the highest observed sequence number of the object. When the value is // None, indicates that the object is immutable, corresponding to an InputKey with no sequence @@ -461,7 +461,8 @@ impl TransactionManager { cert.data().intent_message().value.receiving_objects(); for entry in receiving_object_entries { let key = InputKey::VersionedObject { - id: entry.0, + // TODO: Add support for receiving ConsensusV2 objects. For now this assumes fastpath. + id: FullObjectID::new(entry.0, None), version: entry.1, }; receiving_objects.insert(key); @@ -510,6 +511,10 @@ impl TransactionManager { &input_object_cache_misses, receiving_objects, epoch_store.epoch(), + epoch_store + .protocol_config() + .use_object_per_epoch_marker_table_v2_as_option() + .unwrap_or(false), ) .into_iter() .zip(input_object_cache_misses); @@ -786,8 +791,8 @@ impl TransactionManager { // Returns the number of transactions waiting on each object ID, as well as the age of the oldest transaction in the queue. pub(crate) fn objects_queue_len_and_age( &self, - keys: Vec, - ) -> Vec<(ObjectID, usize, Option)> { + keys: Vec, + ) -> Vec<(FullObjectID, usize, Option)> { let reconfig_lock = self.inner.read(); let inner = reconfig_lock.read(); keys.into_iter() @@ -839,7 +844,10 @@ impl TransactionManager { .transaction_data() .shared_input_objects() .into_iter() - .filter_map(|r| r.mutable.then_some(r.id)) + .filter_map(|r| { + r.mutable + .then_some(FullObjectID::new(r.id, Some(r.initial_shared_version))) + }) .collect(), ) { // When this occurs, most likely transactions piled up on a shared object. 
@@ -849,7 +857,7 @@ impl TransactionManager { object_id, queue_len ); fp_bail!(SuiError::TooManyTransactionsPendingOnObject { - object_id, + object_id: object_id.id(), queue_len, threshold: overload_config.max_transaction_manager_per_object_queue_length, }); @@ -863,7 +871,7 @@ impl TransactionManager { age.as_millis() ); fp_bail!(SuiError::TooOldTransactionPendingOnObject { - object_id, + object_id: object_id.id(), txn_age_sec: age.as_secs(), threshold: overload_config.max_txn_age_in_queue.as_secs(), }); @@ -1012,6 +1020,7 @@ mod test { use super::*; use prometheus::Registry; use rand::{Rng, RngCore}; + use sui_types::base_types::ObjectID; #[test] #[cfg_attr(msim, ignore)] @@ -1037,7 +1046,7 @@ mod test { // insert 10 unique versioned objects for i in 0..10 { - let object = ObjectID::new([i; 32]); + let object = FullObjectID::new(ObjectID::new([i; 32]), None); let input_key = InputKey::VersionedObject { id: object, version: (i as u64).into(), @@ -1049,7 +1058,7 @@ mod test { // first 5 versioned objects have been evicted for i in 0..5 { - let object = ObjectID::new([i; 32]); + let object = FullObjectID::new(ObjectID::new([i; 32]), None); let input_key = InputKey::VersionedObject { id: object, version: (i as u64).into(), @@ -1065,7 +1074,7 @@ mod test { } // object 9 is available at version 9 - let object = ObjectID::new([9; 32]); + let object = FullObjectID::new(ObjectID::new([9; 32]), None); let input_key = InputKey::VersionedObject { id: object, version: 9.into(), diff --git a/crates/sui-core/src/transaction_outputs.rs b/crates/sui-core/src/transaction_outputs.rs index 43b556525a407..1578e87f605ce 100644 --- a/crates/sui-core/src/transaction_outputs.rs +++ b/crates/sui-core/src/transaction_outputs.rs @@ -3,10 +3,10 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use sui_types::base_types::ObjectRef; +use sui_types::base_types::{FullObjectID, ObjectRef}; use sui_types::effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}; use sui_types::inner_temporary_store::{InnerTemporaryStore, WrittenObjects}; -use sui_types::storage::{MarkerValue, ObjectKey}; +use sui_types::storage::{FullObjectKey, MarkerValue, ObjectKey}; use sui_types::transaction::{TransactionDataAPI, VerifiedTransaction}; /// TransactionOutputs @@ -15,7 +15,7 @@ pub struct TransactionOutputs { pub effects: TransactionEffects, pub events: TransactionEvents, - pub markers: Vec<(ObjectKey, MarkerValue)>, + pub markers: Vec<(FullObjectKey, MarkerValue)>, pub wrapped: Vec, pub deleted: Vec, pub locks_to_delete: Vec, @@ -32,6 +32,7 @@ impl TransactionOutputs { ) -> TransactionOutputs { let InnerTemporaryStore { input_objects, + deleted_consensus_objects, mutable_inputs, written, events, @@ -57,19 +58,26 @@ impl TransactionOutputs { // object deletions in the marker table. For deleted entries in the marker table we need to // make sure we don't accidentally overwrite entries. let markers: Vec<_> = { - let received = received_objects - .clone() - .map(|objref| (ObjectKey::from(objref), MarkerValue::Received)); + let received = received_objects.clone().map(|objref| { + ( + // TODO: Add support for receiving ConsensusV2 objects. For now this assumes fastpath. 
+ FullObjectKey::new(FullObjectID::new(objref.0, None), objref.1), + MarkerValue::Received, + ) + }); let deleted = deleted.into_iter().map(|(object_id, version)| { - let object_key = ObjectKey(object_id, version); - if input_objects + let shared_key = input_objects .get(&object_id) - .is_some_and(|object| object.is_shared()) - { - (object_key, MarkerValue::SharedDeleted(tx_digest)) + .filter(|o| o.is_consensus()) + .map(|o| FullObjectKey::new(o.full_id(), version)); + if let Some(shared_key) = shared_key { + (shared_key, MarkerValue::SharedDeleted(tx_digest)) } else { - (object_key, MarkerValue::OwnedDeleted) + ( + FullObjectKey::new(FullObjectID::new(object_id, None), version), + MarkerValue::OwnedDeleted, + ) } }); @@ -78,9 +86,17 @@ impl TransactionOutputs { // NB: that we do _not_ smear shared objects that were taken immutably in the // transaction. let smeared_objects = effects.deleted_mutably_accessed_shared_objects(); - let shared_smears = smeared_objects.into_iter().map(move |object_id| { + let shared_smears = smeared_objects.into_iter().map(|object_id| { + let id = input_objects + .get(&object_id) + .map(|obj| obj.full_id()) + .unwrap_or_else(|| { + let start_version = deleted_consensus_objects.get(&object_id) + .expect("deleted object must be in either input_objects or deleted_consensus_objects"); + FullObjectID::new(object_id, Some(*start_version)) + }); ( - ObjectKey(object_id, lamport_version), + FullObjectKey::new(id, lamport_version), MarkerValue::SharedDeleted(tx_digest), ) }); diff --git a/crates/sui-core/src/unit_tests/authority_tests.rs b/crates/sui-core/src/unit_tests/authority_tests.rs index dc086129d2bc4..6b9eae0f277ab 100644 --- a/crates/sui-core/src/unit_tests/authority_tests.rs +++ b/crates/sui-core/src/unit_tests/authority_tests.rs @@ -3239,6 +3239,7 @@ async fn test_store_revert_wrap_move_call() { .commit_transaction_outputs( authority_state.epoch_store_for_testing().epoch(), &[*create_effects.transaction_digest()], + true, ) .await; @@ -3338,6 +3339,7 @@ async fn test_store_revert_unwrap_move_call() { *create_effects.transaction_digest(), *wrap_effects.transaction_digest(), ], + true, ) .await; @@ -3617,6 +3619,7 @@ async fn test_store_revert_add_ofield() { *create_outer_effects.transaction_digest(), *create_inner_effects.transaction_digest(), ], + true, ) .await; @@ -3744,6 +3747,7 @@ async fn test_store_revert_remove_ofield() { *create_inner_effects.transaction_digest(), *add_effects.transaction_digest(), ], + true, ) .await; diff --git a/crates/sui-core/src/unit_tests/shared_object_deletion_tests.rs b/crates/sui-core/src/unit_tests/shared_object_deletion_tests.rs index 05e967aec9e27..a409574ed5305 100644 --- a/crates/sui-core/src/unit_tests/shared_object_deletion_tests.rs +++ b/crates/sui-core/src/unit_tests/shared_object_deletion_tests.rs @@ -5,12 +5,13 @@ use std::sync::Arc; use sui_types::{ - base_types::{ObjectID, ObjectRef, SequenceNumber, SuiAddress}, + base_types::{FullObjectID, ObjectID, ObjectRef, SequenceNumber, SuiAddress}, crypto::{get_key_pair, AccountKeyPair}, effects::TransactionEffects, execution_status::{CommandArgumentError, ExecutionFailureStatus}, object::Object, programmable_transaction_builder::ProgrammableTransactionBuilder, + storage::FullObjectKey, transaction::{ProgrammableTransaction, Transaction, TEST_ONLY_GAS_UNIT_FOR_PUBLISH}, }; @@ -553,13 +554,12 @@ impl TestRunner { pub fn object_exists_in_marker_table( &mut self, - object_id: &ObjectID, - version: &SequenceNumber, + object_key: FullObjectKey, epoch: EpochId, ) -> Option { 
self.authority_state .get_object_cache_reader() - .get_deleted_shared_object_previous_tx_digest(object_id, *version, epoch) + .get_deleted_shared_object_previous_tx_digest(object_key, epoch, true) } } @@ -608,7 +608,13 @@ async fn test_delete_shared_object() { assert_eq!( user1 - .object_exists_in_marker_table(&deleted_obj_id, &deleted_obj_ver, 0) + .object_exists_in_marker_table( + FullObjectKey::new( + FullObjectID::new(deleted_obj_id, Some(initial_shared_version)), + deleted_obj_ver + ), + 0 + ) .unwrap(), *effects.transaction_digest(), ); @@ -728,7 +734,13 @@ async fn test_delete_shared_object_immut_mut_mut_interleave() { assert_eq!( user1 - .object_exists_in_marker_table(&deleted_obj_id, &deleted_obj_ver, 0) + .object_exists_in_marker_table( + FullObjectKey::new( + FullObjectID::new(deleted_obj_id, Some(initial_shared_version)), + deleted_obj_ver + ), + 0 + ) .unwrap(), *effects.transaction_digest(), ); @@ -825,7 +837,13 @@ async fn test_delete_shared_object_immut_mut_immut_interleave() { assert_eq!( user1 - .object_exists_in_marker_table(&deleted_obj_id, &deleted_obj_ver, 0) + .object_exists_in_marker_table( + FullObjectKey::new( + FullObjectID::new(deleted_obj_id, Some(initial_shared_version)), + deleted_obj_ver + ), + 0 + ) .unwrap(), *effects.transaction_digest(), ); @@ -1475,7 +1493,13 @@ async fn test_delete_with_shared_after_mutate_enqueued() { let deleted_obj_ver = delete_effects.deleted()[0].1; assert!(user_1 - .object_exists_in_marker_table(&shared_obj_id, &deleted_obj_ver, 0) + .object_exists_in_marker_table( + FullObjectKey::new( + FullObjectID::new(shared_obj_id, Some(initial_shared_version)), + deleted_obj_ver + ), + 0 + ) .is_some()); let mutate_effects = res.get(1).unwrap(); diff --git a/crates/sui-core/src/unit_tests/transaction_deny_tests.rs b/crates/sui-core/src/unit_tests/transaction_deny_tests.rs index b31049319b31d..911ae4eb5edb4 100644 --- a/crates/sui-core/src/unit_tests/transaction_deny_tests.rs +++ b/crates/sui-core/src/unit_tests/transaction_deny_tests.rs @@ -354,6 +354,7 @@ async fn test_package_denied() { .commit_transaction_outputs( state.epoch_store_for_testing().epoch(), &[tx_c, tx_b, tx_a, tx_c_prime, tx_b_prime], + true, ) .await; diff --git a/crates/sui-core/src/unit_tests/transaction_manager_tests.rs b/crates/sui-core/src/unit_tests/transaction_manager_tests.rs index 7e3e455d8ed4c..b5f13b1f57993 100644 --- a/crates/sui-core/src/unit_tests/transaction_manager_tests.rs +++ b/crates/sui-core/src/unit_tests/transaction_manager_tests.rs @@ -60,7 +60,7 @@ fn get_input_keys(objects: &[Object]) -> Vec { objects .iter() .map(|object| InputKey::VersionedObject { - id: object.id(), + id: object.full_id(), version: object.version(), }) .collect() @@ -340,7 +340,7 @@ async fn transaction_manager_object_dependency() { // Notify TM about availability of the first shared object. transaction_manager.objects_available( vec![InputKey::VersionedObject { - id: shared_object.id(), + id: shared_object.full_id(), version: shared_version, }], &state.epoch_store_for_testing(), @@ -377,7 +377,7 @@ async fn transaction_manager_object_dependency() { // Make shared_object_2 available. 
transaction_manager.objects_available( vec![InputKey::VersionedObject { - id: shared_object_2.id(), + id: shared_object_2.full_id(), version: shared_version_2, }], &state.epoch_store_for_testing(), @@ -475,7 +475,7 @@ async fn transaction_manager_receiving_notify_commit() { transaction_manager.notify_commit( txn.digest(), vec![InputKey::VersionedObject { - id: object.id(), + id: object.full_id(), version: object.version().next(), }], &state.epoch_store_for_testing(), @@ -860,7 +860,7 @@ async fn transaction_manager_with_cancelled_transactions() { // Notify TM about availability of the owned object. transaction_manager.objects_available( vec![InputKey::VersionedObject { - id: owned_object.id(), + id: owned_object.full_id(), version: owned_version, }], &state.epoch_store_for_testing(), diff --git a/crates/sui-core/src/unit_tests/transfer_to_object_tests.rs b/crates/sui-core/src/unit_tests/transfer_to_object_tests.rs index ff1b582545525..12e974b05bcf3 100644 --- a/crates/sui-core/src/unit_tests/transfer_to_object_tests.rs +++ b/crates/sui-core/src/unit_tests/transfer_to_object_tests.rs @@ -1759,8 +1759,8 @@ async fn test_have_deleted_owned_object() { assert!(cache.get_object(&new_child.0.0).is_some()); // Should not show as deleted for either versions - assert!(!cache.have_deleted_owned_object_at_version_or_after(&new_child.0.0, new_child.0.1, 0)); - assert!(!cache.have_deleted_owned_object_at_version_or_after(&new_child.0.0, child.0.1, 0)); + assert!(!cache.have_deleted_fastpath_object_at_version_or_after(new_child.0.0, new_child.0.1, 0, true)); + assert!(!cache.have_deleted_fastpath_object_at_version_or_after(new_child.0.0, child.0.1, 0, true)); let effects = runner .run({ @@ -1777,13 +1777,13 @@ async fn test_have_deleted_owned_object() { let deleted_child = effects.deleted().into_iter().find(|(id, _, _)| *id == new_child.0 .0).unwrap(); assert!(cache.get_object(&deleted_child.0).is_none()); - assert!(cache.have_deleted_owned_object_at_version_or_after(&deleted_child.0, deleted_child.1, 0)); - assert!(cache.have_deleted_owned_object_at_version_or_after(&deleted_child.0, new_child.0.1, 0)); - assert!(cache.have_deleted_owned_object_at_version_or_after(&deleted_child.0, child.0.1, 0)); + assert!(cache.have_deleted_fastpath_object_at_version_or_after(deleted_child.0, deleted_child.1, 0, true)); + assert!(cache.have_deleted_fastpath_object_at_version_or_after(deleted_child.0, new_child.0.1, 0, true)); + assert!(cache.have_deleted_fastpath_object_at_version_or_after(deleted_child.0, child.0.1, 0, true)); // Should not show as deleted for versions after this though - assert!(!cache.have_deleted_owned_object_at_version_or_after(&deleted_child.0, deleted_child.1.next(), 0)); + assert!(!cache.have_deleted_fastpath_object_at_version_or_after(deleted_child.0, deleted_child.1.next(), 0, true)); // Should not show as deleted for other epochs outside of our current epoch too - assert!(!cache.have_deleted_owned_object_at_version_or_after(&deleted_child.0, deleted_child.1, 1)); + assert!(!cache.have_deleted_fastpath_object_at_version_or_after(deleted_child.0, deleted_child.1, 1, true)); } } } diff --git a/crates/sui-open-rpc/spec/openrpc.json b/crates/sui-open-rpc/spec/openrpc.json index ee507825cdd8d..9f30237e51838 100644 --- a/crates/sui-open-rpc/spec/openrpc.json +++ b/crates/sui-open-rpc/spec/openrpc.json @@ -1949,6 +1949,7 @@ "types_is_one_time_witness_type_tag_cost_per_byte": { "u64": "2" }, + "use_object_per_epoch_marker_table_v2": null, "validator_validate_metadata_cost_base": { "u64": "52" }, 
diff --git a/crates/sui-protocol-config/src/lib.rs b/crates/sui-protocol-config/src/lib.rs index 89f2d2a106810..73d5fde6fef21 100644 --- a/crates/sui-protocol-config/src/lib.rs +++ b/crates/sui-protocol-config/src/lib.rs @@ -208,6 +208,7 @@ const MAX_PROTOCOL_VERSION: u64 = 72; // Max gas budget moved to 50_000 SUI // Max gas price moved to 50 SUI // Variants as type nodes. +// Enable new marker table version. #[derive(Copy, Clone, Debug, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub struct ProtocolVersion(u64); @@ -1358,6 +1359,9 @@ pub struct ProtocolConfig { /// SIP-45: K in the formula `amplification_factor = max(0, gas_price / reference_gas_price - K)`. /// This is the threshold for activating consensus amplification. sip_45_consensus_amplification_threshold: Option, + + /// Enables use of v2 of the object per-epoch marker table with FullObjectID keys. + use_object_per_epoch_marker_table_v2: Option, } // feature flags @@ -2300,6 +2304,8 @@ impl ProtocolConfig { gas_budget_based_txn_cost_absolute_cap_commit_count: None, sip_45_consensus_amplification_threshold: None, + + use_object_per_epoch_marker_table_v2: None, // When adding a new constant, set it to None in the earliest version, like this: // new_constant: None, }; @@ -3149,6 +3155,9 @@ impl ProtocolConfig { cfg.max_gas_price = Some(50_000_000_000); cfg.feature_flags.variant_nodes = true; + + // Enable new marker table version. + cfg.use_object_per_epoch_marker_table_v2 = Some(true); } // Use this template when making changes: // diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_72.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_72.snap index 7f09185833073..da1ccef87ddcd 100644 --- a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_72.snap +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_72.snap @@ -343,4 +343,5 @@ max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: 3700000 gas_budget_based_txn_cost_cap_factor: 400000 gas_budget_based_txn_cost_absolute_cap_commit_count: 50 sip_45_consensus_amplification_threshold: 5 +use_object_per_epoch_marker_table_v2: true diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_72.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_72.snap index 625ffd0d5b845..16243daa336d6 100644 --- a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_72.snap +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_72.snap @@ -346,4 +346,5 @@ max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: 3700000 gas_budget_based_txn_cost_cap_factor: 400000 gas_budget_based_txn_cost_absolute_cap_commit_count: 50 sip_45_consensus_amplification_threshold: 5 +use_object_per_epoch_marker_table_v2: true diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_72.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_72.snap index 48c15de6ea3c6..dc1fa8c1d8c59 100644 --- a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_72.snap +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_72.snap @@ -355,4 +355,5 @@ max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: 3700000 gas_budget_based_txn_cost_cap_factor: 400000 
gas_budget_based_txn_cost_absolute_cap_commit_count: 50 sip_45_consensus_amplification_threshold: 5 +use_object_per_epoch_marker_table_v2: true diff --git a/crates/sui-replay/src/replay.rs b/crates/sui-replay/src/replay.rs index e3b22d539ec32..f19fa0e069dde 100644 --- a/crates/sui-replay/src/replay.rs +++ b/crates/sui-replay/src/replay.rs @@ -1907,6 +1907,8 @@ impl ChildObjectResolver for LocalExec { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, _epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + _use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { fn inner( self_: &LocalExec, diff --git a/crates/sui-single-node-benchmark/src/benchmark_context.rs b/crates/sui-single-node-benchmark/src/benchmark_context.rs index 574b0d675bfb5..266c64b7bef67 100644 --- a/crates/sui-single-node-benchmark/src/benchmark_context.rs +++ b/crates/sui-single-node-benchmark/src/benchmark_context.rs @@ -117,6 +117,7 @@ impl BenchmarkContext { .commit_transaction_outputs( effects.executed_epoch(), &[*effects.transaction_digest()], + true, ) .await; let (owner, root_object) = effects @@ -187,7 +188,7 @@ impl BenchmarkContext { // For checkpoint executor, in order to commit a checkpoint it is required previous versions // of objects are already committed. cache_commit - .commit_transaction_outputs(epoch_id, &[*effects.transaction_digest()]) + .commit_transaction_outputs(epoch_id, &[*effects.transaction_digest()], true) .await; } self.refresh_gas_objects(new_gas_objects); diff --git a/crates/sui-single-node-benchmark/src/mock_storage.rs b/crates/sui-single-node-benchmark/src/mock_storage.rs index f16c3acb8675e..0ce7b8db79547 100644 --- a/crates/sui-single-node-benchmark/src/mock_storage.rs +++ b/crates/sui-single-node-benchmark/src/mock_storage.rs @@ -162,6 +162,8 @@ impl ChildObjectResolver for InMemoryObjectStore { _receiving_object_id: &ObjectID, _receive_object_at_version: SequenceNumber, _epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + _use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { unimplemented!() } diff --git a/crates/sui-transactional-test-runner/src/simulator_persisted_store.rs b/crates/sui-transactional-test-runner/src/simulator_persisted_store.rs index 72c021f859db0..ce73f18a2404f 100644 --- a/crates/sui-transactional-test-runner/src/simulator_persisted_store.rs +++ b/crates/sui-transactional-test-runner/src/simulator_persisted_store.rs @@ -457,6 +457,8 @@ impl ChildObjectResolver for PersistedStore { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, _epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. 
+ _use_object_per_epoch_marker_table_v2: bool, ) -> sui_types::error::SuiResult> { let recv_object = match SimulatorStore::get_object(self, receiving_object_id) { None => return Ok(None), diff --git a/crates/sui-types/src/base_types.rs b/crates/sui-types/src/base_types.rs index 71c5a35ccb07e..d03d00bf74f86 100644 --- a/crates/sui-types/src/base_types.rs +++ b/crates/sui-types/src/base_types.rs @@ -135,6 +135,32 @@ pub struct ObjectID( AccountAddress, ); +#[serde_as] +#[derive(Debug, Eq, PartialEq, Clone, Copy, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub enum FullObjectID { + Fastpath(ObjectID), + Consensus(ConsensusObjectSequenceKey), +} + +impl FullObjectID { + pub fn new(object_id: ObjectID, start_version: Option) -> Self { + if let Some(start_version) = start_version { + Self::Consensus((object_id, start_version)) + } else { + Self::Fastpath(object_id) + } + } + + pub fn id(&self) -> ObjectID { + match &self { + FullObjectID::Fastpath(object_id) => *object_id, + FullObjectID::Consensus(consensus_object_sequence_key) => { + consensus_object_sequence_key.0 + } + } + } +} + pub type VersionDigest = (SequenceNumber, ObjectDigest); pub type ObjectRef = (ObjectID, SequenceNumber, ObjectDigest); @@ -155,8 +181,10 @@ pub fn update_object_ref_for_testing(object_ref: ObjectRef) -> ObjectRef { ) } +pub type FullObjectRef = (FullObjectID, SequenceNumber, ObjectDigest); + /// Represents an distinct stream of object versions for a Shared or ConsensusV2 object, -/// based on the object ID and initial shared version. +/// based on the object ID and start version. pub type ConsensusObjectSequenceKey = (ObjectID, SequenceNumber); /// Wrapper around StructTag with a space-efficient representation for common types like coins diff --git a/crates/sui-types/src/in_memory_storage.rs b/crates/sui-types/src/in_memory_storage.rs index 826c45ccec0cc..36c382135542b 100644 --- a/crates/sui-types/src/in_memory_storage.rs +++ b/crates/sui-types/src/in_memory_storage.rs @@ -68,6 +68,8 @@ impl ChildObjectResolver for InMemoryStorage { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, _epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + _use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { let recv_object = match self.persistent.get(receiving_object_id).cloned() { None => return Ok(None), diff --git a/crates/sui-types/src/inner_temporary_store.rs b/crates/sui-types/src/inner_temporary_store.rs index b7e62c22304bf..460c98a09e7bc 100644 --- a/crates/sui-types/src/inner_temporary_store.rs +++ b/crates/sui-types/src/inner_temporary_store.rs @@ -1,7 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::base_types::{SequenceNumber, VersionDigest}; +use crate::base_types::{FullObjectID, SequenceNumber, VersionDigest}; use crate::effects::{TransactionEffects, TransactionEffectsAPI, TransactionEvents}; use crate::error::SuiResult; use crate::execution::DynamicallyLoadedObjectMetadata; @@ -26,6 +26,7 @@ pub type TxCoins = (ObjectMap, WrittenObjects); #[derive(Debug, Clone)] pub struct InnerTemporaryStore { pub input_objects: ObjectMap, + pub deleted_consensus_objects: BTreeMap, pub mutable_inputs: BTreeMap, // All the written objects' sequence number should have been updated to the lamport version. 
pub written: WrittenObjects, @@ -46,7 +47,7 @@ impl InnerTemporaryStore { InputKey::Package { id: *id } } else { InputKey::VersionedObject { - id: *id, + id: obj.full_id(), version: obj.version(), } } @@ -62,14 +63,14 @@ impl InnerTemporaryStore { // add deleted shared objects to the outputkeys that then get sent to notify_commit let deleted_output_keys = deleted .iter() - .filter(|(id, _)| { + .filter_map(|(id, seq)| { self.input_objects .get(id) - .is_some_and(|obj| obj.is_shared()) + .and_then(|obj| obj.is_shared().then_some((obj.full_id(), *seq))) }) - .map(|(id, seq)| InputKey::VersionedObject { - id: *id, - version: *seq, + .map(|(full_id, seq)| InputKey::VersionedObject { + id: full_id, + version: seq, }); output_keys.extend(deleted_output_keys); @@ -78,8 +79,17 @@ impl InnerTemporaryStore { let smeared_version = self.lamport_version; let deleted_accessed_objects = effects.deleted_mutably_accessed_shared_objects(); for object_id in deleted_accessed_objects.into_iter() { + let id = self + .input_objects + .get(&object_id) + .map(|obj| obj.full_id()) + .unwrap_or_else(|| { + let start_version = self.deleted_consensus_objects.get(&object_id) + .expect("deleted object must be in either input_objects or deleted_consensus_objects"); + FullObjectID::new(object_id, Some(*start_version)) + }); let key = InputKey::VersionedObject { - id: object_id, + id, version: smeared_version, }; output_keys.push(key); diff --git a/crates/sui-types/src/object.rs b/crates/sui-types/src/object.rs index c6de390950517..f8039d19e2542 100644 --- a/crates/sui-types/src/object.rs +++ b/crates/sui-types/src/object.rs @@ -17,7 +17,7 @@ use serde::{Deserialize, Serialize}; use serde_with::serde_as; use serde_with::Bytes; -use crate::base_types::{MoveObjectType, ObjectIDParseError}; +use crate::base_types::{FullObjectID, FullObjectRef, MoveObjectType, ObjectIDParseError}; use crate::coin::{Coin, CoinMetadata, TreasuryCap}; use crate::crypto::{default_hash, deterministic_random_account_key}; use crate::error::{ExecutionError, ExecutionErrorKind, UserInputError, UserInputResult}; @@ -565,6 +565,10 @@ impl Owner { pub fn is_shared(&self) -> bool { matches!(self, Owner::Shared { .. }) } + + pub fn is_consensus(&self) -> bool { + matches!(self, Owner::Shared { .. } | Owner::ConsensusV2 { .. 
}) + } } impl PartialEq for Owner { @@ -811,6 +815,10 @@ impl ObjectInner { self.owner.is_shared() } + pub fn is_consensus(&self) -> bool { + self.owner.is_consensus() + } + pub fn get_single_owner(&self) -> Option { self.owner.get_owner_address().ok() } @@ -830,6 +838,10 @@ impl ObjectInner { (self.id(), self.version(), self.digest()) } + pub fn compute_full_object_reference(&self) -> FullObjectRef { + (self.full_id(), self.version(), self.digest()) + } + pub fn digest(&self) -> ObjectDigest { ObjectDigest::new(default_hash(self)) } @@ -843,6 +855,15 @@ impl ObjectInner { } } + pub fn full_id(&self) -> FullObjectID { + let id = self.id(); + if let Some(start_version) = self.owner.start_version() { + FullObjectID::Consensus((id, start_version)) + } else { + FullObjectID::Fastpath(id) + } + } + pub fn version(&self) -> SequenceNumber { use Data::*; diff --git a/crates/sui-types/src/storage/mod.rs b/crates/sui-types/src/storage/mod.rs index 8b06376387860..498f75fbc0bca 100644 --- a/crates/sui-types/src/storage/mod.rs +++ b/crates/sui-types/src/storage/mod.rs @@ -7,7 +7,9 @@ mod read_store; mod shared_in_memory_store; mod write_store; -use crate::base_types::{TransactionDigest, VersionNumber}; +use crate::base_types::{ + ConsensusObjectSequenceKey, FullObjectID, FullObjectRef, TransactionDigest, VersionNumber, +}; use crate::committee::EpochId; use crate::error::{ExecutionError, SuiError}; use crate::execution::{DynamicallyLoadedObjectMetadata, ExecutionResults}; @@ -42,7 +44,7 @@ pub use write_store::WriteStore; #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum InputKey { VersionedObject { - id: ObjectID, + id: FullObjectID, version: SequenceNumber, }, Package { @@ -51,10 +53,10 @@ pub enum InputKey { } impl InputKey { - pub fn id(&self) -> ObjectID { + pub fn id(&self) -> FullObjectID { match self { InputKey::VersionedObject { id, .. } => *id, - InputKey::Package { id } => *id, + InputKey::Package { id } => FullObjectID::Fastpath(*id), } } @@ -79,7 +81,7 @@ impl From<&Object> for InputKey { InputKey::Package { id: obj.id() } } else { InputKey::VersionedObject { - id: obj.id(), + id: obj.full_id(), version: obj.version(), } } @@ -187,6 +189,8 @@ pub trait ChildObjectResolver { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult>; } @@ -426,6 +430,8 @@ impl ChildObjectResolver for std::sync::Arc { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { ChildObjectResolver::get_object_received_at_version( self.as_ref(), @@ -433,6 +439,7 @@ impl ChildObjectResolver for std::sync::Arc { receiving_object_id, receive_object_at_version, epoch_id, + use_object_per_epoch_marker_table_v2, ) } } @@ -452,6 +459,8 @@ impl ChildObjectResolver for &S { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. 
+ use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { ChildObjectResolver::get_object_received_at_version( *self, @@ -459,6 +468,7 @@ impl ChildObjectResolver for &S { receiving_object_id, receive_object_at_version, epoch_id, + use_object_per_epoch_marker_table_v2, ) } } @@ -478,6 +488,8 @@ impl ChildObjectResolver for &mut S { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { ChildObjectResolver::get_object_received_at_version( *self, @@ -485,11 +497,11 @@ impl ChildObjectResolver for &mut S { receiving_object_id, receive_object_at_version, epoch_id, + use_object_per_epoch_marker_table_v2, ) } } -// The primary key type for object storage. #[serde_as] #[derive(Eq, PartialEq, Clone, Copy, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)] pub struct ObjectKey(pub ObjectID, pub VersionNumber); @@ -518,6 +530,89 @@ impl From<&ObjectRef> for ObjectKey { } } +#[serde_as] +#[derive(Eq, PartialEq, Clone, Copy, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)] +pub struct ConsensusObjectKey(pub ConsensusObjectSequenceKey, pub VersionNumber); + +/// FullObjectKey represents a unique object at a specific version. For fastpath objects, this +/// is the same as ObjectKey. For consensus objects, this includes the start version, which +/// may change if an object is transferred out of and back into consensus. +#[serde_as] +#[derive(Eq, PartialEq, Clone, Copy, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)] +pub enum FullObjectKey { +    Fastpath(ObjectKey), +    Consensus(ConsensusObjectKey), +} + +impl FullObjectKey { + pub fn max_for_id(id: &FullObjectID) -> Self { + match id { + FullObjectID::Fastpath(object_id) => Self::Fastpath(ObjectKey::max_for_id(object_id)), + FullObjectID::Consensus(consensus_object_sequence_key) => Self::Consensus( + ConsensusObjectKey(*consensus_object_sequence_key, VersionNumber::MAX), + ), + } + } + + pub fn min_for_id(id: &FullObjectID) -> Self { + match id { + FullObjectID::Fastpath(object_id) => Self::Fastpath(ObjectKey::min_for_id(object_id)), + FullObjectID::Consensus(consensus_object_sequence_key) => Self::Consensus( + ConsensusObjectKey(*consensus_object_sequence_key, VersionNumber::MIN), + ), + } + } + + pub fn new(object_id: FullObjectID, version: VersionNumber) -> Self { + match object_id { + FullObjectID::Fastpath(object_id) => Self::Fastpath(ObjectKey(object_id, version)), + FullObjectID::Consensus(consensus_object_sequence_key) => { + Self::Consensus(ConsensusObjectKey(consensus_object_sequence_key, version)) + } + } + } + + pub fn id(&self) -> FullObjectID { + match self { + FullObjectKey::Fastpath(object_key) => FullObjectID::Fastpath(object_key.0), + FullObjectKey::Consensus(consensus_object_key) => { + FullObjectID::Consensus(consensus_object_key.0) + } + } + } + + pub fn version(&self) -> VersionNumber { + match self { + FullObjectKey::Fastpath(object_key) => object_key.1, + FullObjectKey::Consensus(consensus_object_key) => consensus_object_key.1, + } + } + + // Returns the equivalent ObjectKey for this FullObjectKey, discarding any initial + // shared version information, if present. + // TODO: Delete this function once marker table migration is complete.
+ pub fn into_object_key(self) -> ObjectKey { + match self { + FullObjectKey::Fastpath(object_key) => object_key, + FullObjectKey::Consensus(consensus_object_key) => { + ObjectKey(consensus_object_key.0 .0, consensus_object_key.1) + } + } + } +} + +impl From for FullObjectKey { + fn from(object_ref: FullObjectRef) -> Self { + FullObjectKey::from(&object_ref) + } +} + +impl From<&FullObjectRef> for FullObjectKey { + fn from(object_ref: &FullObjectRef) -> Self { + FullObjectKey::new(object_ref.0, object_ref.1) + } +} + #[derive(Clone)] pub enum ObjectOrTombstone { Object(Object), diff --git a/crates/sui-types/src/transaction.rs b/crates/sui-types/src/transaction.rs index d3ee748b0c504..6f4908c3d0a92 100644 --- a/crates/sui-types/src/transaction.rs +++ b/crates/sui-types/src/transaction.rs @@ -3263,6 +3263,25 @@ impl InputObjects { ) } + pub fn deleted_consensus_objects(&self) -> BTreeMap { + self.objects + .iter() + .filter_map(|obj| { + if let InputObjectKind::SharedMoveObject { + id, + initial_shared_version, + .. + } = obj.input_object_kind + { + obj.is_deleted_shared_object() + .then_some((id, initial_shared_version)) + } else { + None + } + }) + .collect() + } + pub fn into_object_map(self) -> BTreeMap { self.objects .into_iter() diff --git a/sui-execution/latest/sui-adapter/src/temporary_store.rs b/sui-execution/latest/sui-adapter/src/temporary_store.rs index 8e4223f2485ed..913159926d6ce 100644 --- a/sui-execution/latest/sui-adapter/src/temporary_store.rs +++ b/sui-execution/latest/sui-adapter/src/temporary_store.rs @@ -45,6 +45,7 @@ pub struct TemporaryStore<'backing> { store: &'backing dyn BackingStore, tx_digest: TransactionDigest, input_objects: BTreeMap, + deleted_consensus_objects: BTreeMap, /// The version to assign to all objects written by the transaction using this store. lamport_timestamp: SequenceNumber, mutable_input_refs: BTreeMap, // Inputs that are mutable @@ -85,6 +86,7 @@ impl<'backing> TemporaryStore<'backing> { ) -> Self { let mutable_input_refs = input_objects.mutable_inputs(); let lamport_timestamp = input_objects.lamport_timestamp(&receiving_objects); + let deleted_consensus_objects = input_objects.deleted_consensus_objects(); let objects = input_objects.into_object_map(); #[cfg(debug_assertions)] { @@ -105,6 +107,7 @@ impl<'backing> TemporaryStore<'backing> { store, tx_digest, input_objects: objects, + deleted_consensus_objects, lamport_timestamp, mutable_input_refs, execution_results: ExecutionResultsV2::default(), @@ -142,6 +145,7 @@ impl<'backing> TemporaryStore<'backing> { let results = self.execution_results; InnerTemporaryStore { input_objects: self.input_objects, + deleted_consensus_objects: self.deleted_consensus_objects, mutable_inputs: self.mutable_input_refs, written: results.written_objects, events: TransactionEvents { @@ -970,6 +974,8 @@ impl<'backing> ChildObjectResolver for TemporaryStore<'backing> { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { // You should never be able to try and receive an object after deleting it or writing it in the same // transaction since `Receiving` doesn't have copy. 
@@ -986,6 +992,7 @@ impl<'backing> ChildObjectResolver for TemporaryStore<'backing> { receiving_object_id, receive_object_at_version, epoch_id, + use_object_per_epoch_marker_table_v2, ) } } diff --git a/sui-execution/latest/sui-move-natives/src/object_runtime/object_store.rs b/sui-execution/latest/sui-move-natives/src/object_runtime/object_store.rs index 323905b909dc5..68b345eabe54f 100644 --- a/sui-execution/latest/sui-move-natives/src/object_runtime/object_store.rs +++ b/sui-execution/latest/sui-move-natives/src/object_runtime/object_store.rs @@ -173,7 +173,15 @@ impl<'a> Inner<'a> { ) -> PartialVMResult> { let child_opt = self .resolver - .get_object_received_at_version(&owner, &child, version, self.current_epoch_id) + .get_object_received_at_version( + &owner, + &child, + version, + self.current_epoch_id, + self.protocol_config + .use_object_per_epoch_marker_table_v2_as_option() + .unwrap_or(false), + ) .map_err(|msg| { PartialVMError::new(StatusCode::STORAGE_ERROR).with_message(format!("{msg}")) })?; diff --git a/sui-execution/latest/sui-move-natives/src/test_scenario.rs b/sui-execution/latest/sui-move-natives/src/test_scenario.rs index 1292666485571..d9b6700a67ab4 100644 --- a/sui-execution/latest/sui-move-natives/src/test_scenario.rs +++ b/sui-execution/latest/sui-move-natives/src/test_scenario.rs @@ -74,6 +74,8 @@ impl ChildObjectResolver for InMemoryTestStore { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, epoch_id: sui_types::committee::EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> sui_types::error::SuiResult> { self.0.with_borrow(|store| { store.get_object_received_at_version( @@ -81,6 +83,7 @@ impl ChildObjectResolver for InMemoryTestStore { receiving_object_id, receive_object_at_version, epoch_id, + use_object_per_epoch_marker_table_v2, ) }) } diff --git a/sui-execution/v0/sui-adapter/src/temporary_store.rs b/sui-execution/v0/sui-adapter/src/temporary_store.rs index 1745509129b47..b5c518ab64041 100644 --- a/sui-execution/v0/sui-adapter/src/temporary_store.rs +++ b/sui-execution/v0/sui-adapter/src/temporary_store.rs @@ -44,6 +44,7 @@ pub struct TemporaryStore<'backing> { store: &'backing dyn BackingStore, tx_digest: TransactionDigest, input_objects: BTreeMap, + deleted_consensus_objects: BTreeMap, /// The version to assign to all objects written by the transaction using this store. lamport_timestamp: SequenceNumber, mutable_input_refs: BTreeMap, // Inputs that are mutable @@ -76,12 +77,14 @@ impl<'backing> TemporaryStore<'backing> { ) -> Self { let mutable_input_refs = input_objects.mutable_inputs(); let lamport_timestamp = input_objects.lamport_timestamp(&[]); + let deleted_consensus_objects = input_objects.deleted_consensus_objects(); let objects = input_objects.into_object_map(); Self { store, tx_digest, input_objects: objects, + deleted_consensus_objects, lamport_timestamp, mutable_input_refs, written: BTreeMap::new(), @@ -145,6 +148,7 @@ impl<'backing> TemporaryStore<'backing> { pub fn into_inner(self) -> InnerTemporaryStore { InnerTemporaryStore { input_objects: self.input_objects, + deleted_consensus_objects: self.deleted_consensus_objects, mutable_inputs: self.mutable_input_refs, written: self .written @@ -965,6 +969,8 @@ impl<'backing> ChildObjectResolver for TemporaryStore<'backing> { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. 
+ use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { // You should never be able to try and receive an object after deleting it or writing it in the same // transaction since `Receiving` doesn't have copy. @@ -975,6 +981,7 @@ impl<'backing> ChildObjectResolver for TemporaryStore<'backing> { receiving_object_id, receive_object_at_version, epoch_id, + use_object_per_epoch_marker_table_v2, ) } } diff --git a/sui-execution/v1/sui-adapter/src/temporary_store.rs b/sui-execution/v1/sui-adapter/src/temporary_store.rs index 5b5da393d226f..3c9bbe9048881 100644 --- a/sui-execution/v1/sui-adapter/src/temporary_store.rs +++ b/sui-execution/v1/sui-adapter/src/temporary_store.rs @@ -43,6 +43,7 @@ pub struct TemporaryStore<'backing> { store: &'backing dyn BackingStore, tx_digest: TransactionDigest, input_objects: BTreeMap, + deleted_consensus_objects: BTreeMap, /// The version to assign to all objects written by the transaction using this store. lamport_timestamp: SequenceNumber, mutable_input_refs: BTreeMap, // Inputs that are mutable @@ -72,11 +73,13 @@ impl<'backing> TemporaryStore<'backing> { ) -> Self { let mutable_input_refs = input_objects.mutable_inputs(); let lamport_timestamp = input_objects.lamport_timestamp(&receiving_objects); + let deleted_consensus_objects = input_objects.deleted_consensus_objects(); let objects = input_objects.into_object_map(); Self { store, tx_digest, input_objects: objects, + deleted_consensus_objects, lamport_timestamp, mutable_input_refs, execution_results: ExecutionResultsV2::default(), @@ -111,6 +114,7 @@ impl<'backing> TemporaryStore<'backing> { let results = self.execution_results; InnerTemporaryStore { input_objects: self.input_objects, + deleted_consensus_objects: self.deleted_consensus_objects, mutable_inputs: self.mutable_input_refs, written: results.written_objects, events: TransactionEvents { @@ -1067,6 +1071,8 @@ impl<'backing> ChildObjectResolver for TemporaryStore<'backing> { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { // You should never be able to try and receive an object after deleting it or writing it in the same // transaction since `Receiving` doesn't have copy. @@ -1083,6 +1089,7 @@ impl<'backing> ChildObjectResolver for TemporaryStore<'backing> { receiving_object_id, receive_object_at_version, epoch_id, + use_object_per_epoch_marker_table_v2, ) } } diff --git a/sui-execution/v1/sui-move-natives/src/object_runtime/mod.rs b/sui-execution/v1/sui-move-natives/src/object_runtime/mod.rs index 0ca90b8ed32ba..eaffa92930f02 100644 --- a/sui-execution/v1/sui-move-natives/src/object_runtime/mod.rs +++ b/sui-execution/v1/sui-move-natives/src/object_runtime/mod.rs @@ -116,6 +116,11 @@ pub(crate) struct LocalProtocolConfig { impl LocalProtocolConfig { fn new(config: &ProtocolConfig) -> Self { + // This should always be false for old protocol versions. 
+ assert!(!config + .use_object_per_epoch_marker_table_v2_as_option() + .unwrap_or(false)); + Self { max_num_deleted_move_object_ids: config.max_num_deleted_move_object_ids(), max_num_event_emit: config.max_num_event_emit(), diff --git a/sui-execution/v1/sui-move-natives/src/object_runtime/object_store.rs b/sui-execution/v1/sui-move-natives/src/object_runtime/object_store.rs index 09b576d1cb5b3..a0901eb00413f 100644 --- a/sui-execution/v1/sui-move-natives/src/object_runtime/object_store.rs +++ b/sui-execution/v1/sui-move-natives/src/object_runtime/object_store.rs @@ -90,7 +90,13 @@ impl<'a> Inner<'a> { ) -> PartialVMResult> { let child_opt = self .resolver - .get_object_received_at_version(&owner, &child, version, self.current_epoch_id) + .get_object_received_at_version( + &owner, + &child, + version, + self.current_epoch_id, + false, // invariant verified in LocalProtocolConfig::new + ) .map_err(|msg| { PartialVMError::new(StatusCode::STORAGE_ERROR).with_message(format!("{msg}")) })?; diff --git a/sui-execution/v2/sui-adapter/src/temporary_store.rs b/sui-execution/v2/sui-adapter/src/temporary_store.rs index 837d24b1e0e99..cea9bc626d157 100644 --- a/sui-execution/v2/sui-adapter/src/temporary_store.rs +++ b/sui-execution/v2/sui-adapter/src/temporary_store.rs @@ -43,6 +43,7 @@ pub struct TemporaryStore<'backing> { store: &'backing dyn BackingStore, tx_digest: TransactionDigest, input_objects: BTreeMap, + deleted_consensus_objects: BTreeMap, /// The version to assign to all objects written by the transaction using this store. lamport_timestamp: SequenceNumber, mutable_input_refs: BTreeMap, // Inputs that are mutable @@ -74,6 +75,7 @@ impl<'backing> TemporaryStore<'backing> { ) -> Self { let mutable_input_refs = input_objects.mutable_inputs(); let lamport_timestamp = input_objects.lamport_timestamp(&receiving_objects); + let deleted_consensus_objects = input_objects.deleted_consensus_objects(); let objects = input_objects.into_object_map(); #[cfg(debug_assertions)] { @@ -94,6 +96,7 @@ impl<'backing> TemporaryStore<'backing> { store, tx_digest, input_objects: objects, + deleted_consensus_objects, lamport_timestamp, mutable_input_refs, execution_results: ExecutionResultsV2::default(), @@ -130,6 +133,7 @@ impl<'backing> TemporaryStore<'backing> { InnerTemporaryStore { input_objects: self.input_objects, mutable_inputs: self.mutable_input_refs, + deleted_consensus_objects: self.deleted_consensus_objects, written: results.written_objects, events: TransactionEvents { data: results.user_events, @@ -1119,6 +1123,8 @@ impl<'backing> ChildObjectResolver for TemporaryStore<'backing> { receiving_object_id: &ObjectID, receive_object_at_version: SequenceNumber, epoch_id: EpochId, + // TODO: Delete this parameter once table migration is complete. + use_object_per_epoch_marker_table_v2: bool, ) -> SuiResult> { // You should never be able to try and receive an object after deleting it or writing it in the same // transaction since `Receiving` doesn't have copy. 
@@ -1135,6 +1141,7 @@ impl<'backing> ChildObjectResolver for TemporaryStore<'backing> { receiving_object_id, receive_object_at_version, epoch_id, + use_object_per_epoch_marker_table_v2, ) } } diff --git a/sui-execution/v2/sui-move-natives/src/object_runtime/object_store.rs b/sui-execution/v2/sui-move-natives/src/object_runtime/object_store.rs index 7d977efac9623..1fd05ea65697c 100644 --- a/sui-execution/v2/sui-move-natives/src/object_runtime/object_store.rs +++ b/sui-execution/v2/sui-move-natives/src/object_runtime/object_store.rs @@ -93,7 +93,15 @@ impl<'a> Inner<'a> { ) -> PartialVMResult> { let child_opt = self .resolver - .get_object_received_at_version(&owner, &child, version, self.current_epoch_id) + .get_object_received_at_version( + &owner, + &child, + version, + self.current_epoch_id, + self.protocol_config + .use_object_per_epoch_marker_table_v2_as_option() + .unwrap_or(false), + ) .map_err(|msg| { PartialVMError::new(StatusCode::STORAGE_ERROR).with_message(format!("{msg}")) })?;
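To summarize how the new types compose, here is a short reviewer's sketch (not part of the change) built only from the APIs added above (`FullObjectID::new`, `FullObjectKey::new`, `into_object_key`, `id`, `version`), plus the `ObjectID::random` and `SequenceNumber::from_u64` helpers, which are assumed here for brevity:

```rust
use sui_types::base_types::{FullObjectID, ObjectID, SequenceNumber};
use sui_types::storage::FullObjectKey;

fn main() {
    // Fastpath (owned) object: no start version, so the key carries only ObjectID + version
    // and degenerates to a plain ObjectKey.
    let fastpath_id = FullObjectID::new(ObjectID::random(), None);
    let fastpath_key = FullObjectKey::new(fastpath_id, SequenceNumber::from_u64(5));
    assert_eq!(fastpath_key.into_object_key().1, SequenceNumber::from_u64(5));

    // Consensus (Shared / ConsensusV2) object: the start version becomes part of the key,
    // so the same ObjectID re-entering consensus later gets a distinct stream of marker keys.
    let consensus_id = FullObjectID::new(ObjectID::random(), Some(SequenceNumber::from_u64(3)));
    let consensus_key = FullObjectKey::new(consensus_id, SequenceNumber::from_u64(7));
    assert_eq!(consensus_key.id(), consensus_id);
    assert_eq!(consensus_key.version(), SequenceNumber::from_u64(7));
}
```

Until the migration completes, `into_object_key` gives existing v1 marker-table code a lossy view of the same key by dropping the start version.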