diff --git a/pallets/storage-provider/Cargo.toml b/pallets/storage-provider/Cargo.toml
index d0977233f..f9c5cf83f 100644
--- a/pallets/storage-provider/Cargo.toml
+++ b/pallets/storage-provider/Cargo.toml
@@ -22,6 +22,7 @@ primitives-proofs = { workspace = true, default-features = false }
 scale-info = { workspace = true, default-features = false, features = ["derive"] }
 sp-arithmetic = { workspace = true, default-features = false }
 sp-core = { workspace = true, default-features = false }
+sp-runtime = { workspace = true, default-features = false }
 
 # Frame deps
 frame-benchmarking = { workspace = true, default-features = false, optional = true }
@@ -34,7 +35,6 @@ multihash-codetable = { workspace = true, features = ["blake2b"] }
 pallet-balances = { workspace = true, default-features = false }
 pallet-market = { workspace = true, default-features = false }
 sp-io = { workspace = true }
-sp-runtime = { workspace = true, default-features = false }
 
 [features]
 default = ["std"]
diff --git a/pallets/storage-provider/src/deadline.rs b/pallets/storage-provider/src/deadline.rs
new file mode 100644
index 000000000..77582713b
--- /dev/null
+++ b/pallets/storage-provider/src/deadline.rs
@@ -0,0 +1,476 @@
+extern crate alloc;
+
+use alloc::vec::Vec;
+
+use codec::{Decode, Encode};
+use frame_support::{
+    pallet_prelude::*,
+    sp_runtime::{BoundedBTreeMap, BoundedVec},
+    PalletError,
+};
+use primitives_proofs::SectorNumber;
+use scale_info::{prelude::cmp, TypeInfo};
+
+use crate::{
+    pallet::LOG_TARGET,
+    partition::{Partition, PartitionNumber, MAX_PARTITIONS_PER_DEADLINE},
+    sector::SectorOnChainInfo,
+};
+
+mod assignment;
+
+pub use assignment::assign_deadlines;
+
+/// Deadline holds the state for all sectors due at a specific deadline.
+///
+/// A deadline exists alongside 47 other deadlines (one for every 30 minutes in a day).
+/// Only one deadline may be active for a given proving window.
+#[derive(Clone, Debug, Default, Decode, Encode, PartialEq, TypeInfo)]
+pub struct Deadline<BlockNumber> {
+    /// Partitions in this deadline. Indexed by partition number.
+    pub partitions: BoundedBTreeMap<
+        PartitionNumber,
+        Partition<BlockNumber>,
+        ConstU32<MAX_PARTITIONS_PER_DEADLINE>,
+    >,
+
+    /// Maps blocks to partitions that _may_ have sectors about to expire — i.e. just before or in that block.
+    /// The expiration happens either on time, or early because of faults.
+    ///
+    /// Filecoin has another expiration mapping in the Partition struct, which maps a block to the sectors
+    /// that expire on time or early (due to being faulty). We can extract this information from other
+    /// sources: the faulty sectors are stored in the Partition, and the on-time sectors are
+    /// `sectors - (faults + terminated + unproven + recoveries)`.
+    ///
+    /// To find a partition with sectors that are about to expire, first get the current deadline from
+    /// the storage provider state:
+    /// `let current_deadline_block = storage_provider_state.current_deadline;`
+    /// With the current deadline we can then get the partition number associated with that deadline block:
+    /// `let partition_number = deadline.expirations_blocks.get(current_deadline_block);`
+    ///
+    /// Then we can get the partition information from the deadline:
+    /// `let partition_to_expire = deadline.partitions.get(partition_number);`
+    ///
+    /// With this information we can finally get the sector information from the storage provider state:
+    /// `let sectors_info: Vec<SectorOnChainInfo> = partition_to_expire.sectors.iter().map(|sector_number| {
+    ///     storage_provider_state.sectors.get(sector_number)
+    /// }).collect();`
+    ///
+    /// # Important
+    /// Partitions MUST NOT be removed from this queue (until the
+    /// associated block has passed) even if they no longer have sectors
+    /// expiring at that block. Sectors expiring at their given block may later be
+    /// recovered, and this queue will not be updated at that time.
+    pub expirations_blocks:
+        BoundedBTreeMap<BlockNumber, PartitionNumber, ConstU32<MAX_PARTITIONS_PER_DEADLINE>>,
+
+    /// Partitions that have been proved by window PoSts so far during the
+    /// current challenge window.
+    pub partitions_posted: BoundedBTreeSet<PartitionNumber, ConstU32<MAX_PARTITIONS_PER_DEADLINE>>,
+
+    /// Partition numbers with sectors that terminated early.
+    pub early_terminations: BoundedBTreeSet<PartitionNumber, ConstU32<MAX_PARTITIONS_PER_DEADLINE>>,
+
+    /// The number of non-terminated sectors in this deadline (incl. faulty).
+    pub live_sectors: u64,
+
+    /// The total number of sectors in this deadline (incl. dead).
+    pub total_sectors: u64,
+}
+
+impl<BlockNumber> Deadline<BlockNumber>
+where
+    BlockNumber: sp_runtime::traits::BlockNumber,
+{
+    /// Construct a new [`Deadline`] instance.
+    pub fn new() -> Self {
+        Self {
+            partitions: BoundedBTreeMap::new(),
+            expirations_blocks: BoundedBTreeMap::new(),
+            partitions_posted: BoundedBTreeSet::new(),
+            early_terminations: BoundedBTreeSet::new(),
+            live_sectors: 0,
+            total_sectors: 0,
+        }
+    }
+
+    /// Sets a given partition as proven.
+    ///
+    /// If the partition has already been proven, an error is returned.
+    pub fn record_proven(&mut self, partition_num: PartitionNumber) -> Result<(), DeadlineError> {
+        log::debug!(target: LOG_TARGET, "record_proven: partition number = {partition_num:?}");
+        ensure!(
+            !self.partitions_posted.contains(&partition_num),
+            DeadlineError::PartitionAlreadyProven
+        );
+        self.partitions_posted
+            .try_insert(partition_num)
+            .map_err(|_| DeadlineError::ProofUpdateFailed)?;
+        Ok(())
+    }
+
+    /// Adds sectors to the current deadline.
+    ///
+    /// Added sectors will be stored in the deadline's last stored partition.
+    ///
+    /// # Important
+    /// * It's the caller's responsibility to make sure that this deadline isn't currently being proven — i.e. open.
+    /// * The sectors are assumed to be non-faulty.
+    pub fn add_sectors(
+        &mut self,
+        partition_size: u64,
+        mut sectors: &[SectorOnChainInfo<BlockNumber>],
+    ) -> Result<(), DeadlineError> {
+        if sectors.is_empty() {
+            return Ok(());
+        }
+
+        // First update partitions, consuming the sectors
+        let mut partition_deadline_updates =
+            Vec::<(BlockNumber, PartitionNumber)>::with_capacity(sectors.len());
+        // PRE-COND: there can never be more live sectors than u64, so it never overflows
+        self.live_sectors += sectors.len() as u64;
+        self.total_sectors += sectors.len() as u64;
+
+        let partitions = &mut self.partitions;
+
+        let mut partition_idx = partitions.len().saturating_sub(1);
+        // Try filling up the last partition first.
+        loop {
+            // Get/create the partition to update.
+            let mut partition = match partitions.get_mut(&(partition_idx as u32)) {
+                Some(partition) => partition.clone(),
+                None => {
+                    // This case will only happen when trying to add a full partition more than once in one go.
+                    Partition::new()
+                }
+            };
+
+            // Figure out which (if any) sectors we want to add to this partition.
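+            // E.g. (illustrative numbers): with partition_size = 4, a last partition
+            // already holding 2 sectors, and 6 incoming sectors, this iteration adds
+            // 2 sectors here and the loop then opens a new partition for the other 4.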
+            let sector_count = partition.sectors.len() as u64;
+            if sector_count >= partition_size {
+                // This partition is full, move on to the next one.
+                partition_idx += 1;
+                continue;
+            }
+
+            let size = cmp::min(partition_size - sector_count, sectors.len() as u64) as usize;
+
+            let (partition_new_sectors, remaining_sectors) = sectors.split_at(size);
+            sectors = remaining_sectors;
+
+            let new_partition_sectors: Vec<SectorNumber> = partition_new_sectors
+                .into_iter()
+                .map(|sector| sector.sector_number)
+                .collect();
+
+            // Add sectors to partition.
+            partition
+                .add_sectors(&new_partition_sectors)
+                .map_err(|_| DeadlineError::CouldNotAddSectors)?;
+
+            // Save the (possibly updated) partition; `try_insert` overwrites any previous value.
+            partitions.try_insert(partition_idx as u32, partition).map_err(|_| {
+                log::error!(target: LOG_TARGET, "add_sectors: Cannot insert partition at {partition_idx}");
+                DeadlineError::CouldNotAddSectors
+            })?;
+
+            // Record deadline -> partition mapping so we can later update the deadlines.
+            partition_deadline_updates.extend(
+                partition_new_sectors
+                    .iter()
+                    .map(|s| (s.expiration, partition_idx as PartitionNumber)),
+            );
+
+            if sectors.is_empty() {
+                break;
+            }
+            partition_idx += 1;
+        }
+
+        // Next, update the expiration queue.
+        for (block, partition_index) in partition_deadline_updates {
+            self.expirations_blocks.try_insert(block, partition_index).map_err(|_| {
+                log::error!(target: LOG_TARGET, "add_sectors: Cannot update expiration queue for partition {partition_index}");
+                DeadlineError::CouldNotAddSectors
+            })?;
+        }
+
+        Ok(())
+    }
+}
+
+#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
+pub struct Deadlines<BlockNumber> {
+    /// Deadlines indexed by their deadline index within the proving period — e.g. deadline 7 is
+    /// found in `due[7]` — deadline indexes lie in the interval `[0, 47]`.
+    ///
+    /// Bounded to 48 elements since that's the set amount of deadlines per proving period.
+    ///
+    /// In the original implementation, the information is kept in a separate structure, possibly
+    /// to make fetching the state more efficient as this is kept in the storage provider's
+    /// blockstore. However, we're keeping all the state on-chain.
+    ///
+    /// References:
+    /// *
+    /// *
+    /// *
+    pub due: BoundedVec<Deadline<BlockNumber>, ConstU32<48>>,
+}
+
+impl<BlockNumber> Deadlines<BlockNumber>
+where
+    BlockNumber: sp_runtime::traits::BlockNumber,
+{
+    /// Construct a new [`Deadlines`].
+    ///
+    /// Pre-initializes all the `w_post_period_deadlines` as empty deadlines.
+    pub fn new(w_post_period_deadlines: u64) -> Self {
+        let mut due = BoundedVec::new();
+        for _ in 0..w_post_period_deadlines {
+            let _ = due.try_push(Deadline::new());
+        }
+        Self { due }
+    }
+
+    /// Get the amount of deadlines that are due.
+    pub fn len(&self) -> usize {
+        self.due.len()
+    }
+
+    /// Loads a mutable deadline from the given index.
+    /// Fails if the index does not exist or is out of range.
+    pub fn load_deadline_mut(
+        &mut self,
+        idx: usize,
+    ) -> Result<&mut Deadline<BlockNumber>, DeadlineError> {
+        log::debug!(target: LOG_TARGET, "load_deadline_mut: getting deadline at index {idx}");
+        // Ensure the provided index is within range.
+        ensure!(self.len() > idx, DeadlineError::DeadlineIndexOutOfRange);
+        Ok(self
+            .due
+            .get_mut(idx)
+            .expect("Deadlines are pre-initialized, this cannot fail"))
+    }
+
+    /// Loads a deadline.
+    /// Fails if the index does not exist or is out of range.
+    pub fn load_deadline(&self, idx: usize) -> Result<Deadline<BlockNumber>, DeadlineError> {
+        log::debug!(target: LOG_TARGET, "load_deadline: getting deadline at index {idx}");
+        // Ensure the provided index is within range.
+        ensure!(self.len() > idx, DeadlineError::DeadlineIndexOutOfRange);
+        Ok(self
+            .due
+            .get(idx)
+            .cloned()
+            .expect("Deadlines are pre-initialized, this cannot fail"))
+    }
+
+    /// Records a deadline as proven.
+    ///
+    /// Returns an error if the deadline has already been proven.
+    pub fn record_proven(
+        &mut self,
+        deadline_idx: usize,
+        partition_num: PartitionNumber,
+    ) -> Result<(), DeadlineError> {
+        log::debug!(target: LOG_TARGET, "record_proven: partition number: {partition_num:?}");
+        let deadline = self.load_deadline_mut(deadline_idx)?;
+        deadline.record_proven(partition_num)?;
+        Ok(())
+    }
+
+    /// Replace values of the deadline at index `deadline_idx` with those of `new_dl`.
+    pub fn update_deadline(
+        &mut self,
+        deadline_idx: usize,
+        new_dl: Deadline<BlockNumber>,
+    ) -> Result<(), DeadlineError> {
+        let dl = self
+            .due
+            .get_mut(deadline_idx)
+            .ok_or(DeadlineError::DeadlineNotFound)?;
+        dl.partitions_posted = new_dl.partitions_posted;
+        dl.expirations_blocks = new_dl.expirations_blocks;
+        dl.early_terminations = new_dl.early_terminations;
+        dl.live_sectors = new_dl.live_sectors;
+        dl.total_sectors = new_dl.total_sectors;
+        dl.partitions = new_dl.partitions;
+        Ok(())
+    }
+}
+
+/// Holds information about a deadline, like when it opens and closes and which deadline index it relates to.
+///
+/// Filecoin reference about PoSt deadline design:
+///
+#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
+pub struct DeadlineInfo<BlockNumber> {
+    /// The block number at which this info was calculated.
+    pub block_number: BlockNumber,
+
+    /// The block number at which the proving period for this deadline starts.
+    /// `period_start < open_at`, to give SPs time to create the proof before the deadline opens.
+    pub period_start: BlockNumber,
+
+    /// The deadline index within its proving window.
+    pub idx: u64,
+
+    /// The first block number from which a proof can be submitted.
+    /// `open_at > period_start`.
+    pub open_at: BlockNumber,
+
+    /// The first block number from which a proof can *no longer* be submitted.
+    pub close_at: BlockNumber,
+
+    /// The number of non-overlapping PoSt deadlines in each proving period.
+    pub w_post_period_deadlines: u64,
+
+    /// The period over which all of an SP's active sectors will be challenged.
+    pub w_post_proving_period: BlockNumber,
+
+    /// The duration of a deadline's challenge window, the period before a deadline when the challenge is available.
+    pub w_post_challenge_window: BlockNumber,
+}
+
+impl<BlockNumber> DeadlineInfo<BlockNumber>
+where
+    BlockNumber: sp_runtime::traits::BlockNumber,
+{
+    /// Constructs a new `DeadlineInfo`.
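+    ///
+    /// For example, reusing the block-time numbers from the pallet's
+    /// `WPoStChallengeWindow` docs: with `period_start = 741600`, `idx = 27` and
+    /// `w_post_challenge_window = 150`, the deadline opens at
+    /// `741600 + 27 * 150 = 745650` and closes at `745650 + 150 = 745800`.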
+    ///
+    /// Reference:
+    pub fn new(
+        block_number: BlockNumber,
+        period_start: BlockNumber,
+        idx: u64,
+        w_post_period_deadlines: u64,
+        w_post_challenge_window: BlockNumber,
+        w_post_proving_period: BlockNumber,
+    ) -> Result<Self, DeadlineError> {
+        // Convert `w_post_period_deadlines` and `idx` so we can do arithmetic with them.
+        let period_deadlines = BlockNumber::try_from(w_post_period_deadlines).map_err(|_| {
+            log::error!(target: LOG_TARGET, "failed to convert {w_post_period_deadlines:?} to BlockNumber");
+            DeadlineError::CouldNotConstructDeadlineInfo
+        })?;
+        let idx_converted = BlockNumber::try_from(idx).map_err(|_| {
+            log::error!(target: LOG_TARGET, "failed to convert {idx:?} to BlockNumber");
+            DeadlineError::CouldNotConstructDeadlineInfo
+        })?;
+        let (open_at, close_at) = if idx_converted < period_deadlines {
+            let open_at = period_start + (idx_converted * w_post_challenge_window);
+            let close_at = open_at + w_post_challenge_window;
+            (open_at, close_at)
+        } else {
+            let after_last_deadline = period_start + w_post_proving_period;
+            (after_last_deadline, after_last_deadline)
+        };
+        Ok(Self {
+            block_number,
+            period_start,
+            idx,
+            open_at,
+            close_at,
+            w_post_period_deadlines,
+            w_post_challenge_window,
+            w_post_proving_period,
+        })
+    }
+
+    /// Whether the current deadline is currently open.
+    pub fn is_open(&self) -> bool {
+        self.block_number >= self.open_at && self.block_number < self.close_at
+    }
+
+    /// Whether the current deadline has already closed.
+    pub fn has_elapsed(&self) -> bool {
+        self.block_number >= self.close_at
+    }
+
+    /// Returns the next instance of this deadline that has not yet elapsed.
+    ///
+    /// If the current deadline has not elapsed yet, it returns the current deadline.
+    /// Otherwise it advances `period_start` by however many whole proving periods are
+    /// needed to pass the current block: the gap between the current block and
+    /// `close_at`, divided by `w_post_proving_period`, plus one.
+    pub fn next_not_elapsed(self) -> Result<Self, DeadlineError> {
+        if !self.has_elapsed() {
+            return Ok(self);
+        }
+
+        // Has elapsed, advance by some multiple of w_post_proving_period.
+        let gap = self.block_number - self.close_at;
+        let one = TryInto::<BlockNumber>::try_into(1u64)
+            .map_err(|_| DeadlineError::FailedToGetNextDeadline)?;
+        let delta_periods = one + gap / self.w_post_proving_period;
+
+        Self::new(
+            self.block_number,
+            self.period_start + self.w_post_proving_period * delta_periods,
+            self.idx,
+            self.w_post_period_deadlines,
+            self.w_post_challenge_window,
+            self.w_post_proving_period,
+        )
+    }
+}
+
+/// Returns true if the deadline at the given index is currently mutable.
+///
+/// Deadlines are considered to be immutable if they are being proven or about to be proven.
+///
+/// Reference:
+pub fn deadline_is_mutable<BlockNumber>(
+    proving_period_start: BlockNumber,
+    deadline_idx: u64,
+    current_block: BlockNumber,
+    w_post_challenge_window: BlockNumber,
+    w_post_period_deadlines: u64,
+    w_post_proving_period: BlockNumber,
+) -> Result<bool, DeadlineError>
+where
+    BlockNumber: sp_runtime::traits::BlockNumber,
+{
+    // Get the next non-elapsed deadline (i.e., the next time we care about
+    // mutations to the deadline).
+    let dl_info = DeadlineInfo::new(
+        current_block,
+        proving_period_start,
+        deadline_idx,
+        w_post_period_deadlines,
+        w_post_challenge_window,
+        w_post_proving_period,
+    )?
+    .next_not_elapsed()?;
+    log::debug!(target: LOG_TARGET, "dl_info = {dl_info:?}");
+
+    // Ensure that the current block is at least one challenge window before
+    // that deadline opens.
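+    // E.g. (illustrative): if the next instance of this deadline opens at block
+    // 745650 and the challenge window is 150 blocks, the deadline is only mutable
+    // while current_block < 745500.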
+    Ok(current_block < dl_info.open_at.saturating_sub(w_post_challenge_window))
+}
+
+#[derive(Decode, Encode, PalletError, TypeInfo, RuntimeDebug)]
+pub enum DeadlineError {
+    /// Emitted when the deadline index supplied to `submit_windowed_post` is out of range.
+    DeadlineIndexOutOfRange,
+    /// Emitted when trying to get a deadline fails because the index does not exist.
+    DeadlineNotFound,
+    /// Emitted when trying to insert a deadline at an index in `Deadlines` that is already occupied.
+    DeadlineIndexExists,
+    /// Emitted when inserting a new deadline fails.
+    CouldNotInsertDeadline,
+    /// Emitted when constructing `DeadlineInfo` fails.
+    CouldNotConstructDeadlineInfo,
+    /// Emitted when a proof is submitted for a partition that is already proven.
+    PartitionAlreadyProven,
+    /// Emitted when trying to retrieve a partition that does not exist.
+    PartitionNotFound,
+    /// Emitted when trying to update proven partitions fails.
+    ProofUpdateFailed,
+    /// Emitted when trying to get the next instance of a deadline that has not yet elapsed fails.
+    FailedToGetNextDeadline,
+    /// Emitted when the max partitions for a given deadline have been reached.
+    MaxPartitionsReached,
+    /// Emitted when trying to add sectors to a deadline fails.
+    CouldNotAddSectors,
+    /// Emitted when assigning sectors to deadlines fails.
+    CouldNotAssignSectorsToDeadlines,
+}
diff --git a/pallets/storage-provider/src/deadline/assignment.rs b/pallets/storage-provider/src/deadline/assignment.rs
new file mode 100644
index 000000000..9f4b57759
--- /dev/null
+++ b/pallets/storage-provider/src/deadline/assignment.rs
@@ -0,0 +1,444 @@
+extern crate alloc;
+
+use alloc::{collections::BinaryHeap, vec, vec::Vec};
+use core::cmp::Ordering;
+
+use crate::{
+    deadline::{Deadline, DeadlineError},
+    pallet::LOG_TARGET,
+    sector::SectorOnChainInfo,
+};
+
+/// Intermediary data structure used to assign deadlines to sectors.
+struct DeadlineAssignmentInfo {
+    /// The deadline index.
+    index: usize,
+    /// The number of live sectors (i.e. sectors that have *not* been terminated) in the deadline.
+    live_sectors: u64,
+    /// The total number of sectors in the deadline (may include terminated ones).
+    total_sectors: u64,
+}
+
+impl DeadlineAssignmentInfo {
+    /// Returns the number of partitions after adding one sector to the total sectors.
+    fn partitions_after_assignment(&self, partition_size: u64) -> u64 {
+        let total_sectors = self.total_sectors + 1; // after assignment
+        total_sectors.div_ceil(partition_size)
+    }
+
+    /// Returns the number of partitions after adding one sector to the live sectors.
+    fn compact_partitions_after_assignment(&self, partition_size: u64) -> u64 {
+        let live_sectors = self.live_sectors + 1; // after assignment
+        live_sectors.div_ceil(partition_size)
+    }
+
+    /// Partition size = maximum amount of sectors in a single partition.
+    /// `total_sectors % partition_size` is zero if the partition is full.
+    /// Example 1: partition size = 10, total sectors = 8; 8 % 10 = 8 -> not full
+    /// Example 2: partition size = 10, total sectors = 10; 10 % 10 = 0 -> full
+    fn is_full_now(&self, partition_size: u64) -> bool {
+        self.total_sectors % partition_size == 0
+    }
+
+    /// Determines whether the maximum number of partitions, given by `max_partitions`, has been reached.
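+    /// Example: partition size = 10, max partitions = 3; 30 total sectors -> reached,
+    /// 29 total sectors -> not reached.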
+    fn max_partitions_reached(&self, partition_size: u64, max_partitions: u64) -> bool {
+        self.total_sectors >= partition_size * max_partitions
+    }
+}
+
+/// Reference: https://github.com/filecoin-project/builtin-actors/blob/8d957d2901c0f2044417c268f0511324f591cb92/actors/miner/src/deadline_assignment.rs#L47
+fn cmp(a: &DeadlineAssignmentInfo, b: &DeadlineAssignmentInfo, partition_size: u64) -> Ordering {
+    // When assigning partitions to deadlines, we're trying to optimize the
+    // following:
+    //
+    // First, avoid increasing the maximum number of partitions in any
+    // deadline, across all deadlines, after compaction. This would
+    // necessitate buying a new GPU.
+    //
+    // Second, avoid forcing the miner to repeatedly compact partitions. A
+    // miner would be "forced" to compact a partition when the number of
+    // partitions in any given deadline goes above the current maximum
+    // number of partitions across all deadlines, and compacting that
+    // deadline would then reduce the number of partitions, reducing the
+    // maximum.
+    //
+    // At the moment, the only "forced" compaction happens when either:
+    //
+    // 1. Assignment of the sector into any deadline would force a
+    //    compaction.
+    // 2. The chosen deadline has at least one full partition's worth of
+    //    terminated sectors and at least one fewer partition (after
+    //    compaction) than any other deadline.
+    //
+    // Third, we attempt to assign "runs" of sectors to the same partition
+    // to reduce the size of the bitfields.
+    //
+    // Finally, we try to balance the number of sectors (thus partitions)
+    // assigned to any given deadline over time.
+
+    // Summary:
+    //
+    // 1. Assign to the deadline that will have the _least_ number of
+    //    post-compaction partitions (after sector assignment).
+    // 2. Assign to the deadline that will have the _least_ number of
+    //    pre-compaction partitions (after sector assignment).
+    // 3. Assign to a deadline with a non-full partition.
+    //    - If both have non-full partitions, assign to the most full one (stable sort).
+    // 4. Assign to the deadline with the least number of live sectors.
+    // 5. Assign sectors to the deadline with the lowest index first.
+
+    // If one deadline would end up with fewer partitions (after
+    // compacting), assign to that one. This ensures we keep the maximum
+    // number of partitions in any given deadline to a minimum.
+    //
+    // Technically, this could increase the maximum number of partitions
+    // before compaction. However, that can only happen if the deadline in
+    // question could save an entire partition by compacting. At that point,
+    // the miner should compact the deadline.
+    a.compact_partitions_after_assignment(partition_size)
+        .cmp(&b.compact_partitions_after_assignment(partition_size))
+        .then_with(|| {
+            // If, after assignment, neither deadline would have fewer
+            // post-compaction partitions, assign to the deadline with the fewest
+            // pre-compaction partitions (after assignment). This will put off
+            // compaction as long as possible.
+            a.partitions_after_assignment(partition_size)
+                .cmp(&b.partitions_after_assignment(partition_size))
+        })
+        .then_with(|| {
+            // Ok, we'll end up with the same number of partitions any which way we
+            // go. Try to fill up a partition instead of opening a new one.
+            a.is_full_now(partition_size)
+                .cmp(&b.is_full_now(partition_size))
+        })
+        .then_with(|| {
+            // Either we have two open partitions, or neither deadline has an open
+            // partition.
+
+            // If we have two open partitions, fill the deadline with the most-full
+            // open partition. This helps us assign runs of sequential sectors into
+            // the same partition.
+            if !a.is_full_now(partition_size) && !b.is_full_now(partition_size) {
+                a.total_sectors.cmp(&b.total_sectors).reverse()
+            } else {
+                Ordering::Equal
+            }
+        })
+        .then_with(|| {
+            // Otherwise, assign to the deadline with the least live sectors. This
+            // will break the tie in one of the two immediately preceding
+            // conditions.
+            a.live_sectors.cmp(&b.live_sectors)
+        })
+        .then_with(|| {
+            // Finally, fall back on the deadline index.
+            a.index.cmp(&b.index)
+        })
+}
+
+/// Assigns partitions to deadlines, first filling partial partitions, then
+/// adding new partitions to deadlines with the fewest live sectors.
+pub fn assign_deadlines<BlockNumber>(
+    max_partitions: u64,
+    partition_size: u64,
+    deadlines: &[Option<Deadline<BlockNumber>>],
+    sectors: &[SectorOnChainInfo<BlockNumber>],
+    w_post_period_deadlines: u64,
+) -> Result<impl Iterator<Item = Vec<SectorOnChainInfo<BlockNumber>>>, DeadlineError>
+where
+    BlockNumber: sp_runtime::traits::BlockNumber,
+{
+    log::debug!(target: LOG_TARGET, "deadlines len: {}, sectors len: {}", deadlines.len(), sectors.len());
+    let nones = deadlines.iter().filter(|dl| dl.is_none()).count();
+    log::debug!(target: LOG_TARGET, "deadlines that are none: {nones}");
+
+    struct Entry {
+        partition_size: u64,
+        info: DeadlineAssignmentInfo,
+    }
+
+    impl PartialEq for Entry {
+        fn eq(&self, other: &Self) -> bool {
+            self.cmp(other) == Ordering::Equal
+        }
+    }
+
+    impl Eq for Entry {}
+
+    impl PartialOrd for Entry {
+        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+            Some(self.cmp(other))
+        }
+    }
+
+    impl Ord for Entry {
+        fn cmp(&self, other: &Self) -> Ordering {
+            // We're using a max heap instead of a min heap, so we need to reverse the ordering.
+            cmp(&self.info, &other.info, self.partition_size).reverse()
+        }
+    }
+
+    let mut heap: BinaryHeap<Entry> = deadlines
+        .iter()
+        .enumerate()
+        .filter_map(|(index, deadline)| deadline.as_ref().map(|dl| (index, dl)))
+        .map(|(index, deadline)| Entry {
+            partition_size,
+            info: DeadlineAssignmentInfo {
+                index,
+                live_sectors: deadline.live_sectors,
+                total_sectors: deadline.total_sectors,
+            },
+        })
+        .collect();
+
+    assert!(!heap.is_empty());
+
+    let mut changes = vec![Vec::new(); w_post_period_deadlines as usize];
+
+    for sector in sectors {
+        let info = &mut heap
+            .peek_mut()
+            .ok_or(DeadlineError::CouldNotConstructDeadlineInfo)?
+            .info;
+
+        if info.max_partitions_reached(partition_size, max_partitions) {
+            return Err(DeadlineError::MaxPartitionsReached);
+        }
+
+        changes[info.index].push(sector.clone());
+        info.live_sectors += 1;
+        info.total_sectors += 1;
+    }
+
+    Ok(changes.into_iter())
+}
+
+#[cfg(test)]
+mod tests {
+    use frame_support::BoundedVec;
+    use primitives_proofs::RegisteredSealProof;
+
+    use crate::{
+        deadline::{assign_deadlines, Deadline},
+        sector::SectorOnChainInfo,
+    };
+
+    impl Default for SectorOnChainInfo<u64> {
+        fn default() -> Self {
+            Self {
+                sector_number: 1,
+                seal_proof: RegisteredSealProof::StackedDRG2KiBV1P1,
+                sealed_cid: BoundedVec::new(),
+                activation: 1,
+                expiration: 1,
+                unsealed_cid: BoundedVec::new(),
+            }
+        }
+    }
+
+    #[test]
+    fn test_deadline_assignment() {
+        const PARTITION_SIZE: u64 = 4;
+        const MAX_PARTITIONS: u64 = 100;
+
+        #[derive(Clone)]
+        struct Spec {
+            live_sectors: u64,
+            dead_sectors: u64,
+            expect_sectors: Vec<u64>,
+        }
+
+        struct TestCase {
+            sectors: u64,
+            deadlines: Vec<Option<Spec>>,
+        }
+        let test_cases = [
+            // Even assignment and striping.
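+            // 10 sectors over two empty deadlines with partition size 4: deadline 0
+            // gets sectors 0..=3 and later 8..=9, deadline 1 gets sectors 4..=7.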
+            TestCase {
+                sectors: 10,
+                deadlines: vec![
+                    Some(Spec {
+                        dead_sectors: 0,
+                        live_sectors: 0,
+                        expect_sectors: vec![0, 1, 2, 3, 8, 9],
+                    }),
+                    Some(Spec {
+                        dead_sectors: 0,
+                        live_sectors: 0,
+                        expect_sectors: vec![4, 5, 6, 7],
+                    }),
+                ],
+            },
+            // Fill non-full first
+            TestCase {
+                sectors: 5,
+                deadlines: vec![
+                    Some(Spec {
+                        dead_sectors: 0,
+                        live_sectors: 0,
+                        expect_sectors: vec![3, 4],
+                    }),
+                    None, // expect nothing.
+                    None,
+                    Some(Spec {
+                        dead_sectors: 0,
+                        live_sectors: 1,
+                        expect_sectors: vec![0, 1, 2],
+                    }),
+                ],
+            },
+            // Assign to the deadline with the least number of live partitions.
+            TestCase {
+                sectors: 1,
+                deadlines: vec![
+                    // 2 live partitions. +1 would add another.
+                    Some(Spec {
+                        dead_sectors: 0,
+                        live_sectors: 8,
+                        expect_sectors: vec![],
+                    }),
+                    // 2 live partitions. +1 wouldn't add another.
+                    // 1 dead partition.
+                    Some(Spec {
+                        dead_sectors: 5,
+                        live_sectors: 7,
+                        expect_sectors: vec![0],
+                    }),
+                ],
+            },
+            // Avoid increasing max partitions. Both deadlines have the same
+            // number of partitions post-compaction, but deadline 1 has
+            // fewer pre-compaction.
+            TestCase {
+                sectors: 1,
+                deadlines: vec![
+                    // One partition live, one dead.
+                    Some(Spec {
+                        dead_sectors: 4,
+                        live_sectors: 4,
+                        expect_sectors: vec![],
+                    }),
+                    // 1 live partition. +1 would add another.
+                    Some(Spec {
+                        dead_sectors: 0,
+                        live_sectors: 4,
+                        expect_sectors: vec![0],
+                    }),
+                ],
+            },
+            // With multiple open partitions, assign to most full first.
+            TestCase {
+                sectors: 1,
+                deadlines: vec![
+                    Some(Spec {
+                        dead_sectors: 0,
+                        live_sectors: 1,
+                        expect_sectors: vec![],
+                    }),
+                    Some(Spec {
+                        dead_sectors: 0,
+                        live_sectors: 2,
+                        expect_sectors: vec![0],
+                    }),
+                ],
+            },
+            // Dead sectors also count.
+            TestCase {
+                sectors: 1,
+                deadlines: vec![
+                    Some(Spec {
+                        dead_sectors: 0,
+                        live_sectors: 1,
+                        expect_sectors: vec![],
+                    }),
+                    Some(Spec {
+                        dead_sectors: 2,
+                        live_sectors: 0,
+                        expect_sectors: vec![0],
+                    }),
+                ],
+            },
+            // Dead sectors really do count.
+            TestCase {
+                sectors: 1,
+                deadlines: vec![
+                    Some(Spec {
+                        dead_sectors: 1,
+                        live_sectors: 0,
+                        expect_sectors: vec![],
+                    }),
+                    Some(Spec {
+                        dead_sectors: 2,
+                        live_sectors: 0,
+                        expect_sectors: vec![0],
+                    }),
+                ],
+            },
+            // When partitions are equally full, assign based on live sectors.
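+            // Both deadlines hold 2 sectors in a partial partition; the tie is
+            // broken by live sectors, so deadline 1 (0 live) receives the sector.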
+            TestCase {
+                sectors: 1,
+                deadlines: vec![
+                    Some(Spec {
+                        dead_sectors: 1,
+                        live_sectors: 1,
+                        expect_sectors: vec![],
+                    }),
+                    Some(Spec {
+                        dead_sectors: 2,
+                        live_sectors: 0,
+                        expect_sectors: vec![0],
+                    }),
+                ],
+            },
+        ];
+
+        for (nth_tc, tc) in test_cases.iter().enumerate() {
+            let deadlines: Vec<Option<Deadline<u64>>> = tc
+                .deadlines
+                .iter()
+                .cloned()
+                .map(|maybe_dl| {
+                    maybe_dl.map(|dl| Deadline {
+                        live_sectors: dl.live_sectors,
+                        total_sectors: dl.live_sectors + dl.dead_sectors,
+                        ..Default::default()
+                    })
+                })
+                .collect();
+
+            let sectors: Vec<SectorOnChainInfo<u64>> = (0..tc.sectors)
+                .map(|i| SectorOnChainInfo {
+                    sector_number: i,
+                    ..Default::default()
+                })
+                .collect();
+
+            let assignment =
+                assign_deadlines(MAX_PARTITIONS, PARTITION_SIZE, &deadlines, &sectors, 48).unwrap();
+            for (i, sectors) in assignment.enumerate() {
+                if let Some(Some(dl)) = tc.deadlines.get(i) {
+                    assert_eq!(
+                        dl.expect_sectors.len(),
+                        sectors.len(),
+                        "for deadline {}, case {}",
+                        i,
+                        nth_tc
+                    );
+                    for (i, &expected_sector_no) in dl.expect_sectors.iter().enumerate() {
+                        assert_eq!(sectors[i].sector_number, expected_sector_no);
+                    }
+                } else {
+                    assert!(
+                        sectors.is_empty(),
+                        "expected no sectors to have been assigned to blacked out deadline"
+                    );
+                }
+            }
+        }
+    }
+}
diff --git a/pallets/storage-provider/src/lib.rs b/pallets/storage-provider/src/lib.rs
index 727c9bac7..1d5e81da2 100644
--- a/pallets/storage-provider/src/lib.rs
+++ b/pallets/storage-provider/src/lib.rs
@@ -18,6 +18,8 @@ mod benchmarks;
 #[cfg(test)]
 mod tests;
 
+mod deadline;
+mod partition;
 mod proofs;
 mod sector;
 mod storage_provider;
@@ -29,6 +31,9 @@ pub mod pallet {
     pub const BLAKE2B_MULTIHASH_CODE: u64 = 0xB220;
     pub const LOG_TARGET: &'static str = "runtime::storage_provider";
 
+    extern crate alloc;
+
+    use alloc::vec;
     use core::fmt::Debug;
 
     use cid::{Cid, Version};
@@ -44,12 +49,14 @@ pub mod pallet {
     use scale_info::TypeInfo;
 
     use crate::{
+        deadline::DeadlineInfo,
         proofs::{
             assign_proving_period_offset, current_deadline_index, current_proving_period_start,
+            SubmitWindowedPoStParams,
         },
         sector::{
             ProveCommitSector, SectorOnChainInfo, SectorPreCommitInfo, SectorPreCommitOnChainInfo,
-            SECTORS_MAX,
+            MAX_SECTORS,
         },
         storage_provider::{StorageProviderInfo, StorageProviderState},
    };
@@ -67,33 +74,111 @@ pub mod pallet {
     pub trait Config: frame_system::Config {
         /// Because this pallet emits events, it depends on the runtime's definition of an event.
         type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+
         /// Peer ID is derived by hashing an encoded public key.
         /// Usually represented in bytes.
         /// https://github.com/libp2p/specs/blob/2ea41e8c769f1bead8e637a9d4ebf8c791976e8a/peer-ids/peer-ids.md#peer-ids
         /// More information about libp2p peer ids: https://docs.libp2p.io/concepts/fundamentals/peers/
         type PeerId: Clone + Debug + Decode + Encode + Eq + TypeInfo;
+
         /// Currency mechanism, used for collateral
         type Currency: ReservableCurrency<Self::AccountId>;
+
         /// Market trait implementation for activating deals
         type Market: Market<Self::AccountId, BlockNumberFor<Self>>;
-        /// Proving period for submitting Window PoSt, 24 hours is blocks
+
+        /// Window PoSt proving period — equivalent to 24 hours worth of blocks.
+        ///
+        /// During the proving period, storage providers submit Spacetime proofs over smaller
+        /// intervals that make it unreasonable to cheat the system; if they fail to provide a
+        /// proof in time, they will get slashed.
+        ///
+        /// In Filecoin, this concept starts with wall time — i.e. 24 hours — and is quantized into discrete blocks.
+        /// In our case, blocks are consistently produced every 12 seconds (5 blocks per
+        /// minute), so we instead work in block numbers only.
+        ///
+        /// For example, consider that the first proving period started at block `0`. To figure
+        /// out the proving period for an arbitrary block, we perform integer division between
+        /// the block number and the amount of blocks expected to be produced in 24 hours:
+        ///
+        /// ```text
+        /// proving_period = current_block // DAYS
+        /// ```
+        ///
+        /// If we produce 5 blocks per minute, in an hour we produce `60 * 5 = 300`; following
+        /// that, we produce `24 * 300 = 7200` blocks per day.
+        ///
+        /// Hence, if we're at block number `6873` we get `6873 // 7200 = 0`, meaning we are in
+        /// proving period `0`; moving forward, consider block `745711`: we get
+        /// `745711 // 7200 = 103`, thus we're in proving period `103`.
+        ///
+        /// References:
+        /// *
+        /// *
         #[pallet::constant]
         type WPoStProvingPeriod: Get<BlockNumberFor<Self>>;
+
+        /// Window PoSt challenge window — equivalent to 30 minutes worth of blocks.
+        ///
+        /// To better understand the following explanation, read [`WPoStProvingPeriod`] first.
+        ///
+        /// During the Window PoSt proving period, challenges are issued to storage providers to
+        /// prove they are still (correctly) storing the data they accepted; in the case of
+        /// failure the storage provider will get slashed and have the sector marked as faulty.
+        ///
+        /// Given that our system works around block numbers, we have time quantization by
+        /// default; however, it is still necessary to figure out where we stand in the current
+        /// challenge window.
+        ///
+        /// Since we know that, in Filecoin, each 24 hour period is subdivided into 30 minute
+        /// epochs, we also subdivide our 24 hour period by 48, just in blocks.
+        ///
+        /// Consider the block number `745711` (like in the [`WPoStProvingPeriod`]) and that
+        /// every 30 minutes we produce `150` blocks (`300 blocks / hour // 2`). To calculate
+        /// the current challenge window we perform the following steps:
+        ///
+        /// 1. calculate the current proving period — `745711 // 7200 = 103`
+        /// 2. calculate the start of said proving period — `103 * 7200 = 741600`
+        /// 3. calculate how many blocks elapsed since the beginning of said proving period —
+        ///    `745711 - 741600 = 4111`
+        /// 4. calculate the number of elapsed challenge windows — `4111 // 150 = 27`
+        ///
+        /// In some cases, it will be helpful to calculate the next deadline as well; picking up
+        /// where we left off, we perform the following steps:
+        ///
+        /// 5. calculate the block in which the current challenge window started —
+        ///    for the "sub-block" `27 * 150 = 4050`, and for the block `103 * 7200 + 4050 = 745650`
+        /// 6. calculate the next deadline — `745650 + 150 = 745800`
+        ///
+        /// References:
+        /// *
-        /// Window PoSt challenge window (default 30 minutes in blocks)
         #[pallet::constant]
         type WPoStChallengeWindow: Get<BlockNumberFor<Self>>;
+
+        /// Minimum number of blocks past the current block a sector may be set to expire.
         #[pallet::constant]
         type MinSectorExpiration: Get<BlockNumberFor<Self>>;
+
+        /// Maximum number of blocks past the current block a sector may be set to expire.
         #[pallet::constant]
         type MaxSectorExpirationExtension: Get<BlockNumberFor<Self>>;
+
+        /// Maximum number of blocks a sector can stay in pre-committed state.
         #[pallet::constant]
         type SectorMaximumLifetime: Get<BlockNumberFor<Self>>;
+
+        /// Maximum duration to allow for the sealing process for seal algorithms.
         #[pallet::constant]
         type MaxProveCommitDuration: Get<BlockNumberFor<Self>>;
+
+        /// Represents how many challenge deadlines there are in one proving period.
+        /// Closely tied to `WPoStChallengeWindow`.
+        #[pallet::constant]
+        type WPoStPeriodDeadlines: Get<u64>;
+
+        #[pallet::constant]
+        type MaxPartitionsPerDeadline: Get<u64>;
     }
 
     /// Need some storage type that keeps track of sectors, deadlines and terminations.
@@ -124,6 +209,8 @@ pub mod pallet {
             owner: T::AccountId,
             sector_number: SectorNumber,
         },
+        /// Emitted when an SP submits a valid PoSt.
+        ValidPoStSubmitted { owner: T::AccountId },
     }
 
     #[pallet::error]
@@ -142,8 +229,6 @@ pub mod pallet {
         InvalidProofType,
         /// Emitted when there is not enough funds to run an extrinsic.
         NotEnoughFunds,
-        /// Emitted when a storage provider tries to commit more sectors than MAX_SECTORS.
-        MaxPreCommittedSectorExceeded,
         /// Emitted when a sector fails to activate.
         SectorActivateFailed,
         /// Emitted when removing a pre_committed sector after proving fails.
         CouldNotRemoveSector,
@@ -165,6 +250,16 @@ pub mod pallet {
         /// Emitted when a prove commit is sent after the deadline.
         /// These pre-commits will be cleaned up in the hook.
         ProveCommitAfterDeadline,
+        /// Emitted when a PoSt supplied by the SP is invalid.
+        PoStProofInvalid,
+        /// Emitted when an invalid deadline is supplied when submitting a PoSt.
+        InvalidDeadlineSubmission,
+        /// Wrapper around the [`DeadlineError`] type.
+        DeadlineError(crate::deadline::DeadlineError),
+        /// Wrapper around the [`PartitionError`] type.
+        PartitionError(crate::partition::PartitionError),
+        /// Wrapper around the [`StorageProviderError`] type.
+        StorageProviderError(crate::storage_provider::StorageProviderError),
         /// Emitted when Market::verify_deals_for_activation fails for an unexpected reason.
         /// Verification happens in pre_commit, to make sure a sector is precommited with valid deals.
         CouldNotVerifySectorForPreCommit,
@@ -198,7 +293,12 @@ pub mod pallet {
             let deadline_idx =
                 current_deadline_index(current_block, period_start, T::WPoStChallengeWindow::get());
             let info = StorageProviderInfo::new(peer_id, window_post_proof_type);
-            let state = StorageProviderState::new(&info, period_start, deadline_idx);
+            let state = StorageProviderState::new(
+                &info,
+                period_start,
+                deadline_idx,
+                T::WPoStPeriodDeadlines::get(),
+            );
             StorageProviders::<T>::insert(&owner, state);
             // Emit event
             Self::deposit_event(Event::StorageProviderRegistered { owner, info });
@@ -223,7 +323,7 @@ pub mod pallet {
             let current_block = <frame_system::Pallet<T>>::block_number();
 
             ensure!(
-                sector_number <= SECTORS_MAX.into(),
+                sector_number <= MAX_SECTORS.into(),
                 Error::<T>::InvalidSector
             );
             ensure!(
@@ -286,7 +386,7 @@ pub mod pallet {
                     .ok_or(Error::<T>::StorageProviderNotFound)?;
                 sp.add_pre_commit_deposit(deposit)?;
                 sp.put_pre_committed_sector(sector_on_chain)
-                    .map_err(|_| Error::<T>::MaxPreCommittedSectorExceeded)?;
+                    .map_err(|e| Error::<T>::StorageProviderError(e))?;
                 Ok(())
             })?;
             Self::deposit_event(Event::SectorPreCommitted { owner, sector });
@@ -304,10 +404,13 @@ pub mod pallet {
             let sp = StorageProviders::<T>::try_get(&owner)
                 .map_err(|_| Error::<T>::StorageProviderNotFound)?;
             let sector_number = sector.sector_number;
-
+            ensure!(
+                sector_number <= MAX_SECTORS.into(),
+                Error::<T>::InvalidSector
+            );
             let precommit = sp
                 .get_pre_committed_sector(sector_number)
-                .map_err(|_| Error::<T>::InvalidSector)?;
+                .map_err(|e| Error::<T>::StorageProviderError(e))?;
             let current_block = <frame_system::Pallet<T>>::block_number();
             let prove_commit_due =
                 precommit.pre_commit_block_number + T::MaxProveCommitDuration::get();
@@ -328,10 +431,24 @@ pub mod pallet {
                 let sp = maybe_sp
                     .as_mut()
                     .ok_or(Error::<T>::StorageProviderNotFound)?;
-                sp.activate_sector(sector_number, new_sector)
-                    .map_err(|_| Error::<T>::SectorActivateFailed)?;
+                sp.activate_sector(sector_number, new_sector.clone())
+                    .map_err(|e| Error::<T>::StorageProviderError(e))?;
+                let mut new_sectors = BoundedVec::new();
+                new_sectors
+                    .try_push(new_sector)
+                    .expect("Infallible since only 1 element is inserted");
+                sp.assign_sectors_to_deadlines(
+                    current_block,
+                    new_sectors,
+                    sp.info.window_post_partition_sectors,
+                    T::MaxPartitionsPerDeadline::get(),
+                    T::WPoStChallengeWindow::get(),
+                    T::WPoStPeriodDeadlines::get(),
+                    T::WPoStProvingPeriod::get(),
+                )
+                .map_err(|e| Error::<T>::StorageProviderError(e))?;
                 sp.remove_pre_committed_sector(sector_number)
-                    .map_err(|_| Error::<T>::CouldNotRemoveSector)?;
+                    .map_err(|e| Error::<T>::StorageProviderError(e))?;
                 Ok(())
             })?;
@@ -350,6 +467,46 @@ pub mod pallet {
             Ok(())
         }
+
+        /// The SP uses this extrinsic to submit their Proof-of-Spacetime.
+        ///
+        /// * Proofs are checked with `validate_windowed_post`.
+        /// * Currently the proof is considered valid when `proof.len() > 0`.
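+        ///
+        /// A minimal shape expected to pass validation (mirroring the defaults of the
+        /// `SubmitWindowedPoStBuilder` test helper): deadline `0`, partition `1`, a
+        /// non-empty `proof_bytes` with the SP's registered proof type, and a
+        /// `chain_commit_block` strictly before the current block.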
+        pub fn submit_windowed_post(
+            origin: OriginFor<T>,
+            windowed_post: SubmitWindowedPoStParams<BlockNumberFor<T>>,
+        ) -> DispatchResult {
+            let owner = ensure_signed(origin)?;
+            let current_block = <frame_system::Pallet<T>>::block_number();
+            let mut sp = StorageProviders::<T>::try_get(&owner)
+                .map_err(|_| Error::<T>::StorageProviderNotFound)?;
+            if let Err(e) = Self::validate_windowed_post(
+                current_block,
+                &windowed_post,
+                sp.info.window_post_proof_type,
+            ) {
+                log::error!(target: LOG_TARGET, "submit_windowed_post: PoSt submission is invalid {e:?}");
+                return Err(e.into());
+            }
+            let current_deadline = sp
+                .deadline_info(
+                    current_block,
+                    T::WPoStChallengeWindow::get(),
+                    T::WPoStPeriodDeadlines::get(),
+                    T::WPoStProvingPeriod::get(),
+                )
+                .map_err(|e| Error::<T>::DeadlineError(e))?;
+            Self::validate_deadline(current_block, &current_deadline, &windowed_post)?;
+            let deadlines = sp.get_deadlines_mut();
+            log::debug!(target: LOG_TARGET, "submit_windowed_post: deadlines = {deadlines:#?}");
+            // Record the partition as proven.
+            deadlines
+                .record_proven(windowed_post.deadline as usize, windowed_post.partition)
+                .map_err(|e| Error::<T>::DeadlineError(e))?;
+            log::debug!(target: LOG_TARGET, "submit_windowed_post: proof recorded");
+            // Write the updated state back — `try_get` only returns a copy.
+            StorageProviders::<T>::insert(&owner, sp);
+            Self::deposit_event(Event::ValidPoStSubmitted { owner });
+            Ok(())
+        }
     }
 
     impl<T: Config> Pallet<T> {
@@ -380,6 +537,58 @@ pub mod pallet {
             );
             Ok(())
         }
+
+        /// Validates the SP's submitted PoSt by checking that:
+        /// - it has the correct proof type
+        /// - the proof length is > 0
+        /// - the chain commit block < current block
+        fn validate_windowed_post(
+            current_block: BlockNumberFor<T>,
+            windowed_post: &SubmitWindowedPoStParams<BlockNumberFor<T>>,
+            expected_proof: RegisteredPoStProof,
+        ) -> Result<(), Error<T>> {
+            ensure!(
+                windowed_post.proof.post_proof == expected_proof,
+                Error::<T>::InvalidProofType
+            );
+            // TODO(@aidan46, #91, 2024-07-03): Validate the proof after research is done
+            ensure!(
+                !windowed_post.proof.proof_bytes.is_empty(),
+                Error::<T>::PoStProofInvalid
+            );
+            // The chain commit block must be less than the current block.
+            ensure!(
+                windowed_post.chain_commit_block < current_block,
+                Error::<T>::PoStProofInvalid
+            );
+            Ok(())
+        }
+
+        /// Check whether the given deadline is valid for PoSt submission.
+        ///
+        /// Fails if:
+        /// - The given deadline is not open.
+        /// - There is a deadline index mismatch.
+        /// - The chain commit block of the submission is not before the current block.
+        fn validate_deadline(
+            curr_block: BlockNumberFor<T>,
+            current_deadline: &DeadlineInfo<BlockNumberFor<T>>,
+            post_params: &SubmitWindowedPoStParams<BlockNumberFor<T>>,
+        ) -> Result<(), Error<T>> {
+            ensure!(current_deadline.is_open(), {
+                log::error!(target: LOG_TARGET, "validate_deadline: {current_deadline:?}, deadline isn't open");
+                Error::<T>::InvalidDeadlineSubmission
+            });
+            ensure!(post_params.deadline == current_deadline.idx, {
+                log::error!(target: LOG_TARGET, "validate_deadline: given index does not match current index {} != {}", post_params.deadline, current_deadline.idx);
+                Error::<T>::InvalidDeadlineSubmission
+            });
+            ensure!(post_params.chain_commit_block < curr_block, {
+                log::error!(target: LOG_TARGET, "validate_deadline: chain commit block {:?} is not before the current block {curr_block:?}", post_params.chain_commit_block);
+                Error::<T>::InvalidDeadlineSubmission
+            });
+            Ok(())
+        }
     }
 
     // Adapted from filecoin reference here: https://github.com/filecoin-project/builtin-actors/blob/54236ae89880bf4aa89b0dba6d9060c3fd2aacee/actors/miner/src/commd.rs#L51-L56
diff --git a/pallets/storage-provider/src/partition.rs b/pallets/storage-provider/src/partition.rs
new file mode 100644
index 000000000..ec6ce6468
--- /dev/null
+++ b/pallets/storage-provider/src/partition.rs
@@ -0,0 +1,153 @@
+use core::cmp::Ord;
+
+use codec::{Decode, Encode};
+use frame_support::{pallet_prelude::*, sp_runtime::BoundedBTreeSet, PalletError};
+use primitives_proofs::SectorNumber;
+use scale_info::TypeInfo;
+
+use crate::{pallet::LOG_TARGET, sector::MAX_SECTORS};
+
+/// Max amount of partitions per deadline.
+/// ref:
+pub const MAX_PARTITIONS_PER_DEADLINE: u32 = 3000;
+
+pub type PartitionNumber = u32;
+
+#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
+pub struct Partition<BlockNumber> {
+    /// All sector numbers in this partition, including faulty, unproven and terminated sectors.
+    pub sectors: BoundedBTreeSet<SectorNumber, ConstU32<MAX_SECTORS>>,
+
+    /// Unproven sectors in this partition. This will be cleared on
+    /// a successful window post (or at the end of the partition's next
+    /// deadline). At that time, any still-unproven sectors will be added to
+    /// the faulty sectors.
+    pub unproven: BoundedBTreeSet<SectorNumber, ConstU32<MAX_SECTORS>>,
+
+    /// Subset of sectors detected/declared faulty and not yet recovered (excluded from PoSt).
+    /// The intersection of `faults` and `terminated` is always empty.
+    ///
+    /// Used in the `declare_faults` extrinsic.
+    /// TODO: Add helper method for adding faults.
+    pub faults: BoundedBTreeSet<SectorNumber, ConstU32<MAX_SECTORS>>,
+
+    /// Subset of faulty sectors expected to recover on next PoSt.
+    /// The intersection of `recoveries` and `terminated` is always empty.
+    ///
+    /// Used in the `declare_faults_recovered` extrinsic.
+    /// TODO: Add helper method for adding recoveries.
+    pub recoveries: BoundedBTreeSet<SectorNumber, ConstU32<MAX_SECTORS>>,
+
+    /// Subset of sectors terminated but not yet removed from the partition (excluded from PoSt).
+    /// TODO: Add helper method for adding terminated sectors.
+    pub terminated: BoundedBTreeSet<SectorNumber, ConstU32<MAX_SECTORS>>,
+
+    /// Sectors that were terminated before their committed expiration, indexed by termination block.
+    pub early_terminations: BoundedBTreeMap<
+        BlockNumber,
+        BoundedBTreeSet<SectorNumber, ConstU32<MAX_SECTORS>>,
+        ConstU32<MAX_SECTORS>,
+    >,
+}
+
+impl<BlockNumber> Partition<BlockNumber>
+where
+    BlockNumber: Ord,
+{
+    pub fn new() -> Self {
+        Self {
+            sectors: BoundedBTreeSet::new(),
+            unproven: BoundedBTreeSet::new(),
+            faults: BoundedBTreeSet::new(),
+            recoveries: BoundedBTreeSet::new(),
+            terminated: BoundedBTreeSet::new(),
+            early_terminations: BoundedBTreeMap::new(),
+        }
+    }
+
+    /// Live sectors are sectors that are not terminated (i.e. not in `terminated` or `early_terminations`).
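+    ///
+    /// E.g. with `sectors = {1, 2}` and `terminated = {1}`, the live sectors are `{2}`
+    /// (this mirrors the `live_sectors` unit test below).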
+    pub fn live_sectors(
+        &self,
+    ) -> Result<BoundedBTreeSet<SectorNumber, ConstU32<MAX_SECTORS>>, PartitionError> {
+        let mut live_sectors = BoundedBTreeSet::new();
+        let difference = self.sectors.difference(&self.terminated).cloned();
+        for sector_number in difference {
+            live_sectors
+                .try_insert(sector_number)
+                .map_err(|_| PartitionError::FailedToGetLiveSectors)?;
+        }
+        Ok(live_sectors)
+    }
+
+    /// Adds sectors to this partition.
+    /// The sectors are "live", i.e. neither faulty, recovering, nor terminated.
+    ///
+    /// Condition: the sector numbers cannot be present in any of the partition's `BoundedBTreeSet`s.
+    /// Fails if any of the given sector numbers is a duplicate.
+    pub fn add_sectors(&mut self, sectors: &[SectorNumber]) -> Result<(), PartitionError> {
+        let new_sectors = sectors.iter().cloned();
+        for sector_number in new_sectors {
+            // Ensure that the sector number has not been used before.
+            // All sector numbers (including faulty, terminated and unproven ones) are contained in `sectors`, so we only need to check there.
+            ensure!(!self.sectors.contains(&sector_number), {
+                log::error!(target: LOG_TARGET, "add_sectors: sector_number {sector_number:?} duplicate in sectors");
+                PartitionError::DuplicateSectorNumber
+            });
+            self.sectors
+                .try_insert(sector_number)
+                .map_err(|_| PartitionError::FailedToAddSector)?;
+        }
+        Ok(())
+    }
+}
+
+#[derive(Decode, Encode, PalletError, TypeInfo, RuntimeDebug)]
+pub enum PartitionError {
+    /// Emitted when trying to get the live sectors for a partition fails.
+    FailedToGetLiveSectors,
+    /// Emitted when adding sectors fails.
+    FailedToAddSector,
+    /// Emitted when trying to add a sector number that has already been used in this partition.
+    DuplicateSectorNumber,
+}
+
+#[cfg(test)]
+mod test {
+    use frame_support::sp_runtime::bounded_vec;
+
+    use super::*;
+
+    #[test]
+    fn add_sectors() -> Result<(), PartitionError> {
+        // Set up the partition, using `u64` as the block number because it is not relevant to this test.
+        let mut partition: Partition<u64> = Partition::new();
+        // Add some sectors.
+        let sectors_to_add: BoundedVec<SectorNumber, ConstU32<MAX_SECTORS>> = bounded_vec![1, 2];
+        partition.add_sectors(&sectors_to_add)?;
+        for sector_number in sectors_to_add {
+            assert!(partition.sectors.contains(&sector_number));
+        }
+        Ok(())
+    }
+
+    #[test]
+    fn live_sectors() -> Result<(), PartitionError> {
+        // Set up the partition, using `u64` as the block number because it is not relevant to this test.
+        let mut partition: Partition<u64> = Partition::new();
+        // Add some sectors.
+        partition.add_sectors(&[1, 2])?;
+        // Terminate a sector that is in the active sectors.
+        partition
+            .terminated
+            .try_insert(1)
+            .expect("Programmer error");
+        let live_sectors = partition.live_sectors()?;
+        // Create the expected result.
+        let mut expected_live_sectors: BoundedBTreeSet<SectorNumber, ConstU32<MAX_SECTORS>> =
+            BoundedBTreeSet::new();
+        expected_live_sectors
+            .try_insert(2)
+            .expect("Programmer error");
+        assert_eq!(live_sectors, expected_live_sectors);
+        Ok(())
+    }
+}
diff --git a/pallets/storage-provider/src/proofs.rs b/pallets/storage-provider/src/proofs.rs
index a0d90ac06..5c9da43e9 100644
--- a/pallets/storage-provider/src/proofs.rs
+++ b/pallets/storage-provider/src/proofs.rs
@@ -2,16 +2,35 @@ use codec::{Decode, Encode};
 use frame_support::{pallet_prelude::ConstU32, sp_runtime::BoundedVec};
 use primitives_proofs::RegisteredPoStProof;
 use scale_info::TypeInfo;
-use sp_arithmetic::traits::BaseArithmetic;
 use sp_core::blake2_64;
 
+use crate::partition::PartitionNumber;
+
 /// Proof of Spacetime data stored on chain.
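+/// E.g. the pallet tests build one with `post_proof:
+/// RegisteredPoStProof::StackedDRGWindow2KiBV1P1` and arbitrary non-empty
+/// `proof_bytes`; real proof verification is still a TODO in the pallet.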
 #[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone)]
 pub struct PoStProof {
+    /// The proof type, currently only one type is supported.
     pub post_proof: RegisteredPoStProof,
+    /// The proof submission, to be checked in the storage provider pallet.
     pub proof_bytes: BoundedVec<u8, ConstU32<256>>, // Arbitrary length
 }
 
+/// Parameter type for the `submit_windowed_post` extrinsic.
+// In Filecoin, the proof is an array of proofs, one per distinct registered proof type present in the sectors being proven.
+// Reference:
+// We differ here from Filecoin and do not support registration of different proof types.
+#[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone)]
+pub struct SubmitWindowedPoStParams<BlockNumber> {
+    /// The deadline index which the submission targets.
+    pub deadline: u64,
+    /// The partition being proven.
+    pub partition: PartitionNumber,
+    /// The proof submission.
+    pub proof: PoStProof,
+    /// The block at which this proof is being committed.
+    pub chain_commit_block: BlockNumber,
+}
+
 /// Error type for proof operations.
 #[derive(Debug)]
 pub enum ProofError {
@@ -29,7 +48,7 @@ pub(crate) fn assign_proving_period_offset(
 ) -> Result<BlockNumber, ProofError>
 where
     AccountId: Encode,
-    BlockNumber: BaseArithmetic + Encode + TryFrom<u64>,
+    BlockNumber: sp_runtime::traits::BlockNumber,
 {
     // Encode address and current block number
     let mut addr = addr.encode();
@@ -58,11 +77,11 @@ pub(crate) fn current_proving_period_start(
     proving_period: BlockNumber, // should be the max proving period
 ) -> BlockNumber
 where
-    BlockNumber: BaseArithmetic,
+    BlockNumber: sp_runtime::traits::BlockNumber,
 {
     // Use this value to calculate the proving period start, modulo the proving period so we cannot go over the max proving period;
     // the value represents how far into a proving period we are.
-    let how_far_into_proving_period = current_block.clone() % proving_period.clone();
+    let how_far_into_proving_period = current_block % proving_period;
     let period_progress = if how_far_into_proving_period >= offset {
         how_far_into_proving_period - offset
     } else {
@@ -82,7 +101,7 @@ pub(crate) fn current_deadline_index(
     challenge_window: BlockNumber,
 ) -> BlockNumber
 where
-    BlockNumber: BaseArithmetic,
+    BlockNumber: sp_runtime::traits::BlockNumber,
 {
     match current_block.checked_sub(&period_start) {
         Some(block) => block / challenge_window,
diff --git a/pallets/storage-provider/src/sector.rs b/pallets/storage-provider/src/sector.rs
index 8300d6062..393534c03 100644
--- a/pallets/storage-provider/src/sector.rs
+++ b/pallets/storage-provider/src/sector.rs
@@ -6,7 +6,7 @@ use primitives_proofs::{
 use scale_info::TypeInfo;
 
 // https://github.com/filecoin-project/builtin-actors/blob/17ede2b256bc819dc309edf38e031e246a516486/runtime/src/runtime/policy.rs#L262
-pub const SECTORS_MAX: u32 = 32 << 20;
+pub const MAX_SECTORS: u32 = 32 << 20;
 
 /// This type is passed into the pre commit function on the storage provider pallet
 #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
@@ -51,21 +51,26 @@ impl<Balance, BlockNumber> SectorPreCommitOnChainInfo<Balance, BlockNumber> {
     }
 }
 
-impl<Balance, BlockNumber: Clone> From<&SectorPreCommitOnChainInfo<Balance, BlockNumber>>
+impl<Balance, BlockNumber> From<&SectorPreCommitOnChainInfo<Balance, BlockNumber>>
     for SectorDeal<BlockNumber>
+where
+    BlockNumber: sp_runtime::traits::BlockNumber,
 {
     fn from(precommit: &SectorPreCommitOnChainInfo<Balance, BlockNumber>) -> Self {
         Self {
             sector_number: precommit.info.sector_number,
-            sector_expiry: precommit.info.expiration.clone(),
+            sector_expiry: precommit.info.expiration,
             sector_type: precommit.info.seal_proof.clone(),
             deal_ids: precommit.info.deal_ids.clone(),
         }
     }
 }
 
-#[derive(Debug, Decode, Encode, TypeInfo)]
-pub struct SectorOnChainInfo<BlockNumber> {
+#[derive(Clone, Debug, Decode, Encode, TypeInfo)]
+pub struct SectorOnChainInfo<BlockNumber>
+where
+    BlockNumber: sp_runtime::traits::BlockNumber,
+{
     pub sector_number: SectorNumber,
     /// The seal proof type implies the PoSt proofs
     pub seal_proof: RegisteredSealProof,
@@ -82,7 +87,10 @@ pub struct SectorOnChainInfo<BlockNumber> {
     pub unsealed_cid: SectorId,
 }
 
-impl<BlockNumber> SectorOnChainInfo<BlockNumber> {
+impl<BlockNumber> SectorOnChainInfo<BlockNumber>
+where
+    BlockNumber: sp_runtime::traits::BlockNumber,
+{
     pub fn from_pre_commit(
         pre_commit: SectorPreCommitInfo,
         activation: BlockNumber,
diff --git a/pallets/storage-provider/src/storage_provider.rs b/pallets/storage-provider/src/storage_provider.rs
index 8fc94c0c0..fdddb50b0 100644
--- a/pallets/storage-provider/src/storage_provider.rs
+++ b/pallets/storage-provider/src/storage_provider.rs
@@ -1,30 +1,48 @@
+extern crate alloc;
+
+use alloc::vec::Vec;
+
 use codec::{Decode, Encode};
 use frame_support::{
     pallet_prelude::{ConstU32, RuntimeDebug},
-    sp_runtime::BoundedBTreeMap,
+    sp_runtime::{BoundedBTreeMap, BoundedVec},
+    PalletError,
 };
 use primitives_proofs::{RegisteredPoStProof, SectorNumber, SectorSize};
 use scale_info::TypeInfo;
 use sp_arithmetic::{traits::BaseArithmetic, ArithmeticError};
 
-use crate::sector::{SectorOnChainInfo, SectorPreCommitOnChainInfo, SECTORS_MAX};
+use crate::{
+    deadline::{
+        assign_deadlines, deadline_is_mutable, Deadline, DeadlineError, DeadlineInfo, Deadlines,
+    },
+    pallet::LOG_TARGET,
+    sector::{SectorOnChainInfo, SectorPreCommitOnChainInfo, MAX_SECTORS},
+};
 
 /// This struct holds the state of a single storage provider.
 #[derive(Debug, Decode, Encode, TypeInfo)]
-pub struct StorageProviderState<PeerId, Balance, BlockNumber> {
+pub struct StorageProviderState<PeerId, Balance, BlockNumber>
+where
+    BlockNumber: sp_runtime::traits::BlockNumber,
+{
     /// Contains static information about this storage provider
     pub info: StorageProviderInfo<PeerId>,
+
+    /// Information for all proven and not-yet-garbage-collected sectors.
     pub sectors:
-        BoundedBTreeMap<SectorNumber, SectorOnChainInfo<BlockNumber>, ConstU32<SECTORS_MAX>>, // Cannot use ConstU64 here because of BoundedBTreeMap trait bound `Get<u32>`
+        BoundedBTreeMap<SectorNumber, SectorOnChainInfo<BlockNumber>, ConstU32<MAX_SECTORS>>, // Cannot use ConstU64 here because of BoundedBTreeMap trait bound `Get<u32>`
+
+    /// Total funds locked as pre_commit_deposit
     pub pre_commit_deposits: Balance,
+
+    /// Sectors that have been pre-committed but not yet proven.
     pub pre_committed_sectors: BoundedBTreeMap<
         SectorNumber,
         SectorPreCommitOnChainInfo<Balance, BlockNumber>,
-        ConstU32<SECTORS_MAX>, // Cannot use ConstU64 here because of BoundedBTreeMap trait bound `Get<u32>`
+        ConstU32<MAX_SECTORS>, // Cannot use ConstU64 here because of BoundedBTreeMap trait bound `Get<u32>`
     >,
+
     /// The first block in this storage provider's current proving period. This is the first block in which a PoSt for a
     /// partition at the storage provider's first deadline may arrive. Alternatively, it is after the last block at which
     /// a PoSt for the previous window is valid.
@@ -33,22 +51,39 @@ pub struct StorageProviderState<PeerId, Balance, BlockNumber> {
     /// PoSt requirements.
     /// Updated at the end of every period.
     pub proving_period_start: BlockNumber,
+
     /// Index of the deadline within the proving period beginning at ProvingPeriodStart that has not yet been
     /// finalized.
     /// Updated at the end of each deadline window.
     pub current_deadline: BlockNumber,
+
+    /// Deadlines indexed by their deadline index within the proving period — e.g. deadline 7 is
+    /// found in `deadlines.due[7]` — deadline indexes lie in the interval `[0, 47]`.
+    ///
+    /// Bounded to 48 elements since that's the set amount of deadlines per proving period.
+    ///
+    /// In the original implementation, the information is kept in a separate structure, possibly
+    /// to make fetching the state more efficient as this is kept in the storage provider's
+    /// blockstore. However, we're keeping all the state on-chain.
+    ///
+    /// References:
+    /// *
+    /// *
+    /// *
+    pub deadlines: Deadlines<BlockNumber>,
 }
 
 impl<PeerId, Balance, BlockNumber> StorageProviderState<PeerId, Balance, BlockNumber>
 where
     PeerId: Clone + Decode + Encode + TypeInfo,
-    BlockNumber: Decode + Encode + TypeInfo,
+    BlockNumber: sp_runtime::traits::BlockNumber,
     Balance: BaseArithmetic,
 {
     pub fn new(
         info: &StorageProviderInfo<PeerId>,
         period_start: BlockNumber,
         deadline_idx: BlockNumber,
+        w_post_period_deadlines: u64,
     ) -> Self {
         Self {
             info: info.clone(),
@@ -57,6 +92,7 @@ where
             pre_committed_sectors: BoundedBTreeMap::new(),
             proving_period_start: period_start,
             current_deadline: deadline_idx,
+            deadlines: Deadlines::new(w_post_period_deadlines),
         }
     }
 
@@ -117,10 +153,126 @@ where
             .map_err(|_| StorageProviderError::SectorNumberInUse)?;
         Ok(())
     }
+
+    /// Assign new sectors to deadlines.
+    pub fn assign_sectors_to_deadlines(
+        &mut self,
+        current_block: BlockNumber,
+        mut sectors: BoundedVec<SectorOnChainInfo<BlockNumber>, ConstU32<MAX_SECTORS>>,
+        partition_size: u64,
+        max_partitions_per_deadline: u64,
+        w_post_challenge_window: BlockNumber,
+        w_post_period_deadlines: u64,
+        w_post_proving_period: BlockNumber,
+    ) -> Result<(), StorageProviderError> {
+        let deadlines = &self.deadlines;
+        sectors.sort_by_key(|info| info.sector_number);
+        let mut deadline_vec: Vec<Option<Deadline<BlockNumber>>> =
+            (0..w_post_period_deadlines).map(|_| None).collect();
+        log::debug!(target: LOG_TARGET,
+            "assign_sectors_to_deadlines: deadline len = {}",
+            deadlines.len()
+        );
+        let proving_period_start = self.current_proving_period_start(
+            current_block,
+            w_post_challenge_window,
+            w_post_period_deadlines,
+            w_post_proving_period,
+        )?;
+        deadlines.due.iter().enumerate().try_for_each(
+            |(deadline_idx, deadline)| -> Result<(), DeadlineError> {
+                // Skip deadlines that aren't currently mutable.
+                if deadline_is_mutable(
+                    proving_period_start,
+                    deadline_idx as u64,
+                    current_block,
+                    w_post_challenge_window,
+                    w_post_period_deadlines,
+                    w_post_proving_period,
+                )? {
+                    deadline_vec[deadline_idx] = Some(deadline.clone());
+                }
+                Ok(())
+            },
+        )?;
+        let deadline_to_sectors = assign_deadlines(
+            max_partitions_per_deadline,
+            partition_size,
+            &deadline_vec,
+            &sectors,
+            w_post_period_deadlines,
+        )?;
+        let deadlines = self.get_deadlines_mut();
+        for (deadline_idx, deadline_sectors) in deadline_to_sectors.enumerate() {
+            if deadline_sectors.is_empty() {
+                continue;
+            }
+
+            let deadline =
+                deadline_vec[deadline_idx]
+                    .as_mut()
+                    .ok_or(StorageProviderError::DeadlineError(
+                        DeadlineError::CouldNotAssignSectorsToDeadlines,
+                    ))?;
+
+            deadline.add_sectors(partition_size, &deadline_sectors)?;
+
+            deadlines
+                .update_deadline(deadline_idx, deadline.clone())
+                .map_err(|e| StorageProviderError::DeadlineError(e))?;
+        }
+        Ok(())
+    }
+
+    /// Returns the current proving period start, according to the current block and the constant
+    /// state offset.
+    fn current_proving_period_start(
+        &self,
+        current_block: BlockNumber,
+        w_post_challenge_window: BlockNumber,
+        w_post_period_deadlines: u64,
+        w_post_proving_period: BlockNumber,
+    ) -> Result<BlockNumber, DeadlineError> {
+        let dl_info = self.deadline_info(
+            current_block,
+            w_post_challenge_window,
+            w_post_period_deadlines,
+            w_post_proving_period,
+        )?;
+        Ok(dl_info.period_start)
+    }
+
+    /// Simple getter for mutable deadlines.
+
+    /// Simple getter for mutable deadlines.
+    pub fn get_deadlines_mut(&mut self) -> &mut Deadlines<BlockNumber> {
+        &mut self.deadlines
+    }
+
+    /// Returns deadline calculations for the current (according to state) proving period.
+    pub fn deadline_info(
+        &self,
+        current_block: BlockNumber,
+        w_post_challenge_window: BlockNumber,
+        w_post_period_deadlines: u64,
+        w_post_proving_period: BlockNumber,
+    ) -> Result<DeadlineInfo<BlockNumber>, DeadlineError> {
+        // Blocks elapsed since the period started, divided by the challenge window length,
+        // yield the index of the deadline currently open for proving.
+        let current_deadline_index =
+            (current_block - self.proving_period_start) / w_post_challenge_window;
+        // Convert to u64.
+        let current_deadline_index: u64 = current_deadline_index
+            .try_into()
+            .map_err(|_| DeadlineError::CouldNotConstructDeadlineInfo)?;
+        DeadlineInfo::new(
+            current_block,
+            self.proving_period_start,
+            current_deadline_index,
+            w_post_period_deadlines,
+            w_post_challenge_window,
+            w_post_proving_period,
+        )
+    }
 }
 
 /// Errors that can occur while interacting with the storage provider state.
-#[derive(RuntimeDebug)]
+#[derive(Decode, Encode, PalletError, TypeInfo, RuntimeDebug)]
 pub enum StorageProviderError {
     /// Happens when an SP tries to pre-commit more sectors than SECTOR_MAX.
     MaxPreCommittedSectorExceeded,
@@ -128,6 +280,14 @@ pub enum StorageProviderError {
     SectorNotFound,
     /// Happens when a sector number is already in use.
     SectorNumberInUse,
+    /// Wrapper around [`DeadlineError`]
+    DeadlineError(crate::deadline::DeadlineError),
+}
+
+impl From<DeadlineError> for StorageProviderError {
+    fn from(dl_err: DeadlineError) -> Self {
+        Self::DeadlineError(dl_err)
+    }
 }
 
 /// Static information about the storage provider.
diff --git a/pallets/storage-provider/src/tests/mod.rs b/pallets/storage-provider/src/tests/mod.rs
index b609be62c..27864b6b9 100644
--- a/pallets/storage-provider/src/tests/mod.rs
+++ b/pallets/storage-provider/src/tests/mod.rs
@@ -16,12 +16,19 @@ use sp_runtime::{
     BuildStorage, MultiSignature, MultiSigner,
 };
 
-use crate::{self as pallet_storage_provider, pallet::CID_CODEC, sector::SectorPreCommitInfo};
+use crate::{
+    self as pallet_storage_provider,
+    pallet::CID_CODEC,
+    partition::PartitionNumber,
+    proofs::{PoStProof, SubmitWindowedPoStParams},
+    sector::SectorPreCommitInfo,
+};
 
 mod pre_commit_sector;
 mod prove_commit_sector;
 mod state;
 mod storage_provider_registration;
+mod submit_windowed_post;
 
 type Block = frame_system::mocking::MockBlock<Test>;
 type BlockNumber = u64;
@@ -86,6 +93,8 @@ parameter_types! {
     pub const MaxSectorExpirationExtension: BlockNumber = 1278 * DAYS;
     pub const SectorMaximumLifetime: BlockNumber = YEARS * 5;
     pub const MaxProveCommitDuration: BlockNumber = (30 * DAYS) + 150;
+    pub const WPoStPeriodDeadlines: u64 = 48;
+    pub const MaxPartitionsPerDeadline: u64 = 3000;
 }
 
 impl pallet_storage_provider::Config for Test {
@@ -99,6 +108,8 @@ impl pallet_storage_provider::Config for Test {
     type MaxSectorExpirationExtension = MaxSectorExpirationExtension;
     type SectorMaximumLifetime = SectorMaximumLifetime;
     type MaxProveCommitDuration = MaxProveCommitDuration;
+    type WPoStPeriodDeadlines = WPoStPeriodDeadlines;
+    type MaxPartitionsPerDeadline = MaxPartitionsPerDeadline;
 }
 
 type AccountIdOf<T> = <T as frame_system::Config>::AccountId;
@@ -379,3 +390,42 @@ impl DealProposalBuilder {
         signed
     }
 }
+
+struct SubmitWindowedPoStBuilder {
+    deadline: u64,
+    partition: PartitionNumber,
+    proof: PoStProof,
+    chain_commit_block: BlockNumber,
+}
+
+impl SubmitWindowedPoStBuilder {
+    pub(crate) fn chain_commit_block(self, chain_commit_block: BlockNumber) -> Self {
+        Self {
+            chain_commit_block,
+            ..self
+        }
+    }
+
+    pub(crate) fn build(self) -> SubmitWindowedPoStParams {
+        SubmitWindowedPoStParams {
+            deadline: self.deadline,
+            partition: self.partition,
+            proof: self.proof,
+            chain_commit_block: self.chain_commit_block,
+        }
+    }
+}
+
+impl Default for SubmitWindowedPoStBuilder {
+    fn default() -> Self {
+        Self {
+            deadline: 0,
+            partition: 1,
+            proof: PoStProof {
+                post_proof: RegisteredPoStProof::StackedDRGWindow2KiBV1P1,
+                proof_bytes: bounded_vec![0x1, 0x2, 0x3],
+            },
+            chain_commit_block: System::block_number(),
+        }
+    }
+}
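+
+// Illustrative usage of the builder above, mirroring the `submit_windowed_post` test below:
+//
+//     let params = SubmitWindowedPoStBuilder::default()
+//         .chain_commit_block(System::block_number() - 1)
+//         .build();
+//
+// `build` consumes the builder and yields the `SubmitWindowedPoStParams` handed to the
+// `submit_windowed_post` extrinsic.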
diff --git a/pallets/storage-provider/src/tests/pre_commit_sector.rs b/pallets/storage-provider/src/tests/pre_commit_sector.rs
index b6e7f4b46..c870cdfa7 100644
--- a/pallets/storage-provider/src/tests/pre_commit_sector.rs
+++ b/pallets/storage-provider/src/tests/pre_commit_sector.rs
@@ -5,7 +5,7 @@ use sp_runtime::{BoundedVec, DispatchError};
 use super::new_test_ext;
 use crate::{
     pallet::{Error, Event, StorageProviders},
-    sector::SECTORS_MAX,
+    sector::MAX_SECTORS,
     tests::{
         account, cid_of, events, publish_deals, register_storage_provider, run_to_block, Balances,
         MaxProveCommitDuration, MaxSectorExpirationExtension, RuntimeEvent, RuntimeOrigin,
@@ -206,7 +206,7 @@ fn fails_invalid_sector() {
 
         // Sector to be pre-committed
         let sector = SectorPreCommitInfoBuilder::default()
-            .sector_number(SECTORS_MAX as u64 + 1)
+            .sector_number(MAX_SECTORS as u64 + 1)
             .build();
 
         // Run pre commit extrinsic
diff --git a/pallets/storage-provider/src/tests/prove_commit_sector.rs b/pallets/storage-provider/src/tests/prove_commit_sector.rs
index a7c379ef9..ad2f50705 100644
--- a/pallets/storage-provider/src/tests/prove_commit_sector.rs
+++ b/pallets/storage-provider/src/tests/prove_commit_sector.rs
@@ -6,6 +6,7 @@ use super::{new_test_ext, MaxProveCommitDuration};
 use crate::{
     pallet::{Error, Event, StorageProviders},
     sector::ProveCommitSector,
+    storage_provider::StorageProviderError,
     tests::{
         account, events, publish_deals, register_storage_provider, run_to_block, Balances,
         RuntimeEvent, RuntimeOrigin, SectorPreCommitInfoBuilder, StorageProvider, System, Test,
@@ -132,7 +133,7 @@ fn fails_storage_precommit_missing() {
                 RuntimeOrigin::signed(account(storage_provider)),
                 sector
             ),
-            Error::<Test>::InvalidSector,
+            Error::<Test>::StorageProviderError(StorageProviderError::SectorNotFound),
         );
     });
 }
diff --git a/pallets/storage-provider/src/tests/submit_windowed_post.rs b/pallets/storage-provider/src/tests/submit_windowed_post.rs
new file mode 100644
index 000000000..e6e458fb1
--- /dev/null
+++ b/pallets/storage-provider/src/tests/submit_windowed_post.rs
@@ -0,0 +1,98 @@
+use frame_support::assert_ok;
+use sp_core::bounded_vec;
+
+use crate::{
+    pallet::{Event, StorageProviders},
+    sector::ProveCommitSector,
+    tests::{
+        account, events, new_test_ext, register_storage_provider, run_to_block,
+        DealProposalBuilder, Market, RuntimeEvent, RuntimeOrigin, SectorPreCommitInfoBuilder,
+        StorageProvider, SubmitWindowedPoStBuilder, System, Test, ALICE, BOB,
+    },
+};
+
+#[test]
+fn submit_windowed_post() {
+    new_test_ext().execute_with(|| {
+        // Setup accounts
+        let storage_provider = ALICE;
+        let storage_client = BOB;
+
+        // Register storage provider
+        register_storage_provider(account(storage_provider));
+
+        // Add balance to the market pallet
+        assert_ok!(Market::add_balance(
+            RuntimeOrigin::signed(account(storage_provider)),
+            60
+        ));
+        assert_ok!(Market::add_balance(
+            RuntimeOrigin::signed(account(storage_client)),
+            70
+        ));
+
+        // Generate a deal proposal
+        let deal_proposal = DealProposalBuilder::default()
+            .client(storage_client)
+            .provider(storage_provider)
+            .signed(storage_client);
+
+        // Publish the deal proposal
+        assert_ok!(Market::publish_storage_deals(
+            RuntimeOrigin::signed(account(storage_provider)),
+            bounded_vec![deal_proposal],
+        ));
+
+        // Sector to be pre-committed and proven
+        let sector_number = 1;
+
+        // Sector data
+        let sector = SectorPreCommitInfoBuilder::default()
+            .sector_number(sector_number)
+            .deals(vec![0])
+            .build();
+
+        // Run pre commit extrinsic
+        assert_ok!(StorageProvider::pre_commit_sector(
+            RuntimeOrigin::signed(account(storage_provider)),
+            sector.clone()
+        ));
+
+        // Prove commit sector
+        let sector = ProveCommitSector {
+            sector_number,
+            proof: bounded_vec![0xd, 0xe, 0xa, 0xd],
+        };
+
+        assert_ok!(StorageProvider::prove_commit_sector(
+            RuntimeOrigin::signed(account(storage_provider)),
+            sector
+        ));
+        // Remove any events that were triggered until now.
+        System::reset_events();
+        // Advance the chain to a block where the windowed PoSt can be submitted.
+        run_to_block(6700);
+        // Setup is done; build the windowed PoSt proof.
+        let windowed_post = SubmitWindowedPoStBuilder::default()
+            .chain_commit_block(System::block_number() - 1)
+            .build();
+        // Run extrinsic and assert that the result is `Ok`
+        assert_ok!(StorageProvider::submit_windowed_post(
+            RuntimeOrigin::signed(account(ALICE)),
+            windowed_post,
+        ));
+        // Check that the expected events were emitted
+        assert_eq!(
+            events(),
+            [RuntimeEvent::StorageProvider(
+                Event::<Test>::ValidPoStSubmitted {
+                    owner: account(ALICE)
+                }
+            )]
+        );
+        let state = StorageProviders::<Test>::get(account(ALICE)).unwrap();
+        let deadlines = state.deadlines;
+        let new_dl = deadlines.due.first().expect("Programmer error");
+        assert_eq!(new_dl.live_sectors, 1);
+        assert_eq!(new_dl.total_sectors, 1);
+    });
+}
diff --git a/runtime/src/configs/mod.rs b/runtime/src/configs/mod.rs
index ca7142d45..e567d1f0d 100644
--- a/runtime/src/configs/mod.rs
+++ b/runtime/src/configs/mod.rs
@@ -322,6 +322,8 @@ parameter_types! {
     pub const MaxSectorExpirationExtension: BlockNumber = 1278 * DAYS;
     pub const SectorMaximumLifetime: BlockNumber = (365 * DAYS) * 5; // 5 years
     pub const MaxProveCommitDuration: BlockNumber = (30 * DAYS) + 150;
+    pub const WPoStPeriodDeadlines: u64 = 48;
+    pub const MaxPartitionsPerDeadline: u64 = 3000;
 
     // Market Pallet
     /// Deal duration values copied from FileCoin.
@@ -340,6 +342,8 @@ parameter_types! {
     pub const MaxSectorExpirationExtension: BlockNumber = 60 * MINUTES;
     pub const SectorMaximumLifetime: BlockNumber = 120 * MINUTES;
     pub const MaxProveCommitDuration: BlockNumber = 5 * MINUTES;
+    pub const WPoStPeriodDeadlines: u64 = 48;
+    pub const MaxPartitionsPerDeadline: u64 = 3000;
 
     // Market Pallet
     pub const TimeUnitInBlocks: u64 = MINUTES;
@@ -358,6 +362,8 @@ impl pallet_storage_provider::Config for Runtime {
     type MaxSectorExpirationExtension = MaxSectorExpirationExtension;
     type SectorMaximumLifetime = SectorMaximumLifetime;
     type MaxProveCommitDuration = MaxProveCommitDuration;
+    type WPoStPeriodDeadlines = WPoStPeriodDeadlines;
+    type MaxPartitionsPerDeadline = MaxPartitionsPerDeadline;
 }
 
 parameter_types! {