diff --git a/benches/Cargo.toml b/benches/Cargo.toml
index a167b9f6e..fb49e6339 100644
--- a/benches/Cargo.toml
+++ b/benches/Cargo.toml
@@ -10,8 +10,8 @@ async-channel = "1.4.0"
 v1 = { path="../protocols/v1", package="sv1_api", version = "^1.0.0" }
 serde_json = { version = "1.0.64", default-features = false, features = ["alloc"] }
 iai="0.1"
-mining_sv2 = { path = "../protocols/v2/subprotocols/mining", version = "^1.0.0" }
-roles_logic_sv2 = { path = "../protocols/v2/roles-logic-sv2", version = "^1.0.0" }
+mining_sv2 = { path = "../protocols/v2/subprotocols/mining", version = "^2.0.0" }
+roles_logic_sv2 = { path = "../protocols/v2/roles-logic-sv2", version = "^2.0.0" }
 framing_sv2 = { version = "3.0.0", path = "../protocols/v2/framing-sv2" }
 serde = { version = "1.0.89", default-features = false, features = ["derive", "alloc"] }
 num-bigint = "0.4.3"
diff --git a/protocols/fuzz-tests/Cargo.toml b/protocols/fuzz-tests/Cargo.toml
index 5f0ee7ef6..fabb3d2b2 100644
--- a/protocols/fuzz-tests/Cargo.toml
+++ b/protocols/fuzz-tests/Cargo.toml
@@ -19,7 +19,7 @@ arbitrary = { version = "1", features = ["derive"] }
 rand = "0.8.3"
 binary_codec_sv2 = { version = "1.0.0", path = "../v2/binary-sv2/no-serde-sv2/codec"}
 codec_sv2 = { version = "1.0.0", path = "../v2/codec-sv2", features = ["noise_sv2"]}
-roles_logic_sv2 = { version = "1.0.0", path = "../v2/roles-logic-sv2"}
+roles_logic_sv2 = { version = "2.0.0", path = "../v2/roles-logic-sv2"}
 affinity = "0.1.1"
 threadpool = "1.8.1"
 lazy_static = "1.4.0"
diff --git a/protocols/v2/noise-sv2/src/signature_message.rs b/protocols/v2/noise-sv2/src/signature_message.rs
index c82e4bd08..954e22b51 100644
--- a/protocols/v2/noise-sv2/src/signature_message.rs
+++ b/protocols/v2/noise-sv2/src/signature_message.rs
@@ -76,7 +76,7 @@ impl SignatureNoiseMessage {
             .duration_since(SystemTime::UNIX_EPOCH)
             .unwrap()
             .as_secs() as u32;
-        if self.valid_from <= now && self.not_valid_after >= now {
+        if self.valid_from.saturating_sub(10) <= now && self.not_valid_after.saturating_add(10) >= now {
             let secp = Secp256k1::verification_only();
             let (m, s) = self.split();
             // m = SHA-256(version || valid_from || not_valid_after || server_static_key)
diff --git a/protocols/v2/roles-logic-sv2/Cargo.toml b/protocols/v2/roles-logic-sv2/Cargo.toml
index 941f027e1..3d3b3d876 100644
--- a/protocols/v2/roles-logic-sv2/Cargo.toml
+++ b/protocols/v2/roles-logic-sv2/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "roles_logic_sv2"
-version = "1.2.2"
+version = "2.0.0"
 authors = ["The Stratum V2 Developers"]
 edition = "2018"
 readme = "README.md"
@@ -18,7 +18,7 @@ stratum-common = { version="1.0.0", path = "../../../common", features=["bitcoin
 serde = { version = "1.0.89", features = ["derive", "alloc"], default-features = false, optional = true}
 binary_sv2 = {version = "^1.0.0", path = "../../../protocols/v2/binary-sv2/binary-sv2", default-features = true }
 common_messages_sv2 = { path = "../../../protocols/v2/subprotocols/common-messages", version = "^2.0.0" }
-mining_sv2 = { path = "../../../protocols/v2/subprotocols/mining", version = "^1.0.0" }
+mining_sv2 = { path = "../../../protocols/v2/subprotocols/mining", version = "^2.0.0" }
 template_distribution_sv2 = { path = "../../../protocols/v2/subprotocols/template-distribution", version = "^1.0.1" }
 job_declaration_sv2 = { path = "../../../protocols/v2/subprotocols/job-declaration", version = "^1.0.0" }
 const_sv2 = { version = "^3.0.0", path = "../../../protocols/v2/const-sv2"}
@@ -46,4 +46,4 @@ prop_test = ["template_distribution_sv2/prop_test"]
 disable_nopanic = []
 [package.metadata.docs.rs]
-all-features = true
\ No newline at end of file
+all-features = true
diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs
index 943349ebd..bb45a78b7 100644
--- a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs
+++ b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs
@@ -10,12 +10,16 @@ use crate::{
 use mining_sv2::{
     ExtendedExtranonce, NewExtendedMiningJob, NewMiningJob, OpenExtendedMiningChannelSuccess,
     OpenMiningChannelError, OpenStandardMiningChannelSuccess, SetCustomMiningJob,
-    SetCustomMiningJobSuccess, SetNewPrevHash, SubmitSharesError, SubmitSharesExtended,
-    SubmitSharesStandard, Target,
+    SetCustomMiningJobSuccess, SetExtranoncePrefix, SetNewPrevHash, SubmitSharesError,
+    SubmitSharesExtended, SubmitSharesStandard, Target,
 };
 use nohash_hasher::BuildNoHashHasher;
-use std::{collections::HashMap, convert::TryInto, sync::Arc};
+use std::{
+    collections::{HashMap, HashSet},
+    convert::TryInto,
+    sync::Arc,
+};
 use template_distribution_sv2::{NewTemplate, SetNewPrevHash as SetNewPrevHashFromTp};
 use tracing::{debug, error, info, trace, warn};
 
@@ -210,13 +214,84 @@ struct ChannelFactory {
     last_prev_hash: Option<(StagedPhash, Vec<u32>)>,
     last_prev_hash_: Option<bitcoin::hash_types::BlockHash>,
     // (NewExtendedMiningJob,group ids that already received the job)
-    last_valid_job: Option<(NewExtendedMiningJob<'static>, Vec<u32>)>,
+    last_valid_jobs: [Option<(NewExtendedMiningJob<'static>, Vec<u32>)>; 3],
+    // Maps job_id to the index of the corresponding job in last_valid_jobs
+    id_to_job: HashMap<u32, u8, BuildNoHashHasher<u32>>,
+    // Used to understand which is the last added element in last_valid_jobs
+    added_elements: usize,
     kind: ExtendedChannelKind,
     job_ids: Id,
     channel_to_group_id: HashMap<u32, u32, BuildNoHashHasher<u32>>,
     future_templates: HashMap<u64, NewTemplate<'static>, BuildNoHashHasher<u64>>,
 }
 
+impl ChannelFactory {
+    // TODO channels in groups channel must be handled in a different way
+    // get the group_id construct group_id ++ channel_id and remove it
+    // this will be done in a future PR since no one is using them
+    fn close_channel(&mut self, channel_id: u32) {
+        self.standard_channels_for_hom_downstreams
+            .remove(&channel_id);
+        self.extended_channels.remove(&channel_id);
+    }
+}
+
+impl ChannelFactory {
+    fn add_valid_job(&mut self, job: NewExtendedMiningJob<'static>, group_ids: Vec<u32>) {
+        match self.last_valid_jobs {
+            [None, None, None] => {
+                self.id_to_job.insert(job.job_id, 0);
+                self.last_valid_jobs[0] = Some((job, group_ids));
+                self.added_elements = 1;
+            }
+            [Some(_), None, None] => {
+                self.id_to_job.insert(job.job_id, 1);
+                self.last_valid_jobs[1] = Some((job, group_ids));
+                self.added_elements = 2;
+            }
+            [Some(_), Some(_), None] => {
+                self.id_to_job.insert(job.job_id, 2);
+                self.last_valid_jobs[2] = Some((job, group_ids));
+                self.added_elements = 3;
+            }
+            [Some(_), Some(_), Some(_)] => {
+                let to_remove = self.added_elements % 3;
+                self.id_to_job.retain(|_, v| *v != to_remove as u8);
+                self.id_to_job.insert(job.job_id, to_remove as u8);
+                self.last_valid_jobs[to_remove] = Some((job, group_ids));
+                self.added_elements += 1;
+            }
+            _ => panic!("Internal error: invalid last_valid_jobs state"),
+        }
+    }
+    fn get_valid_job(&self, job_id: u32) -> Option<&(NewExtendedMiningJob<'static>, Vec<u32>)> {
+        let index = self.id_to_job.get(&job_id)?;
+        self.last_valid_jobs[*index as usize].as_ref()
+    }
+    fn get_last_valid_job(&self) -> Option<&(NewExtendedMiningJob<'static>, Vec<u32>)> {
+        let index = self.get_last_valid_job_index()?;
+        self.last_valid_jobs[index as usize].as_ref()
+    }
+    fn get_mut_last_valid_job(
+        &mut self,
+    ) -> Option<&mut (NewExtendedMiningJob<'static>, Vec<u32>)> {
+        let index = self.get_last_valid_job_index()?;
+        self.last_valid_jobs[index as usize].as_mut()
+    }
+    fn get_last_valid_job_index(&self) -> Option<u8> {
+        match self.last_valid_jobs {
+            [None, None, None] => None,
+            [Some(_), None, None] => Some(0),
+            [Some(_), Some(_), None] => Some(1),
+            [Some(_), Some(_), Some(_)] => Some(2),
+            _ => panic!("Internal error: invalid last_valid_jobs state"),
+        }
+    }
+    fn clear_valid_jobs(&mut self) {
+        self.last_valid_jobs = [None, None, None];
+        self.id_to_job.clear();
+        self.added_elements = 0;
+    }
+}
+
 impl ChannelFactory {
     pub fn add_standard_channel(
         &mut self,
@@ -224,37 +299,42 @@ impl ChannelFactory {
         downstream_hash_rate: f32,
         is_header_only: bool,
         id: u32,
+        additional_coinbase_script_data: Option<&[u8]>,
     ) -> Result<Vec<Mining>, Error> {
         match is_header_only {
-            true => {
-                self.new_standard_channel_for_hom_downstream(request_id, downstream_hash_rate, id)
-            }
+            true => self.new_standard_channel_for_hom_downstream(
+                request_id,
+                downstream_hash_rate,
+                id,
+                additional_coinbase_script_data,
+            ),
             false => self.new_standard_channel_for_non_hom_downstream(
                 request_id,
                 downstream_hash_rate,
                 id,
+                additional_coinbase_script_data,
             ),
         }
     }
     /// Called when a `OpenExtendedMiningChannel` message is received.
     /// Here we save the downstream's target (based on hashrate) and the
     /// channel's extranonce details before returning the relevant SV2 mining messages
-    /// to be sent downstream. For the mining messages, we will first return an
-    /// `OpenExtendedMiningChannelSuccess` if the channel is successfully opened. Then we add
-    /// the `NewExtendedMiningJob` and `SetNewPrevHash` messages if the relevant data is
-    /// available. If the channel opening fails, we return `OpenExtenedMiningChannelError`.
+    /// to be sent downstream. For the mining messages, we will first return an `OpenExtendedMiningChannelSuccess`
+    /// if the channel is successfully opened. Then we add the `NewExtendedMiningJob` and `SetNewPrevHash` messages if
+    /// the relevant data is available. If the channel opening fails, we return `OpenExtendedMiningChannelError`.
     pub fn new_extended_channel(
         &mut self,
         request_id: u32,
         hash_rate: f32,
         min_extranonce_size: u16,
-    ) -> Result<Vec<Mining<'static>>, Error> {
+        additional_coinbase_script_data: Option<&[u8]>,
+    ) -> Result<(Vec<Mining<'static>>, Option<u32>), Error> {
         let extended_channels_group = 0;
         let max_extranonce_size = self.extranonces.get_range2_len() as u16;
         if min_extranonce_size <= max_extranonce_size {
-            // SECURITY is very unlikely to finish the ids btw this unwrap could be used by an
-            // attaccher that want to dirsrupt the service maybe we should have a method
-            // to reuse ids that are no longer connected?
+            // SECURITY it is very unlikely that we run out of ids, but this unwrap could be used by an
+            // attacker that wants to disrupt the service; maybe we should have a method to reuse ids
+            // that are no longer connected?
             let channel_id = self
                 .ids
                 .safe_lock(|ids| ids.new_channel_id(extended_channels_group))
                 .unwrap();
@@ -277,8 +357,23 @@ impl ChannelFactory {
                 .extranonces
                 .next_extended(max_extranonce_size as usize)
                 .unwrap();
+            let extranonce_with_stripped_data = extranonce
+                .into_prefix(self.extranonces.get_prefix_len(), &[])
+                .unwrap();
+            let success_with_stripped_extranonce_add_data = OpenExtendedMiningChannelSuccess {
+                request_id,
+                channel_id,
+                target: target.clone(),
+                extranonce_size: max_extranonce_size,
+                extranonce_prefix: extranonce_with_stripped_data,
+            };
+            self.extended_channels
+                .insert(channel_id, success_with_stripped_extranonce_add_data);
             let extranonce_prefix = extranonce
-                .into_prefix(self.extranonces.get_prefix_len())
+                .into_prefix(
+                    self.extranonces.get_prefix_len(),
+                    additional_coinbase_script_data.unwrap_or(&[]),
+                )
                 .unwrap();
             let success = OpenExtendedMiningChannelSuccess {
                 request_id,
@@ -287,9 +382,8 @@
                 extranonce_size: max_extranonce_size,
                 extranonce_prefix,
             };
-            self.extended_channels.insert(channel_id, success.clone());
             let mut result = vec![Mining::OpenExtendedMiningChannelSuccess(success)];
-            if let Some((job, _)) = &self.last_valid_job {
+            if let Some((job, _)) = &self.get_last_valid_job() {
                 let mut job = job.clone();
                 job.set_future();
                 let j_id = job.job_id;
@@ -306,11 +400,14 @@
             for (job, _) in &self.future_jobs {
                 result.push(Mining::NewExtendedMiningJob(job.clone()))
             }
-            Ok(result)
+            Ok((result, Some(channel_id)))
         } else {
-            Ok(vec![Mining::OpenMiningChannelError(
-                OpenMiningChannelError::unsupported_extranonce_size(request_id),
-            )])
+            Ok((
+                vec![Mining::OpenMiningChannelError(
+                    OpenMiningChannelError::unsupported_extranonce_size(request_id),
+                )],
+                None,
+            ))
         }
     }
     /// Called when we want to replicate a channel already opened by another actor.
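A note for reviewers on the `last_valid_jobs` bookkeeping introduced above: the single `last_valid_job` slot is replaced by a three-slot ring so that shares referencing a slightly older job (for instance, sent while a `SetExtranoncePrefix` rotation is in flight) can still be resolved by `get_valid_job(job_id)`. Below is a minimal, self-contained sketch of the same rotation under simplified types — `JobCache` is a hypothetical name and `String` stands in for the `(NewExtendedMiningJob, Vec<u32>)` payload, not the crate's API:

```rust
use std::collections::HashMap;

/// Three-slot job cache keyed by job_id, mirroring add_valid_job/get_valid_job.
struct JobCache {
    slots: [Option<(u32, String)>; 3],
    id_to_slot: HashMap<u32, u8>, // job_id -> slot index
    added: usize,                 // how many jobs were ever added
}

impl JobCache {
    fn new() -> Self {
        Self { slots: [None, None, None], id_to_slot: HashMap::new(), added: 0 }
    }

    fn add(&mut self, job_id: u32, payload: String) {
        // Fill the array left to right; once full, overwrite the oldest slot
        // (added % 3) and forget the evicted job's index so a stale job_id can
        // never resolve to the slot's new payload.
        let slot = if self.added < 3 { self.added } else { self.added % 3 };
        self.id_to_slot.retain(|_, v| *v != slot as u8);
        self.id_to_slot.insert(job_id, slot as u8);
        self.slots[slot] = Some((job_id, payload));
        self.added += 1;
    }

    fn get(&self, job_id: u32) -> Option<&(u32, String)> {
        let slot = *self.id_to_slot.get(&job_id)?;
        self.slots[slot as usize].as_ref()
    }
}

fn main() {
    let mut cache = JobCache::new();
    for id in 1..=4 {
        cache.add(id, format!("job-{id}"));
    }
    // Job 1 sat in slot 0 and was evicted by job 4; jobs 2..=4 are still valid.
    assert!(cache.get(1).is_none());
    assert!(cache.get(2).is_some() && cache.get(4).is_some());
}
```

The `retain` call is the important detail: it guarantees a share carrying an evicted `job_id` falls through to the `ShareDoNotMatchAnyJob` path instead of silently validating against the job that recycled the slot.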
@@ -344,6 +441,7 @@ impl ChannelFactory {
         request_id: u32,
         downstream_hash_rate: f32,
         id: u32,
+        additional_coinbase_script_data: Option<&[u8]>,
     ) -> Result<Vec<Mining>, Error> {
         let hom_group_id = 0;
         let mut result = vec![];
@@ -374,17 +472,30 @@ impl ChannelFactory {
         self.standard_channels_for_hom_downstreams
             .insert(channel_id, standard_channel);
 
+        let extranonce: Vec<u8> = match additional_coinbase_script_data {
+            Some(data) => {
+                let mut data = data.to_vec();
+                data.extend_from_slice(extranonce.as_ref());
+                data
+            }
+            None => extranonce.into(),
+        };
+
         // First message to be sent is OpenStandardMiningChannelSuccess
         result.push(Mining::OpenStandardMiningChannelSuccess(
             OpenStandardMiningChannelSuccess {
                 request_id: request_id.into(),
                 channel_id,
                 target,
-                extranonce_prefix: extranonce.into(),
+                extranonce_prefix: extranonce.try_into().expect("Internal error: on initialization we make sure that extranonce + additional coinbase script data are no longer than 32 bytes"),
                 group_channel_id: hom_group_id,
             },
         ));
-        self.prepare_standard_jobs_and_p_hash(&mut result, channel_id)?;
+        self.prepare_standard_jobs_and_p_hash(
+            &mut result,
+            channel_id,
+            additional_coinbase_script_data,
+        )?;
         self.channel_to_group_id.insert(channel_id, hom_group_id);
         Ok(result)
     }
@@ -396,6 +507,7 @@ impl ChannelFactory {
         request_id: u32,
         downstream_hash_rate: f32,
         group_id: u32,
+        additional_coinbase_script_data: Option<&[u8]>,
     ) -> Result<Vec<Mining>, Error> {
         let mut result = vec![];
         let channel_id = self
@@ -429,13 +541,21 @@ impl ChannelFactory {
         self.standard_channels_for_non_hom_downstreams
             .insert(complete_id, standard_channel);
 
+        let extranonce: Vec<u8> = match additional_coinbase_script_data {
+            Some(data) => {
+                let mut data = data.to_vec();
+                data.extend_from_slice(extranonce.as_ref());
+                data
+            }
+            None => extranonce.into(),
+        };
         // First message to be sent is OpenStandardMiningChannelSuccess
         result.push(Mining::OpenStandardMiningChannelSuccess(
             OpenStandardMiningChannelSuccess {
                 request_id: request_id.into(),
                 channel_id,
                 target,
-                extranonce_prefix: extranonce.into(),
+                extranonce_prefix: extranonce.try_into().expect("Internal error: on initialization we make sure that extranonce + additional coinbase script data are no longer than 32 bytes"),
                 group_channel_id: group_id,
             },
         ));
@@ -450,6 +570,7 @@ impl ChannelFactory {
         &mut self,
         result: &mut Vec<Mining>,
         channel_id: u32,
+        additional_coinbase_script_data: Option<&[u8]>,
     ) -> Result<(), Error> {
         // Safe cause the function is private and we always add the channel before calling this
         // funtion
         let standard_channel = self
             .standard_channels_for_hom_downstreams
             .get(&channel_id)
             .unwrap();
-        // OPTIMIZATION this could be memoized somewhere cause is very likely that we will receive a
-        // lot od OpenStandardMiningChannel requests consequtevely
+        // OPTIMIZATION this could be memoized somewhere because it is very likely that we will receive
+        // a lot of OpenStandardMiningChannel requests consecutively
         let job_id = self.job_ids.next();
         let future_jobs: Option<Vec<NewMiningJob<'static>>> = self
             .future_jobs
             .iter()
             .map(|j| {
                 extended_to_standard_job(
                     &j.0,
-                    &standard_channel.extranonce.clone().to_vec()[..],
+                    standard_channel.extranonce.as_ref(),
                     standard_channel.channel_id,
                     Some(job_id),
+                    additional_coinbase_script_data,
                 )
             })
             .collect();
 
+        let id = self.job_ids.next();
         // OPTIMIZATION the extranonce is cloned so many time but maybe is avoidable?
-        let last_valid_job = match &self.last_valid_job {
+        let last_valid_job = match self.get_last_valid_job() {
             Some((j, _)) => Some(
                 extended_to_standard_job(
                     j,
-                    &standard_channel.extranonce.clone().to_vec(),
+                    standard_channel.extranonce.as_ref(),
                     standard_channel.channel_id,
-                    Some(self.job_ids.next()),
+                    Some(id),
+                    additional_coinbase_script_data,
                 )
                 .ok_or(Error::ImpossibleToCalculateMerkleRoot)?,
             ),
@@ -567,11 +691,9 @@ impl ChannelFactory {
         // This is the same thing of just check if there is a prev hash add it to result if there
         // is last_job add it to result and add each future job to result.
         // But using the pattern match is more clear how each option is handled
-        match (
-            self.last_prev_hash.as_mut(),
-            self.last_valid_job.as_mut(),
-            self.future_jobs.is_empty(),
-        ) {
+        let last_prev_hash = self.last_prev_hash.clone();
+        let is_empty = self.future_jobs.is_empty();
+        match (last_prev_hash, self.get_mut_last_valid_job(), is_empty) {
             // If we do not have anything just do nothing
             (None, None, true) => (),
             // If we have only future jobs we need to send them all after the
@@ -588,16 +710,17 @@ impl ChannelFactory {
             }
             // If we have just a prev hash we need to send it after the SetupConnectionSuccess
             // message
-            (Some((prev_h, group_id_p_hash_sent)), None, true) => {
+            (Some((prev_h, mut group_id_p_hash_sent)), None, true) => {
                 if !group_id_p_hash_sent.contains(&group_id) {
                     let prev_h = prev_h.into_set_p_hash(group_id, None);
                     group_id_p_hash_sent.push(group_id);
                     result.push(Mining::SetNewPrevHash(prev_h.clone()));
                 }
+                self.last_prev_hash = Some((prev_h, group_id_p_hash_sent));
             }
             // If we have a prev hash and a last valid job we need to send before the prev hash and
             // the the valid job
-            (Some((prev_h, group_id_p_hash_sent)), Some((job, group_id_job_sent)), true) => {
+            (Some((prev_h, mut group_id_p_hash_sent)), Some((job, group_id_job_sent)), true) => {
                 if !group_id_p_hash_sent.contains(&group_id) {
                     let prev_h = prev_h.into_set_p_hash(group_id, Some(job.job_id));
                     group_id_p_hash_sent.push(group_id);
@@ -609,9 +732,10 @@ impl ChannelFactory {
                     group_id_job_sent.push(group_id);
                     result.push(Mining::NewExtendedMiningJob(job));
                 }
+                self.last_prev_hash = Some((prev_h, group_id_p_hash_sent));
             }
             // If we have everything we need, send before the prev hash and then all the jobs
-            (Some((prev_h, group_id_p_hash_sent)), Some((job, group_id_job_sent)), false) => {
+            (Some((prev_h, mut group_id_p_hash_sent)), Some((job, group_id_job_sent)), false) => {
                 if !group_id_p_hash_sent.contains(&group_id) {
                     let prev_h = prev_h.into_set_p_hash(group_id, Some(job.job_id));
                     group_id_p_hash_sent.push(group_id);
@@ -633,6 +757,7 @@ impl ChannelFactory {
                     result.push(Mining::NewExtendedMiningJob(job));
                 }
             }
+                self.last_prev_hash = Some((prev_h, group_id_p_hash_sent));
             }
             // This can not happen because we can not have a valid job without a prev hash
             (None, Some(_), true) => unreachable!(),
@@ -644,10 +769,10 @@ impl ChannelFactory {
         }
     }
 
-    /// Called when a new prev hash is received. If the respective job is available in the future
-    /// job queue, we move the future job into the valid job slot and store the prev hash as the
-    /// current prev hash to be referenced.
+    /// Called when a new prev hash is received. If the respective job is available in the future job queue,
+    /// we move the future job into the valid job slot and store the prev hash as the current prev hash to be referenced.
     fn on_new_prev_hash(&mut self, m: StagedPhash) -> Result<(), Error> {
+        self.clear_valid_jobs();
         while let Some(mut job) = self.future_jobs.pop() {
             if job.0.job_id == m.job_id {
                 let now = std::time::SystemTime::now()
@@ -655,10 +780,9 @@ impl ChannelFactory {
                     .unwrap()
                     .as_secs() as u32;
                 job.0.set_no_future(now);
-                self.last_valid_job = Some(job);
+                self.add_valid_job(job.0, job.1);
                 break;
             }
-            self.last_valid_job = None;
         }
         self.future_jobs = vec![];
         self.last_prev_hash_ = Some(crate::utils::u256_to_block_hash(m.prev_hash.clone()));
@@ -672,16 +796,21 @@ impl ChannelFactory {
         self.last_prev_hash = Some((m, ids));
         Ok(())
     }
-    /// Called when a `NewExtendedMiningJob` arrives. If the job is future, we add it to the future
-    /// queue. If the job is not future, we pair it with a the most recent prev hash
+    /// Called when a `NewExtendedMiningJob` arrives. If the job is future, we add it to the future queue.
+    /// If the job is not future, we pair it with the most recent prev hash
     fn on_new_extended_mining_job(
         &mut self,
         m: NewExtendedMiningJob<'static>,
+        additional_coinbase_script_data: Option<&[u8]>,
     ) -> Result<HashMap<u32, Mining<'static>, BuildNoHashHasher<u32>>, Error> {
         match (m.is_future(), &self.last_prev_hash) {
             (true, _) => {
                 let mut result = HashMap::with_hasher(BuildNoHashHasher::default());
-                self.prepare_jobs_for_downstream_on_new_extended(&mut result, &m)?;
+                self.prepare_jobs_for_downstream_on_new_extended(
+                    &mut result,
+                    &m,
+                    additional_coinbase_script_data,
+                )?;
                 let mut ids = vec![];
                 for complete_id in self.standard_channels_for_non_hom_downstreams.keys() {
                     let group_id = GroupId::into_group_id(*complete_id);
@@ -694,7 +823,11 @@ impl ChannelFactory {
             }
             (false, Some(_)) => {
                 let mut result = HashMap::with_hasher(BuildNoHashHasher::default());
-                self.prepare_jobs_for_downstream_on_new_extended(&mut result, &m)?;
+                self.prepare_jobs_for_downstream_on_new_extended(
+                    &mut result,
+                    &m,
+                    additional_coinbase_script_data,
+                )?;
                 // If job is not future it must always be paired with the last received prev hash
                 let mut ids = vec![];
                 for complete_id in self.standard_channels_for_non_hom_downstreams.keys() {
@@ -703,7 +836,7 @@ impl ChannelFactory {
                         ids.push(group_id)
                     }
                 }
-                self.last_valid_job = Some((m, ids));
+                self.add_valid_job(m, ids);
                 if let Some((_p_hash, _)) = &self.last_prev_hash {
                     Ok(result)
                 } else {
@@ -722,14 +855,18 @@ impl ChannelFactory {
         &mut self,
         result: &mut HashMap<u32, Mining<'static>>,
         m: &NewExtendedMiningJob<'static>,
+        additional_coinbase_script_data: Option<&[u8]>,
     ) -> Result<(), Error> {
         for (id, channel) in &self.standard_channels_for_hom_downstreams {
             let job_id = self.job_ids.next();
             let mut standard_job = extended_to_standard_job(
                 m,
-                &channel.extranonce.clone().to_vec()[..],
+                channel.extranonce.as_ref(),
                 *id,
                 Some(job_id),
+                // We don't care about what is in the additional data, since the downstream does not
+                // have to handle extranonces in that case; whatever is ok.
+                additional_coinbase_script_data,
             )
             .unwrap();
             standard_job.channel_id = *id;
@@ -754,8 +891,7 @@ impl ChannelFactory {
     // If there is job creator, bitcoin_target is retrieved from there. If not, it is set to 0.
     // If there is a job creator we pass the correct template id. If not, we pass `None`
-    // allow comparison chain because clippy wants to make job management assertion into a match
-    // clause
+    // allow comparison chain because clippy wants to make job management assertion into a match clause
    #[allow(clippy::comparison_chain)]
    #[allow(clippy::too_many_arguments)]
     fn check_target<T: AsRef<[u8]>>(
         &mut self,
         mut m: Share,
         bitcoin_target: T,
         template_id: Option<u64>,
         shares_per_minute: f32,
         merkle_path: Vec<Vec<u8>>,
         coinbase_tx_prefix: &[u8],
         coinbase_tx_suffix: &[u8],
         prev_blockhash: hash_types::BlockHash,
         bits: u32,
+        additional_coinbase_script_data: Option<&[u8]>,
     ) -> Result<OnNewShare, Error> {
         debug!("Checking target for share {:?}", m);
         let upstream_target = match &self.kind {
@@ -781,9 +918,7 @@ impl ChannelFactory {
             } => upstream_target.clone(),
         };
 
-        let (downstream_target, extranonce) = self
-            .get_channel_specific_mining_info(&m)
-            .ok_or(Error::ShareDoNotMatchAnyChannel)?;
+        let (downstream_target, extranonce) = self.get_channel_specific_mining_info(&m)?;
         let extranonce_1_len = self.extranonces.get_range0_len();
         let extranonce_2 = extranonce[extranonce_1_len..].to_vec();
         match &mut m {
@@ -806,6 +941,7 @@ impl ChannelFactory {
                     coinbase_tx_suffix,
                     &extranonce[..],
                     &merkle_path[..],
+                    additional_coinbase_script_data.unwrap_or(&[]),
                 )
                 .ok_or(Error::InvalidCoinbase)?
                 .try_into()
@@ -908,10 +1044,16 @@ impl ChannelFactory {
         }
     }
     /// Returns the downstream target and extranonce for the channel
-    fn get_channel_specific_mining_info(&self, m: &Share) -> Option<(mining_sv2::Target, Vec<u8>)> {
+    fn get_channel_specific_mining_info(
+        &self,
+        m: &Share,
+    ) -> Result<(mining_sv2::Target, Vec<u8>), Error> {
         match m {
             Share::Extended(share) => {
-                let channel = self.extended_channels.get(&m.get_channel_id())?;
+                let channel = self
+                    .extended_channels
+                    .get(&m.get_channel_id())
+                    .ok_or(Error::ShareDoNotMatchAnyChannel)?;
                 let extranonce_prefix = channel.extranonce_prefix.to_vec();
                 let dowstream_target = channel.target.clone().into();
                 let extranonce = [&extranonce_prefix[..], &share.extranonce.to_vec()[..]]
@@ -923,8 +1065,9 @@ impl ChannelFactory {
                         self.extranonces.get_len(),
                         extranonce.len()
                     );
+                    return Err(Error::InvalidCoinbase);
                 }
-                Some((dowstream_target, extranonce))
+                Ok((dowstream_target, extranonce))
             }
             Share::Standard((share, group_id)) => match &self.kind {
                 ExtendedChannelKind::Pool => {
@@ -937,9 +1080,16 @@ impl ChannelFactory {
                             .standard_channels_for_hom_downstreams
                             .get(&share.channel_id);
                     };
-                    Some((
-                        channel?.target.clone(),
-                        channel?.extranonce.clone().to_vec(),
+                    Ok((
+                        channel
+                            .ok_or(Error::ShareDoNotMatchAnyChannel)?
+                            .target
+                            .clone(),
+                        channel
+                            .ok_or(Error::ShareDoNotMatchAnyChannel)?
+                            .extranonce
+                            .clone()
+                            .to_vec(),
                     ))
                 }
                 ExtendedChannelKind::Proxy { .. } | ExtendedChannelKind::ProxyJd { .. } => {
@@ -952,9 +1102,16 @@ impl ChannelFactory {
                             .standard_channels_for_hom_downstreams
                             .get(&share.channel_id);
                     };
-                    Some((
-                        channel?.target.clone(),
-                        channel?.extranonce.clone().to_vec(),
+                    Ok((
+                        channel
+                            .ok_or(Error::ShareDoNotMatchAnyChannel)?
+                            .target
+                            .clone(),
+                        channel
+                            .ok_or(Error::ShareDoNotMatchAnyChannel)?
+                            .extranonce
+                            .clone()
+                            .to_vec(),
                     ))
                 }
             },
@@ -975,7 +1132,19 @@ pub struct PoolChannelFactory {
     inner: ChannelFactory,
     job_creator: JobsCreators,
     pool_coinbase_outputs: Vec<TxOut>,
-    pool_signature: String,
+    // Per channel additional data that the pool may want to include in the coinbase input script
+    // as first part of the extranonce. This can be used to put things like the pool signature
+    // or commitments. It is per channel since the pool may want to include different
+    // commitment data based on downstream hash rate.
+    // channel_id -> (additional_coinbase_script_data, old_additional_coinbase_script_data)
+    #[allow(clippy::type_complexity)]
+    channel_to_additional_coinbase_script_data:
+        HashMap<u32, (Vec<u8>, Option<Vec<u8>>), BuildNoHashHasher<u32>>,
+    // Additional data that the pool may want to include in the coinbase input script as first part
+    // of the extranonce. This can be used to put things like the pool signature.
+    additional_coinbase_script_data: Vec<u8>,
+    // channel_id ++ job_id
+    job_ids_using_old_add_data: HashSet<u32, BuildNoHashHasher<u32>>,
     // extedned_channel_id -> SetCustomMiningJob
     negotiated_jobs: HashMap<u32, SetCustomMiningJob<'static>, BuildNoHashHasher<u32>>,
 }
@@ -988,8 +1157,12 @@ impl PoolChannelFactory {
         share_per_min: f32,
         kind: ExtendedChannelKind,
         pool_coinbase_outputs: Vec<TxOut>,
-        pool_signature: String,
-    ) -> Self {
+        additional_coinbase_script_data: Vec<u8>,
+    ) -> Result<Self, Error> {
+        if additional_coinbase_script_data.len() + extranonces.get_len() > 32 {
+            error!("Additional coinbase script data is too big");
+            return Err(Error::AdditionalCoinbaseScriptDataTooBig);
+        }
         let inner = ChannelFactory {
             ids,
             standard_channels_for_non_hom_downstreams: HashMap::with_hasher(
@@ -1004,20 +1177,26 @@ impl PoolChannelFactory {
             future_jobs: Vec::new(),
             last_prev_hash: None,
             last_prev_hash_: None,
-            last_valid_job: None,
+            last_valid_jobs: [None, None, None],
+            id_to_job: HashMap::with_hasher(BuildNoHashHasher::default()),
+            added_elements: 0,
             kind,
             job_ids: Id::new(),
             channel_to_group_id: HashMap::with_hasher(BuildNoHashHasher::default()),
             future_templates: HashMap::with_hasher(BuildNoHashHasher::default()),
         };
-        Self {
+        Ok(Self {
             inner,
             job_creator,
             pool_coinbase_outputs,
-            pool_signature,
+            channel_to_additional_coinbase_script_data: HashMap::with_hasher(
+                BuildNoHashHasher::default(),
+            ),
+            additional_coinbase_script_data,
+            job_ids_using_old_add_data: HashSet::with_hasher(BuildNoHashHasher::default()),
             negotiated_jobs: HashMap::with_hasher(BuildNoHashHasher::default()),
-        }
+        })
     }
     /// Calls [`ChannelFactory::add_standard_channel`]
     pub fn add_standard_channel(
@@ -1027,8 +1206,15 @@ impl PoolChannelFactory {
         is_header_only: bool,
         id: u32,
     ) -> Result<Vec<Mining>, Error> {
-        self.inner
-            .add_standard_channel(request_id, downstream_hash_rate, is_header_only, id)
+        self.channel_to_additional_coinbase_script_data
+            .insert(id, (self.additional_coinbase_script_data.clone(), None));
+        self.inner.add_standard_channel(
+            request_id,
+            downstream_hash_rate,
+            is_header_only,
+            id,
+            Some(&self.additional_coinbase_script_data),
+        )
     }
     /// Calls [`ChannelFactory::new_extended_channel`]
     pub fn new_extended_channel(
@@ -1037,8 +1223,25 @@ impl PoolChannelFactory {
         hash_rate: f32,
         min_extranonce_size: u16,
     ) -> Result<Vec<Mining<'static>>, Error> {
-        self.inner
-            .new_extended_channel(request_id, hash_rate, min_extranonce_size)
+        match self.inner.new_extended_channel(
+            request_id,
+            hash_rate,
+            min_extranonce_size,
+            Some(&self.additional_coinbase_script_data),
+        ) {
+            // Channel is opened
+            Ok((res, Some(channel_id))) => {
+                self.channel_to_additional_coinbase_script_data.insert(
+                    channel_id,
+                    (self.additional_coinbase_script_data.clone(), None),
+                );
+                Ok(res)
+            }
+            // Channel is not opened and we can return an error downstream
+            Ok((res, None)) => Ok(res),
+            // Channel is not opened and we can not return an error downstream
+            Err(e) => Err(e),
+        }
     }
     /// Called when we want to replicate a channel already opened by another actor.
     /// is used only in the jd client from the template provider module to mock a pool.
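For context on the two invariants the constructor above now enforces: the combined pool data plus full extranonce must fit SV2's 32-byte `extranonce_prefix` bound, and the pool bytes always come first in the prefix handed to downstreams. A small illustrative sketch (hypothetical function name, not the crate's API):

```rust
/// Compose the extranonce prefix sent in OpenExtendedMiningChannelSuccess:
/// the pool's additional coinbase script data is prepended to the channel's
/// own extranonce prefix. PoolChannelFactory::new rejects configurations
/// where the additional data + full extranonce could exceed 32 bytes.
fn compose_extranonce_prefix(additional: &[u8], channel_prefix: &[u8]) -> Option<Vec<u8>> {
    const MAX_EXTRANONCE_PREFIX: usize = 32; // SV2 bound checked at factory construction
    if additional.len() + channel_prefix.len() > MAX_EXTRANONCE_PREFIX {
        return None;
    }
    let mut prefix = Vec::with_capacity(additional.len() + channel_prefix.len());
    prefix.extend_from_slice(additional);     // pool data first...
    prefix.extend_from_slice(channel_prefix); // ...then the per-channel bytes
    Some(prefix)
}

fn main() {
    // Mirrors the tests below: pool data [10, 11, 12] ends up as the first
    // bytes of the prefix the downstream sees.
    let prefix = compose_extranonce_prefix(&[10, 11, 12], &[0, 0, 1]).unwrap();
    assert_eq!(&prefix[0..3], &[10, 11, 12]);
}
```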
@@ -1050,6 +1253,13 @@ impl PoolChannelFactory {
         channel_id: u32,
         extranonce_size: u16,
     ) -> Option<()> {
+        // This is used to initialise a PoolChannelFactory for a JDC, which can not have
+        // additional_coinbase_script_data, as that is set only by the pool.
+        assert!(self.additional_coinbase_script_data.is_empty());
+        self.channel_to_additional_coinbase_script_data.insert(
+            channel_id,
+            (self.additional_coinbase_script_data.clone(), None),
+        );
         self.inner.replicate_upstream_extended_channel_only_jd(
             target,
             extranonce,
@@ -1083,23 +1293,30 @@ impl PoolChannelFactory {
             m,
             true,
             self.pool_coinbase_outputs.clone(),
-            self.pool_signature.clone(),
+            self.additional_coinbase_script_data.len() as u8,
         )?;
-        self.inner.on_new_extended_mining_job(new_job)
+        self.inner.on_new_extended_mining_job(
+            new_job,
+            // Here we can use the data that we used to initialize this channel factory, since this
+            // value will be used only to create standard jobs for HOM downstreams.
+            Some(&self.additional_coinbase_script_data),
+        )
     }
-    /// Called when a `SubmitSharesStandard` message is received from the downstream. We check the
-    /// shares against the channel's respective target and return `OnNewShare` to let us know if
-    /// and where the shares should be relayed
+    /// Called when a `SubmitSharesStandard` message is received from the downstream. We check the shares
+    /// against the channel's respective target and return `OnNewShare` to let us know if and where the shares should
+    /// be relayed
     pub fn on_submit_shares_standard(
         &mut self,
         m: SubmitSharesStandard,
     ) -> Result<OnNewShare, Error> {
+        let additional_coinbase_script_data =
+            self.get_additional_coinbase_script_data(m.channel_id, m.job_id);
         match self.inner.channel_to_group_id.get(&m.channel_id) {
             Some(g_id) => {
                 let referenced_job = self
                     .inner
-                    .last_valid_job
-                    .clone()
+                    .get_valid_job(m.job_id)
+                    .cloned()
                     .ok_or(Error::ShareDoNotMatchAnyJob)?
                     .0;
                 let merkle_path = referenced_job.merkle_path.to_vec();
@@ -1129,6 +1346,7 @@ impl PoolChannelFactory {
                     referenced_job.coinbase_tx_suffix.as_ref(),
                     prev_blockhash,
                     bits,
+                    Some(&additional_coinbase_script_data),
                 )
             }
             None => {
@@ -1145,14 +1363,16 @@ impl PoolChannelFactory {
         }
     }
 
-    /// Called when a `SubmitSharesExtended` message is received from the downstream. We check the
-    /// shares against the channel's respective target and return `OnNewShare` to let us know if
-    /// and where the shares should be relayed
+    /// Called when a `SubmitSharesExtended` message is received from the downstream. We check the shares
+    /// against the channel's respective target and return `OnNewShare` to let us know if and where the shares should
+    /// be relayed
    pub fn on_submit_shares_extended(
        &mut self,
        m: SubmitSharesExtended,
    ) -> Result<OnNewShare, Error> {
        let target = self.job_creator.last_target();
+        let additional_coinbase_script_data =
+            self.get_additional_coinbase_script_data(m.channel_id, m.job_id);
         // When downstream set a custom mining job we add the job to the negotiated job
         // hashmap, with the extended channel id as a key. Whenever the pool receive a share must
         // first check if the channel have a negotiated job if so we can not retreive the template
@@ -1160,14 +1380,16 @@ impl PoolChannelFactory {
         if self.negotiated_jobs.contains_key(&m.channel_id) {
             let referenced_job = self.negotiated_jobs.get(&m.channel_id).unwrap();
             let merkle_path = referenced_job.merkle_path.to_vec();
-            let pool_signature = self.pool_signature.clone();
-            let extended_job =
-                job_creator::extended_job_from_custom_job(referenced_job, pool_signature, 32)
-                    .unwrap();
+            let extended_job = job_creator::extended_job_from_custom_job(
+                referenced_job,
+                additional_coinbase_script_data.len() as u8,
+                self.inner.extranonces.get_len() as u8,
+            )
+            .unwrap();
             let prev_blockhash = crate::utils::u256_to_block_hash(referenced_job.prev_hash.clone());
             let bits = referenced_job.nbits;
-            self.inner.check_target(
-                Share::Extended(m.into_static()),
+            match self.inner.check_target(
+                Share::Extended(m.clone().into_static()),
                 target,
                 None,
                 0,
@@ -1176,12 +1398,48 @@ impl PoolChannelFactory {
                 extended_job.coinbase_tx_suffix.as_ref(),
                 prev_blockhash,
                 bits,
-            )
+                Some(&additional_coinbase_script_data),
+            ) {
+                // Since this is a share for a custom job, there is no way to know whether the share
+                // does not meet the target because the pool sent a new extranonce prefix and the
+                // miner is still using the old one, so we also check against the old prefix since
+                // we don't want to fail in that case.
+                Ok(OnNewShare::SendErrorDownstream(m_)) => {
+                    match self.get_old_additional_coinbase_script_data(m.channel_id) {
+                        Some(additional_coinbase_script_data) => {
+                            let target = self.job_creator.last_target();
+                            let referenced_job = self.negotiated_jobs.get(&m.channel_id).unwrap();
+                            let merkle_path = referenced_job.merkle_path.to_vec();
+                            let extended_job = job_creator::extended_job_from_custom_job(
+                                referenced_job,
+                                additional_coinbase_script_data.len() as u8,
+                                self.inner.extranonces.get_len() as u8,
+                            )
+                            .unwrap();
+                            self.inner.check_target(
+                                Share::Extended(m.into_static()),
+                                target,
+                                None,
+                                0,
+                                merkle_path,
+                                extended_job.coinbase_tx_prefix.as_ref(),
+                                extended_job.coinbase_tx_suffix.as_ref(),
+                                prev_blockhash,
+                                bits,
+                                Some(&additional_coinbase_script_data),
+                            )
+                        }
+                        None => Ok(OnNewShare::SendErrorDownstream(m_)),
+                    }
+                }
+                Ok(res) => Ok(res),
+                Err(err) => Err(err),
+            }
         } else {
             let referenced_job = self
                 .inner
-                .last_valid_job
-                .clone()
+                .get_valid_job(m.job_id)
+                .cloned()
                 .ok_or(Error::ShareDoNotMatchAnyJob)?
                 .0;
             let merkle_path = referenced_job.merkle_path.to_vec();
@@ -1210,6 +1468,7 @@ impl PoolChannelFactory {
                 referenced_job.coinbase_tx_suffix.as_ref(),
                 prev_blockhash,
                 bits,
+                Some(&additional_coinbase_script_data),
             )
         }
     }
@@ -1228,8 +1487,7 @@ impl PoolChannelFactory {
             .unwrap();
         new_id
     }
-    /// Returns the full extranonce, extranonce1 (static for channel) + extranonce2 (miner nonce
-    /// space)
+    /// Returns the full extranonce, extranonce1 (static for channel) + extranonce2 (miner nonce space)
     pub fn extranonce_from_downstream_extranonce(
         &self,
         ext: mining_sv2::Extranonce,
@@ -1286,6 +1544,95 @@ impl PoolChannelFactory {
     pub fn set_target(&mut self, new_target: &mut Target) {
         self.inner.kind.set_target(new_target);
     }
+
+    // TODO ret can not be larger than 32 bytes, maybe use the stack for it?
+    #[inline(always)]
+    fn get_additional_coinbase_script_data(&self, channel_id: u32, job_id: u32) -> Vec<u8> {
+        debug_assert!({
+            let have_old = self.job_ids_using_old_add_data.contains(&job_id);
+            let not_have_old = self
+                .channel_to_additional_coinbase_script_data
+                .get(&channel_id)
+                .unwrap()
+                .1
+                .is_none();
+            if have_old {
+                !not_have_old
+            } else {
+                true
+            }
+        });
+        match self
+            .channel_to_additional_coinbase_script_data
+            .get(&channel_id)
+        {
+            Some((add_data, None)) => add_data.clone(),
+            Some((add_data, Some(old_data))) => {
+                if self.job_ids_using_old_add_data.contains(&job_id) {
+                    old_data.clone()
+                } else {
+                    add_data.clone()
+                }
+            }
+            None => panic!("Internal error: channel not initialized can not get additional data"),
+        }
+    }
+
+    // TODO ret can not be larger than 32 bytes, maybe use the stack for it?
+    #[inline(always)]
+    fn get_old_additional_coinbase_script_data(&self, channel_id: u32) -> Option<Vec<u8>> {
+        self.channel_to_additional_coinbase_script_data
+            .get(&channel_id)?
+            .1
+            .clone()
+    }
+
+    /// This sets a new additional coinbase script data for a particular channel. Things to keep in
+    /// mind before using this function:
+    /// 1. Standard hom channels are not affected by the change
+    /// 2. The new additional data MUST have the exact same len as the additional data used to
+    ///    initialize the channel factory with PoolChannelFactory::new
+    /// 3. For jobs provided by the pool, all the non future jobs sent before the new additional
+    ///    data will have the old additional data. All the future jobs and the non future jobs sent
+    ///    after the new additional data will have the new additional data
+    /// 4. Custom jobs will be checked against the new additional data; if the check fails we check
+    ///    against the old additional data, and if this check also fails we return SubmitShareError
+    pub fn change_additional_coinbase_script_data(
+        &mut self,
+        new_data: Vec<u8>,
+        channel_id: u32,
+    ) -> Result<Mining<'static>, Error> {
+        if self.additional_coinbase_script_data.len() == new_data.len() {
+            let mut ids_for_old_data = HashSet::with_hasher(BuildNoHashHasher::default());
+            for id in self.inner.id_to_job.keys() {
+                ids_for_old_data.insert(*id);
+            }
+            self.job_ids_using_old_add_data = ids_for_old_data;
+            match self
+                .channel_to_additional_coinbase_script_data
+                .get_mut(&channel_id)
+            {
+                Some(data) => {
+                    data.1 = Some(data.0.clone());
+                    data.0 = new_data.clone();
+                    let res = SetExtranoncePrefix {
+                        channel_id,
+                        extranonce_prefix: new_data.try_into().expect("Internal error: new_data len was already checked against the old additional data len"),
+                    };
+                    Ok(Mining::SetExtranoncePrefix(res))
+                }
+                None => Err(Error::NotFoundChannelId),
+            }
+        } else {
+            Err(Error::NewAdditionalCoinbaseDataLenDoNotMatch)
+        }
+    }
+
+    pub fn close_channel(&mut self, channel_id: u32) {
+        self.channel_to_additional_coinbase_script_data
+            .retain(|k, _| k != &channel_id);
+        self.inner.close_channel(channel_id);
+    }
 }
 
 /// Used by proxies that want to open extended channls with upstream. If the proxy has job
@@ -1295,7 +1642,6 @@ pub struct ProxyExtendedChannelFactory {
     inner: ChannelFactory,
     job_creator: Option<JobsCreators>,
     pool_coinbase_outputs: Option<Vec<TxOut>>,
-    pool_signature: String,
     // Id assigned to the extended channel by upstream
     extended_channel_id: u32,
 }
@@ -1309,7 +1655,6 @@ impl ProxyExtendedChannelFactory {
         share_per_min: f32,
         kind: ExtendedChannelKind,
         pool_coinbase_outputs: Option<Vec<TxOut>>,
-        pool_signature: String,
         extended_channel_id: u32,
     ) -> Self {
         match &kind {
@@ -1339,7 +1684,9 @@ impl ProxyExtendedChannelFactory {
             future_jobs: Vec::new(),
             last_prev_hash: None,
             last_prev_hash_: None,
-            last_valid_job: None,
+            last_valid_jobs: [None, None, None],
+            id_to_job: HashMap::with_hasher(BuildNoHashHasher::default()),
+            added_elements: 0,
             kind,
             job_ids: Id::new(),
             channel_to_group_id: HashMap::with_hasher(BuildNoHashHasher::default()),
@@ -1349,7 +1696,6 @@ impl ProxyExtendedChannelFactory {
             inner,
             job_creator,
             pool_coinbase_outputs,
-            pool_signature,
             extended_channel_id,
         }
     }
@@ -1362,7 +1708,7 @@ impl ProxyExtendedChannelFactory {
         id: u32,
     ) -> Result<Vec<Mining>, Error> {
         self.inner
-            .add_standard_channel(request_id, downstream_hash_rate, id_header_only, id)
+            .add_standard_channel(request_id, downstream_hash_rate, id_header_only, id, None)
     }
     /// Calls [`ChannelFactory::new_extended_channel`]
     pub fn new_extended_channel(
@@ -1372,11 +1718,11 @@ impl ProxyExtendedChannelFactory {
         min_extranonce_size: u16,
     ) -> Result<Vec<Mining>, Error> {
         self.inner
-            .new_extended_channel(request_id, hash_rate, min_extranonce_size)
+            .new_extended_channel(request_id, hash_rate, min_extranonce_size, None)
+            .map(|x| x.0)
     }
-    /// Called only when a new prev hash is received by a Template Provider when job declaration is
-    /// used. It matches the message with a `job_id`, creates a new custom job, and calls
-    /// [`ChannelFactory::on_new_prev_hash`]
+    /// Called only when a new prev hash is received by a Template Provider when job declaration is used.
+    /// It matches the message with a `job_id`, creates a new custom job, and calls [`ChannelFactory::on_new_prev_hash`]
     pub fn on_new_prev_hash_from_tp(
         &mut self,
         m: &SetNewPrevHashFromTp<'static>,
@@ -1417,9 +1763,8 @@ impl ProxyExtendedChannelFactory {
             panic!("A channel factory without job creator do not have declaration capabilities")
         }
     }
-    /// Called only when a new template is received by a Template Provider when job declaration is
-    /// used. It creates a new custom job and calls
-    /// [`ChannelFactory::on_new_extended_mining_job`]
+    /// Called only when a new template is received by a Template Provider when job declaration is used.
+    /// It creates a new custom job and calls [`ChannelFactory::on_new_extended_mining_job`]
     #[allow(clippy::type_complexity)]
     pub fn on_new_template(
         &mut self,
@@ -1440,12 +1785,7 @@ impl ProxyExtendedChannelFactory {
             self.job_creator.as_mut(),
             self.pool_coinbase_outputs.as_mut(),
         ) {
-            let new_job = job_creator.on_new_template(
-                m,
-                true,
-                pool_coinbase_outputs.clone(),
-                self.pool_signature.clone(),
-            )?;
+            let new_job = job_creator.on_new_template(m, true, pool_coinbase_outputs.clone(), 0)?;
             let id = new_job.job_id;
             if !new_job.is_future() && self.inner.last_prev_hash.is_some() {
                 let prev_hash = self.last_prev_hash().unwrap();
@@ -1468,7 +1808,7 @@ impl ProxyExtendedChannelFactory {
                     future_job: m.future_template,
                 };
                 return Ok((
-                    self.inner.on_new_extended_mining_job(new_job)?,
+                    self.inner.on_new_extended_mining_job(new_job, None)?,
                     Some(custom_mining_job),
                     id,
                 ));
@@ -1477,22 +1817,26 @@ impl ProxyExtendedChannelFactory {
                     .future_templates
                     .insert(new_job.job_id, m.clone());
             }
-            Ok((self.inner.on_new_extended_mining_job(new_job)?, None, id))
+            Ok((
+                self.inner.on_new_extended_mining_job(new_job, None)?,
+                None,
+                id,
+            ))
         } else {
             panic!("Either channel factory has no job creator or pool_coinbase_outputs are not yet set")
         }
     }
-    /// Called when a `SubmitSharesStandard` message is received from the downstream. We check the
-    /// shares against the channel's respective target and return `OnNewShare` to let us know if
-    /// and where the the shares should be relayed
+    /// Called when a `SubmitSharesExtended` message is received from the downstream. We check the shares
+    /// against the channel's respective target and return `OnNewShare` to let us know if and where the
+    /// shares should be relayed
     pub fn on_submit_shares_extended(
         &mut self,
         m: SubmitSharesExtended<'static>,
     ) -> Result<OnNewShare, Error> {
         let merkle_path = self
             .inner
-            .last_valid_job
+            .get_valid_job(m.job_id)
             .as_ref()
             .ok_or(Error::ShareDoNotMatchAnyJob)?
             .0
@@ -1501,8 +1845,8 @@ impl ProxyExtendedChannelFactory {
             .merkle_path
             .to_vec();
         let referenced_job = self
             .inner
-            .last_valid_job
-            .clone()
+            .get_valid_job(m.job_id)
+            .cloned()
             .ok_or(Error::ShareDoNotMatchAnyJob)?
             .0;
@@ -1546,6 +1890,7 @@ impl ProxyExtendedChannelFactory {
                 referenced_job.coinbase_tx_suffix.as_ref(),
                 prev_blockhash,
                 bits,
+                None,
             )
         } else {
             let bitcoin_target = [0; 32];
@@ -1572,20 +1917,21 @@ impl ProxyExtendedChannelFactory {
                 referenced_job.coinbase_tx_suffix.as_ref(),
                 prev_blockhash,
                 bits,
+                None,
             )
         }
     }
-    /// Called when a `SubmitSharesStandard` message is received from the Downstream. We check the
-    /// shares against the channel's respective target and return `OnNewShare` to let us know if
-    /// and where the shares should be relayed
+    /// Called when a `SubmitSharesStandard` message is received from the Downstream. We check the shares
+    /// against the channel's respective target and return `OnNewShare` to let us know if and where the shares should
+    /// be relayed
     pub fn on_submit_shares_standard(
         &mut self,
         m: SubmitSharesStandard,
     ) -> Result<OnNewShare, Error> {
         let merkle_path = self
             .inner
-            .last_valid_job
+            .get_valid_job(m.job_id)
             .as_ref()
             .ok_or(Error::ShareDoNotMatchAnyJob)?
             .0
@@ -1593,8 +1939,8 @@ impl ProxyExtendedChannelFactory {
             .merkle_path
             .to_vec();
         let referenced_job = self
             .inner
-            .last_valid_job
-            .clone()
+            .get_valid_job(m.job_id)
+            .cloned()
             .ok_or(Error::ShareDoNotMatchAnyJob)?
             .0;
         match self.inner.channel_to_group_id.get(&m.channel_id) {
@@ -1602,7 +1948,12 @@ impl ProxyExtendedChannelFactory {
                 if let Some(job_creator) = self.job_creator.as_mut() {
                     let template_id = job_creator
                         .get_template_id_from_job(
-                            self.inner.last_valid_job.as_ref().unwrap().0.job_id,
+                            self.inner
+                                .get_valid_job(m.job_id)
+                                .as_ref()
+                                .unwrap()
+                                .0
+                                .job_id,
                         )
                         .ok_or(Error::NoTemplateForId)?;
                     let bitcoin_target = job_creator.last_target();
@@ -1627,6 +1978,7 @@ impl ProxyExtendedChannelFactory {
                         referenced_job.coinbase_tx_suffix.as_ref(),
                         prev_blockhash,
                         bits,
+                        None,
                     )
                 } else {
                     let bitcoin_target = [0; 32];
@@ -1653,6 +2005,7 @@ impl ProxyExtendedChannelFactory {
                         referenced_job.coinbase_tx_suffix.as_ref(),
                         prev_blockhash,
                         bits,
+                        None,
                     )
                 }
             }
@@ -1685,16 +2038,18 @@ impl ProxyExtendedChannelFactory {
         &mut self,
         m: NewExtendedMiningJob<'static>,
     ) -> Result<HashMap<u32, Mining<'static>, BuildNoHashHasher<u32>>, Error> {
-        self.inner.on_new_extended_mining_job(m)
+        self.inner.on_new_extended_mining_job(m, None)
     }
     pub fn set_target(&mut self, new_target: &mut Target) {
         self.inner.kind.set_target(new_target);
     }
     pub fn last_valid_job_version(&self) -> Option<u32> {
-        self.inner.last_valid_job.as_ref().map(|j| j.0.version)
+        self.inner
+            .get_last_valid_job()
+            .as_ref()
+            .map(|j| j.0.version)
    }
-    /// Returns the full extranonce, extranonce1 (static for channel) + extranonce2 (miner nonce
-    /// space)
+    /// Returns the full extranonce, extranonce1 (static for channel) + extranonce2 (miner nonce space)
     pub fn extranonce_from_downstream_extranonce(
         &self,
         ext: mining_sv2::Extranonce,
@@ -1745,6 +2100,10 @@ impl ProxyExtendedChannelFactory {
     ) -> Option<OnNewShare> {
         self.inner.update_target_for_channel(channel_id, new_target)
     }
+
+    pub fn get_extranonce_len(&self) -> usize {
+        self.inner.extranonces.get_len()
+    }
 }
 
 /// Used by proxies for tracking upstream targets.
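To make the old/new rotation semantics above concrete: when `change_additional_coinbase_script_data` swaps in new bytes, jobs that were live at swap time keep validating against the old bytes, while later jobs use the new ones, and the returned `Mining::SetExtranoncePrefix` is what gets forwarded downstream. A standalone sketch of just that bookkeeping, under the same same-length rule as in the diff (all names here are illustrative, not the crate's API):

```rust
use std::collections::{HashMap, HashSet};

struct AdditionalData {
    per_channel: HashMap<u32, (Vec<u8>, Option<Vec<u8>>)>, // channel -> (current, old)
    jobs_using_old: HashSet<u32>,                          // job ids issued before the swap
}

impl AdditionalData {
    fn swap(&mut self, channel_id: u32, new: Vec<u8>, live_job_ids: &[u32]) -> Result<(), ()> {
        let entry = self.per_channel.get_mut(&channel_id).ok_or(())?;
        if entry.0.len() != new.len() {
            return Err(()); // length must stay stable or the coinbase layout changes
        }
        // Every job that is currently valid keeps the old bytes.
        self.jobs_using_old = live_job_ids.iter().copied().collect();
        entry.1 = Some(std::mem::replace(&mut entry.0, new));
        Ok(())
    }

    fn data_for(&self, channel_id: u32, job_id: u32) -> &[u8] {
        let (current, old) = &self.per_channel[&channel_id];
        match old {
            Some(old) if self.jobs_using_old.contains(&job_id) => old,
            _ => current,
        }
    }
}

fn main() {
    let mut d = AdditionalData {
        per_channel: HashMap::from([(1, (vec![10, 11, 12], None))]),
        jobs_using_old: HashSet::new(),
    };
    d.swap(1, vec![14, 11, 12], &[7]).unwrap(); // job 7 was issued before the swap
    assert_eq!(d.data_for(1, 7), [10u8, 11, 12].as_slice()); // old job -> old bytes
    assert_eq!(d.data_for(1, 8), [14u8, 11, 12].as_slice()); // new job -> new bytes
}
```

This is also why custom-job shares are re-checked against the old bytes on failure in `on_submit_shares_extended`: a miner may still be working on the old prefix while the `SetExtranoncePrefix` is in flight.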
@@ -1770,7 +2129,7 @@ mod test {
     use super::*;
     use binary_sv2::{Seq0255, B064K, U256};
     use bitcoin::{hash_types::WPubkeyHash, PublicKey, TxOut};
-    use mining_sv2::OpenStandardMiningChannel;
+    use mining_sv2::{OpenExtendedMiningChannel, OpenStandardMiningChannel};
 
     const BLOCK_REWARD: u64 = 2_000_000_000;
 
@@ -1862,7 +2221,7 @@ mod test {
         // Initialize a Channel of type Pool
         let out = TxOut {value: BLOCK_REWARD, script_pubkey: decode_hex("4104c6d0969c2d98a5c19ba7c36c7937c5edbd60ff2a01397c4afe54f16cd641667ea0049ba6f9e1796ba3c8e49e1b504c532ebbaaa1010c3f7d9b83a8ea7fd800e2ac").unwrap().into()};
-        let pool_signature = "".to_string();
+        let additional_coinbase_script_data = "".to_string();
         let creator = JobsCreators::new(7);
         let share_per_min = 1.0;
         // Create an ExtendedExtranonce of len 7:
@@ -1883,8 +2242,9 @@ mod test {
             share_per_min,
             channel_kind,
             vec![out],
-            pool_signature,
-        );
+            additional_coinbase_script_data.into_bytes(),
+        )
+        .unwrap();
 
         // Build a NewTemplate
         let new_template = NewTemplate {
@@ -1991,4 +2351,144 @@ mod test {
             OnNewShare::ShareMeetDownstreamTarget => panic!(),
         };
     }
+    #[test]
+    fn test_extranonce_prefix_in_hom() {
+        let extranonce_prefix1 = [10, 11, 12];
+        let (prefix, _, _) = get_coinbase();
+
+        // Initialize a Channel of type Pool
+        let out = TxOut {value: BLOCK_REWARD, script_pubkey: decode_hex("4104c6d0969c2d98a5c19ba7c36c7937c5edbd60ff2a01397c4afe54f16cd641667ea0049ba6f9e1796ba3c8e49e1b504c532ebbaaa1010c3f7d9b83a8ea7fd800e2ac").unwrap().into()};
+        let creator = JobsCreators::new(7);
+        let share_per_min = 1.0;
+        let extranonces = ExtendedExtranonce::new(0..0, 0..0, 0..7);
+
+        let ids = Arc::new(Mutex::new(GroupId::new()));
+        let channel_kind = ExtendedChannelKind::Pool;
+        let mut channel = PoolChannelFactory::new(
+            ids,
+            extranonces,
+            creator,
+            share_per_min,
+            channel_kind,
+            vec![out],
+            extranonce_prefix1.clone().into(),
+        )
+        .unwrap();
+
+        // Build a NewTemplate
+        let new_template = NewTemplate {
+            template_id: 10,
+            future_template: true,
+            version: VERSION,
+            coinbase_tx_version: 1,
+            coinbase_prefix: prefix.try_into().unwrap(),
+            coinbase_tx_input_sequence: u32::MAX,
+            coinbase_tx_value_remaining: 5_000_000_000,
+            coinbase_tx_outputs_count: 0,
+            coinbase_tx_outputs: get_coinbase_outputs(),
+            coinbase_tx_locktime: 0,
+            merkle_path: get_merkle_path(),
+        };
+
+        // "Send" the NewTemplate to the channel
+        let _ = channel.on_new_template(&mut (new_template.clone()));
+
+        // Build a PrevHash
+        let mut p_hash = decode_hex(PREV_HASH).unwrap();
+        p_hash.reverse();
+        let prev_hash = SetNewPrevHashFromTp {
+            template_id: 10,
+            prev_hash: p_hash.try_into().unwrap(),
+            header_timestamp: PREV_HEADER_TIMESTAMP,
+            n_bits: PREV_HEADER_NBITS,
+            target: nbit_to_target(PREV_HEADER_NBITS),
+        };
+
+        // "Send" the SetNewPrevHash to channel
+        let _ = channel.on_new_prev_hash_from_tp(&prev_hash);
+
+        let result = channel
+            .add_standard_channel(100, 100_000_000_000_000.0, true, 2)
+            .unwrap();
+        let extranonce_prefix = match &result[0] {
+            Mining::OpenStandardMiningChannelSuccess(msg) => msg.extranonce_prefix.clone().to_vec(),
+            _ => panic!(),
+        };
+        assert!(&extranonce_prefix.to_vec()[0..3] == extranonce_prefix1);
+    }
+    #[test]
+    fn test_extranonce_prefix_in_extended() {
+        let extranonce_prefix1 = [10, 11, 12];
+        let extranonce_prefix2 = [14, 11, 12];
+        let (prefix, _, _) = get_coinbase();
+
+        // Initialize a Channel of type Pool
decode_hex("4104c6d0969c2d98a5c19ba7c36c7937c5edbd60ff2a01397c4afe54f16cd641667ea0049ba6f9e1796ba3c8e49e1b504c532ebbaaa1010c3f7d9b83a8ea7fd800e2ac").unwrap().into()}; + let creator = JobsCreators::new(16); + let share_per_min = 1.0; + let extranonces = ExtendedExtranonce::new(0..0, 0..8, 8..16); + + let ids = Arc::new(Mutex::new(GroupId::new())); + let channel_kind = ExtendedChannelKind::Pool; + let mut channel = PoolChannelFactory::new( + ids, + extranonces, + creator, + share_per_min, + channel_kind, + vec![out], + extranonce_prefix1.clone().into(), + ) + .unwrap(); + + // Build a NewTemplate + let new_template = NewTemplate { + template_id: 10, + future_template: true, + version: VERSION, + coinbase_tx_version: 1, + coinbase_prefix: prefix.try_into().unwrap(), + coinbase_tx_input_sequence: u32::MAX, + coinbase_tx_value_remaining: 5_000_000_000, + coinbase_tx_outputs_count: 0, + coinbase_tx_outputs: get_coinbase_outputs(), + coinbase_tx_locktime: 0, + merkle_path: get_merkle_path(), + }; + + // "Send" the NewTemplate to the channel + let _ = channel.on_new_template(&mut (new_template.clone())); + + // Build a PrevHash + let mut p_hash = decode_hex(PREV_HASH).unwrap(); + p_hash.reverse(); + let prev_hash = SetNewPrevHashFromTp { + template_id: 10, + prev_hash: p_hash.try_into().unwrap(), + header_timestamp: PREV_HEADER_TIMESTAMP, + n_bits: PREV_HEADER_NBITS, + target: nbit_to_target(PREV_HEADER_NBITS), + }; + + let _ = channel.on_new_prev_hash_from_tp(&prev_hash); + + let result = channel + .new_extended_channel(100, 100_000_000_000_000.0, 2) + .unwrap(); + let (extranonce_prefix, channel_id) = match &result[0] { + Mining::OpenExtendedMiningChannelSuccess(msg) => { + (msg.extranonce_prefix.clone().to_vec(), msg.channel_id) + } + _ => panic!(), + }; + assert!(&extranonce_prefix.to_vec()[0..3] == extranonce_prefix1); + match channel + .change_additional_coinbase_script_data(extranonce_prefix2.to_vec(), channel_id) + { + Ok(Mining::SetExtranoncePrefix(msg)) => { + assert!(&msg.extranonce_prefix.to_vec()[0..3] == extranonce_prefix2); + } + _ => panic!(), + } + } } diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs index 7b5f0feed..d66a64d69 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/mod.rs @@ -7,15 +7,17 @@ use std::convert::TryInto; /// convert extended to standard job by calculating the merkle root pub fn extended_to_standard_job<'a>( extended: &NewExtendedMiningJob, - coinbase_script: &[u8], + extranonce: &[u8], channel_id: u32, job_id: Option, + additional_coinbase_script_data: Option<&[u8]>, ) -> Option> { let merkle_root = crate::utils::merkle_root_from_path( extended.coinbase_tx_prefix.inner_as_ref(), extended.coinbase_tx_suffix.inner_as_ref(), - coinbase_script, + extranonce, &extended.merkle_path.inner_as_ref(), + additional_coinbase_script_data.unwrap_or(&[]), ); Some(NewMiningJob { diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs index 65f4cdb3d..cee3d160e 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/proxy_group_channel.rs @@ -115,6 +115,7 @@ impl GroupChannel { &channel.extranonce.clone().to_vec(), channel.channel_id, None, + None, ) .ok_or(Error::ImpossibleToCalculateMerkleRoot)?; res.push(Mining::NewMiningJob(standard_job)); @@ -126,6 
@@ -126,6 +127,7 @@ impl GroupChannel {
             &channel.extranonce.clone().to_vec(),
             channel.channel_id,
             None,
+            None,
         )
         .ok_or(Error::ImpossibleToCalculateMerkleRoot)?;
 
@@ -192,6 +194,7 @@ impl GroupChannel {
             &downstream.extranonce.clone().to_vec(),
             downstream.channel_id,
             None,
+            None,
         )
         .ok_or(Error::ImpossibleToCalculateMerkleRoot)
     }
diff --git a/protocols/v2/roles-logic-sv2/src/errors.rs b/protocols/v2/roles-logic-sv2/src/errors.rs
index 20c4bcd55..5ad78b42e 100644
--- a/protocols/v2/roles-logic-sv2/src/errors.rs
+++ b/protocols/v2/roles-logic-sv2/src/errors.rs
@@ -61,6 +61,8 @@ pub enum Error {
     HashrateError(InputError),
     LogicErrorMessage(std::boxed::Box<crate::parsers::PoolMessages<'static>>),
     JDSMissingTransactions,
+    AdditionalCoinbaseScriptDataTooBig,
+    NewAdditionalCoinbaseDataLenDoNotMatch,
 }
 
 impl From<BinarySv2Error> for Error {
@@ -153,6 +155,8 @@ impl Display for Error {
             HashrateError(e) => write!(f, "Impossible to get Hashrate: {:?}", e),
             LogicErrorMessage(e) => write!(f, "Message is well formatted but can not be handled: {:?}", e),
             JDSMissingTransactions => write!(f, "JD server cannot propagate the block: missing transactions"),
+            AdditionalCoinbaseScriptDataTooBig => write!(f, "Additional coinbase script data too big"),
+            NewAdditionalCoinbaseDataLenDoNotMatch => write!(f, "Channel factory can update the additional data only if the new data is the same size as the old one"),
         }
     }
 }
diff --git a/protocols/v2/roles-logic-sv2/src/job_creator.rs b/protocols/v2/roles-logic-sv2/src/job_creator.rs
index 1ed653762..1215ce4b0 100644
--- a/protocols/v2/roles-logic-sv2/src/job_creator.rs
+++ b/protocols/v2/roles-logic-sv2/src/job_creator.rs
@@ -70,7 +70,7 @@ impl JobsCreators {
         template: &mut NewTemplate,
         version_rolling_allowed: bool,
         mut pool_coinbase_outputs: Vec<TxOut>,
-        pool_signature: String,
+        additional_coinbase_script_data_len: u8,
     ) -> Result<NewExtendedMiningJob<'static>, Error> {
         let server_tx_outputs = template.coinbase_tx_outputs.to_vec();
         let mut outputs = tx_outputs_to_costum_scripts(&server_tx_outputs);
@@ -87,7 +87,7 @@ impl JobsCreators {
         new_extended_job(
             template,
             &mut pool_coinbase_outputs,
-            pool_signature,
+            additional_coinbase_script_data_len,
             next_job_id,
             version_rolling_allowed,
             self.extranonce_len,
@@ -137,7 +137,7 @@ impl JobsCreators {
 
 pub fn extended_job_from_custom_job(
     referenced_job: &mining_sv2::SetCustomMiningJob,
-    pool_signature: String,
+    additional_coinbase_script_data_len: u8,
     extranonce_len: u8,
 ) -> Result<NewExtendedMiningJob<'static>, Error> {
     let mut outputs =
@@ -158,7 +158,7 @@ pub fn extended_job_from_custom_job(
     new_extended_job(
         &mut template,
         &mut outputs,
-        pool_signature,
+        additional_coinbase_script_data_len,
         0,
         true,
         extranonce_len,
@@ -177,7 +177,7 @@ pub fn extended_job_from_custom_job(
 fn new_extended_job(
     new_template: &mut NewTemplate,
     coinbase_outputs: &mut [TxOut],
-    pool_signature: String,
+    additional_coinbase_script_data_len: u8,
     job_id: u32,
     version_rolling_allowed: bool,
     extranonce_len: u8,
@@ -193,7 +193,7 @@ fn new_extended_job(
         .map_err(|_| Error::TxVersionTooBig)?;
 
     let bip34_bytes = get_bip_34_bytes(new_template, tx_version)?;
-    let script_prefix_len = bip34_bytes.len() + pool_signature.as_bytes().len();
+    let script_prefix_len = bip34_bytes.len();
 
     let coinbase = coinbase(
         bip34_bytes,
@@ -201,7 +201,7 @@ fn new_extended_job(
         new_template.coinbase_tx_locktime,
         new_template.coinbase_tx_input_sequence,
         coinbase_outputs,
-        pool_signature,
+        additional_coinbase_script_data_len,
         extranonce_len,
     );
 
@@ -224,7 +224,12 @@ fn new_extended_job(
         version_rolling_allowed,
         merkle_path: new_template.merkle_path.clone().into_static(),
         coinbase_tx_prefix: coinbase_tx_prefix(&coinbase, script_prefix_len)?,
-        coinbase_tx_suffix: coinbase_tx_suffix(&coinbase, extranonce_len, script_prefix_len)?,
+        coinbase_tx_suffix: coinbase_tx_suffix(
+            &coinbase,
+            extranonce_len,
+            script_prefix_len,
+            additional_coinbase_script_data_len as usize,
+        )?,
     };
 
     debug!(
@@ -249,10 +254,10 @@ fn coinbase_tx_prefix(
     };
     let index = 4 // tx version
         + segwit_bytes
-        + 1 // number of inputs TODO can be also 3
+        + 1 // number of inputs (always 1)
         + 32 // prev OutPoint
         + 4 // index
-        + 1 // bytes in script TODO can be also 3
+        + 1 // bytes in script (max 100 so always 1 byte)
         + script_prefix_len; // bip34_bytes
     let r = encoded[0..index].to_vec();
     r.try_into().map_err(Error::BinarySv2Error)
@@ -264,6 +269,7 @@ fn coinbase_tx_suffix(
     coinbase: &Transaction,
     extranonce_len: u8,
     script_prefix_len: usize,
+    additional_coinbase_script_data_len: usize,
 ) -> Result<B064K<'static>, Error> {
     let encoded = coinbase.serialize();
     // If script_prefix_len is not 0 we are not in a test enviornment and the coinbase have the 0
@@ -279,6 +285,7 @@ fn coinbase_tx_suffix(
         + 4 // index
         + 1 // bytes in script TODO can be also 3
         + script_prefix_len // bip34_bytes
+        + additional_coinbase_script_data_len
         + (extranonce_len as usize)..]
         .to_vec();
     r.try_into().map_err(Error::BinarySv2Error)
@@ -327,7 +334,7 @@ fn coinbase(
     lock_time: u32,
     sequence: u32,
     coinbase_outputs: &[TxOut],
-    pool_signature: String,
+    additional_coinbase_script_data_len: u8,
     extranonce_len: u8,
 ) -> Transaction {
     // If script_prefix_len is not 0 we are not in a test enviornment and the coinbase have the 0
@@ -336,7 +343,7 @@ fn coinbase(
         0 => Witness::from_vec(vec![]),
         _ => Witness::from_vec(vec![vec![0; 32]]),
     };
-    bip34_bytes.extend_from_slice(pool_signature.as_bytes());
+    bip34_bytes.extend_from_slice(&vec![0_u8; additional_coinbase_script_data_len as usize]);
     bip34_bytes.extend_from_slice(&vec![0; extranonce_len as usize]);
     let tx_in = TxIn {
         previous_output: OutPoint::null(),
@@ -557,7 +564,7 @@ pub mod tests {
         let mut jobs_creators = JobsCreators::new(32);
 
         let job = jobs_creators
-            .on_new_template(template.borrow_mut(), false, vec![out], "".to_string())
+            .on_new_template(template.borrow_mut(), false, vec![out], 0)
             .unwrap();
 
         assert_eq!(
@@ -581,8 +588,7 @@ pub mod tests {
 
         assert_eq!(jobs_creators.lasts_new_template.len(), 0);
 
-        let _ =
-            jobs_creators.on_new_template(template.borrow_mut(), false, vec![out], "".to_string());
+        let _ = jobs_creators.on_new_template(template.borrow_mut(), false, vec![out], 0);
 
         assert_eq!(jobs_creators.lasts_new_template.len(), 1);
         assert_eq!(jobs_creators.lasts_new_template[0], template);
@@ -616,8 +622,7 @@ pub mod tests {
         let mut jobs_creators = JobsCreators::new(32);
 
         //Create a template
-        let _ =
-            jobs_creators.on_new_template(template.borrow_mut(), false, vec![out], "".to_string());
+        let _ = jobs_creators.on_new_template(template.borrow_mut(), false, vec![out], 0);
         let test_id = template.template_id;
 
         // Create a SetNewPrevHash with matching template_id
@@ -705,7 +710,7 @@ pub mod tests {
         let extranonce = &[0_u8; 32];
         let path: &[binary_sv2::U256] = &[];
         let stripped_merkle_root =
-            merkle_root_from_path(&prefix[..], &suffix[..], extranonce, path).unwrap();
+            merkle_root_from_path(&prefix[..], &suffix[..], extranonce, path, &[]).unwrap();
         let og_merkle_root = coinbase.txid().to_vec();
         assert!(
             stripped_merkle_root == og_merkle_root,
diff --git a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs
index 019f9f274..780687efe 100644
diff --git a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs
index 019f9f274..780687efe 100644
--- a/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs
+++ b/protocols/v2/roles-logic-sv2/src/job_dispatcher.rs
@@ -18,8 +18,7 @@ use std::{collections::HashMap, convert::TryInto, sync::Arc};
 use stratum_common::bitcoin::hashes::{sha256d, Hash, HashEngine};
 
-/// Used to convert an extended mining job to a standard mining job. The `extranonce` field must
-/// be exactly 32 bytes.
+/// Used to convert an extended mining job to a standard mining job
 pub fn extended_to_standard_job_for_group_channel<'a>(
     extended: &NewExtendedMiningJob,
     extranonce: &[u8],
@@ -31,6 +30,7 @@ pub fn extended_to_standard_job_for_group_channel<'a>(
         extended.coinbase_tx_suffix.inner_as_ref(),
         extranonce,
         &extended.merkle_path.inner_as_ref(),
+        &[],
     );
 
     Some(NewMiningJob {
@@ -146,6 +146,7 @@ impl GroupChannelJobDispatcher {
         &mut self,
         extended: &NewExtendedMiningJob,
         channel: &StandardChannel,
+        additional_coinbase_script_data: &[u8],
         // should be changed to return a Result>
     ) -> Option<NewMiningJob<'static>> {
         if extended.is_future() {
@@ -161,9 +162,17 @@ impl GroupChannelJobDispatcher {
         let standard_job_id = self.ids.safe_lock(|ids| ids.next()).unwrap();
 
         let extranonce: Vec<u8> = channel.extranonce.clone().into();
+        let mut prefix: Vec<u8> =
+            Vec::with_capacity(extranonce.len() + additional_coinbase_script_data.len());
+        for b in additional_coinbase_script_data {
+            prefix.push(*b);
+        }
+        for b in extranonce {
+            prefix.push(b);
+        }
         let new_mining_job_message = extended_to_standard_job_for_group_channel(
             extended,
-            &extranonce,
+            &prefix,
             channel.channel_id,
             standard_job_id,
         )?;
@@ -310,19 +319,20 @@ mod tests {
     #[test]
     fn test_group_channel_job_dispatcher() {
+        let extranonce_len = 16;
         let out = TxOut {
             value: BLOCK_REWARD,
             script_pubkey: Script::new_p2pk(&new_pub_key()),
         };
-        let pool_signature = "Stratum v2 SRI Pool".to_string();
-        let mut jobs_creators = JobsCreators::new(32);
+        let pool_signature = "Stratum v2 SRI".to_string();
+        let mut jobs_creators = JobsCreators::new(extranonce_len);
         let group_channel_id = 1;
         //Create a template
         let mut template = template_from_gen(&mut Gen::new(255));
         template.template_id = template.template_id % u64::MAX;
         template.future_template = true;
         let extended_mining_job = jobs_creators
-            .on_new_template(&mut template, false, vec![out], pool_signature)
+            .on_new_template(&mut template, false, vec![out], pool_signature.len() as u8)
             .expect("Failed to create new job");
 
         // create GroupChannelJobDispatcher
@@ -331,8 +341,9 @@ mod tests {
         // create standard channel
         let target = Target::from(U256::try_from(utils::extranonce_gen()).unwrap());
         let standard_channel_id = 2;
-        let extranonce = Extranonce::try_from(utils::extranonce_gen())
-            .expect("Failed to convert bytes to extranonce");
+        let extranonce =
+            Extranonce::try_from(utils::extranonce_gen()[0..extranonce_len as usize].to_vec())
+                .expect("Failed to convert bytes to extranonce");
         let standard_channel = StandardChannel {
             channel_id: standard_channel_id,
             group_id: group_channel_id,
@@ -341,7 +352,11 @@ mod tests {
         };
         // call target function (on_new_extended_mining_job)
         let new_mining_job = group_channel_dispatcher
-            .on_new_extended_mining_job(&extended_mining_job, &standard_channel)
+            .on_new_extended_mining_job(
+                &extended_mining_job,
+                &standard_channel,
+                &pool_signature.clone().into_bytes(),
+            )
             .unwrap();
 
         // on_new_extended_mining_job assertions
@@ -351,6 +366,7 @@ mod tests {
             &extended_mining_job,
             extranonce.clone(),
             standard_channel_id,
+            &pool_signature,
         );
         // on_new_prev_hash assertions
         if extended_mining_job.is_future() {
@@ -374,13 +390,23 @@ mod tests {
         extended_mining_job: &NewExtendedMiningJob,
         extranonce: Extranonce,
         standard_channel_id: u32,
+        pool_signature: &String,
     ) -> (u32, Vec<u8>) {
+        let extranonce: Vec<u8> = extranonce.clone().into();
+        let mut prefix: Vec<u8> = Vec::new();
+        for b in pool_signature.clone().into_bytes() {
+            prefix.push(b);
+        }
+        for b in extranonce {
+            prefix.push(b);
+        }
         // compute test merkle path
         let new_root = merkle_root_from_path(
             extended_mining_job.coinbase_tx_prefix.inner_as_ref(),
             extended_mining_job.coinbase_tx_suffix.inner_as_ref(),
-            extranonce.to_vec().as_slice(),
+            prefix.as_slice(),
             &extended_mining_job.merkle_path.inner_as_ref(),
+            &[],
         )
         .unwrap();
         // Assertions
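Editorial note: the push loops above amount to a plain concatenation. The helper below is only a restatement of what the dispatcher now feeds into `extended_to_standard_job_for_group_channel`; `build_prefix` is a hypothetical name, not in the diff.

```rust
// The additional coinbase script data always precedes the channel's
// extranonce bytes in the per-channel prefix.
fn build_prefix(additional_coinbase_script_data: &[u8], extranonce: &[u8]) -> Vec<u8> {
    let mut prefix =
        Vec::with_capacity(additional_coinbase_script_data.len() + extranonce.len());
    prefix.extend_from_slice(additional_coinbase_script_data);
    prefix.extend_from_slice(extranonce);
    prefix
}
```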
diff --git a/protocols/v2/roles-logic-sv2/src/utils.rs b/protocols/v2/roles-logic-sv2/src/utils.rs
index d4b6f8944..d85fef1c8 100644
--- a/protocols/v2/roles-logic-sv2/src/utils.rs
+++ b/protocols/v2/roles-logic-sv2/src/utils.rs
@@ -142,10 +142,16 @@ pub fn merkle_root_from_path<T: AsRef<[u8]>>(
     coinbase_tx_prefix: &[u8],
     coinbase_tx_suffix: &[u8],
     extranonce: &[u8],
     path: &[T],
+    additional_coinbase_script_data: &[u8],
 ) -> Option<Vec<u8>> {
-    let mut coinbase =
-        Vec::with_capacity(coinbase_tx_prefix.len() + coinbase_tx_suffix.len() + extranonce.len());
+    let mut coinbase = Vec::with_capacity(
+        coinbase_tx_prefix.len()
+            + coinbase_tx_suffix.len()
+            + extranonce.len()
+            + additional_coinbase_script_data.len(),
+    );
     coinbase.extend_from_slice(coinbase_tx_prefix);
+    coinbase.extend_from_slice(additional_coinbase_script_data);
     coinbase.extend_from_slice(extranonce);
     coinbase.extend_from_slice(coinbase_tx_suffix);
     let coinbase = match Transaction::deserialize(&coinbase[..]) {
@@ -549,6 +555,7 @@ fn test_merkle_root_from_path() {
         &coinbase_bytes[30..],
         &coinbase_bytes[20..30],
         &path,
+        &[],
     )
     .unwrap();
     assert_eq!(expected_root, root);
@@ -565,13 +572,20 @@ fn test_merkle_root_from_path() {
         &coinbase_bytes[30..],
         &coinbase_bytes[20..30],
         &path,
+        &[],
     )
     .unwrap();
     assert_eq!(coinbase_id, root);
 
     //Target None return path on serialization
     assert_eq!(
-        merkle_root_from_path(&coinbase_bytes, &coinbase_bytes, &coinbase_bytes, &path),
+        merkle_root_from_path(
+            &coinbase_bytes,
+            &coinbase_bytes,
+            &coinbase_bytes,
+            &path,
+            &[]
+        ),
         None
     );
 }
@@ -676,6 +690,7 @@ pub fn get_target(
         coinbase_tx_suffix,
         extranonce,
         &(merkle_path[..]),
+        &[],
     )
     .unwrap()
     .try_into()
@@ -778,9 +793,14 @@ impl<'a> From> for bitcoin::Block {
             let id = id.as_ref().to_vec();
             path.push(id);
         }
-        let merkle_root =
-            merkle_root_from_path(&coinbase_pre[..], &coinbase_suf[..], &extranonce[..], &path)
-                .expect("Invalid coinbase");
+        let merkle_root = merkle_root_from_path(
+            &coinbase_pre[..],
+            &coinbase_suf[..],
+            &extranonce[..],
+            &path,
+            &[],
+        )
+        .expect("Invalid coinbase");
         let merkle_root = Hash::from_inner(merkle_root.try_into().unwrap());
 
         let prev_blockhash = u256_to_block_hash(message.prev_hash.into_static());
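Editorial note: a minimal sketch of the call shape after this PR; every argument below is a placeholder, and `recompute_root` is a hypothetical wrapper, not crate code.

```rust
use roles_logic_sv2::utils::merkle_root_from_path;

// The full coinbase is rebuilt as
// prefix || additional_coinbase_script_data || extranonce || suffix
// before hashing, so former 4-argument callers now pass 5.
fn recompute_root(
    coinbase_tx_prefix: &[u8],
    coinbase_tx_suffix: &[u8],
    extranonce: &[u8],
    path: &[Vec<u8>],
    additional_coinbase_script_data: &[u8],
) -> Option<Vec<u8>> {
    merkle_root_from_path(
        coinbase_tx_prefix,
        coinbase_tx_suffix,
        extranonce,
        path,
        additional_coinbase_script_data, // roles with no extra data pass &[]
    )
}
```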
# Stratum V2 Mining Protocol Messages Crate
+#![no_std]
+
+//! # Mining Protocol
+//! ## Channels
+//! The protocol is designed such that downstream devices (or proxies) open communication
+//! channels with upstream stratum nodes within established connections. The upstream stratum
+//! endpoints could be actual mining servers or proxies that pass the messages further upstream.
+//! Each channel identifies a dedicated mining session associated with an authorized user.
+//! Upstream stratum nodes accept work submissions and specify a mining target on a
+//! per-channel basis.
 //!
-//! `mining_sv2` is a Rust crate that implements a set of messages defined in the Mining protocol
-//! of Stratum V2.
+//! There can theoretically be up to 2^32 open channels within one physical connection to an
+//! upstream stratum node. All channels are independent of each other, but share some messages
+//! broadcasted from the server for higher efficiency (e.g. information about a new prevhash).
+//! Each channel is identified by its channel_id (U32), which is consistent throughout the whole
+//! life of the connection.
 //!
-//! The Mining protocol enables the distribution of work to mining devices and the submission of
-//! proof-of-work results.
+//! A proxy can either transparently allow its clients to open separate channels with the server
+//! (preferred behaviour) or aggregate open connections from downstream devices into its own
+//! open channel with the server and translate the messages accordingly (present mainly for
+//! allowing v1 proxies). Both options have some practical use cases. In either case, proxies
+//! SHOULD aggregate clients’ channels into a smaller number of TCP connections. This saves
+//! network traffic for broadcast messages sent by a server because fewer messages need to be
+//! sent in total, which leads to lower latencies as a result. And it further increases efficiency by
+//! allowing larger packets to be sent.
 //!
-//! For further information about the messages, please refer to [Stratum V2 documentation - Mining](https://stratumprotocol.org/specification/05-Mining-Protocol/).
+//! The protocol defines three types of channels: **standard channels**, **extended channels** (mining
+//! sessions) and **group channels** (organizational), which are useful for different purposes.
+//! The main difference between standard and extended channels is that standard channels
+//! cannot manipulate the coinbase transaction / Merkle path, as they operate solely on provided
+//! Merkle roots. We call this **header-only mining**. Extended channels, on the other hand, are
+//! given extensive control over the search space so that they can implement various advanced
+//! use cases such as translation between v1 and v2 protocols, difficulty aggregation, custom
+//! search space splitting, etc.
 //!
-//! ## Build Options
+//! This separation vastly simplifies the protocol implementation for clients that don’t support
+//! extended channels, as they only need to implement the subset of protocol messages related to
+//! standard channels (see Mining Protocol Messages for details).
 //!
-//! This crate can be built with the following features:
-//! - `no_std`: Disables the standard library.
-//! - `with_serde`: Enables support for serialization and deserialization using Serde.
+//! ### Standard Channels
 //!
-//! **Note that `with_serde` feature flag is only used for the Message Generator, and deprecated
-//! for
-//! any other kind of usage. It will likely be fully deprecated in the future.**
+//! Standard channels are intended to be used by end mining devices.
+//! The size of the search space for one standard channel (header-only mining) for one particular
+//! value in the nTime field is 2^(NONCE_BITS + VERSION_ROLLING_BITS) = ~280Th, where
+//! NONCE_BITS = 32 and VERSION_ROLLING_BITS = 16. This is a guaranteed space before
+//! nTime rolling (or changing the Merkle root).
+//! The protocol dedicates all directly modifiable bits (version, nonce, and nTime) from the block
+//! header to one mining channel. This is the smallest assignable unit of search space by the
+//! protocol. The client which opened the particular channel owns the whole assigned space and
+//! can split it further if necessary (e.g. for multiple hashing boards and for individual chips etc.).
 //!
-//! ## Usage
+//! ### Extended channels
 //!
-//! To include this crate in your project, run:
-//! ```bash
-//! $ cargo add mining_sv2
-//! ```
+//! Extended channels are intended to be used by proxies. Upstream servers which accept
+//! connections and provide work MUST support extended channels. Clients, on the other hand, do
+//! not have to support extended channels, as they MAY be implemented more simply with only
+//! standard channels at the end-device level. Thus, upstream servers providing work MUST also
+//! support standard channels.
+//! The size of search space for an extended channel is
+//! 2^(NONCE_BITS+VERSION_ROLLING_BITS+extranonce_size*8) per nTime value.
 //!
-//! For further information about the mining protocol, please refer to [Stratum V2 documentation -
-//! Mining Protocol](https://stratumprotocol.org/specification/05-Mining-Protocol/).
-#![cfg_attr(feature = "no_std", no_std)]
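Editorial note: the two search-space figures quoted in the doc comment above can be sanity-checked with a throwaway calculation; this is not crate code, just arithmetic.

```rust
const NONCE_BITS: u32 = 32;
const VERSION_ROLLING_BITS: u32 = 16;

fn main() {
    // Standard channel: 2^48 = 281_474_976_710_656 hashes per nTime value,
    // i.e. roughly 280 TH, matching the "~280Th" above.
    let standard: u128 = 1u128 << (NONCE_BITS + VERSION_ROLLING_BITS);
    println!("standard channel: {standard} hashes per nTime value");

    // Extended channel with an 8-byte extranonce: 2^(48 + 64) = 2^112.
    let extranonce_size: u32 = 8;
    let extended_bits = NONCE_BITS + VERSION_ROLLING_BITS + extranonce_size * 8;
    println!("extended channel: 2^{extended_bits} hashes per nTime value");
}
```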
+//! ### Group Channels
+//!
+//! Standard channels opened within one particular connection can be grouped together to be
+//! addressable by a common communication group channel.
+//! Whenever a standard channel is created it is always put into some channel group identified by
+//! its group_channel_id. Group channel ID namespace is the same as channel ID namespace on a
+//! particular connection but the values chosen for group channel IDs must be distinct.
+//!
+//! ### Future Jobs
+//! An empty future block job or speculated non-empty job can be sent in advance to speed up
+//! new mining job distribution. The point is that the mining server MAY have precomputed such a
+//! job and is able to pre-distribute it for all active channels. The only missing information to
+//! start to mine on the new block is the new prevhash. This information can be provided
+//! independently. Such an approach improves the efficiency of the protocol where the upstream node
+//! doesn’t waste precious time immediately after a new block is found in the network.
+//!
+//! ### Hashing Space Distribution
+//! Each mining device has to work on a unique part of the whole search space. The full search
+//! space is defined in part by valid values in the following block header fields:
+//! * Nonce header field (32 bits),
+//! * Version header field (16 bits, as specified by BIP 320),
+//! * Timestamp header field.
+//!
+//! The other portion of the block header that’s used to define the full search space is the Merkle
+//! root hash of all transactions in the block, projected to the last variable field in the block
+//! header:
+//!
+//! * Merkle root, deterministically computed from:
+//!   * Coinbase transaction: typically 4-8 bytes, possibly much more.
+//!   * Transaction set: practically unbounded space. All roles in Stratum v2 MUST NOT
+//!     use transaction selection/ordering for additional hash space extension. This
+//!     stems both from the concept that miners/pools should be able to choose their
+//!     transaction set freely without any interference with the protocol, and also to
+//!     enable future protocol modifications to Bitcoin. In other words, any rules
+//!     imposed on transaction selection/ordering by miners not described in the rest of
+//!     this document may result in invalid work/blocks.
+//!
+//! Mining servers MUST assign a unique subset of the search space to each connection/channel
+//! (and therefore each mining device) frequently and rapidly enough so that the mining devices
+//! are not running out of search space. Unique jobs can be generated regularly by:
+//! * Putting unique data into the coinbase for each connection/channel, and/or
+//! * Using unique work from a work provider, e.g. a previous work update (note that this is
+//!   likely more difficult to implement, especially in light of the requirement that transaction
+//!   selection/ordering not be used explicitly for additional hash space distribution).
+//!
+//! This protocol explicitly expects that upstream server software is able to manage the size of
+//! the hashing space correctly for its clients and can provide new jobs quickly enough.
+use alloc::vec::Vec;
 use binary_sv2::{B032, U256};
 use core::{
     cmp::{Ord, PartialOrd},
@@ -137,11 +215,9 @@ impl Ord for Target {
 
 // WARNING: do not derive Copy on this type. Some operations performed to a copy of an extranonce
 // do not affect the original, and this may lead to different extranonce inconsistency
-/// Extranonce bytes which need to be added to the coinbase to form a fully valid submission.
-///
+/// Extranonce bytes which need to be added to the coinbase to form a fully valid submission:
+/// (full coinbase = coinbase_tx_prefix + extranonce + coinbase_tx_suffix).
 /// Representation is in big endian, so tail is for the digits relative to smaller powers
-///
-/// `full coinbase = coinbase_tx_prefix + extranonce + coinbase_tx_suffix`.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct Extranonce {
     extranonce: alloc::vec::Vec<u8>,
@@ -202,6 +278,12 @@ impl core::convert::TryFrom<alloc::vec::Vec<u8>> for Extranonce {
     }
 }
 
+impl AsRef<[u8]> for Extranonce {
+    fn as_ref(&self) -> &[u8] {
+        self.extranonce.as_ref()
+    }
+}
+
 impl Extranonce {
     pub fn new(len: usize) -> Option<Self> {
         if len > MAX_EXTRANONCE_LEN {
@@ -236,12 +318,21 @@ impl Extranonce {
 
     /// Return only the prefix part of the extranonce
     /// If the required size is greater than the extranonce len it returns None
-    pub fn into_prefix(&self, prefix_len: usize) -> Option<B032<'static>> {
+    pub fn into_prefix(
+        &self,
+        prefix_len: usize,
+        additional_coinbase_script_data: &[u8],
+    ) -> Option<B032<'static>> {
         if prefix_len > self.extranonce.len() {
             None
         } else {
-            let mut prefix = self.extranonce.clone();
-            prefix.resize(prefix_len, 0);
+            let mut prefix = Vec::with_capacity(prefix_len + additional_coinbase_script_data.len());
+            for b in additional_coinbase_script_data {
+                prefix.push(*b);
+            }
+            for i in 0..prefix_len {
+                prefix.push(self.extranonce[i]);
+            }
             // unwrap is safe as prefix_len can not be greater than 32 because it is not possible
             // to construct an Extranonce with the inner vector greater than 32.
             Some(prefix.try_into().unwrap())
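Editorial note: a minimal sketch of the new `into_prefix` contract, using made-up bytes; it mirrors the updated tests further down, which pass `&[]` for the additional data.

```rust
use core::convert::TryFrom;
use mining_sv2::Extranonce;

fn main() {
    // Made-up 9-byte extranonce and a 3-byte additional-data tag.
    let extranonce = Extranonce::try_from(vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9]).unwrap();
    // The additional coinbase script data is prepended to the requested
    // extranonce prefix, so the result is the tag followed by 4 bytes.
    let prefix = extranonce.into_prefix(4, b"tag").unwrap();
    assert_eq!(prefix.to_vec(), vec![b't', b'a', b'g', 1, 2, 3, 4]);
}
```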
@@ -258,9 +349,9 @@ impl From<&mut ExtendedExtranonce> for Extranonce {
 }
 
 #[derive(Debug, Clone)]
-/// Downstram and upstream are relative to an actor of the protocol P. In simple terms, upstream is
-/// the part of the protocol that a user P sees when he looks above and downstream when he looks
-/// beneath.
+/// Downstream and upstream are not global terms but are relative
+/// to an actor of the protocol P. In simple terms, upstream is the part of the protocol that a
+/// user P sees when he looks above and downstream when he looks beneath.
 ///
 /// An ExtendedExtranonce is defined by 3 ranges:
 ///
@@ -299,6 +390,7 @@ impl From<&mut ExtendedExtranonce> for Extranonce {
 /// range_2 -> 32..32 no more downstream
 ///
 ///
+///
 /// About how the pool work having both extended and standard downstreams:
 ///
 /// the pool reserve the first 16 bytes for herself and let downstreams change the last 16, so
@@ -320,6 +412,7 @@ impl From<&mut ExtendedExtranonce> for Extranonce {
 /// 0000 0000 0000 0000 0000 0000 0000 0001 ecc ecc
 ///
 ///
+///
 /// ExtendedExtranonce type is meant to be used in cases where the extranonce length is not
 /// 32bytes. So, the inner part is an array of 32bytes, but only the firsts corresponding to the
 /// range_2.end are used by the pool
@@ -637,8 +730,7 @@ pub mod tests {
 
         assert!(extranonce.is_some());
 
-        //validate that the extranonce is the concatenation of the upstream part and the downstream
-        // part
+        //validate that the extranonce is the concatenation of the upstream part and the downstream part
         assert_eq!(
             extra_content,
             extranonce.unwrap().extranonce.to_vec()[downstream_len..downstream_len * 2]
@@ -836,8 +928,8 @@ pub mod tests {
                 }
                 None => false,
             },
-            // if .next_standard() method falls in None case, this means that the range_2 is at
-            // maximum value, so every entry must be 255 as u8
+            // if .next_standard() method falls in None case, this means that the range_2 is at maximum
+            // value, so every entry must be 255 as u8
             None => {
                 for b in inner[range_2.start..range_2.end].iter() {
                     if b != &255_u8 {
@@ -1025,7 +1117,7 @@ pub mod tests {
     fn test_extranonce_to_prefix() {
         let inner = vec![1, 2, 3, 4, 5, 6, 7, 8, 9];
         let extranone = Extranonce { extranonce: inner };
-        let prefix = extranone.into_prefix(4).unwrap();
+        let prefix = extranone.into_prefix(4, &[]).unwrap();
         assert!(vec![1, 2, 3, 4] == prefix.to_vec())
     }
 
@@ -1033,7 +1125,7 @@ pub mod tests {
     fn test_extranonce_to_prefix_not_greater_than_inner() {
         let inner = vec![1, 2, 3, 4, 5, 6, 7, 8, 9];
         let extranone = Extranonce { extranonce: inner };
-        let prefix = extranone.into_prefix(20);
+        let prefix = extranone.into_prefix(20, &[]);
         assert!(prefix.is_none())
     }
diff --git a/roles/Cargo.lock b/roles/Cargo.lock
index 20c738898..39164320b 100644
--- a/roles/Cargo.lock
+++ b/roles/Cargo.lock
@@ -1607,7 +1607,7 @@ dependencies = [
 
 [[package]]
 name = "mining_sv2"
-version = "1.0.1"
+version = "2.0.0"
 dependencies = [
  "binary_sv2",
  "const_sv2",
@@ -2016,7 +2016,7 @@ dependencies = [
 
 [[package]]
 name = "roles_logic_sv2"
-version = "1.2.2"
+version = "2.0.0"
 dependencies = [
  "binary_sv2",
  "chacha20poly1305",
diff --git a/roles/jd-client/Cargo.toml b/roles/jd-client/Cargo.toml
index 92c3e030e..7a54343c8 100644
--- a/roles/jd-client/Cargo.toml
+++ b/roles/jd-client/Cargo.toml
@@ -24,7 +24,7 @@ buffer_sv2 = { version = "^1.0.0", path = "../../utils/buffer" }
 codec_sv2 = { version = "^1.0.1", path = "../../protocols/v2/codec-sv2", features = ["noise_sv2", "with_buffer_pool"] }
 framing_sv2 = { version = "^3.0.0", path = "../../protocols/v2/framing-sv2" }
 network_helpers_sv2 = { version = "2.0.0", path = "../roles-utils/network-helpers", features=["with_tokio", "with_buffer_pool"] }
-roles_logic_sv2 = { version = "^1.0.0", path = "../../protocols/v2/roles-logic-sv2" }
+roles_logic_sv2 = { version = "^2.0.0", path = "../../protocols/v2/roles-logic-sv2" }
 serde = { version = "1.0.89", default-features = false, features = ["derive", "alloc"] }
 futures = "0.3.25"
 tokio = { version = "1", features = ["full"] }
diff --git a/roles/jd-client/src/lib/downstream.rs b/roles/jd-client/src/lib/downstream.rs
index c5d49d304..36dad4514 100644
--- a/roles/jd-client/src/lib/downstream.rs
+++ b/roles/jd-client/src/lib/downstream.rs
@@ -492,8 +492,9 @@ impl
             share_per_min,
             kind,
             coinbase_outputs,
-            "SOLO".to_string(),
-        );
+            "SOLO".as_bytes().to_vec(),
+        )
+        .expect("Signature + extranonce lens exceed 32 bytes");
         self.status.set_channel(channel_factory);
 
         let request_id = m.request_id;
diff --git a/roles/jd-client/src/lib/mod.rs b/roles/jd-client/src/lib/mod.rs
index 467ac52b5..b5b1fc0fa 100644
--- a/roles/jd-client/src/lib/mod.rs
+++ b/roles/jd-client/src/lib/mod.rs
@@ -247,7 +247,6 @@ impl JobDeclaratorClient {
                 upstream_addr,
                 upstream_config.authority_pubkey,
                 0, // TODO
-                upstream_config.pool_signature.clone(),
                 status::Sender::Upstream(tx_status.clone()),
                 task_collector.clone(),
                 Arc::new(Mutex::new(PoolChangerTrigger::new(timeout))),
diff --git a/roles/jd-client/src/lib/upstream_sv2/upstream.rs b/roles/jd-client/src/lib/upstream_sv2/upstream.rs
index 4877f44e7..ddae5ea56 100644
--- a/roles/jd-client/src/lib/upstream_sv2/upstream.rs
+++ b/roles/jd-client/src/lib/upstream_sv2/upstream.rs
@@ -114,8 +114,6 @@ pub struct Upstream {
     pub min_extranonce_size: u16,
     #[allow(dead_code)]
     pub upstream_extranonce1_size: usize,
-    /// String be included in coinbase tx input scriptsig
-    pub pool_signature: String,
     /// Receives messages from the SV2 Upstream role
     pub receiver: Receiver,
     /// Sends messages to the SV2 Upstream role
@@ -151,7 +149,6 @@ impl Upstream {
         address: SocketAddr,
         authority_public_key: Secp256k1PublicKey,
         min_extranonce_size: u16,
-        pool_signature: String,
         tx_status: status::Sender,
         task_collector: Arc>>,
         pool_chaneger_trigger: Arc>,
@@ -189,7 +186,6 @@ impl Upstream {
             min_extranonce_size,
             upstream_extranonce1_size: 16, /* 16 is the default since that is the only value the
                                             * pool supports currently */
-            pool_signature,
             tx_status,
             receiver,
             sender,
@@ -564,7 +560,6 @@ impl ParseUpstreamMiningMessages
     ) -> Result, RolesLogicError> {
         info!("Receive open extended mining channel success");
         let ids = Arc::new(Mutex::new(roles_logic_sv2::utils::GroupId::new()));
-        let pool_signature = self.pool_signature.clone();
         let prefix_len = m.extranonce_prefix.to_vec().len();
         let self_len = 0;
         let total_len = prefix_len + m.extranonce_size as usize;
@@ -586,8 +581,9 @@ impl ParseUpstreamMiningMessages
                     Ok(SendTo::Respond(Mining::SubmitSharesSuccess(success)))
                 },
             },
-            Err(_) => todo!(),
+            Err(e) => {
+                dbg!(e);
+                panic!("Internal Error: unexpected message from channel factory");
+            }
         }
     }
diff --git a/roles/pool/src/lib/mining_pool/mod.rs b/roles/pool/src/lib/mining_pool/mod.rs
index 2b179c788..cfb22b578 100644
--- a/roles/pool/src/lib/mining_pool/mod.rs
+++ b/roles/pool/src/lib/mining_pool/mod.rs
@@ -597,11 +597,11 @@ impl Pool {
         sender_message_received_signal: Sender<()>,
         status_tx: status::Sender,
     ) -> Arc> {
-        let extranonce_len = 32;
+        let extranonce_len = 13;
         let range_0 = std::ops::Range { start: 0, end: 0 };
-        let range_1 = std::ops::Range { start: 0, end: 16 };
+        let range_1 = std::ops::Range { start: 0, end: 5 };
         let range_2 = std::ops::Range {
-            start: 16,
+            start: 5,
             end: extranonce_len,
         };
         let ids = Arc::new(Mutex::new(roles_logic_sv2::utils::GroupId::new()));
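Editorial note: the pool's new extranonce layout spelled out as a standalone sketch; `pool_extranonce_ranges` is a hypothetical name, and the assertion encodes the assumption that 8 bytes remain for the downstream search space, matching the `min_extranonce_size = 8` used in the updated message-generator tests further below.

```rust
use std::ops::Range;

// 13 bytes total: nothing reserved further upstream (range_0 is empty),
// 5 pool-owned prefix bytes (range_1), 8 bytes of downstream search
// space (range_2).
fn pool_extranonce_ranges() -> (Range<usize>, Range<usize>, Range<usize>) {
    let extranonce_len = 13;
    let range_0 = 0..0;
    let range_1 = 0..5;
    let range_2 = 5..extranonce_len;
    debug_assert_eq!(range_2.end - range_2.start, 8);
    (range_0, range_1, range_2)
}
```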
@@ -611,15 +611,18 @@ impl Pool {
         let creator = JobsCreators::new(extranonce_len as u8);
         let share_per_min = 1.0;
         let kind = roles_logic_sv2::channel_logic::channel_factory::ExtendedChannelKind::Pool;
-        let channel_factory = Arc::new(Mutex::new(PoolChannelFactory::new(
-            ids,
-            extranonces,
-            creator,
-            share_per_min,
-            kind,
-            pool_coinbase_outputs.expect("Invalid coinbase output in config"),
-            config.pool_signature.clone(),
-        )));
+        let channel_factory = Arc::new(Mutex::new(
+            PoolChannelFactory::new(
+                ids,
+                extranonces,
+                creator,
+                share_per_min,
+                kind,
+                pool_coinbase_outputs.expect("Invalid coinbase output in config"),
+                config.pool_signature.clone().into_bytes(),
+            )
+            .expect("Signature + extranonce lens exceed 32 bytes"),
+        ));
         let pool = Arc::new(Mutex::new(Pool {
             downstreams: HashMap::with_hasher(BuildNoHashHasher::default()),
             solution_sender,
@@ -744,9 +747,8 @@ mod test {
 
     use super::Configuration;
 
-    // this test is used to verify the `coinbase_tx_prefix` and `coinbase_tx_suffix` values tested
-    // against in message generator
-    // `stratum/test/message-generator/test/pool-sri-test-extended.json`
+    // this test is used to verify the `coinbase_tx_prefix` and `coinbase_tx_suffix` values tested against in
+    // message generator `stratum/test/message-generator/test/pool-sri-test-extended.json`
     #[test]
     fn test_coinbase_outputs_from_config() {
         let config_path = "./config-examples/pool-config-local-tp-example.toml";
@@ -840,8 +842,8 @@ mod test {
     // copied from roles-logic-sv2::job_creator
     fn coinbase_tx_prefix(coinbase: &Transaction, script_prefix_len: usize) -> B064K<'static> {
         let encoded = coinbase.serialize();
-        // If script_prefix_len is not 0 we are not in a test enviornment and the coinbase have the
-        // 0 witness
+        // If script_prefix_len is not 0 we are not in a test environment and the coinbase has the 0
+        // witness
         let segwit_bytes = match script_prefix_len {
             0 => 0,
             _ => 2,
@@ -864,8 +866,8 @@ mod test {
         script_prefix_len: usize,
     ) -> B064K<'static> {
         let encoded = coinbase.serialize();
-        // If script_prefix_len is not 0 we are not in a test enviornment and the coinbase have the
-        // 0 witness
+        // If script_prefix_len is not 0 we are not in a test environment and the coinbase has the 0
+        // witness
         let segwit_bytes = match script_prefix_len {
             0 => 0,
             _ => 2,
diff --git a/roles/test-utils/mining-device-sv1/src/job.rs b/roles/test-utils/mining-device-sv1/src/job.rs
index 1d6b3d2bc..73ef57c8d 100644
--- a/roles/test-utils/mining-device-sv1/src/job.rs
+++ b/roles/test-utils/mining-device-sv1/src/job.rs
@@ -49,6 +49,7 @@ impl Job {
             &coinbase_tx_suffix,
             &extranonce,
             &path,
+            &[],
         )
         .unwrap();
         let merkle_root: [u8; 32] = merkle_root.try_into().unwrap();
diff --git a/roles/test-utils/mining-device/Cargo.toml b/roles/test-utils/mining-device/Cargo.toml
index 66b700dfa..153b2068b 100644
--- a/roles/test-utils/mining-device/Cargo.toml
+++ b/roles/test-utils/mining-device/Cargo.toml
@@ -22,7 +22,7 @@ path = "src/lib/mod.rs"
 [dependencies]
 stratum-common = { version = "1.0.0", path = "../../../common" }
 codec_sv2 = { version = "^1.0.1", path = "../../../protocols/v2/codec-sv2", features=["noise_sv2"] }
-roles_logic_sv2 = { version = "1.0.0", path = "../../../protocols/v2/roles-logic-sv2" }
+roles_logic_sv2 = { version = "2.0.0", path = "../../../protocols/v2/roles-logic-sv2" }
 const_sv2 = { version = "3.0.0", path = "../../../protocols/v2/const-sv2" }
 async-channel = "1.5.1"
 binary_sv2 = { version = "1.0.0", path =
"../../../protocols/v2/binary-sv2/binary-sv2" } diff --git a/roles/tests-integration/tests/common/mod.rs b/roles/tests-integration/tests/common/mod.rs index bb716aa53..67d0e8281 100644 --- a/roles/tests-integration/tests/common/mod.rs +++ b/roles/tests-integration/tests/common/mod.rs @@ -425,8 +425,16 @@ pub async fn start_sv2_translator(upstream: SocketAddr) -> SocketAddr { downstream_difficulty_config, ); - let config = - translator_sv2::proxy_config::ProxyConfig::new(upstream_conf, downstream_conf, 2, 2, 8); + let min_extranonce2_size = 5; + let min_version = 5; + let max_version = 5; + let config = translator_sv2::proxy_config::ProxyConfig::new( + upstream_conf, + downstream_conf, + max_version, + min_version, + min_extranonce2_size, + ); let translator_v2 = translator_sv2::TranslatorSv2::new(config); tokio::spawn(async move { translator_v2.start().await; diff --git a/roles/translator/Cargo.toml b/roles/translator/Cargo.toml index c4ffecd18..9c1bc5cec 100644 --- a/roles/translator/Cargo.toml +++ b/roles/translator/Cargo.toml @@ -30,7 +30,7 @@ codec_sv2 = { version = "^1.0.1", path = "../../protocols/v2/codec-sv2", feature framing_sv2 = { version = "^3.0.0", path = "../../protocols/v2/framing-sv2" } network_helpers_sv2 = { version = "2.0.0", path = "../roles-utils/network-helpers", features=["async_std", "with_buffer_pool"] } once_cell = "1.12.0" -roles_logic_sv2 = { version = "^1.0.0", path = "../../protocols/v2/roles-logic-sv2" } +roles_logic_sv2 = { version = "^2.0.0", path = "../../protocols/v2/roles-logic-sv2" } serde = { version = "1.0.89", default-features = false, features = ["derive", "alloc"] } serde_json = { version = "1.0.64", default-features = false, features = ["alloc"] } futures = "0.3.25" diff --git a/roles/translator/config-examples/tproxy-config-hosted-pool-example.toml b/roles/translator/config-examples/tproxy-config-hosted-pool-example.toml index 47d4ea875..cd1202f5b 100644 --- a/roles/translator/config-examples/tproxy-config-hosted-pool-example.toml +++ b/roles/translator/config-examples/tproxy-config-hosted-pool-example.toml @@ -20,7 +20,7 @@ min_supported_version = 2 # Max value: 16 (leaves 0 bytes for search space splitting of downstreams) # Max value for CGminer: 8 # Min value: 2 -min_extranonce2_size = 8 +min_extranonce2_size = 5 # Difficulty params [downstream_difficulty_config] diff --git a/roles/translator/config-examples/tproxy-config-local-jdc-example.toml b/roles/translator/config-examples/tproxy-config-local-jdc-example.toml index 5fe4a8eeb..1e18457ac 100644 --- a/roles/translator/config-examples/tproxy-config-local-jdc-example.toml +++ b/roles/translator/config-examples/tproxy-config-local-jdc-example.toml @@ -19,8 +19,8 @@ min_supported_version = 2 # Minimum extranonce2 size for downstream # Max value: 16 (leaves 0 bytes for search space splitting of downstreams) # Max value for CGminer: 8 -# Min value: 2 -min_extranonce2_size = 8 +# Min value: 5 +min_extranonce2_size = 5 # Difficulty params [downstream_difficulty_config] diff --git a/roles/translator/config-examples/tproxy-config-local-pool-example.toml b/roles/translator/config-examples/tproxy-config-local-pool-example.toml index b4359d5ab..12d6f2f17 100644 --- a/roles/translator/config-examples/tproxy-config-local-pool-example.toml +++ b/roles/translator/config-examples/tproxy-config-local-pool-example.toml @@ -20,7 +20,7 @@ min_supported_version = 2 # Max value: 16 (leaves 0 bytes for search space splitting of downstreams) # Max value for CGminer: 8 # Min value: 2 -min_extranonce2_size = 8 
+min_extranonce2_size = 5 # Difficulty params [downstream_difficulty_config] diff --git a/roles/translator/src/lib/mod.rs b/roles/translator/src/lib/mod.rs index 7b47a40e6..e225a133a 100644 --- a/roles/translator/src/lib/mod.rs +++ b/roles/translator/src/lib/mod.rs @@ -206,6 +206,7 @@ impl TranslatorSv2 { upstream.clone(), proxy_config.min_supported_version, proxy_config.max_supported_version, + proxy_config.min_extranonce2_size, ) .await { diff --git a/roles/translator/src/lib/proxy/bridge.rs b/roles/translator/src/lib/proxy/bridge.rs index 152521757..a931e391e 100644 --- a/roles/translator/src/lib/proxy/bridge.rs +++ b/roles/translator/src/lib/proxy/bridge.rs @@ -101,7 +101,6 @@ impl Bridge { share_per_min, ExtendedChannelKind::Proxy { upstream_target }, None, - String::from(""), up_id, ), future_jobs: vec![], @@ -341,6 +340,10 @@ impl Bridge { }) .map_err(|_| PoisonLock)?; + let extranonce_len = self_ + .safe_lock(|s| s.channel_factory.get_extranonce_len()) + .unwrap(); + let mut match_a_future_job = false; while let Some(job) = future_jobs.pop() { if job.job_id == sv2_set_new_prev_hash.job_id { @@ -350,6 +353,7 @@ impl Bridge { sv2_set_new_prev_hash.clone(), job, true, + extranonce_len, ); // Get the sender to send the mining.notify to the Downstream @@ -429,6 +433,9 @@ impl Bridge { .on_new_extended_mining_job(sv2_new_extended_mining_job.as_static().clone()) }) .map_err(|_| PoisonLock)??; + let extranonce_len = self_ + .safe_lock(|s| s.channel_factory.get_extranonce_len()) + .unwrap(); // If future_job=true, this job is meant for a future SetNewPrevHash that the proxy // has yet to receive. Insert this new job into the job_mapper . @@ -457,6 +464,7 @@ impl Bridge { last_p_hash, sv2_new_extended_mining_job.clone(), false, + extranonce_len, ); // Get the sender to send the mining.notify to the Downstream tx_sv1_notify.send(notify.clone())?; diff --git a/roles/translator/src/lib/proxy/next_mining_notify.rs b/roles/translator/src/lib/proxy/next_mining_notify.rs index 7bcaf44f1..c61198667 100644 --- a/roles/translator/src/lib/proxy/next_mining_notify.rs +++ b/roles/translator/src/lib/proxy/next_mining_notify.rs @@ -16,9 +16,10 @@ pub fn create_notify( new_prev_hash: SetNewPrevHash<'static>, new_job: NewExtendedMiningJob<'static>, clean_jobs: bool, + extranonce_len: usize, ) -> server_to_client::Notify<'static> { // TODO 32 must be changed! 
- let new_job = extended_job_to_non_segwit(new_job, 32) + let new_job = extended_job_to_non_segwit(new_job, extranonce_len) .expect("failed to convert extended job to non segwit"); // Make sure that SetNewPrevHash + NewExtendedMiningJob is matching (not future) let job_id = new_job.job_id.to_string(); diff --git a/roles/translator/src/lib/upstream_sv2/upstream.rs b/roles/translator/src/lib/upstream_sv2/upstream.rs index c025c1407..bdefe6373 100644 --- a/roles/translator/src/lib/upstream_sv2/upstream.rs +++ b/roles/translator/src/lib/upstream_sv2/upstream.rs @@ -171,8 +171,7 @@ impl Upstream { job_id: None, last_job_id: None, min_extranonce_size, - upstream_extranonce1_size: 16, /* 16 is the default since that is the only value the - * pool supports currently */ + upstream_extranonce1_size: 8, tx_sv2_extranonce, tx_status, target, @@ -186,6 +185,7 @@ impl Upstream { self_: Arc>, min_version: u16, max_version: u16, + min_extranonce_size: u16, ) -> ProxyResult<'static, ()> { // Get the `SetupConnection` message with Mining Device information (currently hard coded) let setup_connection = Self::get_setup_connection_message(min_version, max_version, false)?; @@ -242,9 +242,8 @@ impl Upstream { request_id: 0, // TODO user_identity, // TODO nominal_hash_rate, - max_target: u256_from_int(u64::MAX), // TODO - min_extranonce_size: 8, /* 8 is the max extranonce2 size the braiins - * pool supports */ + max_target: u256_from_int(u64::MAX), + min_extranonce_size, }); // reset channel hashrate so downstreams can manage from now on out diff --git a/test/config/interop-jd-translator/pool-config.toml b/test/config/interop-jd-translator/pool-config.toml index 9de7b11b1..91d50f203 100644 --- a/test/config/interop-jd-translator/pool-config.toml +++ b/test/config/interop-jd-translator/pool-config.toml @@ -16,7 +16,7 @@ coinbase_outputs = [ ] # Pool signature (string to be included in coinbase tx) # e.g. 
"Foundry USA", "Antpool", "/ViaBTC/Mined by gitgab19", etc -pool_signature = "Stratum v2 SRI Pool - gitgab19" +pool_signature = "Stratum v2 SRI Pool" # Template Provider config # hosted testnet TP diff --git a/test/config/pool-config-sri-tp.toml b/test/config/pool-config-sri-tp.toml index 3d7c18baf..be433b26a 100644 --- a/test/config/pool-config-sri-tp.toml +++ b/test/config/pool-config-sri-tp.toml @@ -11,4 +11,4 @@ coinbase_outputs = [ { output_script_type = "P2WPKH", output_script_value = "036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075" }, ] # Pool signature (string to be included in coinbase tx) -pool_signature = "Stratum v2 SRI Pool" \ No newline at end of file +pool_signature = "Stratum v2 SRI" diff --git a/test/config/pool-mock-tp-standard-coverage.toml b/test/config/pool-mock-tp-standard-coverage.toml index 181981c3f..98e4a5cc6 100644 --- a/test/config/pool-mock-tp-standard-coverage.toml +++ b/test/config/pool-mock-tp-standard-coverage.toml @@ -12,4 +12,4 @@ coinbase_outputs = [ { output_script_type = "P2WPKH", output_script_value = "036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075" }, ] # Pool signature (string to be included in coinbase tx) -pool_signature = "Stratum v2 SRI Pool" \ No newline at end of file +pool_signature = "Stratum v2 SRI" diff --git a/test/config/pool-mock-tp.toml b/test/config/pool-mock-tp.toml index 252d0637a..98e4a5cc6 100644 --- a/test/config/pool-mock-tp.toml +++ b/test/config/pool-mock-tp.toml @@ -12,4 +12,4 @@ coinbase_outputs = [ { output_script_type = "P2WPKH", output_script_value = "036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075" }, ] # Pool signature (string to be included in coinbase tx) -pool_signature = "Stratum v2 SRI Pool" +pool_signature = "Stratum v2 SRI" diff --git a/test/config/tproxy-config-no-jd-sv1-cpu-md.toml b/test/config/tproxy-config-no-jd-sv1-cpu-md.toml index 7c90479f0..2b05ec879 100644 --- a/test/config/tproxy-config-no-jd-sv1-cpu-md.toml +++ b/test/config/tproxy-config-no-jd-sv1-cpu-md.toml @@ -24,7 +24,7 @@ min_supported_version = 2 # Max value: 16 (leaves 0 bytes for search space splitting of downstreams) # Max value for CGminer: 8 # Min value: 2 -min_extranonce2_size = 8 +min_extranonce2_size = 5 coinbase_reward_sat = 5_000_000_000 # optional jn config, if set the tproxy start on JN mode @@ -45,4 +45,4 @@ shares_per_minute = 100.0 # interval in seconds to elapse before updating channel hashrate with the pool channel_diff_update_interval = 60 # estimated accumulated hashrate of all downstream miners -channel_nominal_hashrate = 500.0 \ No newline at end of file +channel_nominal_hashrate = 500.0 diff --git a/test/message-generator/test/pool-sri-test-1-standard/pool-sri-test-1-standard.json b/test/message-generator/test/pool-sri-test-1-standard/pool-sri-test-1-standard.json index 24ae109c5..85cfd0b42 100644 --- a/test/message-generator/test/pool-sri-test-1-standard/pool-sri-test-1-standard.json +++ b/test/message-generator/test/pool-sri-test-1-standard/pool-sri-test-1-standard.json @@ -48,7 +48,7 @@ "type": "SubmitSharesStandard", "channel_id": 1, "sequence_number": 0, - "job_id": 0, + "job_id": 1, "nonce": 4035255480, "ntime": 1698941362, "version": 536870912 diff --git a/test/message-generator/test/pool-sri-test-extended_0/pool-sri-test-extended_0.json b/test/message-generator/test/pool-sri-test-extended_0/pool-sri-test-extended_0.json index b8a529017..0cbe477ba 100644 --- a/test/message-generator/test/pool-sri-test-extended_0/pool-sri-test-extended_0.json +++ 
b/test/message-generator/test/pool-sri-test-extended_0/pool-sri-test-extended_0.json @@ -14,7 +14,7 @@ "user_identity": "", "nominal_hash_rate": 10, "max_target": [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1], - "min_extranonce_size": 16 + "min_extranonce_size": 8 }, "replace_fields": [["request_id", "ARBITRARY"]], "id": "open_extended_mining_channel" @@ -54,7 +54,7 @@ [ [ "extranonce_size", - {"U16": 16} + {"U16": 8} ] ] ] diff --git a/test/message-generator/test/pool-sri-test-extended_1/pool-sri-test-extended_1.json b/test/message-generator/test/pool-sri-test-extended_1/pool-sri-test-extended_1.json index 0567ae1e3..ecd4fa743 100644 --- a/test/message-generator/test/pool-sri-test-extended_1/pool-sri-test-extended_1.json +++ b/test/message-generator/test/pool-sri-test-extended_1/pool-sri-test-extended_1.json @@ -66,7 +66,7 @@ [ [ "coinbase_tx_prefix", - {"B064K": [2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 56, 3, 76, 163, 38, 0, 83, 116, 114, 97, 116, 117, 109, 32, 118, 50, 32, 83, 82, 73, 32, 80, 111, 111, 108]} + {"B064K": [2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 32, 3, 76, 163, 38, 0]} ], [ diff --git a/test/message-generator/test/standard-coverage-test/standard-coverage-test.json b/test/message-generator/test/standard-coverage-test/standard-coverage-test.json index a9a8daa93..e11d2d726 100644 --- a/test/message-generator/test/standard-coverage-test/standard-coverage-test.json +++ b/test/message-generator/test/standard-coverage-test/standard-coverage-test.json @@ -54,7 +54,7 @@ "type": "SubmitSharesStandard", "channel_id": 1, "sequence_number": 0, - "job_id": 0, + "job_id": 1, "nonce": 927894720, "ntime": 1671039088, "version": 536870912 @@ -66,8 +66,8 @@ "type": "SubmitSharesStandard", "channel_id": 1, "sequence_number": 0, - "job_id": 0, - "nonce": 1751, + "job_id": 1, + "nonce": 1752, "ntime": 1671116742, "version": 536870912 }, diff --git a/test/message-generator/test/translation-proxy-old-share/translation-proxy-old-share.json b/test/message-generator/test/translation-proxy-old-share/translation-proxy-old-share.json index 79833f2c0..52ada3a76 100644 --- a/test/message-generator/test/translation-proxy-old-share/translation-proxy-old-share.json +++ b/test/message-generator/test/translation-proxy-old-share/translation-proxy-old-share.json @@ -22,7 +22,7 @@ "message": { "id": 0, "method": "mining.submit", - "params": ["username", "0", "0000000000000000", "641577b0", "7a600640"] + "params": ["username", "0", "0000000000", "641577b0", "7a600640"] }, "id": "mining.submit" } diff --git a/utils/message-generator/Cargo.toml b/utils/message-generator/Cargo.toml index 003656392..0185cdccf 100644 --- a/utils/message-generator/Cargo.toml +++ b/utils/message-generator/Cargo.toml @@ -20,7 +20,7 @@ codec_sv2 = { version = "1.0.0", path = "../../protocols/v2/codec-sv2", features const_sv2 = { version = "3.0.0", path = "../../protocols/v2/const-sv2" } load_file = "1.0.1" network_helpers_sv2 = { version = "2.0.0", path = "../../roles/roles-utils/network-helpers", features = ["with_tokio","with_serde"] } -roles_logic_sv2 = { version = "1.0.0", path = "../../protocols/v2/roles-logic-sv2", features = ["with_serde"] } +roles_logic_sv2 = { version = "2.0.0", path = "../../protocols/v2/roles-logic-sv2", features = ["with_serde"] } v1 = { version = "^1.0.0", path = "../../protocols/v1", 
package="sv1_api" } serde = { version = "*", features = ["derive", "alloc"], default-features = false } serde_json = { version = "1.0", default-features = false, features = ["alloc"] }