diff --git a/.bazelrc b/.bazelrc
index 4db0597cf..b52ab3056 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -35,22 +35,24 @@
 build --@rules_rust//:extra_rustc_flags=-Cdebug-assertions=on
 build --@rules_rust//:extra_rustc_flag=-Dbindings_with_variant_name
 build --strip=never

-build:dfinity --remote_cache=bazel-remote.idx.dfinity.network
+# build:dfinity --remote_cache=bazel-remote.idx.dfinity.network
 build --experimental_remote_cache_async
 build --remote_timeout=30s # Default is 60s.
 build:ci --remote_timeout=5m # Default is 60s.

-build:dfinity --experimental_remote_downloader=bazel-remote.idx.dfinity.network --experimental_remote_downloader_local_fallback
+# build:dfinity --experimental_remote_downloader=bazel-remote.idx.dfinity.network --experimental_remote_downloader_local_fallback
 build:local --experimental_remote_downloader=

 # Does not produce valid JSON. See https://github.com/bazelbuild/bazel/issues/14209
 build --execution_log_json_file=bazel-build-log.json
-build:ci --build_event_binary_file=bazel-bep.pb
-build:dfinity --bes_results_url=https://dash.idx.dfinity.network/invocation/
-build:dfinity --bes_backend=bes.idx.dfinity.network
+# build:dfinity --bes_results_url=https://dash.idx.dfinity.network/invocation/
+# build:dfinity --bes_backend=bes.idx.dfinity.network
 build --bes_timeout=30s # Default is no timeout.
 build:ci --bes_timeout=180s # Default is no timeout.
+build:ci --bes_upload_mode=fully_async
+build:ci --bes_results_url=
+build:ci --bes_backend=

 build --remote_local_fallback
 build --experimental_repository_downloader_retries=3 # https://bazel.build/reference/command-line-reference#flag--experimental_repository_downloader_retries
diff --git a/.github/workflows/build/action.yaml b/.github/workflows/build/action.yaml
index 4ca6330b7..d50f9a964 100644
--- a/.github/workflows/build/action.yaml
+++ b/.github/workflows/build/action.yaml
@@ -28,7 +28,7 @@ runs:
           echo "repin=false" >> $GITHUB_OUTPUT
           return $r
         }
-        bazel_build_auto_repin build ...
+        bazel_build_auto_repin build --config=ci ...
       shell: bash

     - uses: actions/checkout@v4
diff --git a/.github/workflows/dashboard.yaml b/.github/workflows/dashboard.yaml
index 1f1ce5ef5..498cddd73 100644
--- a/.github/workflows/dashboard.yaml
+++ b/.github/workflows/dashboard.yaml
@@ -7,6 +7,9 @@ on:
     branches:
       - "main"
   pull_request:
+    paths:
+      - dashboard/**
+      - ./.github/workflows/dashboard.yaml
   merge_group:

 concurrency:
diff --git a/rs/cli/src/cli.rs b/rs/cli/src/cli.rs
index fbd7a98d5..86cc617fb 100644
--- a/rs/cli/src/cli.rs
+++ b/rs/cli/src/cli.rs
@@ -476,13 +476,13 @@ impl ParsedCli {
             require_authentication,
             opts.neuron_id,
             opts.private_key_pem.clone(),
-            opts.hsm_slot.clone(),
+            opts.hsm_slot,
             opts.hsm_pin.clone(),
             opts.hsm_key_id.clone(),
         )
         .await?;
         Ok(ParsedCli {
-            network: network,
+            network,
             yes: opts.yes,
             neuron,
             ic_admin_bin_path: opts.ic_admin.clone(),
diff --git a/rs/cli/src/detect_neuron.rs b/rs/cli/src/detect_neuron.rs
index 8baf68835..49409028f 100644
--- a/rs/cli/src/detect_neuron.rs
+++ b/rs/cli/src/detect_neuron.rs
@@ -56,7 +56,7 @@ impl Neuron {
             Some(auth) => auth,
             None => return Err(anyhow::anyhow!("No HSM detected")),
         };
-        match auto_detect_neuron(&network.get_nns_urls(), auth).await {
+        match auto_detect_neuron(network.get_nns_urls(), auth).await {
             Ok(Some(n)) => Ok(n),
             Ok(None) => anyhow::bail!("No HSM detected. Please provide HSM slot, pin, and key id."),
             Err(e) => anyhow::bail!("Error while detectin neuron: {}", e),
@@ -174,7 +174,7 @@ pub fn detect_hsm_auth() -> anyhow::Result<Option<Auth>> {
 }

 // FIXME: This function should use either the HSM or the private key, instead of assuming the HSM
-pub async fn auto_detect_neuron(nns_urls: &Vec<url::Url>, auth: Auth) -> anyhow::Result<Option<Neuron>> {
+pub async fn auto_detect_neuron(nns_urls: &[url::Url], auth: Auth) -> anyhow::Result<Option<Neuron>> {
     if let Auth::Hsm { pin, slot, key_id } = auth {
         let auth = Auth::Hsm {
             pin: pin.clone(),
diff --git a/rs/cli/src/general.rs b/rs/cli/src/general.rs
index e4ddf684b..e6c4fd844 100644
--- a/rs/cli/src/general.rs
+++ b/rs/cli/src/general.rs
@@ -19,7 +19,7 @@ use crate::detect_neuron::{Auth, Neuron};

 pub async fn vote_on_proposals(
     neuron: &Neuron,
-    nns_urls: &Vec<Url>,
+    nns_urls: &[Url],
     accepted_proposers: &[u64],
     accepted_topics: &[i32],
     simulate: bool,
@@ -94,7 +94,7 @@ pub async fn get_node_metrics_history(
     subnets: Vec<PrincipalId>,
     start_at_nanos: u64,
     auth: &Auth,
-    nns_urls: &Vec<Url>,
+    nns_urls: &[Url],
 ) -> anyhow::Result<()> {
     let lock = Mutex::new(());
     let canister_agent = match auth {
diff --git a/rs/cli/src/ic_admin.rs b/rs/cli/src/ic_admin.rs
index 474b8db3d..32281ed37 100644
--- a/rs/cli/src/ic_admin.rs
+++ b/rs/cli/src/ic_admin.rs
@@ -177,7 +177,7 @@ impl IcAdminWrapper {
             .fold("".to_string(), |acc, s| {
                 let s = if s.contains('\n') { format!(r#""{}""#, s) } else { s };
                 let hsm_pin = if let Auth::Hsm { pin, .. } = &self.neuron.auth {
-                    &pin
+                    pin
                 } else {
                     ""
                 };
@@ -611,7 +611,7 @@ must be identical, and must match the SHA256 from the payload of the NNS proposa
         let re_comment = Regex::new(r"\s*<!--.*?-->\s*").unwrap();
         let mut builder = edit::Builder::new();
         let with_suffix = builder.suffix(".md");
-        let edited = edit::edit_with_builder(template, &with_suffix)?
+        let edited = edit::edit_with_builder(template, with_suffix)?
             .trim()
             .replace("\r(\n)?", "\n")
             .split('\n')
@@ -899,11 +899,13 @@ must be identical, and must match the SHA256 from the payload of the NNS proposa
             Ok(())
         }

-        for (_, mods) in reverse_sorted.into_iter() {
-            submit_proposal(self, mods, propose_options.clone(), simulate)?;
-            break; // no more than one rule mod implemented currenty -- FIXME
+        // no more than one rule mod implemented currenty -- FIXME
+        match reverse_sorted.into_iter().last() {
+            Some((_, mods)) => submit_proposal(self, mods, propose_options.clone(), simulate),
+            None => Err(anyhow::anyhow!(
+                "Expected to have one item for firewall rule modification"
+            )),
         }

-        Ok(())
     }
 }
diff --git a/rs/cli/src/main.rs b/rs/cli/src/main.rs
index 9482abefa..2dad75f25 100644
--- a/rs/cli/src/main.rs
+++ b/rs/cli/src/main.rs
@@ -44,7 +44,7 @@ async fn main() -> Result<(), anyhow::Error> {
             cli_opts.neuron_id = Some(STAGING_NEURON_ID);
         }
     }
-    let governance_canister_v = match governance_canister_version(&nns_urls).await {
+    let governance_canister_v = match governance_canister_version(nns_urls).await {
         Ok(c) => c,
         Err(e) => {
             return Err(anyhow::anyhow!(
@@ -289,12 +289,12 @@

         cli::Commands::Vote {accepted_neurons, accepted_topics}=> {
             let cli = cli::ParsedCli::from_opts(&cli_opts, true).await?;
-            vote_on_proposals(cli.get_neuron(), &target_network.get_nns_urls(), accepted_neurons, accepted_topics, simulate).await
+            vote_on_proposals(cli.get_neuron(), target_network.get_nns_urls(), accepted_neurons, accepted_topics, simulate).await
         },

         cli::Commands::TrustworthyMetrics { wallet, start_at_timestamp, subnet_ids } => {
             let auth = Auth::from_cli_args(cli_opts.private_key_pem, cli_opts.hsm_slot, cli_opts.hsm_pin, cli_opts.hsm_key_id)?;
-            get_node_metrics_history(CanisterId::from_str(wallet)?, subnet_ids.clone(), *start_at_timestamp, &auth, &target_network.get_nns_urls()).await
+            get_node_metrics_history(CanisterId::from_str(wallet)?, subnet_ids.clone(), *start_at_timestamp, &auth, target_network.get_nns_urls()).await
         },

         cli::Commands::DumpRegistry { version, path } => {
diff --git a/rs/cli/src/runner.rs b/rs/cli/src/runner.rs
index 79a86728c..11d5a393f 100644
--- a/rs/cli/src/runner.rs
+++ b/rs/cli/src/runner.rs
@@ -194,7 +194,7 @@ impl Runner {
             dashboard_backend_client,
             // TODO: Remove once DREL-118 completed.
             // Fake registry that is not used, but some methods still rely on backend.
-            registry: registry::RegistryState::new(&network, true).await,
+            registry: registry::RegistryState::new(network, true).await,
         })
     }

@@ -208,7 +208,7 @@ impl Runner {
         let node_providers = query_ic_dashboard_list::<NodeProvidersResponse>("v3/node-providers")
             .await?
             .node_providers;
-        let _ = registry.update_node_details(&node_providers).await?;
+        registry.update_node_details(&node_providers).await?;
         Ok(Self {
             ic_admin,
             dashboard_backend_client,
@@ -306,8 +306,8 @@ impl Runner {
             self.registry.subnets(),
             &self.registry.network(),
             ProposalAgent::new(self.registry.get_nns_urls()),
-            &version,
-            &exclude,
+            version,
+            exclude,
         );

         match hostos_rollout.execute(node_group).await? {
diff --git a/rs/decentralization/src/nakamoto/mod.rs b/rs/decentralization/src/nakamoto/mod.rs
index a5180bef2..531cc7b47 100644
--- a/rs/decentralization/src/nakamoto/mod.rs
+++ b/rs/decentralization/src/nakamoto/mod.rs
@@ -133,7 +133,6 @@ impl NakamotoScore {
         // all strings and their counts
         let value_counts = counters
             .into_iter()
-            .map(|(feat, cnt)| (feat, cnt))
             .sorted_by_key(|(_feat, cnt)| -(*cnt as isize))
             .collect::<Vec<_>>();

@@ -810,8 +809,8 @@ mod tests {
             nodes: subnet_all
                 .nodes
                 .iter()
-                .cloned()
                 .filter(|n| !re_unhealthy_nodes.is_match(&n.id.to_string()))
+                .cloned()
                 .collect(),
             removed_nodes: Vec::new(),
             min_nakamoto_coefficients: None,
diff --git a/rs/ic-canisters/src/governance.rs b/rs/ic-canisters/src/governance.rs
index 6eee08580..506d9dbe7 100644
--- a/rs/ic-canisters/src/governance.rs
+++ b/rs/ic-canisters/src/governance.rs
@@ -18,7 +18,7 @@ pub struct GovernanceCanisterVersion {
     pub stringified_hash: String,
 }

-pub async fn governance_canister_version(nns_urls: &Vec<Url>) -> Result<GovernanceCanisterVersion, anyhow::Error> {
+pub async fn governance_canister_version(nns_urls: &[Url]) -> Result<GovernanceCanisterVersion, anyhow::Error> {
     let canister_agent = Agent::builder()
         .with_transport(
             ic_agent::agent::http_transport::reqwest_transport::ReqwestHttpReplicaV2Transport::create(
diff --git a/rs/ic-canisters/src/parallel_hardware_identity.rs b/rs/ic-canisters/src/parallel_hardware_identity.rs
index 3a4e98585..73f6f2ba6 100644
--- a/rs/ic-canisters/src/parallel_hardware_identity.rs
+++ b/rs/ic-canisters/src/parallel_hardware_identity.rs
@@ -279,7 +279,7 @@ fn get_ec_point(
     let der_encoded_ec_point = get_variable_length_attribute(ctx, session_handle, object_handle, CKA_EC_POINT)?;
     let blocks = from_der(der_encoded_ec_point.as_slice()).map_err(HardwareIdentityError::ASN1Decode)?;

-    let block = blocks.get(0).ok_or(HardwareIdentityError::EcPointEmpty)?;
+    let block = blocks.first().ok_or(HardwareIdentityError::EcPointEmpty)?;
     if let OctetString(_size, data) = block {
         Ok(data.clone())
     } else {
@@ -298,7 +298,7 @@ fn get_attribute_length(
     ctx.get_attribute_value(session_handle, object_handle, &mut attributes)?;

     let first = attributes
-        .get(0)
+        .first()
         .ok_or(HardwareIdentityError::AttributeNotFound(attribute_type))?;
     Ok(first.ulValueLen as usize)
 }
diff --git a/rs/ic-management-backend/src/endpoints/governance_canister.rs b/rs/ic-management-backend/src/endpoints/governance_canister.rs
index c7696389f..29aa9f074 100644
--- a/rs/ic-management-backend/src/endpoints/governance_canister.rs
+++ b/rs/ic-management-backend/src/endpoints/governance_canister.rs
@@ -6,6 +6,6 @@ async fn governance_canister_version_endpoint(
     registry: web::Data<Arc<RwLock<registry::RegistryState>>>,
 ) -> Result<HttpResponse, Error> {
     let registry = registry.read().await;
-    let g = governance_canister_version(&registry.network().get_nns_urls()).await;
+    let g = governance_canister_version(registry.network().get_nns_urls()).await;
     response_from_result(g)
 }
diff --git a/rs/ic-management-backend/src/endpoints/mod.rs b/rs/ic-management-backend/src/endpoints/mod.rs
index 3dd41a28a..7617463b1 100644
--- a/rs/ic-management-backend/src/endpoints/mod.rs
+++ b/rs/ic-management-backend/src/endpoints/mod.rs
@@ -181,7 +181,7 @@ async fn get_subnet(
 #[get("/rollout")]
 async fn rollout(registry: web::Data<Arc<RwLock<registry::RegistryState>>>) -> Result<HttpResponse, Error> {
     let registry = registry.read().await;
-    let proposal_agent = proposal::ProposalAgent::new(&registry.get_nns_urls());
+    let proposal_agent = proposal::ProposalAgent::new(registry.get_nns_urls());
     let network = registry.network();
     let prometheus_client = prometheus::client(&network);
     let service = RolloutBuilder {
@@ -197,7 +197,7 @@ async fn rollout(registry: web::Data<Arc<RwLock<registry::RegistryState>>>) -> R
 #[get("/subnets/versions")]
 async fn subnets_release(registry: web::Data<Arc<RwLock<registry::RegistryState>>>) -> Result<HttpResponse, Error> {
     let registry = registry.read().await;
-    let proposal_agent = proposal::ProposalAgent::new(&registry.get_nns_urls());
+    let proposal_agent = proposal::ProposalAgent::new(registry.get_nns_urls());
     let network = registry.network();
     let prometheus_client = prometheus::client(&network);
     response_from_result(
diff --git a/rs/ic-management-backend/src/proposal.rs b/rs/ic-management-backend/src/proposal.rs
index 2496003d2..bd3415279 100644
--- a/rs/ic-management-backend/src/proposal.rs
+++ b/rs/ic-management-backend/src/proposal.rs
@@ -82,8 +82,9 @@ pub struct UpdateUnassignedNodesProposal {
     pub payload: UpdateUnassignedNodesConfigPayload,
 }

+#[allow(dead_code)]
 impl ProposalAgent {
-    pub fn new(nns_urls: &Vec<Url>) -> Self {
+    pub fn new(nns_urls: &[Url]) -> Self {
         let agent = Agent::builder()
             .with_transport(
                 ReqwestHttpReplicaV2Transport::create(nns_urls[0].clone()).expect("failed to create transport"),
diff --git a/rs/ic-management-backend/src/registry.rs b/rs/ic-management-backend/src/registry.rs
index 9fb87855b..982924f6f 100644
--- a/rs/ic-management-backend/src/registry.rs
+++ b/rs/ic-management-backend/src/registry.rs
@@ -192,9 +192,10 @@ impl ReleasesOps for ArtifactReleases {
     }
 }

+#[allow(dead_code)]
 impl RegistryState {
     pub async fn new(network: &Network, without_update_loop: bool) -> Self {
-        sync_local_store(&network).await.expect("failed to init local store");
+        sync_local_store(network).await.expect("failed to init local store");

         if !without_update_loop {
             let closure_network = network.clone();
@@ -651,7 +652,7 @@ impl RegistryState {

     pub async fn nodes_with_proposals(&self) -> Result<Vec<Node>> {
         let nodes = self.nodes.clone();
-        let proposal_agent = proposal::ProposalAgent::new(&self.network.get_nns_urls());
+        let proposal_agent = proposal::ProposalAgent::new(self.network.get_nns_urls());

         let topology_proposals = proposal_agent.list_open_topology_proposals().await?;

@@ -669,18 +670,18 @@ impl RegistryState {
     }

     pub async fn open_elect_replica_proposals(&self) -> Result<Vec<UpdateElectedReplicaVersionsProposal>> {
-        let proposal_agent = proposal::ProposalAgent::new(&self.network.get_nns_urls());
+        let proposal_agent = proposal::ProposalAgent::new(self.network.get_nns_urls());

         proposal_agent.list_open_elect_replica_proposals().await
     }

     pub async fn open_elect_hostos_proposals(&self) -> Result<Vec<UpdateElectedHostosVersionsProposal>> {
-        let proposal_agent = proposal::ProposalAgent::new(&self.network.get_nns_urls());
+        let proposal_agent = proposal::ProposalAgent::new(self.network.get_nns_urls());

         proposal_agent.list_open_elect_hostos_proposals().await
     }

     pub async fn subnets_with_proposals(&self) -> Result<BTreeMap<PrincipalId, Subnet>> {
         let subnets = self.subnets.clone();
-        let proposal_agent = proposal::ProposalAgent::new(&self.network.get_nns_urls());
+        let proposal_agent = proposal::ProposalAgent::new(self.network.get_nns_urls());
         let topology_proposals = proposal_agent.list_open_topology_proposals().await?;
diff --git a/rs/ic-observability/multiservice-discovery/src/definition.rs b/rs/ic-observability/multiservice-discovery/src/definition.rs
index 155943fbc..c7b95d93c 100644
--- a/rs/ic-observability/multiservice-discovery/src/definition.rs
+++ b/rs/ic-observability/multiservice-discovery/src/definition.rs
@@ -83,7 +83,7 @@ impl From<FSDefinition> for Definition {
             log: log.clone(),
             public_key: fs_definition.public_key,
             poll_interval: fs_definition.poll_interval,
-            registry_query_timeout: fs_definition.registry_query_timeout.clone(),
+            registry_query_timeout: fs_definition.registry_query_timeout,
             ic_discovery: Arc::new(
                 IcServiceDiscoveryImpl::new(log, fs_definition.registry_path, fs_definition.registry_query_timeout)
                     .unwrap(),
diff --git a/rs/ic-observability/service-discovery/src/registry_sync.rs b/rs/ic-observability/service-discovery/src/registry_sync.rs
index 280bbb550..45a36a7e2 100644
--- a/rs/ic-observability/service-discovery/src/registry_sync.rs
+++ b/rs/ic-observability/service-discovery/src/registry_sync.rs
@@ -41,7 +41,7 @@ impl Display for SyncError {
 pub async fn sync_local_registry(
     log: Logger,
     local_path: PathBuf,
-    nns_urls: &Vec<Url>,
+    nns_urls: &[Url],
     use_current_version: bool,
     public_key: Option<ThresholdSigPublicKey>,
     stop_signal: &Receiver<()>,
diff --git a/rs/log-fetcher/src/journald_parser.rs b/rs/log-fetcher/src/journald_parser.rs
index 349687616..30cf4475a 100644
--- a/rs/log-fetcher/src/journald_parser.rs
+++ b/rs/log-fetcher/src/journald_parser.rs
@@ -17,7 +17,6 @@ enum LineStatus {
     NotStarted,
     Started,
     Utf8,
-    Binary,
 }

 pub fn parse_journal_entries_new(body: &[u8]) -> Vec<JournalEntry> {
@@ -35,6 +34,7 @@ pub fn parse_journal_entries_new(body: &[u8]) -> Vec<JournalEntry> {
                 first_found = LineStatus::Utf8;
             }
             (b'\n', LineStatus::Started) => {
+                // Binary field case
                 current_entry.push(current_line.clone());
                 current_line.clear();
                 let mut next = vec![];
@@ -73,7 +73,6 @@ pub fn parse_journal_entries_new(body: &[u8]) -> Vec<JournalEntry> {
                 current_line.push(*byte);
                 first_found = LineStatus::Started;
             }
-            (a, b) => unreachable!("Shouldn't happen: {}, {:?}", a, b),
         }
     }
     // Check if there's an entry at the end of the body
diff --git a/rs/np-notifications/src/nodes_status.rs b/rs/np-notifications/src/nodes_status.rs
index 60a366a8d..2e6663677 100644
--- a/rs/np-notifications/src/nodes_status.rs
+++ b/rs/np-notifications/src/nodes_status.rs
@@ -121,7 +121,7 @@ mod tests {
         // Node added
         // Node removed
         // Node unchanged
-        let ids = vec![
+        let ids = [
            PrincipalId::new_node_test_id(0),
            PrincipalId::new_node_test_id(1),
            PrincipalId::new_node_test_id(2),
diff --git a/rs/np-notifications/src/router.rs b/rs/np-notifications/src/router.rs
index 50523508e..6db6e1805 100644
--- a/rs/np-notifications/src/router.rs
+++ b/rs/np-notifications/src/router.rs
@@ -143,7 +143,8 @@ mod tests {
     use rand::distributions::Alphanumeric;
     use rand::{thread_rng, Rng};
     use std::path::Path;
-    use std::{fs::File, io::Write, str::FromStr, sync::Arc};
+    use std::rc::Rc;
+    use std::{fs::File, io::Write, str::FromStr};

     use crate::router::CONFIG_FILE_PATH_VAR_NAME;
     use crate::{
@@ -292,7 +293,7 @@ node_providers:
             status_change: (Status::Healthy, Status::Degraded),
         };

-        let test_sink = Arc::new(TestSink::new());
+        let test_sink = Rc::new(TestSink::new());
         let router = Router {
             routes: vec![Route {
                 matcher: Matcher {
diff --git a/rs/np-notifications/src/sink.rs b/rs/np-notifications/src/sink.rs
index c18af0ed3..5f2ccfe2c 100644
--- a/rs/np-notifications/src/sink.rs
+++ b/rs/np-notifications/src/sink.rs
@@ -1,4 +1,5 @@
-use std::{cell::RefCell, sync::Arc};
+use std::cell::RefCell;
+use std::rc::Rc;

 use anyhow::anyhow;
 use anyhow::Result;
@@ -13,7 +14,7 @@ pub enum Sink {
     #[allow(unused)]
     Webhook(WebhookSink),
     #[allow(unused)]
-    Test(Arc<TestSink>),
+    Test(Rc<TestSink>),
 }

 impl Sink {
diff --git a/rs/rollout-controller/src/calculation/mod.rs b/rs/rollout-controller/src/calculation/mod.rs
index da0e613f8..df64b2d1d 100644
--- a/rs/rollout-controller/src/calculation/mod.rs
+++ b/rs/rollout-controller/src/calculation/mod.rs
@@ -126,7 +126,7 @@ pub async fn calculate_progress<'a>(
         &subnet_update_proposals,
         &unassigned_nodes_proposals,
         index,
-        Some(&logger),
+        Some(logger),
         &unassigned_nodes_version,
         &subnets,
         Local::now().date_naive(),
diff --git a/rs/rollout-controller/src/calculation/should_proceed.rs b/rs/rollout-controller/src/calculation/should_proceed.rs
index c4b9c3d3b..a3fbc97cf 100644
--- a/rs/rollout-controller/src/calculation/should_proceed.rs
+++ b/rs/rollout-controller/src/calculation/should_proceed.rs
@@ -38,7 +38,7 @@ mod should_proceed_tests {
         let day = NaiveDate::from_str("2024-03-11").unwrap();
         let index = Index {
             rollout: Rollout {
-                skip_days: vec![day.clone()],
+                skip_days: vec![day],
                 ..Default::default()
             },
             ..Default::default()
diff --git a/rs/rollout-controller/src/calculation/stage_checks.rs b/rs/rollout-controller/src/calculation/stage_checks.rs
index 6f1818bee..1fd8cb15e 100644
--- a/rs/rollout-controller/src/calculation/stage_checks.rs
+++ b/rs/rollout-controller/src/calculation/stage_checks.rs
@@ -57,7 +57,7 @@ pub fn check_stages<'a>(
             if let SubnetAction::Noop { subnet_short: _ } = a {
                 return true;
             }
-            return false;
+            false
         }) {
             return Ok(stage_actions);
         }
@@ -78,7 +78,7 @@ pub fn check_stages<'a>(
 }

 fn week_passed(release_start: NaiveDate, now: NaiveDate) -> bool {
-    let mut counter = release_start.clone();
+    let mut counter = release_start;
     counter = counter
         .checked_add_days(Days::new(1))
         .expect("Should be able to add a day");
@@ -119,7 +119,7 @@ fn check_stage<'a>(
                     }
                 }
             }
-            return false;
+            false
         }) {
             None => stage_actions.push(SubnetAction::PlaceProposal {
                 is_unassigned: true,
@@ -291,13 +291,13 @@ fn get_remaining_bake_time_for_subnet(
         }
     };

-    return match bake.ge(&stage_bake_time) {
+    match bake.ge(&stage_bake_time) {
         true => Ok(0.0),
         false => {
             let remaining = Duration::from_secs_f64(stage_bake_time - bake);
-            return Ok(remaining.as_secs_f64());
+            Ok(remaining.as_secs_f64())
         }
-    };
+    }
 }

 fn get_open_proposal_for_subnet<'a>(
@@ -343,7 +343,7 @@ mod get_open_proposal_for_subnet_tests {
     use super::*;
     use rstest::rstest;

-    pub(super) fn craft_subnet_from_id<'a>(subnet_id: &'a str) -> Subnet {
+    pub(super) fn craft_subnet_from_id(subnet_id: &str) -> Subnet {
         Subnet {
             principal: PrincipalId(Principal::from_str(subnet_id).expect("Can create principal")),
             ..Default::default()
@@ -394,7 +394,7 @@ mod get_open_proposal_for_subnet_tests {
     #[test]
     fn should_find_open_proposal_for_subnet() {
         let proposals = craft_open_proposals(
-            &vec![
+            &[
                 "snjp4-xlbw4-mnbog-ddwy6-6ckfd-2w5a2-eipqo-7l436-pxqkh-l6fuv-vae",
                 "pae4o-o6dxf-xki7q-ezclx-znyd6-fnk6w-vkv5z-5lfwh-xym2i-otrrw-fqe",
             ],
@@ -429,7 +429,7 @@ mod get_open_proposal_for_subnet_tests {
         #[case] current_version: &str,
     ) {
         let proposals = craft_executed_proposals(
-            &vec![
+            &[
                 "snjp4-xlbw4-mnbog-ddwy6-6ckfd-2w5a2-eipqo-7l436-pxqkh-l6fuv-vae",
                 "pae4o-o6dxf-xki7q-ezclx-znyd6-fnk6w-vkv5z-5lfwh-xym2i-otrrw-fqe",
             ],
@@ -762,9 +762,10 @@ mod check_stages_tests {

     impl TestCase {
         pub fn new(name: &'static str) -> Self {
-            let mut case = Self::default();
-            case.name = name;
-            case
+            TestCase {
+                name,
+                ..Default::default()
+            }
         }

         pub fn with_index(mut self, index: Index) -> Self {
@@ -936,7 +937,7 @@ mod check_stages_tests {
                 .with_last_bake_status(&[(1, "9h"), (2, "5h"), (3, "5h"), (4, "5h")])
                 .with_unassigned_node_proposals(&[(true, "b")])
                 .with_now("2024-03-04")
-                .expect_actions(&vec![]),
+                .expect_actions(&[]),
             TestCase::new("Partially executed step, a subnet is baking but the other doesn't have a submitted proposal")
                 .with_subnet_update_proposals(&[(1, true, "b"), (2, true, "b")])
                 .with_last_bake_status(&[(1, "9h"), (2, "3h")])
@@ -980,10 +981,10 @@ mod check_stages_tests {
                     pause: false,
                     skip_days: vec![],
                     stages: vec![
-                        stage(&vec![1], "8h"),
-                        stage(&vec![2, 3], "4h"),
+                        stage(&[1], "8h"),
+                        stage(&[2, 3], "4h"),
                         stage_unassigned(),
-                        stage_next_week(&vec![4], "4h"),
+                        stage_next_week(&[4], "4h"),
                     ],
                 },
                 releases: vec![