diff --git a/compiler/noirc_evaluator/src/errors.rs b/compiler/noirc_evaluator/src/errors.rs index 9aabe3bc749..ed94adac28e 100644 --- a/compiler/noirc_evaluator/src/errors.rs +++ b/compiler/noirc_evaluator/src/errors.rs @@ -177,7 +177,7 @@ impl RuntimeError { _ => { let message = self.to_string(); let location = - self.call_stack().back().expect("Expected RuntimeError to have a location"); + self.call_stack().back().unwrap_or_else(|| panic!("Expected RuntimeError to have a location. Error message: {message}")); Diagnostic::simple_error(message, String::new(), location.span) } diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs index 2c151ed1e68..bc634b40d6f 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs @@ -679,11 +679,11 @@ impl Context { instruction: InstructionId, dfg: &DataFlowGraph, index: ValueId, - array: ValueId, + array_id: ValueId, store_value: Option, ) -> Result { let index_const = dfg.get_numeric_constant(index); - let value_type = dfg.type_of_value(array); + let value_type = dfg.type_of_value(array_id); // Compiler sanity checks assert!( !value_type.is_nested_slice(), @@ -693,7 +693,7 @@ impl Context { unreachable!("ICE: expected array or slice type"); }; - match self.convert_value(array, dfg) { + match self.convert_value(array_id, dfg) { AcirValue::Var(acir_var, _) => { return Err(RuntimeError::InternalError(InternalError::Unexpected { expected: "an array value".to_string(), diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs index 6ff1ccd29db..885f989f05d 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs @@ -1,3 +1,4 @@ +use fxhash::FxHashMap as HashMap; use std::{collections::VecDeque, rc::Rc}; use acvm::{acir::BlackBoxFunc, BlackBoxResolutionError, FieldElement}; 
@@ -318,6 +319,8 @@ fn simplify_slice_push_back( for elem in &arguments[2..] { slice.push_back(*elem); } + let slice_size = slice.len(); + let element_size = element_type.element_size(); let new_slice = dfg.make_array(slice, element_type); let set_last_slice_value_instr = @@ -326,7 +329,11 @@ fn simplify_slice_push_back( .insert_instruction_and_results(set_last_slice_value_instr, block, None, call_stack) .first(); - let mut value_merger = ValueMerger::new(dfg, block, None, None); + let mut slice_sizes = HashMap::default(); + slice_sizes.insert(set_last_slice_value, slice_size / element_size); + slice_sizes.insert(new_slice, slice_size / element_size); + + let mut value_merger = ValueMerger::new(dfg, block, &mut slice_sizes); let new_slice = value_merger.merge_values( len_not_equals_capacity, len_equals_capacity, diff --git a/compiler/noirc_evaluator/src/ssa/ir/types.rs b/compiler/noirc_evaluator/src/ssa/ir/types.rs index f412def1e76..8dc9e67db79 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/types.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/types.rs @@ -120,7 +120,7 @@ impl Type { } Type::Slice(_) => true, Type::Numeric(_) => false, - Type::Reference(_) => false, + Type::Reference(element) => element.contains_slice_element(), Type::Function => false, } } diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs index 1059994b9be..943a57c1bc0 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs @@ -152,8 +152,10 @@ use crate::ssa::{ }; mod branch_analysis; +mod capacity_tracker; pub(crate) mod value_merger; +use capacity_tracker::SliceCapacityTracker; use value_merger::ValueMerger; impl Ssa { @@ -184,17 +186,6 @@ struct Context<'f> { /// between inlining of branches. 
store_values: HashMap, - /// Maps an address to the old and new value of the element at that address - /// The difference between this map and store_values is that this stores - /// the old and new value of an element from the outer block whose jmpif - /// terminator is being flattened. - /// - /// This map persists throughout the flattening process, where addresses - /// are overwritten as new stores are found. This overwriting is the desired behavior, - /// as we want the most update to date value to be stored at a given address as - /// we walk through blocks to flatten. - outer_block_stores: HashMap, - /// Stores all allocations local to the current branch. /// Since these branches are local to the current branch (ie. only defined within one branch of /// an if expression), they should not be merged with their previous value or stored value in @@ -209,6 +200,10 @@ struct Context<'f> { /// condition. If we are under multiple conditions (a nested if), the topmost condition is /// the most recent condition combined with all previous conditions via `And` instructions. conditions: Vec<(BasicBlockId, ValueId)>, + + /// Maps SSA array values with a slice type to their size. + /// This is maintained by appropriate calls to the `SliceCapacityTracker` and is used by the `ValueMerger`. + slice_sizes: HashMap, } pub(crate) struct Store { @@ -239,7 +234,7 @@ fn flatten_function_cfg(function: &mut Function) { local_allocations: HashSet::new(), branch_ends, conditions: Vec::new(), - outer_block_stores: HashMap::default(), + slice_sizes: HashMap::default(), }; context.flatten(); } @@ -262,21 +257,18 @@ impl<'f> Context<'f> { /// Returns the last block to be inlined. This is either the return block of the function or, /// if self.conditions is not empty, the end block of the most recent condition. fn handle_terminator(&mut self, block: BasicBlockId) -> BasicBlockId { - if let TerminatorInstruction::JmpIf { .. 
} = - self.inserter.function.dfg[block].unwrap_terminator() - { - // Find stores in the outer block and insert into the `outer_block_stores` map. - // Not using this map can lead to issues when attempting to merge slices. - // When inlining a branch end, only the then branch and the else branch are checked for stores. - // However, there are cases where we want to load a value that comes from the outer block - // that we are handling the terminator for here. - let instructions = self.inserter.function.dfg[block].instructions().to_vec(); - for instruction in instructions { - let (instruction, _) = self.inserter.map_instruction(instruction); - if let Instruction::Store { address, value } = instruction { - self.outer_block_stores.insert(address, value); - } - } + // As we recursively flatten inner blocks, we need to track the slice information + // for the outer block before we start recursively inlining + let outer_block_instructions = self.inserter.function.dfg[block].instructions(); + let mut capacity_tracker = SliceCapacityTracker::new(&self.inserter.function.dfg); + for instruction in outer_block_instructions { + let results = self.inserter.function.dfg.instruction_results(*instruction); + let instruction = &self.inserter.function.dfg[*instruction]; + capacity_tracker.collect_slice_information( + instruction, + &mut self.slice_sizes, + results.to_vec(), + ); } match self.inserter.function.dfg[block].unwrap_terminator() { @@ -494,12 +486,16 @@ impl<'f> Context<'f> { }); let block = self.inserter.function.entry_block(); - let mut value_merger = ValueMerger::new( - &mut self.inserter.function.dfg, - block, - Some(&self.store_values), - Some(&self.outer_block_stores), - ); + + // Make sure we have tracked the slice capacities of any block arguments + let capacity_tracker = SliceCapacityTracker::new(&self.inserter.function.dfg); + for (then_arg, else_arg) in args.iter() { + capacity_tracker.compute_slice_capacity(*then_arg, &mut self.slice_sizes); + 
capacity_tracker.compute_slice_capacity(*else_arg, &mut self.slice_sizes); + } + + let mut value_merger = + ValueMerger::new(&mut self.inserter.function.dfg, block, &mut self.slice_sizes); // Cannot include this in the previous vecmap since it requires exclusive access to self let args = vecmap(args, |(then_arg, else_arg)| { @@ -538,18 +534,22 @@ impl<'f> Context<'f> { } } + // Most slice information is collected when instructions are inlined. + // We need to collect information on slice values here as we may possibly merge stores + // before any inlining occurs. + let capacity_tracker = SliceCapacityTracker::new(&self.inserter.function.dfg); + for (then_case, else_case, _) in new_map.values() { + capacity_tracker.compute_slice_capacity(*then_case, &mut self.slice_sizes); + capacity_tracker.compute_slice_capacity(*else_case, &mut self.slice_sizes); + } + let then_condition = then_branch.condition; let else_condition = else_branch.condition; let block = self.inserter.function.entry_block(); - let mut value_merger = ValueMerger::new( - &mut self.inserter.function.dfg, - block, - Some(&self.store_values), - Some(&self.outer_block_stores), - ); - + let mut value_merger = + ValueMerger::new(&mut self.inserter.function.dfg, block, &mut self.slice_sizes); // Merging must occur in a separate loop as we cannot borrow `self` as mutable while `value_merger` does let mut new_values = HashMap::default(); for (address, (then_case, else_case, _)) in &new_map { @@ -571,6 +571,16 @@ impl<'f> Context<'f> { .insert(address, Store { old_value: *old_value, new_value: value }); } } + + // Collect any potential slice information on the stores we are merging + for (address, (_, _, _)) in &new_map { + let value = new_values[address]; + let address = *address; + let instruction = Instruction::Store { address, value }; + + let mut capacity_tracker = SliceCapacityTracker::new(&self.inserter.function.dfg); + capacity_tracker.collect_slice_information(&instruction, &mut self.slice_sizes, 
vec![]); + } } fn remember_store(&mut self, address: ValueId, new_value: ValueId) { @@ -579,8 +589,18 @@ impl<'f> Context<'f> { store_value.new_value = new_value; } else { let load = Instruction::Load { address }; + let load_type = Some(vec![self.inserter.function.dfg.type_of_value(new_value)]); - let old_value = self.insert_instruction_with_typevars(load, load_type).first(); + let old_value = + self.insert_instruction_with_typevars(load.clone(), load_type).first(); + + // Need this or else we will be missing the previous value of a slice that we wish to merge + let mut capacity_tracker = SliceCapacityTracker::new(&self.inserter.function.dfg); + capacity_tracker.collect_slice_information( + &load, + &mut self.slice_sizes, + vec![old_value], + ); self.store_values.insert(address, Store { old_value, new_value }); } @@ -602,8 +622,15 @@ impl<'f> Context<'f> { // unnecessary, when removing it actually causes an aliasing/mutability error. let instructions = self.inserter.function.dfg[destination].instructions().to_vec(); - for instruction in instructions { - self.push_instruction(instruction); + for instruction in instructions.iter() { + let results = self.push_instruction(*instruction); + let (instruction, _) = self.inserter.map_instruction(*instruction); + let mut capacity_tracker = SliceCapacityTracker::new(&self.inserter.function.dfg); + capacity_tracker.collect_slice_information( + &instruction, + &mut self.slice_sizes, + results, + ); } self.handle_terminator(destination) @@ -615,7 +642,7 @@ impl<'f> Context<'f> { /// As a result, the instruction that will be pushed will actually be a new instruction /// with a different InstructionId from the original. The results of the given instruction /// will also be mapped to the results of the new instruction. 
- fn push_instruction(&mut self, id: InstructionId) { + fn push_instruction(&mut self, id: InstructionId) -> Vec { let (instruction, call_stack) = self.inserter.map_instruction(id); let instruction = self.handle_instruction_side_effects(instruction, call_stack.clone()); let is_allocate = matches!(instruction, Instruction::Allocate); @@ -628,6 +655,8 @@ impl<'f> Context<'f> { if is_allocate { self.local_allocations.insert(results.first()); } + + results.results().into_owned() } /// If we are currently in a branch, we need to modify constrain instructions diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs new file mode 100644 index 00000000000..7cd0fe3084e --- /dev/null +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/capacity_tracker.rs @@ -0,0 +1,150 @@ +use crate::ssa::ir::{ + dfg::DataFlowGraph, + instruction::{Instruction, Intrinsic}, + types::Type, + value::{Value, ValueId}, +}; + +use fxhash::FxHashMap as HashMap; + +pub(crate) struct SliceCapacityTracker<'a> { + dfg: &'a DataFlowGraph, +} + +impl<'a> SliceCapacityTracker<'a> { + pub(crate) fn new(dfg: &'a DataFlowGraph) -> Self { + SliceCapacityTracker { dfg } + } + + /// Determine how the slice sizes map needs to be updated according to the provided instruction. + pub(crate) fn collect_slice_information( + &mut self, + instruction: &Instruction, + slice_sizes: &mut HashMap, + results: Vec, + ) { + match instruction { + Instruction::ArrayGet { array, .. } => { + let array_typ = self.dfg.type_of_value(*array); + let array_value = &self.dfg[*array]; + if matches!(array_value, Value::Array { .. }) && array_typ.contains_slice_element() + { + // Initial insertion into the slice sizes map + // Any other insertions should only occur if the value is already + // a part of the map. + self.compute_slice_capacity(*array, slice_sizes); + } + } + Instruction::ArraySet { array, value, .. 
} => { + let array_typ = self.dfg.type_of_value(*array); + let array_value = &self.dfg[*array]; + if matches!(array_value, Value::Array { .. }) && array_typ.contains_slice_element() + { + // Initial insertion into the slice sizes map + // Any other insertions should only occur if the value is already + // a part of the map. + self.compute_slice_capacity(*array, slice_sizes); + } + + let value_typ = self.dfg.type_of_value(*value); + // Compiler sanity check + assert!(!value_typ.contains_slice_element(), "ICE: Nested slices are not allowed and should not have reached the flattening pass of SSA"); + + if let Some(capacity) = slice_sizes.get(array) { + slice_sizes.insert(results[0], *capacity); + } + } + Instruction::Call { func, arguments } => { + let func = &self.dfg[*func]; + if let Value::Intrinsic(intrinsic) = func { + let (argument_index, result_index) = match intrinsic { + Intrinsic::SlicePushBack + | Intrinsic::SlicePushFront + | Intrinsic::SlicePopBack + | Intrinsic::SliceInsert + | Intrinsic::SliceRemove => (1, 1), + // `pop_front` returns the popped element, and then the respective slice. + // This means in the case of a slice with structs, the result index of the popped slice + // will change depending on the number of elements in the struct. + // For example, a slice with four elements will look as such in SSA: + // v3, v4, v5, v6, v7, v8 = call slice_pop_front(v1, v2) + // where v7 is the slice length and v8 is the popped slice itself. + Intrinsic::SlicePopFront => (1, results.len() - 1), + _ => return, + }; + let slice_contents = arguments[argument_index]; + match intrinsic { + Intrinsic::SlicePushBack + | Intrinsic::SlicePushFront + | Intrinsic::SliceInsert => { + for arg in &arguments[(argument_index + 1)..] 
{ + let element_typ = self.dfg.type_of_value(*arg); + if element_typ.contains_slice_element() { + self.compute_slice_capacity(*arg, slice_sizes); + } + } + if let Some(contents_capacity) = slice_sizes.get(&slice_contents) { + let new_capacity = *contents_capacity + 1; + slice_sizes.insert(results[result_index], new_capacity); + } + } + Intrinsic::SlicePopBack + | Intrinsic::SliceRemove + | Intrinsic::SlicePopFront => { + // We do not decrement the size on intrinsics that could remove values from a slice. + // This is because we could potentially go back to the smaller slice and not fill in dummies. + // This pass should be tracking the potential max that a slice ***could be*** + if let Some(contents_capacity) = slice_sizes.get(&slice_contents) { + let new_capacity = *contents_capacity - 1; + slice_sizes.insert(results[result_index], new_capacity); + } + } + _ => {} + } + } + } + Instruction::Store { address, value } => { + let value_typ = self.dfg.type_of_value(*value); + if value_typ.contains_slice_element() { + self.compute_slice_capacity(*value, slice_sizes); + + let value_capacity = slice_sizes.get(value).unwrap_or_else(|| { + panic!("ICE: should have slice capacity set for value {value} being stored at {address}") + }); + + slice_sizes.insert(*address, *value_capacity); + } + } + Instruction::Load { address } => { + let load_typ = self.dfg.type_of_value(*address); + if load_typ.contains_slice_element() { + let result = results[0]; + + let address_capacity = slice_sizes.get(address).unwrap_or_else(|| { + panic!("ICE: should have slice capacity set at address {address} being loaded into {result}") + }); + + slice_sizes.insert(result, *address_capacity); + } + } + _ => {} + } + } + + /// Computes the starting capacity of a slice which is still a `Value::Array` + pub(crate) fn compute_slice_capacity( + &self, + array_id: ValueId, + slice_sizes: &mut HashMap, + ) { + if let Value::Array { array, typ } = &self.dfg[array_id] { + // Compiler sanity check + 
assert!(!typ.is_nested_slice(), "ICE: Nested slices are not allowed and should not have reached the flattening pass of SSA"); + if let Type::Slice(_) = typ { + let element_size = typ.element_size(); + let len = array.len() / element_size; + slice_sizes.insert(array_id, len); + } + } + } +} diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs index 446560f45f1..6b923a2e42d 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs @@ -4,35 +4,26 @@ use fxhash::FxHashMap as HashMap; use crate::ssa::ir::{ basic_block::BasicBlockId, dfg::{CallStack, DataFlowGraph}, - instruction::{BinaryOp, Instruction, Intrinsic}, + instruction::{BinaryOp, Instruction}, types::Type, - value::{Value, ValueId}, + value::ValueId, }; -use crate::ssa::opt::flatten_cfg::Store; - pub(crate) struct ValueMerger<'a> { dfg: &'a mut DataFlowGraph, block: BasicBlockId, - store_values: Option<&'a HashMap>, - outer_block_stores: Option<&'a HashMap>, - slice_sizes: HashMap, + // Maps SSA array values with a slice type to their size. + // This must be computed before merging values. + slice_sizes: &'a mut HashMap, } impl<'a> ValueMerger<'a> { pub(crate) fn new( dfg: &'a mut DataFlowGraph, block: BasicBlockId, - store_values: Option<&'a HashMap>, - outer_block_stores: Option<&'a HashMap>, + slice_sizes: &'a mut HashMap, ) -> Self { - ValueMerger { - dfg, - block, - store_values, - outer_block_stores, - slice_sizes: HashMap::default(), - } + ValueMerger { dfg, block, slice_sizes } } /// Merge two values a and b from separate basic blocks to a single value. 
@@ -184,11 +175,13 @@ impl<'a> ValueMerger<'a> { _ => panic!("Expected slice type"), }; - let then_len = self.get_slice_length(then_value_id); - self.slice_sizes.insert(then_value_id, then_len); + let then_len = *self.slice_sizes.get(&then_value_id).unwrap_or_else(|| { + panic!("ICE: Merging values during flattening encountered slice {then_value_id} without a preset size"); + }); - let else_len = self.get_slice_length(else_value_id); - self.slice_sizes.insert(else_value_id, else_len); + let else_len = *self.slice_sizes.get(&else_value_id).unwrap_or_else(|| { + panic!("ICE: Merging values during flattening encountered slice {else_value_id} without a preset size"); + }); let len = then_len.max(else_len); @@ -218,8 +211,10 @@ impl<'a> ValueMerger<'a> { } }; - let then_element = get_element(then_value_id, typevars.clone(), then_len); - let else_element = get_element(else_value_id, typevars, else_len); + let then_element = + get_element(then_value_id, typevars.clone(), then_len * element_types.len()); + let else_element = + get_element(else_value_id, typevars, else_len * element_types.len()); merged.push_back(self.merge_values( then_condition, @@ -233,82 +228,6 @@ impl<'a> ValueMerger<'a> { self.dfg.make_array(merged, typ) } - fn get_slice_length(&mut self, value_id: ValueId) -> usize { - let value = &self.dfg[value_id]; - match value { - Value::Array { array, .. } => array.len(), - Value::Instruction { instruction: instruction_id, .. } => { - let instruction = &self.dfg[*instruction_id]; - match instruction { - // TODO(#3188): A slice can be the result of an ArrayGet when it is the - // fetched from a slice of slices or as a struct field. - // However, we need to incorporate nested slice support in flattening - // in order for this to be valid - // Instruction::ArrayGet { array, .. } => {} - Instruction::ArraySet { array, .. 
} => { - let array = *array; - let len = self.get_slice_length(array); - self.slice_sizes.insert(array, len); - len - } - Instruction::Load { address } => { - let outer_block_stores = self.outer_block_stores.expect("ICE: A map of previous stores is required in order to resolve a slice load"); - let store_values = self.store_values.expect("ICE: A map of previous stores is required in order to resolve a slice load"); - let store_value = outer_block_stores - .get(address) - .expect("ICE: load in merger should have store from outer block"); - - if let Some(len) = self.slice_sizes.get(store_value) { - return *len; - } - - let store_value = if let Some(store) = store_values.get(address) { - if let Some(len) = self.slice_sizes.get(&store.new_value) { - return *len; - } - - store.new_value - } else { - *store_value - }; - - self.get_slice_length(store_value) - } - Instruction::Call { func, arguments } => { - let slice_contents = arguments[1]; - let func = &self.dfg[*func]; - match func { - Value::Intrinsic(intrinsic) => match intrinsic { - Intrinsic::SlicePushBack - | Intrinsic::SlicePushFront - | Intrinsic::SliceInsert => { - // `get_slice_length` needs to be called here as it is borrows self as mutable - let initial_len = self.get_slice_length(slice_contents); - self.slice_sizes.insert(slice_contents, initial_len); - initial_len + 1 - } - Intrinsic::SlicePopBack - | Intrinsic::SlicePopFront - | Intrinsic::SliceRemove => { - // `get_slice_length` needs to be called here as it is borrows self as mutable - let initial_len = self.get_slice_length(slice_contents); - self.slice_sizes.insert(slice_contents, initial_len); - initial_len - 1 - } - _ => { - unreachable!("ICE: Intrinsic not supported, got {intrinsic:?}") - } - }, - _ => unreachable!("ICE: Expected intrinsic value but got {func:?}"), - } - } - _ => unreachable!("ICE: Got unexpected instruction: {instruction:?}"), - } - } - _ => unreachable!("ICE: Got unexpected value when resolving slice length {value:?}"), - } - } - 
/// Construct a dummy value to be attached to the smaller of two slices being merged. /// We need to make sure we follow the internal element type structure of the slice type /// even for dummy data to ensure that we do not have errors later in the compiler, diff --git a/test_programs/execution_success/regression_4202/Nargo.toml b/test_programs/execution_success/regression_4202/Nargo.toml new file mode 100644 index 00000000000..acfba12dd4f --- /dev/null +++ b/test_programs/execution_success/regression_4202/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "regression_4202" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/regression_4202/Prover.toml b/test_programs/execution_success/regression_4202/Prover.toml new file mode 100644 index 00000000000..e9319802dfd --- /dev/null +++ b/test_programs/execution_success/regression_4202/Prover.toml @@ -0,0 +1 @@ +input = [1, 2, 3, 4] diff --git a/test_programs/execution_success/regression_4202/src/main.nr b/test_programs/execution_success/regression_4202/src/main.nr new file mode 100644 index 00000000000..37d2ee4578d --- /dev/null +++ b/test_programs/execution_success/regression_4202/src/main.nr @@ -0,0 +1,14 @@ +fn main(input: [u32; 4]) { + let mut slice1: [u32] = [1, 2, 3, 4]; + if slice1[0] == 3 { + slice1[1] = 4; + } + + if slice1[1] == 5 { + slice1[3] = 6; + } + + for i in 0..4 { + assert(slice1[i] == input[i]); + } +}