diff --git a/aztec_macros/src/lib.rs b/aztec_macros/src/lib.rs
index f55feb9801e..6d3aa0d8b01 100644
--- a/aztec_macros/src/lib.rs
+++ b/aztec_macros/src/lib.rs
@@ -33,7 +33,6 @@ impl MacroProcessor for AztecMacro {
 
 #[derive(Debug, Clone)]
 pub enum AztecMacroError {
-    // TODO(benesjan): https://github.com/AztecProtocol/aztec-packages/issues/2905
     AztecNotFound,
     AztecComputeNoteHashAndNullifierNotFound { span: Span },
 }
@@ -42,12 +41,12 @@ impl From<AztecMacroError> for MacroError {
     fn from(err: AztecMacroError) -> Self {
         match err {
             AztecMacroError::AztecNotFound {} => MacroError {
-                primary_message: "Aztec dependency not found. Please add aztec as a dependency in your Nargo.toml".to_owned(),
+                primary_message: "Aztec dependency not found. Please add aztec as a dependency in your Nargo.toml. For more information go to https://docs.aztec.network/dev_docs/debugging/aztecnr-errors#aztec-dependency-not-found-please-add-aztec-as-a-dependency-in-your-nargotoml".to_owned(),
                 secondary_message: None,
                 span: None,
             },
             AztecMacroError::AztecComputeNoteHashAndNullifierNotFound { span } => MacroError {
-                primary_message: "compute_note_hash_and_nullifier function not found. Define it in your contract.".to_owned(),
+                primary_message: "compute_note_hash_and_nullifier function not found. Define it in your contract. For more information go to https://docs.aztec.network/dev_docs/debugging/aztecnr-errors#compute_note_hash_and_nullifier-function-not-found-define-it-in-your-contract".to_owned(),
                 secondary_message: None,
                 span: Some(span),
             },
diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_black_box.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_black_box.rs
index 0d97dd12601..9979bf0cd29 100644
--- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_black_box.rs
+++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_black_box.rs
@@ -1,9 +1,9 @@
-use acvm::acir::{
-    brillig::{BlackBoxOp, HeapVector, RegisterOrMemory},
-    BlackBoxFunc,
-};
+use acvm::acir::{brillig::BlackBoxOp, BlackBoxFunc};
 
-use crate::brillig::brillig_ir::BrilligContext;
+use crate::brillig::brillig_ir::{
+    brillig_variable::{BrilligVariable, BrilligVector},
+    BrilligContext,
+};
 
 /// Transforms SSA's black box function calls into the corresponding brillig instructions
 /// Extracting arguments and results from the SSA function call
@@ -11,31 +11,31 @@ use crate::brillig::brillig_ir::BrilligContext;
 pub(crate) fn convert_black_box_call(
     brillig_context: &mut BrilligContext,
     bb_func: &BlackBoxFunc,
-    function_arguments: &[RegisterOrMemory],
-    function_results: &[RegisterOrMemory],
+    function_arguments: &[BrilligVariable],
+    function_results: &[BrilligVariable],
 ) {
     match bb_func {
         BlackBoxFunc::SHA256 => {
-            if let ([message], [RegisterOrMemory::HeapArray(result_array)]) =
+            if let ([message], [BrilligVariable::BrilligArray(result_array)]) =
                 (function_arguments, function_results)
             {
                 let message_vector = convert_array_or_vector(brillig_context, message, bb_func);
                 brillig_context.black_box_op_instruction(BlackBoxOp::Sha256 {
-                    message: message_vector,
-                    output: *result_array,
+                    message: message_vector.to_heap_vector(),
+                    output: result_array.to_heap_array(),
                 });
             } else {
                 unreachable!("ICE: SHA256 expects one array argument and one array result")
             }
         }
         BlackBoxFunc::Blake2s => {
-            if let ([message], [RegisterOrMemory::HeapArray(result_array)]) =
+            if let ([message], [BrilligVariable::BrilligArray(result_array)]) =
                 (function_arguments, function_results)
             {
                 let message_vector = convert_array_or_vector(brillig_context,
message, bb_func); brillig_context.black_box_op_instruction(BlackBoxOp::Blake2s { - message: message_vector, - output: *result_array, + message: message_vector.to_heap_vector(), + output: result_array.to_heap_array(), }); } else { unreachable!("ICE: Blake2s expects one array argument and one array result") @@ -43,28 +43,28 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::Keccak256 => { if let ( - [message, RegisterOrMemory::RegisterIndex(array_size)], - [RegisterOrMemory::HeapArray(result_array)], + [message, BrilligVariable::Simple(array_size)], + [BrilligVariable::BrilligArray(result_array)], ) = (function_arguments, function_results) { let mut message_vector = convert_array_or_vector(brillig_context, message, bb_func); message_vector.size = *array_size; brillig_context.black_box_op_instruction(BlackBoxOp::Keccak256 { - message: message_vector, - output: *result_array, + message: message_vector.to_heap_vector(), + output: result_array.to_heap_array(), }); } else { unreachable!("ICE: Keccak256 expects message, message size and result array") } } BlackBoxFunc::HashToField128Security => { - if let ([message], [RegisterOrMemory::RegisterIndex(result_register)]) = + if let ([message], [BrilligVariable::Simple(result_register)]) = (function_arguments, function_results) { let message_vector = convert_array_or_vector(brillig_context, message, bb_func); brillig_context.black_box_op_instruction(BlackBoxOp::HashToField128Security { - message: message_vector, + message: message_vector.to_heap_vector(), output: *result_register, }); } else { @@ -73,17 +73,17 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::EcdsaSecp256k1 => { if let ( - [RegisterOrMemory::HeapArray(public_key_x), RegisterOrMemory::HeapArray(public_key_y), RegisterOrMemory::HeapArray(signature), message], - [RegisterOrMemory::RegisterIndex(result_register)], + [BrilligVariable::BrilligArray(public_key_x), BrilligVariable::BrilligArray(public_key_y), BrilligVariable::BrilligArray(signature), message], + [BrilligVariable::Simple(result_register)], ) = (function_arguments, function_results) { let message_hash_vector = convert_array_or_vector(brillig_context, message, bb_func); brillig_context.black_box_op_instruction(BlackBoxOp::EcdsaSecp256k1 { - hashed_msg: message_hash_vector, - public_key_x: *public_key_x, - public_key_y: *public_key_y, - signature: *signature, + hashed_msg: message_hash_vector.to_heap_vector(), + public_key_x: public_key_x.to_heap_array(), + public_key_y: public_key_y.to_heap_array(), + signature: signature.to_heap_array(), result: *result_register, }); } else { @@ -94,15 +94,15 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::PedersenCommitment => { if let ( - [message, RegisterOrMemory::RegisterIndex(domain_separator)], - [RegisterOrMemory::HeapArray(result_array)], + [message, BrilligVariable::Simple(domain_separator)], + [BrilligVariable::BrilligArray(result_array)], ) = (function_arguments, function_results) { let message_vector = convert_array_or_vector(brillig_context, message, bb_func); brillig_context.black_box_op_instruction(BlackBoxOp::PedersenCommitment { - inputs: message_vector, + inputs: message_vector.to_heap_vector(), domain_separator: *domain_separator, - output: *result_array, + output: result_array.to_heap_array(), }); } else { unreachable!("ICE: Pedersen expects one array argument, a register for the domain separator, and one array result") @@ -110,13 +110,13 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::PedersenHash => { if let ( - [message, 
RegisterOrMemory::RegisterIndex(domain_separator)], - [RegisterOrMemory::RegisterIndex(result)], + [message, BrilligVariable::Simple(domain_separator)], + [BrilligVariable::Simple(result)], ) = (function_arguments, function_results) { let message_vector = convert_array_or_vector(brillig_context, message, bb_func); brillig_context.black_box_op_instruction(BlackBoxOp::PedersenHash { - inputs: message_vector, + inputs: message_vector.to_heap_vector(), domain_separator: *domain_separator, output: *result, }); @@ -126,8 +126,8 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::SchnorrVerify => { if let ( - [RegisterOrMemory::RegisterIndex(public_key_x), RegisterOrMemory::RegisterIndex(public_key_y), RegisterOrMemory::HeapArray(signature), message], - [RegisterOrMemory::RegisterIndex(result_register)], + [BrilligVariable::Simple(public_key_x), BrilligVariable::Simple(public_key_y), BrilligVariable::BrilligArray(signature), message], + [BrilligVariable::Simple(result_register)], ) = (function_arguments, function_results) { let message_hash = convert_array_or_vector(brillig_context, message, bb_func); @@ -135,8 +135,8 @@ pub(crate) fn convert_black_box_call( brillig_context.black_box_op_instruction(BlackBoxOp::SchnorrVerify { public_key_x: *public_key_x, public_key_y: *public_key_y, - message: message_hash, - signature, + message: message_hash.to_heap_vector(), + signature: signature.to_heap_vector(), result: *result_register, }); } else { @@ -145,14 +145,14 @@ pub(crate) fn convert_black_box_call( } BlackBoxFunc::FixedBaseScalarMul => { if let ( - [RegisterOrMemory::RegisterIndex(low), RegisterOrMemory::RegisterIndex(high)], - [RegisterOrMemory::HeapArray(result_array)], + [BrilligVariable::Simple(low), BrilligVariable::Simple(high)], + [BrilligVariable::BrilligArray(result_array)], ) = (function_arguments, function_results) { brillig_context.black_box_op_instruction(BlackBoxOp::FixedBaseScalarMul { low: *low, high: *high, - result: *result_array, + result: result_array.to_heap_array(), }); } else { unreachable!( @@ -166,12 +166,12 @@ pub(crate) fn convert_black_box_call( fn convert_array_or_vector( brillig_context: &mut BrilligContext, - array_or_vector: &RegisterOrMemory, + array_or_vector: &BrilligVariable, bb_func: &BlackBoxFunc, -) -> HeapVector { +) -> BrilligVector { match array_or_vector { - RegisterOrMemory::HeapArray(array) => brillig_context.array_to_vector(array), - RegisterOrMemory::HeapVector(vector) => *vector, + BrilligVariable::BrilligArray(array) => brillig_context.array_to_vector(array), + BrilligVariable::BrilligVector(vector) => *vector, _ => unreachable!( "ICE: {} expected an array or a vector, but got {:?}", bb_func.name(), diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs index 18fd822b07d..0e06a36fd94 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs @@ -1,6 +1,6 @@ +use crate::brillig::brillig_ir::brillig_variable::{BrilligArray, BrilligVariable, BrilligVector}; use crate::brillig::brillig_ir::{ - extract_heap_array, extract_register, extract_registers, BrilligBinaryOp, BrilligContext, - BRILLIG_INTEGER_ARITHMETIC_BIT_SIZE, + BrilligBinaryOp, BrilligContext, BRILLIG_INTEGER_ARITHMETIC_BIT_SIZE, }; use crate::ssa::ir::dfg::CallStack; use crate::ssa::ir::{ @@ -13,7 +13,7 @@ use crate::ssa::ir::{ types::{NumericType, Type}, value::{Value, ValueId}, }; -use 
acvm::acir::brillig::{BinaryFieldOp, BinaryIntOp, HeapArray, RegisterIndex, RegisterOrMemory}; +use acvm::acir::brillig::{BinaryFieldOp, BinaryIntOp, RegisterIndex, RegisterOrMemory}; use acvm::brillig_vm::brillig::HeapVector; use acvm::FieldElement; use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet}; @@ -53,7 +53,7 @@ impl<'block> BrilligBlock<'block> { variables .get_available_variables(function_context) .into_iter() - .flat_map(extract_registers) + .flat_map(|variable| variable.extract_registers()) .collect(), ); let last_uses = function_context.liveness.get_last_uses(&block_id).clone(); @@ -159,7 +159,7 @@ impl<'block> BrilligBlock<'block> { .iter() .flat_map(|value_id| { let return_variable = self.convert_ssa_value(*value_id, dfg); - extract_registers(return_variable) + return_variable.extract_registers() }) .collect(); self.brillig_context.return_instruction(&return_registers); @@ -168,32 +168,44 @@ impl<'block> BrilligBlock<'block> { } /// Passes an arbitrary variable from the registers of the source to the registers of the destination - fn pass_variable(&mut self, source: RegisterOrMemory, destination: RegisterOrMemory) { + fn pass_variable(&mut self, source: BrilligVariable, destination: BrilligVariable) { match (source, destination) { ( - RegisterOrMemory::RegisterIndex(source_register), - RegisterOrMemory::RegisterIndex(destination_register), + BrilligVariable::Simple(source_register), + BrilligVariable::Simple(destination_register), ) => { self.brillig_context.mov_instruction(destination_register, source_register); } ( - RegisterOrMemory::HeapArray(HeapArray { pointer: source_pointer, .. }), - RegisterOrMemory::HeapArray(HeapArray { pointer: destination_pointer, .. }), + BrilligVariable::BrilligArray(BrilligArray { + pointer: source_pointer, + size: _, + rc: source_rc, + }), + BrilligVariable::BrilligArray(BrilligArray { + pointer: destination_pointer, + size: _, + rc: destination_rc, + }), ) => { self.brillig_context.mov_instruction(destination_pointer, source_pointer); + self.brillig_context.mov_instruction(destination_rc, source_rc); } ( - RegisterOrMemory::HeapVector(HeapVector { + BrilligVariable::BrilligVector(BrilligVector { pointer: source_pointer, size: source_size, + rc: source_rc, }), - RegisterOrMemory::HeapVector(HeapVector { + BrilligVariable::BrilligVector(BrilligVector { pointer: destination_pointer, size: destination_size, + rc: destination_rc, }), ) => { self.brillig_context.mov_instruction(destination_pointer, source_pointer); self.brillig_context.mov_instruction(destination_size, source_size); + self.brillig_context.mov_instruction(destination_rc, source_rc); } (_, _) => { unreachable!("ICE: Cannot pass value from {:?} to {:?}", source, destination); @@ -214,7 +226,7 @@ impl<'block> BrilligBlock<'block> { // In the case of arrays, the values should already be in memory and the register should // Be a valid pointer to the array. // For slices, two registers are passed, the pointer to the data and a register holding the size of the slice. - Type::Numeric(_) | Type::Array(..) | Type::Slice(..) | Type::Reference => { + Type::Numeric(_) | Type::Array(..) | Type::Slice(..) | Type::Reference(_) => { self.variables.get_block_param( self.function_context, self.block_id, @@ -264,7 +276,25 @@ impl<'block> BrilligBlock<'block> { result_value, dfg, ); - self.brillig_context.allocate_variable_instruction(address_register); + match dfg.type_of_value(result_value) { + Type::Reference(element) => match *element { + Type::Array(..) 
=> { + self.brillig_context + .allocate_array_reference_instruction(address_register); + } + Type::Slice(..) => { + self.brillig_context + .allocate_vector_reference_instruction(address_register); + } + _ => { + self.brillig_context + .allocate_simple_reference_instruction(address_register); + } + }, + _ => { + unreachable!("ICE: Allocate on non-reference type") + } + } } Instruction::Store { address, value } => { let address_register = self.convert_ssa_register_value(*address, dfg); @@ -299,10 +329,11 @@ impl<'block> BrilligBlock<'block> { Value::ForeignFunction(func_name) => { let result_ids = dfg.instruction_results(instruction_id); - let input_registers = - vecmap(arguments, |value_id| self.convert_ssa_value(*value_id, dfg)); + let input_registers = vecmap(arguments, |value_id| { + self.convert_ssa_value(*value_id, dfg).to_register_or_memory() + }); let output_registers = vecmap(result_ids, |value_id| { - self.allocate_external_call_result(*value_id, dfg) + self.allocate_external_call_result(*value_id, dfg).to_register_or_memory() }); self.brillig_context.foreign_call_instruction( func_name.to_owned(), @@ -388,7 +419,7 @@ impl<'block> BrilligBlock<'block> { // or an array in the case of an array. if let Type::Numeric(_) = dfg.type_of_value(param_id) { let len_variable = self.convert_ssa_value(arguments[0], dfg); - let len_register_index = extract_register(len_variable); + let len_register_index = len_variable.extract_register(); self.brillig_context.mov_instruction(result_register, len_register_index); } else { self.convert_ssa_array_len(arguments[0], result_register, dfg); @@ -416,29 +447,29 @@ impl<'block> BrilligBlock<'block> { let results = dfg.instruction_results(instruction_id); - let target_len_variable = self.variables.define_variable( + let target_len = self.variables.define_register_variable( self.function_context, self.brillig_context, results[0], dfg, ); - let target_len = extract_register(target_len_variable); - let target_slice = self.variables.define_variable( - self.function_context, - self.brillig_context, - results[1], - dfg, - ); - - let heap_vec = self.brillig_context.extract_heap_vector(target_slice); + let target_vector = self + .variables + .define_variable( + self.function_context, + self.brillig_context, + results[1], + dfg, + ) + .extract_vector(); // Update the user-facing slice length self.brillig_context.mov_instruction(target_len, limb_count); self.brillig_context.radix_instruction( source, - heap_vec, + target_vector, radix, limb_count, matches!(endianness, Endian::Big), @@ -456,24 +487,29 @@ impl<'block> BrilligBlock<'block> { results[0], dfg, ); - let target_len = extract_register(target_len_variable); + let target_len = target_len_variable.extract_register(); - let target_slice = self.variables.define_variable( + let target_vector = match self.variables.define_variable( self.function_context, self.brillig_context, results[1], dfg, - ); + ) { + BrilligVariable::BrilligArray(array) => { + self.brillig_context.array_to_vector(&array) + } + BrilligVariable::BrilligVector(vector) => vector, + BrilligVariable::Simple(..) 
=> unreachable!("ICE: ToBits on non-array"), + }; let radix = self.brillig_context.make_constant(2_usize.into()); - let heap_vec = self.brillig_context.extract_heap_vector(target_slice); // Update the user-facing slice length self.brillig_context.mov_instruction(target_len, limb_count); self.brillig_context.radix_instruction( source, - heap_vec, + target_vector, radix, limb_count, matches!(endianness, Endian::Big), @@ -523,8 +559,8 @@ impl<'block> BrilligBlock<'block> { let array_variable = self.convert_ssa_value(*array, dfg); let array_pointer = match array_variable { - RegisterOrMemory::HeapArray(HeapArray { pointer, .. }) => pointer, - RegisterOrMemory::HeapVector(HeapVector { pointer, .. }) => pointer, + BrilligVariable::BrilligArray(BrilligArray { pointer, .. }) => pointer, + BrilligVariable::BrilligVector(BrilligVector { pointer, .. }) => pointer, _ => unreachable!("ICE: array get on non-array"), }; @@ -574,6 +610,14 @@ impl<'block> BrilligBlock<'block> { self.brillig_context.deallocate_register(condition); self.brillig_context.deallocate_register(right); } + Instruction::IncrementRc { value } => { + let rc_register = match self.convert_ssa_value(*value, dfg) { + BrilligVariable::BrilligArray(BrilligArray { rc, .. }) + | BrilligVariable::BrilligVector(BrilligVector { rc, .. }) => rc, + _ => unreachable!("ICE: increment rc on non-array"), + }; + self.brillig_context.usize_op_in_place(rc_register, BinaryIntOp::Add, 1); + } _ => todo!("ICE: Instruction not supported {instruction:?}"), }; @@ -598,10 +642,7 @@ impl<'block> BrilligBlock<'block> { // Convert the arguments to registers casting those to the types of the receiving function let argument_registers: Vec = arguments .iter() - .flat_map(|argument_id| { - let variable_to_pass = self.convert_ssa_value(*argument_id, dfg); - extract_registers(variable_to_pass) - }) + .flat_map(|argument_id| self.convert_ssa_value(*argument_id, dfg).extract_registers()) .collect(); let result_ids = dfg.instruction_results(instruction_id); @@ -637,7 +678,7 @@ impl<'block> BrilligBlock<'block> { // Collect the registers that should have been returned let returned_registers: Vec = variables_assigned_to .iter() - .flat_map(|returned_variable| extract_registers(*returned_variable)) + .flat_map(|returned_variable| returned_variable.extract_registers()) .collect(); assert!( @@ -654,17 +695,13 @@ impl<'block> BrilligBlock<'block> { &mut self, array_pointer: RegisterIndex, index_register: RegisterIndex, - destination_variable: RegisterOrMemory, + destination_variable: BrilligVariable, ) { match destination_variable { - RegisterOrMemory::RegisterIndex(destination_register) => { + BrilligVariable::Simple(destination_register) => { self.brillig_context.array_get(array_pointer, index_register, destination_register); } - RegisterOrMemory::HeapArray(HeapArray { pointer, .. }) => { - self.brillig_context.array_get(array_pointer, index_register, pointer); - } - RegisterOrMemory::HeapVector(..) => { - // Vectors are stored as references inside arrays to be able to match SSA indexes + BrilligVariable::BrilligArray(..) | BrilligVariable::BrilligVector(..) => { let reference = self.brillig_context.allocate_register(); self.brillig_context.array_get(array_pointer, index_register, reference); self.brillig_context.load_variable_instruction(destination_variable, reference); @@ -677,25 +714,30 @@ impl<'block> BrilligBlock<'block> { /// With a specific value changed. 
fn convert_ssa_array_set( &mut self, - source_variable: RegisterOrMemory, - destination_variable: RegisterOrMemory, + source_variable: BrilligVariable, + destination_variable: BrilligVariable, index_register: RegisterIndex, - value_variable: RegisterOrMemory, + value_variable: BrilligVariable, ) { let destination_pointer = match destination_variable { - RegisterOrMemory::HeapArray(HeapArray { pointer, .. }) => pointer, - RegisterOrMemory::HeapVector(HeapVector { pointer, .. }) => pointer, + BrilligVariable::BrilligArray(BrilligArray { pointer, .. }) => pointer, + BrilligVariable::BrilligVector(BrilligVector { pointer, .. }) => pointer, _ => unreachable!("ICE: array set returns non-array"), }; - // First issue a array copy to the destination + let reference_count = match source_variable { + BrilligVariable::BrilligArray(BrilligArray { rc, .. }) + | BrilligVariable::BrilligVector(BrilligVector { rc, .. }) => rc, + _ => unreachable!("ICE: array set on non-array"), + }; + let (source_pointer, source_size_as_register) = match source_variable { - RegisterOrMemory::HeapArray(HeapArray { size, pointer }) => { + BrilligVariable::BrilligArray(BrilligArray { size, pointer, rc: _ }) => { let source_size_register = self.brillig_context.allocate_register(); self.brillig_context.const_instruction(source_size_register, size.into()); (pointer, source_size_register) } - RegisterOrMemory::HeapVector(HeapVector { size, pointer }) => { + BrilligVariable::BrilligVector(BrilligVector { size, pointer, rc: _ }) => { let source_size_register = self.brillig_context.allocate_register(); self.brillig_context.mov_instruction(source_size_register, size); (pointer, source_size_register) @@ -703,51 +745,96 @@ impl<'block> BrilligBlock<'block> { _ => unreachable!("ICE: array set on non-array"), }; - self.brillig_context - .allocate_array_instruction(destination_pointer, source_size_as_register); + let one = self.brillig_context.make_constant(1_usize.into()); + let condition = self.brillig_context.allocate_register(); - self.brillig_context.copy_array_instruction( - source_pointer, - destination_pointer, - source_size_as_register, + self.brillig_context.binary_instruction( + reference_count, + one, + condition, + BrilligBinaryOp::Field { op: BinaryFieldOp::Equals }, ); - if let RegisterOrMemory::HeapVector(HeapVector { size: target_size, .. }) = - destination_variable - { - self.brillig_context.mov_instruction(target_size, source_size_as_register); + self.brillig_context.branch_instruction(condition, |ctx, cond| { + if cond { + // Reference count is 1, we can mutate the array directly + ctx.mov_instruction(destination_pointer, source_pointer); + } else { + // First issue a array copy to the destination + ctx.allocate_array_instruction(destination_pointer, source_size_as_register); + + ctx.copy_array_instruction( + source_pointer, + destination_pointer, + source_size_as_register, + ); + } + }); + + match destination_variable { + BrilligVariable::BrilligArray(BrilligArray { rc: target_rc, .. }) => { + self.brillig_context.const_instruction(target_rc, 1_usize.into()); + } + BrilligVariable::BrilligVector(BrilligVector { + size: target_size, + rc: target_rc, + .. 
+ }) => { + self.brillig_context.mov_instruction(target_size, source_size_as_register); + self.brillig_context.const_instruction(target_rc, 1_usize.into()); + } + _ => unreachable!("ICE: array set on non-array"), } // Then set the value in the newly created array self.store_variable_in_array(destination_pointer, index_register, value_variable); self.brillig_context.deallocate_register(source_size_as_register); + self.brillig_context.deallocate_register(one); + self.brillig_context.deallocate_register(condition); } - pub(crate) fn store_variable_in_array( - &mut self, + pub(crate) fn store_variable_in_array_with_ctx( + ctx: &mut BrilligContext, destination_pointer: RegisterIndex, index_register: RegisterIndex, - value_variable: RegisterOrMemory, + value_variable: BrilligVariable, ) { match value_variable { - RegisterOrMemory::RegisterIndex(value_register) => { - self.brillig_context.array_set(destination_pointer, index_register, value_register); + BrilligVariable::Simple(value_register) => { + ctx.array_set(destination_pointer, index_register, value_register); } - RegisterOrMemory::HeapArray(HeapArray { pointer, .. }) => { - self.brillig_context.array_set(destination_pointer, index_register, pointer); + BrilligVariable::BrilligArray(_) => { + let reference: RegisterIndex = ctx.allocate_register(); + ctx.allocate_array_reference_instruction(reference); + ctx.store_variable_instruction(reference, value_variable); + ctx.array_set(destination_pointer, index_register, reference); + ctx.deallocate_register(reference); } - RegisterOrMemory::HeapVector(_) => { - // Vectors are stored as references inside arrays to be able to match SSA indexes - let reference = self.brillig_context.allocate_register(); - self.brillig_context.allocate_variable_instruction(reference); - self.brillig_context.store_variable_instruction(reference, value_variable); - self.brillig_context.array_set(destination_pointer, index_register, reference); - self.brillig_context.deallocate_register(reference); + BrilligVariable::BrilligVector(_) => { + let reference = ctx.allocate_register(); + ctx.allocate_vector_reference_instruction(reference); + ctx.store_variable_instruction(reference, value_variable); + ctx.array_set(destination_pointer, index_register, reference); + ctx.deallocate_register(reference); } } } + pub(crate) fn store_variable_in_array( + &mut self, + destination_pointer: RegisterIndex, + index_register: RegisterIndex, + value_variable: BrilligVariable, + ) { + Self::store_variable_in_array_with_ctx( + self.brillig_context, + destination_pointer, + index_register, + value_variable, + ); + } + /// Convert the SSA slice operations to brillig slice operations fn convert_ssa_slice_intrinsic_call( &mut self, @@ -770,7 +857,7 @@ impl<'block> BrilligBlock<'block> { results[0], dfg, ) { - RegisterOrMemory::RegisterIndex(register_index) => register_index, + BrilligVariable::Simple(register_index) => register_index, _ => unreachable!("ICE: first value of a slice must be a register index"), }; @@ -781,7 +868,7 @@ impl<'block> BrilligBlock<'block> { dfg, ); - let target_vector = self.brillig_context.extract_heap_vector(target_variable); + let target_vector = target_variable.extract_vector(); let item_values = vecmap(&arguments[2..element_size + 2], |arg| { self.convert_ssa_value(*arg, dfg) }); @@ -797,7 +884,7 @@ impl<'block> BrilligBlock<'block> { results[0], dfg, ) { - RegisterOrMemory::RegisterIndex(register_index) => register_index, + BrilligVariable::Simple(register_index) => register_index, _ => unreachable!("ICE: first 
value of a slice must be a register index"), }; @@ -807,7 +894,7 @@ impl<'block> BrilligBlock<'block> { results[1], dfg, ); - let target_vector = self.brillig_context.extract_heap_vector(target_variable); + let target_vector = target_variable.extract_vector(); let item_values = vecmap(&arguments[2..element_size + 2], |arg| { self.convert_ssa_value(*arg, dfg) }); @@ -823,7 +910,7 @@ impl<'block> BrilligBlock<'block> { results[0], dfg, ) { - RegisterOrMemory::RegisterIndex(register_index) => register_index, + BrilligVariable::Simple(register_index) => register_index, _ => unreachable!("ICE: first value of a slice must be a register index"), }; @@ -834,7 +921,7 @@ impl<'block> BrilligBlock<'block> { dfg, ); - let target_vector = self.brillig_context.extract_heap_vector(target_variable); + let target_vector = target_variable.extract_vector(); let pop_variables = vecmap(&results[2..element_size + 2], |result| { self.variables.define_variable( @@ -856,7 +943,7 @@ impl<'block> BrilligBlock<'block> { results[element_size], dfg, ) { - RegisterOrMemory::RegisterIndex(register_index) => register_index, + BrilligVariable::Simple(register_index) => register_index, _ => unreachable!("ICE: first value of a slice must be a register index"), }; @@ -875,7 +962,7 @@ impl<'block> BrilligBlock<'block> { results[element_size + 1], dfg, ); - let target_vector = self.brillig_context.extract_heap_vector(target_variable); + let target_vector = target_variable.extract_vector(); self.update_slice_length(target_len, arguments[0], dfg, BinaryIntOp::Sub); @@ -888,7 +975,7 @@ impl<'block> BrilligBlock<'block> { results[0], dfg, ) { - RegisterOrMemory::RegisterIndex(register_index) => register_index, + BrilligVariable::Simple(register_index) => register_index, _ => unreachable!("ICE: first value of a slice must be a register index"), }; @@ -900,7 +987,7 @@ impl<'block> BrilligBlock<'block> { dfg, ); - let target_vector = self.brillig_context.extract_heap_vector(target_variable); + let target_vector = target_variable.extract_vector(); // Remove if indexing in insert is changed to flattened indexing // https://github.com/noir-lang/noir/issues/1889#issuecomment-1668048587 @@ -931,7 +1018,7 @@ impl<'block> BrilligBlock<'block> { results[0], dfg, ) { - RegisterOrMemory::RegisterIndex(register_index) => register_index, + BrilligVariable::Simple(register_index) => register_index, _ => unreachable!("ICE: first value of a slice must be a register index"), }; @@ -943,7 +1030,7 @@ impl<'block> BrilligBlock<'block> { target_id, dfg, ); - let target_vector = self.brillig_context.extract_heap_vector(target_variable); + let target_vector = target_variable.extract_vector(); // Remove if indexing in remove is changed to flattened indexing // https://github.com/noir-lang/noir/issues/1889#issuecomment-1668048587 @@ -998,7 +1085,7 @@ impl<'block> BrilligBlock<'block> { binary_op: BinaryIntOp, ) { let source_len_variable = self.convert_ssa_value(source_value, dfg); - let source_len = extract_register(source_len_variable); + let source_len = source_len_variable.extract_register(); self.brillig_context.usize_op(source_len, target_len, binary_op, 1); } @@ -1064,7 +1151,7 @@ impl<'block> BrilligBlock<'block> { } /// Converts an SSA `ValueId` into a `RegisterOrMemory`. Initializes if necessary. 
- fn convert_ssa_value(&mut self, value_id: ValueId, dfg: &DataFlowGraph) -> RegisterOrMemory { + fn convert_ssa_value(&mut self, value_id: ValueId, dfg: &DataFlowGraph) -> BrilligVariable { let value_id = dfg.resolve(value_id); let value = &dfg[value_id]; @@ -1082,7 +1169,7 @@ impl<'block> BrilligBlock<'block> { } else { let new_variable = self.variables.allocate_constant(self.brillig_context, value_id, dfg); - let register_index = extract_register(new_variable); + let register_index = new_variable.extract_register(); self.brillig_context.const_instruction(register_index, (*constant).into()); new_variable @@ -1097,19 +1184,21 @@ impl<'block> BrilligBlock<'block> { // Initialize the variable let pointer = match new_variable { - RegisterOrMemory::HeapArray(heap_array) => { + BrilligVariable::BrilligArray(brillig_array) => { self.brillig_context - .allocate_fixed_length_array(heap_array.pointer, array.len()); + .allocate_fixed_length_array(brillig_array.pointer, array.len()); + self.brillig_context + .const_instruction(brillig_array.rc, 1_usize.into()); - heap_array.pointer + brillig_array.pointer } - RegisterOrMemory::HeapVector(heap_vector) => { - self.brillig_context - .const_instruction(heap_vector.size, array.len().into()); + BrilligVariable::BrilligVector(vector) => { + self.brillig_context.const_instruction(vector.size, array.len().into()); self.brillig_context - .allocate_array_instruction(heap_vector.pointer, heap_vector.size); + .allocate_array_instruction(vector.pointer, vector.size); + self.brillig_context.const_instruction(vector.rc, 1_usize.into()); - heap_vector.pointer + vector.pointer } _ => unreachable!( "ICE: Cannot initialize array value created as {new_variable:?}" @@ -1138,7 +1227,7 @@ impl<'block> BrilligBlock<'block> { new_variable } } - _ => { + Value::Function(_) | Value::Intrinsic(_) | Value::ForeignFunction(_) => { todo!("ICE: Cannot convert value {value:?}") } } @@ -1151,14 +1240,14 @@ impl<'block> BrilligBlock<'block> { dfg: &DataFlowGraph, ) -> RegisterIndex { let variable = self.convert_ssa_value(value_id, dfg); - extract_register(variable) + variable.extract_register() } fn allocate_external_call_result( &mut self, result: ValueId, dfg: &DataFlowGraph, - ) -> RegisterOrMemory { + ) -> BrilligVariable { let typ = dfg[result].get_type(); match typ { Type::Numeric(_) => self.variables.define_variable( @@ -1175,8 +1264,10 @@ impl<'block> BrilligBlock<'block> { result, dfg, ); - let array = extract_heap_array(variable); + let array = variable.extract_array(); self.brillig_context.allocate_fixed_length_array(array.pointer, array.size); + self.brillig_context.const_instruction(array.rc, 1_usize.into()); + variable } Type::Slice(_) => { @@ -1186,12 +1277,14 @@ impl<'block> BrilligBlock<'block> { result, dfg, ); - let vector = self.brillig_context.extract_heap_vector(variable); + let vector = variable.extract_vector(); // Set the pointer to the current stack frame // The stack pointer will then be updated by the caller of this method // once the external call is resolved and the array size is known self.brillig_context.set_array_pointer(vector.pointer); + self.brillig_context.const_instruction(vector.rc, 1_usize.into()); + variable } _ => { @@ -1201,7 +1294,7 @@ impl<'block> BrilligBlock<'block> { } /// Gets the "user-facing" length of an array. - /// An array of structs with two fields would be stored as an 2 * array.len() heap array/heap vector. + /// An array of structs with two fields would be stored as an 2 * array.len() array/vector. 
/// So we divide the length by the number of subitems in an item to get the user-facing length. fn convert_ssa_array_len( &mut self, @@ -1213,11 +1306,11 @@ impl<'block> BrilligBlock<'block> { let element_size = dfg.type_of_value(array_id).element_size(); match array_variable { - RegisterOrMemory::HeapArray(HeapArray { size, .. }) => { + BrilligVariable::BrilligArray(BrilligArray { size, .. }) => { self.brillig_context .const_instruction(result_register, (size / element_size).into()); } - RegisterOrMemory::HeapVector(HeapVector { size, .. }) => { + BrilligVariable::BrilligVector(BrilligVector { size, .. }) => { self.brillig_context.usize_op( size, result_register, @@ -1240,7 +1333,7 @@ pub(crate) fn type_of_binary_operation(lhs_type: &Type, rhs_type: &Type) -> Type (_, Type::Function) | (Type::Function, _) => { unreachable!("Functions are invalid in binary operations") } - (_, Type::Reference) | (Type::Reference, _) => { + (_, Type::Reference(_)) | (Type::Reference(_), _) => { unreachable!("References are invalid in binary operations") } (_, Type::Array(..)) | (Type::Array(..), _) => { diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs index eb7bab8c971..f2e698c0aa9 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block_variables.rs @@ -1,8 +1,11 @@ -use acvm::brillig_vm::brillig::{HeapArray, HeapVector, RegisterIndex, RegisterOrMemory}; +use acvm::brillig_vm::brillig::RegisterIndex; use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet}; use crate::{ - brillig::brillig_ir::{extract_register, BrilligContext}, + brillig::brillig_ir::{ + brillig_variable::{BrilligArray, BrilligVariable, BrilligVector}, + BrilligContext, + }, ssa::ir::{ basic_block::BasicBlockId, dfg::DataFlowGraph, @@ -16,7 +19,7 @@ use super::brillig_fn::FunctionContext; #[derive(Debug, Default)] pub(crate) struct BlockVariables { available_variables: HashSet, - available_constants: HashMap, + available_constants: HashMap, } impl BlockVariables { @@ -32,7 +35,7 @@ impl BlockVariables { pub(crate) fn get_available_variables( &self, function_context: &FunctionContext, - ) -> Vec { + ) -> Vec { self.available_variables .iter() .map(|value_id| { @@ -52,7 +55,7 @@ impl BlockVariables { brillig_context: &mut BrilligContext, value_id: ValueId, dfg: &DataFlowGraph, - ) -> RegisterOrMemory { + ) -> BrilligVariable { let value_id = dfg.resolve(value_id); let variable = allocate_value(value_id, brillig_context, dfg); @@ -74,7 +77,7 @@ impl BlockVariables { dfg: &DataFlowGraph, ) -> RegisterIndex { let variable = self.define_variable(function_context, brillig_context, value, dfg); - extract_register(variable) + variable.extract_register() } /// Removes a variable so it's not used anymore within this block. 
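A note for readers of these hunks: the new brillig_ir::brillig_variable module they import is not included in this diff. Reconstructed from how the generated code uses it (the derives, visibility and method bodies below are assumptions, not taken from the patch), the reference-counted variable types look roughly like this:

use acvm::brillig_vm::brillig::{HeapArray, HeapVector, RegisterIndex};

/// A fixed-size Brillig array: a pointer register plus a reference-count register.
/// The size is known at compile time, so it is not tracked in a register.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(crate) struct BrilligArray {
    pub(crate) pointer: RegisterIndex,
    pub(crate) size: usize,
    pub(crate) rc: RegisterIndex,
}

impl BrilligArray {
    /// Lower to the ACVM heap type; the rc register is dropped.
    pub(crate) fn to_heap_array(self) -> HeapArray {
        HeapArray { pointer: self.pointer, size: self.size }
    }

    /// Memory cells needed when this variable is stored behind a reference.
    pub(crate) fn registers_count() -> usize {
        2 // pointer + rc
    }
}

/// A Brillig vector (slice): pointer, runtime size and reference count, one register each.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(crate) struct BrilligVector {
    pub(crate) pointer: RegisterIndex,
    pub(crate) size: RegisterIndex,
    pub(crate) rc: RegisterIndex,
}

impl BrilligVector {
    /// Lower to the ACVM heap type; the rc register is dropped.
    pub(crate) fn to_heap_vector(self) -> HeapVector {
        HeapVector { pointer: self.pointer, size: self.size }
    }

    /// Memory cells needed when this variable is stored behind a reference.
    pub(crate) fn registers_count() -> usize {
        3 // pointer + size + rc
    }
}

/// The SSA-facing replacement for `RegisterOrMemory` used throughout these hunks.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(crate) enum BrilligVariable {
    Simple(RegisterIndex),
    BrilligArray(BrilligArray),
    BrilligVector(BrilligVector),
}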
@@ -88,7 +91,7 @@ impl BlockVariables { function_context: &FunctionContext, value_id: ValueId, dfg: &DataFlowGraph, - ) -> RegisterOrMemory { + ) -> BrilligVariable { let value_id = dfg.resolve(value_id); if let Some(constant) = self.available_constants.get(&value_id) { *constant @@ -112,7 +115,7 @@ impl BlockVariables { brillig_context: &mut BrilligContext, value_id: ValueId, dfg: &DataFlowGraph, - ) -> RegisterOrMemory { + ) -> BrilligVariable { let value_id = dfg.resolve(value_id); let constant = allocate_value(value_id, brillig_context, dfg); self.available_constants.insert(value_id, constant); @@ -124,7 +127,7 @@ impl BlockVariables { &mut self, value_id: ValueId, dfg: &DataFlowGraph, - ) -> Option { + ) -> Option { let value_id = dfg.resolve(value_id); self.available_constants.get(&value_id).cloned() } @@ -141,7 +144,7 @@ impl BlockVariables { block_id: BasicBlockId, value_id: ValueId, dfg: &DataFlowGraph, - ) -> RegisterOrMemory { + ) -> BrilligVariable { let value_id = dfg.resolve(value_id); assert!( function_context @@ -166,25 +169,34 @@ pub(crate) fn allocate_value( value_id: ValueId, brillig_context: &mut BrilligContext, dfg: &DataFlowGraph, -) -> RegisterOrMemory { +) -> BrilligVariable { let typ = dfg.type_of_value(value_id); match typ { - Type::Numeric(_) | Type::Reference => { + Type::Numeric(_) | Type::Reference(_) => { let register = brillig_context.allocate_register(); - RegisterOrMemory::RegisterIndex(register) + BrilligVariable::Simple(register) } Type::Array(item_typ, elem_count) => { let pointer_register = brillig_context.allocate_register(); + let rc_register = brillig_context.allocate_register(); let size = compute_array_length(&item_typ, elem_count); - RegisterOrMemory::HeapArray(HeapArray { pointer: pointer_register, size }) + + BrilligVariable::BrilligArray(BrilligArray { + pointer: pointer_register, + size, + rc: rc_register, + }) } Type::Slice(_) => { let pointer_register = brillig_context.allocate_register(); let size_register = brillig_context.allocate_register(); - RegisterOrMemory::HeapVector(HeapVector { + let rc_register = brillig_context.allocate_register(); + + BrilligVariable::BrilligVector(BrilligVector { pointer: pointer_register, size: size_register, + rc: rc_register, }) } Type::Function => { diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs index ec72ceb2909..026def4ef11 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_fn.rs @@ -1,9 +1,9 @@ -use acvm::brillig_vm::brillig::RegisterOrMemory; use iter_extended::vecmap; use crate::{ brillig::brillig_ir::{ artifact::{BrilligParameter, Label}, + brillig_variable::BrilligVariable, BrilligContext, }, ssa::ir::{ @@ -21,7 +21,7 @@ use super::{brillig_block_variables::allocate_value, variable_liveness::Variable pub(crate) struct FunctionContext { pub(crate) function_id: FunctionId, /// Map from SSA values its allocation. Since values can be only defined once in SSA form, we insert them here on when we allocate them at their definition. - pub(crate) ssa_value_allocations: HashMap, + pub(crate) ssa_value_allocations: HashMap, /// Block parameters are pre allocated at the function level. pub(crate) block_parameters: HashMap>, /// The block ids of the function in reverse post order. 
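Building on the sketch above (still an approximation rather than code from the patch), the helpers the generator calls — extract_register, extract_array, extract_vector, extract_registers and to_register_or_memory — would plausibly look like the following, with the rc register only surviving on the Brillig side:

use acvm::brillig_vm::brillig::{RegisterIndex, RegisterOrMemory};

impl BrilligVariable {
    pub(crate) fn extract_register(self) -> RegisterIndex {
        match self {
            BrilligVariable::Simple(register) => register,
            _ => unreachable!("ICE: expected a simple register, got {self:?}"),
        }
    }

    pub(crate) fn extract_array(self) -> BrilligArray {
        match self {
            BrilligVariable::BrilligArray(array) => array,
            _ => unreachable!("ICE: expected an array, got {self:?}"),
        }
    }

    pub(crate) fn extract_vector(self) -> BrilligVector {
        match self {
            BrilligVariable::BrilligVector(vector) => vector,
            _ => unreachable!("ICE: expected a vector, got {self:?}"),
        }
    }

    /// All registers backing this variable, e.g. for block parameters and calls.
    pub(crate) fn extract_registers(self) -> Vec<RegisterIndex> {
        match self {
            BrilligVariable::Simple(register) => vec![register],
            BrilligVariable::BrilligArray(BrilligArray { pointer, rc, .. }) => vec![pointer, rc],
            BrilligVariable::BrilligVector(BrilligVector { pointer, size, rc }) => {
                vec![pointer, size, rc]
            }
        }
    }

    /// Lower to the ACVM type expected by foreign (oracle) calls.
    pub(crate) fn to_register_or_memory(self) -> RegisterOrMemory {
        match self {
            BrilligVariable::Simple(register) => RegisterOrMemory::RegisterIndex(register),
            BrilligVariable::BrilligArray(array) => {
                RegisterOrMemory::HeapArray(array.to_heap_array())
            }
            BrilligVariable::BrilligVector(vector) => {
                RegisterOrMemory::HeapVector(vector.to_heap_vector())
            }
        }
    }
}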
@@ -72,7 +72,7 @@ impl FunctionContext { fn ssa_type_to_parameter(typ: &Type) -> BrilligParameter { match typ { - Type::Numeric(_) | Type::Reference => BrilligParameter::Simple, + Type::Numeric(_) | Type::Reference(_) => BrilligParameter::Simple, Type::Array(item_type, size) => BrilligParameter::Array( vecmap(item_type.iter(), |item_typ| { FunctionContext::ssa_type_to_parameter(item_typ) diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs index 211d670e7d8..6402e6f9d97 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs @@ -1,13 +1,15 @@ -use acvm::brillig_vm::brillig::{BinaryIntOp, HeapVector, RegisterIndex, RegisterOrMemory}; +use acvm::brillig_vm::brillig::{BinaryIntOp, RegisterIndex}; + +use crate::brillig::brillig_ir::brillig_variable::{BrilligVariable, BrilligVector}; use super::brillig_block::BrilligBlock; impl<'block> BrilligBlock<'block> { pub(crate) fn slice_push_back_operation( &mut self, - target_vector: HeapVector, - source_vector: HeapVector, - variables_to_insert: &[RegisterOrMemory], + target_vector: BrilligVector, + source_vector: BrilligVector, + variables_to_insert: &[BrilligVariable], ) { // First we need to allocate the target vector incrementing the size by variables_to_insert.len() self.brillig_context.usize_op( @@ -17,6 +19,8 @@ impl<'block> BrilligBlock<'block> { variables_to_insert.len(), ); self.brillig_context.allocate_array_instruction(target_vector.pointer, target_vector.size); + // We initialize the RC of the target vector to 1 + self.brillig_context.const_instruction(target_vector.rc, 1_usize.into()); // Now we copy the source vector into the target vector self.brillig_context.copy_array_instruction( @@ -40,9 +44,9 @@ impl<'block> BrilligBlock<'block> { pub(crate) fn slice_push_front_operation( &mut self, - target_vector: HeapVector, - source_vector: HeapVector, - variables_to_insert: &[RegisterOrMemory], + target_vector: BrilligVector, + source_vector: BrilligVector, + variables_to_insert: &[BrilligVariable], ) { // First we need to allocate the target vector incrementing the size by variables_to_insert.len() self.brillig_context.usize_op( @@ -52,6 +56,8 @@ impl<'block> BrilligBlock<'block> { variables_to_insert.len(), ); self.brillig_context.allocate_array_instruction(target_vector.pointer, target_vector.size); + // We initialize the RC of the target vector to 1 + self.brillig_context.const_instruction(target_vector.rc, 1_usize.into()); // Now we offset the target pointer by variables_to_insert.len() let destination_copy_pointer = self.brillig_context.allocate_register(); @@ -81,9 +87,9 @@ impl<'block> BrilligBlock<'block> { pub(crate) fn slice_pop_front_operation( &mut self, - target_vector: HeapVector, - source_vector: HeapVector, - removed_items: &[RegisterOrMemory], + target_vector: BrilligVector, + source_vector: BrilligVector, + removed_items: &[BrilligVariable], ) { // First we need to allocate the target vector decrementing the size by removed_items.len() self.brillig_context.usize_op( @@ -93,6 +99,8 @@ impl<'block> BrilligBlock<'block> { removed_items.len(), ); self.brillig_context.allocate_array_instruction(target_vector.pointer, target_vector.size); + // We initialize the RC of the target vector to 1 + self.brillig_context.const_instruction(target_vector.rc, 1_usize.into()); // Now we offset the source pointer by removed_items.len() let 
source_copy_pointer = self.brillig_context.allocate_register(); @@ -121,9 +129,9 @@ impl<'block> BrilligBlock<'block> { pub(crate) fn slice_pop_back_operation( &mut self, - target_vector: HeapVector, - source_vector: HeapVector, - removed_items: &[RegisterOrMemory], + target_vector: BrilligVector, + source_vector: BrilligVector, + removed_items: &[BrilligVariable], ) { // First we need to allocate the target vector decrementing the size by removed_items.len() self.brillig_context.usize_op( @@ -133,6 +141,8 @@ impl<'block> BrilligBlock<'block> { removed_items.len(), ); self.brillig_context.allocate_array_instruction(target_vector.pointer, target_vector.size); + // We initialize the RC of the target vector to 1 + self.brillig_context.const_instruction(target_vector.rc, 1_usize.into()); // Now we copy all elements except the last items into the target vector self.brillig_context.copy_array_instruction( @@ -156,10 +166,10 @@ impl<'block> BrilligBlock<'block> { pub(crate) fn slice_insert_operation( &mut self, - target_vector: HeapVector, - source_vector: HeapVector, + target_vector: BrilligVector, + source_vector: BrilligVector, index: RegisterIndex, - items: &[RegisterOrMemory], + items: &[BrilligVariable], ) { // First we need to allocate the target vector incrementing the size by items.len() self.brillig_context.usize_op( @@ -169,6 +179,8 @@ impl<'block> BrilligBlock<'block> { items.len(), ); self.brillig_context.allocate_array_instruction(target_vector.pointer, target_vector.size); + // We initialize the RC of the target vector to 1 + self.brillig_context.const_instruction(target_vector.rc, 1_usize.into()); // Copy the elements to the left of the index self.brillig_context.copy_array_instruction( @@ -226,10 +238,10 @@ impl<'block> BrilligBlock<'block> { pub(crate) fn slice_remove_operation( &mut self, - target_vector: HeapVector, - source_vector: HeapVector, + target_vector: BrilligVector, + source_vector: BrilligVector, index: RegisterIndex, - removed_items: &[RegisterOrMemory], + removed_items: &[BrilligVariable], ) { // First we need to allocate the target vector decrementing the size by removed_items.len() self.brillig_context.usize_op( @@ -239,6 +251,8 @@ impl<'block> BrilligBlock<'block> { removed_items.len(), ); self.brillig_context.allocate_array_instruction(target_vector.pointer, target_vector.size); + // We initialize the RC of the target vector to 1 + self.brillig_context.const_instruction(target_vector.rc, 1_usize.into()); // Copy the elements to the left of the index self.brillig_context.copy_array_instruction( @@ -297,11 +311,11 @@ impl<'block> BrilligBlock<'block> { pub(crate) fn convert_array_or_vector_to_vector( &mut self, - source_variable: RegisterOrMemory, - ) -> HeapVector { + source_variable: BrilligVariable, + ) -> BrilligVector { match source_variable { - RegisterOrMemory::HeapVector(source_vector) => source_vector, - RegisterOrMemory::HeapArray(source_array) => { + BrilligVariable::BrilligVector(source_vector) => source_vector, + BrilligVariable::BrilligArray(source_array) => { self.brillig_context.array_to_vector(&source_array) } _ => unreachable!("ICE: unsupported slice push back source {:?}", source_variable), @@ -313,13 +327,16 @@ impl<'block> BrilligBlock<'block> { mod tests { use std::vec; - use acvm::acir::brillig::{HeapVector, Value}; - use acvm::brillig_vm::brillig::{RegisterIndex, RegisterOrMemory}; + use acvm::acir::brillig::Value; + use acvm::brillig_vm::brillig::RegisterIndex; use crate::brillig::brillig_gen::brillig_block::BrilligBlock; use 
crate::brillig::brillig_gen::brillig_block_variables::BlockVariables; use crate::brillig::brillig_gen::brillig_fn::FunctionContext; use crate::brillig::brillig_ir::artifact::BrilligParameter; + use crate::brillig::brillig_ir::brillig_variable::{ + BrilligArray, BrilligVariable, BrilligVector, + }; use crate::brillig::brillig_ir::tests::{ create_and_run_vm, create_context, create_entry_point_bytecode, }; @@ -373,33 +390,44 @@ mod tests { let (_, mut function_context, mut context) = create_test_environment(); // Allocate the parameters - let array_pointer = context.allocate_register(); + let array_variable = BrilligArray { + pointer: context.allocate_register(), + size: array.len(), + rc: context.allocate_register(), + }; let item_to_insert = context.allocate_register(); // Cast the source array to a vector - let array_size = context.make_constant(array.len().into()); + let source_vector = context.array_to_vector(&array_variable); // Allocate the results - let copied_array_pointer = context.allocate_register(); - let copied_array_size = context.allocate_register(); + let target_vector = BrilligVector { + pointer: context.allocate_register(), + size: context.allocate_register(), + rc: context.allocate_register(), + }; let mut block = create_brillig_block(&mut function_context, &mut context); if push_back { block.slice_push_back_operation( - HeapVector { pointer: copied_array_pointer, size: copied_array_size }, - HeapVector { pointer: array_pointer, size: array_size }, - &[RegisterOrMemory::RegisterIndex(item_to_insert)], + target_vector, + source_vector, + &[BrilligVariable::Simple(item_to_insert)], ); } else { block.slice_push_front_operation( - HeapVector { pointer: copied_array_pointer, size: copied_array_size }, - HeapVector { pointer: array_pointer, size: array_size }, - &[RegisterOrMemory::RegisterIndex(item_to_insert)], + target_vector, + source_vector, + &[BrilligVariable::Simple(item_to_insert)], ); } - context.return_instruction(&[copied_array_pointer, copied_array_size]); + context.return_instruction(&[ + target_vector.pointer, + target_vector.rc, + target_vector.size, + ]); let bytecode = create_entry_point_bytecode(context, arguments, returns).byte_code; let vm = create_and_run_vm( @@ -465,34 +493,45 @@ mod tests { let (_, mut function_context, mut context) = create_test_environment(); // Allocate the parameters - let array_pointer = context.allocate_register(); + let array_variable = BrilligArray { + pointer: context.allocate_register(), + size: array.len(), + rc: context.allocate_register(), + }; // Cast the source array to a vector - let array_size = context.make_constant(array.len().into()); + let source_vector = context.array_to_vector(&array_variable); // Allocate the results - let copied_array_pointer = context.allocate_register(); + let target_vector = BrilligVector { + pointer: context.allocate_register(), + size: context.allocate_register(), + rc: context.allocate_register(), + }; let removed_item = context.allocate_register(); - let copied_array_size = context.allocate_register(); - let mut block = create_brillig_block(&mut function_context, &mut context); if pop_back { block.slice_pop_back_operation( - HeapVector { pointer: copied_array_pointer, size: copied_array_size }, - HeapVector { pointer: array_pointer, size: array_size }, - &[RegisterOrMemory::RegisterIndex(removed_item)], + target_vector, + source_vector, + &[BrilligVariable::Simple(removed_item)], ); } else { block.slice_pop_front_operation( - HeapVector { pointer: copied_array_pointer, size: 
copied_array_size }, - HeapVector { pointer: array_pointer, size: array_size }, - &[RegisterOrMemory::RegisterIndex(removed_item)], + target_vector, + source_vector, + &[BrilligVariable::Simple(removed_item)], ); } - context.return_instruction(&[copied_array_pointer, copied_array_size, removed_item]); + context.return_instruction(&[ + target_vector.pointer, + target_vector.rc, + target_vector.size, + removed_item, + ]); let bytecode = create_entry_point_bytecode(context, arguments, returns).byte_code; let vm = create_and_run_vm(array.clone(), vec![Value::from(0_usize)], &bytecode); @@ -557,28 +596,38 @@ mod tests { let (_, mut function_context, mut context) = create_test_environment(); // Allocate the parameters - let array_pointer = context.allocate_register(); + let array_variable = BrilligArray { + pointer: context.allocate_register(), + size: array.len(), + rc: context.allocate_register(), + }; let item_to_insert = context.allocate_register(); let index_to_insert = context.allocate_register(); // Cast the source array to a vector - let array_size = context.make_constant(array.len().into()); + let source_vector = context.array_to_vector(&array_variable); // Allocate the results - let copied_array_pointer = context.allocate_register(); - - let copied_array_size = context.allocate_register(); + let target_vector = BrilligVector { + pointer: context.allocate_register(), + size: context.allocate_register(), + rc: context.allocate_register(), + }; let mut block = create_brillig_block(&mut function_context, &mut context); block.slice_insert_operation( - HeapVector { pointer: copied_array_pointer, size: copied_array_size }, - HeapVector { pointer: array_pointer, size: array_size }, + target_vector, + source_vector, index_to_insert, - &[RegisterOrMemory::RegisterIndex(item_to_insert)], + &[BrilligVariable::Simple(item_to_insert)], ); - context.return_instruction(&[copied_array_pointer, copied_array_size]); + context.return_instruction(&[ + target_vector.pointer, + target_vector.rc, + target_vector.size, + ]); let bytecode = create_entry_point_bytecode(context, arguments, returns).byte_code; let vm = create_and_run_vm( @@ -679,28 +728,39 @@ mod tests { let (_, mut function_context, mut context) = create_test_environment(); // Allocate the parameters - let array_pointer = context.allocate_register(); + let array_variable = BrilligArray { + pointer: context.allocate_register(), + size: array.len(), + rc: context.allocate_register(), + }; let index_to_insert = context.allocate_register(); // Cast the source array to a vector - let array_size = context.make_constant(array.len().into()); + let source_vector = context.array_to_vector(&array_variable); // Allocate the results - let copied_array_pointer = context.allocate_register(); + let target_vector = BrilligVector { + pointer: context.allocate_register(), + size: context.allocate_register(), + rc: context.allocate_register(), + }; let removed_item = context.allocate_register(); - let copied_array_size = context.allocate_register(); - let mut block = create_brillig_block(&mut function_context, &mut context); block.slice_remove_operation( - HeapVector { pointer: copied_array_pointer, size: copied_array_size }, - HeapVector { pointer: array_pointer, size: array_size }, + target_vector, + source_vector, index_to_insert, - &[RegisterOrMemory::RegisterIndex(removed_item)], + &[BrilligVariable::Simple(removed_item)], ); - context.return_instruction(&[copied_array_pointer, copied_array_size, removed_item]); + context.return_instruction(&[ + 
target_vector.pointer, + target_vector.rc, + target_vector.size, + removed_item, + ]); let bytecode = create_entry_point_bytecode(context, arguments, returns).byte_code; let vm = create_and_run_vm(array.clone(), vec![Value::from(0_usize), index], &bytecode); diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs index d57196288bf..05978c2c6ab 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/variable_liveness.rs @@ -332,7 +332,7 @@ mod test { let v0 = builder.add_parameter(Type::field()); let v1 = builder.add_parameter(Type::field()); - let v3 = builder.insert_allocate(); + let v3 = builder.insert_allocate(Type::field()); let zero = builder.numeric_constant(0u128, Type::field()); builder.insert_store(v3, zero); @@ -439,7 +439,7 @@ mod test { let v0 = builder.add_parameter(Type::field()); let v1 = builder.add_parameter(Type::field()); - let v3 = builder.insert_allocate(); + let v3 = builder.insert_allocate(Type::field()); let zero = builder.numeric_constant(0u128, Type::field()); builder.insert_store(v3, zero); diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir.rs index 880ae95dcd7..ff182aaa7d2 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir.rs @@ -5,6 +5,7 @@ //! ssa types and types in this module. //! A similar paradigm can be seen with the `acir_ir` module. pub(crate) mod artifact; +pub(crate) mod brillig_variable; pub(crate) mod debug_show; pub(crate) mod registers; @@ -14,12 +15,13 @@ use crate::ssa::ir::dfg::CallStack; use self::{ artifact::{BrilligArtifact, UnresolvedJumpLocation}, + brillig_variable::{BrilligArray, BrilligVariable, BrilligVector}, registers::BrilligRegistersContext, }; use acvm::{ acir::brillig::{ - BinaryFieldOp, BinaryIntOp, BlackBoxOp, HeapArray, HeapVector, Opcode as BrilligOpcode, - RegisterIndex, RegisterOrMemory, Value, + BinaryFieldOp, BinaryIntOp, BlackBoxOp, Opcode as BrilligOpcode, RegisterIndex, + RegisterOrMemory, Value, }, FieldElement, }; @@ -88,6 +90,8 @@ pub(crate) struct BrilligContext { context_label: String, /// Section label, used to separate sections of code section_label: usize, + /// Stores the next available section + next_section: usize, /// IR printer debug_show: DebugShow, } @@ -100,6 +104,7 @@ impl BrilligContext { registers: BrilligRegistersContext::new(), context_label: String::default(), section_label: 0, + next_section: 1, debug_show: DebugShow::new(enable_debug_trace), } } @@ -161,10 +166,14 @@ impl BrilligContext { /// Allocates a variable in memory and stores the /// pointer to the array in `pointer_register` - pub(crate) fn allocate_variable_instruction(&mut self, pointer_register: RegisterIndex) { + fn allocate_variable_reference_instruction( + &mut self, + pointer_register: RegisterIndex, + size: usize, + ) { self.debug_show.allocate_instruction(pointer_register); - // A variable can be stored in up to two values, so we reserve two values for that. - let size_register = self.make_constant(2_u128.into()); + // A variable can be stored in up to three values, so we reserve three values for that. 
+ let size_register = self.make_constant(size.into()); self.push_opcode(BrilligOpcode::Mov { destination: pointer_register, source: ReservedRegisters::stack_pointer(), @@ -177,6 +186,30 @@ impl BrilligContext { ); } + pub(crate) fn allocate_simple_reference_instruction( + &mut self, + pointer_register: RegisterIndex, + ) { + self.allocate_variable_reference_instruction(pointer_register, 1); + } + + pub(crate) fn allocate_array_reference_instruction(&mut self, pointer_register: RegisterIndex) { + self.allocate_variable_reference_instruction( + pointer_register, + BrilligArray::registers_count(), + ); + } + + pub(crate) fn allocate_vector_reference_instruction( + &mut self, + pointer_register: RegisterIndex, + ) { + self.allocate_variable_reference_instruction( + pointer_register, + BrilligVector::registers_count(), + ); + } + /// Gets the value in the array at index `index` and stores it in `result` pub(crate) fn array_get( &mut self, @@ -253,8 +286,8 @@ impl BrilligContext { { let iterator_register = self.make_constant(0_u128.into()); - let loop_label = self.next_section_label(); - self.enter_next_section(); + let (loop_section, loop_label) = self.reserve_next_section_label(); + self.enter_section(loop_section); // Loop body @@ -267,7 +300,7 @@ impl BrilligContext { BinaryIntOp::LessThan, ); - let exit_loop_label = self.next_section_label(); + let (exit_loop_section, exit_loop_label) = self.reserve_next_section_label(); self.not_instruction(iterator_less_than_iterations, 1, iterator_less_than_iterations); self.jump_if_instruction(iterator_less_than_iterations, exit_loop_label); @@ -281,12 +314,41 @@ impl BrilligContext { self.jump_instruction(loop_label); // Exit the loop - self.enter_next_section(); + self.enter_section(exit_loop_section); + // Deallocate our temporary registers self.deallocate_register(iterator_less_than_iterations); self.deallocate_register(iterator_register); } + /// This instruction will issue an if-then branch that will check if the condition is true + /// and if so, perform the instructions given in `f(self, true)` and otherwise perform the + /// instructions given in `f(self, false)`. A boolean is passed instead of two separate + /// functions to allow the given function to mutably alias its environment. 
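+    /// Both branches jump to a shared end section once they complete, so execution continues from a single point after the branch.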
+ pub(crate) fn branch_instruction( + &mut self, + condition: RegisterIndex, + mut f: impl FnMut(&mut BrilligContext, bool), + ) { + // Reserve 3 sections + let (then_section, then_label) = self.reserve_next_section_label(); + let (otherwise_section, otherwise_label) = self.reserve_next_section_label(); + let (end_section, end_label) = self.reserve_next_section_label(); + + self.jump_if_instruction(condition, then_label.clone()); + self.jump_instruction(otherwise_label.clone()); + + self.enter_section(then_section); + f(self, true); + self.jump_instruction(end_label.clone()); + + self.enter_section(otherwise_section); + f(self, false); + self.jump_instruction(end_label.clone()); + + self.enter_section(end_section); + } + /// Adds a label to the next opcode pub(crate) fn enter_context(&mut self, label: T) { self.debug_show.enter_context(label.to_string()); @@ -299,23 +361,25 @@ impl BrilligContext { .add_label_at_position(self.current_section_label(), self.obj.index_of_next_opcode()); } - /// Increments the section label and adds a section label to the next opcode - fn enter_next_section(&mut self) { - self.section_label += 1; + /// Enter the given section + fn enter_section(&mut self, section: usize) { + self.section_label = section; self.obj .add_label_at_position(self.current_section_label(), self.obj.index_of_next_opcode()); } + /// Create, reserve, and return a new section label. + fn reserve_next_section_label(&mut self) -> (usize, String) { + let section = self.next_section; + self.next_section += 1; + (section, self.compute_section_label(section)) + } + /// Internal function used to compute the section labels fn compute_section_label(&self, section: usize) -> String { format!("{}-{}", self.context_label, section) } - /// Returns the next section label - fn next_section_label(&self) -> String { - self.compute_section_label(self.section_label + 1) - } - /// Returns the current section label fn current_section_label(&self) -> String { self.compute_section_label(self.section_label) @@ -371,15 +435,13 @@ impl BrilligContext { assert_message: Option, ) { self.debug_show.constrain_instruction(condition); - self.add_unresolved_jump( - BrilligOpcode::JumpIf { condition, location: 0 }, - self.next_section_label(), - ); + let (next_section, next_label) = self.reserve_next_section_label(); + self.add_unresolved_jump(BrilligOpcode::JumpIf { condition, location: 0 }, next_label); self.push_opcode(BrilligOpcode::Trap); if let Some(assert_message) = assert_message { self.obj.add_assert_message_to_last_opcode(assert_message); } - self.enter_next_section(); + self.enter_section(next_section); } /// Processes a return instruction. @@ -528,17 +590,24 @@ impl BrilligContext { /// Loads a variable stored previously pub(crate) fn load_variable_instruction( &mut self, - destination: RegisterOrMemory, + destination: BrilligVariable, variable_pointer: RegisterIndex, ) { match destination { - RegisterOrMemory::RegisterIndex(register_index) => { + BrilligVariable::Simple(register_index) => { self.load_instruction(register_index, variable_pointer); } - RegisterOrMemory::HeapArray(HeapArray { pointer, .. 
}) => { + BrilligVariable::BrilligArray(BrilligArray { pointer, size: _, rc }) => { self.load_instruction(pointer, variable_pointer); + + let rc_pointer = self.allocate_register(); + self.mov_instruction(rc_pointer, variable_pointer); + self.usize_op_in_place(rc_pointer, BinaryIntOp::Add, 1_usize); + + self.load_instruction(rc, rc_pointer); + self.deallocate_register(rc_pointer); } - RegisterOrMemory::HeapVector(HeapVector { pointer, size }) => { + BrilligVariable::BrilligVector(BrilligVector { pointer, size, rc }) => { self.load_instruction(pointer, variable_pointer); let size_pointer = self.allocate_register(); @@ -547,6 +616,13 @@ impl BrilligContext { self.load_instruction(size, size_pointer); self.deallocate_register(size_pointer); + + let rc_pointer = self.allocate_register(); + self.mov_instruction(rc_pointer, variable_pointer); + self.usize_op_in_place(rc_pointer, BinaryIntOp::Add, 2_usize); + + self.load_instruction(rc, rc_pointer); + self.deallocate_register(rc_pointer); } } } @@ -565,32 +641,38 @@ impl BrilligContext { pub(crate) fn store_variable_instruction( &mut self, variable_pointer: RegisterIndex, - source: RegisterOrMemory, + source: BrilligVariable, ) { - let size_pointer = self.allocate_register(); - self.mov_instruction(size_pointer, variable_pointer); - self.usize_op_in_place(size_pointer, BinaryIntOp::Add, 1_usize); - match source { - RegisterOrMemory::RegisterIndex(register_index) => { + BrilligVariable::Simple(register_index) => { self.store_instruction(variable_pointer, register_index); - let size_constant = self.make_constant(Value::from(1_usize)); - self.store_instruction(size_pointer, size_constant); - self.deallocate_register(size_constant); } - RegisterOrMemory::HeapArray(HeapArray { pointer, size }) => { + BrilligVariable::BrilligArray(BrilligArray { pointer, size: _, rc }) => { self.store_instruction(variable_pointer, pointer); - let size_constant = self.make_constant(Value::from(size)); - self.store_instruction(size_pointer, size_constant); - self.deallocate_register(size_constant); + + let rc_pointer: RegisterIndex = self.allocate_register(); + self.mov_instruction(rc_pointer, variable_pointer); + self.usize_op_in_place(rc_pointer, BinaryIntOp::Add, 1_usize); + self.store_instruction(rc_pointer, rc); + self.deallocate_register(rc_pointer); } - RegisterOrMemory::HeapVector(HeapVector { pointer, size }) => { + BrilligVariable::BrilligVector(BrilligVector { pointer, size, rc }) => { self.store_instruction(variable_pointer, pointer); + + let size_pointer = self.allocate_register(); + self.mov_instruction(size_pointer, variable_pointer); + self.usize_op_in_place(size_pointer, BinaryIntOp::Add, 1_usize); self.store_instruction(size_pointer, size); + + let rc_pointer: RegisterIndex = self.allocate_register(); + self.mov_instruction(rc_pointer, variable_pointer); + self.usize_op_in_place(rc_pointer, BinaryIntOp::Add, 2_usize); + self.store_instruction(rc_pointer, rc); + + self.deallocate_register(size_pointer); + self.deallocate_register(rc_pointer); } } - - self.deallocate_register(size_pointer); } /// Emits a truncate instruction. @@ -725,14 +807,14 @@ impl BrilligContext { } /// Saves all of the registers that have been used up until this point. - fn save_registers_of_vars(&mut self, vars: &[RegisterOrMemory]) -> Vec { + fn save_registers_of_vars(&mut self, vars: &[BrilligVariable]) -> Vec { // Save all of the used registers at this point in memory // because the function call will/may overwrite them. 
// // Note that here it is important that the stack pointer register is at register 0, // as after the first register save we add to the pointer. let mut used_registers: Vec<_> = - vars.iter().flat_map(|var| extract_registers(*var)).collect(); + vars.iter().flat_map(|var| var.extract_registers()).collect(); // Also dump the previous stack pointer used_registers.push(ReservedRegisters::previous_stack_pointer()); @@ -811,7 +893,7 @@ impl BrilligContext { pub(crate) fn pre_call_save_registers_prep_args( &mut self, arguments: &[RegisterIndex], - variables_to_save: &[RegisterOrMemory], + variables_to_save: &[BrilligVariable], ) -> Vec { // Save all the registers we have used to the stack. let saved_registers = self.save_registers_of_vars(variables_to_save); @@ -852,9 +934,9 @@ impl BrilligContext { } /// Utility method to transform a HeapArray to a HeapVector by making a runtime constant with the size. - pub(crate) fn array_to_vector(&mut self, array: &HeapArray) -> HeapVector { + pub(crate) fn array_to_vector(&mut self, array: &BrilligArray) -> BrilligVector { let size_register = self.make_constant(array.size.into()); - HeapVector { size: size_register, pointer: array.pointer } + BrilligVector { size: size_register, pointer: array.pointer, rc: array.rc } } /// Issues a blackbox operation. @@ -868,12 +950,13 @@ impl BrilligContext { pub(crate) fn radix_instruction( &mut self, source: RegisterIndex, - target_vector: HeapVector, + target_vector: BrilligVector, radix: RegisterIndex, limb_count: RegisterIndex, big_endian: bool, ) { self.mov_instruction(target_vector.size, limb_count); + self.const_instruction(target_vector.rc, 1_usize.into()); self.allocate_array_instruction(target_vector.pointer, target_vector.size); let shifted_register = self.allocate_register(); @@ -914,7 +997,7 @@ impl BrilligContext { } /// This instruction will reverse the order of the elements in a vector. - pub(crate) fn reverse_vector_in_place_instruction(&mut self, vector: HeapVector) { + pub(crate) fn reverse_vector_in_place_instruction(&mut self, vector: BrilligVector) { let iteration_count = self.allocate_register(); self.usize_op(vector.size, iteration_count, BinaryIntOp::UnsignedDiv, 2); @@ -949,51 +1032,12 @@ impl BrilligContext { self.deallocate_register(index_at_end_of_array); } - pub(crate) fn extract_heap_vector(&mut self, variable: RegisterOrMemory) -> HeapVector { - match variable { - RegisterOrMemory::HeapVector(vector) => vector, - RegisterOrMemory::HeapArray(array) => { - let size = self.allocate_register(); - self.const_instruction(size, array.size.into()); - HeapVector { pointer: array.pointer, size } - } - _ => unreachable!("ICE: Expected vector, got {variable:?}"), - } - } - /// Sets a current call stack that the next pushed opcodes will be associated with. pub(crate) fn set_call_stack(&mut self, call_stack: CallStack) { self.obj.set_call_stack(call_stack); } } -pub(crate) fn extract_register(variable: RegisterOrMemory) -> RegisterIndex { - match variable { - RegisterOrMemory::RegisterIndex(register_index) => register_index, - _ => unreachable!("ICE: Expected register, got {variable:?}"), - } -} - -pub(crate) fn extract_heap_array(variable: RegisterOrMemory) -> HeapArray { - match variable { - RegisterOrMemory::HeapArray(array) => array, - _ => unreachable!("ICE: Expected array, got {variable:?}"), - } -} - -/// Collects the registers that a given variable is stored in. 
-pub(crate) fn extract_registers(variable: RegisterOrMemory) -> Vec { - match variable { - RegisterOrMemory::RegisterIndex(register_index) => vec![register_index], - RegisterOrMemory::HeapArray(array) => { - vec![array.pointer] - } - RegisterOrMemory::HeapVector(vector) => { - vec![vector.pointer, vector.size] - } - } -} - /// Type to encapsulate the binary operation types in Brillig #[derive(Clone)] pub(crate) enum BrilligBinaryOp { diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/brillig_variable.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/brillig_variable.rs new file mode 100644 index 00000000000..46c54d55ecb --- /dev/null +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/brillig_variable.rs @@ -0,0 +1,99 @@ +use acvm::brillig_vm::brillig::{HeapArray, HeapVector, RegisterIndex, RegisterOrMemory}; +use serde::{Deserialize, Serialize}; + +/// The representation of a noir array in the Brillig IR +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)] +pub(crate) struct BrilligArray { + pub(crate) pointer: RegisterIndex, + pub(crate) size: usize, + pub(crate) rc: RegisterIndex, +} + +impl BrilligArray { + pub(crate) fn to_heap_array(self) -> HeapArray { + HeapArray { pointer: self.pointer, size: self.size } + } + + pub(crate) fn registers_count() -> usize { + 2 + } + + pub(crate) fn extract_registers(self) -> Vec { + vec![self.pointer, self.rc] + } +} + +/// The representation of a noir slice in the Brillig IR +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)] +pub(crate) struct BrilligVector { + pub(crate) pointer: RegisterIndex, + pub(crate) size: RegisterIndex, + pub(crate) rc: RegisterIndex, +} + +impl BrilligVector { + pub(crate) fn to_heap_vector(self) -> HeapVector { + HeapVector { pointer: self.pointer, size: self.size } + } + + pub(crate) fn registers_count() -> usize { + 3 + } + + pub(crate) fn extract_registers(self) -> Vec { + vec![self.pointer, self.size, self.rc] + } +} + +/// The representation of a noir value in the Brillig IR +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Copy)] +pub(crate) enum BrilligVariable { + Simple(RegisterIndex), + BrilligArray(BrilligArray), + BrilligVector(BrilligVector), +} + +impl BrilligVariable { + pub(crate) fn extract_register(self) -> RegisterIndex { + match self { + BrilligVariable::Simple(register_index) => register_index, + _ => unreachable!("ICE: Expected register, got {self:?}"), + } + } + + pub(crate) fn extract_array(self) -> BrilligArray { + match self { + BrilligVariable::BrilligArray(array) => array, + _ => unreachable!("ICE: Expected array, got {self:?}"), + } + } + + pub(crate) fn extract_vector(self) -> BrilligVector { + match self { + BrilligVariable::BrilligVector(vector) => vector, + _ => unreachable!("ICE: Expected vector, got {self:?}"), + } + } + + pub(crate) fn extract_registers(self) -> Vec { + match self { + BrilligVariable::Simple(register_index) => vec![register_index], + BrilligVariable::BrilligArray(array) => array.extract_registers(), + BrilligVariable::BrilligVector(vector) => vector.extract_registers(), + } + } + + pub(crate) fn to_register_or_memory(self) -> RegisterOrMemory { + match self { + BrilligVariable::Simple(register_index) => { + RegisterOrMemory::RegisterIndex(register_index) + } + BrilligVariable::BrilligArray(array) => { + RegisterOrMemory::HeapArray(array.to_heap_array()) + } + BrilligVariable::BrilligVector(vector) => { + RegisterOrMemory::HeapVector(vector.to_heap_vector()) + } + } + } +} diff --git 
a/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs index fb426ad6876..48615988238 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs @@ -2,6 +2,7 @@ use crate::brillig::brillig_ir::ReservedRegisters; use super::{ artifact::{BrilligArtifact, BrilligParameter}, + brillig_variable::{BrilligArray, BrilligVariable}, debug_show::DebugShow, registers::BrilligRegistersContext, BrilligContext, @@ -20,6 +21,7 @@ impl BrilligContext { registers: BrilligRegistersContext::new(), context_label: String::default(), section_label: 0, + next_section: 1, debug_show: DebugShow::new(false), }; @@ -32,18 +34,39 @@ impl BrilligContext { } /// Adds the instructions needed to handle entry point parameters - /// - /// And sets the starting value of the reserved registers + /// The runtime will leave the parameters in the first `n` registers. + /// Arrays will be passed as pointers to the first element, with all the nested arrays flattened. + /// First, reserve the registers that contain the parameters. + /// This function also sets the starting value of the reserved registers fn entry_point_instruction(&mut self, arguments: Vec) { - // Translate the inputs by the reserved registers offset - for i in (0..arguments.len()).rev() { - self.push_opcode(BrilligOpcode::Mov { - destination: ReservedRegisters::user_register_index(i), - source: RegisterIndex::from(i), - }); - // Make sure we don't overwrite the arguments - self.allocate_register(); - } + let preallocated_registers: Vec<_> = + arguments.iter().enumerate().map(|(i, _)| RegisterIndex::from(i)).collect(); + self.set_allocated_registers(preallocated_registers.clone()); + + // Then allocate and initialize the variables that will hold the parameters + let argument_variables: Vec<_> = arguments + .iter() + .zip(preallocated_registers) + .map(|(argument, param_register)| match argument { + BrilligParameter::Simple => { + let variable_register = self.allocate_register(); + self.mov_instruction(variable_register, param_register); + BrilligVariable::Simple(variable_register) + } + BrilligParameter::Array(item_types, item_count) => { + let pointer_register = self.allocate_register(); + let rc_register = self.allocate_register(); + self.mov_instruction(pointer_register, param_register); + self.const_instruction(rc_register, 1_usize.into()); + BrilligVariable::BrilligArray(BrilligArray { + pointer: pointer_register, + size: item_types.len() * item_count, + rc: rc_register, + }) + } + BrilligParameter::Slice(_) => unimplemented!("Unsupported slices as parameter"), + }) + .collect(); // Calculate the initial value for the stack pointer register let size_arguments_memory: usize = arguments @@ -65,16 +88,24 @@ impl BrilligContext { value: 0_usize.into(), }); - for (index, parameter) in arguments.iter().enumerate() { + // Deflatten the arrays + for (parameter, assigned_variable) in arguments.iter().zip(&argument_variables) { if let BrilligParameter::Array(item_type, item_count) = parameter { if item_type.iter().any(|param| !matches!(param, BrilligParameter::Simple)) { - let pointer_register = ReservedRegisters::user_register_index(index); + let pointer_register = assigned_variable.extract_array().pointer; let deflattened_register = self.deflatten_array(item_type, *item_count, pointer_register); self.mov_instruction(pointer_register, deflattened_register); } } } + + // Move the parameters to the first user 
defined registers, to follow function call convention. + for (i, register) in + argument_variables.into_iter().flat_map(|arg| arg.extract_registers()).enumerate() + { + self.mov_instruction(ReservedRegisters::user_register_index(i), register); + } } /// Computes the size of a parameter if it was flattened @@ -92,6 +123,7 @@ impl BrilligContext { } /// Deflatten an array by recursively allocating nested arrays and copying the plain values. + /// Returns the pointer to the deflattened items. fn deflatten_array( &mut self, item_type: &[BrilligParameter], @@ -139,13 +171,25 @@ impl BrilligContext { *nested_array_item_count, nested_array_pointer, ); - self.array_set( - deflattened_array_pointer, - target_index, - deflattened_nested_array_pointer, + let reference = self.allocate_register(); + let rc = self.allocate_register(); + self.const_instruction(rc, 1_usize.into()); + + self.allocate_array_reference_instruction(reference); + self.store_variable_instruction( + reference, + BrilligVariable::BrilligArray(BrilligArray { + pointer: deflattened_nested_array_pointer, + size: nested_array_item_type.len() * nested_array_item_count, + rc, + }), ); + self.array_set(deflattened_array_pointer, target_index, reference); + self.deallocate_register(nested_array_pointer); + self.deallocate_register(reference); + self.deallocate_register(rc); source_offset += BrilligContext::flattened_size(subitem); } @@ -163,21 +207,36 @@ impl BrilligContext { } /// Adds the instructions needed to handle return parameters + /// The runtime expects the results in the first `n` registers. + /// Arrays are expected to be returned as pointers to the first element with all the nested arrays flattened. + /// However, the function called returns variables (that have extra data) and the returned arrays are unflattened. fn exit_point_instruction(&mut self, return_parameters: Vec) { - // Make sure we don't overwrite the return parameters - return_parameters.iter().for_each(|_| { - self.allocate_register(); - }); - - for (index, ret) in return_parameters.iter().enumerate() { - if let BrilligParameter::Array(item_type, item_count) = ret { + // First, we allocate the registers that hold the returned variables from the function call. + self.set_allocated_registers(vec![]); + let returned_variables: Vec<_> = return_parameters + .iter() + .map(|return_parameter| match return_parameter { + BrilligParameter::Simple => BrilligVariable::Simple(self.allocate_register()), + BrilligParameter::Array(item_types, item_count) => { + BrilligVariable::BrilligArray(BrilligArray { + pointer: self.allocate_register(), + size: item_types.len() * item_count, + rc: self.allocate_register(), + }) + } + BrilligParameter::Slice(..) 
=> unreachable!("ICE: Cannot return slices"), + }) + .collect(); + // Now, we unflatten the returned arrays + for (return_param, returned_variable) in return_parameters.iter().zip(&returned_variables) { + if let BrilligParameter::Array(item_type, item_count) = return_param { if item_type.iter().any(|item| !matches!(item, BrilligParameter::Simple)) { - let returned_pointer = ReservedRegisters::user_register_index(index); + let returned_pointer = returned_variable.extract_array().pointer; let flattened_array_pointer = self.allocate_register(); self.allocate_fixed_length_array( flattened_array_pointer, - BrilligContext::flattened_size(ret), + BrilligContext::flattened_size(return_param), ); self.flatten_array( @@ -191,16 +250,18 @@ impl BrilligContext { } } } - // We want all functions to follow the calling convention of returning + // The VM expects us to follow the calling convention of returning // their results in the first `n` registers. So we to move the return values // to the first `n` registers once completed. // Move the results to registers 0..n - for i in 0..return_parameters.len() { - self.push_opcode(BrilligOpcode::Mov { - destination: i.into(), - source: ReservedRegisters::user_register_index(i), - }); + for (i, returned_variable) in returned_variables.into_iter().enumerate() { + let register = match returned_variable { + BrilligVariable::Simple(register) => register, + BrilligVariable::BrilligArray(array) => array.pointer, + BrilligVariable::BrilligVector(vector) => vector.pointer, + }; + self.push_opcode(BrilligOpcode::Mov { destination: i.into(), source: register }); } self.push_opcode(BrilligOpcode::Stop); } @@ -237,11 +298,22 @@ impl BrilligContext { target_offset += 1; } BrilligParameter::Array(nested_array_item_type, nested_array_item_count) => { - let nested_array_pointer = self.allocate_register(); + let nested_array_reference = self.allocate_register(); self.array_get( deflattened_array_pointer, source_index, - nested_array_pointer, + nested_array_reference, + ); + + let nested_array_variable = BrilligVariable::BrilligArray(BrilligArray { + pointer: self.allocate_register(), + size: nested_array_item_type.len() * nested_array_item_count, + rc: self.allocate_register(), + }); + + self.load_variable_instruction( + nested_array_variable, + nested_array_reference, ); let flattened_nested_array_pointer = self.allocate_register(); @@ -262,11 +334,15 @@ impl BrilligContext { nested_array_item_type, *nested_array_item_count, flattened_nested_array_pointer, - nested_array_pointer, + nested_array_variable.extract_array().pointer, ); - self.deallocate_register(nested_array_pointer); + self.deallocate_register(nested_array_reference); self.deallocate_register(flattened_nested_array_pointer); + nested_array_variable + .extract_registers() + .into_iter() + .for_each(|register| self.deallocate_register(register)); target_offset += BrilligContext::flattened_size(subitem); } @@ -288,6 +364,7 @@ mod tests { use crate::brillig::brillig_ir::{ artifact::BrilligParameter, + brillig_variable::BrilligArray, tests::{create_and_run_vm, create_context, create_entry_point_bytecode}, }; @@ -332,18 +409,24 @@ mod tests { Value::from(4_usize), Value::from(5_usize), Value::from(6_usize), - // The pointer to the nested array of the first item - Value::from(10_usize), - Value::from(3_usize), - // The pointer to the nested array of the second item + // The pointer to the nested reference of the first item Value::from(12_usize), + Value::from(3_usize), + // The pointer to the nested reference of the 
second item + Value::from(16_usize), Value::from(6_usize), // The nested array of the first item Value::from(1_usize), Value::from(2_usize), + // The nested reference of the first item + Value::from(10_usize), + Value::from(1_usize), // The nested array of the second item Value::from(4_usize), Value::from(5_usize), + // The nested reference of the second item + Value::from(14_usize), + Value::from(1_usize), ] ); } @@ -358,35 +441,31 @@ mod tests { Value::from(5_usize), Value::from(6_usize), ]; - let array_param = BrilligParameter::Array( vec![ - BrilligParameter::Simple, BrilligParameter::Array(vec![BrilligParameter::Simple], 2), + BrilligParameter::Simple, ], 2, ); - let arguments = vec![array_param.clone()]; let returns = vec![array_param]; let mut context = create_context(); // Allocate the parameter - let array_pointer = context.allocate_register(); + let brillig_array = BrilligArray { + pointer: context.allocate_register(), + size: 2, + rc: context.allocate_register(), + }; - context.return_instruction(&[array_pointer]); + context.return_instruction(&brillig_array.extract_registers()); let bytecode = create_entry_point_bytecode(context, arguments, returns).byte_code; let vm = create_and_run_vm(flattened_array.clone(), vec![Value::from(0_usize)], &bytecode); let memory = vm.get_memory(); - assert_eq!( - vm.get_registers().get(RegisterIndex(0)), - // The returned value will be past the original array and the deflattened array - Value::from(flattened_array.len() + (flattened_array.len() + 2)), - ); - assert_eq!( memory, &vec![ @@ -397,19 +476,25 @@ mod tests { Value::from(4_usize), Value::from(5_usize), Value::from(6_usize), - // The pointer to the nested array of the first item - Value::from(1_usize), - Value::from(10_usize), - // The pointer to the nested array of the second item - Value::from(4_usize), + // The pointer to the nested reference of the first item Value::from(12_usize), + Value::from(3_usize), + // The pointer to the nested reference of the second item + Value::from(16_usize), + Value::from(6_usize), // The nested array of the first item + Value::from(1_usize), Value::from(2_usize), - Value::from(3_usize), + // The nested reference of the first item + Value::from(10_usize), + Value::from(1_usize), // The nested array of the second item + Value::from(4_usize), Value::from(5_usize), - Value::from(6_usize), - // The values flattened again + // The nested reference of the second item + Value::from(14_usize), + Value::from(1_usize), + // The original flattened again Value::from(1_usize), Value::from(2_usize), Value::from(3_usize), @@ -418,5 +503,6 @@ mod tests { Value::from(6_usize), ] ); + assert_eq!(vm.get_registers().get(RegisterIndex(0)), 18_usize.into()); } } diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs index acbba2b3426..9ccb1e7adfb 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs +++ b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs @@ -380,9 +380,15 @@ impl AcirContext { rhs: AcirVar, typ: AcirType, ) -> Result { - let inputs = vec![AcirValue::Var(lhs, typ.clone()), AcirValue::Var(rhs, typ)]; - let outputs = self.black_box_function(BlackBoxFunc::AND, inputs, 1)?; - Ok(outputs[0]) + let bit_size = typ.bit_size(); + if bit_size == 1 { + // Operands are booleans. 
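+            // For 1-bit operands AND coincides with multiplication: x & y == x * y, so a single multiplication is emitted instead of the AND black box.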
+ self.mul_var(lhs, rhs) + } else { + let inputs = vec![AcirValue::Var(lhs, typ.clone()), AcirValue::Var(rhs, typ)]; + let outputs = self.black_box_function(BlackBoxFunc::AND, inputs, 1)?; + Ok(outputs[0]) + } } /// Returns an `AcirVar` that is the OR result of `lhs` & `rhs`. diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs index 709a3ad67fe..842499c953e 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs @@ -565,6 +565,9 @@ impl Context { Instruction::Load { .. } => { unreachable!("Expected all load instructions to be removed before acir_gen") } + Instruction::IncrementRc { .. } => { + // Do nothing. Only Brillig needs to worry about reference counted arrays + } Instruction::RangeCheck { value, max_bit_size, assert_message } => { let acir_var = self.convert_numeric_value(*value, dfg)?; self.acir_context.range_constrain_var( @@ -1576,7 +1579,7 @@ impl Context { (_, Type::Function) | (Type::Function, _) => { unreachable!("all functions should be inlined") } - (_, Type::Reference) | (Type::Reference, _) => { + (_, Type::Reference(_)) | (Type::Reference(_), _) => { unreachable!("References are invalid in binary operations") } (_, Type::Array(..)) | (Type::Array(..), _) => { diff --git a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs index 47423841a3b..e01e1fe1a1d 100644 --- a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs @@ -170,8 +170,9 @@ impl FunctionBuilder { /// Insert an allocate instruction at the end of the current block, allocating the /// given amount of field elements. Returns the result of the allocate instruction, /// which is always a Reference to the allocated data. - pub(crate) fn insert_allocate(&mut self) -> ValueId { - self.insert_instruction(Instruction::Allocate, None).first() + pub(crate) fn insert_allocate(&mut self, element_type: Type) -> ValueId { + let reference_type = Type::Reference(Rc::new(element_type)); + self.insert_instruction(Instruction::Allocate, Some(vec![reference_type])).first() } pub(crate) fn set_location(&mut self, location: Location) -> &mut FunctionBuilder { @@ -458,6 +459,27 @@ impl FunctionBuilder { _ => None, } } + + /// Insert instructions to increment the reference count of any array(s) stored + /// within the given value. If the given value is not an array and does not contain + /// any arrays, this does nothing. + pub(crate) fn increment_array_reference_count(&mut self, value: ValueId) { + match self.type_of_value(value) { + Type::Numeric(_) => (), + Type::Function => (), + Type::Reference(element) => { + if element.contains_an_array() { + let value = self.insert_load(value, element.as_ref().clone()); + self.increment_array_reference_count(value); + } + } + Type::Array(..) | Type::Slice(..) => { + self.insert_instruction(Instruction::IncrementRc { value }, None); + // If there are nested arrays or slices, we wait until ArrayGet + // is issued to increment the count of that array. + } + } + } } impl std::ops::Index for FunctionBuilder { diff --git a/compiler/noirc_evaluator/src/ssa/ir/dfg.rs b/compiler/noirc_evaluator/src/ssa/ir/dfg.rs index 3cb6736007d..75b2cf962f7 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/dfg.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/dfg.rs @@ -318,7 +318,7 @@ impl DataFlowGraph { /// True if the type of this value is Type::Reference. 
/// Using this method over type_of_value avoids cloning the value's type. pub(crate) fn value_is_reference(&self, value: ValueId) -> bool { - matches!(self.values[value].get_type(), Type::Reference) + matches!(self.values[value].get_type(), Type::Reference(_)) } /// Appends a result type to the instruction. @@ -521,13 +521,13 @@ impl<'dfg> InsertInstructionResult<'dfg> { #[cfg(test)] mod tests { use super::DataFlowGraph; - use crate::ssa::ir::instruction::Instruction; + use crate::ssa::ir::{instruction::Instruction, types::Type}; #[test] fn make_instruction() { let mut dfg = DataFlowGraph::default(); let ins = Instruction::Allocate; - let ins_id = dfg.make_instruction(ins, None); + let ins_id = dfg.make_instruction(ins, Some(vec![Type::field()])); let results = dfg.instruction_results(ins_id); assert_eq!(results.len(), 1); diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs index 71401201715..63b32766f62 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs @@ -182,6 +182,13 @@ pub(crate) enum Instruction { /// Creates a new array with the new value at the given index. All other elements are identical /// to those in the given array. This will not modify the original array. ArraySet { array: ValueId, index: ValueId, value: ValueId }, + + /// An instruction to increment the reference count of a value. + /// + /// This currently only has an effect in Brillig code where array sharing and copy on write is + /// implemented via reference counting. In ACIR code this is done with im::Vector and these + /// IncrementRc instructions are ignored. + IncrementRc { value: ValueId }, } impl Instruction { @@ -195,18 +202,19 @@ impl Instruction { match self { Instruction::Binary(binary) => binary.result_type(), Instruction::Cast(_, typ) => InstructionResultType::Known(typ.clone()), - Instruction::Allocate { .. } => InstructionResultType::Known(Type::Reference), Instruction::Not(value) | Instruction::Truncate { value, .. } => { InstructionResultType::Operand(*value) } Instruction::ArraySet { array, .. } => InstructionResultType::Operand(*array), Instruction::Constrain(..) | Instruction::Store { .. } - | Instruction::EnableSideEffects { .. } - | Instruction::RangeCheck { .. } => InstructionResultType::None, - Instruction::Load { .. } | Instruction::ArrayGet { .. } | Instruction::Call { .. } => { - InstructionResultType::Unknown - } + | Instruction::IncrementRc { .. } + | Instruction::RangeCheck { .. } + | Instruction::EnableSideEffects { .. } => InstructionResultType::None, + Instruction::Allocate { .. } + | Instruction::Load { .. } + | Instruction::ArrayGet { .. } + | Instruction::Call { .. } => InstructionResultType::Unknown, } } @@ -235,6 +243,7 @@ impl Instruction { | Allocate | Load { .. } | Store { .. } + | IncrementRc { .. } | RangeCheck { .. } => false, Call { func, .. } => match dfg[*func] { @@ -266,7 +275,11 @@ impl Instruction { | ArrayGet { .. } | ArraySet { .. } => false, - Constrain(..) | Store { .. } | EnableSideEffects { .. } | RangeCheck { .. } => true, + Constrain(..) + | Store { .. } + | EnableSideEffects { .. } + | IncrementRc { .. } + | RangeCheck { .. } => true, // Some `Intrinsic`s have side effects so we must check what kind of `Call` this is. Call { func, .. 
} => match dfg[*func] { @@ -323,6 +336,7 @@ impl Instruction { Instruction::ArraySet { array, index, value } => { Instruction::ArraySet { array: f(*array), index: f(*index), value: f(*value) } } + Instruction::IncrementRc { value } => Instruction::IncrementRc { value: f(*value) }, Instruction::RangeCheck { value, max_bit_size, assert_message } => { Instruction::RangeCheck { value: f(*value), @@ -374,7 +388,7 @@ impl Instruction { Instruction::EnableSideEffects { condition } => { f(*condition); } - Instruction::RangeCheck { value, .. } => { + Instruction::IncrementRc { value } | Instruction::RangeCheck { value, .. } => { f(*value); } } @@ -474,6 +488,7 @@ impl Instruction { Instruction::Allocate { .. } => None, Instruction::Load { .. } => None, Instruction::Store { .. } => None, + Instruction::IncrementRc { .. } => None, Instruction::RangeCheck { value, max_bit_size, .. } => { if let Some(numeric_constant) = dfg.get_numeric_constant(*value) { if numeric_constant.num_bits() < *max_bit_size { diff --git a/compiler/noirc_evaluator/src/ssa/ir/printer.rs b/compiler/noirc_evaluator/src/ssa/ir/printer.rs index c6b1f3c7528..2899b987c1d 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/printer.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/printer.rs @@ -172,6 +172,9 @@ pub(crate) fn display_instruction( show(*value) ) } + Instruction::IncrementRc { value } => { + writeln!(f, "inc_rc {}", show(*value)) + } Instruction::RangeCheck { value, max_bit_size, .. } => { writeln!(f, "range_check {} to {} bits", show(*value), *max_bit_size,) } diff --git a/compiler/noirc_evaluator/src/ssa/ir/types.rs b/compiler/noirc_evaluator/src/ssa/ir/types.rs index e7ecdd9094d..13fd534f5ea 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/types.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/types.rs @@ -25,7 +25,7 @@ pub(crate) enum Type { Numeric(NumericType), /// A reference to some value, such as an array - Reference, + Reference(Rc), /// An immutable array value with the given element type and length Array(Rc, usize), @@ -86,7 +86,7 @@ impl Type { } Type::Slice(_) => true, Type::Numeric(_) => false, - Type::Reference => false, + Type::Reference(_) => false, Type::Function => false, } } @@ -116,6 +116,15 @@ impl Type { } has_internal_slices } + + /// True if this type is an array (or slice) or internally contains an array (or slice) + pub(crate) fn contains_an_array(&self) -> bool { + match self { + Type::Numeric(_) | Type::Function => false, + Type::Array(_, _) | Type::Slice(_) => true, + Type::Reference(element) => element.contains_an_array(), + } + } } impl NumericType { @@ -147,7 +156,7 @@ impl std::fmt::Display for Type { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Type::Numeric(numeric) => numeric.fmt(f), - Type::Reference => write!(f, "reference"), + Type::Reference(element) => write!(f, "&mut {element}"), Type::Array(element, length) => { let elements = vecmap(element.iter(), |element| element.to_string()); write!(f, "[{}; {length}]", elements.join(", ")) diff --git a/compiler/noirc_evaluator/src/ssa/opt/die.rs b/compiler/noirc_evaluator/src/ssa/opt/die.rs index 3db95f6ad99..53cdf72bbbf 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/die.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/die.rs @@ -7,7 +7,7 @@ use crate::ssa::{ basic_block::{BasicBlock, BasicBlockId}, dfg::DataFlowGraph, function::Function, - instruction::InstructionId, + instruction::{Instruction, InstructionId}, post_order::PostOrder, value::{Value, ValueId}, }, @@ -38,6 +38,8 @@ fn 
dead_instruction_elimination(function: &mut Function) { for block in blocks.as_slice() { context.remove_unused_instructions_in_block(function, *block); } + + context.remove_increment_rc_instructions(&mut function.dfg); } /// Per function context for tracking unused values and which instructions to remove. @@ -45,6 +47,11 @@ fn dead_instruction_elimination(function: &mut Function) { struct Context { used_values: HashSet<ValueId>, instructions_to_remove: HashSet<InstructionId>, + + /// IncrementRc instructions must be revisited after the main DIE pass since + /// they are technically side-effectful but we still want to remove them if their + /// `value` parameter is not used elsewhere. + increment_rc_instructions: Vec<(InstructionId, BasicBlockId)>, } impl Context { @@ -67,14 +74,19 @@ impl Context { let block = &function.dfg[block_id]; self.mark_terminator_values_as_used(function, block); - for instruction in block.instructions().iter().rev() { - if self.is_unused(*instruction, function) { - self.instructions_to_remove.insert(*instruction); + for instruction_id in block.instructions().iter().rev() { + if self.is_unused(*instruction_id, function) { + self.instructions_to_remove.insert(*instruction_id); } else { - let instruction = &function.dfg[*instruction]; - instruction.for_each_value(|value| { - self.mark_used_instruction_results(&function.dfg, value); - }); + let instruction = &function.dfg[*instruction_id]; + + if let Instruction::IncrementRc { .. } = instruction { + self.increment_rc_instructions.push((*instruction_id, block_id)); + } else { + instruction.for_each_value(|value| { + self.mark_used_instruction_results(&function.dfg, value); + }); + } } } @@ -119,11 +131,28 @@ impl Context { self.mark_used_instruction_results(dfg, *elem); } } + Value::Param { .. } => { + self.used_values.insert(value_id); + } _ => { // Does not comprise of any instruction results } } } + + fn remove_increment_rc_instructions(self, dfg: &mut DataFlowGraph) { + for (increment_rc, block) in self.increment_rc_instructions { + let value = match &dfg[increment_rc] { + Instruction::IncrementRc { value } => *value, + other => unreachable!("Expected IncrementRc instruction, found {other:?}"), + }; + + // This could be more efficient if we have to remove multiple IncrementRcs in a single block + if !self.used_values.contains(&value) { + dfg[block].instructions_mut().retain(|instruction| *instruction != increment_rc); + } + } + } } #[cfg(test)] @@ -176,10 +205,10 @@ mod test { builder.switch_to_block(b1); let _v3 = builder.add_block_parameter(b1, Type::field()); - let v4 = builder.insert_allocate(); + let v4 = builder.insert_allocate(Type::field()); let _v5 = builder.insert_load(v4, Type::field()); - let v6 = builder.insert_allocate(); + let v6 = builder.insert_allocate(Type::field()); builder.insert_store(v6, one); let v7 = builder.insert_load(v6, Type::field()); let v8 = builder.insert_binary(v7, BinaryOp::Add, one); diff --git a/compiler/noirc_evaluator/src/ssa/opt/fill_internal_slices.rs b/compiler/noirc_evaluator/src/ssa/opt/fill_internal_slices.rs index c1e801d8280..9aff18f503e 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/fill_internal_slices.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/fill_internal_slices.rs @@ -465,7 +465,7 @@ impl<'f> Context<'f> { self.inserter.function.dfg.make_array(slice, typ.clone()) } } - Type::Reference => { + Type::Reference(_) => { unreachable!("ICE: Generating dummy data for references is unsupported") } Type::Function => { diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs 
b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs index d2ed21c60d7..29df9d3c76d 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs @@ -817,7 +817,7 @@ mod test { #[test] fn merge_stores() { // fn main f0 { - // b0(v0: u1, v1: ref): + // b0(v0: u1, v1: &mut Field): // jmpif v0, then: b1, else: b2 // b1(): // store v1, Field 5 @@ -832,7 +832,7 @@ mod test { let b2 = builder.insert_block(); let v0 = builder.add_parameter(Type::bool()); - let v1 = builder.add_parameter(Type::Reference); + let v1 = builder.add_parameter(Type::Reference(Rc::new(Type::field()))); builder.terminate_with_jmpif(v0, b1, b2); @@ -894,7 +894,7 @@ mod test { let b3 = builder.insert_block(); let v0 = builder.add_parameter(Type::bool()); - let v1 = builder.add_parameter(Type::Reference); + let v1 = builder.add_parameter(Type::Reference(Rc::new(Type::field()))); builder.terminate_with_jmpif(v0, b1, b2); @@ -993,7 +993,7 @@ mod test { let c1 = builder.add_parameter(Type::bool()); let c4 = builder.add_parameter(Type::bool()); - let r1 = builder.insert_allocate(); + let r1 = builder.insert_allocate(Type::field()); let store_value = |builder: &mut FunctionBuilder, value: u128| { let value = builder.field_constant(value); @@ -1144,7 +1144,7 @@ mod test { builder.terminate_with_jmpif(v0, b1, b2); builder.switch_to_block(b1); - let v2 = builder.insert_allocate(); + let v2 = builder.insert_allocate(Type::field()); let zero = builder.field_constant(0u128); builder.insert_store(v2, zero); let _v4 = builder.insert_load(v2, Type::field()); @@ -1313,7 +1313,7 @@ mod test { let v8 = builder.insert_binary(v6, BinaryOp::Mod, i_two); let v9 = builder.insert_cast(v8, Type::bool()); - let v10 = builder.insert_allocate(); + let v10 = builder.insert_allocate(Type::field()); builder.insert_store(v10, zero); builder.terminate_with_jmpif(v9, b1, b2); @@ -1412,9 +1412,9 @@ mod test { let ten = builder.field_constant(10u128); let one_hundred = builder.field_constant(100u128); - let v0 = builder.insert_allocate(); + let v0 = builder.insert_allocate(Type::field()); builder.insert_store(v0, zero); - let v2 = builder.insert_allocate(); + let v2 = builder.insert_allocate(Type::field()); builder.insert_store(v2, two); let v4 = builder.insert_load(v2, Type::field()); let v5 = builder.insert_binary(v4, BinaryOp::Lt, two); diff --git a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs index 32979f78632..446560f45f1 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg/value_merger.rs @@ -60,7 +60,7 @@ impl<'a> ValueMerger<'a> { typ @ Type::Slice(_) => { self.merge_slice_values(typ, then_condition, else_condition, then_value, else_value) } - Type::Reference => panic!("Cannot return references from an if expression"), + Type::Reference(_) => panic!("Cannot return references from an if expression"), Type::Function => panic!("Cannot return functions from an if expression"), } } @@ -333,7 +333,7 @@ impl<'a> ValueMerger<'a> { // to accurately construct dummy data unreachable!("ICE: Cannot return a slice of slices from an if expression") } - Type::Reference => { + Type::Reference(_) => { unreachable!("ICE: Merging references is unsupported") } Type::Function => { diff --git a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs index e5fffaccdd0..fba6e6ab989 100644 --- 
a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs @@ -326,7 +326,7 @@ impl<'f> PerFunctionContext<'f> { match typ { Type::Numeric(_) => false, Type::Function => false, - Type::Reference => true, + Type::Reference(_) => true, Type::Array(elements, _) | Type::Slice(elements) => { elements.iter().any(Self::contains_references) } @@ -427,7 +427,7 @@ mod tests { let func_id = Id::test_new(0); let mut builder = FunctionBuilder::new("func".into(), func_id, RuntimeType::Acir); - let v0 = builder.insert_allocate(); + let v0 = builder.insert_allocate(Type::Array(Rc::new(vec![Type::field()]), 2)); let one = builder.field_constant(FieldElement::one()); let two = builder.field_constant(FieldElement::one()); @@ -468,7 +468,7 @@ mod tests { let func_id = Id::test_new(0); let mut builder = FunctionBuilder::new("func".into(), func_id, RuntimeType::Acir); - let v0 = builder.insert_allocate(); + let v0 = builder.insert_allocate(Type::field()); let one = builder.field_constant(FieldElement::one()); builder.insert_store(v0, one); let v1 = builder.insert_load(v0, Type::field()); @@ -502,7 +502,7 @@ mod tests { let func_id = Id::test_new(0); let mut builder = FunctionBuilder::new("func".into(), func_id, RuntimeType::Acir); - let v0 = builder.insert_allocate(); + let v0 = builder.insert_allocate(Type::field()); let const_one = builder.field_constant(FieldElement::one()); builder.insert_store(v0, const_one); builder.terminate_with_return(vec![v0]); @@ -562,7 +562,7 @@ mod tests { let main_id = Id::test_new(0); let mut builder = FunctionBuilder::new("main".into(), main_id, RuntimeType::Acir); - let v0 = builder.insert_allocate(); + let v0 = builder.insert_allocate(Type::field()); let five = builder.field_constant(5u128); builder.insert_store(v0, five); @@ -642,12 +642,12 @@ mod tests { let main_id = Id::test_new(0); let mut builder = FunctionBuilder::new("main".into(), main_id, RuntimeType::Acir); - let v0 = builder.insert_allocate(); + let v0 = builder.insert_allocate(Type::field()); let zero = builder.field_constant(0u128); builder.insert_store(v0, zero); - let v2 = builder.insert_allocate(); + let v2 = builder.insert_allocate(Type::Reference(Rc::new(Type::field()))); builder.insert_store(v2, v0); let v3 = builder.insert_load(v2, Type::field()); diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs index 72b94e575a9..9d27ffc60d8 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs @@ -152,7 +152,8 @@ impl<'a> FunctionContext<'a> { /// Allocate a single slot of memory and store into it the given initial value of the variable. /// Always returns a Value::Mutable wrapping the allocate instruction. pub(super) fn new_mutable_variable(&mut self, value_to_store: ValueId) -> Value { - let alloc = self.builder.insert_allocate(); + let element_type = self.builder.current_function.dfg.type_of_value(value_to_store); + let alloc = self.builder.insert_allocate(element_type); self.builder.insert_store(alloc, value_to_store); let typ = self.builder.type_of_value(value_to_store); Value::Mutable(alloc, typ) @@ -177,7 +178,7 @@ impl<'a> FunctionContext<'a> { // A mutable reference wraps each element into a reference. // This can be multiple values if the element type is a tuple. 
ast::Type::MutableReference(element) => { - Self::map_type_helper(element, &mut |_| f(Type::Reference)) + Self::map_type_helper(element, &mut |typ| f(Type::Reference(Rc::new(typ)))) } ast::Type::FmtString(len, fields) => { // A format string is represented by multiple values @@ -231,8 +232,8 @@ impl<'a> FunctionContext<'a> { ast::Type::Slice(_) => panic!("convert_non_tuple_type called on a slice: {typ}"), ast::Type::MutableReference(element) => { // Recursive call to panic if element is a tuple - Self::convert_non_tuple_type(element); - Type::Reference + let element = Self::convert_non_tuple_type(element); + Type::Reference(Rc::new(element)) } } } @@ -600,7 +601,7 @@ impl<'a> FunctionContext<'a> { let loop_end = self.builder.insert_block(); // pre-loop - let result_alloc = self.builder.set_location(location).insert_allocate(); + let result_alloc = self.builder.set_location(location).insert_allocate(Type::bool()); let true_value = self.builder.numeric_constant(1u128, Type::bool()); self.builder.insert_store(result_alloc, true_value); let zero = self.builder.field_constant(0u128); diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs index 7677f5669cb..53f1bc863be 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs @@ -210,6 +210,13 @@ impl<'a> FunctionContext<'a> { for element in elements { element.for_each(|element| { let element = element.eval(self); + + // If we're referencing a sub-array in a larger nested array we need to + // increase the reference count of the sub array. This maintains a + // pessimistic reference count (since some are likely moved rather than shared) + // which is important for Brillig's copy on write optimization. This has no + // effect in ACIR code. + self.builder.increment_array_reference_count(element); array.push_back(element); }); } @@ -248,11 +255,12 @@ impl<'a> FunctionContext<'a> { Ok(self.codegen_reference(&unary.rhs)?.map(|rhs| { match rhs { value::Value::Normal(value) => { - let alloc = self.builder.insert_allocate(); + let rhs_type = self.builder.current_function.dfg.type_of_value(value); + let alloc = self.builder.insert_allocate(rhs_type); self.builder.insert_store(alloc, value); Tree::Leaf(value::Value::Normal(alloc)) } - // NOTE: The `.into()` here converts the Value::Mutable into + // The `.into()` here converts the Value::Mutable into // a Value::Normal so it is no longer automatically dereferenced. value::Value::Mutable(reference, _) => reference.into(), } @@ -300,6 +308,7 @@ impl<'a> FunctionContext<'a> { } else { (array_or_slice[0], None) }; + self.codegen_array_index( array, index_value, @@ -344,7 +353,13 @@ impl<'a> FunctionContext<'a> { } _ => unreachable!("must have array or slice but got {array_type}"), } - self.builder.insert_array_get(array, offset, typ).into() + + // Reference counting in brillig relies on us incrementing reference + // counts when nested arrays/slices are constructed or indexed. This + // has no effect in ACIR code. 
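+            // If the loaded value ends up unused, the DIE pass removes this inc_rc again.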
+ let result = self.builder.insert_array_get(array, offset, typ); + self.builder.increment_array_reference_count(result); + result.into() })) } @@ -534,6 +549,11 @@ impl<'a> FunctionContext<'a> { arguments.append(&mut values); } + // If an array is passed as an argument we increase its reference count + for argument in &arguments { + self.builder.increment_array_reference_count(*argument); + } + self.codegen_intrinsic_call_checks(function, &arguments, call.location); Ok(self.insert_call(function, arguments, &call.return_type, call.location)) @@ -575,12 +595,18 @@ impl<'a> FunctionContext<'a> { fn codegen_let(&mut self, let_expr: &ast::Let) -> Result { let mut values = self.codegen_expression(&let_expr.expression)?; - if let_expr.mutable { - values = values.map(|value| { - let value = value.eval(self); - Tree::Leaf(self.new_mutable_variable(value)) - }); - } + values = values.map(|value| { + let value = value.eval(self); + + // Make sure to increment array reference counts on each let binding + self.builder.increment_array_reference_count(value); + + Tree::Leaf(if let_expr.mutable { + self.new_mutable_variable(value) + } else { + value::Value::Normal(value) + }) + }); self.define(let_expr.id, values); Ok(Self::unit_value()) diff --git a/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs b/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs index 047c53e3206..86122530cde 100644 --- a/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs +++ b/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs @@ -1,34 +1,32 @@ use super::dc_mod::collect_defs; use super::errors::{DefCollectorErrorKind, DuplicateType}; use crate::graph::CrateId; -use crate::hir::def_map::{CrateDefMap, LocalModuleId, ModuleData, ModuleDefId, ModuleId}; +use crate::hir::def_map::{CrateDefMap, LocalModuleId, ModuleId}; use crate::hir::resolution::errors::ResolverError; -use crate::hir::resolution::import::PathResolutionError; -use crate::hir::resolution::path_resolver::PathResolver; + +use crate::hir::resolution::import::{resolve_imports, ImportDirective}; use crate::hir::resolution::resolver::Resolver; use crate::hir::resolution::{ - import::{resolve_imports, ImportDirective}, - path_resolver::StandardPathResolver, + collect_impls, collect_trait_impls, resolve_free_functions, resolve_globals, resolve_impls, + resolve_structs, resolve_trait_by_path, resolve_trait_impls, resolve_traits, + resolve_type_aliases, }; use crate::hir::type_check::{type_check_func, TypeCheckError, TypeChecker}; use crate::hir::Context; -use crate::hir_def::traits::{Trait, TraitConstant, TraitFunction, TraitImpl, TraitType}; + use crate::macros_api::MacroProcessor; -use crate::node_interner::{ - FuncId, NodeInterner, StmtId, StructId, TraitId, TraitImplId, TypeAliasId, -}; +use crate::node_interner::{FuncId, NodeInterner, StmtId, StructId, TraitId, TypeAliasId}; use crate::parser::{ParserError, SortedModule}; use crate::{ - ExpressionKind, Generics, Ident, LetStatement, Literal, NoirFunction, NoirStruct, NoirTrait, - NoirTypeAlias, Path, Shared, StructType, TraitItem, Type, TypeBinding, TypeVariableKind, - UnresolvedGenerics, UnresolvedTraitConstraint, UnresolvedType, + ExpressionKind, LetStatement, Literal, NoirFunction, NoirStruct, NoirTrait, NoirTypeAlias, + Path, Type, UnresolvedGenerics, UnresolvedTraitConstraint, UnresolvedType, }; use fm::FileId; use iter_extended::vecmap; use noirc_errors::{CustomDiagnostic, Span}; -use std::collections::{BTreeMap, HashMap, HashSet}; -use std::rc::Rc; +use std::collections::{BTreeMap, 
HashMap}; + use std::vec; /// Stores all of the unresolved functions in a particular file/mod @@ -125,6 +123,16 @@ pub struct DefCollector { pub(crate) collected_traits_impls: Vec, } +/// Maps the type and the module id in which the impl is defined to the functions contained in that +/// impl along with the generics declared on the impl itself. This also contains the Span +/// of the object_type of the impl, used to issue an error if the object type fails to resolve. +/// +/// Note that because these are keyed by unresolved types, the impl map is one of the few instances +/// of HashMap rather than BTreeMap. For this reason, we should be careful not to iterate over it +/// since it would be non-deterministic. +pub(crate) type ImplMap = + HashMap<(UnresolvedType, LocalModuleId), Vec<(UnresolvedGenerics, Span, UnresolvedFunctions)>>; + #[derive(Debug, Clone)] pub enum CompilationError { ParseError(ParserError), @@ -167,16 +175,6 @@ impl From for CompilationError { } } -/// Maps the type and the module id in which the impl is defined to the functions contained in that -/// impl along with the generics declared on the impl itself. This also contains the Span -/// of the object_type of the impl, used to issue an error if the object type fails to resolve. -/// -/// Note that because these are keyed by unresolved types, the impl map is one of the few instances -/// of HashMap rather than BTreeMap. For this reason, we should be careful not to iterate over it -/// since it would be non-deterministic. -type ImplMap = - HashMap<(UnresolvedType, LocalModuleId), Vec<(UnresolvedGenerics, Span, UnresolvedFunctions)>>; - impl DefCollector { fn new(def_map: CrateDefMap) -> DefCollector { DefCollector { @@ -360,264 +358,6 @@ impl DefCollector { } } -/// Go through the list of impls and add each function within to the scope -/// of the module defined by its type. -fn collect_impls( - context: &mut Context, - crate_id: CrateId, - collected_impls: &ImplMap, -) -> Vec<(CompilationError, FileId)> { - let interner = &mut context.def_interner; - let def_maps = &mut context.def_maps; - let mut errors: Vec<(CompilationError, FileId)> = vec![]; - - for ((unresolved_type, module_id), methods) in collected_impls { - let path_resolver = - StandardPathResolver::new(ModuleId { local_id: *module_id, krate: crate_id }); - - let file = def_maps[&crate_id].file_id(*module_id); - - for (generics, span, unresolved) in methods { - let mut resolver = Resolver::new(interner, &path_resolver, def_maps, file); - resolver.add_generics(generics); - let typ = resolver.resolve_type(unresolved_type.clone()); - - errors.extend(take_errors(unresolved.file_id, resolver)); - - if let Some(struct_type) = get_struct_type(&typ) { - let struct_type = struct_type.borrow(); - - // `impl`s are only allowed on types defined within the current crate - if struct_type.id.krate() != crate_id { - let span = *span; - let type_name = struct_type.name.to_string(); - let error = DefCollectorErrorKind::ForeignImpl { span, type_name }; - errors.push((error.into(), unresolved.file_id)); - continue; - } - - // Grab the module defined by the struct type. Note that impls are a case - // where the module the methods are added to is not the same as the module - // they are resolved in. - let module = get_module_mut(def_maps, struct_type.id.module_id()); - - for (_, method_id, method) in &unresolved.functions { - // If this method was already declared, remove it from the module so it cannot - // be accessed with the `TypeName::method` syntax. 
We'll check later whether the - // object types in each method overlap or not. If they do, we issue an error. - // If not, that is specialization which is allowed. - if module.declare_function(method.name_ident().clone(), *method_id).is_err() { - module.remove_function(method.name_ident()); - } - } - // Prohibit defining impls for primitive types if we're not in the stdlib - } else if typ != Type::Error && !crate_id.is_stdlib() { - let span = *span; - let error = DefCollectorErrorKind::NonStructTypeInImpl { span }; - errors.push((error.into(), unresolved.file_id)); - } - } - } - errors -} - -fn get_module_mut( - def_maps: &mut BTreeMap, - module: ModuleId, -) -> &mut ModuleData { - &mut def_maps.get_mut(&module.krate).unwrap().modules[module.local_id.0] -} - -fn collect_trait_impl_methods( - interner: &mut NodeInterner, - def_maps: &BTreeMap, - crate_id: CrateId, - trait_id: TraitId, - trait_impl: &mut UnresolvedTraitImpl, -) -> Vec<(CompilationError, FileId)> { - // In this Vec methods[i] corresponds to trait.methods[i]. If the impl has no implementation - // for a particular method, the default implementation will be added at that slot. - let mut ordered_methods = Vec::new(); - - let the_trait = interner.get_trait(trait_id); - - // check whether the trait implementation is in the same crate as either the trait or the type - let mut errors = - check_trait_impl_crate_coherence(interner, &the_trait, trait_impl, crate_id, def_maps); - // set of function ids that have a corresponding method in the trait - let mut func_ids_in_trait = HashSet::new(); - - for method in &the_trait.methods { - let overrides: Vec<_> = trait_impl - .methods - .functions - .iter() - .filter(|(_, _, f)| f.name() == method.name.0.contents) - .collect(); - - if overrides.is_empty() { - if let Some(default_impl) = &method.default_impl { - let func_id = interner.push_empty_fn(); - let module = ModuleId { local_id: trait_impl.module_id, krate: crate_id }; - interner.push_function(func_id, &default_impl.def, module); - func_ids_in_trait.insert(func_id); - ordered_methods.push(( - method.default_impl_module_id, - func_id, - *default_impl.clone(), - )); - } else { - let error = DefCollectorErrorKind::TraitMissingMethod { - trait_name: the_trait.name.clone(), - method_name: method.name.clone(), - trait_impl_span: trait_impl.object_type.span.expect("type must have a span"), - }; - errors.push((error.into(), trait_impl.file_id)); - } - } else { - for (_, func_id, _) in &overrides { - func_ids_in_trait.insert(*func_id); - } - - if overrides.len() > 1 { - let error = DefCollectorErrorKind::Duplicate { - typ: DuplicateType::TraitAssociatedFunction, - first_def: overrides[0].2.name_ident().clone(), - second_def: overrides[1].2.name_ident().clone(), - }; - errors.push((error.into(), trait_impl.file_id)); - } - - ordered_methods.push(overrides[0].clone()); - } - } - - // Emit MethodNotInTrait error for methods in the impl block that - // don't have a corresponding method signature defined in the trait - for (_, func_id, func) in &trait_impl.methods.functions { - if !func_ids_in_trait.contains(func_id) { - let error = DefCollectorErrorKind::MethodNotInTrait { - trait_name: the_trait.name.clone(), - impl_method: func.name_ident().clone(), - }; - errors.push((error.into(), trait_impl.file_id)); - } - } - - trait_impl.methods.functions = ordered_methods; - trait_impl.methods.trait_id = Some(trait_id); - errors -} - -fn collect_trait_impl( - context: &mut Context, - crate_id: CrateId, - trait_impl: &mut UnresolvedTraitImpl, -) -> 
Vec<(CompilationError, FileId)> { - let interner = &mut context.def_interner; - let def_maps = &mut context.def_maps; - let mut errors: Vec<(CompilationError, FileId)> = vec![]; - let unresolved_type = trait_impl.object_type.clone(); - let module = ModuleId { local_id: trait_impl.module_id, krate: crate_id }; - trait_impl.trait_id = - match resolve_trait_by_path(def_maps, module, trait_impl.trait_path.clone()) { - Ok(trait_id) => Some(trait_id), - Err(error) => { - errors.push((error.into(), trait_impl.file_id)); - None - } - }; - - if let Some(trait_id) = trait_impl.trait_id { - errors - .extend(collect_trait_impl_methods(interner, def_maps, crate_id, trait_id, trait_impl)); - - let path_resolver = StandardPathResolver::new(module); - let file = def_maps[&crate_id].file_id(trait_impl.module_id); - let mut resolver = Resolver::new(interner, &path_resolver, def_maps, file); - resolver.add_generics(&trait_impl.generics); - let typ = resolver.resolve_type(unresolved_type); - errors.extend(take_errors(trait_impl.file_id, resolver)); - - if let Some(struct_type) = get_struct_type(&typ) { - let struct_type = struct_type.borrow(); - let module = get_module_mut(def_maps, struct_type.id.module_id()); - - for (_, method_id, method) in &trait_impl.methods.functions { - // If this method was already declared, remove it from the module so it cannot - // be accessed with the `TypeName::method` syntax. We'll check later whether the - // object types in each method overlap or not. If they do, we issue an error. - // If not, that is specialization which is allowed. - if module.declare_function(method.name_ident().clone(), *method_id).is_err() { - module.remove_function(method.name_ident()); - } - } - } - } - errors -} - -fn collect_trait_impls( - context: &mut Context, - crate_id: CrateId, - collected_impls: &mut [UnresolvedTraitImpl], -) -> Vec<(CompilationError, FileId)> { - collected_impls - .iter_mut() - .flat_map(|trait_impl| collect_trait_impl(context, crate_id, trait_impl)) - .collect() -} - -fn check_trait_impl_crate_coherence( - interner: &mut NodeInterner, - the_trait: &Trait, - trait_impl: &UnresolvedTraitImpl, - current_crate: CrateId, - def_maps: &BTreeMap, -) -> Vec<(CompilationError, FileId)> { - let mut errors: Vec<(CompilationError, FileId)> = vec![]; - - let module = ModuleId { krate: current_crate, local_id: trait_impl.module_id }; - let file = def_maps[&current_crate].file_id(trait_impl.module_id); - let path_resolver = StandardPathResolver::new(module); - let mut resolver = Resolver::new(interner, &path_resolver, def_maps, file); - - let object_crate = match resolver.resolve_type(trait_impl.object_type.clone()) { - Type::Struct(struct_type, _) => struct_type.borrow().id.krate(), - _ => CrateId::Dummy, - }; - - if current_crate != the_trait.crate_id && current_crate != object_crate { - let error = DefCollectorErrorKind::TraitImplOrphaned { - span: trait_impl.object_type.span.expect("object type must have a span"), - }; - errors.push((error.into(), trait_impl.file_id)); - } - - errors -} - -fn resolve_trait_by_path( - def_maps: &BTreeMap, - module: ModuleId, - path: Path, -) -> Result { - let path_resolver = StandardPathResolver::new(module); - - match path_resolver.resolve(def_maps, path.clone()) { - Ok(ModuleDefId::TraitId(trait_id)) => Ok(trait_id), - Ok(_) => Err(DefCollectorErrorKind::NotATrait { not_a_trait_name: path }), - Err(_) => Err(DefCollectorErrorKind::TraitNotFound { trait_path: path }), - } -} - -fn get_struct_type(typ: &Type) -> Option<&Shared> { - match typ { - 
Type::Struct(definition, _) => Some(definition), - _ => None, - } -} - /// Separate the globals Vec into two. The first element in the tuple will be the /// literal globals, except for arrays, and the second will be all other globals. /// We exclude array literals as they can contain complex types @@ -630,49 +370,6 @@ fn filter_literal_globals( }) } -pub struct ResolvedGlobals { - pub globals: Vec<(FileId, StmtId)>, - pub errors: Vec<(CompilationError, FileId)>, -} - -impl ResolvedGlobals { - pub fn extend(&mut self, oth: Self) { - self.globals.extend(oth.globals); - self.errors.extend(oth.errors); - } -} - -fn resolve_globals( - context: &mut Context, - globals: Vec, - crate_id: CrateId, -) -> ResolvedGlobals { - let mut errors: Vec<(CompilationError, FileId)> = vec![]; - let globals = vecmap(globals, |global| { - let module_id = ModuleId { local_id: global.module_id, krate: crate_id }; - let path_resolver = StandardPathResolver::new(module_id); - - let mut resolver = Resolver::new( - &mut context.def_interner, - &path_resolver, - &context.def_maps, - global.file_id, - ); - - let name = global.stmt_def.pattern.name_ident().clone(); - - let hir_stmt = resolver.resolve_global_let(global.stmt_def); - errors.extend(take_errors(global.file_id, resolver)); - - context.def_interner.update_global(global.stmt_id, hir_stmt); - - context.def_interner.push_global(global.stmt_id, name, global.module_id); - - (global.file_id, global.stmt_id) - }); - ResolvedGlobals { globals, errors } -} - fn type_check_globals( interner: &mut NodeInterner, global_ids: Vec<(FileId, StmtId)>, @@ -705,360 +402,8 @@ fn type_check_functions( .collect() } -/// Create the mappings from TypeId -> StructType -/// so that expressions can access the fields of structs -fn resolve_structs( - context: &mut Context, - structs: BTreeMap, - crate_id: CrateId, -) -> Vec<(CompilationError, FileId)> { - let mut errors: Vec<(CompilationError, FileId)> = vec![]; - // Resolve each field in each struct. - // Each struct should already be present in the NodeInterner after def collection. 
- for (type_id, typ) in structs { - let file_id = typ.file_id; - let (generics, fields, resolver_errors) = resolve_struct_fields(context, crate_id, typ); - errors.extend(vecmap(resolver_errors, |err| (err.into(), file_id))); - context.def_interner.update_struct(type_id, |struct_def| { - struct_def.set_fields(fields); - struct_def.generics = generics; - }); - } - errors -} - -fn resolve_trait_types( - _context: &mut Context, - _crate_id: CrateId, - _unresolved_trait: &UnresolvedTrait, -) -> (Vec, Vec<(CompilationError, FileId)>) { - // TODO - (vec![], vec![]) -} -fn resolve_trait_constants( - _context: &mut Context, - _crate_id: CrateId, - _unresolved_trait: &UnresolvedTrait, -) -> (Vec, Vec<(CompilationError, FileId)>) { - // TODO - (vec![], vec![]) -} - -fn resolve_trait_methods( - context: &mut Context, - trait_id: TraitId, - crate_id: CrateId, - unresolved_trait: &UnresolvedTrait, -) -> (Vec, Vec<(CompilationError, FileId)>) { - let interner = &mut context.def_interner; - let def_maps = &mut context.def_maps; - - let path_resolver = StandardPathResolver::new(ModuleId { - local_id: unresolved_trait.module_id, - krate: crate_id, - }); - let file = def_maps[&crate_id].file_id(unresolved_trait.module_id); - - let mut res = vec![]; - let mut resolver_errors = vec![]; - for item in &unresolved_trait.trait_def.items { - if let TraitItem::Function { - name, - generics, - parameters, - return_type, - where_clause: _, - body: _, - } = item - { - let the_trait = interner.get_trait(trait_id); - let self_type = - Type::TypeVariable(the_trait.self_type_typevar.clone(), TypeVariableKind::Normal); - - let mut resolver = Resolver::new(interner, &path_resolver, def_maps, file); - resolver.add_generics(generics); - resolver.set_self_type(Some(self_type)); - - let arguments = vecmap(parameters, |param| resolver.resolve_type(param.1.clone())); - let resolved_return_type = resolver.resolve_type(return_type.get_type().into_owned()); - let generics = resolver.get_generics().to_vec(); - - let name = name.clone(); - let span: Span = name.span(); - let default_impl_list: Vec<_> = unresolved_trait - .fns_with_default_impl - .functions - .iter() - .filter(|(_, _, q)| q.name() == name.0.contents) - .collect(); - let default_impl = if default_impl_list.len() == 1 { - Some(Box::new(default_impl_list[0].2.clone())) - } else { - None - }; - - let f = TraitFunction { - name, - generics, - arguments, - return_type: resolved_return_type, - span, - default_impl, - default_impl_file_id: unresolved_trait.file_id, - default_impl_module_id: unresolved_trait.module_id, - }; - res.push(f); - resolver_errors.extend(take_errors_filter_self_not_resolved(file, resolver)); - } - } - (res, resolver_errors) -} - -fn take_errors_filter_self_not_resolved( - file_id: FileId, - resolver: Resolver<'_>, -) -> Vec<(CompilationError, FileId)> { - resolver - .take_errors() - .iter() - .filter(|resolution_error| match resolution_error { - ResolverError::PathResolutionError(PathResolutionError::Unresolved(ident)) => { - &ident.0.contents != "Self" - } - _ => true, - }) - .cloned() - .map(|resolution_error| (resolution_error.into(), file_id)) - .collect() -} - -fn take_errors(file_id: FileId, resolver: Resolver<'_>) -> Vec<(CompilationError, FileId)> { - vecmap(resolver.take_errors(), |e| (e.into(), file_id)) -} - -/// Create the mappings from TypeId -> TraitType -/// so that expressions can access the elements of traits -fn resolve_traits( - context: &mut Context, - traits: BTreeMap, - crate_id: CrateId, -) -> Vec<(CompilationError, FileId)> { - 
for (trait_id, unresolved_trait) in &traits { - context.def_interner.push_empty_trait(*trait_id, unresolved_trait); - } - let mut res: Vec<(CompilationError, FileId)> = vec![]; - for (trait_id, unresolved_trait) in traits { - // Resolve order - // 1. Trait Types ( Trait constants can have a trait type, therefore types before constants) - let _ = resolve_trait_types(context, crate_id, &unresolved_trait); - // 2. Trait Constants ( Trait's methods can use trait types & constants, therefore they should be after) - let _ = resolve_trait_constants(context, crate_id, &unresolved_trait); - // 3. Trait Methods - let (methods, errors) = - resolve_trait_methods(context, trait_id, crate_id, &unresolved_trait); - res.extend(errors); - context.def_interner.update_trait(trait_id, |trait_def| { - trait_def.set_methods(methods); - }); - } - res -} - -fn resolve_struct_fields( - context: &mut Context, - krate: CrateId, - unresolved: UnresolvedStruct, -) -> (Generics, Vec<(Ident, Type)>, Vec) { - let path_resolver = - StandardPathResolver::new(ModuleId { local_id: unresolved.module_id, krate }); - let file_id = unresolved.file_id; - let (generics, fields, errors) = - Resolver::new(&mut context.def_interner, &path_resolver, &context.def_maps, file_id) - .resolve_struct_fields(unresolved.struct_def); - (generics, fields, errors) -} - -fn resolve_type_aliases( - context: &mut Context, - type_aliases: BTreeMap, - crate_id: CrateId, -) -> Vec<(CompilationError, FileId)> { - let mut errors: Vec<(CompilationError, FileId)> = vec![]; - for (type_id, unresolved_typ) in type_aliases { - let path_resolver = StandardPathResolver::new(ModuleId { - local_id: unresolved_typ.module_id, - krate: crate_id, - }); - let file = unresolved_typ.file_id; - let (typ, generics, resolver_errors) = - Resolver::new(&mut context.def_interner, &path_resolver, &context.def_maps, file) - .resolve_type_aliases(unresolved_typ.type_alias_def); - errors.extend(resolver_errors.iter().cloned().map(|e| (e.into(), file))); - context.def_interner.set_type_alias(type_id, typ, generics); - } - errors -} - -fn resolve_impls( - interner: &mut NodeInterner, - crate_id: CrateId, - def_maps: &BTreeMap, - collected_impls: ImplMap, - errors: &mut Vec<(CompilationError, FileId)>, -) -> Vec<(FileId, FuncId)> { - let mut file_method_ids = Vec::new(); - - for ((unresolved_type, module_id), methods) in collected_impls { - let path_resolver = - StandardPathResolver::new(ModuleId { local_id: module_id, krate: crate_id }); - - let file = def_maps[&crate_id].file_id(module_id); - - for (generics, _, functions) in methods { - let mut resolver = Resolver::new(interner, &path_resolver, def_maps, file); - resolver.add_generics(&generics); - let generics = resolver.get_generics().to_vec(); - let self_type = resolver.resolve_type(unresolved_type.clone()); - - let mut file_func_ids = resolve_function_set( - interner, - crate_id, - def_maps, - functions, - Some(self_type.clone()), - None, - generics, - errors, - ); - if self_type != Type::Error { - for (file_id, method_id) in &file_func_ids { - let method_name = interner.function_name(method_id).to_owned(); - - if let Some(first_fn) = - interner.add_method(&self_type, method_name.clone(), *method_id, false) - { - let error = ResolverError::DuplicateDefinition { - name: method_name, - first_span: interner.function_ident(&first_fn).span(), - second_span: interner.function_ident(method_id).span(), - }; - errors.push((error.into(), *file_id)); - } - } - } - file_method_ids.append(&mut file_func_ids); - } - } - - file_method_ids 
-} - -fn resolve_trait_impls( - context: &mut Context, - traits: Vec, - crate_id: CrateId, - errors: &mut Vec<(CompilationError, FileId)>, -) -> Vec<(FileId, FuncId)> { - let interner = &mut context.def_interner; - let mut methods = Vec::<(FileId, FuncId)>::new(); - - for trait_impl in traits { - let unresolved_type = trait_impl.object_type; - let local_mod_id = trait_impl.module_id; - let module_id = ModuleId { krate: crate_id, local_id: local_mod_id }; - let path_resolver = StandardPathResolver::new(module_id); - - let self_type_span = unresolved_type.span; - - let mut resolver = - Resolver::new(interner, &path_resolver, &context.def_maps, trait_impl.file_id); - resolver.add_generics(&trait_impl.generics); - let self_type = resolver.resolve_type(unresolved_type.clone()); - let generics = resolver.get_generics().to_vec(); - - let impl_id = interner.next_trait_impl_id(); - - let mut impl_methods = resolve_function_set( - interner, - crate_id, - &context.def_maps, - trait_impl.methods.clone(), - Some(self_type.clone()), - Some(impl_id), - generics.clone(), - errors, - ); - - let maybe_trait_id = trait_impl.trait_id; - if let Some(trait_id) = maybe_trait_id { - for (_, func) in &impl_methods { - interner.set_function_trait(*func, self_type.clone(), trait_id); - } - } - - if matches!(self_type, Type::MutableReference(_)) { - let span = self_type_span.unwrap_or_else(|| trait_impl.trait_path.span()); - let error = DefCollectorErrorKind::MutableReferenceInTraitImpl { span }; - errors.push((error.into(), trait_impl.file_id)); - } - - let mut new_resolver = - Resolver::new(interner, &path_resolver, &context.def_maps, trait_impl.file_id); - - new_resolver.set_generics(generics); - new_resolver.set_self_type(Some(self_type.clone())); - - if let Some(trait_id) = maybe_trait_id { - check_methods_signatures( - &mut new_resolver, - &impl_methods, - trait_id, - trait_impl.generics.len(), - errors, - ); - - let where_clause = trait_impl - .where_clause - .into_iter() - .flat_map(|item| new_resolver.resolve_trait_constraint(item)) - .collect(); - - let resolved_trait_impl = Shared::new(TraitImpl { - ident: trait_impl.trait_path.last_segment().clone(), - typ: self_type.clone(), - trait_id, - file: trait_impl.file_id, - where_clause, - methods: vecmap(&impl_methods, |(_, func_id)| *func_id), - }); - - if let Err((prev_span, prev_file)) = interner.add_trait_implementation( - self_type.clone(), - trait_id, - impl_id, - resolved_trait_impl, - ) { - let error = DefCollectorErrorKind::OverlappingImpl { - typ: self_type.clone(), - span: self_type_span.unwrap_or_else(|| trait_impl.trait_path.span()), - }; - errors.push((error.into(), trait_impl.file_id)); - - // The 'previous impl defined here' note must be a separate error currently - // since it may be in a different file and all errors have the same file id. 
- let error = DefCollectorErrorKind::OverlappingImplNote { span: prev_span }; - errors.push((error.into(), prev_file)); - } - - methods.append(&mut impl_methods); - } - } - - methods -} - // TODO(vitkov): Move this out of here and into type_check -fn check_methods_signatures( +pub(crate) fn check_methods_signatures( resolver: &mut Resolver, impl_methods: &Vec<(FileId, FuncId)>, trait_id: TraitId, @@ -1151,72 +496,3 @@ fn check_methods_signatures( the_trait.self_type_typevar.borrow_mut().unbind(the_trait.self_type_typevar_id); } - -fn resolve_free_functions( - interner: &mut NodeInterner, - crate_id: CrateId, - def_maps: &BTreeMap, - collected_functions: Vec, - self_type: Option, - errors: &mut Vec<(CompilationError, FileId)>, -) -> Vec<(FileId, FuncId)> { - // Lower each function in the crate. This is now possible since imports have been resolved - collected_functions - .into_iter() - .flat_map(|unresolved_functions| { - resolve_function_set( - interner, - crate_id, - def_maps, - unresolved_functions, - self_type.clone(), - None, - vec![], // no impl generics - errors, - ) - }) - .collect() -} - -#[allow(clippy::too_many_arguments)] -fn resolve_function_set( - interner: &mut NodeInterner, - crate_id: CrateId, - def_maps: &BTreeMap, - mut unresolved_functions: UnresolvedFunctions, - self_type: Option, - trait_impl_id: Option, - impl_generics: Vec<(Rc, Shared, Span)>, - errors: &mut Vec<(CompilationError, FileId)>, -) -> Vec<(FileId, FuncId)> { - let file_id = unresolved_functions.file_id; - - let where_clause_errors = - unresolved_functions.resolve_trait_bounds_trait_ids(def_maps, crate_id); - errors.extend(where_clause_errors.iter().cloned().map(|e| (e.into(), file_id))); - - vecmap(unresolved_functions.functions, |(mod_id, func_id, func)| { - let module_id = ModuleId { krate: crate_id, local_id: mod_id }; - let path_resolver = StandardPathResolver::new(module_id); - - let mut resolver = Resolver::new(interner, &path_resolver, def_maps, file_id); - // Must use set_generics here to ensure we re-use the same generics from when - // the impl was originally collected. Otherwise the function will be using different - // TypeVariables for the same generic, causing it to instantiate incorrectly. - resolver.set_generics(impl_generics.clone()); - resolver.set_self_type(self_type.clone()); - resolver.set_trait_id(unresolved_functions.trait_id); - resolver.set_trait_impl_id(trait_impl_id); - - // Without this, impl methods can accidentally be placed in contracts. 
See #3254 - if self_type.is_some() { - resolver.set_in_contract(false); - } - - let (hir_func, func_meta, errs) = resolver.resolve_function(func, func_id); - interner.push_fn_meta(func_meta, func_id); - interner.update_fn(func_id, hir_func); - errors.extend(errs.iter().cloned().map(|e| (e.into(), file_id))); - (file_id, func_id) - }) -} diff --git a/compiler/noirc_frontend/src/hir/resolution/functions.rs b/compiler/noirc_frontend/src/hir/resolution/functions.rs new file mode 100644 index 00000000000..387f94e129c --- /dev/null +++ b/compiler/noirc_frontend/src/hir/resolution/functions.rs @@ -0,0 +1,85 @@ +use std::{collections::BTreeMap, rc::Rc}; + +use fm::FileId; +use iter_extended::vecmap; +use noirc_errors::Span; + +use crate::{ + graph::CrateId, + hir::{ + def_collector::dc_crate::{CompilationError, UnresolvedFunctions}, + def_map::{CrateDefMap, ModuleId}, + }, + node_interner::{FuncId, NodeInterner, TraitImplId}, + Shared, Type, TypeBinding, +}; + +use super::{path_resolver::StandardPathResolver, resolver::Resolver}; + +#[allow(clippy::too_many_arguments)] +pub(crate) fn resolve_function_set( + interner: &mut NodeInterner, + crate_id: CrateId, + def_maps: &BTreeMap, + mut unresolved_functions: UnresolvedFunctions, + self_type: Option, + trait_impl_id: Option, + impl_generics: Vec<(Rc, Shared, Span)>, + errors: &mut Vec<(CompilationError, FileId)>, +) -> Vec<(FileId, FuncId)> { + let file_id = unresolved_functions.file_id; + + let where_clause_errors = + unresolved_functions.resolve_trait_bounds_trait_ids(def_maps, crate_id); + errors.extend(where_clause_errors.iter().cloned().map(|e| (e.into(), file_id))); + + vecmap(unresolved_functions.functions, |(mod_id, func_id, func)| { + let module_id = ModuleId { krate: crate_id, local_id: mod_id }; + let path_resolver = StandardPathResolver::new(module_id); + + let mut resolver = Resolver::new(interner, &path_resolver, def_maps, file_id); + // Must use set_generics here to ensure we re-use the same generics from when + // the impl was originally collected. Otherwise the function will be using different + // TypeVariables for the same generic, causing it to instantiate incorrectly. + resolver.set_generics(impl_generics.clone()); + resolver.set_self_type(self_type.clone()); + resolver.set_trait_id(unresolved_functions.trait_id); + resolver.set_trait_impl_id(trait_impl_id); + + // Without this, impl methods can accidentally be placed in contracts. 
See #3254 + if self_type.is_some() { + resolver.set_in_contract(false); + } + + let (hir_func, func_meta, errs) = resolver.resolve_function(func, func_id); + interner.push_fn_meta(func_meta, func_id); + interner.update_fn(func_id, hir_func); + errors.extend(errs.iter().cloned().map(|e| (e.into(), file_id))); + (file_id, func_id) + }) +} + +pub(crate) fn resolve_free_functions( + interner: &mut NodeInterner, + crate_id: CrateId, + def_maps: &BTreeMap, + collected_functions: Vec, + self_type: Option, + errors: &mut Vec<(CompilationError, FileId)>, +) -> Vec<(FileId, FuncId)> { + collected_functions + .into_iter() + .flat_map(|unresolved_functions| { + resolve_function_set( + interner, + crate_id, + def_maps, + unresolved_functions, + self_type.clone(), + None, + vec![], // no impl generics + errors, + ) + }) + .collect() +} diff --git a/compiler/noirc_frontend/src/hir/resolution/globals.rs b/compiler/noirc_frontend/src/hir/resolution/globals.rs new file mode 100644 index 00000000000..b5aec212dbf --- /dev/null +++ b/compiler/noirc_frontend/src/hir/resolution/globals.rs @@ -0,0 +1,55 @@ +use super::{path_resolver::StandardPathResolver, resolver::Resolver, take_errors}; +use crate::{ + graph::CrateId, + hir::{ + def_collector::dc_crate::{CompilationError, UnresolvedGlobal}, + def_map::ModuleId, + Context, + }, + node_interner::StmtId, +}; +use fm::FileId; +use iter_extended::vecmap; + +pub(crate) struct ResolvedGlobals { + pub(crate) globals: Vec<(FileId, StmtId)>, + pub(crate) errors: Vec<(CompilationError, FileId)>, +} + +impl ResolvedGlobals { + pub(crate) fn extend(&mut self, oth: Self) { + self.globals.extend(oth.globals); + self.errors.extend(oth.errors); + } +} + +pub(crate) fn resolve_globals( + context: &mut Context, + globals: Vec, + crate_id: CrateId, +) -> ResolvedGlobals { + let mut errors: Vec<(CompilationError, FileId)> = vec![]; + let globals = vecmap(globals, |global| { + let module_id = ModuleId { local_id: global.module_id, krate: crate_id }; + let path_resolver = StandardPathResolver::new(module_id); + + let mut resolver = Resolver::new( + &mut context.def_interner, + &path_resolver, + &context.def_maps, + global.file_id, + ); + + let name = global.stmt_def.pattern.name_ident().clone(); + + let hir_stmt = resolver.resolve_global_let(global.stmt_def); + errors.extend(take_errors(global.file_id, resolver)); + + context.def_interner.update_global(global.stmt_id, hir_stmt); + + context.def_interner.push_global(global.stmt_id, name, global.module_id); + + (global.file_id, global.stmt_id) + }); + ResolvedGlobals { globals, errors } +} diff --git a/compiler/noirc_frontend/src/hir/resolution/impls.rs b/compiler/noirc_frontend/src/hir/resolution/impls.rs new file mode 100644 index 00000000000..4aa70f00cfc --- /dev/null +++ b/compiler/noirc_frontend/src/hir/resolution/impls.rs @@ -0,0 +1,137 @@ +use std::collections::BTreeMap; + +use fm::FileId; + +use crate::{ + graph::CrateId, + hir::{ + def_collector::{ + dc_crate::{CompilationError, ImplMap}, + errors::DefCollectorErrorKind, + }, + def_map::{CrateDefMap, ModuleId}, + Context, + }, + node_interner::{FuncId, NodeInterner}, + Type, +}; + +use super::{ + errors::ResolverError, functions, get_module_mut, get_struct_type, + path_resolver::StandardPathResolver, resolver::Resolver, take_errors, +}; + +/// Go through the list of impls and add each function within to the scope +/// of the module defined by its type. 
+pub(crate) fn collect_impls( + context: &mut Context, + crate_id: CrateId, + collected_impls: &ImplMap, +) -> Vec<(CompilationError, FileId)> { + let interner = &mut context.def_interner; + let def_maps = &mut context.def_maps; + let mut errors: Vec<(CompilationError, FileId)> = vec![]; + + for ((unresolved_type, module_id), methods) in collected_impls { + let path_resolver = + StandardPathResolver::new(ModuleId { local_id: *module_id, krate: crate_id }); + + let file = def_maps[&crate_id].file_id(*module_id); + + for (generics, span, unresolved) in methods { + let mut resolver = Resolver::new(interner, &path_resolver, def_maps, file); + resolver.add_generics(generics); + let typ = resolver.resolve_type(unresolved_type.clone()); + + errors.extend(take_errors(unresolved.file_id, resolver)); + + if let Some(struct_type) = get_struct_type(&typ) { + let struct_type = struct_type.borrow(); + + // `impl`s are only allowed on types defined within the current crate + if struct_type.id.krate() != crate_id { + let span = *span; + let type_name = struct_type.name.to_string(); + let error = DefCollectorErrorKind::ForeignImpl { span, type_name }; + errors.push((error.into(), unresolved.file_id)); + continue; + } + + // Grab the module defined by the struct type. Note that impls are a case + // where the module the methods are added to is not the same as the module + // they are resolved in. + let module = get_module_mut(def_maps, struct_type.id.module_id()); + + for (_, method_id, method) in &unresolved.functions { + // If this method was already declared, remove it from the module so it cannot + // be accessed with the `TypeName::method` syntax. We'll check later whether the + // object types in each method overlap or not. If they do, we issue an error. + // If not, that is specialization which is allowed. 
+ if module.declare_function(method.name_ident().clone(), *method_id).is_err() { + module.remove_function(method.name_ident()); + } + } + // Prohibit defining impls for primitive types if we're not in the stdlib + } else if typ != Type::Error && !crate_id.is_stdlib() { + let span = *span; + let error = DefCollectorErrorKind::NonStructTypeInImpl { span }; + errors.push((error.into(), unresolved.file_id)); + } + } + } + errors +} + +pub(crate) fn resolve_impls( + interner: &mut NodeInterner, + crate_id: CrateId, + def_maps: &BTreeMap, + collected_impls: ImplMap, + errors: &mut Vec<(CompilationError, FileId)>, +) -> Vec<(FileId, FuncId)> { + let mut file_method_ids = Vec::new(); + + for ((unresolved_type, module_id), methods) in collected_impls { + let path_resolver = + StandardPathResolver::new(ModuleId { local_id: module_id, krate: crate_id }); + + let file = def_maps[&crate_id].file_id(module_id); + + for (generics, _, functions) in methods { + let mut resolver = Resolver::new(interner, &path_resolver, def_maps, file); + resolver.add_generics(&generics); + let generics = resolver.get_generics().to_vec(); + let self_type = resolver.resolve_type(unresolved_type.clone()); + + let mut file_func_ids = functions::resolve_function_set( + interner, + crate_id, + def_maps, + functions, + Some(self_type.clone()), + None, + generics, + errors, + ); + if self_type != Type::Error { + for (file_id, method_id) in &file_func_ids { + let method_name = interner.function_name(method_id).to_owned(); + + if let Some(first_fn) = + interner.add_method(&self_type, method_name.clone(), *method_id, false) + { + let error = ResolverError::DuplicateDefinition { + name: method_name, + first_span: interner.function_ident(&first_fn).span(), + second_span: interner.function_ident(method_id).span(), + }; + errors.push((error.into(), *file_id)); + } + } + } + file_method_ids.append(&mut file_func_ids); + } + } + + file_method_ids +} diff --git a/compiler/noirc_frontend/src/hir/resolution/mod.rs b/compiler/noirc_frontend/src/hir/resolution/mod.rs index 601e78015ca..8c16a9cca80 100644 --- a/compiler/noirc_frontend/src/hir/resolution/mod.rs +++ b/compiler/noirc_frontend/src/hir/resolution/mod.rs @@ -9,3 +9,50 @@ pub mod errors; pub mod import; pub mod path_resolver; pub mod resolver; + +mod functions; +mod globals; +mod impls; +mod structs; +mod traits; +mod type_aliases; + +pub(crate) use functions::resolve_free_functions; +pub(crate) use globals::resolve_globals; +pub(crate) use impls::{collect_impls, resolve_impls}; +pub(crate) use structs::resolve_structs; +pub(crate) use traits::{ + collect_trait_impls, resolve_trait_by_path, resolve_trait_impls, resolve_traits, +}; +pub(crate) use type_aliases::resolve_type_aliases; + +use crate::{ + graph::CrateId, + hir::{ + def_collector::dc_crate::CompilationError, + def_map::{CrateDefMap, ModuleData, ModuleId}, + }, + Shared, StructType, Type, +}; +use fm::FileId; +use iter_extended::vecmap; +use resolver::Resolver; +use std::collections::BTreeMap; + +fn take_errors(file_id: FileId, resolver: Resolver<'_>) -> Vec<(CompilationError, FileId)> { + vecmap(resolver.take_errors(), |e| (e.into(), file_id)) +} + +fn get_module_mut( + def_maps: &mut BTreeMap, + module: ModuleId, +) -> &mut ModuleData { + &mut def_maps.get_mut(&module.krate).unwrap().modules[module.local_id.0] +} + +fn get_struct_type(typ: &Type) -> Option<&Shared> { + match typ { + Type::Struct(definition, _) => Some(definition), + _ => None, + } +} diff --git a/compiler/noirc_frontend/src/hir/resolution/structs.rs 
b/compiler/noirc_frontend/src/hir/resolution/structs.rs new file mode 100644 index 00000000000..72a7b736436 --- /dev/null +++ b/compiler/noirc_frontend/src/hir/resolution/structs.rs @@ -0,0 +1,53 @@ +use std::collections::BTreeMap; + +use fm::FileId; +use iter_extended::vecmap; + +use crate::{ + graph::CrateId, + hir::{ + def_collector::dc_crate::{CompilationError, UnresolvedStruct}, + def_map::ModuleId, + Context, + }, + node_interner::StructId, + Generics, Ident, Type, +}; + +use super::{errors::ResolverError, path_resolver::StandardPathResolver, resolver::Resolver}; + +/// Create the mappings from TypeId -> StructType +/// so that expressions can access the fields of structs +pub(crate) fn resolve_structs( + context: &mut Context, + structs: BTreeMap, + crate_id: CrateId, +) -> Vec<(CompilationError, FileId)> { + let mut errors: Vec<(CompilationError, FileId)> = vec![]; + // Resolve each field in each struct. + // Each struct should already be present in the NodeInterner after def collection. + for (type_id, typ) in structs { + let file_id = typ.file_id; + let (generics, fields, resolver_errors) = resolve_struct_fields(context, crate_id, typ); + errors.extend(vecmap(resolver_errors, |err| (err.into(), file_id))); + context.def_interner.update_struct(type_id, |struct_def| { + struct_def.set_fields(fields); + struct_def.generics = generics; + }); + } + errors +} + +fn resolve_struct_fields( + context: &mut Context, + krate: CrateId, + unresolved: UnresolvedStruct, +) -> (Generics, Vec<(Ident, Type)>, Vec) { + let path_resolver = + StandardPathResolver::new(ModuleId { local_id: unresolved.module_id, krate }); + let file_id = unresolved.file_id; + let (generics, fields, errors) = + Resolver::new(&mut context.def_interner, &path_resolver, &context.def_maps, file_id) + .resolve_struct_fields(unresolved.struct_def); + (generics, fields, errors) +} diff --git a/compiler/noirc_frontend/src/hir/resolution/traits.rs b/compiler/noirc_frontend/src/hir/resolution/traits.rs new file mode 100644 index 00000000000..702e96362a6 --- /dev/null +++ b/compiler/noirc_frontend/src/hir/resolution/traits.rs @@ -0,0 +1,450 @@ +use std::collections::{BTreeMap, HashSet}; + +use fm::FileId; +use iter_extended::vecmap; +use noirc_errors::Span; + +use crate::{ + graph::CrateId, + hir::{ + def_collector::{ + dc_crate::{ + check_methods_signatures, CompilationError, UnresolvedTrait, UnresolvedTraitImpl, + }, + errors::{DefCollectorErrorKind, DuplicateType}, + }, + def_map::{CrateDefMap, ModuleDefId, ModuleId}, + Context, + }, + hir_def::traits::{Trait, TraitConstant, TraitFunction, TraitImpl, TraitType}, + node_interner::{FuncId, NodeInterner, TraitId}, + Path, Shared, TraitItem, Type, TypeVariableKind, +}; + +use super::{ + errors::ResolverError, + functions, get_module_mut, get_struct_type, + import::PathResolutionError, + path_resolver::{PathResolver, StandardPathResolver}, + resolver::Resolver, + take_errors, +}; + +/// Create the mappings from TypeId -> TraitType +/// so that expressions can access the elements of traits +pub(crate) fn resolve_traits( + context: &mut Context, + traits: BTreeMap, + crate_id: CrateId, +) -> Vec<(CompilationError, FileId)> { + for (trait_id, unresolved_trait) in &traits { + context.def_interner.push_empty_trait(*trait_id, unresolved_trait); + } + let mut res: Vec<(CompilationError, FileId)> = vec![]; + for (trait_id, unresolved_trait) in traits { + // Resolve order + // 1. 
Trait Types ( Trait constants can have a trait type, therefore types before constants) + let _ = resolve_trait_types(context, crate_id, &unresolved_trait); + // 2. Trait Constants ( Trait's methods can use trait types & constants, therefore they should be after) + let _ = resolve_trait_constants(context, crate_id, &unresolved_trait); + // 3. Trait Methods + let (methods, errors) = + resolve_trait_methods(context, trait_id, crate_id, &unresolved_trait); + res.extend(errors); + context.def_interner.update_trait(trait_id, |trait_def| { + trait_def.set_methods(methods); + }); + } + res +} + +fn resolve_trait_types( + _context: &mut Context, + _crate_id: CrateId, + _unresolved_trait: &UnresolvedTrait, +) -> (Vec, Vec<(CompilationError, FileId)>) { + // TODO + (vec![], vec![]) +} +fn resolve_trait_constants( + _context: &mut Context, + _crate_id: CrateId, + _unresolved_trait: &UnresolvedTrait, +) -> (Vec, Vec<(CompilationError, FileId)>) { + // TODO + (vec![], vec![]) +} + +fn resolve_trait_methods( + context: &mut Context, + trait_id: TraitId, + crate_id: CrateId, + unresolved_trait: &UnresolvedTrait, +) -> (Vec, Vec<(CompilationError, FileId)>) { + let interner = &mut context.def_interner; + let def_maps = &mut context.def_maps; + + let path_resolver = StandardPathResolver::new(ModuleId { + local_id: unresolved_trait.module_id, + krate: crate_id, + }); + let file = def_maps[&crate_id].file_id(unresolved_trait.module_id); + + let mut res = vec![]; + let mut resolver_errors = vec![]; + for item in &unresolved_trait.trait_def.items { + if let TraitItem::Function { + name, + generics, + parameters, + return_type, + where_clause: _, + body: _, + } = item + { + let the_trait = interner.get_trait(trait_id); + let self_type = + Type::TypeVariable(the_trait.self_type_typevar.clone(), TypeVariableKind::Normal); + + let mut resolver = Resolver::new(interner, &path_resolver, def_maps, file); + resolver.add_generics(generics); + resolver.set_self_type(Some(self_type)); + + let arguments = vecmap(parameters, |param| resolver.resolve_type(param.1.clone())); + let resolved_return_type = resolver.resolve_type(return_type.get_type().into_owned()); + let generics = resolver.get_generics().to_vec(); + + let name = name.clone(); + let span: Span = name.span(); + let default_impl_list: Vec<_> = unresolved_trait + .fns_with_default_impl + .functions + .iter() + .filter(|(_, _, q)| q.name() == name.0.contents) + .collect(); + let default_impl = if default_impl_list.len() == 1 { + Some(Box::new(default_impl_list[0].2.clone())) + } else { + None + }; + + let f = TraitFunction { + name, + generics, + arguments, + return_type: resolved_return_type, + span, + default_impl, + default_impl_file_id: unresolved_trait.file_id, + default_impl_module_id: unresolved_trait.module_id, + }; + res.push(f); + resolver_errors.extend(take_errors_filter_self_not_resolved(file, resolver)); + } + } + (res, resolver_errors) +} + +fn collect_trait_impl_methods( + interner: &mut NodeInterner, + def_maps: &BTreeMap, + crate_id: CrateId, + trait_id: TraitId, + trait_impl: &mut UnresolvedTraitImpl, +) -> Vec<(CompilationError, FileId)> { + // In this Vec methods[i] corresponds to trait.methods[i]. If the impl has no implementation + // for a particular method, the default implementation will be added at that slot. 
+ let mut ordered_methods = Vec::new(); + + let the_trait = interner.get_trait(trait_id); + + // check whether the trait implementation is in the same crate as either the trait or the type + let mut errors = + check_trait_impl_crate_coherence(interner, &the_trait, trait_impl, crate_id, def_maps); + // set of function ids that have a corresponding method in the trait + let mut func_ids_in_trait = HashSet::new(); + + for method in &the_trait.methods { + let overrides: Vec<_> = trait_impl + .methods + .functions + .iter() + .filter(|(_, _, f)| f.name() == method.name.0.contents) + .collect(); + + if overrides.is_empty() { + if let Some(default_impl) = &method.default_impl { + let func_id = interner.push_empty_fn(); + let module = ModuleId { local_id: trait_impl.module_id, krate: crate_id }; + interner.push_function(func_id, &default_impl.def, module); + func_ids_in_trait.insert(func_id); + ordered_methods.push(( + method.default_impl_module_id, + func_id, + *default_impl.clone(), + )); + } else { + let error = DefCollectorErrorKind::TraitMissingMethod { + trait_name: the_trait.name.clone(), + method_name: method.name.clone(), + trait_impl_span: trait_impl.object_type.span.expect("type must have a span"), + }; + errors.push((error.into(), trait_impl.file_id)); + } + } else { + for (_, func_id, _) in &overrides { + func_ids_in_trait.insert(*func_id); + } + + if overrides.len() > 1 { + let error = DefCollectorErrorKind::Duplicate { + typ: DuplicateType::TraitAssociatedFunction, + first_def: overrides[0].2.name_ident().clone(), + second_def: overrides[1].2.name_ident().clone(), + }; + errors.push((error.into(), trait_impl.file_id)); + } + + ordered_methods.push(overrides[0].clone()); + } + } + + // Emit MethodNotInTrait error for methods in the impl block that + // don't have a corresponding method signature defined in the trait + for (_, func_id, func) in &trait_impl.methods.functions { + if !func_ids_in_trait.contains(func_id) { + let error = DefCollectorErrorKind::MethodNotInTrait { + trait_name: the_trait.name.clone(), + impl_method: func.name_ident().clone(), + }; + errors.push((error.into(), trait_impl.file_id)); + } + } + + trait_impl.methods.functions = ordered_methods; + trait_impl.methods.trait_id = Some(trait_id); + errors +} + +fn collect_trait_impl( + context: &mut Context, + crate_id: CrateId, + trait_impl: &mut UnresolvedTraitImpl, +) -> Vec<(CompilationError, FileId)> { + let interner = &mut context.def_interner; + let def_maps = &mut context.def_maps; + let mut errors: Vec<(CompilationError, FileId)> = vec![]; + let unresolved_type = trait_impl.object_type.clone(); + let module = ModuleId { local_id: trait_impl.module_id, krate: crate_id }; + trait_impl.trait_id = + match resolve_trait_by_path(def_maps, module, trait_impl.trait_path.clone()) { + Ok(trait_id) => Some(trait_id), + Err(error) => { + errors.push((error.into(), trait_impl.file_id)); + None + } + }; + + if let Some(trait_id) = trait_impl.trait_id { + errors + .extend(collect_trait_impl_methods(interner, def_maps, crate_id, trait_id, trait_impl)); + + let path_resolver = StandardPathResolver::new(module); + let file = def_maps[&crate_id].file_id(trait_impl.module_id); + let mut resolver = Resolver::new(interner, &path_resolver, def_maps, file); + resolver.add_generics(&trait_impl.generics); + let typ = resolver.resolve_type(unresolved_type); + errors.extend(take_errors(trait_impl.file_id, resolver)); + + if let Some(struct_type) = get_struct_type(&typ) { + let struct_type = struct_type.borrow(); + let module = 
get_module_mut(def_maps, struct_type.id.module_id()); + + for (_, method_id, method) in &trait_impl.methods.functions { + // If this method was already declared, remove it from the module so it cannot + // be accessed with the `TypeName::method` syntax. We'll check later whether the + // object types in each method overlap or not. If they do, we issue an error. + // If not, that is specialization which is allowed. + if module.declare_function(method.name_ident().clone(), *method_id).is_err() { + module.remove_function(method.name_ident()); + } + } + } + } + errors +} + +pub(crate) fn collect_trait_impls( + context: &mut Context, + crate_id: CrateId, + collected_impls: &mut [UnresolvedTraitImpl], +) -> Vec<(CompilationError, FileId)> { + collected_impls + .iter_mut() + .flat_map(|trait_impl| collect_trait_impl(context, crate_id, trait_impl)) + .collect() +} + +fn check_trait_impl_crate_coherence( + interner: &mut NodeInterner, + the_trait: &Trait, + trait_impl: &UnresolvedTraitImpl, + current_crate: CrateId, + def_maps: &BTreeMap, +) -> Vec<(CompilationError, FileId)> { + let mut errors: Vec<(CompilationError, FileId)> = vec![]; + + let module = ModuleId { krate: current_crate, local_id: trait_impl.module_id }; + let file = def_maps[&current_crate].file_id(trait_impl.module_id); + let path_resolver = StandardPathResolver::new(module); + let mut resolver = Resolver::new(interner, &path_resolver, def_maps, file); + + let object_crate = match resolver.resolve_type(trait_impl.object_type.clone()) { + Type::Struct(struct_type, _) => struct_type.borrow().id.krate(), + _ => CrateId::Dummy, + }; + + if current_crate != the_trait.crate_id && current_crate != object_crate { + let error = DefCollectorErrorKind::TraitImplOrphaned { + span: trait_impl.object_type.span.expect("object type must have a span"), + }; + errors.push((error.into(), trait_impl.file_id)); + } + + errors +} + +pub(crate) fn resolve_trait_by_path( + def_maps: &BTreeMap, + module: ModuleId, + path: Path, +) -> Result { + let path_resolver = StandardPathResolver::new(module); + + match path_resolver.resolve(def_maps, path.clone()) { + Ok(ModuleDefId::TraitId(trait_id)) => Ok(trait_id), + Ok(_) => Err(DefCollectorErrorKind::NotATrait { not_a_trait_name: path }), + Err(_) => Err(DefCollectorErrorKind::TraitNotFound { trait_path: path }), + } +} +pub(crate) fn resolve_trait_impls( + context: &mut Context, + traits: Vec, + crate_id: CrateId, + errors: &mut Vec<(CompilationError, FileId)>, +) -> Vec<(FileId, FuncId)> { + let interner = &mut context.def_interner; + let mut methods = Vec::<(FileId, FuncId)>::new(); + + for trait_impl in traits { + let unresolved_type = trait_impl.object_type; + let local_mod_id = trait_impl.module_id; + let module_id = ModuleId { krate: crate_id, local_id: local_mod_id }; + let path_resolver = StandardPathResolver::new(module_id); + + let self_type_span = unresolved_type.span; + + let mut resolver = + Resolver::new(interner, &path_resolver, &context.def_maps, trait_impl.file_id); + resolver.add_generics(&trait_impl.generics); + let self_type = resolver.resolve_type(unresolved_type.clone()); + let generics = resolver.get_generics().to_vec(); + + let impl_id = interner.next_trait_impl_id(); + + let mut impl_methods = functions::resolve_function_set( + interner, + crate_id, + &context.def_maps, + trait_impl.methods.clone(), + Some(self_type.clone()), + Some(impl_id), + generics.clone(), + errors, + ); + + let maybe_trait_id = trait_impl.trait_id; + if let Some(trait_id) = maybe_trait_id { + for (_, func) in 
&impl_methods { + interner.set_function_trait(*func, self_type.clone(), trait_id); + } + } + + if matches!(self_type, Type::MutableReference(_)) { + let span = self_type_span.unwrap_or_else(|| trait_impl.trait_path.span()); + let error = DefCollectorErrorKind::MutableReferenceInTraitImpl { span }; + errors.push((error.into(), trait_impl.file_id)); + } + + let mut new_resolver = + Resolver::new(interner, &path_resolver, &context.def_maps, trait_impl.file_id); + + new_resolver.set_generics(generics); + new_resolver.set_self_type(Some(self_type.clone())); + + if let Some(trait_id) = maybe_trait_id { + check_methods_signatures( + &mut new_resolver, + &impl_methods, + trait_id, + trait_impl.generics.len(), + errors, + ); + + let where_clause = trait_impl + .where_clause + .into_iter() + .flat_map(|item| new_resolver.resolve_trait_constraint(item)) + .collect(); + + let resolved_trait_impl = Shared::new(TraitImpl { + ident: trait_impl.trait_path.last_segment().clone(), + typ: self_type.clone(), + trait_id, + file: trait_impl.file_id, + where_clause, + methods: vecmap(&impl_methods, |(_, func_id)| *func_id), + }); + + if let Err((prev_span, prev_file)) = interner.add_trait_implementation( + self_type.clone(), + trait_id, + impl_id, + resolved_trait_impl, + ) { + let error = DefCollectorErrorKind::OverlappingImpl { + typ: self_type.clone(), + span: self_type_span.unwrap_or_else(|| trait_impl.trait_path.span()), + }; + errors.push((error.into(), trait_impl.file_id)); + + // The 'previous impl defined here' note must be a separate error currently + // since it may be in a different file and all errors have the same file id. + let error = DefCollectorErrorKind::OverlappingImplNote { span: prev_span }; + errors.push((error.into(), prev_file)); + } + + methods.append(&mut impl_methods); + } + } + + methods +} + +pub(crate) fn take_errors_filter_self_not_resolved( + file_id: FileId, + resolver: Resolver<'_>, +) -> Vec<(CompilationError, FileId)> { + resolver + .take_errors() + .iter() + .filter(|resolution_error| match resolution_error { + ResolverError::PathResolutionError(PathResolutionError::Unresolved(ident)) => { + &ident.0.contents != "Self" + } + _ => true, + }) + .cloned() + .map(|resolution_error| (resolution_error.into(), file_id)) + .collect() +} diff --git a/compiler/noirc_frontend/src/hir/resolution/type_aliases.rs b/compiler/noirc_frontend/src/hir/resolution/type_aliases.rs new file mode 100644 index 00000000000..f66f6c8dfa7 --- /dev/null +++ b/compiler/noirc_frontend/src/hir/resolution/type_aliases.rs @@ -0,0 +1,33 @@ +use super::{path_resolver::StandardPathResolver, resolver::Resolver}; +use crate::{ + graph::CrateId, + hir::{ + def_collector::dc_crate::{CompilationError, UnresolvedTypeAlias}, + def_map::ModuleId, + Context, + }, + node_interner::TypeAliasId, +}; +use fm::FileId; +use std::collections::BTreeMap; + +pub(crate) fn resolve_type_aliases( + context: &mut Context, + type_aliases: BTreeMap, + crate_id: CrateId, +) -> Vec<(CompilationError, FileId)> { + let mut errors: Vec<(CompilationError, FileId)> = vec![]; + for (type_id, unresolved_typ) in type_aliases { + let path_resolver = StandardPathResolver::new(ModuleId { + local_id: unresolved_typ.module_id, + krate: crate_id, + }); + let file = unresolved_typ.file_id; + let (typ, generics, resolver_errors) = + Resolver::new(&mut context.def_interner, &path_resolver, &context.def_maps, file) + .resolve_type_aliases(unresolved_typ.type_alias_def); + errors.extend(resolver_errors.iter().cloned().map(|e| (e.into(), file))); + 
context.def_interner.set_type_alias(type_id, typ, generics); + } + errors +} diff --git a/scripts/bootstrap_native.sh b/scripts/bootstrap_native.sh index 26cd44704aa..693a9d9678e 100755 --- a/scripts/bootstrap_native.sh +++ b/scripts/bootstrap_native.sh @@ -4,11 +4,13 @@ set -eu cd $(dirname "$0")/.. # If this project has been subrepod into another project, set build data manually. +export SOURCE_DATE_EPOCH=$(date +%s) +export GIT_DIRTY=false if [ -f ".gitrepo" ]; then - export SOURCE_DATE_EPOCH=$(date +%s) - export GIT_DIRTY=false export GIT_COMMIT=$(awk '/commit =/ {print $3}' .gitrepo) +else + export GIT_COMMIT=$(git rev-parse --verify HEAD) fi # Build native. -cargo build --features="noirc_frontend/aztec" --release \ No newline at end of file +cargo build --features="noirc_driver/aztec" --release diff --git a/scripts/bootstrap_packages.sh b/scripts/bootstrap_packages.sh index 552ddd7597a..5fce2675037 100755 --- a/scripts/bootstrap_packages.sh +++ b/scripts/bootstrap_packages.sh @@ -6,13 +6,15 @@ cd $(dirname "$0")/.. ./scripts/install_wasm-bindgen.sh # If this project has been subrepod into another project, set build data manually. +export SOURCE_DATE_EPOCH=$(date +%s) +export GIT_DIRTY=false if [ -f ".gitrepo" ]; then - export SOURCE_DATE_EPOCH=$(date +%s) - export GIT_DIRTY=false export GIT_COMMIT=$(awk '/commit =/ {print $3}' .gitrepo) +else + export GIT_COMMIT=$(git rev-parse --verify HEAD) fi -export cargoExtraArgs="--features noirc_frontend/aztec" +export cargoExtraArgs="--features noirc_driver/aztec" yarn yarn build diff --git a/test_programs/acir_artifacts/brillig_oracle/target/acir.gz b/test_programs/acir_artifacts/brillig_oracle/target/acir.gz index db158f61882..d202ee2437a 100644 Binary files a/test_programs/acir_artifacts/brillig_oracle/target/acir.gz and b/test_programs/acir_artifacts/brillig_oracle/target/acir.gz differ diff --git a/test_programs/acir_artifacts/brillig_oracle/target/witness.gz b/test_programs/acir_artifacts/brillig_oracle/target/witness.gz index 3fead7f6b2e..bd1fe47aade 100644 Binary files a/test_programs/acir_artifacts/brillig_oracle/target/witness.gz and b/test_programs/acir_artifacts/brillig_oracle/target/witness.gz differ diff --git a/test_programs/compile_success_empty/ec_baby_jubjub/src/main.nr b/test_programs/compile_success_empty/ec_baby_jubjub/src/main.nr index 05a186be478..becd3c8927a 100644 --- a/test_programs/compile_success_empty/ec_baby_jubjub/src/main.nr +++ b/test_programs/compile_success_empty/ec_baby_jubjub/src/main.nr @@ -33,31 +33,37 @@ fn main() { ); let p3_affine = bjj_affine.add(p1_affine, p2_affine); - assert(p3_affine.eq( - Gaffine::new( - 7916061937171219682591368294088513039687205273691143098332585753343424131937, - 14035240266687799601661095864649209771790948434046947201833777492504781204499 + assert( + p3_affine.eq( + Gaffine::new( + 7916061937171219682591368294088513039687205273691143098332585753343424131937, + 14035240266687799601661095864649209771790948434046947201833777492504781204499 + ) ) - )); + ); // Test scalar multiplication let p4_affine = bjj_affine.mul(2, p1_affine); - assert(p4_affine.eq( - Gaffine::new( - 6890855772600357754907169075114257697580319025794532037257385534741338397365, - 4338620300185947561074059802482547481416142213883829469920100239455078257889 + assert( + p4_affine.eq( + Gaffine::new( + 6890855772600357754907169075114257697580319025794532037257385534741338397365, + 4338620300185947561074059802482547481416142213883829469920100239455078257889 + ) ) - )); + ); 
assert(p4_affine.eq(bjj_affine.bit_mul([0, 1], p1_affine))); // Test subtraction let p5_affine = bjj_affine.subtract(p3_affine, p3_affine); assert(p5_affine.eq(Gaffine::zero())); // Check that these points are on the curve - assert(bjj_affine.contains(bjj_affine.gen) + assert( + bjj_affine.contains(bjj_affine.gen) & bjj_affine.contains(p1_affine) & bjj_affine.contains(p2_affine) & bjj_affine.contains(p3_affine) & bjj_affine.contains(p4_affine) - & bjj_affine.contains(p5_affine)); + & bjj_affine.contains(p5_affine) + ); // Test CurveGroup equivalents let bjj = bjj_affine.into_group(); // Baby Jubjub let p1 = p1_affine.into_group(); @@ -74,12 +80,14 @@ fn main() { assert(G::zero().eq(bjj.subtract(p3, p3))); assert(p5.eq(G::zero())); // Check that these points are on the curve - assert(bjj.contains(bjj.gen) + assert( + bjj.contains(bjj.gen) & bjj.contains(p1) & bjj.contains(p2) & bjj.contains(p3) & bjj.contains(p4) - & bjj.contains(p5)); + & bjj.contains(p5) + ); // Test SWCurve equivalents of the above // First the affine representation let bjj_swcurve_affine = bjj_affine.into_swcurve(); @@ -98,12 +106,14 @@ fn main() { assert(SWGaffine::zero().eq(bjj_swcurve_affine.subtract(p3_swcurve_affine, p3_swcurve_affine))); assert(p5_swcurve_affine.eq(SWGaffine::zero())); // Check that these points are on the curve - assert(bjj_swcurve_affine.contains(bjj_swcurve_affine.gen) + assert( + bjj_swcurve_affine.contains(bjj_swcurve_affine.gen) & bjj_swcurve_affine.contains(p1_swcurve_affine) & bjj_swcurve_affine.contains(p2_swcurve_affine) & bjj_swcurve_affine.contains(p3_swcurve_affine) & bjj_swcurve_affine.contains(p4_swcurve_affine) - & bjj_swcurve_affine.contains(p5_swcurve_affine)); + & bjj_swcurve_affine.contains(p5_swcurve_affine) + ); // Then the CurveGroup representation let bjj_swcurve = bjj.into_swcurve(); @@ -121,12 +131,14 @@ fn main() { assert(SWG::zero().eq(bjj_swcurve.subtract(p3_swcurve, p3_swcurve))); assert(p5_swcurve.eq(SWG::zero())); // Check that these points are on the curve - assert(bjj_swcurve.contains(bjj_swcurve.gen) + assert( + bjj_swcurve.contains(bjj_swcurve.gen) & bjj_swcurve.contains(p1_swcurve) & bjj_swcurve.contains(p2_swcurve) & bjj_swcurve.contains(p3_swcurve) & bjj_swcurve.contains(p4_swcurve) - & bjj_swcurve.contains(p5_swcurve)); + & bjj_swcurve.contains(p5_swcurve) + ); // Test MontCurve conversions // First the affine representation let bjj_montcurve_affine = bjj_affine.into_montcurve(); @@ -145,12 +157,14 @@ fn main() { assert(MGaffine::zero().eq(bjj_montcurve_affine.subtract(p3_montcurve_affine, p3_montcurve_affine))); assert(p5_montcurve_affine.eq(MGaffine::zero())); // Check that these points are on the curve - assert(bjj_montcurve_affine.contains(bjj_montcurve_affine.gen) + assert( + bjj_montcurve_affine.contains(bjj_montcurve_affine.gen) & bjj_montcurve_affine.contains(p1_montcurve_affine) & bjj_montcurve_affine.contains(p2_montcurve_affine) & bjj_montcurve_affine.contains(p3_montcurve_affine) & bjj_montcurve_affine.contains(p4_montcurve_affine) - & bjj_montcurve_affine.contains(p5_montcurve_affine)); + & bjj_montcurve_affine.contains(p5_montcurve_affine) + ); // Then the CurveGroup representation let bjj_montcurve = bjj.into_montcurve(); @@ -168,31 +182,37 @@ fn main() { assert(MG::zero().eq(bjj_montcurve.subtract(p3_montcurve, p3_montcurve))); assert(p5_montcurve.eq(MG::zero())); // Check that these points are on the curve - assert(bjj_montcurve.contains(bjj_montcurve.gen) + assert( + bjj_montcurve.contains(bjj_montcurve.gen) & 
         & bjj_montcurve.contains(p1_montcurve)
         & bjj_montcurve.contains(p2_montcurve)
         & bjj_montcurve.contains(p3_montcurve)
         & bjj_montcurve.contains(p4_montcurve)
-        & bjj_montcurve.contains(p5_montcurve));
+        & bjj_montcurve.contains(p5_montcurve)
+    );
     // Elligator 2 map-to-curve
     let ell2_pt_map = bjj_affine.elligator2_map(27);
-    assert(ell2_pt_map.eq(
-        MGaffine::new(
-            7972459279704486422145701269802978968072470631857513331988813812334797879121,
-            8142420778878030219043334189293412482212146646099536952861607542822144507872
-        ).into_tecurve()
-    ));
+    assert(
+        ell2_pt_map.eq(
+            MGaffine::new(
+                7972459279704486422145701269802978968072470631857513331988813812334797879121,
+                8142420778878030219043334189293412482212146646099536952861607542822144507872
+            ).into_tecurve()
+        )
+    );
     // SWU map-to-curve
     let swu_pt_map = bjj_affine.swu_map(5, 27);
-    assert(swu_pt_map.eq(
-        bjj_affine.map_from_swcurve(
-            SWGaffine::new(
-                2162719247815120009132293839392097468339661471129795280520343931405114293888,
-                5341392251743377373758788728206293080122949448990104760111875914082289313973
+    assert(
+        swu_pt_map.eq(
+            bjj_affine.map_from_swcurve(
+                SWGaffine::new(
+                    2162719247815120009132293839392097468339661471129795280520343931405114293888,
+                    5341392251743377373758788728206293080122949448990104760111875914082289313973
+                )
             )
         )
-    ));
+    );
     }
 }
diff --git a/test_programs/execution_success/1_mul/Nargo.toml b/test_programs/execution_success/1_mul/Nargo.toml
index a0fd8d98027..94b36157cca 100644
--- a/test_programs/execution_success/1_mul/Nargo.toml
+++ b/test_programs/execution_success/1_mul/Nargo.toml
@@ -1,5 +1,6 @@
 [package]
 name = "1_mul"
+version = "0.1.0"
 type = "bin"
 authors = [""]
 
diff --git a/test_programs/execution_success/brillig_oracle/src/main.nr b/test_programs/execution_success/brillig_oracle/src/main.nr
index 86cf6ff1498..490b7b605e3 100644
--- a/test_programs/execution_success/brillig_oracle/src/main.nr
+++ b/test_programs/execution_success/brillig_oracle/src/main.nr
@@ -1,25 +1,41 @@
 use dep::std::slice;
+use dep::std::test::OracleMock;
+
 // Tests oracle usage in brillig/unconstrained functions
 fn main(x: Field) {
-    get_number_sequence_wrapper(20);
+    let size = 20;
+    // TODO: Add a method along the lines of `(0..size).to_array()`.
+    let mut mock_oracle_response = [0; 20];
+    // TODO: Add an `array.reverse()` method.
+    let mut reversed_mock_oracle_response = [0; 20];
+    for i in 0..size {
+        mock_oracle_response[i] = i;
+        reversed_mock_oracle_response[19 - i] = i;
+    }
+
+    // TODO: this method of returning a slice feels hacky.
+    let _ = OracleMock::mock("get_number_sequence").with_params(size).returns((20, mock_oracle_response));
+    let _ = OracleMock::mock("get_reverse_number_sequence").with_params(size).returns((20, reversed_mock_oracle_response));
+
+    get_number_sequence_wrapper(size);
 }
-// TODO(#1911): This function does not need to be an oracle but acts
-// as a useful test while we finalize code generation for slices in Brillig
+
+// Define oracle functions which we have mocked above
 #[oracle(get_number_sequence)]
 unconstrained fn get_number_sequence(_size: Field) -> [Field] {}
-// TODO(#1911)
+
 #[oracle(get_reverse_number_sequence)]
 unconstrained fn get_reverse_number_sequence(_size: Field) -> [Field] {}
 
 unconstrained fn get_number_sequence_wrapper(size: Field) {
     let slice = get_number_sequence(size);
-    for i in 0..19 as u32 {
+    for i in 0..20 as u32 {
         assert(slice[i] == i as Field);
     }
     let reversed_slice = get_reverse_number_sequence(size);
     // Regression test that we have not overwritten memory
-    for i in 0..19 as u32 {
+    for i in 0..20 as u32 {
         assert(slice[i] == reversed_slice[19 - i]);
     }
 }
diff --git a/test_programs/execution_success/higher_order_functions/src/main.nr b/test_programs/execution_success/higher_order_functions/src/main.nr
index 479cdbbe7bf..6583f961d58 100644
--- a/test_programs/execution_success/higher_order_functions/src/main.nr
+++ b/test_programs/execution_success/higher_order_functions/src/main.nr
@@ -5,10 +5,12 @@ fn main(w: Field) -> pub Field {
     assert(twice(|x| x * 2, 5) == 20);
     assert((|x, y| x + y + 1)(2, 3) == 6);
     // nested lambdas
-    assert((|a, b| {
+    assert(
+        (|a, b| {
         a + (|c| c + 2)(b)
     })(0, 1)
-    == 3);
+        == 3
+    );
     // Closures:
     let a = 42;
     let g = || a;
diff --git a/test_programs/execution_success/regression/src/main.nr b/test_programs/execution_success/regression/src/main.nr
index ed3dbf23a24..08112d4c616 100644
--- a/test_programs/execution_success/regression/src/main.nr
+++ b/test_programs/execution_success/regression/src/main.nr
@@ -89,11 +89,13 @@ fn main(x: [u8; 5], z: Field) {
 
     let enc_val1 = enc(val1, val1_length);
 
-    assert(enc_val1.0 == [
+    assert(
+        enc_val1.0 == [
         0x94, 0xb8, 0x8f, 0x61, 0xe6, 0xfb, 0xda, 0x83, 0xfb, 0xff, 0xfa, 0xbe, 0x36, 0x41, 0x12,
         0x13, 0x74, 0x80, 0x39, 0x80, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00
-    ]);
+    ]
+    );
     assert(enc_val1.1 == 21);
     // Issue 2399
     let result_0 = bitshift_literal_0();
diff --git a/test_programs/execution_success/workspace/crates/b/src/main.nr b/test_programs/execution_success/workspace/crates/b/src/main.nr
index 6e170de75fc..4e1fd3c9035 100644
--- a/test_programs/execution_success/workspace/crates/b/src/main.nr
+++ b/test_programs/execution_success/workspace/crates/b/src/main.nr
@@ -1,3 +1,3 @@
-fn main(x : Field, y : pub Field) {
+fn main(x: Field, y: pub Field) {
     assert(x != y);
 }
diff --git a/test_programs/execution_success/workspace_default_member/a/src/main.nr b/test_programs/execution_success/workspace_default_member/a/src/main.nr
index 550e5034a7b..cf72627da2e 100644
--- a/test_programs/execution_success/workspace_default_member/a/src/main.nr
+++ b/test_programs/execution_success/workspace_default_member/a/src/main.nr
@@ -1,3 +1,3 @@
-fn main(x : Field, y : pub Field) {
+fn main(x: Field, y: pub Field) {
     assert(x == y);
 }
diff --git a/tooling/nargo/src/ops/foreign_calls.rs b/tooling/nargo/src/ops/foreign_calls.rs
index 6cc78febab3..1ca270a5bf7 100644
--- a/tooling/nargo/src/ops/foreign_calls.rs
+++ b/tooling/nargo/src/ops/foreign_calls.rs
@@ -2,7 +2,6 @@ use acvm::{
     acir::brillig::{ForeignCallParam, ForeignCallResult, Value},
     pwg::ForeignCallWaitInfo,
 };
-use iter_extended::vecmap;
 use noirc_printable_type::{decode_string_value, ForeignCallError, PrintableValueDisplay};
 
 pub trait ForeignCallExecutor {
@@ -16,8 +15,6 @@ pub trait ForeignCallExecutor {
 /// After resolution of a foreign call, nargo will restart execution of the ACVM
 pub(crate) enum ForeignCall {
     Println,
-    Sequence,
-    ReverseSequence,
     CreateMock,
     SetMockParams,
     SetMockReturns,
@@ -35,8 +32,6 @@ impl ForeignCall {
     pub(crate) fn name(&self) -> &'static str {
         match self {
             ForeignCall::Println => "println",
-            ForeignCall::Sequence => "get_number_sequence",
-            ForeignCall::ReverseSequence => "get_reverse_number_sequence",
             ForeignCall::CreateMock => "create_mock",
             ForeignCall::SetMockParams => "set_mock_params",
             ForeignCall::SetMockReturns => "set_mock_returns",
@@ -48,8 +43,6 @@ impl ForeignCall {
     pub(crate) fn lookup(op_name: &str) -> Option<ForeignCall> {
         match op_name {
             "println" => Some(ForeignCall::Println),
-            "get_number_sequence" => Some(ForeignCall::Sequence),
-            "get_reverse_number_sequence" => Some(ForeignCall::ReverseSequence),
             "create_mock" => Some(ForeignCall::CreateMock),
             "set_mock_params" => Some(ForeignCall::SetMockParams),
             "set_mock_returns" => Some(ForeignCall::SetMockReturns),
@@ -147,30 +140,6 @@ impl ForeignCallExecutor for DefaultForeignCallExecutor {
                 }
                 Ok(ForeignCallResult { values: vec![] })
             }
-            Some(ForeignCall::Sequence) => {
-                let sequence_length: u128 =
-                    foreign_call.inputs[0].unwrap_value().to_field().to_u128();
-                let sequence = vecmap(0..sequence_length, Value::from);
-
-                Ok(ForeignCallResult {
-                    values: vec![
-                        ForeignCallParam::Single(sequence_length.into()),
-                        ForeignCallParam::Array(sequence),
-                    ],
-                })
-            }
-            Some(ForeignCall::ReverseSequence) => {
-                let sequence_length: u128 =
-                    foreign_call.inputs[0].unwrap_value().to_field().to_u128();
-                let sequence = vecmap((0..sequence_length).rev(), Value::from);
-
-                Ok(ForeignCallResult {
-                    values: vec![
-                        ForeignCallParam::Single(sequence_length.into()),
-                        ForeignCallParam::Array(sequence),
-                    ],
-                })
-            }
             Some(ForeignCall::CreateMock) => {
                 let mock_oracle_name = Self::parse_string(&foreign_call.inputs[0]);
                 assert!(ForeignCall::lookup(&mock_oracle_name).is_none());
diff --git a/tooling/nargo/src/package.rs b/tooling/nargo/src/package.rs
index 94c7c5b9c98..ecbf3585210 100644
--- a/tooling/nargo/src/package.rs
+++ b/tooling/nargo/src/package.rs
@@ -43,6 +43,7 @@ impl Dependency {
 
 #[derive(Clone)]
 pub struct Package {
+    pub version: Option<String>,
     // A semver string which specifies the compiler version required to compile this package
     pub compiler_required_version: Option<String>,
     pub root_dir: PathBuf,
diff --git a/tooling/nargo_cli/tests/execution_success/brillig_cow/Nargo.toml b/tooling/nargo_cli/tests/execution_success/brillig_cow/Nargo.toml
new file mode 100644
index 00000000000..d191eb53ddf
--- /dev/null
+++ b/tooling/nargo_cli/tests/execution_success/brillig_cow/Nargo.toml
@@ -0,0 +1,6 @@
+[package]
+name = "brillig_cow"
+type = "bin"
+authors = [""]
+
+[dependencies]
diff --git a/tooling/nargo_cli/tests/execution_success/brillig_cow/Prover.toml b/tooling/nargo_cli/tests/execution_success/brillig_cow/Prover.toml
new file mode 100644
index 00000000000..6533d218b15
--- /dev/null
+++ b/tooling/nargo_cli/tests/execution_success/brillig_cow/Prover.toml
@@ -0,0 +1,7 @@
+original = [0, 1, 2, 3, 4]
+index = 2
+
+[expected_result]
+original = [0, 1, 2, 3, 4]
+modified_once = [0, 1, 27, 3, 4]
+modified_twice = [0, 1, 27, 27, 4]
diff --git a/tooling/nargo_cli/tests/execution_success/brillig_cow/src/main.nr b/tooling/nargo_cli/tests/execution_success/brillig_cow/src/main.nr
new file mode 100644
index 00000000000..7d847e085fe
--- /dev/null
+++ b/tooling/nargo_cli/tests/execution_success/brillig_cow/src/main.nr
@@ -0,0 +1,54 @@
+// Tests the copy on write optimization for arrays. We look for cases where we are modifying an array in place when we shouldn't.
+
+global ARRAY_SIZE = 5;
+
+struct ExecutionResult {
+    original: [Field; ARRAY_SIZE],
+    modified_once: [Field; ARRAY_SIZE],
+    modified_twice: [Field; ARRAY_SIZE],
+}
+
+impl ExecutionResult {
+    fn is_equal(self, other: ExecutionResult) -> bool {
+        (self.original == other.original) &
+        (self.modified_once == other.modified_once) &
+        (self.modified_twice == other.modified_twice)
+    }
+}
+
+fn modify_in_inlined_constrained(original: [Field; ARRAY_SIZE], index: u64) -> ExecutionResult {
+    let mut modified = original;
+
+    modified[index] = 27;
+
+    let modified_once = modified;
+
+    modified[index+1] = 27;
+
+    ExecutionResult {
+        original,
+        modified_once,
+        modified_twice: modified
+    }
+}
+
+unconstrained fn modify_in_unconstrained(original: [Field; ARRAY_SIZE], index: u64) -> ExecutionResult {
+    let mut modified = original;
+
+    modified[index] = 27;
+
+    let modified_once = modified;
+
+    modified[index+1] = 27;
+
+    ExecutionResult {
+        original,
+        modified_once,
+        modified_twice: modified
+    }
+}
+
+unconstrained fn main(original: [Field; ARRAY_SIZE], index: u64, expected_result: ExecutionResult) {
+    assert(expected_result.is_equal(modify_in_unconstrained(original, index)));
+    assert(expected_result.is_equal(modify_in_inlined_constrained(original, index)));
+}
diff --git a/tooling/nargo_toml/src/errors.rs b/tooling/nargo_toml/src/errors.rs
index 490242cc9ac..da976e1b185 100644
--- a/tooling/nargo_toml/src/errors.rs
+++ b/tooling/nargo_toml/src/errors.rs
@@ -71,6 +71,7 @@ pub enum ManifestError {
     SemverError(SemverError),
 }
 
+#[allow(clippy::enum_variant_names)]
 #[derive(Error, Debug, PartialEq, Eq, Clone)]
 pub enum SemverError {
     #[error("Incompatible compiler version in package {package_name}. Required compiler version is {required_compiler_version} but the compiler version is {compiler_version_found}.\n Update the compiler_version field in Nargo.toml to >={required_compiler_version} or compile this project with version {required_compiler_version}")]
@@ -81,4 +82,6 @@ pub enum SemverError {
     },
     #[error("Could not parse the required compiler version for package {package_name} in Nargo.toml. Error: {error}")]
     CouldNotParseRequiredVersion { package_name: String, error: String },
+    #[error("Could not parse the package version for package {package_name} in Nargo.toml. Error: {error}")]
Error: {error}")] + CouldNotParsePackageVersion { package_name: String, error: String }, } diff --git a/tooling/nargo_toml/src/lib.rs b/tooling/nargo_toml/src/lib.rs index 223ed2da081..141cb3411b3 100644 --- a/tooling/nargo_toml/src/lib.rs +++ b/tooling/nargo_toml/src/lib.rs @@ -8,6 +8,7 @@ use std::{ path::{Component, Path, PathBuf}, }; +use errors::SemverError; use fm::{NormalizePath, FILE_EXTENSION}; use nargo::{ package::{Dependency, Package, PackageType}, @@ -99,7 +100,7 @@ struct PackageConfig { impl PackageConfig { fn resolve_to_package(&self, root_dir: &Path) -> Result { - let name = if let Some(name) = &self.package.name { + let name: CrateName = if let Some(name) = &self.package.name { name.parse().map_err(|_| ManifestError::InvalidPackageName { toml: root_dir.join("Nargo.toml"), name: name.into(), @@ -163,7 +164,18 @@ impl PackageConfig { } }; + // If there is a package version, ensure that it is semver compatible + if let Some(version) = &self.package.version { + semver::parse_semver_compatible_version(version).map_err(|err| { + ManifestError::SemverError(SemverError::CouldNotParsePackageVersion { + package_name: name.to_string(), + error: err.to_string(), + }) + })?; + } + Ok(Package { + version: self.package.version.clone(), compiler_required_version: self.package.compiler_version.clone(), root_dir: root_dir.to_path_buf(), entry_path, @@ -225,6 +237,7 @@ struct WorkspaceConfig { #[derive(Default, Debug, Deserialize, Clone)] struct PackageMetadata { name: Option, + version: Option, #[serde(alias = "type")] package_type: Option, entry: Option, diff --git a/tooling/nargo_toml/src/semver.rs b/tooling/nargo_toml/src/semver.rs index de722f06bd8..6acc68afa47 100644 --- a/tooling/nargo_toml/src/semver.rs +++ b/tooling/nargo_toml/src/semver.rs @@ -3,14 +3,19 @@ use nargo::{ package::{Dependency, Package}, workspace::Workspace, }; -use semver::{Version, VersionReq}; +use semver::{Error, Version, VersionReq}; + +// Parse a semver compatible version string +pub(crate) fn parse_semver_compatible_version(version: &str) -> Result { + Version::parse(version) +} // Check that all of the packages in the workspace are compatible with the current compiler version pub(crate) fn semver_check_workspace( workspace: Workspace, current_compiler_version: String, ) -> Result<(), ManifestError> { - let version = Version::parse(¤t_compiler_version) + let version = parse_semver_compatible_version(¤t_compiler_version) .expect("The compiler version is not a valid semver version"); for package in &workspace.members { semver_check_package(package, &version).map_err(ManifestError::SemverError)?; @@ -83,6 +88,7 @@ mod tests { entry_path: PathBuf::new(), name: CrateName::from_str("test").unwrap(), dependencies: BTreeMap::new(), + version: Some("1.0".to_string()), }; if let Err(err) = semver_check_package(&package, &compiler_version) { panic!("semver check should have passed. 
@@ -113,6 +119,7 @@ mod tests {
             entry_path: PathBuf::new(),
             name: CrateName::from_str("test").unwrap(),
             dependencies: BTreeMap::new(),
+            version: Some("1.0".to_string()),
         };
 
         let valid_dependency = Package {
@@ -122,6 +129,7 @@ mod tests {
             entry_path: PathBuf::new(),
             name: CrateName::from_str("good_dependency").unwrap(),
             dependencies: BTreeMap::new(),
+            version: Some("1.0".to_string()),
         };
         let invalid_dependency = Package {
             compiler_required_version: Some("0.2.0".to_string()),
@@ -130,6 +138,7 @@ mod tests {
             entry_path: PathBuf::new(),
             name: CrateName::from_str("bad_dependency").unwrap(),
             dependencies: BTreeMap::new(),
+            version: Some("1.0".to_string()),
         };
 
         package.dependencies.insert(
@@ -169,6 +178,7 @@ mod tests {
             entry_path: PathBuf::new(),
             name: CrateName::from_str("test").unwrap(),
             dependencies: BTreeMap::new(),
+            version: Some("1.0".to_string()),
         };
 
         if let Err(err) = semver_check_package(&package, &compiler_version) {
@@ -187,6 +197,7 @@ mod tests {
             entry_path: PathBuf::new(),
             name: CrateName::from_str("test").unwrap(),
             dependencies: BTreeMap::new(),
+            version: Some("1.0".to_string()),
         };
 
         if let Err(err) = semver_check_package(&package, &compiler_version) {