diff --git a/evm_arithmetization/src/cpu/jumps.rs b/evm_arithmetization/src/cpu/jumps.rs
index 2a8df148a..c100c4f91 100644
--- a/evm_arithmetization/src/cpu/jumps.rs
+++ b/evm_arithmetization/src/cpu/jumps.rs
@@ -74,7 +74,6 @@ pub(crate) fn eval_packed_jump_jumpi<P: PackedField>(
     let dst = lv.mem_channels[0].value;
     let cond = lv.mem_channels[1].value;
     let filter = lv.op.jumps; // `JUMP` or `JUMPI`
-    let jumpdest_flag_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];
     let is_jump = filter * (P::ONES - lv.opcode_bits[0]);
     let is_jumpi = filter * lv.opcode_bits[0];
 
@@ -121,26 +120,34 @@ pub(crate) fn eval_packed_jump_jumpi<P: PackedField>(
     // If we're jumping, then the high 7 limbs of the destination must be 0.
     let dst_hi_sum: P = dst[1..].iter().copied().sum();
     yield_constr.constraint(filter * jumps_lv.should_jump * dst_hi_sum);
-    // Check that the destination address holds a `JUMPDEST` instruction. Note that
-    // this constraint does not need to be conditioned on `should_jump` because
-    // no read takes place if we're not jumping, so we're free to set the
-    // channel to 1.
-    yield_constr.constraint(filter * (jumpdest_flag_channel.value[0] - P::ONES));
-
-    // Make sure that the JUMPDEST flag channel is constrained.
-    // Only need to read if we're about to jump and we're not in kernel mode.
-    yield_constr.constraint(
-        filter
-            * (jumpdest_flag_channel.used - jumps_lv.should_jump * (P::ONES - lv.is_kernel_mode)),
-    );
-    yield_constr.constraint(filter * (jumpdest_flag_channel.is_read - P::ONES));
-    yield_constr.constraint(filter * (jumpdest_flag_channel.addr_context - lv.context));
-    yield_constr.constraint(
-        filter
-            * (jumpdest_flag_channel.addr_segment
-                - P::Scalar::from_canonical_usize(Segment::JumpdestBits.unscale())),
-    );
-    yield_constr.constraint(filter * (jumpdest_flag_channel.addr_virtual - dst[0]));
+
+    // We skip jump destination verification with `cdk_erigon`.
+    #[cfg(not(feature = "cdk_erigon"))]
+    {
+        let jumpdest_flag_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];
+
+        // Check that the destination address holds a `JUMPDEST` instruction. Note that
+        // this constraint does not need to be conditioned on `should_jump` because
+        // no read takes place if we're not jumping, so we're free to set the
+        // channel to 1.
+        yield_constr.constraint(filter * (jumpdest_flag_channel.value[0] - P::ONES));
+
+        // Make sure that the JUMPDEST flag channel is constrained.
+        // Only need to read if we're about to jump and we're not in kernel mode.
+        yield_constr.constraint(
+            filter
+                * (jumpdest_flag_channel.used
+                    - jumps_lv.should_jump * (P::ONES - lv.is_kernel_mode)),
+        );
+        yield_constr.constraint(filter * (jumpdest_flag_channel.is_read - P::ONES));
+        yield_constr.constraint(filter * (jumpdest_flag_channel.addr_context - lv.context));
+        yield_constr.constraint(
+            filter
+                * (jumpdest_flag_channel.addr_segment
+                    - P::Scalar::from_canonical_usize(Segment::JumpdestBits.unscale())),
+        );
+        yield_constr.constraint(filter * (jumpdest_flag_channel.addr_virtual - dst[0]));
+    }
 
     // Disable unused memory channels
     for &channel in &lv.mem_channels[2..NUM_GP_CHANNELS - 1] {
@@ -179,7 +186,6 @@ pub(crate) fn eval_ext_circuit_jump_jumpi<F: RichField + Extendable<D>, const D: usize>(
     let dst = lv.mem_channels[0].value;
     let cond = lv.mem_channels[1].value;
     let filter = lv.op.jumps; // `JUMP` or `JUMPI`
-    let jumpdest_flag_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];
     let one_extension = builder.one_extension();
     let is_jump = builder.sub_extension(one_extension, lv.opcode_bits[0]);
     let is_jump = builder.mul_extension(filter, is_jump);
@@ -281,50 +287,57 @@ pub(crate) fn eval_ext_circuit_jump_jumpi<F: RichField + Extendable<D>, const D: usize>(
         let constr = builder.mul_extension(filter, constr);
         yield_constr.constraint(builder, constr);
     }
-    // Check that the destination address holds a `JUMPDEST` instruction. Note that
-    // this constraint does not need to be conditioned on `should_jump` because
-    // no read takes place if we're not jumping, so we're free to set the
-    // channel to 1.
-    {
-        let constr = builder.mul_sub_extension(filter, jumpdest_flag_channel.value[0], filter);
-        yield_constr.constraint(builder, constr);
-    }
-    // Make sure that the JUMPDEST flag channel is constrained.
-    // Only need to read if we're about to jump and we're not in kernel mode.
+    // We skip jump destination verification with `cdk_erigon`.
+    #[cfg(not(feature = "cdk_erigon"))]
     {
-        let constr = builder.mul_sub_extension(
-            jumps_lv.should_jump,
-            lv.is_kernel_mode,
-            jumps_lv.should_jump,
-        );
-        let constr = builder.add_extension(jumpdest_flag_channel.used, constr);
-        let constr = builder.mul_extension(filter, constr);
-        yield_constr.constraint(builder, constr);
-    }
-    {
-        let constr = builder.mul_sub_extension(filter, jumpdest_flag_channel.is_read, filter);
-        yield_constr.constraint(builder, constr);
-    }
-    {
-        let constr = builder.sub_extension(jumpdest_flag_channel.addr_context, lv.context);
-        let constr = builder.mul_extension(filter, constr);
-        yield_constr.constraint(builder, constr);
-    }
-    {
-        let constr = builder.arithmetic_extension(
-            F::ONE,
-            -F::from_canonical_usize(Segment::JumpdestBits.unscale()),
-            filter,
-            jumpdest_flag_channel.addr_segment,
-            filter,
-        );
-        yield_constr.constraint(builder, constr);
-    }
-    {
-        let constr = builder.sub_extension(jumpdest_flag_channel.addr_virtual, dst[0]);
-        let constr = builder.mul_extension(filter, constr);
-        yield_constr.constraint(builder, constr);
+        let jumpdest_flag_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];
+
+        // Check that the destination address holds a `JUMPDEST` instruction. Note that
+        // this constraint does not need to be conditioned on `should_jump` because
+        // no read takes place if we're not jumping, so we're free to set the
+        // channel to 1.
+        {
+            let constr = builder.mul_sub_extension(filter, jumpdest_flag_channel.value[0], filter);
+            yield_constr.constraint(builder, constr);
+        }
+
+        // Make sure that the JUMPDEST flag channel is constrained.
+        // Only need to read if we're about to jump and we're not in kernel mode.
+        {
+            let constr = builder.mul_sub_extension(
+                jumps_lv.should_jump,
+                lv.is_kernel_mode,
+                jumps_lv.should_jump,
+            );
+            let constr = builder.add_extension(jumpdest_flag_channel.used, constr);
+            let constr = builder.mul_extension(filter, constr);
+            yield_constr.constraint(builder, constr);
+        }
+        {
+            let constr = builder.mul_sub_extension(filter, jumpdest_flag_channel.is_read, filter);
+            yield_constr.constraint(builder, constr);
+        }
+        {
+            let constr = builder.sub_extension(jumpdest_flag_channel.addr_context, lv.context);
+            let constr = builder.mul_extension(filter, constr);
+            yield_constr.constraint(builder, constr);
+        }
+        {
+            let constr = builder.arithmetic_extension(
+                F::ONE,
+                -F::from_canonical_usize(Segment::JumpdestBits.unscale()),
+                filter,
+                jumpdest_flag_channel.addr_segment,
+                filter,
+            );
+            yield_constr.constraint(builder, constr);
+        }
+        {
+            let constr = builder.sub_extension(jumpdest_flag_channel.addr_virtual, dst[0]);
+            let constr = builder.mul_extension(filter, constr);
+            yield_constr.constraint(builder, constr);
+        }
     }
 
     // Disable unused memory channels
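Aside: the gated block above keeps the usual plonky2 pattern, where each check is a polynomial that must evaluate to zero on every row, multiplied by a filter so it only binds on `JUMP`/`JUMPI` rows. A minimal sketch of that shape over plain integers rather than field elements (illustrative names, not the crate's API):

```rust
// `filter * (flag - 1)` vanishes exactly when the row is not a jump
// (filter = 0), or when the JUMPDEST flag read from memory equals 1.
fn flag_constraint(filter: i64, flag: i64) -> i64 {
    filter * (flag - 1)
}

fn main() {
    assert_eq!(flag_constraint(0, 7), 0); // non-jump row: channel unconstrained
    assert_eq!(flag_constraint(1, 1), 0); // jump row with the flag set: satisfied
    assert_ne!(flag_constraint(1, 0), 0); // jump row with the flag clear: violated
}
```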
diff --git a/evm_arithmetization/src/cpu/kernel/aggregator.rs b/evm_arithmetization/src/cpu/kernel/aggregator.rs
index 74a5f17cb..d24e856fa 100644
--- a/evm_arithmetization/src/cpu/kernel/aggregator.rs
+++ b/evm_arithmetization/src/cpu/kernel/aggregator.rs
@@ -11,9 +11,7 @@ use crate::cpu::kernel::parser::parse;
 
 pub const NUMBER_KERNEL_FILES: usize = if cfg!(feature = "eth_mainnet") {
     157
-} else if cfg!(feature = "cdk_erigon") {
-    155
-} else if cfg!(feature = "polygon_pos") {
+} else if cfg!(feature = "cdk_erigon") || cfg!(feature = "polygon_pos") {
     154
 } else {
     // unreachable
@@ -43,6 +41,7 @@ pub static KERNEL_FILES: [&str; NUMBER_KERNEL_FILES] = [
     include_str!("asm/core/create_receipt.asm"),
     include_str!("asm/core/gas.asm"),
     include_str!("asm/core/intrinsic_gas.asm"),
+    #[cfg(not(feature = "cdk_erigon"))]
     include_str!("asm/core/jumpdest_analysis.asm"),
     include_str!("asm/core/nonce.asm"),
     include_str!("asm/core/process_txn.asm"),
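Aside: the change relies on the stable Rust pattern of pairing a `cfg!`-computed length constant with `#[cfg]` attributes on individual array elements. A self-contained sketch, with a hypothetical `some_feature` feature that would be declared in `Cargo.toml`:

```rust
// The length constant and the gated element are driven by the same feature,
// mirroring how NUMBER_KERNEL_FILES and KERNEL_FILES stay in sync above.
const NUM_FILES: usize = if cfg!(feature = "some_feature") { 2 } else { 3 };

static FILES: [&str; NUM_FILES] = [
    "always.asm",
    #[cfg(not(feature = "some_feature"))]
    "sometimes.asm",
    "also_always.asm",
];

fn main() {
    // Holds whether or not the feature is enabled.
    assert_eq!(FILES.len(), NUM_FILES);
}
```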
diff --git a/evm_arithmetization/src/cpu/kernel/asm/core/call.asm b/evm_arithmetization/src/cpu/kernel/asm/core/call.asm
index c6f91459e..ab8b67c4e 100644
--- a/evm_arithmetization/src/cpu/kernel/asm/core/call.asm
+++ b/evm_arithmetization/src/cpu/kernel/asm/core/call.asm
@@ -386,11 +386,17 @@ call_too_deep:
     SET_CONTEXT
     %checkpoint // Checkpoint
     %increment_call_depth
-    // Perform jumpdest analysis
-    %mload_context_metadata(@CTX_METADATA_CODE_SIZE)
-    GET_CONTEXT
-    // stack: ctx, code_size, retdest
-    %jumpdest_analysis
+
+    // We skip jumpdest analysis with `cdk_erigon`.
+    #[cfg(not(feature = cdk_erigon))]
+    {
+        // Perform jumpdest analysis
+        %mload_context_metadata(@CTX_METADATA_CODE_SIZE)
+        GET_CONTEXT
+        // stack: ctx, code_size, retdest
+        %jumpdest_analysis
+    }
+
     PUSH 0 // jump dest
     EXIT_KERNEL
     // (Old context) stack: new_ctx
diff --git a/evm_arithmetization/src/cpu/kernel/asm/core/exception.asm b/evm_arithmetization/src/cpu/kernel/asm/core/exception.asm
index 925921097..654dad8f5 100644
--- a/evm_arithmetization/src/cpu/kernel/asm/core/exception.asm
+++ b/evm_arithmetization/src/cpu/kernel/asm/core/exception.asm
@@ -124,22 +124,26 @@ global exc_invalid_jumpi_destination:
 
 global invalid_jump_jumpi_destination_common:
-    // We have a jump destination on the stack. We want to `PANIC` if it is valid, and jump to
-    // `fault_exception` if it is not. An address is a valid jump destination if it points to a
-    // `JUMPDEST` instruction. In practice, since in this implementation memory addresses are
-    // limited to 32 bits, we check two things:
-    // 1. the address is no more than 32 bits long, and
-    // 2. it points to a `JUMPDEST` instruction.
-    // stack: jump_dest
-    DUP1
-    %shr_const(32)
-    %jumpi(fault_exception) // This keeps one copy of jump_dest on the stack, but that's fine.
-    // jump_dest is a valid address; check if it points to a `JUMP_DEST`.
-    DUP1
-    %verify_non_jumpdest
-    %mload_current(@SEGMENT_JUMPDEST_BITS)
-    // stack: is_valid_jumpdest
-    %jumpi(panic) // Trap should never have been entered.
+    // We skip jump destination verification with `cdk_erigon`.
+    #[cfg(not(feature = cdk_erigon))]
+    {
+        // We have a jump destination on the stack. We want to `PANIC` if it is valid, and jump to
+        // `fault_exception` if it is not. An address is a valid jump destination if it points to a
+        // `JUMPDEST` instruction. In practice, since in this implementation memory addresses are
+        // limited to 32 bits, we check two things:
+        // 1. the address is no more than 32 bits long, and
+        // 2. it points to a `JUMPDEST` instruction.
+        // stack: jump_dest
+        DUP1
+        %shr_const(32)
+        %jumpi(fault_exception) // This keeps one copy of jump_dest on the stack, but that's fine.
+        // jump_dest is a valid address; check if it points to a `JUMPDEST`.
+        DUP1
+        %verify_non_jumpdest
+        %mload_current(@SEGMENT_JUMPDEST_BITS)
+        // stack: is_valid_jumpdest
+        %jumpi(panic) // Trap should never have been entered.
+    }
 
     %jump(fault_exception)
diff --git a/evm_arithmetization/src/cpu/kernel/tests/core/jumpdest_analysis.rs b/evm_arithmetization/src/cpu/kernel/tests/core/jumpdest_analysis.rs
index b0ef17033..f2d00ede5 100644
--- a/evm_arithmetization/src/cpu/kernel/tests/core/jumpdest_analysis.rs
+++ b/evm_arithmetization/src/cpu/kernel/tests/core/jumpdest_analysis.rs
@@ -1,14 +1,53 @@
+#![cfg(not(feature = "cdk_erigon"))]
+
 use std::collections::{BTreeSet, HashMap};
 
 use anyhow::Result;
 use ethereum_types::U256;
 use plonky2::field::goldilocks_field::GoldilocksField as F;
+use plonky2::hash::hash_types::RichField;
 
 use crate::cpu::kernel::aggregator::KERNEL;
 use crate::cpu::kernel::interpreter::Interpreter;
 use crate::cpu::kernel::opcodes::{get_opcode, get_push_opcode};
+use crate::memory::segments::Segment;
+use crate::witness::memory::MemoryAddress;
 use crate::witness::operation::CONTEXT_SCALING_FACTOR;
 
+impl<F: RichField> Interpreter<F> {
+    pub(crate) fn set_jumpdest_analysis_inputs(&mut self, jumps: HashMap<usize, BTreeSet<usize>>) {
+        self.generation_state.set_jumpdest_analysis_inputs(jumps);
+    }
+
+    pub(crate) fn get_jumpdest_bit(&self, offset: usize) -> U256 {
+        if self.generation_state.memory.contexts[self.context()].segments
+            [Segment::JumpdestBits.unscale()]
+        .content
+        .len()
+            > offset
+        {
+            // Even though we are in the interpreter, `JumpdestBits` is not part of the
+            // preinitialized segments, so we don't need to carry out the additional checks
+            // when getting the value from memory.
+            self.generation_state.memory.get_with_init(MemoryAddress {
+                context: self.context(),
+                segment: Segment::JumpdestBits.unscale(),
+                virt: offset,
+            })
+        } else {
+            0.into()
+        }
+    }
+
+    pub(crate) fn get_jumpdest_bits(&self, context: usize) -> Vec<bool> {
+        self.generation_state.memory.contexts[context].segments[Segment::JumpdestBits.unscale()]
+            .content
+            .iter()
+            .map(|x| x.unwrap_or_default().bit(0))
+            .collect()
+    }
+}
+
 #[test]
 fn test_jumpdest_analysis() -> Result<()> {
     // By default the interpreter will skip jumpdest analysis asm and compute
diff --git a/evm_arithmetization/src/cpu/kernel/tests/mod.rs b/evm_arithmetization/src/cpu/kernel/tests/mod.rs
index 39810148b..2b983d099 100644
--- a/evm_arithmetization/src/cpu/kernel/tests/mod.rs
+++ b/evm_arithmetization/src/cpu/kernel/tests/mod.rs
@@ -26,11 +26,7 @@ mod signed_syscalls;
 mod transaction_parsing;
 mod transient_storage;
 
-use std::{
-    collections::{BTreeSet, HashMap},
-    ops::Range,
-    str::FromStr,
-};
+use std::{ops::Range, str::FromStr};
 
 use anyhow::Result;
 use ethereum_types::U256;
@@ -248,10 +244,6 @@ impl<F: RichField> Interpreter<F> {
         code.into_iter().map(|val| Some(U256::from(val))).collect();
     }
 
-    pub(crate) fn set_jumpdest_analysis_inputs(&mut self, jumps: HashMap<usize, BTreeSet<usize>>) {
-        self.generation_state.set_jumpdest_analysis_inputs(jumps);
-    }
-
     pub(crate) fn extract_kernel_memory(self, segment: Segment, range: Range<usize>) -> Vec<U256> {
         let mut output: Vec<U256> = Vec::with_capacity(range.end);
         for i in range {
@@ -312,34 +304,6 @@ impl<F: RichField> Interpreter<F> {
         result
     }
 
-    pub(crate) fn get_jumpdest_bit(&self, offset: usize) -> U256 {
-        if self.generation_state.memory.contexts[self.context()].segments
-            [Segment::JumpdestBits.unscale()]
-        .content
-        .len()
-            > offset
-        {
-            // Even though we are in the interpreter, `JumpdestBits` is not part of the
-            // preinitialized segments, so we don't need to carry out the additional checks
-            // when get the value from memory.
-            self.generation_state.memory.get_with_init(MemoryAddress {
-                context: self.context(),
-                segment: Segment::JumpdestBits.unscale(),
-                virt: offset,
-            })
-        } else {
-            0.into()
-        }
-    }
-
-    pub(crate) fn get_jumpdest_bits(&self, context: usize) -> Vec<bool> {
-        self.generation_state.memory.contexts[context].segments[Segment::JumpdestBits.unscale()]
-            .content
-            .iter()
-            .map(|x| x.unwrap_or_default().bit(0))
-            .collect()
-    }
-
     pub(crate) fn set_is_kernel(&mut self, is_kernel: bool) {
         self.generation_state.registers.is_kernel = is_kernel
     }
diff --git a/evm_arithmetization/src/witness/transition.rs b/evm_arithmetization/src/witness/transition.rs
index 6263977f4..fa66bee5b 100644
--- a/evm_arithmetization/src/witness/transition.rs
+++ b/evm_arithmetization/src/witness/transition.rs
@@ -1,12 +1,12 @@
-use ethereum_types::U256;
 use log::log_enabled;
 use plonky2::field::types::Field;
 use plonky2::hash::hash_types::RichField;
 
-use super::util::{mem_read_gp_with_log_and_fill, stack_pop_with_log_and_fill};
+use super::util::stack_pop_with_log_and_fill;
 use crate::cpu::columns::CpuColumnsView;
 use crate::cpu::kernel::aggregator::KERNEL;
 use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
+use crate::cpu::kernel::opcodes::get_opcode;
 use crate::cpu::membus::NUM_GP_CHANNELS;
 use crate::cpu::stack::{
     EQ_STACK_BEHAVIOR, IS_ZERO_STACK_BEHAVIOR, JUMPI_OP, JUMP_OP, MIGHT_OVERFLOW, STACK_BEHAVIORS,
@@ -18,7 +18,7 @@ use crate::witness::gas::gas_to_charge;
 use crate::witness::memory::MemoryAddress;
 use crate::witness::operation::*;
 use crate::witness::state::RegistersState;
-use crate::witness::util::mem_read_code_with_log_and_fill;
+use crate::witness::util::{mem_read_code_with_log_and_fill, mem_read_gp_with_log_and_fill};
 use crate::{arithmetic, logic};
 
 pub(crate) const EXC_STOP_CODE: u8 = 6;
@@ -382,30 +382,42 @@ where
             .map_err(|_| ProgramError::InvalidJumpDestination)?;
 
         if !self.generate_jumpdest_analysis(dst as usize) {
-            let gen_state = self.get_mut_generation_state();
-            let (jumpdest_bit, jumpdest_bit_log) = mem_read_gp_with_log_and_fill(
-                NUM_GP_CHANNELS - 1,
-                MemoryAddress::new(
-                    gen_state.registers.context,
-                    Segment::JumpdestBits,
-                    dst as usize,
-                ),
-                gen_state,
-                &mut row,
-            );
-            row.mem_channels[1].value[0] = F::ONE;
-            if gen_state.registers.is_kernel {
-                // Don't actually do the read, just set the address, etc.
-                let channel = &mut row.mem_channels[NUM_GP_CHANNELS - 1];
-                channel.used = F::ZERO;
-                channel.value[0] = F::ONE;
-            } else {
-                if jumpdest_bit != U256::one() {
+            let gen_state = self.get_mut_generation_state();
+
+            // We skip jump destination verification with `cdk_erigon`.
+            if !cfg!(feature = "cdk_erigon") {
+                let (jumpdest_bit, jumpdest_bit_log) = mem_read_gp_with_log_and_fill(
+                    NUM_GP_CHANNELS - 1,
+                    MemoryAddress::new(
+                        gen_state.registers.context,
+                        Segment::JumpdestBits,
+                        dst as usize,
+                    ),
+                    gen_state,
+                    &mut row,
+                );
+
+                if gen_state.registers.is_kernel {
+                    // Don't actually do the read, just set the address, etc.
+                    let channel = &mut row.mem_channels[NUM_GP_CHANNELS - 1];
+                    channel.used = F::ZERO;
+                    channel.value[0] = F::ONE;
+                } else {
+                    if jumpdest_bit != ethereum_types::U256::one() {
+                        return Err(ProgramError::InvalidJumpDestination);
+                    }
+                    self.push_memory(jumpdest_bit_log);
+                }
+            } else if !gen_state.registers.is_kernel {
+                // Perform a sanity check on the jumpdest, and abort if it is invalid.
+                let addr =
+                    MemoryAddress::new(gen_state.registers.context, Segment::Code, dst as usize);
+                let jump_dst = gen_state.get_from_memory(addr);
+                if jump_dst != get_opcode("JUMPDEST").into() {
                     return Err(ProgramError::InvalidJumpDestination);
                 }
-                self.push_memory(jumpdest_bit_log);
             }
 
             // Extra fields required by the constraints.
@@ -454,26 +466,42 @@ where
         }
 
         let gen_state = self.get_mut_generation_state();
-        let (jumpdest_bit, jumpdest_bit_log) = mem_read_gp_with_log_and_fill(
-            NUM_GP_CHANNELS - 1,
-            MemoryAddress::new(
+
+        // We skip jump destination verification with `cdk_erigon`.
+        if !cfg!(feature = "cdk_erigon") {
+            let (jumpdest_bit, jumpdest_bit_log) = mem_read_gp_with_log_and_fill(
+                NUM_GP_CHANNELS - 1,
+                MemoryAddress::new(
+                    gen_state.registers.context,
+                    Segment::JumpdestBits,
+                    dst.low_u32() as usize,
+                ),
+                gen_state,
+                &mut row,
+            );
+
+            if !should_jump || gen_state.registers.is_kernel {
+                // Don't actually do the read, just set the address, etc.
+                let channel = &mut row.mem_channels[NUM_GP_CHANNELS - 1];
+                channel.used = F::ZERO;
+                channel.value[0] = F::ONE;
+            } else {
+                if jumpdest_bit != ethereum_types::U256::one() {
+                    return Err(ProgramError::InvalidJumpiDestination);
+                }
+                self.push_memory(jumpdest_bit_log);
+            }
+        } else if should_jump && !gen_state.registers.is_kernel {
+            // Perform a sanity check on the jumpdest, and abort if it is invalid.
+            let addr = MemoryAddress::new(
                 gen_state.registers.context,
-                Segment::JumpdestBits,
+                Segment::Code,
                 dst.low_u32() as usize,
-            ),
-            gen_state,
-            &mut row,
-        );
-        if !should_jump || gen_state.registers.is_kernel {
-            // Don't actually do the read, just set the address, etc.
-            let channel = &mut row.mem_channels[NUM_GP_CHANNELS - 1];
-            channel.used = F::ZERO;
-            channel.value[0] = F::ONE;
-        } else {
-            if jumpdest_bit != U256::one() {
+            );
+            let jump_dst = gen_state.get_from_memory(addr);
+            if jump_dst != get_opcode("JUMPDEST").into() {
                 return Err(ProgramError::InvalidJumpiDestination);
             }
-            self.push_memory(jumpdest_bit_log);
         }
 
         let diff = row.stack_len - F::TWO;
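Aside: under `cdk_erigon`, both `generate_jump` and `generate_jumpi` above replace the `JumpdestBits` read with a direct comparison of the code byte at `dst` against `get_opcode("JUMPDEST")`. A standalone sketch of that comparison (hypothetical helper, not the repo's API); note that unlike full jumpdest analysis, a raw byte check does not exclude a `0x5b` byte sitting inside PUSH data:

```rust
// The JUMPDEST opcode byte in the EVM.
const JUMPDEST: u8 = 0x5b;

// Mirrors the fallback: valid iff the byte at `dst` is JUMPDEST.
fn is_plausible_jumpdest(code: &[u8], dst: usize) -> bool {
    code.get(dst) == Some(&JUMPDEST)
}

fn main() {
    // PUSH1 0x04; JUMP; STOP; JUMPDEST
    let code = [0x60, 0x04, 0x56, 0x00, 0x5b];
    assert!(is_plausible_jumpdest(&code, 4));
    assert!(!is_plausible_jumpdest(&code, 2)); // points at JUMP, not JUMPDEST
}
```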