Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(type2): Skip jumpdest analysis #631

Merged
merged 3 commits into from
Sep 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
141 changes: 77 additions & 64 deletions evm_arithmetization/src/cpu/jumps.rs
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,6 @@ pub(crate) fn eval_packed_jump_jumpi<P: PackedField>(
let dst = lv.mem_channels[0].value;
let cond = lv.mem_channels[1].value;
let filter = lv.op.jumps; // `JUMP` or `JUMPI`
let jumpdest_flag_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];
let is_jump = filter * (P::ONES - lv.opcode_bits[0]);
let is_jumpi = filter * lv.opcode_bits[0];

Expand Down Expand Up @@ -121,26 +120,34 @@ pub(crate) fn eval_packed_jump_jumpi<P: PackedField>(
// If we're jumping, then the high 7 limbs of the destination must be 0.
let dst_hi_sum: P = dst[1..].iter().copied().sum();
yield_constr.constraint(filter * jumps_lv.should_jump * dst_hi_sum);
// Check that the destination address holds a `JUMPDEST` instruction. Note that
// this constraint does not need to be conditioned on `should_jump` because
// no read takes place if we're not jumping, so we're free to set the
// channel to 1.
yield_constr.constraint(filter * (jumpdest_flag_channel.value[0] - P::ONES));

// Make sure that the JUMPDEST flag channel is constrained.
// Only need to read if we're about to jump and we're not in kernel mode.
yield_constr.constraint(
filter
* (jumpdest_flag_channel.used - jumps_lv.should_jump * (P::ONES - lv.is_kernel_mode)),
);
yield_constr.constraint(filter * (jumpdest_flag_channel.is_read - P::ONES));
yield_constr.constraint(filter * (jumpdest_flag_channel.addr_context - lv.context));
yield_constr.constraint(
filter
* (jumpdest_flag_channel.addr_segment
- P::Scalar::from_canonical_usize(Segment::JumpdestBits.unscale())),
);
yield_constr.constraint(filter * (jumpdest_flag_channel.addr_virtual - dst[0]));

// We skip jump destinations verification with `cdk_erigon`.
#[cfg(not(feature = "cdk_erigon"))]
{
let jumpdest_flag_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];

// Check that the destination address holds a `JUMPDEST` instruction. Note that
// this constraint does not need to be conditioned on `should_jump` because
// no read takes place if we're not jumping, so we're free to set the
// channel to 1.
yield_constr.constraint(filter * (jumpdest_flag_channel.value[0] - P::ONES));

// Make sure that the JUMPDEST flag channel is constrained.
// Only need to read if we're about to jump and we're not in kernel mode.
yield_constr.constraint(
filter
* (jumpdest_flag_channel.used
- jumps_lv.should_jump * (P::ONES - lv.is_kernel_mode)),
);
yield_constr.constraint(filter * (jumpdest_flag_channel.is_read - P::ONES));
yield_constr.constraint(filter * (jumpdest_flag_channel.addr_context - lv.context));
yield_constr.constraint(
filter
* (jumpdest_flag_channel.addr_segment
- P::Scalar::from_canonical_usize(Segment::JumpdestBits.unscale())),
);
yield_constr.constraint(filter * (jumpdest_flag_channel.addr_virtual - dst[0]));
}

// Disable unused memory channels
for &channel in &lv.mem_channels[2..NUM_GP_CHANNELS - 1] {
Expand Down Expand Up @@ -179,7 +186,6 @@ pub(crate) fn eval_ext_circuit_jump_jumpi<F: RichField + Extendable<D>, const D:
let dst = lv.mem_channels[0].value;
let cond = lv.mem_channels[1].value;
let filter = lv.op.jumps; // `JUMP` or `JUMPI`
let jumpdest_flag_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];
let one_extension = builder.one_extension();
let is_jump = builder.sub_extension(one_extension, lv.opcode_bits[0]);
let is_jump = builder.mul_extension(filter, is_jump);
Expand Down Expand Up @@ -281,50 +287,57 @@ pub(crate) fn eval_ext_circuit_jump_jumpi<F: RichField + Extendable<D>, const D:
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
// Check that the destination address holds a `JUMPDEST` instruction. Note that
// this constraint does not need to be conditioned on `should_jump` because
// no read takes place if we're not jumping, so we're free to set the
// channel to 1.
{
let constr = builder.mul_sub_extension(filter, jumpdest_flag_channel.value[0], filter);
yield_constr.constraint(builder, constr);
}

// Make sure that the JUMPDEST flag channel is constrained.
// Only need to read if we're about to jump and we're not in kernel mode.
// We skip jump destinations verification with `cdk_erigon`.
#[cfg(not(feature = "cdk_erigon"))]
{
let constr = builder.mul_sub_extension(
jumps_lv.should_jump,
lv.is_kernel_mode,
jumps_lv.should_jump,
);
let constr = builder.add_extension(jumpdest_flag_channel.used, constr);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(filter, jumpdest_flag_channel.is_read, filter);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.sub_extension(jumpdest_flag_channel.addr_context, lv.context);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.arithmetic_extension(
F::ONE,
-F::from_canonical_usize(Segment::JumpdestBits.unscale()),
filter,
jumpdest_flag_channel.addr_segment,
filter,
);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.sub_extension(jumpdest_flag_channel.addr_virtual, dst[0]);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let jumpdest_flag_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];

// Check that the destination address holds a `JUMPDEST` instruction. Note that
// this constraint does not need to be conditioned on `should_jump` because
// no read takes place if we're not jumping, so we're free to set the
// channel to 1.
{
let constr = builder.mul_sub_extension(filter, jumpdest_flag_channel.value[0], filter);
yield_constr.constraint(builder, constr);
}

// Make sure that the JUMPDEST flag channel is constrained.
// Only need to read if we're about to jump and we're not in kernel mode.
{
let constr = builder.mul_sub_extension(
jumps_lv.should_jump,
lv.is_kernel_mode,
jumps_lv.should_jump,
);
let constr = builder.add_extension(jumpdest_flag_channel.used, constr);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(filter, jumpdest_flag_channel.is_read, filter);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.sub_extension(jumpdest_flag_channel.addr_context, lv.context);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.arithmetic_extension(
F::ONE,
-F::from_canonical_usize(Segment::JumpdestBits.unscale()),
filter,
jumpdest_flag_channel.addr_segment,
filter,
);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.sub_extension(jumpdest_flag_channel.addr_virtual, dst[0]);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
}

// Disable unused memory channels
Expand Down
5 changes: 2 additions & 3 deletions evm_arithmetization/src/cpu/kernel/aggregator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,7 @@ use crate::cpu::kernel::parser::parse;

pub const NUMBER_KERNEL_FILES: usize = if cfg!(feature = "eth_mainnet") {
157
} else if cfg!(feature = "cdk_erigon") {
155
} else if cfg!(feature = "polygon_pos") {
} else if cfg!(feature = "cdk_erigon") || cfg!(feature = "polygon_pos") {
154
} else {
// unreachable
Expand Down Expand Up @@ -43,6 +41,7 @@ pub static KERNEL_FILES: [&str; NUMBER_KERNEL_FILES] = [
include_str!("asm/core/create_receipt.asm"),
include_str!("asm/core/gas.asm"),
include_str!("asm/core/intrinsic_gas.asm"),
#[cfg(not(feature = "cdk_erigon"))]
include_str!("asm/core/jumpdest_analysis.asm"),
include_str!("asm/core/nonce.asm"),
include_str!("asm/core/process_txn.asm"),
Expand Down
16 changes: 11 additions & 5 deletions evm_arithmetization/src/cpu/kernel/asm/core/call.asm
Original file line number Diff line number Diff line change
Expand Up @@ -386,11 +386,17 @@ call_too_deep:
SET_CONTEXT
%checkpoint // Checkpoint
%increment_call_depth
// Perform jumpdest analysis
%mload_context_metadata(@CTX_METADATA_CODE_SIZE)
GET_CONTEXT
// stack: ctx, code_size, retdest
%jumpdest_analysis

// We skip jumpdest analysis with `cdk_erigon`.
#[cfg(not(feature = cdk_erigon))]
{
// Perform jumpdest analysis
%mload_context_metadata(@CTX_METADATA_CODE_SIZE)
GET_CONTEXT
// stack: ctx, code_size, retdest
%jumpdest_analysis
}

PUSH 0 // jump dest
EXIT_KERNEL
// (Old context) stack: new_ctx
Expand Down
36 changes: 20 additions & 16 deletions evm_arithmetization/src/cpu/kernel/asm/core/exception.asm
Original file line number Diff line number Diff line change
Expand Up @@ -124,22 +124,26 @@ global exc_invalid_jumpi_destination:


global invalid_jump_jumpi_destination_common:
// We have a jump destination on the stack. We want to `PANIC` if it is valid, and jump to
// `fault_exception` if it is not. An address is a valid jump destination if it points to a
// `JUMPDEST` instruction. In practice, since in this implementation memory addresses are
// limited to 32 bits, we check two things:
// 1. the address is no more than 32 bits long, and
// 2. it points to a `JUMPDEST` instruction.
// stack: jump_dest
DUP1
%shr_const(32)
%jumpi(fault_exception) // This keeps one copy of jump_dest on the stack, but that's fine.
// jump_dest is a valid address; check if it points to a `JUMPDEST`.
DUP1
%verify_non_jumpdest
%mload_current(@SEGMENT_JUMPDEST_BITS)
// stack: is_valid_jumpdest
%jumpi(panic) // Trap should never have been entered.
// We skip jump destinations verification with `cdk_erigon`.
#[cfg(not(feature = cdk_erigon))]
{
// We have a jump destination on the stack. We want to `PANIC` if it is valid, and jump to
// `fault_exception` if it is not. An address is a valid jump destination if it points to a
// `JUMPDEST` instruction. In practice, since in this implementation memory addresses are
// limited to 32 bits, we check two things:
// 1. the address is no more than 32 bits long, and
// 2. it points to a `JUMPDEST` instruction.
// stack: jump_dest
DUP1
%shr_const(32)
%jumpi(fault_exception) // This keeps one copy of jump_dest on the stack, but that's fine.
// jump_dest is a valid address; check if it points to a `JUMPDEST`.
DUP1
%verify_non_jumpdest
%mload_current(@SEGMENT_JUMPDEST_BITS)
// stack: is_valid_jumpdest
%jumpi(panic) // Trap should never have been entered.
}
%jump(fault_exception)


Expand Down
Original file line number Diff line number Diff line change
@@ -1,14 +1,53 @@
#![cfg(not(feature = "cdk_erigon"))]

use std::collections::{BTreeSet, HashMap};

use anyhow::Result;
use ethereum_types::U256;
use plonky2::field::goldilocks_field::GoldilocksField as F;
use plonky2::hash::hash_types::RichField;

use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::interpreter::Interpreter;
use crate::cpu::kernel::opcodes::{get_opcode, get_push_opcode};
use crate::memory::segments::Segment;
use crate::witness::memory::MemoryAddress;
use crate::witness::operation::CONTEXT_SCALING_FACTOR;

/// Jumpdest-analysis helpers on the interpreter, used by the kernel tests.
impl<F: RichField> Interpreter<F> {
    /// Forwards externally computed jumpdest-analysis inputs to the
    /// underlying generation state.
    pub(crate) fn set_jumpdest_analysis_inputs(&mut self, jumps: HashMap<usize, BTreeSet<usize>>) {
        self.generation_state.set_jumpdest_analysis_inputs(jumps);
    }

    /// Returns the jumpdest bit stored at `offset` in the current context,
    /// or 0 when the `JumpdestBits` segment does not extend to that offset.
    pub(crate) fn get_jumpdest_bit(&self, offset: usize) -> U256 {
        let ctx = self.context();
        let bits_len = self.generation_state.memory.contexts[ctx].segments
            [Segment::JumpdestBits.unscale()]
        .content
        .len();

        if offset >= bits_len {
            return 0.into();
        }

        // Even though we are in the interpreter, `JumpdestBits` is not part of the
        // preinitialized segments, so no additional checks are needed when reading
        // the value from memory.
        self.generation_state.memory.get_with_init(MemoryAddress {
            context: ctx,
            segment: Segment::JumpdestBits.unscale(),
            virt: offset,
        })
    }

    /// Collects the whole `JumpdestBits` segment of `context` as booleans,
    /// with uninitialized cells defaulting to 0 (i.e. `false`).
    pub(crate) fn get_jumpdest_bits(&self, context: usize) -> Vec<bool> {
        let bits = &self.generation_state.memory.contexts[context].segments
            [Segment::JumpdestBits.unscale()]
        .content;
        bits.iter()
            .map(|bit| bit.unwrap_or_default().bit(0))
            .collect()
    }
}

#[test]
fn test_jumpdest_analysis() -> Result<()> {
// By default the interpreter will skip jumpdest analysis asm and compute
Expand Down
38 changes: 1 addition & 37 deletions evm_arithmetization/src/cpu/kernel/tests/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,11 +26,7 @@ mod signed_syscalls;
mod transaction_parsing;
mod transient_storage;

use std::{
collections::{BTreeSet, HashMap},
ops::Range,
str::FromStr,
};
use std::{ops::Range, str::FromStr};

use anyhow::Result;
use ethereum_types::U256;
Expand Down Expand Up @@ -248,10 +244,6 @@ impl<F: RichField> Interpreter<F> {
code.into_iter().map(|val| Some(U256::from(val))).collect();
}

pub(crate) fn set_jumpdest_analysis_inputs(&mut self, jumps: HashMap<usize, BTreeSet<usize>>) {
self.generation_state.set_jumpdest_analysis_inputs(jumps);
}

pub(crate) fn extract_kernel_memory(self, segment: Segment, range: Range<usize>) -> Vec<U256> {
let mut output: Vec<U256> = Vec::with_capacity(range.end);
for i in range {
Expand Down Expand Up @@ -312,34 +304,6 @@ impl<F: RichField> Interpreter<F> {
result
}

pub(crate) fn get_jumpdest_bit(&self, offset: usize) -> U256 {
if self.generation_state.memory.contexts[self.context()].segments
[Segment::JumpdestBits.unscale()]
.content
.len()
> offset
{
// Even though we are in the interpreter, `JumpdestBits` is not part of the
// preinitialized segments, so we don't need to carry out the additional checks
// when get the value from memory.
self.generation_state.memory.get_with_init(MemoryAddress {
context: self.context(),
segment: Segment::JumpdestBits.unscale(),
virt: offset,
})
} else {
0.into()
}
}

pub(crate) fn get_jumpdest_bits(&self, context: usize) -> Vec<bool> {
self.generation_state.memory.contexts[context].segments[Segment::JumpdestBits.unscale()]
.content
.iter()
.map(|x| x.unwrap_or_default().bit(0))
.collect()
}

pub(crate) fn set_is_kernel(&mut self, is_kernel: bool) {
self.generation_state.registers.is_kernel = is_kernel
}
Expand Down
Loading
Loading