diff --git a/core/src/alu/divrem/mod.rs b/core/src/alu/divrem/mod.rs
index f29aee24d0..4938106cf3 100644
--- a/core/src/alu/divrem/mod.rs
+++ b/core/src/alu/divrem/mod.rs
@@ -64,7 +64,6 @@ pub mod utils;
 use core::borrow::{Borrow, BorrowMut};
 use core::mem::size_of;
 
-use hashbrown::HashMap;
 use p3_air::{Air, AirBuilder, BaseAir};
 use p3_field::AbstractField;
@@ -76,7 +75,6 @@ use sp1_derive::AlignedBorrow;
 use crate::air::MachineAir;
 use crate::air::{SP1AirBuilder, Word};
 use crate::alu::divrem::utils::{get_msb, get_quotient_and_remainder, is_signed_operation};
-use crate::alu::{create_alu_lookups, AluEvent};
 use crate::bytes::event::ByteRecord;
 use crate::bytes::{ByteLookupEvent, ByteOpcode};
 use crate::disassembler::WORD_SIZE;
@@ -354,66 +352,7 @@ impl<F: PrimeField32> MachineAir<F> for DivRemChip {
                 }
 
                 // Insert the necessary multiplication & LT events.
-                //
-                // This generate_trace for div must be executed _before_ calling generate_trace for
-                // mul and LT upon which div depends. This ordering is critical as mul and LT
-                // require all the mul and LT events be added before we can call generate_trace.
                 {
-                    // Insert the absolute value computation events.
-                    {
-                        let mut add_events: Vec<AluEvent> = vec![];
-                        if cols.abs_c_alu_event == F::one() {
-                            add_events.push(AluEvent {
-                                lookup_id: event.sub_lookups[4],
-                                shard: event.shard,
-                                channel: event.channel,
-                                clk: event.clk,
-                                opcode: Opcode::ADD,
-                                a: 0,
-                                b: event.c,
-                                c: (event.c as i32).abs() as u32,
-                                sub_lookups: create_alu_lookups(),
-                            })
-                        }
-                        if cols.abs_rem_alu_event == F::one() {
-                            add_events.push(AluEvent {
-                                lookup_id: event.sub_lookups[5],
-                                shard: event.shard,
-                                channel: event.channel,
-                                clk: event.clk,
-                                opcode: Opcode::ADD,
-                                a: 0,
-                                b: remainder,
-                                c: (remainder as i32).abs() as u32,
-                                sub_lookups: create_alu_lookups(),
-                            })
-                        }
-                        let mut alu_events = HashMap::new();
-                        alu_events.insert(Opcode::ADD, add_events);
-                        output.add_alu_events(alu_events);
-                    }
-
-                    let mut lower_word = 0;
-                    for i in 0..WORD_SIZE {
-                        lower_word += (c_times_quotient[i] as u32) << (i * BYTE_SIZE);
-                    }
-
-                    let mut upper_word = 0;
-                    for i in 0..WORD_SIZE {
-                        upper_word += (c_times_quotient[WORD_SIZE + i] as u32) << (i * BYTE_SIZE);
-                    }
-
-                    let lower_multiplication = AluEvent {
-                        lookup_id: event.sub_lookups[0],
-                        shard: event.shard,
-                        channel: event.channel,
-                        clk: event.clk,
-                        opcode: Opcode::MUL,
-                        a: lower_word,
-                        c: event.c,
-                        b: quotient,
-                        sub_lookups: create_alu_lookups(),
-                    };
                     cols.lower_nonce = F::from_canonical_u32(
                         input
                             .nonce_lookup
@@ -421,25 +360,6 @@ impl<F: PrimeField32> MachineAir<F> for DivRemChip {
                             .copied()
                             .unwrap_or_default(),
                     );
-                    output.add_mul_event(lower_multiplication);
-
-                    let upper_multiplication = AluEvent {
-                        lookup_id: event.sub_lookups[1],
-                        shard: event.shard,
-                        channel: event.channel,
-                        clk: event.clk,
-                        opcode: {
-                            if is_signed_operation(event.opcode) {
-                                Opcode::MULH
-                            } else {
-                                Opcode::MULHU
-                            }
-                        },
-                        a: upper_word,
-                        c: event.c,
-                        b: quotient,
-                        sub_lookups: create_alu_lookups(),
-                    };
                     cols.upper_nonce = F::from_canonical_u32(
                         input
                             .nonce_lookup
@@ -447,8 +367,7 @@ impl<F: PrimeField32> MachineAir<F> for DivRemChip {
                             .copied()
                            .unwrap_or_default(),
                     );
-                    output.add_mul_event(upper_multiplication);
-                    let lt_event = if is_signed_operation(event.opcode) {
+                    if is_signed_operation(event.opcode) {
                         cols.abs_nonce = F::from_canonical_u32(
                             input
                                 .nonce_lookup
@@ -456,17 +375,6 @@ impl<F: PrimeField32> MachineAir<F> for DivRemChip {
                                 .copied()
                                 .unwrap_or_default(),
                         );
-                        AluEvent {
-                            lookup_id: event.sub_lookups[2],
-                            shard: event.shard,
-                            channel: event.channel,
-                            opcode: Opcode::SLTU,
-                            a: 1,
-                            b: (remainder as i32).abs() as u32,
-                            c: u32::max(1, (event.c as i32).abs() as u32),
-                            clk: event.clk,
-                            sub_lookups: create_alu_lookups(),
-                        }
                     } else {
                         cols.abs_nonce = F::from_canonical_u32(
                             input
@@ -475,22 +383,7 @@ impl<F: PrimeField32> MachineAir<F> for DivRemChip {
                                 .copied()
                                 .unwrap_or_default(),
                         );
-                        AluEvent {
-                            lookup_id: event.sub_lookups[3],
-                            shard: event.shard,
-                            channel: event.channel,
-                            opcode: Opcode::SLTU,
-                            a: 1,
-                            b: remainder,
-                            c: u32::max(1, event.c),
-                            clk: event.clk,
-                            sub_lookups: create_alu_lookups(),
-                        }
                     };
-
-                    if cols.remainder_check_multiplicity == F::one() {
-                        output.add_lt_event(lt_event);
-                    }
                 }
 
                 // Range check.
diff --git a/core/src/alu/divrem/utils.rs b/core/src/alu/divrem/utils.rs
index d71c35aad6..4985a7f619 100644
--- a/core/src/alu/divrem/utils.rs
+++ b/core/src/alu/divrem/utils.rs
@@ -1,4 +1,5 @@
-use crate::runtime::Opcode;
+use crate::alu::{create_alu_lookups, AluEvent};
+use crate::runtime::{Opcode, Runtime};
 
 /// Returns `true` if the given `opcode` is a signed operation.
 pub fn is_signed_operation(opcode: Opcode) -> bool {
@@ -28,3 +29,115 @@ pub fn get_quotient_and_remainder(b: u32, c: u32, opcode: Opcode) -> (u32, u32)
 pub const fn get_msb(a: u32) -> u8 {
     ((a >> 31) & 1) as u8
 }
+
+pub fn emit_divrem_alu_events(runtime: &mut Runtime, event: AluEvent) {
+    let (quotient, remainder) = get_quotient_and_remainder(event.b, event.c, event.opcode);
+    let c_msb = get_msb(event.c);
+    let rem_msb = get_msb(remainder);
+    let mut c_neg = 0;
+    let mut rem_neg = 0;
+    let is_signed_operation = is_signed_operation(event.opcode);
+    if is_signed_operation {
+        c_neg = c_msb; // same as abs_c_alu_event
+        rem_neg = rem_msb; // same as abs_rem_alu_event
+    }
+
+    if c_neg == 1 {
+        runtime.record.add_events.push(AluEvent {
+            lookup_id: event.sub_lookups[4],
+            shard: event.shard,
+            channel: event.channel,
+            clk: event.clk,
+            opcode: Opcode::ADD,
+            a: 0,
+            b: event.c,
+            c: (event.c as i32).abs() as u32,
+            sub_lookups: create_alu_lookups(),
+        });
+    }
+    if rem_neg == 1 {
+        runtime.record.add_events.push(AluEvent {
+            lookup_id: event.sub_lookups[5],
+            shard: event.shard,
+            channel: event.channel,
+            clk: event.clk,
+            opcode: Opcode::ADD,
+            a: 0,
+            b: remainder,
+            c: (remainder as i32).abs() as u32,
+            sub_lookups: create_alu_lookups(),
+        });
+    }
+
+    let c_times_quotient = {
+        if is_signed_operation {
+            (((quotient as i32) as i64) * ((event.c as i32) as i64)).to_le_bytes()
+        } else {
+            ((quotient as u64) * (event.c as u64)).to_le_bytes()
+        }
+    };
+    let lower_word = u32::from_le_bytes(c_times_quotient[0..4].try_into().unwrap());
+    let upper_word = u32::from_le_bytes(c_times_quotient[4..8].try_into().unwrap());
+
+    let lower_multiplication = AluEvent {
+        lookup_id: event.sub_lookups[0],
+        shard: event.shard,
+        channel: event.channel,
+        clk: event.clk,
+        opcode: Opcode::MUL,
+        a: lower_word,
+        c: event.c,
+        b: quotient,
+        sub_lookups: create_alu_lookups(),
+    };
+    runtime.record.mul_events.push(lower_multiplication);
+
+    let upper_multiplication = AluEvent {
+        lookup_id: event.sub_lookups[1],
+        shard: event.shard,
+        channel: event.channel,
+        clk: event.clk,
+        opcode: {
+            if is_signed_operation {
+                Opcode::MULH
+            } else {
+                Opcode::MULHU
+            }
+        },
+        a: upper_word,
+        c: event.c,
+        b: quotient,
+        sub_lookups: create_alu_lookups(),
+    };
+    runtime.record.mul_events.push(upper_multiplication);
+
+    let lt_event = if is_signed_operation {
+        AluEvent {
+            lookup_id: event.sub_lookups[2],
+            shard: event.shard,
+            channel: event.channel,
+            opcode: Opcode::SLTU,
+            a: 1,
+            b: (remainder as i32).abs() as u32,
+            c: u32::max(1, (event.c as i32).abs() as u32),
+            clk: event.clk,
+            sub_lookups: create_alu_lookups(),
+        }
+    } else {
+        AluEvent {
+            lookup_id: event.sub_lookups[3],
+            shard: event.shard,
+            channel: event.channel,
+            opcode: Opcode::SLTU,
+            a: 1,
+            b: remainder,
+            c: u32::max(1, event.c),
+            clk: event.clk,
+            sub_lookups: create_alu_lookups(),
+        }
+    };
+
+    if event.c != 0 {
+        runtime.record.lt_events.push(lt_event);
+    }
+}
diff --git a/core/src/cpu/trace.rs b/core/src/cpu/trace.rs
index c9425a7c3e..4845ef4569 100644
--- a/core/src/cpu/trace.rs
+++ b/core/src/cpu/trace.rs
@@ -16,14 +16,14 @@ use super::{CpuChip, CpuEvent};
 use crate::air::MachineAir;
 use crate::air::Word;
 use crate::alu::create_alu_lookups;
-use crate::alu::{self, AluEvent};
+use crate::alu::AluEvent;
 use crate::bytes::event::ByteRecord;
 use crate::bytes::{ByteLookupEvent, ByteOpcode};
 use crate::cpu::columns::CpuCols;
 use crate::cpu::trace::ByteOpcode::{U16Range, U8Range};
 use crate::disassembler::WORD_SIZE;
 use crate::memory::MemoryCols;
-use crate::runtime::{ExecutionRecord, Opcode, Program};
+use crate::runtime::{ExecutionRecord, Opcode, Program, Runtime};
 use crate::runtime::{MemoryRecordEnum, SyscallCode};
 
 impl<F: PrimeField32> MachineAir<F> for CpuChip {
@@ -70,29 +70,20 @@ impl<F: PrimeField32> MachineAir<F> for CpuChip {
     fn generate_dependencies(&self, input: &ExecutionRecord, output: &mut ExecutionRecord) {
         // Generate the trace rows for each event.
         let chunk_size = std::cmp::max(input.cpu_events.len() / num_cpus::get(), 1);
-        let (alu_events, blu_events): (Vec<_>, Vec<_>) = input
+        let blu_events: Vec<_> = input
             .cpu_events
             .par_chunks(chunk_size)
             .map(|ops: &[CpuEvent]| {
-                let mut alu = HashMap::new();
                 let mut blu: Vec<_> = Vec::with_capacity(ops.len() * 8);
                 ops.iter().for_each(|op| {
                     let mut row = [F::zero(); NUM_CPU_COLS];
                     let cols: &mut CpuCols<F> = row.as_mut_slice().borrow_mut();
-                    let (alu_events, blu_events) =
-                        self.event_to_row::<F>(op, &HashMap::new(), cols);
-                    alu_events.into_iter().for_each(|(key, value)| {
-                        alu.entry(key).or_insert(Vec::default()).extend(value);
-                    });
+                    let blu_events = self.event_to_row::<F>(op, &HashMap::new(), cols);
                     blu.extend(blu_events);
                 });
-                (alu, blu)
+                blu
             })
-            .unzip();
-
-        for alu_events_chunk in alu_events.into_iter() {
-            output.add_alu_events(alu_events_chunk);
-        }
+            .collect();
 
         let mut blu_events = blu_events.into_iter().flatten().collect::<Vec<_>>();
         blu_events.par_sort_unstable_by_key(|event| (event.shard, event.opcode));
@@ -108,14 +99,207 @@ impl<F: PrimeField32> MachineAir<F> for CpuChip {
 }
 
 impl CpuChip {
+    /// Given a CpuEvent, emit all ALU events that are derived from it.
+    pub fn event_to_alu_events(runtime: &mut Runtime, event: CpuEvent) {
+        if matches!(
+            event.instruction.opcode,
+            Opcode::LB
+                | Opcode::LH
+                | Opcode::LW
+                | Opcode::LBU
+                | Opcode::LHU
+                | Opcode::SB
+                | Opcode::SH
+                | Opcode::SW
+        ) {
+            let memory_addr = event.b.wrapping_add(event.c);
+            // Add event to ALU check to check that addr == b + c
+            let add_event = AluEvent {
+                lookup_id: event.memory_add_lookup_id,
+                shard: event.shard,
+                channel: event.channel,
+                clk: event.clk,
+                opcode: Opcode::ADD,
+                a: memory_addr,
+                b: event.b,
+                c: event.c,
+                sub_lookups: create_alu_lookups(),
+            };
+            runtime.record.add_events.push(add_event);
+            let addr_offset = (memory_addr % 4 as u32) as u8;
+            let mem_value = event.memory_record.unwrap().value();
+
+            if matches!(event.instruction.opcode, Opcode::LB | Opcode::LH) {
+                let (unsigned_mem_val, most_sig_mem_value_byte, sign_value) =
+                    match event.instruction.opcode {
+                        Opcode::LB => {
+                            let most_sig_mem_value_byte =
+                                mem_value.to_le_bytes()[addr_offset as usize];
+                            let sign_value = 256;
+                            (
+                                most_sig_mem_value_byte as u32,
+                                most_sig_mem_value_byte,
+                                sign_value,
+                            )
+                        }
+                        Opcode::LH => {
+                            let sign_value = 65536;
+                            let unsigned_mem_val = match (addr_offset >> 1) % 2 {
+                                0 => mem_value & 0x0000FFFF,
+                                1 => (mem_value & 0xFFFF0000) >> 16,
+                                _ => unreachable!(),
+                            };
+                            let most_sig_mem_value_byte = unsigned_mem_val.to_le_bytes()[1];
+                            (unsigned_mem_val, most_sig_mem_value_byte, sign_value)
+                        }
+                        _ => unreachable!(),
+                    };
+
+                if most_sig_mem_value_byte >> 7 & 0x01 == 1 {
+                    let sub_event = AluEvent {
+                        lookup_id: event.memory_sub_lookup_id,
+                        channel: event.channel,
+                        shard: event.shard,
+                        clk: event.clk,
+                        opcode: Opcode::SUB,
+                        a: event.a,
+                        b: unsigned_mem_val,
+                        c: sign_value,
+                        sub_lookups: create_alu_lookups(),
+                    };
+                    runtime.record.add_events.push(sub_event);
+                }
+            }
+        }
+
+        if event.instruction.is_branch_instruction() {
+            let a_eq_b = event.a == event.b;
+            let use_signed_comparison =
+                matches!(event.instruction.opcode, Opcode::BLT | Opcode::BGE);
+            let a_lt_b = if use_signed_comparison {
+                (event.a as i32) < (event.b as i32)
+            } else {
+                event.a < event.b
+            };
+            let a_gt_b = if use_signed_comparison {
+                (event.a as i32) > (event.b as i32)
+            } else {
+                event.a > event.b
+            };
+
+            let alu_op_code = if use_signed_comparison {
+                Opcode::SLT
+            } else {
+                Opcode::SLTU
+            };
+            // Add the ALU events for the comparisons
+            let lt_comp_event = AluEvent {
+                lookup_id: event.branch_lt_lookup_id,
+                shard: event.shard,
+                channel: event.channel,
+                clk: event.clk,
+                opcode: alu_op_code,
+                a: a_lt_b as u32,
+                b: event.a,
+                c: event.b,
+                sub_lookups: create_alu_lookups(),
+            };
+            let gt_comp_event = AluEvent {
+                lookup_id: event.branch_gt_lookup_id,
+                shard: event.shard,
+                channel: event.channel,
+                clk: event.clk,
+                opcode: alu_op_code,
+                a: a_gt_b as u32,
+                b: event.b,
+                c: event.a,
+                sub_lookups: create_alu_lookups(),
+            };
+            runtime.record.lt_events.push(lt_comp_event);
+            runtime.record.lt_events.push(gt_comp_event);
+            let branching = match event.instruction.opcode {
+                Opcode::BEQ => a_eq_b,
+                Opcode::BNE => !a_eq_b,
+                Opcode::BLT | Opcode::BLTU => a_lt_b,
+                Opcode::BGE | Opcode::BGEU => a_eq_b || a_gt_b,
+                _ => unreachable!(),
+            };
+            if branching {
+                let next_pc = event.pc.wrapping_add(event.c);
+                let add_event = AluEvent {
+                    lookup_id: event.branch_add_lookup_id,
+                    shard: event.shard,
+                    channel: event.channel,
+                    clk: event.clk,
+                    opcode: Opcode::ADD,
+                    a: next_pc,
+                    b: event.pc,
+                    c: event.c,
+                    sub_lookups: create_alu_lookups(),
+                };
+                runtime.record.add_events.push(add_event);
+            }
+        }
+
+        if event.instruction.is_jump_instruction() {
+            match event.instruction.opcode {
+                Opcode::JAL => {
+                    let next_pc = event.pc.wrapping_add(event.b);
+                    let add_event = AluEvent {
+                        lookup_id: event.jump_jal_lookup_id,
+                        shard: event.shard,
+                        channel: event.channel,
+                        clk: event.clk,
+                        opcode: Opcode::ADD,
+                        a: next_pc,
+                        b: event.pc,
+                        c: event.b,
+                        sub_lookups: create_alu_lookups(),
+                    };
+                    runtime.record.add_events.push(add_event);
+                }
+                Opcode::JALR => {
+                    let next_pc = event.b.wrapping_add(event.c);
+                    let add_event = AluEvent {
+                        lookup_id: event.jump_jalr_lookup_id,
+                        shard: event.shard,
+                        channel: event.channel,
+                        clk: event.clk,
+                        opcode: Opcode::ADD,
+                        a: next_pc,
+                        b: event.b,
+                        c: event.c,
+                        sub_lookups: create_alu_lookups(),
+                    };
+                    runtime.record.add_events.push(add_event);
+                }
+                _ => unreachable!(),
+            }
+        }
+
+        if matches!(event.instruction.opcode, Opcode::AUIPC) {
+            let add_event = AluEvent {
+                lookup_id: event.auipc_lookup_id,
+                shard: event.shard,
+                channel: event.channel,
+                clk: event.clk,
+                opcode: Opcode::ADD,
+                a: event.a,
+                b: event.pc,
+                c: event.b,
+                sub_lookups: create_alu_lookups(),
+            };
+            runtime.record.add_events.push(add_event);
+        }
+    }
+
     /// Create a row from an event.
     fn event_to_row<F: PrimeField32>(
         &self,
         event: &CpuEvent,
         nonce_lookup: &HashMap<u128, u32>,
         cols: &mut CpuCols<F>,
-    ) -> (HashMap<Opcode, Vec<alu::AluEvent>>, Vec<ByteLookupEvent>) {
-        let mut new_alu_events = HashMap::new();
+    ) -> Vec<ByteLookupEvent> {
         let mut new_blu_events = Vec::new();
 
         // Populate shard and clk columns.
@@ -190,16 +374,10 @@ impl CpuChip {
         }
 
         // Populate memory, branch, jump, and auipc specific fields.
-        self.populate_memory(
-            cols,
-            event,
-            &mut new_alu_events,
-            &mut new_blu_events,
-            nonce_lookup,
-        );
-        self.populate_branch(cols, event, &mut new_alu_events, nonce_lookup);
-        self.populate_jump(cols, event, &mut new_alu_events, nonce_lookup);
-        self.populate_auipc(cols, event, &mut new_alu_events, nonce_lookup);
+        self.populate_memory(cols, event, &mut new_blu_events, nonce_lookup);
+        self.populate_branch(cols, event, nonce_lookup);
+        self.populate_jump(cols, event, nonce_lookup);
+        self.populate_auipc(cols, event, nonce_lookup);
         let is_halt = self.populate_ecall(cols, event, nonce_lookup);
 
         cols.is_sequential_instr = F::from_bool(
@@ -211,7 +389,7 @@ impl CpuChip {
         // Assert that the instruction is not a no-op.
         cols.is_real = F::one();
 
-        (new_alu_events, new_blu_events)
+        new_blu_events
     }
 
     /// Populates the shard, channel, and clk related rows.
@@ -266,7 +444,6 @@ impl CpuChip {
         &self,
         cols: &mut CpuCols<F>,
         event: &CpuEvent,
-        new_alu_events: &mut HashMap<Opcode, Vec<alu::AluEvent>>,
         new_blu_events: &mut Vec<ByteLookupEvent>,
         nonce_lookup: &HashMap<u128, u32>,
     ) {
@@ -297,23 +474,6 @@ impl CpuChip {
         let aligned_addr_ls_byte = (aligned_addr & 0x000000FF) as u8;
         let bits: [bool; 8] = array::from_fn(|i| aligned_addr_ls_byte & (1 << i) != 0);
         memory_columns.aa_least_sig_byte_decomp = array::from_fn(|i| F::from_bool(bits[i + 2]));
-
-        // Add event to ALU check to check that addr == b + c
-        let add_event = AluEvent {
-            lookup_id: event.memory_add_lookup_id,
-            shard: event.shard,
-            channel: event.channel,
-            clk: event.clk,
-            opcode: Opcode::ADD,
-            a: memory_addr,
-            b: event.b,
-            c: event.c,
-            sub_lookups: create_alu_lookups(),
-        };
-        new_alu_events
-            .entry(Opcode::ADD)
-            .and_modify(|op_new_events| op_new_events.push(add_event))
-            .or_insert(vec![add_event]);
 
         memory_columns.addr_word_nonce = F::from_canonical_u32(
             nonce_lookup
                 .get(&event.memory_add_lookup_id)
                 .copied()
                 .unwrap_or_default(),
         );
@@ -355,15 +515,11 @@ impl CpuChip {
 
         // For the signed load instructions, we need to check if the loaded value is negative.
         if matches!(event.instruction.opcode, Opcode::LB | Opcode::LH) {
-            let most_sig_mem_value_byte: u8;
-            let sign_value: u32;
-
-            if matches!(event.instruction.opcode, Opcode::LB) {
-                sign_value = 256;
-                most_sig_mem_value_byte = cols.unsigned_mem_val.to_u32().to_le_bytes()[0];
+            let most_sig_mem_value_byte = if matches!(event.instruction.opcode, Opcode::LB) {
+                cols.unsigned_mem_val.to_u32().to_le_bytes()[0]
             } else {
                 // LHU case
-                sign_value = 65536;
-                most_sig_mem_value_byte = cols.unsigned_mem_val.to_u32().to_le_bytes()[1];
+                cols.unsigned_mem_val.to_u32().to_le_bytes()[1]
             };
 
             for i in (0..8).rev() {
@@ -372,28 +528,12 @@ impl CpuChip {
             }
 
             if memory_columns.most_sig_byte_decomp[7] == F::one() {
                 cols.mem_value_is_neg = F::one();
-                let sub_event = AluEvent {
-                    lookup_id: event.memory_sub_lookup_id,
-                    channel: event.channel,
-                    shard: event.shard,
-                    clk: event.clk,
-                    opcode: Opcode::SUB,
-                    a: event.a,
-                    b: cols.unsigned_mem_val.to_u32(),
-                    c: sign_value,
-                    sub_lookups: create_alu_lookups(),
-                };
                 cols.unsigned_mem_val_nonce = F::from_canonical_u32(
                     nonce_lookup
                         .get(&event.memory_sub_lookup_id)
                         .copied()
                         .unwrap_or_default(),
                 );
-
-                new_alu_events
-                    .entry(Opcode::SUB)
-                    .and_modify(|op_new_events| op_new_events.push(sub_event))
-                    .or_insert(vec![sub_event]);
             }
         }
     }
@@ -418,7 +558,6 @@ impl CpuChip {
         &self,
         cols: &mut CpuCols<F>,
         event: &CpuEvent,
-        alu_events: &mut HashMap<Opcode, Vec<alu::AluEvent>>,
         nonce_lookup: &HashMap<u128, u32>,
     ) {
         if event.instruction.is_branch_instruction() {
@@ -440,24 +579,6 @@ impl CpuChip {
                 event.a > event.b
             };
 
-            let alu_op_code = if use_signed_comparison {
-                Opcode::SLT
-            } else {
-                Opcode::SLTU
-            };
-
-            // Add the ALU events for the comparisons
-            let lt_comp_event = AluEvent {
-                lookup_id: event.branch_lt_lookup_id,
-                shard: event.shard,
-                channel: event.channel,
-                clk: event.clk,
-                opcode: alu_op_code,
-                a: a_lt_b as u32,
-                b: event.a,
-                c: event.b,
-                sub_lookups: create_alu_lookups(),
-            };
             branch_columns.a_lt_b_nonce = F::from_canonical_u32(
                 nonce_lookup
                     .get(&event.branch_lt_lookup_id)
                     .copied()
                     .unwrap_or_default(),
             );
 
-            alu_events
-                .entry(alu_op_code)
-                .and_modify(|op_new_events| op_new_events.push(lt_comp_event))
-                .or_insert(vec![lt_comp_event]);
-
-            let gt_comp_event = AluEvent {
-                lookup_id: event.branch_gt_lookup_id,
-                shard: event.shard,
-                channel: event.channel,
-                clk: event.clk,
-                opcode: alu_op_code,
-                a: a_gt_b as u32,
-                b: event.b,
-                c: event.a,
-                sub_lookups: create_alu_lookups(),
-            };
             branch_columns.a_gt_b_nonce = F::from_canonical_u32(
                 nonce_lookup
                     .get(&event.branch_gt_lookup_id)
@@ -488,11 +593,6 @@ impl CpuChip {
                     .copied()
                     .unwrap_or_default(),
             );
 
-            alu_events
-                .entry(alu_op_code)
-                .and_modify(|op_new_events| op_new_events.push(gt_comp_event))
-                .or_insert(vec![gt_comp_event]);
-
             branch_columns.a_eq_b = F::from_bool(a_eq_b);
             branch_columns.a_lt_b = F::from_bool(a_lt_b);
             branch_columns.a_gt_b = F::from_bool(a_gt_b);
@@ -513,29 +613,12 @@ impl CpuChip {
             if branching {
                 cols.branching = F::one();
-
-                let add_event = AluEvent {
-                    lookup_id: event.branch_add_lookup_id,
-                    shard: event.shard,
-                    channel: event.channel,
-                    clk: event.clk,
-                    opcode: Opcode::ADD,
-                    a: next_pc,
-                    b: event.pc,
-                    c: event.c,
-                    sub_lookups: create_alu_lookups(),
-                };
                 branch_columns.next_pc_nonce = F::from_canonical_u32(
                     nonce_lookup
                         .get(&event.branch_add_lookup_id)
                         .copied()
                         .unwrap_or_default(),
                 );
-
-                alu_events
-                    .entry(Opcode::ADD)
-                    .and_modify(|op_new_events| op_new_events.push(add_event))
-                    .or_insert(vec![add_event]);
             } else {
                 cols.not_branching = F::one();
             }
@@ -547,7 +630,6 @@ impl CpuChip {
         &self,
         cols: &mut CpuCols<F>,
         event: &CpuEvent,
-        alu_events: &mut HashMap<Opcode, Vec<alu::AluEvent>>,
         nonce_lookup: &HashMap<u128, u32>,
     ) {
         if event.instruction.is_jump_instruction() {
@@ -561,58 +643,24 @@ impl CpuChip {
                     jump_columns.pc_range_checker.populate(event.pc);
                     jump_columns.next_pc = Word::from(next_pc);
                     jump_columns.next_pc_range_checker.populate(next_pc);
-
-                    let add_event = AluEvent {
-                        lookup_id: event.jump_jal_lookup_id,
-                        shard: event.shard,
-                        channel: event.channel,
-                        clk: event.clk,
-                        opcode: Opcode::ADD,
-                        a: next_pc,
-                        b: event.pc,
-                        c: event.b,
-                        sub_lookups: create_alu_lookups(),
-                    };
                     jump_columns.jal_nonce = F::from_canonical_u32(
                         nonce_lookup
                             .get(&event.jump_jal_lookup_id)
                             .copied()
                             .unwrap_or_default(),
                     );
-
-                    alu_events
-                        .entry(Opcode::ADD)
-                        .and_modify(|op_new_events| op_new_events.push(add_event))
-                        .or_insert(vec![add_event]);
                 }
                 Opcode::JALR => {
                     let next_pc = event.b.wrapping_add(event.c);
                     jump_columns.op_a_range_checker.populate(event.a);
                     jump_columns.next_pc = Word::from(next_pc);
                     jump_columns.next_pc_range_checker.populate(next_pc);
-
-                    let add_event = AluEvent {
-                        lookup_id: event.jump_jalr_lookup_id,
-                        shard: event.shard,
-                        channel: event.channel,
-                        clk: event.clk,
-                        opcode: Opcode::ADD,
-                        a: next_pc,
-                        b: event.b,
-                        c: event.c,
-                        sub_lookups: create_alu_lookups(),
-                    };
                     jump_columns.jalr_nonce = F::from_canonical_u32(
                         nonce_lookup
                             .get(&event.jump_jalr_lookup_id)
                             .copied()
                             .unwrap_or_default(),
                     );
-
-                    alu_events
-                        .entry(Opcode::ADD)
-                        .and_modify(|op_new_events| op_new_events.push(add_event))
-                        .or_insert(vec![add_event]);
                 }
                 _ => unreachable!(),
             }
@@ -624,7 +672,6 @@ impl CpuChip {
         &self,
         cols: &mut CpuCols<F>,
         event: &CpuEvent,
-        alu_events: &mut HashMap<Opcode, Vec<alu::AluEvent>>,
         nonce_lookup: &HashMap<u128, u32>,
     ) {
         if matches!(event.instruction.opcode, Opcode::AUIPC) {
@@ -632,29 +679,12 @@ impl CpuChip {
             auipc_columns.pc = Word::from(event.pc);
             auipc_columns.pc_range_checker.populate(event.pc);
-
-            let add_event = AluEvent {
-                lookup_id: event.auipc_lookup_id,
-                shard: event.shard,
-                channel: event.channel,
-                clk: event.clk,
-                opcode: Opcode::ADD,
-                a: event.a,
-                b: event.pc,
-                c: event.b,
-                sub_lookups: create_alu_lookups(),
-            };
             auipc_columns.auipc_nonce = F::from_canonical_u32(
                 nonce_lookup
                     .get(&event.auipc_lookup_id)
                     .copied()
                     .unwrap_or_default(),
             );
-
-            alu_events
-                .entry(Opcode::ADD)
-                .and_modify(|op_new_events| op_new_events.push(add_event))
-                .or_insert(vec![add_event]);
         }
     }
diff --git a/core/src/runtime/mod.rs b/core/src/runtime/mod.rs
index c18300ccb2..201a23ae5e 100644
--- a/core/src/runtime/mod.rs
+++ b/core/src/runtime/mod.rs
@@ -42,6 +42,7 @@ use crate::alu::create_alu_lookups;
 use crate::alu::divrem;
 use crate::bytes::NUM_BYTE_LOOKUP_CHANNELS;
 use crate::memory::MemoryInitializeFinalizeEvent;
+use crate::stark::CpuChip;
 use crate::utils::SP1CoreOpts;
 use crate::{alu::AluEvent, cpu::CpuEvent};
 
@@ -464,120 +465,7 @@ impl<'a> Runtime<'a> {
         };
 
         self.record.cpu_events.push(cpu_event);
-    }
-
-    fn emit_divrem_alu(&mut self, event: AluEvent) {
-        let (quotient, remainder) =
-            divrem::utils::get_quotient_and_remainder(event.b, event.c, event.opcode);
-        let c_msb = divrem::utils::get_msb(event.c);
-        let rem_msb = divrem::utils::get_msb(remainder);
-        let mut c_neg = 0;
-        let mut rem_neg = 0;
-        let is_signed_operation = divrem::utils::is_signed_operation(event.opcode);
-        if is_signed_operation {
-            c_neg = c_msb; // same as abs_c_alu_event
-            rem_neg = rem_msb; // same as abs_rem_alu_event
-        }
-
-        // Get all the ALU events for the division and remainder operations.
-        if c_neg == 1 {
-            self.record.add_events.push(AluEvent {
-                lookup_id: event.sub_lookups[4],
-                shard: event.shard,
-                channel: event.channel,
-                clk: event.clk,
-                opcode: Opcode::ADD,
-                a: 0,
-                b: event.c,
-                c: (event.c as i32).abs() as u32,
-                sub_lookups: create_alu_lookups(),
-            });
-        }
-        if rem_neg == 1 {
-            self.record.add_events.push(AluEvent {
-                lookup_id: event.sub_lookups[5],
-                shard: event.shard,
-                channel: event.channel,
-                clk: event.clk,
-                opcode: Opcode::ADD,
-                a: 0,
-                b: remainder,
-                c: (remainder as i32).abs() as u32,
-                sub_lookups: create_alu_lookups(),
-            });
-        }
-
-        let c_times_quotient = {
-            if is_signed_operation {
-                (((quotient as i32) as i64) * ((event.c as i32) as i64)).to_le_bytes()
-            } else {
-                ((quotient as u64) * (event.c as u64)).to_le_bytes()
-            }
-        };
-        let lower_word = u32::from_le_bytes(c_times_quotient[0..4].try_into().unwrap());
-        let upper_word = u32::from_le_bytes(c_times_quotient[4..8].try_into().unwrap());
-
-        let lower_multiplication = AluEvent {
-            lookup_id: event.sub_lookups[0],
-            shard: event.shard,
-            channel: event.channel,
-            clk: event.clk,
-            opcode: Opcode::MUL,
-            a: lower_word,
-            c: event.c,
-            b: quotient,
-            sub_lookups: create_alu_lookups(),
-        };
-        self.record.mul_events.push(lower_multiplication);
-
-        let upper_multiplication = AluEvent {
-            lookup_id: event.sub_lookups[1],
-            shard: event.shard,
-            channel: event.channel,
-            clk: event.clk,
-            opcode: {
-                if is_signed_operation {
-                    Opcode::MULH
-                } else {
-                    Opcode::MULHU
-                }
-            },
-            a: upper_word,
-            c: event.c,
-            b: quotient,
-            sub_lookups: create_alu_lookups(),
-        };
-        self.record.mul_events.push(upper_multiplication);
-
-        let lt_event = if is_signed_operation {
-            AluEvent {
-                lookup_id: event.sub_lookups[2],
-                shard: event.shard,
-                channel: event.channel,
-                opcode: Opcode::SLTU,
-                a: 1,
-                b: (remainder as i32).abs() as u32,
-                c: u32::max(1, (event.c as i32).abs() as u32),
-                clk: event.clk,
-                sub_lookups: create_alu_lookups(),
-            }
-        } else {
-            AluEvent {
-                lookup_id: event.sub_lookups[3],
-                shard: event.shard,
-                channel: event.channel,
-                opcode: Opcode::SLTU,
-                a: 1,
-                b: remainder,
-                c: u32::max(1, event.c),
-                clk: event.clk,
-                sub_lookups: create_alu_lookups(),
-            }
-        };
-
-        if event.c != 0 {
-            self.record.lt_events.push(lt_event);
-        }
+        CpuChip::event_to_alu_events(self, cpu_event);
     }
 
     /// Emit an ALU event.
@@ -617,6 +505,7 @@ impl<'a> Runtime<'a> {
             }
             Opcode::DIVU | Opcode::REMU | Opcode::DIV | Opcode::REM => {
                 self.record.divrem_events.push(event);
+                divrem::utils::emit_divrem_alu_events(self, event);
             }
             _ => {}
         }
diff --git a/core/src/stark/machine.rs b/core/src/stark/machine.rs
index 3c80832343..43a9760ed4 100644
--- a/core/src/stark/machine.rs
+++ b/core/src/stark/machine.rs
@@ -715,7 +715,6 @@ pub mod tests {
     }
 
     #[test]
-    #[ignore]
     fn test_ssz_withdrawal() {
         let program = ssz_withdrawals_program();
         run_test(program).unwrap();
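
Reviewer note (not part of the patch): a minimal standalone sketch of the arithmetic that the emitted MUL/MULH/SLTU events pin down in emit_divrem_alu_events for a signed DIV. The concrete values (b = -7, c = 3) are arbitrary and the snippet uses only std Rust, so it does not depend on any sp1-core types.

    fn main() {
        // Signed DIV example: b = -7, c = 3 => quotient = -2, remainder = -1 (truncating division).
        let (b, c): (u32, u32) = ((-7i32) as u32, 3u32);
        let quotient = ((b as i32) / (c as i32)) as u32;
        let remainder = ((b as i32) % (c as i32)) as u32;

        // MUL / MULH operands: c * quotient split into lower and upper 32-bit words,
        // exactly as emit_divrem_alu_events computes c_times_quotient.
        let prod = ((quotient as i32) as i64) * ((c as i32) as i64);
        let bytes = prod.to_le_bytes();
        let lower_word = u32::from_le_bytes(bytes[0..4].try_into().unwrap());
        let upper_word = u32::from_le_bytes(bytes[4..8].try_into().unwrap());
        assert_eq!(lower_word.wrapping_add(remainder), b); // b == c * q + r (mod 2^32)
        assert_eq!(upper_word, 0xFFFF_FFFF); // sign extension of the negative product

        // SLTU operands: |remainder| < max(1, |c|) must hold, hence a == 1 in the emitted event.
        let abs_rem = (remainder as i32).unsigned_abs();
        let abs_c = u32::max(1, (c as i32).unsigned_abs());
        assert!(abs_rem < abs_c);
    }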
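
A similar standalone check for the signed-load path in event_to_alu_events (LB shown): when the top bit of the loaded byte is set, the emitted SUB event asserts that the sign-extended register value equals the unsigned memory value minus the sign constant (256 for LB, 65536 for LH), modulo 2^32. The byte value 0xF0 below is illustrative only.

    fn main() {
        // LB loads byte 0xF0; the runtime records the sign-extended value in event.a.
        let unsigned_mem_val: u32 = 0xF0;
        let sign_value: u32 = 256; // 65536 for LH
        let a: u32 = 0xF0u8 as i8 as i32 as u32; // 0xFFFF_FFF0

        // The SUB event checks a == unsigned_mem_val - sign_value (mod 2^32).
        assert_eq!(unsigned_mem_val.wrapping_sub(sign_value), a);
    }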