// execution_trace.cpp — Barretenberg, the ZK-SNARK library at the core of Aztec.
// (Recovered from a Doxygen source listing; navigation chrome removed.)
2
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <ranges>
#include <stdexcept>
9
34
39
40namespace bb::avm2::tracegen {
41namespace {
42
43constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_COLUMNS = {
44 C::execution_op_0_, C::execution_op_1_, C::execution_op_2_, C::execution_op_3_,
45 C::execution_op_4_, C::execution_op_5_, C::execution_op_6_,
46};
47constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_IS_ADDRESS_COLUMNS = {
48 C::execution_sel_op_is_address_0_, C::execution_sel_op_is_address_1_, C::execution_sel_op_is_address_2_,
49 C::execution_sel_op_is_address_3_, C::execution_sel_op_is_address_4_, C::execution_sel_op_is_address_5_,
50 C::execution_sel_op_is_address_6_,
51};
52constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_AFTER_RELATIVE_COLUMNS = {
53 C::execution_op_after_relative_0_, C::execution_op_after_relative_1_, C::execution_op_after_relative_2_,
54 C::execution_op_after_relative_3_, C::execution_op_after_relative_4_, C::execution_op_after_relative_5_,
55 C::execution_op_after_relative_6_,
56};
57constexpr std::array<C, AVM_MAX_OPERANDS> RESOLVED_OPERAND_COLUMNS = {
58 C::execution_rop_0_, C::execution_rop_1_, C::execution_rop_2_, C::execution_rop_3_,
59 C::execution_rop_4_, C::execution_rop_5_, C::execution_rop_6_,
60};
61constexpr std::array<C, AVM_MAX_OPERANDS> RESOLVED_OPERAND_TAG_COLUMNS = {
62 C::execution_rop_tag_0_, C::execution_rop_tag_1_, C::execution_rop_tag_2_, C::execution_rop_tag_3_,
63 C::execution_rop_tag_4_, C::execution_rop_tag_5_, C::execution_rop_tag_6_,
64};
65constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_SHOULD_APPLY_INDIRECTION_COLUMNS = {
66 C::execution_sel_should_apply_indirection_0_, C::execution_sel_should_apply_indirection_1_,
67 C::execution_sel_should_apply_indirection_2_, C::execution_sel_should_apply_indirection_3_,
68 C::execution_sel_should_apply_indirection_4_, C::execution_sel_should_apply_indirection_5_,
69 C::execution_sel_should_apply_indirection_6_,
70};
71constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_RELATIVE_OVERFLOW_COLUMNS = {
72 C::execution_sel_relative_overflow_0_, C::execution_sel_relative_overflow_1_, C::execution_sel_relative_overflow_2_,
73 C::execution_sel_relative_overflow_3_, C::execution_sel_relative_overflow_4_, C::execution_sel_relative_overflow_5_,
74 C::execution_sel_relative_overflow_6_,
75};
76constexpr std::array<C, AVM_MAX_OPERANDS> OPERAND_IS_RELATIVE_VALID_BASE_COLUMNS = {
77 C::execution_sel_op_do_overflow_check_0_, C::execution_sel_op_do_overflow_check_1_,
78 C::execution_sel_op_do_overflow_check_2_, C::execution_sel_op_do_overflow_check_3_,
79 C::execution_sel_op_do_overflow_check_4_, C::execution_sel_op_do_overflow_check_5_,
80 C::execution_sel_op_do_overflow_check_6_,
81};
82constexpr size_t TOTAL_INDIRECT_BITS = 16;
83static_assert(static_cast<size_t>(AVM_MAX_OPERANDS) * 2 <= TOTAL_INDIRECT_BITS);
84constexpr std::array<C, TOTAL_INDIRECT_BITS / 2> OPERAND_IS_RELATIVE_WIRE_COLUMNS = {
85 C::execution_sel_op_is_relative_wire_0_, C::execution_sel_op_is_relative_wire_1_,
86 C::execution_sel_op_is_relative_wire_2_, C::execution_sel_op_is_relative_wire_3_,
87 C::execution_sel_op_is_relative_wire_4_, C::execution_sel_op_is_relative_wire_5_,
88 C::execution_sel_op_is_relative_wire_6_, C::execution_sel_op_is_relative_wire_7_,
89
90};
91constexpr std::array<C, TOTAL_INDIRECT_BITS / 2> OPERAND_IS_INDIRECT_WIRE_COLUMNS = {
92 C::execution_sel_op_is_indirect_wire_0_, C::execution_sel_op_is_indirect_wire_1_,
93 C::execution_sel_op_is_indirect_wire_2_, C::execution_sel_op_is_indirect_wire_3_,
94 C::execution_sel_op_is_indirect_wire_4_, C::execution_sel_op_is_indirect_wire_5_,
95 C::execution_sel_op_is_indirect_wire_6_, C::execution_sel_op_is_indirect_wire_7_,
96};
97
98constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_COLUMNS = {
99 C::execution_register_0_, C::execution_register_1_, C::execution_register_2_,
100 C::execution_register_3_, C::execution_register_4_, C::execution_register_5_,
101};
102constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_MEM_TAG_COLUMNS = {
103 C::execution_mem_tag_reg_0_, C::execution_mem_tag_reg_1_, C::execution_mem_tag_reg_2_,
104 C::execution_mem_tag_reg_3_, C::execution_mem_tag_reg_4_, C::execution_mem_tag_reg_5_,
105};
106constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_IS_WRITE_COLUMNS = {
107 C::execution_rw_reg_0_, C::execution_rw_reg_1_, C::execution_rw_reg_2_,
108 C::execution_rw_reg_3_, C::execution_rw_reg_4_, C::execution_rw_reg_5_,
109};
110constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_MEM_OP_COLUMNS = {
111 C::execution_sel_mem_op_reg_0_, C::execution_sel_mem_op_reg_1_, C::execution_sel_mem_op_reg_2_,
112 C::execution_sel_mem_op_reg_3_, C::execution_sel_mem_op_reg_4_, C::execution_sel_mem_op_reg_5_,
113};
114constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_EXPECTED_TAG_COLUMNS = {
115 C::execution_expected_tag_reg_0_, C::execution_expected_tag_reg_1_, C::execution_expected_tag_reg_2_,
116 C::execution_expected_tag_reg_3_, C::execution_expected_tag_reg_4_, C::execution_expected_tag_reg_5_,
117};
118constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_TAG_CHECK_COLUMNS = {
119 C::execution_sel_tag_check_reg_0_, C::execution_sel_tag_check_reg_1_, C::execution_sel_tag_check_reg_2_,
120 C::execution_sel_tag_check_reg_3_, C::execution_sel_tag_check_reg_4_, C::execution_sel_tag_check_reg_5_,
121};
122constexpr std::array<C, AVM_MAX_REGISTERS> REGISTER_OP_REG_EFFECTIVE_COLUMNS = {
123 C::execution_sel_op_reg_effective_0_, C::execution_sel_op_reg_effective_1_, C::execution_sel_op_reg_effective_2_,
124 C::execution_sel_op_reg_effective_3_, C::execution_sel_op_reg_effective_4_, C::execution_sel_op_reg_effective_5_,
125};
126
134C get_execution_opcode_selector(ExecutionOpCode exec_opcode)
135{
136 switch (exec_opcode) {
138 return C::execution_sel_execute_get_env_var;
140 return C::execution_sel_execute_mov;
142 return C::execution_sel_execute_jump;
144 return C::execution_sel_execute_jumpi;
146 return C::execution_sel_execute_call;
148 return C::execution_sel_execute_static_call;
150 return C::execution_sel_execute_internal_call;
152 return C::execution_sel_execute_internal_return;
154 return C::execution_sel_execute_return;
156 return C::execution_sel_execute_revert;
158 return C::execution_sel_execute_success_copy;
160 return C::execution_sel_execute_returndata_size;
162 return C::execution_sel_execute_debug_log;
164 return C::execution_sel_execute_sload;
166 return C::execution_sel_execute_sstore;
168 return C::execution_sel_execute_notehash_exists;
170 return C::execution_sel_execute_emit_notehash;
172 return C::execution_sel_execute_l1_to_l2_message_exists;
174 return C::execution_sel_execute_nullifier_exists;
176 return C::execution_sel_execute_emit_nullifier;
178 return C::execution_sel_execute_send_l2_to_l1_msg;
179 default:
180 throw std::runtime_error("Execution opcode does not have a corresponding selector");
181 }
182}
183
187struct FailingContexts {
188 bool app_logic_failure = false;
189 bool teardown_failure = false;
192 unordered_flat_set<uint32_t> does_context_fail;
193};
194
206FailingContexts preprocess_for_discard(
208{
209 FailingContexts dying_info;
210
211 // Preprocessing pass 1: find the events that exit the app logic and teardown phases
212 for (const auto& ex_event : ex_events) {
213 bool is_exit = ex_event.is_exit();
214 bool is_top_level = ex_event.after_context_event.parent_id == 0;
215
216 if (is_exit && is_top_level) {
217 // TODO(dbanks12): confirm this should be after_context_event and not before_context_event
218 if (ex_event.after_context_event.phase == TransactionPhase::APP_LOGIC) {
219 dying_info.app_logic_failure = ex_event.is_failure();
220 dying_info.app_logic_exit_context_id = ex_event.after_context_event.id;
221 } else if (ex_event.after_context_event.phase == TransactionPhase::TEARDOWN) {
222 dying_info.teardown_failure = ex_event.is_failure();
223 dying_info.teardown_exit_context_id = ex_event.after_context_event.id;
224 break; // Teardown is the last phase we care about
225 }
226 }
227 }
228
229 // Preprocessing pass 2: find all contexts that fail and mark them
230 for (const auto& ex_event : ex_events) {
231 if (ex_event.is_failure()) {
232 dying_info.does_context_fail.insert(ex_event.after_context_event.id);
233 }
234 }
235
236 return dying_info;
237}
238
246bool is_phase_discarded(TransactionPhase phase, const FailingContexts& failures)
247{
248 // Note that app logic also gets discarded if teardown failures
249 return (phase == TransactionPhase::APP_LOGIC && (failures.app_logic_failure || failures.teardown_failure)) ||
250 (phase == TransactionPhase::TEARDOWN && failures.teardown_failure);
251}
252
260uint32_t dying_context_for_phase(TransactionPhase phase, const FailingContexts& failures)
261{
262 assert((phase == TransactionPhase::APP_LOGIC || phase == TransactionPhase::TEARDOWN) &&
263 "Execution events must have app logic or teardown phase");
264
265 switch (phase) {
267 if (failures.app_logic_failure) {
268 return failures.app_logic_exit_context_id;
269 }
270
271 // Note that app logic also gets discarded if teardown failures
272 if (failures.teardown_failure) {
273 return failures.teardown_exit_context_id;
274 }
275
276 return 0;
277 }
279 return failures.teardown_failure ? failures.teardown_exit_context_id : 0;
280 default:
281 __builtin_unreachable(); // tell the compiler "we never reach here"
282 }
283}
284
285} // namespace
286
289{
290 uint32_t row = 1; // We start from row 1 because this trace contains shifted columns.
291
292 // Preprocess events to determine which contexts will fail
293 const FailingContexts failures = preprocess_for_discard(ex_events);
294
295 // Some variables updated per loop iteration to track
296 // whether or not the upcoming row should "discard" [side effects].
297 uint32_t discard = 0;
298 uint32_t dying_context_id = 0;
299 bool is_first_event_in_enqueued_call = true;
300 bool prev_row_was_enter_call = false;
301
302 for (const auto& ex_event : ex_events) {
303 // Check if this is the first event in an enqueued call and whether
304 // the phase should be discarded
305 if (discard == 0 && is_first_event_in_enqueued_call &&
306 is_phase_discarded(ex_event.after_context_event.phase, failures)) {
307 discard = 1;
308 dying_context_id = dying_context_for_phase(ex_event.after_context_event.phase, failures);
309 }
310
311 const bool has_parent = ex_event.after_context_event.parent_id != 0;
312
313 /**************************************************************************************************
314 * Setup.
315 **************************************************************************************************/
316
317 trace.set(
318 row,
319 { {
320 { C::execution_sel, 1 },
321 // Selectors that indicate "dispatch" from tx trace
322 // Note: Enqueued Call End is determined during the opcode execution temporality group
323 { C::execution_enqueued_call_start, is_first_event_in_enqueued_call ? 1 : 0 },
324 // Context
325 { C::execution_context_id, ex_event.after_context_event.id },
326 { C::execution_parent_id, ex_event.after_context_event.parent_id },
327 { C::execution_pc, ex_event.before_context_event.pc },
328 { C::execution_msg_sender, ex_event.after_context_event.msg_sender },
329 { C::execution_contract_address, ex_event.after_context_event.contract_addr },
330 { C::execution_transaction_fee, ex_event.after_context_event.transaction_fee },
331 { C::execution_is_static, ex_event.after_context_event.is_static },
332 { C::execution_parent_calldata_addr, ex_event.after_context_event.parent_cd_addr },
333 { C::execution_parent_calldata_size, ex_event.after_context_event.parent_cd_size },
334 { C::execution_last_child_returndata_addr, ex_event.after_context_event.last_child_rd_addr },
335 { C::execution_last_child_returndata_size, ex_event.after_context_event.last_child_rd_size },
336 { C::execution_last_child_success, ex_event.after_context_event.last_child_success },
337 { C::execution_last_child_id, ex_event.after_context_event.last_child_id },
338 { C::execution_l2_gas_limit, ex_event.after_context_event.gas_limit.l2_gas },
339 { C::execution_da_gas_limit, ex_event.after_context_event.gas_limit.da_gas },
340 { C::execution_l2_gas_used, ex_event.after_context_event.gas_used.l2_gas },
341 { C::execution_da_gas_used, ex_event.after_context_event.gas_used.da_gas },
342 { C::execution_parent_l2_gas_limit, ex_event.after_context_event.parent_gas_limit.l2_gas },
343 { C::execution_parent_da_gas_limit, ex_event.after_context_event.parent_gas_limit.da_gas },
344 { C::execution_parent_l2_gas_used, ex_event.after_context_event.parent_gas_used.l2_gas },
345 { C::execution_parent_da_gas_used, ex_event.after_context_event.parent_gas_used.da_gas },
346 { C::execution_next_context_id, ex_event.next_context_id },
347 // Context - gas.
348 { C::execution_prev_l2_gas_used, ex_event.before_context_event.gas_used.l2_gas },
349 { C::execution_prev_da_gas_used, ex_event.before_context_event.gas_used.da_gas },
350 // Context - tree states
351 // Context - tree states - Written public data slots tree
352 { C::execution_prev_written_public_data_slots_tree_root,
353 ex_event.before_context_event.written_public_data_slots_tree_snapshot.root },
354 { C::execution_prev_written_public_data_slots_tree_size,
355 ex_event.before_context_event.written_public_data_slots_tree_snapshot.next_available_leaf_index },
356 { C::execution_written_public_data_slots_tree_root,
357 ex_event.after_context_event.written_public_data_slots_tree_snapshot.root },
358 { C::execution_written_public_data_slots_tree_size,
359 ex_event.after_context_event.written_public_data_slots_tree_snapshot.next_available_leaf_index },
360 // Context - tree states - Nullifier tree
361 { C::execution_prev_nullifier_tree_root,
362 ex_event.before_context_event.tree_states.nullifier_tree.tree.root },
363 { C::execution_prev_nullifier_tree_size,
364 ex_event.before_context_event.tree_states.nullifier_tree.tree.next_available_leaf_index },
365 { C::execution_prev_num_nullifiers_emitted,
366 ex_event.before_context_event.tree_states.nullifier_tree.counter },
367 { C::execution_nullifier_tree_root, ex_event.after_context_event.tree_states.nullifier_tree.tree.root },
368 { C::execution_nullifier_tree_size,
369 ex_event.after_context_event.tree_states.nullifier_tree.tree.next_available_leaf_index },
370 { C::execution_num_nullifiers_emitted,
371 ex_event.after_context_event.tree_states.nullifier_tree.counter },
372 // Context - tree states - Public data tree
373 { C::execution_prev_public_data_tree_root,
374 ex_event.before_context_event.tree_states.public_data_tree.tree.root },
375 { C::execution_prev_public_data_tree_size,
376 ex_event.before_context_event.tree_states.public_data_tree.tree.next_available_leaf_index },
377 { C::execution_public_data_tree_root,
378 ex_event.after_context_event.tree_states.public_data_tree.tree.root },
379 { C::execution_public_data_tree_size,
380 ex_event.after_context_event.tree_states.public_data_tree.tree.next_available_leaf_index },
381 // Context - tree states - Note hash tree
382 { C::execution_prev_note_hash_tree_root,
383 ex_event.before_context_event.tree_states.note_hash_tree.tree.root },
384 { C::execution_prev_note_hash_tree_size,
385 ex_event.before_context_event.tree_states.note_hash_tree.tree.next_available_leaf_index },
386 { C::execution_prev_num_note_hashes_emitted,
387 ex_event.before_context_event.tree_states.note_hash_tree.counter },
388 { C::execution_note_hash_tree_root, ex_event.after_context_event.tree_states.note_hash_tree.tree.root },
389 { C::execution_note_hash_tree_size,
390 ex_event.after_context_event.tree_states.note_hash_tree.tree.next_available_leaf_index },
391 { C::execution_num_note_hashes_emitted,
392 ex_event.after_context_event.tree_states.note_hash_tree.counter },
393 // Context - tree states - L1 to L2 message tree
394 { C::execution_l1_l2_tree_root,
395 ex_event.after_context_event.tree_states.l1_to_l2_message_tree.tree.root },
396 // Context - tree states - Retrieved bytecodes tree
397 { C::execution_prev_retrieved_bytecodes_tree_root,
398 ex_event.before_context_event.retrieved_bytecodes_tree_snapshot.root },
399 { C::execution_prev_retrieved_bytecodes_tree_size,
400 ex_event.before_context_event.retrieved_bytecodes_tree_snapshot.next_available_leaf_index },
401 { C::execution_retrieved_bytecodes_tree_root,
402 ex_event.after_context_event.retrieved_bytecodes_tree_snapshot.root },
403 { C::execution_retrieved_bytecodes_tree_size,
404 ex_event.after_context_event.retrieved_bytecodes_tree_snapshot.next_available_leaf_index },
405 // Context - side effects
406 { C::execution_prev_num_unencrypted_log_fields, ex_event.before_context_event.numUnencryptedLogFields },
407 { C::execution_num_unencrypted_log_fields, ex_event.after_context_event.numUnencryptedLogFields },
408 { C::execution_prev_num_l2_to_l1_messages, ex_event.before_context_event.numL2ToL1Messages },
409 { C::execution_num_l2_to_l1_messages, ex_event.after_context_event.numL2ToL1Messages },
410 // Helpers for identifying parent context
411 { C::execution_has_parent_ctx, has_parent ? 1 : 0 },
412 { C::execution_is_parent_id_inv, ex_event.after_context_event.parent_id }, // Will be inverted in batch.
413 } });
414
415 // Internal stack
416 trace.set(row,
417 { {
418 { C::execution_internal_call_id, ex_event.before_context_event.internal_call_id },
419 { C::execution_internal_call_return_id, ex_event.before_context_event.internal_call_return_id },
420 { C::execution_next_internal_call_id, ex_event.before_context_event.next_internal_call_id },
421 } });
422
423 /**************************************************************************************************
424 * Temporality group 1: Bytecode retrieval.
425 **************************************************************************************************/
426
427 const bool bytecode_retrieval_failed = ex_event.error == ExecutionError::BYTECODE_RETRIEVAL;
428 const bool sel_first_row_in_context = prev_row_was_enter_call || is_first_event_in_enqueued_call;
429 trace.set(row,
430 { {
431 { C::execution_sel_first_row_in_context, sel_first_row_in_context ? 1 : 0 },
432 { C::execution_sel_bytecode_retrieval_failure, bytecode_retrieval_failed ? 1 : 0 },
433 { C::execution_sel_bytecode_retrieval_success, !bytecode_retrieval_failed ? 1 : 0 },
434 { C::execution_bytecode_id, ex_event.after_context_event.bytecode_id },
435 } });
436
437 /**************************************************************************************************
438 * Temporality group 2: Instruction fetching. Mapping from wire to execution and addressing.
439 **************************************************************************************************/
440
441 // This will only have a value if instruction fetching succeeded.
443 const bool error_in_instruction_fetching = ex_event.error == ExecutionError::INSTRUCTION_FETCHING;
444 const bool instruction_fetching_success = !bytecode_retrieval_failed && !error_in_instruction_fetching;
445 trace.set(C::execution_sel_instruction_fetching_failure, row, error_in_instruction_fetching ? 1 : 0);
446
447 if (instruction_fetching_success) {
448 exec_opcode = ex_event.wire_instruction.get_exec_opcode();
449 process_instr_fetching(ex_event.wire_instruction, trace, row);
450 // If we fetched an instruction successfully, we can set the next PC.
451 trace.set(row,
452 { {
453 { C::execution_next_pc,
454 ex_event.before_context_event.pc + ex_event.wire_instruction.size_in_bytes() },
455 } });
456
457 // Along this function we need to set the info we get from the EXEC_SPEC_READ lookup.
458 process_execution_spec(ex_event, trace, row);
459
460 process_addressing(ex_event.addressing_event, ex_event.wire_instruction, trace, row);
461 }
462
463 const bool addressing_failed = ex_event.error == ExecutionError::ADDRESSING;
464
465 /**************************************************************************************************
466 * Temporality group 3: Registers read.
467 **************************************************************************************************/
468
469 // Note that if addressing did not fail, register reading will not fail.
471 std::ranges::fill(registers, MemoryValue::from<FF>(0));
472 const bool should_process_registers = instruction_fetching_success && !addressing_failed;
473 const bool register_processing_failed = ex_event.error == ExecutionError::REGISTER_READ;
474 if (should_process_registers) {
475 process_registers(*exec_opcode, ex_event.inputs, ex_event.output, registers, trace, row);
476 }
477
478 /**************************************************************************************************
479 * Temporality group 4: Gas (both base and dynamic).
480 **************************************************************************************************/
481
482 const bool should_check_gas = should_process_registers && !register_processing_failed;
483 const bool oog = ex_event.error == ExecutionError::GAS;
484 trace.set(C::execution_sel_should_check_gas, row, should_check_gas ? 1 : 0);
485 if (should_check_gas) {
486 process_gas(ex_event.gas_event, *exec_opcode, trace, row);
487
488 // To_Radix Dynamic Gas Factor related selectors.
489 // We need the register information to compute dynamic gas factor and process_gas() does not have
490 // access to it and nor should it.
491 if (*exec_opcode == ExecutionOpCode::TORADIXBE) {
492 uint32_t radix = ex_event.inputs[1].as<uint32_t>(); // Safe since already tag checked
493 uint32_t num_limbs = ex_event.inputs[2].as<uint32_t>(); // Safe since already tag checked
494 uint32_t num_p_limbs = radix > 256 ? 32 : static_cast<uint32_t>(get_p_limbs_per_radix_size(radix));
495 trace.set(row,
496 { {
497 // To Radix BE Dynamic Gas
498 { C::execution_two_five_six, 256 },
499 { C::execution_sel_radix_gt_256, radix > 256 ? 1 : 0 },
500 { C::execution_sel_lookup_num_p_limbs, radix <= 256 ? 1 : 0 },
501 { C::execution_num_p_limbs, num_p_limbs },
502 { C::execution_sel_use_num_limbs, num_limbs > num_p_limbs ? 1 : 0 },
503 // Don't set dyn gas factor here since already set in process_gas
504 } });
505 }
506 }
507
508 /**************************************************************************************************
509 * Temporality group 5: Opcode execution.
510 **************************************************************************************************/
511
512 const bool should_execute_opcode = should_check_gas && !oog;
513
514 // These booleans are used after of the "opcode code execution" block but need
515 // to be set as part of the "opcode code execution" block.
516 bool sel_enter_call = false;
517 bool sel_exit_call = false;
518 bool should_execute_revert = false;
519
520 const bool opcode_execution_failed = ex_event.error == ExecutionError::OPCODE_EXECUTION;
521 if (should_execute_opcode) {
522 // At this point we can assume instruction fetching succeeded, so this should never fail.
523 const auto& dispatch_to_subtrace = get_subtrace_info_map().at(*exec_opcode);
524 trace.set(row,
525 { {
526 { C::execution_sel_should_execute_opcode, 1 },
527 { C::execution_sel_opcode_error, opcode_execution_failed ? 1 : 0 },
528 { get_subtrace_selector(dispatch_to_subtrace.subtrace_selector), 1 },
529 } });
530
531 // Execution Trace opcodes - separating for clarity
532 if (dispatch_to_subtrace.subtrace_selector == SubtraceSel::EXECUTION) {
533 trace.set(get_execution_opcode_selector(*exec_opcode), row, 1);
534 }
535
536 // Execution trace opcodes specific logic.
537 // Note that the opcode selectors were set above. (e.g., sel_execute_call, sel_execute_static_call, ..).
538 if (*exec_opcode == ExecutionOpCode::CALL || *exec_opcode == ExecutionOpCode::STATICCALL) {
539 sel_enter_call = true;
540
541 Gas gas_left = ex_event.after_context_event.gas_limit - ex_event.after_context_event.gas_used;
542
543 uint32_t allocated_l2_gas = registers[0].as<uint32_t>();
544 bool is_l2_gas_allocated_lt_left = allocated_l2_gas < gas_left.l2_gas;
545
546 uint32_t allocated_da_gas = registers[1].as<uint32_t>();
547 bool is_da_gas_allocated_lt_left = allocated_da_gas < gas_left.da_gas;
548
549 trace.set(row,
550 { {
551 { C::execution_sel_enter_call, 1 },
552 { C::execution_l2_gas_left, gas_left.l2_gas },
553 { C::execution_da_gas_left, gas_left.da_gas },
554 { C::execution_call_is_l2_gas_allocated_lt_left, is_l2_gas_allocated_lt_left },
555 { C::execution_call_is_da_gas_allocated_lt_left, is_da_gas_allocated_lt_left },
556 } });
557 } else if (*exec_opcode == ExecutionOpCode::RETURN) {
558 sel_exit_call = true;
559 trace.set(row,
560 { {
561 { C::execution_nested_return, has_parent ? 1 : 0 },
562 } });
563 } else if (*exec_opcode == ExecutionOpCode::REVERT) {
564 sel_exit_call = true;
565 should_execute_revert = true;
566 } else if (exec_opcode == ExecutionOpCode::GETENVVAR) {
567 assert(ex_event.addressing_event.resolution_info.size() == 2 &&
568 "GETENVVAR should have exactly two resolved operands (envvar enum and output)");
569 // rop[1] is the envvar enum
570 Operand envvar_enum = ex_event.addressing_event.resolution_info[1].resolved_operand;
571 process_get_env_var_opcode(envvar_enum, ex_event.output, trace, row);
572 } else if (*exec_opcode == ExecutionOpCode::INTERNALRETURN) {
573 trace.set(C::execution_internal_call_return_id_inv,
574 row,
575 ex_event.before_context_event.internal_call_return_id); // Will be inverted in batch later.
576 } else if (*exec_opcode == ExecutionOpCode::SSTORE) {
577 uint32_t remaining_data_writes = MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX -
578 ex_event.before_context_event.tree_states.public_data_tree.counter;
579
580 trace.set(row,
581 { {
582 { C::execution_max_data_writes_reached, remaining_data_writes == 0 },
583 { C::execution_remaining_data_writes_inv,
584 remaining_data_writes }, // Will be inverted in batch later.
585 { C::execution_sel_write_public_data, !opcode_execution_failed },
586 } });
587 } else if (*exec_opcode == ExecutionOpCode::NOTEHASHEXISTS) {
588 uint64_t leaf_index = registers[1].as<uint64_t>();
589 uint64_t note_hash_tree_leaf_count = NOTE_HASH_TREE_LEAF_COUNT;
590 bool note_hash_leaf_in_range = leaf_index < note_hash_tree_leaf_count;
591
592 trace.set(row,
593 { {
594 { C::execution_note_hash_leaf_in_range, note_hash_leaf_in_range },
595 { C::execution_note_hash_tree_leaf_count, FF(note_hash_tree_leaf_count) },
596 } });
597 } else if (*exec_opcode == ExecutionOpCode::EMITNOTEHASH) {
598 uint32_t remaining_note_hashes =
599 MAX_NOTE_HASHES_PER_TX - ex_event.before_context_event.tree_states.note_hash_tree.counter;
600
601 trace.set(row,
602 { {
603 { C::execution_sel_reached_max_note_hashes, remaining_note_hashes == 0 },
604 { C::execution_remaining_note_hashes_inv,
605 remaining_note_hashes }, // Will be inverted in batch later.
606 { C::execution_sel_write_note_hash, !opcode_execution_failed },
607 } });
608 } else if (*exec_opcode == ExecutionOpCode::L1TOL2MSGEXISTS) {
609 uint64_t leaf_index = registers[1].as<uint64_t>();
610 uint64_t l1_to_l2_msg_tree_leaf_count = L1_TO_L2_MSG_TREE_LEAF_COUNT;
611 bool l1_to_l2_msg_leaf_in_range = leaf_index < l1_to_l2_msg_tree_leaf_count;
612
613 trace.set(row,
614 { {
615 { C::execution_l1_to_l2_msg_leaf_in_range, l1_to_l2_msg_leaf_in_range },
616 { C::execution_l1_to_l2_msg_tree_leaf_count, FF(l1_to_l2_msg_tree_leaf_count) },
617 } });
618 //} else if (exec_opcode == ExecutionOpCode::NULLIFIEREXISTS) {
619 // no custom columns!
620 } else if (*exec_opcode == ExecutionOpCode::EMITNULLIFIER) {
621 uint32_t remaining_nullifiers =
622 MAX_NULLIFIERS_PER_TX - ex_event.before_context_event.tree_states.nullifier_tree.counter;
623
624 trace.set(row,
625 { {
626 { C::execution_sel_reached_max_nullifiers, remaining_nullifiers == 0 },
627 { C::execution_remaining_nullifiers_inv,
628 remaining_nullifiers }, // Will be inverted in batch later.
629 { C::execution_sel_write_nullifier,
630 remaining_nullifiers != 0 && !ex_event.before_context_event.is_static },
631 } });
632 } else if (*exec_opcode == ExecutionOpCode::SENDL2TOL1MSG) {
633 uint32_t remaining_l2_to_l1_msgs =
634 MAX_L2_TO_L1_MSGS_PER_TX - ex_event.before_context_event.numL2ToL1Messages;
635
636 trace.set(row,
637 { { { C::execution_sel_l2_to_l1_msg_limit_error, remaining_l2_to_l1_msgs == 0 },
638 { C::execution_remaining_l2_to_l1_msgs_inv,
639 remaining_l2_to_l1_msgs }, // Will be inverted in batch later.
640 { C::execution_sel_write_l2_to_l1_msg, !opcode_execution_failed && discard == 0 },
641 {
642 C::execution_public_inputs_index,
644 ex_event.before_context_event.numL2ToL1Messages,
645 } } });
646 }
647 }
648
649 /**************************************************************************************************
650 * Temporality group 6: Register write.
651 **************************************************************************************************/
652
653 const bool should_process_register_write = should_execute_opcode && !opcode_execution_failed;
654 if (should_process_register_write) {
655 process_registers_write(*exec_opcode, trace, row);
656 }
657
658 /**************************************************************************************************
659 * Discarding and error related selectors.
660 **************************************************************************************************/
661
662 const bool is_dying_context = discard == 1 && (ex_event.after_context_event.id == dying_context_id);
663
664 // Need to generate the item below for checking "is dying context" in circuit
665 FF dying_context_diff = 0;
666 if (!is_dying_context) {
667 // Compute inversion when context_id != dying_context_id
668 dying_context_diff = FF(ex_event.after_context_event.id) - FF(dying_context_id);
669 }
670
671 // This is here instead of guarded by `should_execute_opcode` because is_err is a higher level error
672 // than just an opcode error (i.e., it is on if there are any errors in any temporality group).
673 const bool is_err = ex_event.error != ExecutionError::NONE;
674 sel_exit_call = sel_exit_call || is_err; // sel_execute_revert || sel_execute_return || sel_error
675 const bool is_failure = should_execute_revert || is_err;
676 const bool nested_exit_call = sel_exit_call && has_parent;
677 const bool enqueued_call_end = sel_exit_call && !has_parent;
678 const bool rollback_context = (should_execute_revert || is_err) && has_parent;
679 const bool resolves_dying_context = is_failure && is_dying_context;
680 const bool nested_call_rom_undiscarded_context = sel_enter_call && discard == 0;
681
682 trace.set(
683 row,
684 { {
685 { C::execution_sel_exit_call, sel_exit_call ? 1 : 0 },
686 { C::execution_nested_exit_call, nested_exit_call ? 1 : 0 },
687 { C::execution_rollback_context, rollback_context ? 1 : 0 },
688 { C::execution_sel_error, is_err ? 1 : 0 },
689 { C::execution_sel_failure, is_failure ? 1 : 0 },
690 { C::execution_discard, discard },
691 { C::execution_dying_context_id, dying_context_id },
692 { C::execution_dying_context_id_inv, dying_context_id }, // Will be inverted in batch.
693 { C::execution_is_dying_context, is_dying_context ? 1 : 0 },
694 { C::execution_dying_context_diff_inv, dying_context_diff }, // Will be inverted in batch.
695 { C::execution_enqueued_call_end, enqueued_call_end ? 1 : 0 },
696 { C::execution_resolves_dying_context, resolves_dying_context ? 1 : 0 },
697 { C::execution_nested_call_from_undiscarded_context, nested_call_rom_undiscarded_context ? 1 : 0 },
698 } });
699
700 // Trace-generation is done for this event.
701 // Now, use this event to determine whether we should set/reset the discard flag for the NEXT event
702 const bool event_kills_dying_context =
703 discard == 1 && is_failure && ex_event.after_context_event.id == dying_context_id;
704
705 if (event_kills_dying_context) {
706 // Set/unset discard flag if the current event is the one that kills the dying context
707 dying_context_id = 0;
708 discard = 0;
709 } else if (sel_enter_call && discard == 0 && !is_err &&
710 failures.does_context_fail.contains(ex_event.next_context_id)) {
711 // If making a nested call, and discard isn't already high...
712 // if the nested context being entered eventually dies, raise discard flag and remember which
713 // context is dying. NOTE: if a [STATIC]CALL instruction _itself_ errors, we don't set the
714 // discard flag because we aren't actually entering a new context!
715 dying_context_id = ex_event.next_context_id;
716 discard = 1;
717 }
718 // Otherwise, we aren't entering or exiting a dying context,
719 // so just propagate discard and dying context.
720 // Implicit: dying_context_id = dying_context_id; discard = discard;
721
722 // If an enqueued call just exited, next event (if any) is the first in an enqueued call.
723 // Update flag for next iteration.
724 is_first_event_in_enqueued_call = !has_parent && sel_exit_call;
725
726 // Track this bool for use determining whether the next row is the first in a context
727 prev_row_was_enter_call = sel_enter_call;
728
729 row++;
730 }
731
732 // Batch invert the columns.
734}
735
// Populates the instruction-fetching portion of one execution trace row:
// flags fetch success, records the execution opcode, the raw indirect bitmask
// and the encoded instruction length, then writes every operand (zero-padded
// up to AVM_MAX_OPERANDS) into its dedicated operand column.
// NOTE(review): the signature head (original line 736) is missing from this
// extraction; per the index it is
// process_instr_fetching(const simulation::Instruction&, TraceContainer&, uint32_t)
// — confirm against the original source file.
 737 TraceContainer& trace,
 738 uint32_t row)
 739{
 740 trace.set(row,
 741 { {
 742 { C::execution_sel_instruction_fetching_success, 1 },
 743 { C::execution_ex_opcode, static_cast<uint8_t>(instruction.get_exec_opcode()) },
 744 { C::execution_indirect, instruction.indirect },
 745 { C::execution_instr_length, instruction.size_in_bytes() },
 746 } });
 747
 748 // At this point we can assume instruction fetching succeeded.
 749 auto operands = instruction.operands;
 750 assert(operands.size() <= AVM_MAX_OPERANDS);
// Pad with zero-valued FF operands so unused operand columns are written as 0.
 751 operands.resize(AVM_MAX_OPERANDS, Operand::from<FF>(0));
 752
 753 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
 754 trace.set(OPERAND_COLUMNS[i], row, operands.at(i));
 755 }
 756}
757
// Writes the precomputed per-opcode specification into one trace row: gas cost
// components, per-register read/write/tag-check selectors, which operands are
// addresses, and the subtrace dispatch identifiers.
// NOTE(review): the signature head (original line 758) is missing from this
// extraction; per the index it is
// process_execution_spec(const simulation::ExecutionEvent&, TraceContainer&, uint32_t)
// — confirm against the original source file.
 759 TraceContainer& trace,
 760 uint32_t row)
 761{
 762 // At this point we can assume instruction fetching succeeded, so this should never fail.
 763 ExecutionOpCode exec_opcode = ex_event.wire_instruction.get_exec_opcode();
 764 const auto& exec_spec = get_exec_instruction_spec().at(exec_opcode);
 765 const auto& gas_cost = exec_spec.gas_cost;
 766
 767 // Gas.
 768 trace.set(row,
 769 { {
 770 { C::execution_opcode_gas, gas_cost.opcode_gas },
 771 { C::execution_base_da_gas, gas_cost.base_da },
 772 { C::execution_dynamic_l2_gas, gas_cost.dyn_l2 },
 773 { C::execution_dynamic_da_gas, gas_cost.dyn_da },
 774 } });
 775
// Per-register selectors: write flag, mem-op active flag, expected tag (0 when
// no tag check is required), and whether a tag check applies at all.
 776 const auto& register_info = exec_spec.register_info;
 777 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
 778 trace.set(row,
 779 { {
 780 { REGISTER_IS_WRITE_COLUMNS[i], register_info.is_write(i) ? 1 : 0 },
 781 { REGISTER_MEM_OP_COLUMNS[i], register_info.is_active(i) ? 1 : 0 },
 782 { REGISTER_EXPECTED_TAG_COLUMNS[i],
 783 register_info.need_tag_check(i) ? static_cast<uint32_t>(*register_info.expected_tag(i)) : 0 },
 784 { REGISTER_TAG_CHECK_COLUMNS[i], register_info.need_tag_check(i) ? 1 : 0 },
 785 } });
 786 }
 787
 788 // Set is_address columns
// The first `num_addresses` operands of this opcode are memory addresses.
 789 const auto& num_addresses = exec_spec.num_addresses;
 790 for (size_t i = 0; i < num_addresses; i++) {
 791 trace.set(OPERAND_IS_ADDRESS_COLUMNS[i], row, 1);
 792 }
 793
 794 // At this point we can assume instruction fetching succeeded, so this should never fail.
 795 const auto& dispatch_to_subtrace = get_subtrace_info_map().at(exec_opcode);
 796 trace.set(row,
 797 { {
 798 { C::execution_subtrace_id, get_subtrace_id(dispatch_to_subtrace.subtrace_selector) },
 799 { C::execution_subtrace_operation_id, dispatch_to_subtrace.subtrace_operation_id },
 800 { C::execution_dyn_gas_id, exec_spec.dyn_gas_id },
 801 } });
 802}
803
// Records gas accounting for one row: out-of-gas flags (L2/DA and their OR),
// addressing gas, dynamic gas factors, cumulative totals, and — when the
// opcode has a non-zero dynamic gas id — the matching dyn-gas selector column.
// NOTE(review): the signature head (original line 804) is missing from this
// extraction; per the index it is
// process_gas(const simulation::GasEvent&, ExecutionOpCode, TraceContainer&, uint32_t)
// — confirm against the original source file.
 805 ExecutionOpCode exec_opcode,
 806 TraceContainer& trace,
 807 uint32_t row)
 808{
// Out of gas if either the L2 or DA gas dimension ran out.
 809 bool oog = gas_event.oog_l2 || gas_event.oog_da;
 810 trace.set(row,
 811 { {
 812 { C::execution_out_of_gas_l2, gas_event.oog_l2 ? 1 : 0 },
 813 { C::execution_out_of_gas_da, gas_event.oog_da ? 1 : 0 },
 814 { C::execution_sel_out_of_gas, oog ? 1 : 0 },
 815 // Base gas.
 816 { C::execution_addressing_gas, gas_event.addressing_gas },
 817 // Dynamic gas.
 818 { C::execution_dynamic_l2_gas_factor, gas_event.dynamic_gas_factor.l2_gas },
 819 { C::execution_dynamic_da_gas_factor, gas_event.dynamic_gas_factor.da_gas },
 820 // Derived cumulative gas used.
 821 { C::execution_total_gas_l2, gas_event.total_gas_used_l2 },
 822 { C::execution_total_gas_da, gas_event.total_gas_used_da },
 823 } });
 824
// dyn_gas_id == 0 means the opcode has no dynamic-gas selector to raise.
 825 const auto& exec_spec = get_exec_instruction_spec().at(exec_opcode);
 826 if (exec_spec.dyn_gas_id != 0) {
 827 trace.set(get_dyn_gas_selector(exec_spec.dyn_gas_id), row, 1);
 828 }
 829}
830
// Fills the addressing columns for one row. Pads the event's per-operand
// resolution info to AVM_MAX_OPERANDS, derives per-operand relative/indirect
// effectiveness and error flags, writes the resolved operands and tags, the
// wire-level relative/indirect bits over the full indirect flag, and the
// helper values whose inverses are computed later in batch.
// NOTE(review): the signature head (original lines 831-832) is missing from
// this extraction; per the index it is
// process_addressing(const simulation::AddressingEvent&, const simulation::Instruction&,
//                    TraceContainer&, uint32_t).
// NOTE(review): original line 852 — presumably the declaration
// `std::array<bool, AVM_MAX_OPERANDS> relative_oob{};` used below — is also
// missing from this extraction; confirm against the original source file.
 833 TraceContainer& trace,
 834 uint32_t row)
 835{
 836 // At this point we can assume instruction fetching succeeded, so this should never fail.
 837 ExecutionOpCode exec_opcode = instruction.get_exec_opcode();
 838 const ExecInstructionSpec& ex_spec = get_exec_instruction_spec().at(exec_opcode);
 839
// Pad so that every one of the AVM_MAX_OPERANDS slots has resolution info.
 840 auto resolution_info_vec = addr_event.resolution_info;
 841 assert(resolution_info_vec.size() <= AVM_MAX_OPERANDS);
 842 resolution_info_vec.resize(AVM_MAX_OPERANDS,
 843 {
 844 // This is the default we want: both tag and value 0.
 845 .after_relative = Operand::from<FF>(0),
 846 .resolved_operand = Operand::from<FF>(0),
 847 });
 848
 849 std::array<bool, AVM_MAX_OPERANDS> should_apply_indirection{};
 850 std::array<bool, AVM_MAX_OPERANDS> is_relative_effective{};
 851 std::array<bool, AVM_MAX_OPERANDS> is_indirect_effective{};
 853 std::array<FF, AVM_MAX_OPERANDS> after_relative{};
 854 std::array<FF, AVM_MAX_OPERANDS> resolved_operand{};
 855 std::array<uint8_t, AVM_MAX_OPERANDS> resolved_operand_tag{};
 856 uint8_t num_relative_operands = 0;
 857
 858 bool base_address_invalid = false;
 859 bool do_base_check = false;
 860
 861 // Gather operand information.
 862 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
 863 const auto& resolution_info = resolution_info_vec.at(i);
// Only the first num_addresses operands are addresses; relative/indirect bits
// beyond that are ineffective for resolution (but still recorded as wire bits below).
 864 bool op_is_address = i < ex_spec.num_addresses;
 865 relative_oob[i] = resolution_info.error.has_value() &&
 866 *resolution_info.error == AddressingEventError::RELATIVE_COMPUTATION_OOB;
// base_address_invalid is sticky: once any operand reports it, it stays set
// and suppresses indirection for all subsequent operands (order-sensitive).
 867 base_address_invalid =
 868 base_address_invalid ||
 869 (resolution_info.error.has_value() && *resolution_info.error == AddressingEventError::BASE_ADDRESS_INVALID);
 870 is_indirect_effective[i] = op_is_address && is_operand_indirect(instruction.indirect, i);
 871 is_relative_effective[i] = op_is_address && is_operand_relative(instruction.indirect, i);
 872 should_apply_indirection[i] = is_indirect_effective[i] && !relative_oob[i] && !base_address_invalid;
 873 resolved_operand_tag[i] = static_cast<uint8_t>(resolution_info.resolved_operand.get_tag());
 874 after_relative[i] = resolution_info.after_relative;
 875 resolved_operand[i] = resolution_info.resolved_operand;
 876 if (is_relative_effective[i]) {
 877 do_base_check = true;
 878 num_relative_operands++;
 879 }
 880 }
 881
 882 // Set the operand columns.
 883 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
 884 trace.set(row,
 885 { {
 886 { OPERAND_RELATIVE_OVERFLOW_COLUMNS[i], relative_oob[i] ? 1 : 0 },
 887 { OPERAND_AFTER_RELATIVE_COLUMNS[i], after_relative[i] },
 888 { OPERAND_SHOULD_APPLY_INDIRECTION_COLUMNS[i], should_apply_indirection[i] ? 1 : 0 },
 889 { OPERAND_IS_RELATIVE_VALID_BASE_COLUMNS[i],
 890 is_relative_effective[i] && !base_address_invalid ? 1 : 0 },
 891 { RESOLVED_OPERAND_COLUMNS[i], resolved_operand[i] },
 892 { RESOLVED_OPERAND_TAG_COLUMNS[i], resolved_operand_tag[i] },
 893 } });
 894 }
 895
 896 // We need to compute relative and indirect over the whole 16 bits of the indirect flag.
 897 // See comment in PIL file about indirect upper bits.
 898 for (size_t i = 0; i < TOTAL_INDIRECT_BITS / 2; i++) {
 899 bool is_relative = is_operand_relative(instruction.indirect, i);
 900 bool is_indirect = is_operand_indirect(instruction.indirect, i);
 901 trace.set(row,
 902 { {
 903 { OPERAND_IS_RELATIVE_WIRE_COLUMNS[i], is_relative ? 1 : 0 },
 904 { OPERAND_IS_INDIRECT_WIRE_COLUMNS[i], is_indirect ? 1 : 0 },
 905 } });
 906 }
 907
 908 // Inverse of following difference is required when base address is invalid.
// Non-zero only on failure: difference between the base address tag and U32.
 909 FF base_address_tag_diff = base_address_invalid ? FF(static_cast<uint8_t>(addr_event.base_address.get_tag())) -
 910 FF(static_cast<uint8_t>(MemoryTag::U32))
 911 : 0;
 912
 913 // Tag check after indirection.
 914 bool some_final_check_failed = std::ranges::any_of(addr_event.resolution_info, [](const auto& info) {
 915 return info.error.has_value() && *info.error == AddressingEventError::INVALID_ADDRESS_AFTER_INDIRECTION;
 916 });
// Batch all per-operand (tag - U32) differences into one field element, each
// operand weighted by a distinct power of 2^3 so failures cannot cancel out.
 917 FF batched_tags_diff = 0;
 918 if (some_final_check_failed) {
 919 FF power_of_2 = 1;
 920 for (size_t i = 0; i < AVM_MAX_OPERANDS; ++i) {
 921 batched_tags_diff +=
 922 FF(is_indirect_effective[i] ? 1 : 0) * power_of_2 * (FF(resolved_operand_tag[i]) - FF(MEM_TAG_U32));
 923 power_of_2 *= 8; // 2^3
 924 }
 925 }
 926
 927 // Collect addressing errors. See PIL file for reference.
// Counts error contributions (base invalid + #relative overflows + final-check
// flag); only its inverse is needed, computed later in batch.
 928 bool addressing_failed =
 929 std::ranges::any_of(addr_event.resolution_info, [](const auto& info) { return info.error.has_value(); });
 930 FF addressing_error_collection =
 931 addressing_failed
 932 ? FF(
 933 // Base address invalid.
 934 (base_address_invalid ? 1 : 0) +
 935 // Relative overflow.
 936 std::accumulate(addr_event.resolution_info.begin(),
 937 addr_event.resolution_info.end(),
 938 static_cast<uint32_t>(0),
 939 [](uint32_t acc, const auto& info) {
 940 return acc +
 941 (info.error.has_value() &&
 942 *info.error == AddressingEventError::RELATIVE_COMPUTATION_OOB
 943 ? 1
 944 : 0);
 945 }) +
 946 // Some invalid address after indirection.
 947 (some_final_check_failed ? 1 : 0))
 948 : 0;
 949
 950 trace.set(
 951 row,
 952 { {
 953 { C::execution_sel_addressing_error, addressing_failed ? 1 : 0 },
 954 { C::execution_addressing_error_collection_inv, addressing_error_collection }, // Will be inverted in batch.
 955 { C::execution_base_address_val, addr_event.base_address.as_ff() },
 956 { C::execution_base_address_tag, static_cast<uint8_t>(addr_event.base_address.get_tag()) },
 957 { C::execution_base_address_tag_diff_inv, base_address_tag_diff }, // Will be inverted in batch.
 958 { C::execution_batched_tags_diff_inv, batched_tags_diff }, // Will be inverted in batch.
 959 { C::execution_sel_some_final_check_failed, some_final_check_failed ? 1 : 0 },
 960 { C::execution_sel_base_address_failure, base_address_invalid ? 1 : 0 },
 961 { C::execution_num_relative_operands_inv,
 962 do_base_check ? num_relative_operands : 0 }, // Will be inverted in batch later.
 963 { C::execution_sel_do_base_check, do_base_check ? 1 : 0 },
 964 { C::execution_highest_address, AVM_HIGHEST_MEM_ADDRESS },
 965 } });
 966}
967
// Batch-inverts all "*_inv" columns that the row-filling functions above wrote
// as raw (uninverted) values — matching their "Will be inverted in batch"
// comments. Batch inversion amortizes the cost of field inversion.
// NOTE(review): the signature line (original line 968) is missing from this
// extraction, so the helper's exact name/parameters cannot be confirmed here.
 969{
 970 trace.invert_columns({ {
 971 // Registers.
 972 C::execution_batched_tags_diff_inv_reg,
 973 // Context.
 974 C::execution_is_parent_id_inv,
 975 C::execution_internal_call_return_id_inv,
 976 // Trees.
 977 C::execution_remaining_data_writes_inv,
 978 C::execution_remaining_note_hashes_inv,
 979 C::execution_remaining_nullifiers_inv,
 980 // L1ToL2MsgExists.
 981 C::execution_remaining_l2_to_l1_msgs_inv,
 982 // Discard.
 983 C::execution_dying_context_id_inv,
 984 C::execution_dying_context_diff_inv,
 985 // Addressing.
 986 C::execution_addressing_error_collection_inv,
 987 C::execution_batched_tags_diff_inv,
 988 C::execution_base_address_tag_diff_inv,
 989 C::execution_num_relative_operands_inv,
 990 } });
 991}
992
// Populates the register columns for one row: fills the `registers` span from
// `inputs` (reads, consumed in order) and `output` (writes) according to the
// opcode's register spec, writes values/tags/read-effective selectors, and
// computes the tag-check failure flag plus the batched tag-difference value
// (inverted later in batch).
// NOTE(review): original lines 993-994 and 996 (the signature head and,
// presumably, the `std::span<MemoryValue> registers` parameter — see the index
// entry for process_registers) are missing from this extraction; confirm
// against the original source file.
 995 const MemoryValue& output,
 997 TraceContainer& trace,
 998 uint32_t row)
 999{
 1000 assert(registers.size() == AVM_MAX_REGISTERS);
 1001 // At this point we can assume instruction fetching succeeded, so this should never fail.
 1002 const auto& register_info = get_exec_instruction_spec().at(exec_opcode).register_info;
 1003
 1004 // Registers.
 1005 size_t input_counter = 0;
 1006 for (uint8_t i = 0; i < AVM_MAX_REGISTERS; ++i) {
 1007 if (register_info.is_active(i)) {
 1008 if (register_info.is_write(i)) {
 1009 // If this is a write operation, we need to get the value from the output.
 1010 registers[i] = output;
 1011 } else {
 1012 // If this is a read operation, we need to get the value from the input.
// Default to a zero FF value when fewer inputs were supplied than reads expected.
 1013 auto input = inputs.size() > input_counter ? inputs.at(input_counter) : MemoryValue::from<FF>(0);
 1014 registers[i] = input;
 1015 input_counter++;
 1016 }
 1017 }
 1018 }
 1019
 1020 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
 1021 trace.set(REGISTER_COLUMNS[i], row, registers[i]);
 1022 trace.set(REGISTER_MEM_TAG_COLUMNS[i], row, static_cast<uint8_t>(registers[i].get_tag()));
 1023 // This one is special because it sets the reads (but not the writes).
 1024 // If we got here, sel_should_read_registers=1.
 1025 if (register_info.is_active(i) && !register_info.is_write(i)) {
 1026 trace.set(REGISTER_OP_REG_EFFECTIVE_COLUMNS[i], row, 1);
 1027 }
 1028 }
 1029
 1030 // Tag check.
 1031 bool some_tag_check_failed = false;
 1032 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
 1033 if (register_info.need_tag_check(i)) {
 1034 if (registers[i].get_tag() != *register_info.expected_tag(i)) {
 1035 some_tag_check_failed = true;
 1036 break;
 1037 }
 1038 }
 1039 }
 1040
// Batch all (actual tag - expected tag) differences into one field element,
// each register weighted by a distinct power of 2^3 so failures cannot cancel.
 1041 FF batched_tags_diff_reg = 0;
 1042 if (some_tag_check_failed) {
 1043 FF power_of_2 = 1;
 1044 for (size_t i = 0; i < AVM_MAX_REGISTERS; ++i) {
 1045 if (register_info.need_tag_check(i)) {
 1046 batched_tags_diff_reg += power_of_2 * (FF(static_cast<uint8_t>(registers[i].get_tag())) -
 1047 FF(static_cast<uint8_t>(*register_info.expected_tag(i))));
 1048 }
 1049 power_of_2 *= 8; // 2^3
 1050 }
 1051 }
 1052
 1053 trace.set(row,
 1054 { {
 1055 { C::execution_sel_should_read_registers, 1 },
 1056 { C::execution_batched_tags_diff_inv_reg, batched_tags_diff_reg }, // Will be inverted in batch.
 1057 { C::execution_sel_register_read_error, some_tag_check_failed ? 1 : 0 },
 1058 } });
 1059}
1060
// Raises the register-write selectors for one row: sets
// sel_should_write_registers and marks every active write register as
// effective. Complements process_registers, which handles the read side.
// NOTE(review): the signature line (original line 1061) is missing from this
// extraction; per the index it is
// process_registers_write(ExecutionOpCode, TraceContainer&, uint32_t).
 1062{
 1063 const auto& register_info = get_exec_instruction_spec().at(exec_opcode).register_info;
 1064 trace.set(C::execution_sel_should_write_registers, row, 1);
 1065
 1066 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
 1067 // This one is special because it sets the writes.
 1068 // If we got here, sel_should_write_registers=1.
 1069 if (register_info.is_active(i) && register_info.is_write(i)) {
 1070 trace.set(REGISTER_OP_REG_EFFECTIVE_COLUMNS[i], row, 1);
 1071 }
 1072 }
 1073}
1074
// Fills the GETENVVAR-specific columns for one row based on the precomputed
// per-envvar spec: which public-inputs lookup column applies (if any), the PI
// row index, the per-variable selector flags, the looked-up value, and the
// output tag forwarded to register 0.
// NOTE(review): the signature head (original line 1075) is missing from this
// extraction; per the index it is
// process_get_env_var_opcode(simulation::Operand envvar_enum, MemoryValue output,
//                            TraceContainer&, uint32_t).
 1076 MemoryValue output,
 1077 TraceContainer& trace,
 1078 uint32_t row)
 1079{
// The envvar enum operand is expected to already be a U8.
 1080 assert(envvar_enum.get_tag() == ValueTag::U8);
 1081 const auto& envvar_spec = GetEnvVarSpec::get_table(envvar_enum.as<uint8_t>());
 1082
 1083 trace.set(row,
 1084 { {
 1085 { C::execution_sel_execute_get_env_var, 1 },
 1086 { C::execution_sel_envvar_pi_lookup_col0, envvar_spec.envvar_pi_lookup_col0 ? 1 : 0 },
 1087 { C::execution_sel_envvar_pi_lookup_col1, envvar_spec.envvar_pi_lookup_col1 ? 1 : 0 },
 1088 { C::execution_envvar_pi_row_idx, envvar_spec.envvar_pi_row_idx },
 1089 { C::execution_is_address, envvar_spec.is_address ? 1 : 0 },
 1090 { C::execution_is_sender, envvar_spec.is_sender ? 1 : 0 },
 1091 { C::execution_is_transactionfee, envvar_spec.is_transactionfee ? 1 : 0 },
 1092 { C::execution_is_isstaticcall, envvar_spec.is_isstaticcall ? 1 : 0 },
 1093 { C::execution_is_l2gasleft, envvar_spec.is_l2gasleft ? 1 : 0 },
 1094 { C::execution_is_dagasleft, envvar_spec.is_dagasleft ? 1 : 0 },
// The PI-lookup value column is only populated when one of the two PI lookup
// columns is active; otherwise it stays 0.
 1095 { C::execution_value_from_pi,
 1096 envvar_spec.envvar_pi_lookup_col0 || envvar_spec.envvar_pi_lookup_col1 ? output.as_ff() : 0 },
 1097 { C::execution_mem_tag_reg_0_, envvar_spec.out_tag },
 1098 } });
 1099}
1100
// Static registry of all lookup/permutation interactions the execution trace
// participates in, grouped by subsystem (fetching, addressing, gas, context,
// opcodes, subtrace dispatch). Entries taking C::gt_sel route through the
// greater-than gadget's selector.
// NOTE(review): this extraction is missing the declaration line (original
// line 1102, presumably `const InteractionDefinition ExecutionTraceBuilder::interactions =`)
// and many interleaved `.add<...>` entries (gaps in the embedded numbering,
// e.g. 1104, 1108, 1111, 1113, ...) — confirm the full list against the
// original source file.
 1103 // Execution specification (precomputed)
 1105 // Bytecode retrieval
 1106 .add<lookup_execution_bytecode_retrieval_result_settings, InteractionType::LookupGeneric>()
 1107 // Instruction fetching
 1109 .add<lookup_execution_instruction_fetching_body_settings, InteractionType::LookupGeneric>()
 1110 // Addressing
 1112 .add<lookup_addressing_relative_overflow_result_1_settings, InteractionType::LookupGeneric>(C::gt_sel)
 1114 .add<lookup_addressing_relative_overflow_result_3_settings, InteractionType::LookupGeneric>(C::gt_sel)
 1116 .add<lookup_addressing_relative_overflow_result_5_settings, InteractionType::LookupGeneric>(C::gt_sel)
 1118 // Internal Call Stack
 1119 .add<lookup_internal_call_push_call_stack_settings_, InteractionType::LookupSequential>()
 1121 // Gas
 1122 .add<lookup_gas_addressing_gas_read_settings, InteractionType::LookupIntoIndexedByClk>()
 1124 .add<lookup_gas_is_out_of_gas_da_settings, InteractionType::LookupGeneric>(C::gt_sel)
 1126 // Gas - ToRadix BE
 1127 .add<lookup_execution_check_radix_gt_256_settings, InteractionType::LookupGeneric>(C::gt_sel)
 1129 .add<lookup_execution_get_max_limbs_settings, InteractionType::LookupGeneric>(C::gt_sel)
 1130 // Dynamic Gas - SStore
 1132 // Context Stack
 1133 .add<lookup_context_ctx_stack_call_settings, InteractionType::LookupSequential>()
 1135 .add<lookup_context_ctx_stack_return_settings, InteractionType::LookupGeneric>()
 1136 // External Call
 1138 .add<lookup_external_call_call_is_da_gas_allocated_lt_left_settings, InteractionType::LookupGeneric>(C::gt_sel)
 1139 // GetEnvVar opcode
 1141 .add<lookup_get_env_var_read_from_public_inputs_col0_settings, InteractionType::LookupIntoIndexedByClk>()
 1143 // Sload opcode (cannot be sequential as public data tree check trace is sorted in tracegen)
 1144 .add<lookup_sload_storage_read_settings, InteractionType::LookupGeneric>()
 1145 // Sstore opcode
 1147 // NoteHashExists
 1148 .add<lookup_notehash_exists_note_hash_read_settings, InteractionType::LookupSequential>()
 1150 // NullifierExists opcode
 1151 .add<lookup_nullifier_exists_nullifier_exists_check_settings, InteractionType::LookupSequential>()
 1152 // EmitNullifier
 1154 // EmitNoteHash
 1155 .add<lookup_emit_notehash_notehash_tree_write_settings, InteractionType::LookupSequential>()
 1156 // L1ToL2MsgExists
 1158 C::gt_sel)
 1159 .add<lookup_l1_to_l2_message_exists_l1_to_l2_msg_read_settings, InteractionType::LookupSequential>()
 1160 // SendL2ToL1Msg
 1162 // Dispatching to other sub-traces
 1163 .add<lookup_execution_dispatch_to_alu_settings, InteractionType::LookupGeneric>()
 1165 .add<perm_execution_dispatch_to_cd_copy_settings, InteractionType::Permutation>()
 1167 .add<lookup_execution_dispatch_to_cast_settings, InteractionType::LookupGeneric>()
 1169 .add<perm_execution_dispatch_to_get_contract_instance_settings, InteractionType::Permutation>()
 1171 .add<perm_execution_dispatch_to_poseidon2_perm_settings, InteractionType::Permutation>()
 1173 .add<perm_execution_dispatch_to_keccakf1600_settings, InteractionType::Permutation>()
 1175 .add<perm_execution_dispatch_to_to_radix_settings, InteractionType::Permutation>();
 1176
1177} // namespace bb::avm2::tracegen
#define MEM_TAG_U32
#define AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX
#define AVM_MAX_OPERANDS
#define NOTE_HASH_TREE_LEAF_COUNT
#define L1_TO_L2_MSG_TREE_LEAF_COUNT
#define AVM_MAX_REGISTERS
#define MAX_L2_TO_L1_MSGS_PER_TX
#define MAX_NOTE_HASHES_PER_TX
#define MAX_NULLIFIERS_PER_TX
#define AVM_HIGHEST_MEM_ADDRESS
#define MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX
ValueTag get_tag() const
void process_execution_spec(const simulation::ExecutionEvent &ex_event, TraceContainer &trace, uint32_t row)
void process_instr_fetching(const simulation::Instruction &instruction, TraceContainer &trace, uint32_t row)
static const InteractionDefinition interactions
void process_get_env_var_opcode(simulation::Operand envvar_enum, MemoryValue output, TraceContainer &trace, uint32_t row)
void process_registers(ExecutionOpCode exec_opcode, const std::vector< MemoryValue > &inputs, const MemoryValue &output, std::span< MemoryValue > registers, TraceContainer &trace, uint32_t row)
void process_registers_write(ExecutionOpCode exec_opcode, TraceContainer &trace, uint32_t row)
void process_gas(const simulation::GasEvent &gas_event, ExecutionOpCode exec_opcode, TraceContainer &trace, uint32_t row)
void process(const simulation::EventEmitterInterface< simulation::ExecutionEvent >::Container &ex_events, TraceContainer &trace)
void process_addressing(const simulation::AddressingEvent &addr_event, const simulation::Instruction &instruction, TraceContainer &trace, uint32_t row)
static Table get_table(uint8_t envvar)
InteractionDefinition & add(auto &&... args)
void info(Args... args)
Definition log.hpp:75
TestTraceContainer trace
bool app_logic_failure
uint32_t app_logic_exit_context_id
bool teardown_failure
unordered_flat_set< uint32_t > does_context_fail
uint32_t teardown_exit_context_id
GasEvent gas_event
Instruction instruction
AvmProvingInputs inputs
Column get_dyn_gas_selector(uint32_t dyn_gas_id)
Get the column selector for a given dynamic gas ID.
const std::unordered_map< ExecutionOpCode, SubtraceInfo > & get_subtrace_info_map()
Column get_subtrace_selector(SubtraceSel subtrace_sel)
Get the column selector for a given subtrace selector.
FF get_subtrace_id(SubtraceSel subtrace_sel)
Get the subtrace ID for a given subtrace enum.
lookup_settings< lookup_get_env_var_read_from_public_inputs_col1_settings_ > lookup_get_env_var_read_from_public_inputs_col1_settings
lookup_settings< lookup_external_call_call_is_l2_gas_allocated_lt_left_settings_ > lookup_external_call_call_is_l2_gas_allocated_lt_left_settings
lookup_settings< lookup_execution_check_written_storage_slot_settings_ > lookup_execution_check_written_storage_slot_settings
lookup_settings< lookup_addressing_relative_overflow_result_2_settings_ > lookup_addressing_relative_overflow_result_2_settings
permutation_settings< perm_execution_dispatch_to_emit_unencrypted_log_settings_ > perm_execution_dispatch_to_emit_unencrypted_log_settings
lookup_settings< lookup_addressing_relative_overflow_result_4_settings_ > lookup_addressing_relative_overflow_result_4_settings
lookup_settings< lookup_execution_dyn_l2_factor_bitwise_settings_ > lookup_execution_dyn_l2_factor_bitwise_settings
bool is_operand_relative(uint16_t indirect_flag, size_t operand_index)
Definition addressing.hpp:8
lookup_settings< lookup_emit_nullifier_write_nullifier_settings_ > lookup_emit_nullifier_write_nullifier_settings
size_t get_p_limbs_per_radix_size(size_t radix)
Definition to_radix.cpp:54
lookup_settings< lookup_send_l2_to_l1_msg_write_l2_to_l1_msg_settings_ > lookup_send_l2_to_l1_msg_write_l2_to_l1_msg_settings
permutation_settings< perm_execution_dispatch_to_sha256_compression_settings_ > perm_execution_dispatch_to_sha256_compression_settings
lookup_settings< lookup_gas_is_out_of_gas_l2_settings_ > lookup_gas_is_out_of_gas_l2_settings
lookup_settings< lookup_execution_dispatch_to_set_settings_ > lookup_execution_dispatch_to_set_settings
lookup_settings< lookup_context_ctx_stack_rollback_settings_ > lookup_context_ctx_stack_rollback_settings
bool is_operand_indirect(uint16_t indirect_flag, size_t operand_index)
lookup_settings< lookup_execution_dispatch_to_bitwise_settings_ > lookup_execution_dispatch_to_bitwise_settings
lookup_settings< lookup_execution_get_p_limbs_settings_ > lookup_execution_get_p_limbs_settings
const std::unordered_map< ExecutionOpCode, ExecInstructionSpec > & get_exec_instruction_spec()
lookup_settings< lookup_execution_exec_spec_read_settings_ > lookup_execution_exec_spec_read_settings
lookup_settings< lookup_get_env_var_precomputed_info_settings_ > lookup_get_env_var_precomputed_info_settings
lookup_settings< lookup_addressing_relative_overflow_result_0_settings_ > lookup_addressing_relative_overflow_result_0_settings
lookup_settings< lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_settings_ > lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_settings
permutation_settings< perm_execution_dispatch_to_ecc_add_settings_ > perm_execution_dispatch_to_ecc_add_settings
lookup_settings< lookup_addressing_relative_overflow_result_6_settings_ > lookup_addressing_relative_overflow_result_6_settings
lookup_settings< lookup_execution_instruction_fetching_result_settings_ > lookup_execution_instruction_fetching_result_settings
lookup_settings< lookup_notehash_exists_note_hash_leaf_index_in_range_settings_ > lookup_notehash_exists_note_hash_leaf_index_in_range_settings
lookup_settings< lookup_sstore_record_written_storage_slot_settings_ > lookup_sstore_record_written_storage_slot_settings
AvmFlavorSettings::FF FF
Definition field.hpp:10
permutation_settings< perm_execution_dispatch_to_rd_copy_settings_ > perm_execution_dispatch_to_rd_copy_settings
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
std::vector< OperandResolutionInfo > resolution_info
ExecutionOpCode get_exec_opcode() const