Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
execution_trace.cpp
Go to the documentation of this file.
2
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <ranges>
#include <stdexcept>
#include <unordered_map>
#include <unordered_set>

#include <sys/types.h>
12
19
46
50
51namespace bb::avm2::tracegen {
52namespace {
53
54constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_COLUMNS = {
55 C::execution_op_0_, C::execution_op_1_, C::execution_op_2_, C::execution_op_3_,
56 C::execution_op_4_, C::execution_op_5_, C::execution_op_6_,
57};
58constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_IS_ADDRESS_COLUMNS = {
59 C::execution_sel_op_is_address_0_, C::execution_sel_op_is_address_1_, C::execution_sel_op_is_address_2_,
60 C::execution_sel_op_is_address_3_, C::execution_sel_op_is_address_4_, C::execution_sel_op_is_address_5_,
61 C::execution_sel_op_is_address_6_,
62};
63constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_AFTER_RELATIVE_COLUMNS = {
64 C::execution_op_after_relative_0_, C::execution_op_after_relative_1_, C::execution_op_after_relative_2_,
65 C::execution_op_after_relative_3_, C::execution_op_after_relative_4_, C::execution_op_after_relative_5_,
66 C::execution_op_after_relative_6_,
67};
68constexpr std::array<Column, AVM_MAX_OPERANDS> RESOLVED_OPERAND_COLUMNS = {
69 C::execution_rop_0_, C::execution_rop_1_, C::execution_rop_2_, C::execution_rop_3_,
70 C::execution_rop_4_, C::execution_rop_5_, C::execution_rop_6_,
71};
72constexpr std::array<Column, AVM_MAX_OPERANDS> RESOLVED_OPERAND_TAG_COLUMNS = {
73 C::execution_rop_tag_0_, C::execution_rop_tag_1_, C::execution_rop_tag_2_, C::execution_rop_tag_3_,
74 C::execution_rop_tag_4_, C::execution_rop_tag_5_, C::execution_rop_tag_6_,
75};
76constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_SHOULD_APPLY_INDIRECTION_COLUMNS = {
77 C::execution_sel_should_apply_indirection_0_, C::execution_sel_should_apply_indirection_1_,
78 C::execution_sel_should_apply_indirection_2_, C::execution_sel_should_apply_indirection_3_,
79 C::execution_sel_should_apply_indirection_4_, C::execution_sel_should_apply_indirection_5_,
80 C::execution_sel_should_apply_indirection_6_,
81};
82constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_RELATIVE_OVERFLOW_COLUMNS = {
83 C::execution_sel_relative_overflow_0_, C::execution_sel_relative_overflow_1_, C::execution_sel_relative_overflow_2_,
84 C::execution_sel_relative_overflow_3_, C::execution_sel_relative_overflow_4_, C::execution_sel_relative_overflow_5_,
85 C::execution_sel_relative_overflow_6_,
86};
87constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_IS_RELATIVE_EFFECTIVE_COLUMNS = {
88 C::execution_sel_op_is_relative_effective_0_, C::execution_sel_op_is_relative_effective_1_,
89 C::execution_sel_op_is_relative_effective_2_, C::execution_sel_op_is_relative_effective_3_,
90 C::execution_sel_op_is_relative_effective_4_, C::execution_sel_op_is_relative_effective_5_,
91 C::execution_sel_op_is_relative_effective_6_,
92};
93constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_RELATIVE_OOB_CHECK_DIFF_COLUMNS = {
94 C::execution_overflow_range_check_result_0_, C::execution_overflow_range_check_result_1_,
95 C::execution_overflow_range_check_result_2_, C::execution_overflow_range_check_result_3_,
96 C::execution_overflow_range_check_result_4_, C::execution_overflow_range_check_result_5_,
97 C::execution_overflow_range_check_result_6_,
98};
99
100constexpr size_t TOTAL_INDIRECT_BITS = 16;
101static_assert(AVM_MAX_OPERANDS * 2 <= TOTAL_INDIRECT_BITS);
102constexpr std::array<Column, TOTAL_INDIRECT_BITS / 2> OPERAND_IS_RELATIVE_WIRE_COLUMNS = {
103 C::execution_sel_op_is_relative_wire_0_, C::execution_sel_op_is_relative_wire_1_,
104 C::execution_sel_op_is_relative_wire_2_, C::execution_sel_op_is_relative_wire_3_,
105 C::execution_sel_op_is_relative_wire_4_, C::execution_sel_op_is_relative_wire_5_,
106 C::execution_sel_op_is_relative_wire_6_, C::execution_sel_op_is_relative_wire_7_,
107
108};
109constexpr std::array<Column, TOTAL_INDIRECT_BITS / 2> OPERAND_IS_INDIRECT_WIRE_COLUMNS = {
110 C::execution_sel_op_is_indirect_wire_0_, C::execution_sel_op_is_indirect_wire_1_,
111 C::execution_sel_op_is_indirect_wire_2_, C::execution_sel_op_is_indirect_wire_3_,
112 C::execution_sel_op_is_indirect_wire_4_, C::execution_sel_op_is_indirect_wire_5_,
113 C::execution_sel_op_is_indirect_wire_6_, C::execution_sel_op_is_indirect_wire_7_,
114};
115
116constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_COLUMNS = {
117 C::execution_register_0_, C::execution_register_1_, C::execution_register_2_, C::execution_register_3_,
118 C::execution_register_4_, C::execution_register_5_, C::execution_register_6_,
119};
120constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_MEM_TAG_COLUMNS = {
121 C::execution_mem_tag_reg_0_, C::execution_mem_tag_reg_1_, C::execution_mem_tag_reg_2_, C::execution_mem_tag_reg_3_,
122 C::execution_mem_tag_reg_4_, C::execution_mem_tag_reg_5_, C::execution_mem_tag_reg_6_,
123};
124constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_IS_WRITE_COLUMNS = {
125 C::execution_rw_reg_0_, C::execution_rw_reg_1_, C::execution_rw_reg_2_, C::execution_rw_reg_3_,
126 C::execution_rw_reg_4_, C::execution_rw_reg_5_, C::execution_rw_reg_6_,
127};
128constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_MEM_OP_COLUMNS = {
129 C::execution_sel_mem_op_reg_0_, C::execution_sel_mem_op_reg_1_, C::execution_sel_mem_op_reg_2_,
130 C::execution_sel_mem_op_reg_3_, C::execution_sel_mem_op_reg_4_, C::execution_sel_mem_op_reg_5_,
131 C::execution_sel_mem_op_reg_6_,
132};
133constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_EXPECTED_TAG_COLUMNS = {
134 C::execution_expected_tag_reg_0_, C::execution_expected_tag_reg_1_, C::execution_expected_tag_reg_2_,
135 C::execution_expected_tag_reg_3_, C::execution_expected_tag_reg_4_, C::execution_expected_tag_reg_5_,
136 C::execution_expected_tag_reg_6_,
137};
138constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_TAG_CHECK_COLUMNS = {
139 C::execution_sel_tag_check_reg_0_, C::execution_sel_tag_check_reg_1_, C::execution_sel_tag_check_reg_2_,
140 C::execution_sel_tag_check_reg_3_, C::execution_sel_tag_check_reg_4_, C::execution_sel_tag_check_reg_5_,
141 C::execution_sel_tag_check_reg_6_,
142};
143constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_OP_REG_EFFECTIVE_COLUMNS = {
144 C::execution_sel_op_reg_effective_0_, C::execution_sel_op_reg_effective_1_, C::execution_sel_op_reg_effective_2_,
145 C::execution_sel_op_reg_effective_3_, C::execution_sel_op_reg_effective_4_, C::execution_sel_op_reg_effective_5_,
146 C::execution_sel_op_reg_effective_6_,
147};
148
156Column get_execution_opcode_selector(ExecutionOpCode exec_opcode)
157{
158 switch (exec_opcode) {
160 return C::execution_sel_execute_get_env_var;
162 return C::execution_sel_execute_mov;
164 return C::execution_sel_execute_jump;
166 return C::execution_sel_execute_jumpi;
168 return C::execution_sel_execute_call;
170 return C::execution_sel_execute_static_call;
172 return C::execution_sel_execute_internal_call;
174 return C::execution_sel_execute_internal_return;
176 return C::execution_sel_execute_return;
178 return C::execution_sel_execute_revert;
180 return C::execution_sel_execute_success_copy;
182 return C::execution_sel_execute_returndata_size;
184 return C::execution_sel_execute_debug_log;
186 return C::execution_sel_execute_sload;
188 return C::execution_sel_execute_sstore;
190 return C::execution_sel_execute_notehash_exists;
192 return C::execution_sel_execute_emit_notehash;
194 return C::execution_sel_execute_l1_to_l2_message_exists;
196 return C::execution_sel_execute_nullifier_exists;
198 return C::execution_sel_execute_emit_nullifier;
200 return C::execution_sel_execute_send_l2_to_l1_msg;
201 default:
202 throw std::runtime_error("Execution opcode does not have a corresponding selector");
203 }
204}
205
/**
 * @brief Aggregated failure information gathered in a preprocessing pass over
 *        the execution events, used to derive per-row "discard" flags.
 */
struct FailingContexts {
    // Whether the top-level app-logic / teardown enqueued call exited with a failure.
    bool app_logic_failure = false;
    bool teardown_failure = false;
    // Context ids of the top-level contexts that exited each phase (0 if none seen).
    uint32_t app_logic_exit_context_id = 0;
    uint32_t teardown_exit_context_id = 0;
    // Ids of every context that ends in failure.
    std::unordered_set<uint32_t> does_context_fail;
};
216
228FailingContexts preprocess_for_discard(
230{
231 FailingContexts dying_info;
232
233 // Preprocessing pass 1: find the events that exit the app logic and teardown phases
234 for (const auto& ex_event : ex_events) {
235 bool is_exit = ex_event.is_exit();
236 bool is_top_level = ex_event.after_context_event.parent_id == 0;
237
238 if (is_exit && is_top_level) {
239 // TODO(dbanks12): confirm this should be after_context_event and not before_context_event
240 if (ex_event.after_context_event.phase == TransactionPhase::APP_LOGIC) {
241 dying_info.app_logic_failure = ex_event.is_failure();
242 dying_info.app_logic_exit_context_id = ex_event.after_context_event.id;
243 } else if (ex_event.after_context_event.phase == TransactionPhase::TEARDOWN) {
244 dying_info.teardown_failure = ex_event.is_failure();
245 dying_info.teardown_exit_context_id = ex_event.after_context_event.id;
246 break; // Teardown is the last phase we care about
247 }
248 }
249 }
250
251 // Preprocessing pass 2: find all contexts that fail and mark them
252 for (const auto& ex_event : ex_events) {
253 if (ex_event.is_failure()) {
254 dying_info.does_context_fail.insert(ex_event.after_context_event.id);
255 }
256 }
257
258 return dying_info;
259}
260
268bool is_phase_discarded(TransactionPhase phase, const FailingContexts& failures)
269{
270 // Note that app logic also gets discarded if teardown failures
271 return (phase == TransactionPhase::APP_LOGIC && (failures.app_logic_failure || failures.teardown_failure)) ||
272 (phase == TransactionPhase::TEARDOWN && failures.teardown_failure);
273}
274
282uint32_t dying_context_for_phase(TransactionPhase phase, const FailingContexts& failures)
283{
284 assert((phase == TransactionPhase::APP_LOGIC || phase == TransactionPhase::TEARDOWN) &&
285 "Execution events must have app logic or teardown phase");
286
287 switch (phase) {
289 // Note that app logic also gets discarded if teardown failures
290 return failures.app_logic_failure ? failures.app_logic_exit_context_id
291 : failures.teardown_failure ? failures.teardown_exit_context_id
292 : 0;
294 return failures.teardown_failure ? failures.teardown_exit_context_id : 0;
295 default:
296 __builtin_unreachable(); // tell the compiler "we never reach here"
297 }
298}
299
300} // namespace
301
304{
305 uint32_t row = 1; // We start from row 1 because this trace contains shifted columns.
306
307 // Preprocess events to determine which contexts will fail
308 FailingContexts failures = preprocess_for_discard(ex_events);
309
310 uint32_t last_seen_parent_id = 0;
311 FF cached_parent_id_inv = 0;
312
313 // Some variables updated per loop iteration to track
314 // whether or not the upcoming row should "discard" [side effects].
315 uint32_t discard = 0;
316 uint32_t dying_context_id = 0;
317 FF dying_context_id_inv = 0;
318 bool is_first_event_in_enqueued_call = true;
319 bool prev_row_was_enter_call = false;
320
321 for (const auto& ex_event : ex_events) {
322 // Check if this is the first event in an enqueued call and whether
323 // the phase should be discarded
324 if (discard == 0 && is_first_event_in_enqueued_call &&
325 is_phase_discarded(ex_event.after_context_event.phase, failures)) {
326 discard = 1;
327 dying_context_id = dying_context_for_phase(ex_event.after_context_event.phase, failures);
328 dying_context_id_inv = FF(dying_context_id).invert();
329 }
330
331 // Cache the parent id inversion since we will repeatedly just be doing the same expensive inversion
332 bool has_parent = ex_event.after_context_event.parent_id != 0;
333 if (last_seen_parent_id != ex_event.after_context_event.parent_id) {
334 last_seen_parent_id = ex_event.after_context_event.parent_id;
335 cached_parent_id_inv = has_parent ? FF(ex_event.after_context_event.parent_id).invert() : 0;
336 }
337
338 /**************************************************************************************************
339 * Setup.
340 **************************************************************************************************/
341
342 trace.set(
343 row,
344 { {
345 { C::execution_sel, 1 },
346 // Selectors that indicate "dispatch" from tx trace
347 // Note: Enqueued Call End is determined during the opcode execution temporality group
348 { C::execution_enqueued_call_start, is_first_event_in_enqueued_call ? 1 : 0 },
349 // Context
350 { C::execution_context_id, ex_event.after_context_event.id },
351 { C::execution_parent_id, ex_event.after_context_event.parent_id },
352 { C::execution_pc, ex_event.before_context_event.pc },
353 { C::execution_msg_sender, ex_event.after_context_event.msg_sender },
354 { C::execution_contract_address, ex_event.after_context_event.contract_addr },
355 { C::execution_transaction_fee, ex_event.after_context_event.transaction_fee },
356 { C::execution_is_static, ex_event.after_context_event.is_static },
357 { C::execution_parent_calldata_addr, ex_event.after_context_event.parent_cd_addr },
358 { C::execution_parent_calldata_size, ex_event.after_context_event.parent_cd_size },
359 { C::execution_last_child_returndata_addr, ex_event.after_context_event.last_child_rd_addr },
360 { C::execution_last_child_returndata_size, ex_event.after_context_event.last_child_rd_size },
361 { C::execution_last_child_success, ex_event.after_context_event.last_child_success },
362 { C::execution_last_child_id, ex_event.after_context_event.last_child_id },
363 { C::execution_l2_gas_limit, ex_event.after_context_event.gas_limit.l2Gas },
364 { C::execution_da_gas_limit, ex_event.after_context_event.gas_limit.daGas },
365 { C::execution_l2_gas_used, ex_event.after_context_event.gas_used.l2Gas },
366 { C::execution_da_gas_used, ex_event.after_context_event.gas_used.daGas },
367 { C::execution_parent_l2_gas_limit, ex_event.after_context_event.parent_gas_limit.l2Gas },
368 { C::execution_parent_da_gas_limit, ex_event.after_context_event.parent_gas_limit.daGas },
369 { C::execution_parent_l2_gas_used, ex_event.after_context_event.parent_gas_used.l2Gas },
370 { C::execution_parent_da_gas_used, ex_event.after_context_event.parent_gas_used.daGas },
371 { C::execution_next_context_id, ex_event.next_context_id },
372 // Context - gas.
373 { C::execution_prev_l2_gas_used, ex_event.before_context_event.gas_used.l2Gas },
374 { C::execution_prev_da_gas_used, ex_event.before_context_event.gas_used.daGas },
375 // Context - tree states
376 // Context - tree states - Written public data slots tree
377 { C::execution_prev_written_public_data_slots_tree_root,
378 ex_event.before_context_event.written_public_data_slots_tree_snapshot.root },
379 { C::execution_prev_written_public_data_slots_tree_size,
380 ex_event.before_context_event.written_public_data_slots_tree_snapshot.nextAvailableLeafIndex },
381 { C::execution_written_public_data_slots_tree_root,
382 ex_event.after_context_event.written_public_data_slots_tree_snapshot.root },
383 { C::execution_written_public_data_slots_tree_size,
384 ex_event.after_context_event.written_public_data_slots_tree_snapshot.nextAvailableLeafIndex },
385 { C::execution_prev_public_data_tree_root,
386 ex_event.before_context_event.tree_states.publicDataTree.tree.root },
387 { C::execution_prev_public_data_tree_size,
388 ex_event.before_context_event.tree_states.publicDataTree.tree.nextAvailableLeafIndex },
389 // Context - tree states - Nullifier tree
390 { C::execution_prev_nullifier_tree_root,
391 ex_event.before_context_event.tree_states.nullifierTree.tree.root },
392 { C::execution_prev_nullifier_tree_size,
393 ex_event.before_context_event.tree_states.nullifierTree.tree.nextAvailableLeafIndex },
394 { C::execution_prev_num_nullifiers_emitted,
395 ex_event.before_context_event.tree_states.nullifierTree.counter },
396 { C::execution_nullifier_tree_root, ex_event.after_context_event.tree_states.nullifierTree.tree.root },
397 { C::execution_nullifier_tree_size,
398 ex_event.after_context_event.tree_states.nullifierTree.tree.nextAvailableLeafIndex },
399 { C::execution_num_nullifiers_emitted, ex_event.after_context_event.tree_states.nullifierTree.counter },
400 // Context - tree states - Public data tree
401 { C::execution_public_data_tree_root,
402 ex_event.after_context_event.tree_states.publicDataTree.tree.root },
403 { C::execution_public_data_tree_size,
404 ex_event.after_context_event.tree_states.publicDataTree.tree.nextAvailableLeafIndex },
405 // Context - tree states - Note hash tree
406 { C::execution_prev_note_hash_tree_root,
407 ex_event.before_context_event.tree_states.noteHashTree.tree.root },
408 { C::execution_prev_note_hash_tree_size,
409 ex_event.before_context_event.tree_states.noteHashTree.tree.nextAvailableLeafIndex },
410 { C::execution_prev_num_note_hashes_emitted,
411 ex_event.before_context_event.tree_states.noteHashTree.counter },
412 { C::execution_note_hash_tree_root, ex_event.after_context_event.tree_states.noteHashTree.tree.root },
413 { C::execution_note_hash_tree_size,
414 ex_event.after_context_event.tree_states.noteHashTree.tree.nextAvailableLeafIndex },
415 { C::execution_num_note_hashes_emitted, ex_event.after_context_event.tree_states.noteHashTree.counter },
416 // Context - tree states - L1 to L2 message tree
417 { C::execution_l1_l2_tree_root, ex_event.after_context_event.tree_states.l1ToL2MessageTree.tree.root },
418 // Context - side effects
419 { C::execution_prev_num_unencrypted_logs,
420 ex_event.before_context_event.side_effect_states.numUnencryptedLogs },
421 { C::execution_num_unencrypted_logs,
422 ex_event.after_context_event.side_effect_states.numUnencryptedLogs },
423 { C::execution_prev_num_l2_to_l1_messages,
424 ex_event.before_context_event.side_effect_states.numL2ToL1Messages },
425 { C::execution_num_l2_to_l1_messages,
426 ex_event.after_context_event.side_effect_states.numL2ToL1Messages },
427 // Other.
428 { C::execution_bytecode_id, ex_event.before_context_event.bytecode_id },
429 // Helpers for identifying parent context
430 { C::execution_has_parent_ctx, has_parent ? 1 : 0 },
431 { C::execution_is_parent_id_inv, cached_parent_id_inv },
432 } });
433
434 // Internal stack
435 trace.set(row,
436 { {
437 { C::execution_internal_call_id, ex_event.before_context_event.internal_call_id },
438 { C::execution_internal_call_return_id, ex_event.before_context_event.internal_call_return_id },
439 { C::execution_next_internal_call_id, ex_event.before_context_event.next_internal_call_id },
440 } });
441
442 /**************************************************************************************************
443 * Temporality group 1: Bytecode retrieval.
444 **************************************************************************************************/
445
446 bool bytecode_retrieval_failed = ex_event.error == ExecutionError::BYTECODE_NOT_FOUND;
447 trace.set(row,
448 { {
449 { C::execution_sel_bytecode_retrieval_failure, bytecode_retrieval_failed ? 1 : 0 },
450 { C::execution_sel_bytecode_retrieval_success, !bytecode_retrieval_failed ? 1 : 0 },
451 { C::execution_bytecode_id, ex_event.before_context_event.bytecode_id },
452 } });
453
454 /**************************************************************************************************
455 * Temporality group 2: Instruction fetching.
456 **************************************************************************************************/
457
458 // This will only have a value if instruction fetching succeeded.
460 bool process_instruction_fetching = !bytecode_retrieval_failed;
461 bool instruction_fetching_failed = ex_event.error == ExecutionError::INSTRUCTION_FETCHING;
462 trace.set(C::execution_sel_instruction_fetching_failure, row, instruction_fetching_failed ? 1 : 0);
463 if (process_instruction_fetching && !instruction_fetching_failed) {
464 exec_opcode = ex_event.wire_instruction.get_exec_opcode();
465 process_instr_fetching(ex_event.wire_instruction, trace, row);
466 // If we fetched an instruction successfully, we can set the next PC.
467 trace.set(row,
468 { {
469 { C::execution_next_pc,
470 ex_event.before_context_event.pc + ex_event.wire_instruction.size_in_bytes() },
471 } });
472 }
473
474 /**************************************************************************************************
475 * Temporality group 2: Mapping from wire to execution and addressing.
476 **************************************************************************************************/
477
478 // Along this function we need to set the info we get from the EXEC_SPEC_READ lookup.
479 bool should_read_exec_spec = process_instruction_fetching && !instruction_fetching_failed;
480 if (should_read_exec_spec) {
481 process_execution_spec(ex_event, trace, row);
482 }
483
484 bool should_resolve_address = should_read_exec_spec;
485 // pol SEL_SHOULD_RESOLVE_ADDRESS = sel_bytecode_retrieval_success * sel_instruction_fetching_success;
486 if (should_resolve_address) {
487 process_addressing(ex_event.addressing_event, ex_event.wire_instruction, trace, row);
488 }
489 bool addressing_failed = ex_event.error == ExecutionError::ADDRESSING;
490
491 /**************************************************************************************************
492 * Temporality group 3: Registers read.
493 **************************************************************************************************/
494
495 // Note that if addressing did not fail, register reading will not fail.
497 std::fill(registers.begin(), registers.end(), TaggedValue::from<FF>(0));
498 bool should_process_registers = should_resolve_address && !addressing_failed;
499 bool register_processing_failed = ex_event.error == ExecutionError::REGISTER_READ;
500 if (should_process_registers) {
501 process_registers(*exec_opcode, ex_event.inputs, ex_event.output, registers, trace, row);
502 }
503
504 /**************************************************************************************************
505 * Temporality group 4: Gas (both base and dynamic).
506 **************************************************************************************************/
507
508 bool should_check_gas = should_process_registers && !register_processing_failed;
509 bool oog = ex_event.error == ExecutionError::GAS;
510 trace.set(C::execution_sel_should_check_gas, row, should_check_gas ? 1 : 0);
511 if (should_check_gas) {
512 process_gas(ex_event.gas_event, *exec_opcode, trace, row);
513 // todo(ilyas): this is a bad place to do this, but we need the register information to compute dyn gas
514 // factor. process_gas does not have access to it and nor should it.
515 if (*exec_opcode == ExecutionOpCode::TORADIXBE) {
516 uint32_t radix = ex_event.inputs[1].as<uint32_t>(); // Safe since already tag checked
517 uint32_t num_limbs = ex_event.inputs[2].as<uint32_t>(); // Safe since already tag checked
518 uint32_t num_p_limbs = radix > 256 ? 32 : static_cast<uint32_t>(get_p_limbs_per_radix()[radix].size());
519 trace.set(row,
520 { {
521 // To Radix BE Dynamic Gas
522 { C::execution_two_five_six, 256 },
523 { C::execution_sel_radix_gt_256, radix > 256 ? 1 : 0 },
524 { C::execution_sel_lookup_num_p_limbs, radix <= 256 ? 1 : 0 },
525 { C::execution_num_p_limbs, num_p_limbs },
526 { C::execution_sel_use_num_limbs, num_limbs > num_p_limbs ? 1 : 0 },
527 // Don't set dyn gas factor here since already set in process_gas
528 } });
529 } else if (exec_opcode == ExecutionOpCode::EMITUNENCRYPTEDLOG) {
530 trace.set(C::execution_dynamic_da_gas_factor, row, registers[1].as<uint32_t>());
531 }
532 }
533
534 /**************************************************************************************************
535 * Temporality group 5: Opcode execution.
536 **************************************************************************************************/
537
538 // TODO(ilyas): This can possibly be gated with some boolean but I'm not sure what is going on.
539 // TODO: this needs a refactor and is most likely wrong.
540
541 // Overly verbose but maximising readibility here
542 // FIXME(ilyas): We currently cannot move this into the if statement because they are used outside of this
543 // temporality group (e.g. in recomputing discard)
544 bool should_execute_opcode = should_check_gas && !oog;
545 bool should_execute_call =
546 should_execute_opcode && exec_opcode.has_value() && *exec_opcode == ExecutionOpCode::CALL;
547 bool should_execute_static_call =
548 should_execute_opcode && exec_opcode.has_value() && *exec_opcode == ExecutionOpCode::STATICCALL;
549 bool should_execute_return =
550 should_execute_opcode && exec_opcode.has_value() && *exec_opcode == ExecutionOpCode::RETURN;
551 bool should_execute_revert =
552 should_execute_opcode && exec_opcode.has_value() && *exec_opcode == ExecutionOpCode::REVERT;
553
554 bool is_err = ex_event.error != ExecutionError::NONE;
555 bool is_failure = should_execute_revert || is_err;
556 bool sel_enter_call = should_execute_call || should_execute_static_call;
557 // TODO: would is_err here catch any error at the opcode execution step which we dont want to consider?
558 bool sel_exit_call = should_execute_return || should_execute_revert || is_err;
559
560 if (sel_exit_call) {
561 // We rollback if we revert or error and we have a parent context.
562 trace.set(row,
563 { {
564 // Exit reason - opcode or error
565 { C::execution_sel_execute_return, should_execute_return ? 1 : 0 },
566 { C::execution_sel_execute_revert, should_execute_revert ? 1 : 0 },
567 { C::execution_sel_exit_call, sel_exit_call ? 1 : 0 },
568 { C::execution_nested_return, should_execute_return && has_parent ? 1 : 0 },
569 // Enqueued or nested exit dependent on if we are a child context
570 { C::execution_enqueued_call_end, !has_parent ? 1 : 0 },
571 { C::execution_nested_exit_call, has_parent ? 1 : 0 },
572 } });
573 }
574
575 bool opcode_execution_failed = ex_event.error == ExecutionError::OPCODE_EXECUTION;
576 if (should_execute_opcode) {
577 // At this point we can assume instruction fetching succeeded, so this should never fail.
578 const auto& dispatch_to_subtrace = SUBTRACE_INFO_MAP.at(*exec_opcode);
579 trace.set(row,
580 { {
581 { C::execution_sel_should_execute_opcode, 1 },
582 { C::execution_sel_opcode_error, opcode_execution_failed ? 1 : 0 },
583 { get_subtrace_selector(dispatch_to_subtrace.subtrace_selector), 1 },
584 } });
585
586 // Execution Trace opcodes - separating for clarity
587 if (dispatch_to_subtrace.subtrace_selector == SubtraceSel::EXECUTION) {
588 trace.set(get_execution_opcode_selector(*exec_opcode), row, 1);
589 }
590
591 // Call specific logic
592 if (sel_enter_call) {
593 Gas gas_left = ex_event.after_context_event.gas_limit - ex_event.after_context_event.gas_used;
594
595 uint32_t allocated_l2_gas = registers[0].as<uint32_t>();
596 bool is_l2_gas_allocated_lt_left = allocated_l2_gas < gas_left.l2Gas;
597 uint32_t allocated_left_l2_cmp_diff = is_l2_gas_allocated_lt_left
598 ? gas_left.l2Gas - allocated_l2_gas - 1
599 : allocated_l2_gas - gas_left.l2Gas;
600
601 uint32_t allocated_da_gas = registers[1].as<uint32_t>();
602 bool is_da_gas_allocated_lt_left = allocated_da_gas < gas_left.daGas;
603 uint32_t allocated_left_da_cmp_diff = is_da_gas_allocated_lt_left
604 ? gas_left.daGas - allocated_da_gas - 1
605 : allocated_da_gas - gas_left.daGas;
606
607 trace.set(row,
608 { {
609 { C::execution_sel_enter_call, sel_enter_call ? 1 : 0 },
610 { C::execution_sel_execute_call, should_execute_call ? 1 : 0 },
611 { C::execution_sel_execute_static_call, should_execute_static_call ? 1 : 0 },
612 { C::execution_constant_32, 32 },
613 { C::execution_call_is_l2_gas_allocated_lt_left, is_l2_gas_allocated_lt_left },
614 { C::execution_call_allocated_left_l2_cmp_diff, allocated_left_l2_cmp_diff },
615 { C::execution_call_is_da_gas_allocated_lt_left, is_da_gas_allocated_lt_left },
616 { C::execution_call_allocated_left_da_cmp_diff, allocated_left_da_cmp_diff },
617 } });
618 }
619 // Separate if-statement for opcodes.
620 // This cannot be an else-if chained to the above,
621 // because `sel_exit_call` can happen on any opcode
622 // and we still need to tracegen the opcode-specific logic.
623 if (exec_opcode == ExecutionOpCode::GETENVVAR) {
624 assert(ex_event.addressing_event.resolution_info.size() == 2 &&
625 "GETENVVAR should have exactly two resolved operands (envvar enum and output)");
626 // rop[1] is the envvar enum
627 TaggedValue envvar_enum = ex_event.addressing_event.resolution_info[1].resolved_operand;
628 process_get_env_var_opcode(envvar_enum, ex_event.output, trace, row);
629 } else if (exec_opcode == ExecutionOpCode::INTERNALRETURN) {
630 trace.set(C::execution_internal_call_return_id_inv,
631 row,
632 ex_event.before_context_event.internal_call_return_id != 0
633 ? FF(ex_event.before_context_event.internal_call_return_id).invert()
634 : 0);
635 } else if (exec_opcode == ExecutionOpCode::SSTORE) {
636 uint32_t remaining_data_writes = MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX -
637 ex_event.before_context_event.tree_states.publicDataTree.counter;
638
639 trace.set(row,
640 { {
641 { C::execution_max_data_writes_reached, remaining_data_writes == 0 },
642 { C::execution_remaining_data_writes_inv,
643 remaining_data_writes == 0 ? 0 : FF(remaining_data_writes).invert() },
644 { C::execution_sel_write_public_data, !opcode_execution_failed },
645 } });
646 } else if (exec_opcode == ExecutionOpCode::NOTEHASHEXISTS) {
647 uint64_t leaf_index = registers[1].as<uint64_t>();
648 uint64_t note_hash_tree_leaf_count = NOTE_HASH_TREE_LEAF_COUNT;
649 bool note_hash_leaf_in_range = leaf_index < note_hash_tree_leaf_count;
650
651 trace.set(row,
652 { {
653 { C::execution_note_hash_leaf_in_range, note_hash_leaf_in_range },
654 { C::execution_note_hash_tree_leaf_count, FF(note_hash_tree_leaf_count) },
655 } });
656 } else if (exec_opcode == ExecutionOpCode::EMITNOTEHASH) {
657 uint32_t remaining_note_hashes =
658 MAX_NOTE_HASHES_PER_TX - ex_event.before_context_event.tree_states.noteHashTree.counter;
659
660 trace.set(row,
661 { {
662 { C::execution_sel_reached_max_note_hashes, remaining_note_hashes == 0 },
663 { C::execution_remaining_note_hashes_inv,
664 remaining_note_hashes == 0 ? 0 : FF(remaining_note_hashes).invert() },
665 { C::execution_sel_write_note_hash, !opcode_execution_failed },
666 } });
667 } else if (exec_opcode == ExecutionOpCode::L1TOL2MSGEXISTS) {
668 uint64_t leaf_index = registers[1].as<uint64_t>();
669 uint64_t l1_to_l2_msg_tree_leaf_count = L1_TO_L2_MSG_TREE_LEAF_COUNT;
670 bool l1_to_l2_msg_leaf_in_range = leaf_index < l1_to_l2_msg_tree_leaf_count;
671
672 trace.set(row,
673 { {
674 { C::execution_l1_to_l2_msg_leaf_in_range, l1_to_l2_msg_leaf_in_range },
675 { C::execution_l1_to_l2_msg_tree_leaf_count, FF(l1_to_l2_msg_tree_leaf_count) },
676 } });
677 //} else if (exec_opcode == ExecutionOpCode::NULLIFIEREXISTS) {
678 // no custom columns!
679 } else if (exec_opcode == ExecutionOpCode::EMITNULLIFIER) {
680 uint32_t remaining_nullifiers =
681 MAX_NULLIFIERS_PER_TX - ex_event.before_context_event.tree_states.nullifierTree.counter;
682
683 trace.set(row,
684 { {
685 { C::execution_sel_reached_max_nullifiers, remaining_nullifiers == 0 },
686 { C::execution_remaining_nullifiers_inv,
687 remaining_nullifiers == 0 ? 0 : FF(remaining_nullifiers).invert() },
688 { C::execution_sel_write_nullifier,
689 remaining_nullifiers != 0 && !ex_event.before_context_event.is_static },
690 } });
691 } else if (exec_opcode == ExecutionOpCode::SENDL2TOL1MSG) {
692 uint32_t remaining_l2_to_l1_msgs =
693 MAX_L2_TO_L1_MSGS_PER_TX - ex_event.before_context_event.side_effect_states.numL2ToL1Messages;
694
695 trace.set(row,
696 { { { C::execution_sel_l2_to_l1_msg_limit_error, remaining_l2_to_l1_msgs == 0 },
697 { C::execution_remaining_l2_to_l1_msgs_inv,
698 remaining_l2_to_l1_msgs == 0 ? 0 : FF(remaining_l2_to_l1_msgs).invert() },
699 { C::execution_sel_write_l2_to_l1_msg, !opcode_execution_failed && !discard },
700 {
701 C::execution_public_inputs_index,
703 ex_event.before_context_event.side_effect_states.numL2ToL1Messages,
704 } } });
705 }
706 }
707
708 /**************************************************************************************************
709 * Temporality group 6: Register write.
710 **************************************************************************************************/
711
712 bool should_process_register_write = should_execute_opcode && !opcode_execution_failed;
713 if (should_process_register_write) {
714 process_registers_write(*exec_opcode, trace, row);
715 }
716
717 /**************************************************************************************************
718 * Discarding.
719 **************************************************************************************************/
720
721 bool is_dying_context = discard == 1 && (ex_event.after_context_event.id == dying_context_id);
722
723 // Need to generate the item below for checking "is dying context" in circuit
724 FF dying_context_diff_inv = 0;
725 if (!is_dying_context) {
726 // Compute inversion when context_id != dying_context_id
727 FF diff = FF(ex_event.after_context_event.id) - FF(dying_context_id);
728 if (!diff.is_zero()) {
729 dying_context_diff_inv = diff.invert();
730 }
731 }
732
733 // Needed for bc retrieval
734 bool sel_first_row_in_context = prev_row_was_enter_call || is_first_event_in_enqueued_call;
735
736 bool enqueued_call_end = sel_exit_call && !has_parent;
737 bool resolves_dying_context = is_failure && is_dying_context;
738 bool nested_call_rom_undiscarded_context = sel_enter_call && discard == 0;
739 bool propagate_discard = !enqueued_call_end && !resolves_dying_context && !nested_call_rom_undiscarded_context;
740
741 // This is here instead of guarded by `should_execute_opcode` because is_err is a higher level error
742 // than just an opcode error (i.e., it is on if there are any errors in any temporality group).
743 bool rollback_context = (should_execute_revert || is_err) && has_parent;
744
745 trace.set(
746 row,
747 { {
748
749 // sel_exit_call and rollback has to be set here because they include sel_error
750 { C::execution_sel_exit_call, sel_exit_call ? 1 : 0 },
751 { C::execution_rollback_context, rollback_context ? 1 : 0 },
752 { C::execution_sel_error, is_err ? 1 : 0 },
753 { C::execution_sel_failure, is_failure ? 1 : 0 },
754 { C::execution_discard, discard },
755 { C::execution_dying_context_id, dying_context_id },
756 { C::execution_dying_context_id_inv, dying_context_id_inv },
757 { C::execution_is_dying_context, is_dying_context ? 1 : 0 },
758 { C::execution_dying_context_diff_inv, dying_context_diff_inv },
759 { C::execution_enqueued_call_end, enqueued_call_end ? 1 : 0 },
760 { C::execution_sel_first_row_in_context, sel_first_row_in_context ? 1 : 0 },
761 { C::execution_resolves_dying_context, resolves_dying_context ? 1 : 0 },
762 { C::execution_nested_call_from_undiscarded_context, nested_call_rom_undiscarded_context ? 1 : 0 },
763 { C::execution_propagate_discard, propagate_discard ? 1 : 0 },
764 } });
765
766 // Trace-generation is done for this event.
767 // Now, use this event to determine whether we should set/reset the discard flag for the NEXT event
768 bool event_kills_dying_context =
769 discard == 1 && is_failure && ex_event.after_context_event.id == dying_context_id;
770
771 if (event_kills_dying_context) {
772 // Set/unset discard flag if the current event is the one that kills the dying context
773 dying_context_id = 0;
774 dying_context_id_inv = 0;
775 discard = 0;
776 } else if (sel_enter_call && discard == 0 && !is_err &&
777 failures.does_context_fail.contains(ex_event.next_context_id)) {
778 // If making a nested call, and discard isn't already high...
779 // if the nested context being entered eventually dies, raise discard flag and remember which
780 // context is dying. NOTE: if a [STATIC]CALL instruction _itself_ errors, we don't set the
781 // discard flag because we aren't actually entering a new context!
782 dying_context_id = ex_event.next_context_id;
783 dying_context_id_inv = FF(dying_context_id).invert();
784 discard = 1;
785 }
786 // Otherwise, we aren't entering or exiting a dying context,
787 // so just propagate discard and dying context.
788 // Implicit: dying_context_id = dying_context_id; discard = discard;
789
790 // If an enqueued call just exited, next event (if any) is the first in an enqueued call.
791 // Update flag for next iteration.
792 is_first_event_in_enqueued_call = ex_event.after_context_event.parent_id == 0 && sel_exit_call;
793
794 // Track this bool for use determining whether the next row is the first in a context
795 prev_row_was_enter_call = sel_enter_call;
796
797 row++;
798 }
799
800 if (!ex_events.empty()) {
801 trace.set(C::execution_last, row - 1, 1);
802 }
803}
804
// NOTE(review): doxygen-listing scrape — the signature head (original lines 805-806)
// was dropped. Per this page's cross-reference index the full signature is:
//   void process_instr_fetching(const simulation::Instruction& instruction,
//                               TraceContainer& trace, uint32_t row)
// Fills the instruction-fetching columns of `row`: flags the fetch as successful,
// records the execution opcode, the wire indirect bitmap and the encoded instruction
// length, then writes all AVM_MAX_OPERANDS operand columns (zero-padded).
 807 uint32_t row)
 808{
 809 trace.set(row,
 810 { {
 811 { C::execution_sel_instruction_fetching_success, 1 },
 812 { C::execution_ex_opcode, static_cast<uint8_t>(instruction.get_exec_opcode()) },
 813 { C::execution_indirect, instruction.indirect },
 814 { C::execution_instr_length, instruction.size_in_bytes() },
 815 } });
 816
 817 // At this point we can assume instruction fetching succeeded.
 818 auto operands = instruction.operands;
 819 assert(operands.size() <= AVM_MAX_OPERANDS);
// Pad with zero-valued operands so every operand column gets a defined value.
 820 operands.resize(AVM_MAX_OPERANDS, simulation::Operand::from<FF>(0));
 821
 822 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
 823 trace.set(OPERAND_COLUMNS[i], row, operands.at(i));
 824 }
 825}
826
// NOTE(review): doxygen-listing scrape — the signature head (original lines 826-828)
// was dropped. Per this page's cross-reference index the full signature is:
//   void process_execution_spec(const simulation::ExecutionEvent& ex_event,
//                               TraceContainer& trace, uint32_t row)
// Writes the per-opcode "spec" columns for `row`: static gas costs, per-register
// read/write/tag-check selectors, which operands are addresses, and the
// subtrace-dispatch identifiers.
 829 uint32_t row)
 830{
 831 // At this point we can assume instruction fetching succeeded, so this should never fail.
 832 ExecutionOpCode exec_opcode = ex_event.wire_instruction.get_exec_opcode();
 833 const auto& exec_spec = EXEC_INSTRUCTION_SPEC.at(exec_opcode);
 834 const auto& gas_cost = exec_spec.gas_cost;
 835
 836 // Gas.
 837 trace.set(row,
 838 { {
 839 { C::execution_opcode_gas, gas_cost.opcode_gas },
 840 { C::execution_base_da_gas, gas_cost.base_da },
 841 { C::execution_dynamic_l2_gas, gas_cost.dyn_l2 },
 842 { C::execution_dynamic_da_gas, gas_cost.dyn_da },
 843 } });
 844
// Per-register selectors from the instruction spec: write flag, active (mem-op)
// flag, the expected tag (0 when no check is required) and the tag-check flag.
 845 const auto& register_info = exec_spec.register_info;
 846 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
 847 trace.set(row,
 848 { {
 849 { REGISTER_IS_WRITE_COLUMNS[i], register_info.is_write(i) ? 1 : 0 },
 850 { REGISTER_MEM_OP_COLUMNS[i], register_info.is_active(i) ? 1 : 0 },
 851 { REGISTER_EXPECTED_TAG_COLUMNS[i],
 852 register_info.need_tag_check(i) ? static_cast<uint32_t>(*register_info.expected_tag(i)) : 0 },
 853 { REGISTER_TAG_CHECK_COLUMNS[i], register_info.need_tag_check(i) ? 1 : 0 },
 854 } });
 855 }
 856
 857 // Set is_address columns
 858 const auto& num_addresses = exec_spec.num_addresses;
 859 for (size_t i = 0; i < num_addresses; i++) {
 860 trace.set(OPERAND_IS_ADDRESS_COLUMNS[i], row, 1);
 861 }
 862
 863 // At this point we can assume instruction fetching succeeded, so this should never fail.
// Subtrace dispatch: which gadget subtrace handles this opcode, and its operation id.
 864 const auto& dispatch_to_subtrace = SUBTRACE_INFO_MAP.at(exec_opcode);
 865 trace.set(row,
 866 { {
 867 { C::execution_subtrace_id, get_subtrace_id(dispatch_to_subtrace.subtrace_selector) },
 868 { C::execution_subtrace_operation_id, dispatch_to_subtrace.subtrace_operation_id },
 869 { C::execution_dyn_gas_id, exec_spec.dyn_gas_id },
 870 } });
 871}
872
// NOTE(review): doxygen-listing scrape — original lines 873 and 875 (the gas_event
// and trace parameters) were dropped. Per this page's cross-reference index the
// full signature is:
//   void process_gas(const simulation::GasEvent& gas_event, ExecutionOpCode exec_opcode,
//                    TraceContainer& trace, uint32_t row)
// Writes the gas-accounting columns: out-of-gas flags, addressing gas, the
// limit-vs-used comparison witnesses, dynamic gas factors, and (when the opcode
// has dynamic gas) its dedicated selector column.
 874 ExecutionOpCode exec_opcode,
 876 uint32_t row)
 877{
 878 bool oog = gas_event.oog_l2 || gas_event.oog_da;
 879 trace.set(row,
 880 { {
 881 { C::execution_out_of_gas_l2, gas_event.oog_l2 ? 1 : 0 },
 882 { C::execution_out_of_gas_da, gas_event.oog_da ? 1 : 0 },
 883 { C::execution_sel_out_of_gas, oog ? 1 : 0 },
 884 // Base gas.
 885 { C::execution_addressing_gas, gas_event.addressing_gas },
 886 { C::execution_limit_used_l2_cmp_diff, gas_event.limit_used_l2_comparison_witness },
 887 { C::execution_limit_used_da_cmp_diff, gas_event.limit_used_da_comparison_witness },
 888 { C::execution_constant_64, 64 },
 889 // Dynamic gas.
 890 { C::execution_dynamic_l2_gas_factor, gas_event.dynamic_gas_factor.l2Gas },
 891 { C::execution_dynamic_da_gas_factor, gas_event.dynamic_gas_factor.daGas },
 892 } });
 893
// Opcodes with dynamic gas (dyn_gas_id != 0) additionally raise their selector.
 894 const auto& exec_spec = EXEC_INSTRUCTION_SPEC.at(exec_opcode);
 895 if (exec_spec.dyn_gas_id != 0) {
 896 trace.set(get_dyn_gas_selector(exec_spec.dyn_gas_id), row, 1);
 897 }
 898}
899
// NOTE(review): doxygen-listing scrape — the signature head (original lines 900-902)
// and original line 921 (evidently the declaration of `relative_oob`, which is used
// below — TODO confirm against the real source) were dropped. Per this page's
// cross-reference index the full signature is:
//   void process_addressing(const simulation::AddressingEvent& addr_event,
//                           const simulation::Instruction& instruction,
//                           TraceContainer& trace, uint32_t row)
// Populates all addressing columns for `row`: per-operand relative/indirect
// resolution results, the full 16-bit wire relative/indirect flags, the base
// address tag check, and the batched addressing-error witnesses.
 903 uint32_t row)
 904{
 905 // At this point we can assume instruction fetching succeeded, so this should never fail.
 906 ExecutionOpCode exec_opcode = instruction.get_exec_opcode();
 907 const ExecInstructionSpec& ex_spec = EXEC_INSTRUCTION_SPEC.at(exec_opcode);
 908
 909 auto resolution_info_vec = addr_event.resolution_info;
 910 assert(resolution_info_vec.size() <= AVM_MAX_OPERANDS);
 911 resolution_info_vec.resize(AVM_MAX_OPERANDS,
 912 {
 913 // This is the default we want: both tag and value 0.
 914 .after_relative = simulation::Operand::from<FF>(0),
 915 .resolved_operand = simulation::Operand::from<FF>(0),
 916 });
 917
 918 std::array<bool, AVM_MAX_OPERANDS> should_apply_indirection{};
 919 std::array<bool, AVM_MAX_OPERANDS> is_relative_effective{};
 920 std::array<bool, AVM_MAX_OPERANDS> is_indirect_effective{};
 922 std::array<FF, AVM_MAX_OPERANDS> after_relative{};
 923 std::array<FF, AVM_MAX_OPERANDS> resolved_operand{};
 924 std::array<uint8_t, AVM_MAX_OPERANDS> resolved_operand_tag{};
 925 uint8_t num_relative_operands = 0;
 926
 927 // Gather operand information.
 928 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
 929 const auto& resolution_info = resolution_info_vec.at(i);
 930 bool op_is_address = i < ex_spec.num_addresses;
 931 relative_oob[i] = resolution_info.error.has_value() &&
 932 *resolution_info.error == AddressingEventError::RELATIVE_COMPUTATION_OOB;
 933 is_indirect_effective[i] = op_is_address && is_operand_indirect(instruction.indirect, i);
 934 is_relative_effective[i] = op_is_address && is_operand_relative(instruction.indirect, i);
 935 should_apply_indirection[i] = is_indirect_effective[i] && !relative_oob[i];
 936 resolved_operand_tag[i] = static_cast<uint8_t>(resolution_info.resolved_operand.get_tag());
 937 after_relative[i] = resolution_info.after_relative;
 938 resolved_operand[i] = resolution_info.resolved_operand;
 939 if (is_relative_effective[i]) {
 940 num_relative_operands++;
 941 }
 942 }
 943
 944 // Set the operand columns.
 945 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
// Range-check witness for relative operands: (2^32 - 1 - after_relative) when
// in range, (after_relative - 2^32) when out of bounds.
 946 FF relative_oob_check_diff = 0;
 947 if (is_relative_effective[i]) {
 948 relative_oob_check_diff =
 949 !relative_oob[i] ? FF(1ULL << 32) - after_relative[i] - 1 : after_relative[i] - FF(1ULL << 32);
 950 }
 951 trace.set(row,
 952 { {
 953 { OPERAND_RELATIVE_OVERFLOW_COLUMNS[i], relative_oob[i] ? 1 : 0 },
 954 { OPERAND_AFTER_RELATIVE_COLUMNS[i], after_relative[i] },
 955 { OPERAND_SHOULD_APPLY_INDIRECTION_COLUMNS[i], should_apply_indirection[i] ? 1 : 0 },
 956 { OPERAND_IS_RELATIVE_EFFECTIVE_COLUMNS[i], is_relative_effective[i] ? 1 : 0 },
 957 { OPERAND_RELATIVE_OOB_CHECK_DIFF_COLUMNS[i], relative_oob_check_diff },
 958 { RESOLVED_OPERAND_COLUMNS[i], resolved_operand[i] },
 959 { RESOLVED_OPERAND_TAG_COLUMNS[i], resolved_operand_tag[i] },
 960 } });
 961 }
 962
 963 // We need to compute relative and indirect over the whole 16 bits of the indirect flag.
 964 // See comment in PIL file about indirect upper bits.
 965 for (size_t i = 0; i < TOTAL_INDIRECT_BITS / 2; i++) {
 966 bool is_relative = is_operand_relative(instruction.indirect, i);
 967 bool is_indirect = is_operand_indirect(instruction.indirect, i);
 968 trace.set(row,
 969 { {
 970 { OPERAND_IS_RELATIVE_WIRE_COLUMNS[i], is_relative ? 1 : 0 },
 971 { OPERAND_IS_INDIRECT_WIRE_COLUMNS[i], is_indirect ? 1 : 0 },
 972 } });
 973 }
 974
 975 // Base address check.
// The base address is only checked when some operand is relative; its tag must be
// U32, and the inverse of the tag difference witnesses the failure case.
 976 bool do_base_check = num_relative_operands != 0;
 977 bool base_address_invalid = do_base_check && addr_event.base_address.get_tag() != MemoryTag::U32;
 978 FF base_address_tag_diff_inv =
 979 base_address_invalid
 980 ? (FF(static_cast<uint8_t>(addr_event.base_address.get_tag())) - FF(static_cast<uint8_t>(MemoryTag::U32)))
 981 .invert()
 982 : 0;
 983
 984 // Tag check after indirection.
// Batched witness: base-8 packing of (resolved tag - U32) over indirect operands;
// nonzero (and thus invertible) iff some post-indirection address tag is wrong.
 985 bool some_final_check_failed =
 986 std::any_of(addr_event.resolution_info.begin(), addr_event.resolution_info.end(), [](const auto& info) {
 987 return info.error.has_value() && *info.error == AddressingEventError::INVALID_ADDRESS_AFTER_INDIRECTION;
 988 });
 989 FF batched_tags_diff_inv = 0;
 990 if (some_final_check_failed) {
 991 FF batched_tags_diff = 0;
 992 FF power_of_2 = 1;
 993 for (size_t i = 0; i < AVM_MAX_OPERANDS; ++i) {
 994 batched_tags_diff +=
 995 FF(is_indirect_effective[i]) * power_of_2 * (FF(resolved_operand_tag[i]) - FF(MEM_TAG_U32));
 996 power_of_2 *= 8; // 2^3
 997 }
 998 batched_tags_diff_inv = batched_tags_diff != 0 ? batched_tags_diff.invert() : 0;
 999 }
 1000
 1001 // Collect addressing errors. See PIL file for reference.
// The inverse of the total error count witnesses "at least one addressing error".
 1002 bool addressing_failed = std::any_of(addr_event.resolution_info.begin(),
 1003 addr_event.resolution_info.end(),
 1004 [](const auto& info) { return info.error.has_value(); });
 1005 FF addressing_error_collection_inv =
 1006 addressing_failed
 1007 ? FF(
 1008 // Base address invalid.
 1009 (base_address_invalid ? 1 : 0) +
 1010 // Relative overflow.
 1011 std::accumulate(addr_event.resolution_info.begin(),
 1012 addr_event.resolution_info.end(),
 1013 static_cast<uint32_t>(0),
 1014 [](uint32_t acc, const auto& info) {
 1015 return acc +
 1016 (info.error.has_value() &&
 1017 *info.error == AddressingEventError::RELATIVE_COMPUTATION_OOB
 1018 ? 1
 1019 : 0);
 1020 }) +
 1021 // Some invalid address after indirection.
 1022 (some_final_check_failed ? 1 : 0))
 1023 .invert()
 1024 : 0;
 1025
 1026 trace.set(row,
 1027 { {
 1028 { C::execution_sel_addressing_error, addressing_failed ? 1 : 0 },
 1029 { C::execution_addressing_error_collection_inv, addressing_error_collection_inv },
 1030 { C::execution_base_address_val, addr_event.base_address.as_ff() },
 1031 { C::execution_base_address_tag, static_cast<uint8_t>(addr_event.base_address.get_tag()) },
 1032 { C::execution_base_address_tag_diff_inv, base_address_tag_diff_inv },
 1033 { C::execution_sel_base_address_failure, base_address_invalid ? 1 : 0 },
 1034 { C::execution_num_relative_operands_inv, do_base_check ? FF(num_relative_operands).invert() : 0 },
 1035 { C::execution_sel_do_base_check, do_base_check ? 1 : 0 },
 1036 { C::execution_constant_32, 32 },
 1037 { C::execution_two_to_32, 1ULL << 32 },
 1038 } });
 1039}
1040
// NOTE(review): doxygen-listing scrape — original line 1041 (function name +
// exec_opcode param) and lines 1044-1045 (registers span + trace params) were
// dropped. Per this page's cross-reference index the full signature is:
//   void process_registers(ExecutionOpCode exec_opcode, const std::vector<TaggedValue>& inputs,
//                          const TaggedValue& output, std::span<TaggedValue> registers,
//                          TraceContainer& trace, uint32_t row)
// Fills `registers` from the event's inputs/output according to the opcode's
// register spec, writes the register value/tag columns (marking effective reads),
// and computes the batched register tag-check witness and read-error selector.
 1042 const std::vector<TaggedValue>& inputs,
 1043 const TaggedValue& output,
 1046 uint32_t row)
 1047{
 1048 assert(registers.size() == AVM_MAX_REGISTERS);
 1049 // At this point we can assume instruction fetching succeeded, so this should never fail.
 1050 const auto& register_info = EXEC_INSTRUCTION_SPEC.at(exec_opcode).register_info;
 1051
 1052 // Registers.
 1053 size_t input_counter = 0;
 1054 for (uint8_t i = 0; i < AVM_MAX_REGISTERS; ++i) {
 1055 if (register_info.is_active(i)) {
 1056 if (register_info.is_write(i)) {
 1057 // If this is a write operation, we need to get the value from the output.
 1058 registers[i] = output;
 1059 } else {
 1060 // If this is a read operation, we need to get the value from the input.
// Missing inputs default to FF(0) rather than indexing out of range.
 1061 auto input = inputs.size() > input_counter ? inputs.at(input_counter) : TaggedValue::from<FF>(0);
 1062 registers[i] = input;
 1063 input_counter++;
 1064 }
 1065 }
 1066 }
 1067
 1068 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
 1069 trace.set(REGISTER_COLUMNS[i], row, registers[i]);
 1070 trace.set(REGISTER_MEM_TAG_COLUMNS[i], row, static_cast<uint8_t>(registers[i].get_tag()));
 1071 // This one is special because it sets the reads (but not the writes).
 1072 // If we got here, sel_should_read_registers=1.
 1073 if (register_info.is_active(i) && !register_info.is_write(i)) {
 1074 trace.set(REGISTER_OP_REG_EFFECTIVE_COLUMNS[i], row, 1);
 1075 }
 1076 }
 1077
 1078 // Tag check.
 1079 bool some_tag_check_failed = false;
 1080 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
 1081 if (register_info.need_tag_check(i)) {
 1082 if (registers[i].get_tag() != *register_info.expected_tag(i)) {
 1083 some_tag_check_failed = true;
 1084 break;
 1085 }
 1086 }
 1087 }
 1088
// Witness for the batched register tag check: base-8 packing of
// (actual tag - expected tag) over the tag-checked registers.
 1089 FF batched_tags_diff_inv_reg = 0;
 1090 if (some_tag_check_failed) {
 1091 FF batched_tags_diff = 0;
 1092 FF power_of_2 = 1;
 1093 for (size_t i = 0; i < AVM_MAX_REGISTERS; ++i) {
 1094 if (register_info.need_tag_check(i)) {
 1095 batched_tags_diff += power_of_2 * (FF(static_cast<uint8_t>(registers[i].get_tag())) -
 1096 FF(static_cast<uint8_t>(*register_info.expected_tag(i))));
 1097 }
 1098 power_of_2 *= 8; // 2^3
 1099 }
 1100 batched_tags_diff_inv_reg = batched_tags_diff != 0 ? batched_tags_diff.invert() : 0;
 1101 }
 1102
 1103 trace.set(row,
 1104 { {
 1105 { C::execution_sel_should_read_registers, 1 },
 1106 { C::execution_batched_tags_diff_inv_reg, batched_tags_diff_inv_reg },
 1107 { C::execution_sel_register_read_error, some_tag_check_failed ? 1 : 0 },
 1108 } });
 1109}
1110
// NOTE(review): doxygen-listing scrape — the signature line (original 1111) was
// dropped. Per this page's cross-reference index the full signature is:
//   void process_registers_write(ExecutionOpCode exec_opcode, TraceContainer& trace, uint32_t row)
// Raises the register-write selector and, for each register the opcode writes,
// the corresponding "effective memory op" column.
1112{
 1113 const auto& register_info = EXEC_INSTRUCTION_SPEC.at(exec_opcode).register_info;
 1114 trace.set(C::execution_sel_should_write_registers, row, 1);
 1116 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
 1117 // This one is special because it sets the writes.
 1118 // If we got here, sel_should_write_registers=1.
 1119 if (register_info.is_active(i) && register_info.is_write(i)) {
 1120 trace.set(REGISTER_OP_REG_EFFECTIVE_COLUMNS[i], row, 1);
 1121 }
 1122 }
 1123}
1124
// NOTE(review): doxygen-listing scrape — original line 1125 (function name +
// envvar_enum param) and line 1127 (trace param) were dropped. Per this page's
// cross-reference index the full signature is:
//   void process_get_env_var_opcode(TaggedValue envvar_enum, TaggedValue output,
//                                   TraceContainer& trace, uint32_t row)
// Writes the GETENVVAR-specific columns: the per-variable selector flags and
// public-inputs lookup metadata from GetEnvVarSpec, plus the output tag.
 1126 TaggedValue output,
 1128 uint32_t row)
 1129{
 1130 assert(envvar_enum.get_tag() == ValueTag::U8);
 1131 const auto& envvar_spec = GetEnvVarSpec::get_table(envvar_enum.as<uint8_t>());
 1132
 1133 trace.set(row,
 1134 { {
 1135 { C::execution_sel_execute_get_env_var, 1 },
 1136 { C::execution_sel_envvar_pi_lookup_col0, envvar_spec.envvar_pi_lookup_col0 ? 1 : 0 },
 1137 { C::execution_sel_envvar_pi_lookup_col1, envvar_spec.envvar_pi_lookup_col1 ? 1 : 0 },
 1138 { C::execution_envvar_pi_row_idx, envvar_spec.envvar_pi_row_idx },
 1139 { C::execution_is_address, envvar_spec.is_address ? 1 : 0 },
 1140 { C::execution_is_sender, envvar_spec.is_sender ? 1 : 0 },
 1141 { C::execution_is_transactionfee, envvar_spec.is_transactionfee ? 1 : 0 },
 1142 { C::execution_is_isstaticcall, envvar_spec.is_isstaticcall ? 1 : 0 },
 1143 { C::execution_is_l2gasleft, envvar_spec.is_l2gasleft ? 1 : 0 },
 1144 { C::execution_is_dagasleft, envvar_spec.is_dagasleft ? 1 : 0 },
// Only env vars sourced from the public inputs carry a looked-up value; others leave 0.
 1145 { C::execution_value_from_pi,
 1146 envvar_spec.envvar_pi_lookup_col0 || envvar_spec.envvar_pi_lookup_col1 ? output.as_ff() : 0 },
 1147 { C::execution_mem_tag_reg_0_, envvar_spec.out_tag },
 1148 } });
 1149}
1150
// NOTE(review): doxygen-listing scrape — the declaration head (original lines
// 1151-1152; per the cross-reference index:
//   const InteractionDefinition ExecutionTraceBuilder::interactions = ...)
// and roughly every other `.add<...>()` entry were dropped (see the gaps in the
// embedded numbering). This listing is NOT the complete interaction registry —
// TODO: restore from the original source before relying on it.
// Registers the lookup/permutation interactions the execution trace participates
// in, grouped by subsystem (addressing, registers, gas, context stack, opcodes...).
 1153 // Execution
 1155 .add<lookup_execution_check_written_storage_slot_settings, InteractionType::LookupSequential>()
 1156 // Bytecode retrieval
 1158 // Instruction fetching
 1159 .add<lookup_execution_instruction_fetching_result_settings, InteractionType::LookupGeneric>()
 1161 // Addressing
 1162 .add<lookup_addressing_relative_overflow_range_0_settings, InteractionType::LookupGeneric>()
 1164 .add<lookup_addressing_relative_overflow_range_2_settings, InteractionType::LookupGeneric>()
 1166 .add<lookup_addressing_relative_overflow_range_4_settings, InteractionType::LookupGeneric>()
 1168 .add<lookup_addressing_relative_overflow_range_6_settings, InteractionType::LookupGeneric>()
 1169 // Registers
 1171 .add<lookup_registers_mem_op_1_settings, InteractionType::LookupGeneric>()
 1173 .add<lookup_registers_mem_op_3_settings, InteractionType::LookupGeneric>()
 1175 .add<lookup_registers_mem_op_5_settings, InteractionType::LookupGeneric>()
 1177 // Internal Call Stack
 1178 .add<lookup_internal_call_push_call_stack_settings_, InteractionType::LookupSequential>()
 1180 // Gas
 1181 .add<lookup_gas_addressing_gas_read_settings, InteractionType::LookupIntoIndexedByClk>()
 1183 .add<lookup_gas_limit_used_da_range_settings, InteractionType::LookupGeneric>()
 1185 // Gas - ToRadix BE
 1186 .add<lookup_execution_check_radix_gt_256_settings, InteractionType::LookupGeneric>()
 1188 .add<lookup_execution_get_max_limbs_settings, InteractionType::LookupGeneric>()
 1189 // Context Stack
 1191 .add<lookup_context_ctx_stack_rollback_settings, InteractionType::LookupGeneric>()
 1193 // External Call
 1194 .add<lookup_external_call_call_allocated_left_l2_range_settings, InteractionType::LookupGeneric>()
 1196 // Dispatch to gadget sub-traces
 1197 .add<perm_execution_dispatch_keccakf1600_settings, InteractionType::Permutation>()
 1198 // GetEnvVar opcode
 1200 .add<lookup_get_env_var_read_from_public_inputs_col0_settings, InteractionType::LookupIntoIndexedByClk>()
 1202 // Sload opcode
 1203 .add<lookup_sload_storage_read_settings, InteractionType::LookupGeneric>()
 1204 // Sstore opcode
 1206 .add<lookup_sstore_storage_write_settings, InteractionType::LookupGeneric>()
 1207 // NoteHashExists
 1209 .add<lookup_notehash_exists_note_hash_leaf_index_in_range_settings, InteractionType::LookupGeneric>()
 1210 // NullifierExists opcode
 1212 // EmitNullifier
 1213 .add<lookup_emit_nullifier_write_nullifier_settings, InteractionType::LookupSequential>()
 1214 // GetContractInstance opcode
 1216 // EmitNoteHash
 1217 .add<lookup_emit_notehash_notehash_tree_write_settings, InteractionType::LookupSequential>()
 1218 // L1ToL2MsgExists
 1220 .add<lookup_l1_to_l2_message_exists_l1_to_l2_msg_read_settings, InteractionType::LookupSequential>()
 1221 // Alu dispatching
 1223 .add<lookup_alu_exec_dispatching_cast_settings, InteractionType::LookupGeneric>()
 1225 // SendL2ToL1Msg
 1226 .add<lookup_send_l2_to_l1_msg_write_l2_to_l1_msg_settings, InteractionType::LookupIntoIndexedByClk>();
1228} // namespace bb::avm2::tracegen
#define MEM_TAG_U32
#define AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX
#define AVM_MAX_OPERANDS
#define NOTE_HASH_TREE_LEAF_COUNT
#define L1_TO_L2_MSG_TREE_LEAF_COUNT
#define AVM_MAX_REGISTERS
#define MAX_L2_TO_L1_MSGS_PER_TX
#define MAX_NOTE_HASHES_PER_TX
#define MAX_NULLIFIERS_PER_TX
#define MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX
ValueTag get_tag() const
void process_get_env_var_opcode(TaggedValue envvar_enum, TaggedValue output, TraceContainer &trace, uint32_t row)
void process_execution_spec(const simulation::ExecutionEvent &ex_event, TraceContainer &trace, uint32_t row)
void process_instr_fetching(const simulation::Instruction &instruction, TraceContainer &trace, uint32_t row)
static const InteractionDefinition interactions
void process_registers_write(ExecutionOpCode exec_opcode, TraceContainer &trace, uint32_t row)
void process_gas(const simulation::GasEvent &gas_event, ExecutionOpCode exec_opcode, TraceContainer &trace, uint32_t row)
void process(const simulation::EventEmitterInterface< simulation::ExecutionEvent >::Container &ex_events, TraceContainer &trace)
void process_registers(ExecutionOpCode exec_opcode, const std::vector< TaggedValue > &inputs, const TaggedValue &output, std::span< TaggedValue > registers, TraceContainer &trace, uint32_t row)
void process_addressing(const simulation::AddressingEvent &addr_event, const simulation::Instruction &instruction, TraceContainer &trace, uint32_t row)
static Table get_table(uint8_t envvar)
InteractionDefinition & add(auto &&... args)
void info(Args... args)
Definition log.hpp:70
TestTraceContainer trace
bool app_logic_failure
uint32_t app_logic_exit_context_id
bool teardown_failure
std::unordered_set< uint32_t > does_context_fail
uint32_t teardown_exit_context_id
GasEvent gas_event
Instruction instruction
const std::unordered_map< ExecutionOpCode, SubtraceInfo > SUBTRACE_INFO_MAP
Column get_dyn_gas_selector(uint32_t dyn_gas_id)
Get the column selector for a given dynamic gas ID.
Column get_subtrace_selector(SubtraceSel subtrace_sel)
Get the column selector for a given subtrace selector.
FF get_subtrace_id(SubtraceSel subtrace_sel)
Get the subtrace ID for a given subtrace enum.
lookup_settings< lookup_get_env_var_read_from_public_inputs_col1_settings_ > lookup_get_env_var_read_from_public_inputs_col1_settings
const std::array< std::vector< uint8_t >, 257 > & get_p_limbs_per_radix()
Definition to_radix.cpp:33
lookup_settings< lookup_registers_mem_op_4_settings_ > lookup_registers_mem_op_4_settings
lookup_settings< lookup_registers_mem_op_0_settings_ > lookup_registers_mem_op_0_settings
lookup_settings< lookup_execution_dyn_l2_factor_bitwise_settings_ > lookup_execution_dyn_l2_factor_bitwise_settings
bool is_operand_relative(uint16_t indirect_flag, size_t operand_index)
Definition addressing.hpp:8
lookup_settings< lookup_notehash_exists_note_hash_read_settings_ > lookup_notehash_exists_note_hash_read_settings
lookup_settings< lookup_registers_mem_op_6_settings_ > lookup_registers_mem_op_6_settings
lookup_settings< lookup_registers_mem_op_2_settings_ > lookup_registers_mem_op_2_settings
lookup_settings< lookup_gas_limit_used_l2_range_settings_ > lookup_gas_limit_used_l2_range_settings
lookup_settings< lookup_addressing_relative_overflow_range_5_settings_ > lookup_addressing_relative_overflow_range_5_settings
lookup_settings< lookup_addressing_relative_overflow_range_1_settings_ > lookup_addressing_relative_overflow_range_1_settings
lookup_settings< lookup_context_ctx_stack_return_settings_ > lookup_context_ctx_stack_return_settings
bool is_operand_indirect(uint16_t indirect_flag, size_t operand_index)
lookup_settings< lookup_execution_get_p_limbs_settings_ > lookup_execution_get_p_limbs_settings
lookup_settings< lookup_execution_bytecode_retrieval_result_settings_ > lookup_execution_bytecode_retrieval_result_settings
lookup_settings< lookup_execution_instruction_fetching_body_settings_ > lookup_execution_instruction_fetching_body_settings
permutation_settings< perm_execution_dispatch_get_contract_instance_settings_ > perm_execution_dispatch_get_contract_instance_settings
lookup_settings< lookup_execution_exec_spec_read_settings_ > lookup_execution_exec_spec_read_settings
lookup_settings< lookup_get_env_var_precomputed_info_settings_ > lookup_get_env_var_precomputed_info_settings
lookup_settings< lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_settings_ > lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_settings
lookup_settings< lookup_alu_exec_dispatching_set_settings_ > lookup_alu_exec_dispatching_set_settings
lookup_settings< lookup_sstore_record_written_storage_slot_settings_ > lookup_sstore_record_written_storage_slot_settings
lookup_settings< lookup_addressing_relative_overflow_range_3_settings_ > lookup_addressing_relative_overflow_range_3_settings
lookup_settings< lookup_alu_register_tag_value_settings_ > lookup_alu_register_tag_value_settings
lookup_settings< lookup_external_call_call_allocated_left_da_range_settings_ > lookup_external_call_call_allocated_left_da_range_settings
lookup_settings< lookup_context_ctx_stack_call_settings_ > lookup_context_ctx_stack_call_settings
lookup_settings< lookup_nullifier_exists_nullifier_exists_check_settings_ > lookup_nullifier_exists_nullifier_exists_check_settings
AvmFlavorSettings::FF FF
Definition field.hpp:10
const std::unordered_map< ExecutionOpCode, ExecInstructionSpec > EXEC_INSTRUCTION_SPEC
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
std::vector< OperandResolutionInfo > resolution_info
ExecutionOpCode get_exec_opcode() const