Barretenberg
The ZK-SNARK library at the core of Aztec
bytecode_trace.cpp
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <ranges>
#include <stdexcept>
#include <vector>

namespace bb::avm2::tracegen {

void BytecodeTraceBuilder::process_decomposition(
    const simulation::EventEmitterInterface<simulation::BytecodeDecompositionEvent>::Container& events,
    TraceContainer& trace)
{
    using C = Column;
    // Since next_packed_pc - pc is always in the range [0, 31), we can precompute the inverses:
    std::vector<FF> next_packed_pc_min_pc_inverses = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
                                                       16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 };
    FF::batch_invert(next_packed_pc_min_pc_inverses);
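    // Note: batch inversion computes all inverses with a single field inversion plus O(n)
    // multiplications (Montgomery's trick); the zero entry at index 0 is assumed to be skipped
    // and left as zero.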

    // We start from row 1 because we need a row of zeroes for the shifts.
    uint32_t row = 1;

    for (const auto& event : events) {
        const auto& bytecode = *event.bytecode;
        const auto id = event.bytecode_id;
        auto bytecode_at = [&bytecode](size_t i) -> uint8_t { return i < bytecode.size() ? bytecode[i] : 0; };
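        // Reads past the end of the bytecode return 0, matching the zero-padding that the
        // sliding-window columns below rely on.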
        const uint32_t bytecode_len = static_cast<uint32_t>(bytecode.size());

        for (uint32_t i = 0; i < bytecode_len; i++) {
            const uint32_t remaining = bytecode_len - i;
            const uint32_t bytes_to_read = std::min(remaining, DECOMPOSE_WINDOW_SIZE);
            const bool is_last = remaining == 1;
            const bool is_windows_eq_remaining = remaining == DECOMPOSE_WINDOW_SIZE;

            // Check that we still expect the max public bytecode in bytes to fit within 24 bits (i.e. <= 0xffffff).
            static_assert(MAX_PACKED_PUBLIC_BYTECODE_SIZE_IN_FIELDS * 32 <= 0xffffff);
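            // Note: static_assert is evaluated at compile time, so its placement inside the loop
            // has no runtime cost.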

            // We set the decomposition in bytes, and other values.
            trace.set(row + i,
                      { {
                          { C::bc_decomposition_sel, 1 },
                          { C::bc_decomposition_id, id },
                          { C::bc_decomposition_pc, i },
                          { C::bc_decomposition_last_of_contract, is_last ? 1 : 0 },
                          { C::bc_decomposition_bytes_remaining, remaining },
                          { C::bc_decomposition_bytes_to_read, bytes_to_read },
                          { C::bc_decomposition_sel_windows_gt_remaining, DECOMPOSE_WINDOW_SIZE > remaining ? 1 : 0 },
                          { C::bc_decomposition_is_windows_eq_remaining, is_windows_eq_remaining ? 1 : 0 },
                          // Inverses will be calculated in batch later.
                          { C::bc_decomposition_bytes_rem_inv, remaining },
                          { C::bc_decomposition_bytes_rem_min_one_inv, is_last ? 0 : FF(remaining - 1) },
                          { C::bc_decomposition_windows_min_remaining_inv,
                            is_windows_eq_remaining ? 0 : FF(DECOMPOSE_WINDOW_SIZE) - FF(remaining) },
                          // Sliding window.
                          { C::bc_decomposition_bytes, bytecode_at(i) },
                          { C::bc_decomposition_bytes_pc_plus_1, bytecode_at(i + 1) },
                          { C::bc_decomposition_bytes_pc_plus_2, bytecode_at(i + 2) },
                          { C::bc_decomposition_bytes_pc_plus_3, bytecode_at(i + 3) },
                          { C::bc_decomposition_bytes_pc_plus_4, bytecode_at(i + 4) },
                          { C::bc_decomposition_bytes_pc_plus_5, bytecode_at(i + 5) },
                          { C::bc_decomposition_bytes_pc_plus_6, bytecode_at(i + 6) },
                          { C::bc_decomposition_bytes_pc_plus_7, bytecode_at(i + 7) },
                          { C::bc_decomposition_bytes_pc_plus_8, bytecode_at(i + 8) },
                          { C::bc_decomposition_bytes_pc_plus_9, bytecode_at(i + 9) },
                          { C::bc_decomposition_bytes_pc_plus_10, bytecode_at(i + 10) },
                          { C::bc_decomposition_bytes_pc_plus_11, bytecode_at(i + 11) },
                          { C::bc_decomposition_bytes_pc_plus_12, bytecode_at(i + 12) },
                          { C::bc_decomposition_bytes_pc_plus_13, bytecode_at(i + 13) },
                          { C::bc_decomposition_bytes_pc_plus_14, bytecode_at(i + 14) },
                          { C::bc_decomposition_bytes_pc_plus_15, bytecode_at(i + 15) },
                          { C::bc_decomposition_bytes_pc_plus_16, bytecode_at(i + 16) },
                          { C::bc_decomposition_bytes_pc_plus_17, bytecode_at(i + 17) },
                          { C::bc_decomposition_bytes_pc_plus_18, bytecode_at(i + 18) },
                          { C::bc_decomposition_bytes_pc_plus_19, bytecode_at(i + 19) },
                          { C::bc_decomposition_bytes_pc_plus_20, bytecode_at(i + 20) },
                          { C::bc_decomposition_bytes_pc_plus_21, bytecode_at(i + 21) },
                          { C::bc_decomposition_bytes_pc_plus_22, bytecode_at(i + 22) },
                          { C::bc_decomposition_bytes_pc_plus_23, bytecode_at(i + 23) },
                          { C::bc_decomposition_bytes_pc_plus_24, bytecode_at(i + 24) },
                          { C::bc_decomposition_bytes_pc_plus_25, bytecode_at(i + 25) },
                          { C::bc_decomposition_bytes_pc_plus_26, bytecode_at(i + 26) },
                          { C::bc_decomposition_bytes_pc_plus_27, bytecode_at(i + 27) },
                          { C::bc_decomposition_bytes_pc_plus_28, bytecode_at(i + 28) },
                          { C::bc_decomposition_bytes_pc_plus_29, bytecode_at(i + 29) },
                          { C::bc_decomposition_bytes_pc_plus_30, bytecode_at(i + 30) },
                          { C::bc_decomposition_bytes_pc_plus_31, bytecode_at(i + 31) },
                          { C::bc_decomposition_bytes_pc_plus_32, bytecode_at(i + 32) },
                          { C::bc_decomposition_bytes_pc_plus_33, bytecode_at(i + 33) },
                          { C::bc_decomposition_bytes_pc_plus_34, bytecode_at(i + 34) },
                          { C::bc_decomposition_bytes_pc_plus_35, bytecode_at(i + 35) },
                          { C::bc_decomposition_bytes_pc_plus_36, bytecode_at(i + 36) },
                      } });
        }

        // We set the packed field every 31 bytes.
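        // 31 bytes (248 bits) is the largest whole number of bytes guaranteed to embed injectively
        // into a single field element, since the BN254 scalar field modulus is just under 2^254.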
        auto bytecode_field_at = [&](size_t i) -> FF {
            // We need to read uint256_ts because reading FFs messes up the order of the bytes.
            uint256_t as_int = 0;
            if (bytecode_len - i >= 32) {
                as_int = from_buffer<uint256_t>(bytecode, i);
            } else {
                std::vector<uint8_t> tail(bytecode.begin() + static_cast<ssize_t>(i), bytecode.end());
                tail.resize(32, 0);
                as_int = from_buffer<uint256_t>(tail, 0);
            }
            // We read 32 big-endian bytes starting at i; shifting right by 8 drops the 32nd byte,
            // leaving exactly the 31 bytes that make up this packed field.
            return as_int >> 8;
        };
        for (uint32_t i = 0; i < bytecode_len; i += 31) {
            trace.set(row + i,
                      { {
                          { C::bc_decomposition_sel_packed, 1 },
                          { C::bc_decomposition_packed_field, bytecode_field_at(i) },
                          { C::bc_decomposition_next_packed_pc, i },
                          { C::bc_decomposition_next_packed_pc_min_pc_inv, 0 },
                      } });
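            // Rows strictly between two packing boundaries point forward to the next boundary,
            // e.g. for i = 0 the rows at pc 1..30 all get next_packed_pc = 31.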
            for (uint32_t j = i + 1; j < std::min(bytecode_len, i + 31); j++) {
                trace.set(
                    row + j,
                    { {
                        { C::bc_decomposition_next_packed_pc, i + 31 },
                        { C::bc_decomposition_next_packed_pc_min_pc_inv, next_packed_pc_min_pc_inverses[i + 31 - j] },
                    } });
            }
        }

        // We advance to the next bytecode.
        row += bytecode_len;
    }

    // Batch invert the columns.
    trace.invert_columns({ { C::bc_decomposition_bytes_rem_inv,
                             C::bc_decomposition_bytes_rem_min_one_inv,
                             C::bc_decomposition_windows_min_remaining_inv } });
}

void BytecodeTraceBuilder::process_hashing(
    const simulation::EventEmitterInterface<simulation::BytecodeHashingEvent>::Container& events,
    TraceContainer& trace)
{
    using C = Column;
    uint32_t row = 1;

    for (const auto& event : events) {
        const auto id = event.bytecode_id;
        // Note that the bytecode fields from the BytecodeHashingEvent do not contain the prepended separator.
        std::vector<FF> fields = { GENERATOR_INDEX__PUBLIC_BYTECODE };
        fields.insert(fields.end(), event.bytecode_fields.begin(), event.bytecode_fields.end());
        auto bytecode_field_at = [&fields](size_t i) -> FF { return i < fields.size() ? fields[i] : 0; };
        FF output_hash = Poseidon2::hash(fields);
        auto padding_amount = (3 - (fields.size() % 3)) % 3;
        auto num_rounds = (fields.size() + padding_amount) / 3;
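        // Each round absorbs 3 field elements, so the input is padded up to a multiple of 3:
        // e.g. 7 fields give padding_amount = 2 and num_rounds = 3.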
        uint32_t pc_index = 0;
        for (uint32_t i = 0; i < fields.size(); i += 3) {
            bool start_of_bytecode = i == 0;
            bool end_of_bytecode = i + 3 >= fields.size();
            // When we start the bytecode, we want to look up field 1 at pc = 0 in the decomposition trace, since we
            // force field 0 to be the separator:
            uint32_t pc_index_1 = start_of_bytecode ? 0 : pc_index + 31;
            trace.set(row,
                      { { { C::bc_hashing_sel, 1 },
                          { C::bc_hashing_start, start_of_bytecode },
                          { C::bc_hashing_sel_not_start, !start_of_bytecode },
                          { C::bc_hashing_latch, end_of_bytecode },
                          { C::bc_hashing_bytecode_id, id },
                          { C::bc_hashing_input_len, fields.size() },
                          { C::bc_hashing_rounds_rem, num_rounds },
                          { C::bc_hashing_pc_index, pc_index },
                          { C::bc_hashing_pc_index_1, pc_index_1 },
                          { C::bc_hashing_pc_index_2, pc_index_1 + 31 },
                          { C::bc_hashing_packed_fields_0, bytecode_field_at(i) },
                          { C::bc_hashing_packed_fields_1, bytecode_field_at(i + 1) },
                          { C::bc_hashing_packed_fields_2, bytecode_field_at(i + 2) },
                          { C::bc_hashing_sel_not_padding_1, end_of_bytecode && padding_amount == 2 ? 0 : 1 },
                          { C::bc_hashing_sel_not_padding_2, end_of_bytecode && padding_amount > 0 ? 0 : 1 },
                          { C::bc_hashing_output_hash, output_hash } } });
            if (end_of_bytecode) {
                // TODO(MW): Cleanup: below sets the pc at which the final field starts.
                // It can't just be pc_index + 31 * padding_amount because we 'skip' 31 bytes at start == 1 to force
                // the first field to be the separator:
                trace.set(row,
                          { {
                              { C::bc_hashing_pc_at_final_field,
                                padding_amount == 2 ? pc_index : pc_index_1 + (31 * (1 - padding_amount)) },
                          } });
            }
            pc_index = pc_index_1 + 62;
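            // Fields 1 and 2 of this round each cover 31 bytes, so the next round's field 0
            // starts 2 * 31 = 62 bytes after pc_index_1.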
            row++;
            num_rounds--;
        }
    }
}

void BytecodeTraceBuilder::process_retrieval(
    const simulation::EventEmitterInterface<simulation::BytecodeRetrievalEvent>::Container& events,
    TraceContainer& trace)
{
    using C = Column;

    uint32_t row = 1;
    for (const auto& event : events) {
        uint64_t remaining_bytecodes = MAX_PUBLIC_CALLS_TO_UNIQUE_CONTRACT_CLASS_IDS +
                                       AVM_RETRIEVED_BYTECODES_TREE_INITIAL_SIZE -
                                       event.retrieved_bytecodes_snapshot_before.next_available_leaf_index;
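        // remaining_bytecodes is the number of distinct bytecodes that may still be retrieved:
        // the per-transaction cap plus the tree's initial size, minus the leaves already inserted.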
        bool error = event.instance_not_found_error || event.limit_error;
        trace.set(
            row,
            { {
                { C::bc_retrieval_sel, 1 },
                { C::bc_retrieval_bytecode_id, event.bytecode_id },
                { C::bc_retrieval_address, event.address },
                { C::bc_retrieval_error, error },

                // Contract instance members (for lookup into contract_instance_retrieval)
                { C::bc_retrieval_current_class_id, event.current_class_id },

                // Tree context (for lookup into contract_instance_retrieval)
                { C::bc_retrieval_public_data_tree_root, event.public_data_tree_root },
                { C::bc_retrieval_nullifier_tree_root, event.nullifier_root },

                // Retrieved bytecodes tree state
                { C::bc_retrieval_prev_retrieved_bytecodes_tree_root, event.retrieved_bytecodes_snapshot_before.root },
                { C::bc_retrieval_prev_retrieved_bytecodes_tree_size,
                  event.retrieved_bytecodes_snapshot_before.next_available_leaf_index },
                { C::bc_retrieval_next_retrieved_bytecodes_tree_root, event.retrieved_bytecodes_snapshot_after.root },
                { C::bc_retrieval_next_retrieved_bytecodes_tree_size,
                  event.retrieved_bytecodes_snapshot_after.next_available_leaf_index },

                // Instance existence determined by shared contract instance retrieval
                { C::bc_retrieval_instance_exists, !event.instance_not_found_error },

                // Limit handling
                { C::bc_retrieval_no_remaining_bytecodes, remaining_bytecodes == 0 },
                { C::bc_retrieval_remaining_bytecodes_inv, remaining_bytecodes }, // Will be inverted in batch later.
                { C::bc_retrieval_is_new_class, event.is_new_class },
                { C::bc_retrieval_should_retrieve, !error },

                // Contract class for bytecode operations
                { C::bc_retrieval_artifact_hash, event.contract_class.artifact_hash },
                { C::bc_retrieval_private_functions_root, event.contract_class.private_functions_root },
            } });
        row++;
    }

    // Batch invert the columns.
    trace.invert_columns({ { C::bc_retrieval_remaining_bytecodes_inv } });
}

void BytecodeTraceBuilder::process_instruction_fetching(
    const simulation::EventEmitterInterface<simulation::InstructionFetchingEvent>::Container& events,
    TraceContainer& trace)
{
    using C = Column;

    // We start from row 1 because we need a row of zeroes for the shifts.
    uint32_t row = 1;

    for (const auto& event : events) {
        const auto bytecode_id = event.bytecode_id;
        const auto bytecode_size = event.bytecode->size();

        auto get_operand = [&](size_t i) -> FF {
            return i < event.instruction.operands.size() ? static_cast<FF>(event.instruction.operands[i]) : 0;
        };
        auto bytecode_at = [&](size_t i) -> uint8_t { return i < bytecode_size ? (*event.bytecode)[i] : 0; };

        const uint8_t wire_opcode = bytecode_at(event.pc);
        const bool wire_opcode_in_range =
            event.error != PC_OUT_OF_RANGE && wire_opcode < static_cast<uint8_t>(WireOpCode::LAST_OPCODE_SENTINEL);

        uint32_t size_in_bytes = 0;
        ExecutionOpCode exec_opcode = static_cast<ExecutionOpCode>(0);
        std::array<uint8_t, NUM_OP_DC_SELECTORS> op_dc_selectors{};
        uint8_t has_tag = 0;
        uint8_t tag_is_op2 = 0;
        uint8_t tag_value = 0;

        if (wire_opcode_in_range) {
            const auto& wire_instr_spec = get_wire_instruction_spec().at(static_cast<WireOpCode>(wire_opcode));
            size_in_bytes = wire_instr_spec.size_in_bytes;
            exec_opcode = wire_instr_spec.exec_opcode;
            op_dc_selectors = wire_instr_spec.op_dc_selectors;

            if (wire_instr_spec.tag_operand_idx.has_value()) {
                const auto tag_value_idx = wire_instr_spec.tag_operand_idx.value();
                assert((tag_value_idx == 2 || tag_value_idx == 3) &&
                       "Current constraints support only tag for operand index equal to 2 or 3");
                has_tag = 1;

                if (tag_value_idx == 2) {
                    tag_is_op2 = 1;
                    tag_value = static_cast<uint8_t>(get_operand(1)); // in instruction.operands, op2 has index 1
                } else {
                    tag_value = static_cast<uint8_t>(get_operand(2));
                }
            }
        }

        const uint32_t bytes_remaining =
            event.error == PC_OUT_OF_RANGE ? 0 : static_cast<uint32_t>(bytecode_size - event.pc);
        const uint32_t bytes_to_read = std::min(bytes_remaining, DECOMPOSE_WINDOW_SIZE);

        uint32_t instr_abs_diff = 0;
        if (size_in_bytes <= bytes_to_read) {
            instr_abs_diff = bytes_to_read - size_in_bytes;
        } else {
            instr_abs_diff = size_in_bytes - bytes_to_read - 1;
        }
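        // Standard circuit trick for proving an inequality with one range check:
        // bytes_to_read - size_in_bytes is non-negative iff the instruction fits, and
        // size_in_bytes - bytes_to_read - 1 is non-negative iff it does not, so range-checking
        // instr_abs_diff (via the instr_abs_diff_positive lookup) proves whichever case the
        // out-of-range selector claims.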

        uint32_t bytecode_size_u32 = static_cast<uint32_t>(bytecode_size);
        uint32_t pc_abs_diff =
            bytecode_size_u32 > event.pc ? bytecode_size_u32 - event.pc - 1 : event.pc - bytecode_size_u32;
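        // Same trick for the pc bound: pc < bytecode_size yields bytecode_size - pc - 1,
        // otherwise pc - bytecode_size; the pc_abs_diff_positive lookup range-checks the result.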

        trace.set(row,
                  { {
                      { C::instr_fetching_sel, 1 },
                      { C::instr_fetching_bytecode_id, bytecode_id },
                      { C::instr_fetching_pc, event.pc },
                      // indirect + operands.
                      { C::instr_fetching_indirect, event.instruction.indirect },
                      { C::instr_fetching_op1, get_operand(0) },
                      { C::instr_fetching_op2, get_operand(1) },
                      { C::instr_fetching_op3, get_operand(2) },
                      { C::instr_fetching_op4, get_operand(3) },
                      { C::instr_fetching_op5, get_operand(4) },
                      { C::instr_fetching_op6, get_operand(5) },
                      { C::instr_fetching_op7, get_operand(6) },
                      // Single bytes.
                      { C::instr_fetching_bd0, wire_opcode },
                      { C::instr_fetching_bd1, bytecode_at(event.pc + 1) },
                      { C::instr_fetching_bd2, bytecode_at(event.pc + 2) },
                      { C::instr_fetching_bd3, bytecode_at(event.pc + 3) },
                      { C::instr_fetching_bd4, bytecode_at(event.pc + 4) },
                      { C::instr_fetching_bd5, bytecode_at(event.pc + 5) },
                      { C::instr_fetching_bd6, bytecode_at(event.pc + 6) },
                      { C::instr_fetching_bd7, bytecode_at(event.pc + 7) },
                      { C::instr_fetching_bd8, bytecode_at(event.pc + 8) },
                      { C::instr_fetching_bd9, bytecode_at(event.pc + 9) },
                      { C::instr_fetching_bd10, bytecode_at(event.pc + 10) },
                      { C::instr_fetching_bd11, bytecode_at(event.pc + 11) },
                      { C::instr_fetching_bd12, bytecode_at(event.pc + 12) },
                      { C::instr_fetching_bd13, bytecode_at(event.pc + 13) },
                      { C::instr_fetching_bd14, bytecode_at(event.pc + 14) },
                      { C::instr_fetching_bd15, bytecode_at(event.pc + 15) },
                      { C::instr_fetching_bd16, bytecode_at(event.pc + 16) },
                      { C::instr_fetching_bd17, bytecode_at(event.pc + 17) },
                      { C::instr_fetching_bd18, bytecode_at(event.pc + 18) },
                      { C::instr_fetching_bd19, bytecode_at(event.pc + 19) },
                      { C::instr_fetching_bd20, bytecode_at(event.pc + 20) },
                      { C::instr_fetching_bd21, bytecode_at(event.pc + 21) },
                      { C::instr_fetching_bd22, bytecode_at(event.pc + 22) },
                      { C::instr_fetching_bd23, bytecode_at(event.pc + 23) },
                      { C::instr_fetching_bd24, bytecode_at(event.pc + 24) },
                      { C::instr_fetching_bd25, bytecode_at(event.pc + 25) },
                      { C::instr_fetching_bd26, bytecode_at(event.pc + 26) },
                      { C::instr_fetching_bd27, bytecode_at(event.pc + 27) },
                      { C::instr_fetching_bd28, bytecode_at(event.pc + 28) },
                      { C::instr_fetching_bd29, bytecode_at(event.pc + 29) },
                      { C::instr_fetching_bd30, bytecode_at(event.pc + 30) },
                      { C::instr_fetching_bd31, bytecode_at(event.pc + 31) },
                      { C::instr_fetching_bd32, bytecode_at(event.pc + 32) },
                      { C::instr_fetching_bd33, bytecode_at(event.pc + 33) },
                      { C::instr_fetching_bd34, bytecode_at(event.pc + 34) },
                      { C::instr_fetching_bd35, bytecode_at(event.pc + 35) },
                      { C::instr_fetching_bd36, bytecode_at(event.pc + 36) },

                      // From instruction table.
                      { C::instr_fetching_exec_opcode, static_cast<uint32_t>(exec_opcode) },
                      { C::instr_fetching_instr_size, size_in_bytes },
                      { C::instr_fetching_sel_has_tag, has_tag },
                      { C::instr_fetching_sel_tag_is_op2, tag_is_op2 },

                      // Fill operand decomposition selectors
                      { C::instr_fetching_sel_op_dc_0, op_dc_selectors.at(0) },
                      { C::instr_fetching_sel_op_dc_1, op_dc_selectors.at(1) },
                      { C::instr_fetching_sel_op_dc_2, op_dc_selectors.at(2) },
                      { C::instr_fetching_sel_op_dc_3, op_dc_selectors.at(3) },
                      { C::instr_fetching_sel_op_dc_4, op_dc_selectors.at(4) },
                      { C::instr_fetching_sel_op_dc_5, op_dc_selectors.at(5) },
                      { C::instr_fetching_sel_op_dc_6, op_dc_selectors.at(6) },
                      { C::instr_fetching_sel_op_dc_7, op_dc_selectors.at(7) },
                      { C::instr_fetching_sel_op_dc_8, op_dc_selectors.at(8) },
                      { C::instr_fetching_sel_op_dc_9, op_dc_selectors.at(9) },
                      { C::instr_fetching_sel_op_dc_10, op_dc_selectors.at(10) },
                      { C::instr_fetching_sel_op_dc_11, op_dc_selectors.at(11) },
                      { C::instr_fetching_sel_op_dc_12, op_dc_selectors.at(12) },
                      { C::instr_fetching_sel_op_dc_13, op_dc_selectors.at(13) },
                      { C::instr_fetching_sel_op_dc_14, op_dc_selectors.at(14) },
                      { C::instr_fetching_sel_op_dc_15, op_dc_selectors.at(15) },
                      { C::instr_fetching_sel_op_dc_16, op_dc_selectors.at(16) },

                      // Parsing errors
                      { C::instr_fetching_pc_out_of_range, event.error == PC_OUT_OF_RANGE ? 1 : 0 },
                      { C::instr_fetching_opcode_out_of_range, event.error == OPCODE_OUT_OF_RANGE ? 1 : 0 },
                      { C::instr_fetching_instr_out_of_range, event.error == INSTRUCTION_OUT_OF_RANGE ? 1 : 0 },
                      { C::instr_fetching_tag_out_of_range, event.error == TAG_OUT_OF_RANGE ? 1 : 0 },
                      { C::instr_fetching_sel_parsing_err, event.error.has_value() ? 1 : 0 },

                      // selector for lookups
                      { C::instr_fetching_sel_pc_in_range, event.error != PC_OUT_OF_RANGE ? 1 : 0 },

                      { C::instr_fetching_bytecode_size, bytecode_size },
                      { C::instr_fetching_bytes_to_read, bytes_to_read },
                      { C::instr_fetching_instr_abs_diff, instr_abs_diff },
                      { C::instr_fetching_pc_abs_diff, pc_abs_diff },
                      { C::instr_fetching_pc_size_in_bits,
                        AVM_PC_SIZE_IN_BITS }, // Remove when we support constants in lookups
                      { C::instr_fetching_tag_value, tag_value },
                  } });
        row++;
    }
}

// The lookup and permutation interactions that connect the bytecode subtraces to each other and
// to the Poseidon2, contract-instance, and decomposition traces.
const InteractionDefinition BytecodeTraceBuilder::interactions =
    InteractionDefinition()
        // Bytecode Hashing
        .add<lookup_bc_hashing_poseidon2_hash_settings, InteractionType::LookupSequential>()
        // Bytecode Retrieval
        .add<lookup_bc_retrieval_contract_instance_retrieval_settings, InteractionType::LookupSequential>()
        .add<lookup_bc_retrieval_retrieved_bytecodes_insertion_settings, InteractionType::LookupSequential>()
        // Bytecode Decomposition
        .add<perm_bc_hashing_get_packed_field_1_settings,
             perm_bc_hashing_get_packed_field_2_settings>(Column::bc_decomposition_sel_packed)
        // Instruction Fetching
        .add<lookup_instr_fetching_bytecode_size_from_bc_dec_settings, InteractionType::LookupGeneric>()
        .add<lookup_instr_fetching_instr_abs_diff_positive_settings, InteractionType::LookupIntoIndexedByClk>()
        .add<lookup_instr_fetching_pc_abs_diff_positive_settings, InteractionType::LookupGeneric>();

} // namespace bb::avm2::tracegen