Barretenberg
The ZK-SNARK library at the core of Aztec
data_copy_trace.cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

namespace bb::avm2::tracegen {

// Builds the data copy trace.
void DataCopyTraceBuilder::process(const simulation::EventEmitterInterface<simulation::DataCopyEvent>::Container& events,
                                   TraceContainer& trace)
{
    using C = Column;
    uint32_t row = 1;
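    // Each event occupies copy_size rows in the trace (or a single row for an error or a
    // zero-sized copy); the first of those rows additionally carries the request-level
    // "start" columns set just below.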
    for (const auto& event : events) {
        const bool is_cd_copy = event.operation == simulation::DataCopyOperation::CD_COPY;
        const bool is_rd_copy = event.operation == simulation::DataCopyOperation::RD_COPY;
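        // A read context id of 0 marks a top-level (enqueued) call. For nested calls the
        // circuit proves read_context_id != 0 via its field inverse, computed in the batch
        // inversion at the end of this function.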
        const bool is_top_level = event.read_context_id == 0;
        const FF parent_id_inv = is_top_level ? 0 : FF(event.read_context_id); // Will be inverted in batch later

        // While data copy size and data offset are guaranteed to be U32 at this point,
        // we cast to a wider integer type to detect overflows.
        const uint64_t copy_size = static_cast<uint64_t>(event.data_copy_size);
        const uint64_t data_offset = static_cast<uint64_t>(event.data_offset);
        const uint64_t data_index_upper_bound =
            std::min(data_offset + copy_size, static_cast<uint64_t>(event.src_data_size));
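        // Reads are clamped to the available source data; any elements requested beyond
        // src_data_size are emitted as zero-valued padding rows further down.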

        const uint64_t read_addr_upper_bound = static_cast<uint64_t>(event.src_data_addr) + data_index_upper_bound;
        const uint64_t write_addr_upper_bound = static_cast<uint64_t>(event.dst_addr) + copy_size;
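        // One past the highest address touched on the read and write side; both are range
        // checked against AVM_MEMORY_SIZE below.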

        trace.set(row,
                  { {
                      // Unconditional values
                      { C::data_copy_sel, 1 },
                      { C::data_copy_clk, event.execution_clk },
                      { C::data_copy_sel_start, 1 },
                      { C::data_copy_sel_cd_copy, is_cd_copy ? 1 : 0 },
                      { C::data_copy_sel_cd_copy_start, is_cd_copy ? 1 : 0 },
                      { C::data_copy_sel_rd_copy_start, is_rd_copy ? 1 : 0 },

                      { C::data_copy_src_context_id, event.read_context_id },
                      { C::data_copy_dst_context_id, event.write_context_id },

                      { C::data_copy_copy_size, event.data_copy_size },
                      { C::data_copy_offset, event.data_offset },

                      { C::data_copy_src_addr, event.src_data_addr },
                      { C::data_copy_src_data_size, event.src_data_size },
                      { C::data_copy_dst_addr, event.dst_addr },

                      { C::data_copy_is_top_level, is_top_level ? 1 : 0 },
                      { C::data_copy_parent_id_inv, parent_id_inv }, // Will be inverted in batch later

                      // Compute Data Index Upper Bound
                      { C::data_copy_offset_plus_size, data_offset + copy_size },
                      { C::data_copy_offset_plus_size_is_gt, data_offset + copy_size > event.src_data_size ? 1 : 0 },
                      { C::data_copy_data_index_upper_bound, data_index_upper_bound },

                      // Address Upper Bounds
                      { C::data_copy_mem_size, static_cast<uint64_t>(AVM_MEMORY_SIZE) },
                      { C::data_copy_read_addr_upper_bound, read_addr_upper_bound },
                      { C::data_copy_write_addr_upper_bound, write_addr_upper_bound },

                  } });
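
        // The comparison columns above (offset_plus_size_is_gt and the *_upper_bound values)
        // feed the GT lookups registered in `interactions` at the end of this file.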

        // Memory Address Range Check
        // We need to check that the read and write addresses are within the valid memory range.
        // Note: for enqueued calls, there is no out-of-bounds read since we read from a column.

        bool read_address_overflow = read_addr_upper_bound > AVM_MEMORY_SIZE;
        bool write_address_overflow = write_addr_upper_bound > AVM_MEMORY_SIZE;
        if (read_address_overflow || write_address_overflow) {
            trace.set(row,
                      { {
                          { C::data_copy_sel_end, 1 },
                          // Add error flags - note we can be out of range for both reads and writes
                          { C::data_copy_src_out_of_range_err, read_address_overflow ? 1 : 0 },
                          { C::data_copy_dst_out_of_range_err, write_address_overflow ? 1 : 0 },
                          { C::data_copy_err, 1 },
                      } });
            row++;
            continue; // Go to the next event
        }

        // If there is an error, the copied data is empty, so this assertion has to be
        // performed after the error check.
        assert(event.copying_data.size() == copy_size);

        // Check for Zero Sized Copy
        // This has to happen outside of the next loop since we will not enter it if the copy size is zero
        if (copy_size == 0) {
            trace.set(
                row,
                { {
                    { C::data_copy_sel_start_no_err, 1 },
                    { C::data_copy_sel_end, 1 },
                    { C::data_copy_sel_write_count_is_zero, 1 },
                    { C::data_copy_data_index_upper_bound_gt_offset, data_index_upper_bound > data_offset ? 1 : 0 },
                } });
            row++;
            continue; // Go to the next event
        }

        // Process Data Copy Rows
        uint32_t reads_left =
            data_offset >= data_index_upper_bound ? 0 : static_cast<uint32_t>(data_index_upper_bound - data_offset);
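        // reads_left counts the elements that actually come from the source data; once it
        // reaches zero the remaining rows are zero-valued padding.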

        for (uint32_t i = 0; i < copy_size; i++) {
            bool start = i == 0;
            auto current_copy_size = copy_size - i;
            bool end = (current_copy_size - 1) == 0;
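            // current_copy_size counts down to 1 on the last row of the copy; sel_end together
            // with write_count_minus_one_inv lets the circuit prove where the copy terminates.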

            bool is_padding_row = reads_left == 0;

            // These are guaranteed not to overflow since we checked the read/write addresses above
            uint64_t read_addr = event.src_data_addr + data_offset + i;
            bool read_cd_col = is_cd_copy && is_top_level && !is_padding_row;
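            // A top-level CD_COPY reads calldata from the dedicated calldata column rather
            // than from memory, so it cannot read out of bounds.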

            // Read from memory if this is not a padding row and we are either RD_COPY-ing or a nested CD_COPY
            bool sel_mem_read = !is_padding_row && (is_rd_copy || !is_top_level);
            FF value = is_padding_row ? 0 : event.copying_data[i].as_ff();
            // The circuit only enforces tag consistency for memory reads.
            FF tag = sel_mem_read ? static_cast<FF>(static_cast<uint8_t>(event.copying_data[i].get_tag())) : 0;

            trace.set(
                row,
                { {
                    { C::data_copy_sel, 1 },
                    { C::data_copy_clk, event.execution_clk },
                    { C::data_copy_sel_cd_copy, is_cd_copy ? 1 : 0 },

                    { C::data_copy_src_context_id, event.read_context_id },
                    { C::data_copy_dst_context_id, event.write_context_id },
                    { C::data_copy_dst_addr, event.dst_addr + i },

                    { C::data_copy_sel_start_no_err, start ? 1 : 0 },
                    { C::data_copy_sel_end, end ? 1 : 0 },
                    { C::data_copy_copy_size, current_copy_size },
                    { C::data_copy_write_count_minus_one_inv,
                      current_copy_size - 1 }, // Will be inverted in batch later

                    { C::data_copy_sel_mem_write, 1 },

                    { C::data_copy_is_top_level, is_top_level ? 1 : 0 },
                    { C::data_copy_parent_id_inv, parent_id_inv }, // Will be inverted in batch later

                    { C::data_copy_sel_mem_read, sel_mem_read ? 1 : 0 },
                    { C::data_copy_read_addr, read_addr },
                    { C::data_copy_read_addr_plus_one, read_cd_col ? read_addr + 1 : 0 },

                    { C::data_copy_reads_left_inv, reads_left }, // Will be inverted in batch later
                    { C::data_copy_padding, is_padding_row ? 1 : 0 },
                    { C::data_copy_value, value },
                    { C::data_copy_tag, tag },

                    { C::data_copy_cd_copy_col_read, read_cd_col ? 1 : 0 },

                    // Reads Left
                    { C::data_copy_reads_left, reads_left },
                    { C::data_copy_data_index_upper_bound_gt_offset,
                      (start && data_index_upper_bound > data_offset) ? 1 : 0 },

                    // Non-zero Copy Size
                    { C::data_copy_write_count_zero_inv, start ? FF(copy_size) : 0 }, // Will be inverted in batch later
                } });

            if (reads_left > 0) {
                reads_left--;
            }

            row++;
        }
    }

    // Batch invert the columns.
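    // All cells written to the *_inv columns above currently hold the raw values; a single
    // batched inversion replaces each non-zero cell with its field inverse, which is far
    // cheaper than performing one modular inversion per row.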
    trace.invert_columns({ { C::data_copy_parent_id_inv,
                             C::data_copy_write_count_zero_inv,
                             C::data_copy_reads_left_inv,
                             C::data_copy_write_count_minus_one_inv } });
}

const InteractionDefinition DataCopyTraceBuilder::interactions =
    InteractionDefinition()
        // Enqueued Call Col Read
        // (the lookup_data_copy_col_read_settings entry is not shown in this listing)
        // GT checks
        .add<lookup_data_copy_offset_plus_size_is_gt_data_size_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_data_copy_check_src_addr_in_range_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_data_copy_check_dst_addr_in_range_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_data_copy_data_index_upper_bound_gt_offset_settings, InteractionType::LookupGeneric>(
            Column::gt_sel);

} // namespace bb::avm2::tracegen