Barretenberg
The ZK-SNARK library at the core of Aztec

data_copy_trace.cpp
#include <cassert>
#include <cstdint>
#include <memory>

namespace bb::avm2::tracegen {

void DataCopyTraceBuilder::process(const simulation::EventEmitterInterface<simulation::DataCopyEvent>::Container& events,
                                   TraceContainer& trace)
{
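    // Converts simulated CD_COPY / RD_COPY events into rows of the data-copy subtrace: one row per written
    // element, plus dedicated single-row encodings for the error and zero-size-copy cases handled below.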
    using C = Column;

    uint32_t row = 1;
    // When processing the events we need to handle any potential errors and populate the respective error columns.
    for (const auto& event : events) {
        // We first set the elements of the row that are unconditional, i.e. those set regardless of success/error.
        bool is_cd_copy = event.operation == simulation::DataCopyOperation::CD_COPY;
        bool is_rd_copy = event.operation == simulation::DataCopyOperation::RD_COPY;

        // todo(ilyas): can optimize this, as we only need the inverse for CD_COPY
        bool is_top_level = event.read_context_id == 0;
        FF parent_id_inv = is_top_level ? 0 : FF(event.read_context_id).invert();
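        // Assuming the standard inverse-witness zero test (the constraint itself is not in this file), the
        // circuit would enforce something like
        //     is_top_level = 1 - src_context_id * parent_id_inv
        //     is_top_level * src_context_id = 0
        // forcing is_top_level to 1 exactly when src_context_id == 0, with parent_id_inv a free hint in
        // the zero case.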

        // While data copy size and data offset are guaranteed to be u32 at this point,
        // we cast to a wider integer type to detect overflows.
        uint64_t copy_size = static_cast<uint64_t>(event.data_copy_size);
        uint64_t data_offset = static_cast<uint64_t>(event.data_offset);
        uint64_t max_read_index = std::min(data_offset + copy_size, static_cast<uint64_t>(event.data_size));

        uint64_t max_read_addr = static_cast<uint64_t>(event.data_addr) + max_read_index;
        uint64_t max_write_addr = static_cast<uint64_t>(event.dst_addr) + copy_size;

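        // Reads are clamped to the end of the source data: indices in [data_offset, max_read_index) are
        // real reads, while any remaining writes up to copy_size become zero-padding rows further below.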
        trace.set(row,
                  { {
                      // Unconditional values
                      { C::data_copy_clk, event.execution_clk },
                      { C::data_copy_sel_start, 1 },
                      { C::data_copy_sel_cd_copy, is_cd_copy ? 1 : 0 },
                      { C::data_copy_sel_cd_copy_start, is_cd_copy ? 1 : 0 },
                      { C::data_copy_sel_rd_copy, is_rd_copy ? 1 : 0 },
                      { C::data_copy_sel_rd_copy_start, is_rd_copy ? 1 : 0 },
                      { C::data_copy_thirty_two, 32 }, // Need this for range checks

                      { C::data_copy_src_context_id, event.read_context_id },
                      { C::data_copy_dst_context_id, event.write_context_id },

                      { C::data_copy_copy_size, event.data_copy_size },
                      { C::data_copy_offset, event.data_offset },

                      { C::data_copy_src_addr, event.data_addr },
                      { C::data_copy_src_data_size, event.data_size },
                      { C::data_copy_dst_addr, event.dst_addr },

                      { C::data_copy_is_top_level, is_top_level ? 1 : 0 },
                      { C::data_copy_parent_id_inv, parent_id_inv },

                      // Compute Max Read Index
                      { C::data_copy_offset_plus_size, data_offset + copy_size },
                      { C::data_copy_offset_plus_size_is_gt, data_offset + copy_size > event.data_size ? 1 : 0 },
                      { C::data_copy_max_read_index, max_read_index },

                      // Max Addresses
                      { C::data_copy_max_mem_addr, MAX_MEM_ADDR },
                      { C::data_copy_max_read_addr, max_read_addr },
                      { C::data_copy_max_write_addr, max_write_addr },
                  } });
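        // The *_is_gt and max_* columns feed the GT and range-check lookups registered at the bottom of
        // this file (e.g. lookup_data_copy_max_read_index_gt_settings and the check_*_addr_in_range lookups).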

        //////////////////////////////////////////////////////////////////////
        // Memory Address Range Check
        //////////////////////////////////////////////////////////////////////
        // We need to check that the read and write addresses are within the valid memory range.
        // Note: for enqueued calls there is no out-of-bounds read, since we read from a column.

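        // If either address overflows the valid range, the event is encoded as a single error row and we
        // skip emitting any copy rows: sel_start and sel_end are both set on the same row, with
        // data_copy_err raised.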
        bool read_address_overflow = max_read_addr > MAX_MEM_ADDR;
        bool write_address_overflow = max_write_addr > MAX_MEM_ADDR;
        if (read_address_overflow || write_address_overflow) {
            trace.set(row,
                      { {
                          { C::data_copy_sel_end, 1 },
                          // Add error flags - note we can be out of range for both reads and writes
                          { C::data_copy_src_out_of_range_err, read_address_overflow ? 1 : 0 },
                          { C::data_copy_dst_out_of_range_err, write_address_overflow ? 1 : 0 },
                          { C::data_copy_err, 1 },
                      } });
            row++;
            continue; // Go to the next event
        }

        auto reads_left = data_offset > max_read_index ? 0 : max_read_index - data_offset;
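        // reads_left counts how many source elements remain to be read; once it reaches zero the remaining
        // rows are padding. It starts at zero when data_offset already lies past max_read_index.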

        //////////////////////////////////////////////////////////////////////
        // Check for Zero-Sized Copy
        //////////////////////////////////////////////////////////////////////
        // This has to happen outside of the next loop, since we never enter it when the copy size is zero.
        if (copy_size == 0) {
            trace.set(row,
                      { {
                          { C::data_copy_sel_start_no_err, 1 },
                          { C::data_copy_sel_end, 1 },
                          { C::data_copy_sel_write_count_is_zero, 1 },
                          { C::data_copy_write_count_zero_inv, copy_size == 0 ? 0 : FF(copy_size).invert() },
                      } });
            row++;
            continue; // Go to the next event
        }
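        // Note on write_count_zero_inv: it is the inverse witness for the copy_size == 0 check. In the
        // zero-size branch above it is always zero (the ternary only keeps the witness shape); for
        // non-zero copy sizes it is instead set on the start row of the loop below.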

        //////////////////////////////////////////////////////////////////////
        // Process Data Copy Rows
        //////////////////////////////////////////////////////////////////////
        for (uint32_t i = 0; i < event.calldata.size(); i++) {
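            // One trace row per written element. This assumes the simulator sizes event.calldata to the
            // full write count (copy_size), including the zero-padded tail.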
            bool start = i == 0;
            auto current_copy_size = copy_size - i;
            bool end = (current_copy_size - 1) == 0;

            bool is_padding_row = reads_left == 0;

            // These are guaranteed not to overflow, since we checked the read/write addresses above.
            auto read_addr = event.data_addr + data_offset + i;
            bool read_cd_col = is_cd_copy && is_top_level && !is_padding_row;

            // Read from memory if this is not a padding row and we are either RD_COPY-ing or doing a nested CD_COPY.
            bool sel_mem_read = !is_padding_row && (is_rd_copy || event.read_context_id != 0);
            FF value = is_padding_row ? 0 : event.calldata[i];
            FF reads_left_inv = is_padding_row ? 0 : FF(reads_left).invert();

            FF write_count_minus_one_inv = end ? 0 : FF(current_copy_size - 1).invert();

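            // Source selection per row: a top-level CD_COPY pulls from the calldata column (read_cd_col),
            // a nested CD_COPY or any RD_COPY reads memory (sel_mem_read), and a padding row reads nothing
            // and writes a zero value.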
            trace.set(row,
                      { {
                          { C::data_copy_clk, event.execution_clk },
                          { C::data_copy_sel_cd_copy, is_cd_copy ? 1 : 0 },
                          { C::data_copy_sel_rd_copy, is_rd_copy ? 1 : 0 },
                          { C::data_copy_thirty_two, 32 }, // Need this for range checks

                          { C::data_copy_src_context_id, event.read_context_id },
                          { C::data_copy_dst_context_id, event.write_context_id },
                          { C::data_copy_dst_addr, event.dst_addr + i },

                          { C::data_copy_sel_start_no_err, start ? 1 : 0 },
                          { C::data_copy_sel_end, end ? 1 : 0 },
                          { C::data_copy_copy_size, current_copy_size },
                          { C::data_copy_write_count_minus_one_inv, write_count_minus_one_inv },

                          { C::data_copy_sel_mem_write, 1 },

                          { C::data_copy_is_top_level, is_top_level ? 1 : 0 },
                          { C::data_copy_parent_id_inv, parent_id_inv },

                          { C::data_copy_sel_mem_read, sel_mem_read ? 1 : 0 },
                          { C::data_copy_read_addr, read_addr },

                          { C::data_copy_reads_left_inv, reads_left_inv },
                          { C::data_copy_padding, is_padding_row ? 1 : 0 },
                          { C::data_copy_value, value },

                          { C::data_copy_cd_copy_col_read, read_cd_col ? 1 : 0 },

                          // Reads Left
                          { C::data_copy_reads_left, reads_left },
                          { C::data_copy_offset_gt_max_read_index, (start && data_offset > max_read_index) ? 1 : 0 },

                          // Non-zero Copy Size
                          { C::data_copy_write_count_zero_inv, start ? FF(copy_size).invert() : 0 },
                      } });

            reads_left = reads_left == 0 ? 0 : reads_left - 1;
            row++;
        }
    }
}
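// The interaction set below wires this subtrace into the rest of the AVM: generic lookups back the memory
// reads/writes, the enqueued-call column read, and the GT/range checks used above, while permutations tie
// the CD_COPY / RD_COPY dispatch interactions to their counterpart subtrace.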

const InteractionDefinition DataCopyTraceBuilder::interactions =
    InteractionDefinition()
        // Mem Read / Writes (Need to be moved to permutations)
        .add<lookup_data_copy_mem_read_settings, InteractionType::LookupGeneric>()
        .add<lookup_data_copy_mem_write_settings, InteractionType::LookupGeneric>()
        // Enqueued Call Col Read
        .add<lookup_data_copy_col_read_settings, InteractionType::LookupGeneric>()
        // GT checks
        .add<lookup_data_copy_max_read_index_gt_settings, InteractionType::LookupGeneric>()
        .add<lookup_data_copy_check_src_addr_in_range_settings, InteractionType::LookupGeneric>()
        .add<lookup_data_copy_check_dst_addr_in_range_settings, InteractionType::LookupGeneric>()
        .add<lookup_data_copy_offset_gt_max_read_index_settings, InteractionType::LookupGeneric>()
        // Permutations
        .add<perm_data_copy_dispatch_cd_copy_settings, InteractionType::Permutation>()
        .add<perm_data_copy_dispatch_rd_copy_settings, InteractionType::Permutation>();

} // namespace bb::avm2::tracegen