Barretenberg
The ZK-SNARK library at the core of Aztec
sha256_trace.cpp
#include <algorithm>
#include <any>
#include <concepts>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <ranges>
#include <stdexcept>

namespace bb::avm2::tracegen {

namespace {

// These are some useful groupings of columns for the SHA256 trace that we will iterate over.
constexpr std::array<Column, 8> state_cols = {
    Column::sha256_a, Column::sha256_b, Column::sha256_c, Column::sha256_d,
    Column::sha256_e, Column::sha256_f, Column::sha256_g, Column::sha256_h,
};

constexpr std::array<Column, 8> init_state_cols = {
    Column::sha256_init_a, Column::sha256_init_b, Column::sha256_init_c, Column::sha256_init_d,
    Column::sha256_init_e, Column::sha256_init_f, Column::sha256_init_g, Column::sha256_init_h,
};

constexpr std::array<Column, 16> w_cols = {
    Column::sha256_helper_w0, Column::sha256_helper_w1, Column::sha256_helper_w2, Column::sha256_helper_w3,
    Column::sha256_helper_w4, Column::sha256_helper_w5, Column::sha256_helper_w6, Column::sha256_helper_w7,
    Column::sha256_helper_w8, Column::sha256_helper_w9, Column::sha256_helper_w10, Column::sha256_helper_w11,
    Column::sha256_helper_w12, Column::sha256_helper_w13, Column::sha256_helper_w14, Column::sha256_helper_w15,
};

constexpr std::array<Column, 16> output_cols = {
    Column::sha256_output_a_lhs, Column::sha256_output_a_rhs, Column::sha256_output_b_lhs, Column::sha256_output_b_rhs,
    Column::sha256_output_c_lhs, Column::sha256_output_c_rhs, Column::sha256_output_d_lhs, Column::sha256_output_d_rhs,
    Column::sha256_output_e_lhs, Column::sha256_output_e_rhs, Column::sha256_output_f_lhs, Column::sha256_output_f_rhs,
    Column::sha256_output_g_lhs, Column::sha256_output_g_rhs, Column::sha256_output_h_lhs, Column::sha256_output_h_rhs,
};

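// The standard SHA-256 round constants K[0..63]: the first 32 bits of the fractional parts of the cube roots of
// the first 64 primes, as specified in FIPS 180-4.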
constexpr std::array<uint32_t, 64> round_constants = {
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};

} // namespace

// These are helper functions to iterate and set repetitive columns in the trace.
void Sha256TraceBuilder::set_helper_cols(const std::array<uint32_t, 16>& prev_w_helpers, TraceContainer& trace)
{
    for (size_t i = 0; i < 16; i++) {
        trace.set(row, { { { w_cols[i], prev_w_helpers[i] } } });
    }
}

void Sha256TraceBuilder::set_state_cols(const std::array<uint32_t, 8>& state, TraceContainer& trace)
{
    for (size_t i = 0; i < 8; i++) {
        trace.set(row, { { { state_cols[i], state[i] } } });
    }
}

void Sha256TraceBuilder::set_init_state_cols(const std::array<uint32_t, 8>& init_state, TraceContainer& trace)
{
    for (size_t i = 0; i < 8; i++) {
        trace.set(row, { { { init_state_cols[i], init_state[i] } } });
    }
}

// Decomposes a into two 32-bit values at the bit position b and inserts witness data into the trace.
void Sha256TraceBuilder::into_limbs_with_witness(
    uint64_t a, const uint8_t b, Column c_lhs, Column c_rhs, TraceContainer& trace)
{
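    // The decomposition satisfies a == a_lhs * 2^b + a_rhs (for the values used here, a < 2^(32 + b)),
    // so a can be reconstructed from the two limb columns.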
    uint32_t a_lhs = static_cast<uint32_t>(a >> b);
    uint32_t a_rhs = static_cast<uint32_t>(a) & static_cast<uint32_t>((static_cast<uint64_t>(1) << b) - 1);
    trace.set(row, { { { c_lhs, a_lhs }, { c_rhs, a_rhs } } });
}

// Performs 32-bit rotation with witness data inserted into the trace.
uint32_t Sha256TraceBuilder::ror_with_witness(
    const uint32_t val, const uint8_t shift, Column c_result, Column c_lhs, Column c_rhs, TraceContainer& trace)
{
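    // With lhs = val >> shift and rhs = val & (2^shift - 1), the rotation equals rhs * 2^(32 - shift) + lhs,
    // which is presumably how the circuit relates the result column to the input limbs below.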
    auto result = (val >> (shift & 31U)) | (val << (32U - (shift & 31U)));
    into_limbs_with_witness(val, shift, c_lhs, c_rhs, trace);
    trace.set(c_result, row, result);
    return result;
}

// Performs 32-bit shift right with witness data inserted into the trace.
uint32_t Sha256TraceBuilder::shr_with_witness(
    const uint32_t val, const uint8_t shift, Column c_result, Column c_lhs, Column c_rhs, TraceContainer& trace)
{
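    // For a plain right shift the result is just the high limb of the decomposition: val >> shift == lhs.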
    auto result = val >> shift;
    into_limbs_with_witness(val, shift, c_lhs, c_rhs, trace);
    trace.set(c_result, row, result);
    return result;
}

// Computes and returns the message schedule (w) value for that round, and inserts witness data into the trace.
uint32_t Sha256TraceBuilder::compute_w_with_witness(const std::array<uint32_t, 16>& prev_w_helpers,
                                                    TraceContainer& trace)
{
    using C = Column;

    // Computing w[j] := w[j-16] + s0 + w[j-7] + s1
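    // prev_w_helpers holds the previous 16 schedule words, so prev_w_helpers[0] = w[j-16],
    // prev_w_helpers[1] = w[j-15], prev_w_helpers[9] = w[j-7] and prev_w_helpers[14] = w[j-2].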

    // Step (1) s0 := ror(w[i - 15], 7) ^ ror(w[i - 15], 18) ^ (w[i - 15] >> 3);
    // Compute ror(w[i - 15], 7)
    uint32_t rot_7 =
        ror_with_witness(prev_w_helpers[1], 7, C::sha256_w_15_rotr_7, C::sha256_lhs_w_7, C::sha256_rhs_w_7, trace);
    trace.set(C::sha256_two_pow_7, row, 128); // Store 2^7 for reference
    // Compute ror(w[i - 15], 18)
    uint32_t rot_18 =
        ror_with_witness(prev_w_helpers[1], 18, C::sha256_w_15_rotr_18, C::sha256_lhs_w_18, C::sha256_rhs_w_18, trace);
    trace.set(C::sha256_two_pow_18, row, 262144); // Store 2^18 for reference
    // Compute (w[i - 15] >> 3)
    uint32_t shift_3 =
        shr_with_witness(prev_w_helpers[1], 3, C::sha256_w_15_rshift_3, C::sha256_lhs_w_3, C::sha256_rhs_w_3, trace);
    trace.set(C::sha256_two_pow_3, row, 8); // Store 2^3 for reference

    // Compute ror(w[i - 15], 7) ^ ror(w[i - 15], 18)
    trace.set(C::sha256_w_15_rotr_7_xor_w_15_rotr_18, row, rot_7 ^ rot_18);
    // Compute s0
    uint32_t w_s_0 = rot_7 ^ rot_18 ^ shift_3;
    trace.set(C::sha256_w_s_0, row, w_s_0);

    // Step (2) s1 := ror(w[i - 2], 17) ^ ror(w[i - 2], 19) ^ (w[i - 2] >> 10);
    // Compute ror(w[i - 2], 17)
    uint32_t rot_17 =
        ror_with_witness(prev_w_helpers[14], 17, C::sha256_w_2_rotr_17, C::sha256_lhs_w_17, C::sha256_rhs_w_17, trace);
    trace.set(C::sha256_two_pow_17, row, 131072); // Store 2^17 for reference
    // Compute ror(w[i - 2], 19)
    uint32_t rot_19 =
        ror_with_witness(prev_w_helpers[14], 19, C::sha256_w_2_rotr_19, C::sha256_lhs_w_19, C::sha256_rhs_w_19, trace);
    trace.set(C::sha256_two_pow_19, row, 524288); // Store 2^19 for reference
    // Compute (w[i - 2] >> 10)
    uint32_t shift_10 = shr_with_witness(
        prev_w_helpers[14], 10, C::sha256_w_2_rshift_10, C::sha256_lhs_w_10, C::sha256_rhs_w_10, trace);
    trace.set(C::sha256_two_pow_10, row, 1024); // Store 2^10 for reference

    // Compute ror(w[i - 2], 17) ^ ror(w[i - 2], 19)
    trace.set(C::sha256_w_2_rotr_17_xor_w_2_rotr_19, row, rot_17 ^ rot_19);
    // Compute s1
    uint32_t w_s_1 = rot_17 ^ rot_19 ^ shift_10;
    trace.set(C::sha256_w_s_1, row, w_s_1);

    // Compute w := w[i-16] + s0 + w[i-7] + s1 (i.e. prev_w_helpers[0] + s0 + prev_w_helpers[9] + s1)
    // The computation of w can overflow 32 bits so we need to use a 64-bit integer and perform modulo reduction
    uint64_t computed_w =
        prev_w_helpers[0] + static_cast<uint64_t>(w_s_0) + prev_w_helpers[9] + static_cast<uint64_t>(w_s_1);

    into_limbs_with_witness(computed_w, 32, C::sha256_computed_w_lhs, C::sha256_computed_w_rhs, trace);
    return static_cast<uint32_t>(computed_w);
}

// Performs the SHA-256 compression function for a single round and inserts witness data into the trace.
std::array<uint32_t, 8> Sha256TraceBuilder::compute_compression_with_witness(const std::array<uint32_t, 8>& state,
                                                                             uint32_t round_w,
                                                                             uint32_t round_constant,
                                                                             uint32_t row,
                                                                             TraceContainer& trace)
{
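    // state holds the working variables in order: state[0..7] = a, b, c, d, e, f, g, h.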
    using C = Column;

    // Apply SHA-256 compression function to the message schedule
    // Compute S1 := ror(e, 6U) ^ ror(e, 11U) ^ ror(e, 25U);
    // Compute ror(e, 6)
    uint32_t rot_6 = ror_with_witness(state[4], 6, C::sha256_e_rotr_6, C::sha256_lhs_e_6, C::sha256_rhs_e_6, trace);
    trace.set(C::sha256_two_pow_6, row, 64); // Store 2^6 for reference
    // Compute ror(e, 11)
    uint32_t rot_11 =
        ror_with_witness(state[4], 11, C::sha256_e_rotr_11, C::sha256_lhs_e_11, C::sha256_rhs_e_11, trace);
    trace.set(C::sha256_two_pow_11, row, 2048); // Store 2^11 for reference
    // Compute ror(e, 25)
    uint32_t rot_25 =
        ror_with_witness(state[4], 25, C::sha256_e_rotr_25, C::sha256_lhs_e_25, C::sha256_rhs_e_25, trace);
    trace.set(C::sha256_two_pow_25, row, 33554432); // Store 2^25 for reference

    // Compute ror(e, 6) ^ ror(e, 11)
    trace.set(C::sha256_e_rotr_6_xor_e_rotr_11, row, rot_6 ^ rot_11);
    // Compute S1, this can't overflow but we expand to uint64_t for later use
    uint64_t S1 = rot_6 ^ rot_11 ^ rot_25;
    trace.set(C::sha256_s_1, row, S1);

    // Compute ch := (e & f) ^ (~e & g);
    // Compute ~e
    uint32_t not_e = ~state[4];
    trace.set(C::sha256_not_e, row, not_e);
    // Compute e & f
    uint32_t e_and_f = state[4] & state[5];
    trace.set(C::sha256_e_and_f, row, e_and_f);
    // Compute ~e & g
    uint32_t not_e_and_g = not_e & state[6];
    trace.set(C::sha256_not_e_and_g, row, not_e_and_g);
    // Compute (e & f) ^ (~e & g)
    uint64_t ch = e_and_f ^ not_e_and_g;
    trace.set(C::sha256_ch, row, ch);

    // Compute S0 := ror(a, 2U) ^ ror(a, 13U) ^ ror(a, 22U);
    // Compute ror(a, 2)
    uint32_t rot_2 = ror_with_witness(state[0], 2, C::sha256_a_rotr_2, C::sha256_lhs_a_2, C::sha256_rhs_a_2, trace);
    trace.set(C::sha256_two_pow_2, row, 4); // Store 2^2 for reference
    // Compute ror(a, 13)
    uint32_t rot_13 =
        ror_with_witness(state[0], 13, C::sha256_a_rotr_13, C::sha256_lhs_a_13, C::sha256_rhs_a_13, trace);
    trace.set(C::sha256_two_pow_13, row, 8192); // Store 2^13 for reference
    // Compute ror(a, 22)
    uint32_t rot_22 =
        ror_with_witness(state[0], 22, C::sha256_a_rotr_22, C::sha256_lhs_a_22, C::sha256_rhs_a_22, trace);
    trace.set(C::sha256_two_pow_22, row, 4194304); // Store 2^22 for reference

    // Compute ror(a, 2) ^ ror(a, 13)
    trace.set(C::sha256_a_rotr_2_xor_a_rotr_13, row, rot_2 ^ rot_13);
    // Compute S0, this can't overflow but we expand to uint64_t for later use
    uint64_t S0 = rot_2 ^ rot_13 ^ rot_22;
    trace.set(C::sha256_s_0, row, S0);

    // Compute Maj := (a & b) ^ (a & c) ^ (b & c);
    // Compute a & b
    uint32_t a_and_b = state[0] & state[1];
    trace.set(C::sha256_a_and_b, row, a_and_b);
    // Compute a & c
    uint32_t a_and_c = state[0] & state[2];
    trace.set(C::sha256_a_and_c, row, a_and_c);
    // Compute b & c
    uint32_t b_and_c = state[1] & state[2];
    trace.set(C::sha256_b_and_c, row, b_and_c);
    // Compute (a & b) ^ (a & c)
    trace.set(C::sha256_a_and_b_xor_a_and_c, row, a_and_b ^ a_and_c);
    // Compute Maj, widened to uint64_t so the later additions can be reduced modulo 2^32
    uint64_t maj = a_and_b ^ a_and_c ^ b_and_c;
    trace.set(C::sha256_maj, row, maj);

    // Compute the temp values; these must be 64-bit integers so the additions can be reduced modulo 2^32
    uint64_t temp1 = static_cast<uint64_t>(state[7]) + S1 + ch + round_constant + round_w;
    uint64_t temp2 = S0 + maj;
    uint64_t next_a = temp1 + temp2;
    into_limbs_with_witness(next_a, 32, C::sha256_next_a_lhs, C::sha256_next_a_rhs, trace);
    trace.set(C::sha256_round_constant, row, round_constant);
    uint32_t a = static_cast<uint32_t>(next_a);

    // Additions can overflow 32 bits so we perform modulo reduction
    uint64_t next_e = state[3] + temp1;
    into_limbs_with_witness(next_e, 32, C::sha256_next_e_lhs, C::sha256_next_e_rhs, trace);
    uint32_t e = static_cast<uint32_t>(next_e);

    return {
        a,        /*a = temp1 + temp2*/
        state[0], /*b = a*/
        state[1], /*c = b*/
        state[2], /*d = c*/
        e,        /*e = d + temp1*/
        state[4], /*f = e*/
        state[5], /*g = f*/
        state[6], /*h = g*/
    };
}

// Computes the final output from the final round state and inserts witness data into the trace.
void Sha256TraceBuilder::compute_sha256_output(const std::array<uint32_t, 8>& out_state,
                                               const std::array<uint32_t, 8>& init_state,
                                               TraceContainer& trace)
{
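    // Each output word is (init + state) mod 2^32; the lhs/rhs limbs at bit position 32 witness the reduction.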
    uint32_t counter = 0;
    for (const auto& [init, state] : zip_view(init_state, out_state)) {
        uint64_t output = static_cast<uint64_t>(init) + static_cast<uint64_t>(state);
        into_limbs_with_witness(output, 32, output_cols[counter], output_cols[counter + 1], trace);
        counter += 2;
    }
}

void Sha256TraceBuilder::process(
    const simulation::EventEmitterInterface<simulation::Sha256CompressionEvent>::Container& events,
    TraceContainer& trace)
{
    using C = Column;

    for (const auto& event : events) {

        // Memory Components of SHA-256 Compression Function
        // Upcast addresses to uint64_t to avoid overflow issues
        uint64_t state_addr = static_cast<uint64_t>(event.state_addr);
        uint64_t input_addr = static_cast<uint64_t>(event.input_addr);
        uint64_t output_addr = static_cast<uint64_t>(event.output_addr);

        uint64_t max_state_addr = state_addr + 7;   // State is 8 elements
        uint64_t max_input_addr = input_addr + 15;  // Input is 16 elements
        uint64_t max_output_addr = output_addr + 7; // Output is 8 elements

        // These are unconditional values that must always be set at the start
        trace.set(row,
                  { {
                      { C::sha256_sel, 1 },
                      { C::sha256_start, 1 },
                      { C::sha256_execution_clk, event.execution_clk },
                      { C::sha256_space_id, event.space_id },
                      { C::sha256_u32_tag, static_cast<uint8_t>(MemoryTag::U32) },
                      // Operand Addresses
                      { C::sha256_state_addr, state_addr },
                      { C::sha256_input_addr, input_addr },
                      { C::sha256_output_addr, output_addr },
                      // Helpers
                      { C::sha256_max_mem_addr, AVM_HIGHEST_MEM_ADDRESS },
                      { C::sha256_max_state_addr, max_state_addr },
                      { C::sha256_max_input_addr, max_input_addr },
                      { C::sha256_max_output_addr, max_output_addr },
                      { C::sha256_input_rounds_rem, 16 }, // Number of inputs
                      { C::sha256_sel_is_input_round, 1 },
                      { C::sha256_rounds_remaining, 64 }, // Number of Sha256 Rounds
                  } });

        // Error Handling - Memory Out of Range
        bool state_out_of_range = max_state_addr > AVM_HIGHEST_MEM_ADDRESS;
        bool input_out_of_range = max_input_addr > AVM_HIGHEST_MEM_ADDRESS;
        bool output_out_of_range = max_output_addr > AVM_HIGHEST_MEM_ADDRESS;

        bool out_of_range_err = output_out_of_range || input_out_of_range || state_out_of_range;
        if (out_of_range_err) {
            trace.set(row,
                      { {
                          // Error flags
                          { C::sha256_sel_state_out_of_range_err, state_out_of_range ? 1 : 0 },
                          { C::sha256_sel_input_out_of_range_err, input_out_of_range ? 1 : 0 },
                          { C::sha256_sel_output_out_of_range_err, output_out_of_range ? 1 : 0 },
                          { C::sha256_mem_out_of_range_err, 1 },
                          { C::sha256_err, 1 },   // Set the error flag
                          { C::sha256_latch, 1 }, // Latch is set on error
                      } });
            row++;
            continue; // Skip to the next event if we have an out of range error
        }

        // Load Initial State from Memory
        // If we get here we are safe to load the memory. We need to split this up between parallel and sequential
        // loading: state is loaded in parallel, whilst inputs are loaded sequentially.

        // Since we treat them as separate temporality groups, if there is an error in the state loading, we will not
        // load the input.
        trace.set(row,
                  { {
                      // State Loading Selectors
                      { C::sha256_sel_mem_state_or_output, 1 },
                      // State Addresses
                      { C::sha256_memory_address_0_, state_addr },
                      { C::sha256_memory_address_1_, state_addr + 1 },
                      { C::sha256_memory_address_2_, state_addr + 2 },
                      { C::sha256_memory_address_3_, state_addr + 3 },
                      { C::sha256_memory_address_4_, state_addr + 4 },
                      { C::sha256_memory_address_5_, state_addr + 5 },
                      { C::sha256_memory_address_6_, state_addr + 6 },
                      { C::sha256_memory_address_7_, state_addr + 7 },
                      // State Values
                      { C::sha256_memory_register_0_, event.state[0].as_ff() },
                      { C::sha256_memory_register_1_, event.state[1].as_ff() },
                      { C::sha256_memory_register_2_, event.state[2].as_ff() },
                      { C::sha256_memory_register_3_, event.state[3].as_ff() },
                      { C::sha256_memory_register_4_, event.state[4].as_ff() },
                      { C::sha256_memory_register_5_, event.state[5].as_ff() },
                      { C::sha256_memory_register_6_, event.state[6].as_ff() },
                      { C::sha256_memory_register_7_, event.state[7].as_ff() },
                      // Values need to match initial state of sha256 compression
                      { C::sha256_init_a, event.state[0].as_ff() },
                      { C::sha256_init_b, event.state[1].as_ff() },
                      { C::sha256_init_c, event.state[2].as_ff() },
                      { C::sha256_init_d, event.state[3].as_ff() },
                      { C::sha256_init_e, event.state[4].as_ff() },
                      { C::sha256_init_f, event.state[5].as_ff() },
                      { C::sha256_init_g, event.state[6].as_ff() },
                      { C::sha256_init_h, event.state[7].as_ff() },
                      // State Memory Tags
                      { C::sha256_memory_tag_0_, static_cast<uint8_t>(event.state[0].get_tag()) },
                      { C::sha256_memory_tag_1_, static_cast<uint8_t>(event.state[1].get_tag()) },
                      { C::sha256_memory_tag_2_, static_cast<uint8_t>(event.state[2].get_tag()) },
                      { C::sha256_memory_tag_3_, static_cast<uint8_t>(event.state[3].get_tag()) },
                      { C::sha256_memory_tag_4_, static_cast<uint8_t>(event.state[4].get_tag()) },
                      { C::sha256_memory_tag_5_, static_cast<uint8_t>(event.state[5].get_tag()) },
                      { C::sha256_memory_tag_6_, static_cast<uint8_t>(event.state[6].get_tag()) },
                      { C::sha256_memory_tag_7_, static_cast<uint8_t>(event.state[7].get_tag()) },
                  } });

        // Check for Tag Errors in State
        bool invalid_state_tag_err = std::ranges::any_of(
            event.state, [](const MemoryValue& state) { return state.get_tag() != MemoryTag::U32; });

        if (invalid_state_tag_err) {
            // This is the more efficient batched tag check we perform in the circuit
            uint64_t batched_check = 0;
            // Batch the state tag checks
            for (uint32_t i = 0; i < event.state.size(); i++) {
                batched_check |=
                    (static_cast<uint64_t>(event.state[i].get_tag()) - static_cast<uint64_t>(MemoryTag::U32))
                    << (i * 3);
            }
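            // The batched value is non-zero whenever at least one state tag differs from U32; its field inverse is
            // stored as a witness so the circuit can check that non-zero condition.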
            trace.set(row,
                      { {
                          { C::sha256_sel_invalid_state_tag_err, 1 },
                          { C::sha256_batch_tag_inv, FF(batched_check).invert() },
                          { C::sha256_latch, 1 },
                          { C::sha256_err, 1 }, // Set the error flag
                      } });

            row++;
            continue; // Skip to the next event if we have an invalid state tag error
        }

        // Load Hash inputs and check for tag errors
        // The input vector is expected to contain 16 elements, each of which is expected to be a 32-bit value.
        // If during simulation we encounter an invalid tag, it will have been the last element we retrieved
        // before we threw an error - so it will be the last element in the input vector.
        // Therefore, it is sufficient to check the tag of the last element only.
        bool invalid_tag_err = event.input.back().get_tag() != MemoryTag::U32;

        // Note that if we encountered an invalid tag error, the row that loaded the invalid tag needs to contain
        // sel_invalid_input_ROW_tag_err, and all the rows before it need to contain sel_invalid_input_tag_err.
        // The former is used to constrain the specific error, while the latter is used to propagate the error
        // to the start row (to communicate back to execution) and to turn off any computation constraints.
        for (uint32_t i = 0; i < event.input.size(); i++) {
            uint32_t input_rounds_rem = 16 - i;
            FF input_rounds_rem_inv = input_rounds_rem == 0 ? 0 : FF(input_rounds_rem).invert();

            MemoryValue round_input = event.input[i];
            FF input_tag = FF(static_cast<uint8_t>(round_input.get_tag()));
            FF expected_tag = FF(static_cast<uint8_t>(MemoryTag::U32));
            FF input_tag_diff = input_tag - expected_tag;
            FF input_tag_diff_inv = input_tag_diff == 0 ? 0 : input_tag_diff.invert();

            bool is_last = (i == event.input.size() - 1);
            trace.set(row + i,
                      { {
                          { C::sha256_sel, 1 },
                          // Propagated Fields
                          { C::sha256_execution_clk, event.execution_clk },
                          { C::sha256_space_id, event.space_id },
                          { C::sha256_output_addr, output_addr },
                          { C::sha256_sel_is_input_round, 1 },
                          { C::sha256_u32_tag, expected_tag },
                          { C::sha256_sel_read_input_from_memory, 1 },
                          // Input Rounds Control Flow
                          { C::sha256_input_rounds_rem, input_rounds_rem },
                          { C::sha256_input_rounds_rem_inv, input_rounds_rem_inv },
                          { C::sha256_input_addr, input_addr + i },
                          { C::sha256_input, round_input.as_ff() },
                          { C::sha256_input_tag, input_tag },
                          { C::sha256_input_tag_diff_inv, input_tag_diff_inv },
                          // Set input value
                          { C::sha256_w, round_input.as_ff() },
                          // Error Columns
                          // Propagated tag error columns
                          { C::sha256_sel_invalid_input_tag_err, invalid_tag_err ? 1 : 0 },
                          // Invalid Row Tag Error Columns
                          { C::sha256_sel_invalid_input_row_tag_err, (is_last && invalid_tag_err) ? 1 : 0 },
                          { C::sha256_err, invalid_tag_err ? 1 : 0 },
                          { C::sha256_latch, (is_last && invalid_tag_err) ? 1 : 0 },
                      } });
        }

        if (invalid_tag_err) {
            // Advance the row counter past the input-loading rows before moving on to the next event
            row += event.input.size();
            continue;
        }

        // If we get to this point, we are safe to proceed with the SHA-256 compression function
        // and we won't encounter any more errors

        // Execute SHA-256 Compression Function
        std::array<uint32_t, 8> state;
        std::ranges::transform(event.state.begin(), event.state.end(), state.begin(), [](const MemoryValue& val) {
            return val.as<uint32_t>();
        });

        std::array<uint32_t, 16> prev_w_helpers;
        std::ranges::transform(event.input.begin(),
                               event.input.end(),
                               prev_w_helpers.begin(),
                               [](const MemoryValue& val) { return val.as<uint32_t>(); });
        std::array<uint32_t, 8> round_state = state;

        // Each event results in 65 rows in the trace.
        // 64 rows for the 64 rounds of the SHA-256 compression function
        // 1 row for the final state
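        // Note that the row counter was not advanced after the input-loading rows above, so the first 16 round
        // rows are the same rows that loaded the 16 input words.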

        // Begin the rounds loop
        for (size_t i = 0; i < 64; i++) {
            // Detect if we are still using the inputs for values of w
            bool is_an_input_round = i < 16;
            // Inverse witness used to check that there are non-zero rounds remaining
            FF inv = FF(64 - i).invert();
            uint32_t round_w =
                is_an_input_round ? event.input[i].as<uint32_t>() : compute_w_with_witness(prev_w_helpers, trace);
            trace.set(row,
                      { {
                          { C::sha256_sel, 1 },
                          // Propagated Fields
                          { C::sha256_execution_clk, event.execution_clk },
                          { C::sha256_space_id, event.space_id },
                          { C::sha256_output_addr, output_addr },
                          { C::sha256_u32_tag, static_cast<uint8_t>(MemoryTag::U32) },
                          { C::sha256_two_pow_32, 1UL << 32 },
                          // For round selectors
                          { C::sha256_xor_sel, 2 },
                          { C::sha256_perform_round, 1 },
                          { C::sha256_round_count, i },
                          { C::sha256_rounds_remaining, 64 - i },
                          { C::sha256_rounds_remaining_inv, inv },
                          { C::sha256_w, round_w },
                          { C::sha256_sel_compute_w, is_an_input_round ? 0 : 1 },
                      } });
            // Set the init state columns - propagated down
            set_init_state_cols(state, trace);
            // Set the state columns
            set_state_cols(round_state, trace);
            // Set the round columns
            set_helper_cols(prev_w_helpers, trace);

            // Apply SHA-256 compression function to the message schedule and update the state
            round_state = compute_compression_with_witness(round_state, round_w, round_constants[i], row, trace);

            // Update prev_w_helpers: shift all values one position to the left and append the new round_w
            // at the end
            for (size_t j = 0; j < 15; j++) {
                prev_w_helpers[j] = prev_w_helpers[j + 1];
            }
            prev_w_helpers[15] = round_w;

            row++;
        }

        // Set the final row
        trace.set(row,
                  { {
                      { C::sha256_latch, 1 },
                      { C::sha256_sel, 1 },
                      { C::sha256_xor_sel, 2 },
                      { C::sha256_round_count, 64 },
                  } });

        // Set the init state columns - propagated down
        set_init_state_cols(state, trace);
        // Set the state columns
        set_state_cols(round_state, trace);
        // Set the round columns
        set_helper_cols(prev_w_helpers, trace);
        // Compute the output from the final round state
        compute_sha256_output(round_state, state, trace);

        // Write output memory
        trace.set(row,
                  { {
                      // Memory Fields
                      { C::sha256_execution_clk, event.execution_clk },
                      { C::sha256_space_id, event.space_id },
                      { C::sha256_sel_mem_state_or_output, 1 },
                      { C::sha256_rw, 1 }, // Writing output
                      { C::sha256_u32_tag, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_two_pow_32, 1UL << 32 },
                      { C::sha256_output_addr, output_addr },
                      // Output Addresses
                      { C::sha256_memory_address_0_, output_addr },
                      { C::sha256_memory_address_1_, output_addr + 1 },
                      { C::sha256_memory_address_2_, output_addr + 2 },
                      { C::sha256_memory_address_3_, output_addr + 3 },
                      { C::sha256_memory_address_4_, output_addr + 4 },
                      { C::sha256_memory_address_5_, output_addr + 5 },
                      { C::sha256_memory_address_6_, output_addr + 6 },
                      { C::sha256_memory_address_7_, output_addr + 7 },
                      // Output Values
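                      // Each value below wraps modulo 2^32 (uint32_t addition), matching the limb decomposition
                      // performed in compute_sha256_output above.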
                      { C::sha256_memory_register_0_, round_state[0] + state[0] },
                      { C::sha256_memory_register_1_, round_state[1] + state[1] },
                      { C::sha256_memory_register_2_, round_state[2] + state[2] },
                      { C::sha256_memory_register_3_, round_state[3] + state[3] },
                      { C::sha256_memory_register_4_, round_state[4] + state[4] },
                      { C::sha256_memory_register_5_, round_state[5] + state[5] },
                      { C::sha256_memory_register_6_, round_state[6] + state[6] },
                      { C::sha256_memory_register_7_, round_state[7] + state[7] },
                      // Output Memory Tags
                      { C::sha256_memory_tag_0_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_1_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_2_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_3_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_4_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_5_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_6_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_7_, static_cast<uint8_t>(MemoryTag::U32) },
                  } });

        row++;
    }
}

const InteractionDefinition Sha256TraceBuilder::interactions =
    InteractionDefinition()
        // GT Interactions
        .add<lookup_sha256_mem_check_state_addr_in_range_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_mem_check_input_addr_in_range_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_mem_check_output_addr_in_range_settings, InteractionType::LookupGeneric>()
        // Dispatch Permutation
        // Bitwise operations
        .add<lookup_sha256_w_s_0_xor_0_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_w_s_0_xor_1_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_w_s_1_xor_0_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_w_s_1_xor_1_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_s_1_xor_0_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_s_1_xor_1_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_ch_and_0_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_ch_and_1_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_ch_xor_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_s_0_xor_0_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_s_0_xor_1_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_maj_and_0_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_maj_and_1_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_maj_and_2_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_maj_xor_0_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_maj_xor_1_settings, InteractionType::LookupGeneric>()
        // Range Checks for Rotations and Shifts
        .add<lookup_sha256_range_rhs_w_7_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_rhs_w_18_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_rhs_w_3_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_rhs_w_17_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_rhs_w_19_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_rhs_w_10_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_rhs_e_6_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_rhs_e_11_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_rhs_e_25_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_rhs_a_2_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_rhs_a_13_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_rhs_a_22_settings, InteractionType::LookupGeneric>()
        // Range Checks for modulo add
        .add<lookup_sha256_range_comp_w_lhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_w_rhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_next_a_lhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_next_a_rhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_next_e_lhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_next_e_rhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_a_lhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_a_rhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_b_lhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_b_rhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_c_lhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_c_rhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_d_lhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_d_rhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_e_lhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_e_rhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_f_lhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_f_rhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_g_lhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_g_rhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_h_lhs_settings, InteractionType::LookupGeneric>()
        .add<lookup_sha256_range_comp_h_rhs_settings, InteractionType::LookupGeneric>();

} // namespace bb::avm2::tracegen