Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
sha256.cpp
Go to the documentation of this file.
1// === AUDIT STATUS ===
2// internal: { status: not started, auditors: [], date: YYYY-MM-DD }
3// external_1: { status: not started, auditors: [], date: YYYY-MM-DD }
4// external_2: { status: not started, auditors: [], date: YYYY-MM-DD }
5// =====================
6
7#include "sha256.hpp"
8
13
14using namespace bb;
15
16namespace bb::stdlib {
17using namespace bb::plookup;
18
/**
 * @brief Number of 512-bit SHA-256 compression blocks needed for a message of `num_bits` bits.
 *
 * SHA-256 padding appends a single 1-bit plus a 64-bit message-length field (65 bits total)
 * before rounding the message up to a whole number of 512-bit blocks (FIPS 180-4 §5.1.1).
 *
 * @param num_bits Length of the unpadded message, in bits.
 * @return ceil((num_bits + 65) / 512).
 */
constexpr size_t get_num_blocks(const size_t num_bits)
{
    // 1 mandatory padding bit + 64-bit big-endian length field.
    constexpr size_t extra_bits = 65UL;

    // Ceiling division: quotient plus one extra block if there is a remainder.
    return ((num_bits + extra_bits) / 512UL) + ((num_bits + extra_bits) % 512UL > 0);
}
25
26template <typename Builder> void SHA256<Builder>::prepare_constants(std::array<field_t<Builder>, 8>& input)
27{
28 for (size_t i = 0; i < 8; i++) {
29 input[i] = init_constants[i];
30 }
31}
32
/**
 * @brief Decompose a 32-bit message-schedule word `w` into sparse-form limbs via plookup.
 *
 * NOTE(review): this listing is a Doxygen extraction that dropped original lines 34, 36,
 * 42 and 48. Line 34 is the function signature — per this file's cross-reference index:
 * `static sparse_witness_limbs convert_witness(const field_ct& w)`. Lines 42 and 48 are
 * presumably the aggregate-initializer openers for the two brace lists below
 * (`result.sparse_limbs = {` and `result.rotated_limbs = {`, matching the struct members
 * declared in sha256.hpp) — confirm against the repository source.
 */
33template <typename Builder>
35{
37
38    sparse_witness_limbs result(w);
39
// A single multitable read decomposes `w`: column C2 of the lookup accumulators carries
// the 4 sparse-encoded limbs, column C3 carries rotated variants used by extend_witness.
40    const auto lookup = plookup_read<Builder>::get_lookup_accumulators(MultiTableId::SHA256_WITNESS_INPUT, w);
41
43        lookup[ColumnIdx::C2][0],
44        lookup[ColumnIdx::C2][1],
45        lookup[ColumnIdx::C2][2],
46        lookup[ColumnIdx::C2][3],
47    };
49        lookup[ColumnIdx::C3][0],
50        lookup[ColumnIdx::C3][1],
51        lookup[ColumnIdx::C3][2],
52        lookup[ColumnIdx::C3][3],
53    };
// Mark the cached decomposition as valid so extend_witness() does not redo the lookup.
54    result.has_sparse_limbs = true;
55
56    return result;
57}
58
/**
 * @brief Extend the 16 input words into the full 64-word SHA-256 message schedule.
 *
 * Implements w[i] = sigma1(w[i-2]) + w[i-7] + sigma0(w[i-15]) + w[i-16] (mod 2^32)
 * (FIPS 180-4 §6.2.2), with the XORs of sigma0/sigma1 evaluated in sparse form via
 * plookup tables rather than bit decomposition.
 *
 * NOTE(review): the extraction dropped original lines 60, 62, 66, 85, 92 and 108.
 * Line 60 is the signature — per the cross-reference index:
 * `static std::array<field_ct, 64> extend_witness(const std::array<field_ct, 16>& w_in)`.
 * Line 66 presumably declares the `w_sparse` array, lines 85 and 92 the openers of the
 * `left`/`right` limb arrays (and/or the `left_multipliers`/`right_multipliers` they
 * reference), and line 108 presumably the lookup mapping `xor_result_sparse` back to
 * binary form via SHA256_WITNESS_OUTPUT (that table id appears in this file's
 * cross-references) — confirm against the repository source.
 */
59template <typename Builder>
61{
63
64    Builder* ctx = w_in[0].get_context();
65
// Wrap the 16 input words; sparse decompositions are computed lazily on first use.
// Also pick up a non-null builder context from any witness input.
67    for (size_t i = 0; i < 16; ++i) {
68        w_sparse[i] = SHA256<Builder>::sparse_witness_limbs(w_in[i]);
69        if (!ctx && w_in[i].get_context()) {
70            ctx = w_in[i].get_context();
71        }
72    }
73
74    for (size_t i = 16; i < 64; ++i) {
// sigma0 operates on w[i-15], sigma1 on w[i-2].
75        auto& w_left = w_sparse[i - 15];
76        auto& w_right = w_sparse[i - 2];
77
// Lazily materialise the sparse limb decomposition (one plookup per word, cached).
78        if (!w_left.has_sparse_limbs) {
79            w_left = convert_witness(w_left.normal);
80        }
81        if (!w_right.has_sparse_limbs) {
82            w_right = convert_witness(w_right.normal);
83        }
84
// Scale each sparse limb by its rotation multiplier so that summing the limbs
// realises the rotations/shifts of sigma0 (left) and sigma1 (right) in sparse form.
86            w_left.sparse_limbs[0] * left_multipliers[0],
87            w_left.sparse_limbs[1] * left_multipliers[1],
88            w_left.sparse_limbs[2] * left_multipliers[2],
89            w_left.sparse_limbs[3] * left_multipliers[3],
90        };
91
93            w_right.sparse_limbs[0] * right_multipliers[0],
94            w_right.sparse_limbs[1] * right_multipliers[1],
95            w_right.sparse_limbs[2] * right_multipliers[2],
96            w_right.sparse_limbs[3] * right_multipliers[3],
97        };
98
99        const field_pt left_xor_sparse =
100            left[0].add_two(left[1], left[2]).add_two(left[3], w_left.rotated_limbs[1]) * fr(4);
101
// Accumulate sigma1 terms plus the (scaled) sigma0 accumulator into one sparse sum;
// normalize() creates a single witness so the value can feed the output lookup.
102        const field_pt xor_result_sparse = right[0]
103                                               .add_two(right[1], right[2])
104                                               .add_two(right[3], w_right.rotated_limbs[2])
105                                               .add_two(w_right.rotated_limbs[3], left_xor_sparse)
106                                               .normalize();
107
109
110        // TODO NORMALIZE WITH RANGE CHECK
111
// Schedule word before modular reduction: sigma-sum + w[i-16] + w[i-7]. This can
// exceed 32 bits, so it is reduced mod 2^32 below.
112        field_pt w_out_raw = xor_result.add_two(w_sparse[i - 16].normal, w_sparse[i - 7].normal);
113        field_pt w_out;
114        if (w_out_raw.witness_index == IS_CONSTANT) {
// Constant input: reduce out-of-circuit and emit a circuit constant.
115            w_out = field_pt(ctx, fr(w_out_raw.get_value().from_montgomery_form().data[0] & (uint64_t)0xffffffffULL));
116
117        } else {
// Witness the low 32 bits, then constrain quotient = (w_out_raw - w_out) / 2^32
// to a small range, which forces w_out == w_out_raw mod 2^32.
118            w_out = witness_t<Builder>(
119                ctx, fr(w_out_raw.get_value().from_montgomery_form().data[0] & (uint64_t)0xffffffffULL));
120            static constexpr fr inv_pow_two = fr(2).pow(32).invert();
121            // If we multiply the field elements by constants separately and then subtract, then the divisor is
122            // going to be in a normalized state right after subtraction and the call to .normalize() won't add
123            // gates
124            field_pt w_out_raw_inv_pow_two = w_out_raw * inv_pow_two;
125            field_pt w_out_inv_pow_two = w_out * inv_pow_two;
126            field_pt divisor = (w_out_raw_inv_pow_two - w_out_inv_pow_two).normalize();
// Carry of the multi-term 32-bit sum is small — constrained to the builder's
// 2-bit/value-3 range here (semantics of the second argument: see create_new_range_constraint).
127            ctx->create_new_range_constraint(divisor.witness_index, 3);
128        }
129
130        w_sparse[i] = sparse_witness_limbs(w_out);
131    }
132
// Return only the normal (binary) form of each schedule word.
133    std::array<field_pt, 64> w_extended;
134
135    for (size_t i = 0; i < 64; ++i) {
136        w_extended[i] = w_sparse[i].normal;
137    }
138    return w_extended;
139}
140
141template <typename Builder>
150
151template <typename Builder>
160
/**
 * @brief Compute Ch(e, f, g) = (e AND f) XOR (NOT e AND g), fused with the Sigma1 rotations.
 *
 * Operands are in base-7 "choose" sparse form, where bitwise logic becomes digit
 * arithmetic: the combination xor_result + 2*f + 3*g encodes the choose function per
 * digit, decoded by the SHA256_CH_OUTPUT table. Also refreshes `e.sparse` from the lookup.
 *
 * NOTE(review): the extraction dropped original lines 162, 164 and 166. Line 162 is the
 * signature — per the cross-reference index:
 * `static field_ct choose(sparse_value& e, const sparse_value& f, const sparse_value& g)`.
 * Line 166 presumably performs the SHA256_CH_INPUT lookup producing `lookup` (that table
 * id appears in this file's cross-references) — confirm against the repository source.
 */
161template <typename Builder>
163{
165
167    const auto rotation_coefficients = sha256_tables::get_choose_rotation_multipliers();
168
// C3[0] accumulates the "non-trivial" rotation wraps; C2 carries sparse limbs of e.
169    field_pt rotation_result = lookup[ColumnIdx::C3][0];
170
171    e.sparse = lookup[ColumnIdx::C2][0];
172
173    field_pt sparse_limb_3 = lookup[ColumnIdx::C2][2];
174
175    // where is the middle limb used
// Combine rotation accumulator and limbs so the sum realises Sigma1(e) in sparse form;
// the factor 7 separates the three XORed rotations into distinct base-7 digit weights.
176    field_pt xor_result = (rotation_result * fr(7))
177                              .add_two(e.sparse * (rotation_coefficients[0] * fr(7) + fr(1)),
178                                       sparse_limb_3 * (rotation_coefficients[2] * fr(7)));
179
// Add 2*f + 3*g: each base-7 digit of the sum uniquely determines Ch for that bit.
180    field_pt choose_result_sparse = xor_result.add_two(f.sparse + f.sparse, g.sparse + g.sparse + g.sparse).normalize();
181
// Decode the per-digit result back to a binary 32-bit value.
182    field_pt choose_result = plookup_read<Builder>::read_from_1_to_2_table(SHA256_CH_OUTPUT, choose_result_sparse);
183
184    return choose_result;
185}
186
/**
 * @brief Compute Maj(a, b, c) = (a AND b) XOR (a AND c) XOR (b AND c), fused with the Sigma0 rotations.
 *
 * Operands are in base-4 "majority" sparse form: summing a + b + c digit-wise yields,
 * per base-4 digit, a count from which the majority bit is decoded by the
 * SHA256_MAJ_OUTPUT table. Also refreshes `a.sparse` from the lookup.
 *
 * NOTE(review): the extraction dropped original lines 188, 190 and 192. Line 188 is the
 * signature — per the cross-reference index:
 * `static field_ct majority(sparse_value& a, const sparse_value& b, const sparse_value& c)`.
 * Line 192 presumably performs the SHA256_MAJ_INPUT lookup producing `lookup` (that table
 * id appears in this file's cross-references) — confirm against the repository source.
 */
187template <typename Builder>
189{
191
193    const auto rotation_coefficients = sha256_tables::get_majority_rotation_multipliers();
194
195    field_pt rotation_result =
196        lookup[ColumnIdx::C3][0]; // last index of first row gives accumulating sum of "non-trival" wraps
197    a.sparse = lookup[ColumnIdx::C2][0];
198    // use these values to compute trivial wraps somehow
199    field_pt sparse_accumulator_2 = lookup[ColumnIdx::C2][1];
200
// Combine rotation accumulator and limbs so the sum realises Sigma0(a) in sparse form;
// the factor 4 separates the three XORed rotations into distinct base-4 digit weights.
201    field_pt xor_result = (rotation_result * fr(4))
202                              .add_two(a.sparse * (rotation_coefficients[0] * fr(4) + fr(1)),
203                                       sparse_accumulator_2 * (rotation_coefficients[1] * fr(4)));
204
// Digit-wise sum a + b + c; each base-4 digit carries the 3-way bit count.
205    field_pt majority_result_sparse = xor_result.add_two(b.sparse, c.sparse).normalize();
206
// Decode the per-digit counts back to a binary 32-bit majority value.
207    field_pt majority_result = plookup_read<Builder>::read_from_1_to_2_table(SHA256_MAJ_OUTPUT, majority_result_sparse);
208
209    return majority_result;
210}
211
/**
 * @brief Compute (a + b) mod 2^32 in-circuit.
 *
 * Witnesses the overflow (the high bits above 2^32) and returns a + b - overflow * 2^32,
 * constraining the overflow to a small range. Note a and b need not each fit in 32 bits
 * (sha256_block passes multi-term sums such as temp1), which is presumably why the
 * overflow range is wider than a single carry bit — see the original author's
 * "Has to be a byte?" question below.
 *
 * NOTE(review): the extraction dropped original lines 213 and 215-216. Line 213 is the
 * signature — per the cross-reference index:
 * `static field_ct add_normalize(const field_ct& a, const field_ct& b)`.
 */
212template <typename Builder>
214{
217
// Take a context from whichever operand has one (either may be a constant).
218    Builder* ctx = a.get_context() ? a.get_context() : b.get_context();
219
220    uint256_t sum = a.get_value() + b.get_value();
221
// Low 32 bits of the sum — the value the circuit must output.
222    uint256_t normalized_sum = static_cast<uint32_t>(sum.data[0]);
223
// Both constant: fold the reduction out-of-circuit, no gates needed.
224    if (a.witness_index == IS_CONSTANT && b.witness_index == IS_CONSTANT) {
225        return field_pt(ctx, normalized_sum);
226    }
227
// Witness the overflow (sum >> 32) and enforce result = a + b - overflow * 2^32.
228    field_pt overflow = witness_pt(ctx, fr((sum - normalized_sum) >> 32));
229
230    field_pt result = a.add_two(b, overflow * field_pt(ctx, -fr((uint64_t)(1ULL << 32ULL))));
231    // Has to be a byte?
// Range-constrain the overflow so the subtraction cannot wrap the field
// (semantics of the argument `3`: see field_t::create_range_constraint).
232    overflow.create_range_constraint(3);
233    return result;
234}
235
/**
 * @brief One SHA-256 compression-function invocation on a single 512-bit block.
 *
 * Runs the 64-round main loop of FIPS 180-4 §6.2.2 over `input` (16 schedule words,
 * extended to 64 by extend_witness) starting from chaining state `h_init`, then adds the
 * initial state back in and range-constrains each output word to 32 bits.
 *
 * NOTE(review): the extraction dropped original lines 237, 240-248, 258-260, 263-265,
 * 282-285 and 295-300. Line 237 is the start of the signature — per the cross-reference
 * index: `static std::array<field_ct, 8> sha256_block(const std::array<field_ct, 8>&
 * h_init, const std::array<field_ct, 16>& input)`. Lines 282-285 presumably declare the
 * `output` array used below — confirm against the repository source.
 */
236template <typename Builder>
238                                                              const std::array<field_t<Builder>, 16>& input)
239{
// Initialise the working variables a..h. Only b, c (majority operands) and f, g
// (choose operands) need sparse form up front; the rest stay in normal form.
249    sparse_value a = sparse_value(h_init[0]);
250    auto b = map_into_maj_sparse_form(h_init[1]);
251    auto c = map_into_maj_sparse_form(h_init[2]);
252    sparse_value d = sparse_value(h_init[3]);
253    sparse_value e = sparse_value(h_init[4]);
254    auto f = map_into_choose_sparse_form(h_init[5]);
255    auto g = map_into_choose_sparse_form(h_init[6]);
256    sparse_value h = sparse_value(h_init[7]);
257
// Extend the 16 input words into the 64-word message schedule.
261    const auto w = extend_witness(input);
262
266    // As opposed to standard sha description - Maj and Choose functions also include required rotations for round
267    for (size_t i = 0; i < 64; ++i) {
// choose() fuses Sigma1(e) + Ch(e,f,g); majority() fuses Sigma0(a) + Maj(a,b,c).
268        auto ch = choose(e, f, g);
269        auto maj = majority(a, b, c);
// temp1 = ch + h + w[i] + K[i] (round constant folded in as a circuit constant).
270        auto temp1 = ch.add_two(h.normal, w[i] + fr(round_constants[i]));
271
// Rotate the working variables; e and a get the two mod-2^32 additions of the round.
272        h = g;
273        g = f;
274        f = e;
275        e.normal = add_normalize(d.normal, temp1);
276        d = c;
277        c = b;
278        b = a;
279        a.normal = add_normalize(temp1, maj);
280    }
281
// Feed-forward: add the initial chaining state to the final working variables (mod 2^32).
286    output[0] = add_normalize(a.normal, h_init[0]);
287    output[1] = add_normalize(b.normal, h_init[1]);
288    output[2] = add_normalize(c.normal, h_init[2]);
289    output[3] = add_normalize(d.normal, h_init[3]);
290    output[4] = add_normalize(e.normal, h_init[4]);
291    output[5] = add_normalize(f.normal, h_init[5]);
292    output[6] = add_normalize(g.normal, h_init[6]);
293    output[7] = add_normalize(h.normal, h_init[7]);
294
// Constrain each output word to 32 bits (add_normalize alone leaves the magnitude
// bounded only via its overflow constraint).
301    for (size_t i = 0; i < 8; i++) {
302        output[i].create_range_constraint(32);
303    }
304
305    return output;
306}
307
/**
 * @brief SHA-256 hash of an in-circuit byte array.
 *
 * Applies FIPS 180-4 padding (append 0x80, zero-pad to a block boundary, append the
 * 64-bit big-endian bit length), packs the padded bytes into big-endian 32-bit words,
 * runs sha256_block over each 512-bit block, and decomposes the final state into a
 * 32-byte output array.
 *
 * NOTE(review): the extraction dropped original lines 340, 343 and 363, which —
 * judging from usage below — presumably declare the `slices`, `chunk` (per outer-loop
 * iteration) and `output` containers. Confirm against the repository source.
 */
308template <typename Builder> byte_array<Builder> SHA256<Builder>::hash(const byte_array_ct& input)
309{
310    Builder* ctx = input.get_context();
311    std::vector<field_ct> message_schedule;
312    const size_t message_length_bytes = input.size();
313
// Copy the message bytes, one field element per byte.
314    for (size_t idx = 0; idx < message_length_bytes; idx++) {
315        message_schedule.push_back(input[idx]);
316    }
317
// Mandatory padding byte 0x80 (the single 1-bit followed by zeros).
318    message_schedule.push_back(field_ct(ctx, 128));
319
320    constexpr size_t bytes_per_block = 64;
321    // Include message length
322    const size_t num_bytes = message_schedule.size() + 8;
323    const size_t num_blocks = num_bytes / bytes_per_block + (num_bytes % bytes_per_block != 0);
324
325    const size_t num_total_bytes = num_blocks * bytes_per_block;
326    // Pad with zeroes to make the number divisible by 64
327    for (size_t i = num_bytes; i < num_total_bytes; ++i) {
328        message_schedule.push_back(field_ct(ctx, 0));
329    }
330
331    // Append the message length bits represented as a byte array of length 8.
332    const size_t message_bits = message_length_bytes * 8;
333    byte_array_ct message_length_byte_decomposition(field_ct(message_bits), 8);
334
335    for (size_t idx = 0; idx < 8; idx++) {
336        message_schedule.push_back(message_length_byte_decomposition[idx]);
337    }
338
339    // Compute 4-byte slices
341
// Pack every 4 consecutive bytes into one big-endian 32-bit word.
342    for (size_t i = 0; i < message_schedule.size(); i += 4) {
344        for (size_t j = 0; j < 4; ++j) {
345            const size_t shift = 8 * (3 - j);
346            chunk.push_back(message_schedule[i + j] * field_ct(ctx, uint256_t(1) << shift));
347        }
// accumulate() sums the 4 shifted bytes with fewer gates than pairwise addition.
348        slices.push_back(field_ct::accumulate(chunk));
349    }
350
351    constexpr size_t slices_per_block = 16;
352
// Chain the compression function over each 512-bit (16-word) block.
353    std::array<field_ct, 8> rolling_hash;
354    prepare_constants(rolling_hash);
355    for (size_t i = 0; i < num_blocks; ++i) {
356        std::array<field_ct, 16> hash_input;
357        for (size_t j = 0; j < 16; ++j) {
358            hash_input[j] = slices[i * slices_per_block + j];
359        }
360        rolling_hash = sha256_block(rolling_hash, hash_input);
361    }
362
364    // Each element of rolling_hash is a 4-byte field_t, decompose rolling hash into bytes.
365    for (const auto& word : rolling_hash) {
366        // This constructor constrains
367        // - word length to be <=4 bytes
368        // - the element reconstructed from bytes is equal to the given input.
369        // - each entry to be a byte
370        byte_array_ct word_byte_decomposition(word, 4);
371        for (size_t i = 0; i < 4; i++) {
372            output.push_back(word_byte_decomposition[i]);
373        }
374    }
375    //
376    return byte_array<Builder>(ctx, output);
377}
378
380template class SHA256<bb::MegaCircuitBuilder>;
381
382} // namespace bb::stdlib
static field_ct add_normalize(const field_ct &a, const field_ct &b)
Definition sha256.cpp:213
static std::array< field_ct, 64 > extend_witness(const std::array< field_ct, 16 > &w_in)
Definition sha256.cpp:60
static field_ct majority(sparse_value &a, const sparse_value &b, const sparse_value &c)
Definition sha256.cpp:188
static void prepare_constants(std::array< field_ct, 8 > &input)
Definition sha256.cpp:26
static field_ct choose(sparse_value &e, const sparse_value &f, const sparse_value &g)
Definition sha256.cpp:162
static sparse_value map_into_choose_sparse_form(const field_ct &e)
Definition sha256.cpp:142
static byte_array< Builder > hash(const byte_array_ct &input)
Definition sha256.cpp:308
static sparse_value map_into_maj_sparse_form(const field_ct &e)
Definition sha256.cpp:152
static sparse_witness_limbs convert_witness(const field_ct &w)
Definition sha256.cpp:34
static std::array< field_ct, 8 > sha256_block(const std::array< field_ct, 8 > &h_init, const std::array< field_ct, 16 > &input)
Definition sha256.cpp:237
Represents a dynamic array of bytes in-circuit.
size_t size() const
Builder * get_context() const
static field_t accumulate(const std::vector< field_t > &input)
Efficiently compute the sum of vector entries. Using big_add_gate we reduce the number of gates needed.
Definition field.cpp:1147
void create_range_constraint(size_t num_bits, std::string const &msg="field_t::range_constraint") const
Let x = *this.normalize(), constrain x.v < 2^{num_bits}.
Definition field.cpp:908
bb::fr get_value() const
Given a := *this, compute its value given by a.v * a.mul + a.add.
Definition field.cpp:827
field_t normalize() const
Return a new element, where the in-circuit witness contains the actual represented value (multiplicative and additive constants folded in, i.e. the value a.v * a.mul + a.add).
Definition field.cpp:635
uint32_t witness_index
Definition field.hpp:132
field_t add_two(const field_t &add_b, const field_t &add_c) const
Efficiently compute (this + a + b) using big_mul gate.
Definition field.cpp:572
static plookup::ReadData< field_pt > get_lookup_accumulators(const plookup::MultiTableId id, const field_pt &key_a, const field_pt &key_b=0, const bool is_2_to_1_lookup=false)
Definition plookup.cpp:19
static field_pt read_from_1_to_2_table(const plookup::MultiTableId id, const field_pt &key_a)
Definition plookup.cpp:89
FF a
FF b
stdlib::witness_t< bb::UltraCircuitBuilder > witness_pt
stdlib::field_t< UltraCircuitBuilder > field_pt
stdlib::field_t< Builder > field_ct
std::array< bb::fr, 3 > get_choose_rotation_multipliers()
Definition sha256.hpp:189
std::array< bb::fr, 3 > get_majority_rotation_multipliers()
Definition sha256.hpp:164
@ SHA256_CH_INPUT
Definition types.hpp:91
@ SHA256_MAJ_OUTPUT
Definition types.hpp:94
@ SHA256_CH_OUTPUT
Definition types.hpp:92
@ SHA256_WITNESS_OUTPUT
Definition types.hpp:96
@ SHA256_MAJ_INPUT
Definition types.hpp:93
field_t< Builder > add_normalize(const field_t< Builder > &a, const field_t< Builder > &b)
void g(field_t< Builder > state[BLAKE_STATE_SIZE], size_t a, size_t b, size_t c, size_t d, field_t< Builder > x, field_t< Builder > y, const bool last_update=false)
constexpr size_t get_num_blocks(const size_t num_bits)
Definition sha256.cpp:19
Entry point for Barretenberg command-line interface.
field< Bn254FrParams > fr
Definition fr.hpp:174
Inner sum(Cont< Inner, Args... > const &in)
Definition container.hpp:70
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
std::array< uint64_t, 64 > extend_witness(std::array< uint64_t, 16 > &in)
BB_INLINE constexpr field pow(const uint256_t &exponent) const noexcept
constexpr field invert() const noexcept
BB_INLINE constexpr field from_montgomery_form() const noexcept
std::array< field_ct, 4 > rotated_limbs
Definition sha256.hpp:83
std::array< field_ct, 4 > sparse_limbs
Definition sha256.hpp:81