constexpr size_t get_num_blocks(const size_t num_bits)
{
    // Padding appends a single marker bit plus a 64-bit length field: 65 extra bits minimum.
    constexpr size_t extra_bits = 65UL;
    // Round up to a whole number of 512-bit blocks.
    return ((num_bits + extra_bits) / 512UL) + ((num_bits + extra_bits) % 512UL > 0);
}
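// Illustrative checks (not part of the original source): because 512 - 65 = 447,
// a message of up to 447 bits still fits in a single block.
static_assert(get_num_blocks(447) == 1, "fits in one block");
static_assert(get_num_blocks(448) == 2, "marker + length spill into a second block");
static_assert(get_num_blocks(1024) == 3, "1024 + 65 = 1089 bits -> three blocks");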
// Load the eight 32-bit SHA-256 initialisation constants into the rolling hash state.
template <typename Builder> void prepare_constants(std::array<field_ct, 8>& input)
{
    for (size_t i = 0; i < 8; i++) {
        input[i] = init_constants[i];
    }
}
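// For reference (FIPS 180-4): the standard SHA-256 IV, which init_constants is
// expected to match -- the first 32 bits of the fractional parts of the square
// roots of the first eight primes. Assumes <cstdint> and <array> are available.
constexpr std::array<uint32_t, 8> sha256_iv_reference = {
    0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
    0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
};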
// Decompose a 32-bit witness into its sparse-form limbs and their rotated
// counterparts via a single plookup read.
template <typename Builder>
sparse_witness_limbs<Builder> convert_witness(const field_ct& w)
{
    // ... (elided: plookup accumulator read and result construction)
    result.sparse_limbs = {
        lookup[ColumnIdx::C2][0],
        lookup[ColumnIdx::C2][1],
        lookup[ColumnIdx::C2][2],
        lookup[ColumnIdx::C2][3],
    };
    result.rotated_limbs = {
        lookup[ColumnIdx::C3][0],
        lookup[ColumnIdx::C3][1],
        lookup[ColumnIdx::C3][2],
        lookup[ColumnIdx::C3][3],
    };
    // ...
    return result;
}
// Extend the 16 input words into the full 64-entry message schedule:
//   w[i] = w[i-16] + sigma_0(w[i-15]) + w[i-7] + sigma_1(w[i-2])
// with the sigma functions evaluated over sparse-form limbs.
template <typename Builder>
std::array<field_ct, 64> extend_witness(const std::array<field_ct, 16>& w_in)
{
    Builder* ctx = w_in[0].get_context();
    // ... (elided: w_sparse initialisation from w_in)
    for (size_t i = 0; i < 16; ++i) {
        // ...
        if (!ctx && w_in[i].get_context()) {
            ctx = w_in[i].get_context();
        }
    }
    for (size_t i = 16; i < 64; ++i) {
        auto& w_left = w_sparse[i - 15];
        auto& w_right = w_sparse[i - 2];
        // Lazily convert operands into sparse form on first use.
        if (!w_left.has_sparse_limbs) {
            w_left = convert_witness(w_left.normal);
        }
        if (!w_right.has_sparse_limbs) {
            w_right = convert_witness(w_right.normal);
        }
        // Scale each sparse limb by its rotation multiplier (sigma_0 terms on the
        // left, sigma_1 terms on the right).
        // ... (elided: declarations of the `left` and `right` limb arrays)
            w_left.sparse_limbs[0] * left_multipliers[0],
            w_left.sparse_limbs[1] * left_multipliers[1],
            w_left.sparse_limbs[2] * left_multipliers[2],
            w_left.sparse_limbs[3] * left_multipliers[3],
        // ...
            w_right.sparse_limbs[0] * right_multipliers[0],
            w_right.sparse_limbs[1] * right_multipliers[1],
            w_right.sparse_limbs[2] * right_multipliers[2],
            w_right.sparse_limbs[3] * right_multipliers[3],
        // ...
        const field_pt left_xor_sparse =
            left[0].add_two(left[1], left[2]).add_two(left[3], w_left.rotated_limbs[1]) * fr(4);
        const field_pt xor_result_sparse = right[0]
                                               // ...
                                               .add_two(right[3], w_right.rotated_limbs[2])
                                               .add_two(w_right.rotated_limbs[3], left_xor_sparse);
        // ... (elided: normalisation of the sparse XOR result)
        field_pt w_out_raw = xor_result.add_two(w_sparse[i - 16].normal, w_sparse[i - 7].normal);
        // ... (elided: witness generation for the mod-2^32 reduced w_out)
        // The scaled difference recovers the overflow term, which is then
        // range-constrained (elided below).
        field_pt w_out_raw_inv_pow_two = w_out_raw * inv_pow_two;
        field_pt w_out_inv_pow_two = w_out * inv_pow_two;
        field_pt divisor = (w_out_raw_inv_pow_two - w_out_inv_pow_two).normalize();
        // ...
    }
    std::array<field_ct, 64> w_extended;
    for (size_t i = 0; i < 64; ++i) {
        w_extended[i] = w_sparse[i].normal;
    }
    return w_extended;
}
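// Native reference for what extend_witness enforces in-circuit (FIPS 180-4;
// illustrative names, not from the original source; assumes <cstdint>):
static inline uint32_t rotr32(uint32_t x, uint32_t n) { return (x >> n) | (x << (32 - n)); }

static void extend_witness_reference(uint32_t w[64])
{
    for (size_t i = 16; i < 64; ++i) {
        const uint32_t s0 = rotr32(w[i - 15], 7) ^ rotr32(w[i - 15], 18) ^ (w[i - 15] >> 3);
        const uint32_t s1 = rotr32(w[i - 2], 17) ^ rotr32(w[i - 2], 19) ^ (w[i - 2] >> 10);
        w[i] = w[i - 16] + s0 + w[i - 7] + s1; // native uint32_t wraps mod 2^32 automatically
    }
}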
// The two sparse-form mapping helpers are elided in this extract. Their signatures:
//   static sparse_value map_into_choose_sparse_form(const field_ct& e);
//   static sparse_value map_into_maj_sparse_form(const field_ct& e);
// Each maps a 32-bit word into the sparse representation consumed by the
// choose / majority rounds via a 1-to-2 plookup table read.
// Compute the choose round: Sigma_1(e) combined with Ch(e, f, g), evaluated over sparse form.
template <typename Builder>
field_ct choose(sparse_value& e, const sparse_value& f, const sparse_value& g)
{
    // ... (elided: plookup accumulator read on e.normal)
    field_pt rotation_result = lookup[ColumnIdx::C3][0];
    // ...
    e.sparse = lookup[ColumnIdx::C2][0];
    // ...
    field_pt sparse_limb_3 = lookup[ColumnIdx::C2][2];
    // Combine the rotated accumulator with the sparse limbs; the fr(7) weighting
    // matches the encoding expected by the choose normalisation table.
    field_pt xor_result = (rotation_result * fr(7))
                              .add_two(e.sparse * (rotation_coefficients[0] * fr(7) + fr(1)),
                                       sparse_limb_3 * (rotation_coefficients[2] * fr(7)));
    // ... (elided: addition of the f and g sparse terms and lookup normalisation)
    return choose_result;
}
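// Native reference for the boolean function this round enforces (illustrative):
//   Ch(e, f, g) = (e AND f) XOR ((NOT e) AND g)
// together with Sigma_1(e) = rotr(e, 6) ^ rotr(e, 11) ^ rotr(e, 25).
static inline uint32_t ch_reference(uint32_t e, uint32_t f, uint32_t g)
{
    return (e & f) ^ (~e & g);
}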
// Compute the majority round: Sigma_0(a) combined with Maj(a, b, c), evaluated over sparse form.
template <typename Builder>
field_ct majority(sparse_value& a, const sparse_value& b, const sparse_value& c)
{
    // ... (elided: plookup accumulator read on a.normal)
    field_pt rotation_result = lookup[ColumnIdx::C3][0];
    a.sparse = lookup[ColumnIdx::C2][0];
    // ...
    field_pt sparse_accumulator_2 = lookup[ColumnIdx::C2][1];
    // The fr(4) weighting matches the encoding expected by the majority normalisation table.
    field_pt xor_result = (rotation_result * fr(4))
                              .add_two(a.sparse * (rotation_coefficients[0] * fr(4) + fr(1)),
                                       sparse_accumulator_2 * (rotation_coefficients[1] * fr(4)));
    // ... (elided: addition of the b and c sparse terms and lookup normalisation)
    return majority_result;
}
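// Native reference (illustrative): Maj(a, b, c) = (a AND b) XOR (a AND c) XOR (b AND c),
// together with Sigma_0(a) = rotr(a, 2) ^ rotr(a, 13) ^ rotr(a, 22).
static inline uint32_t maj_reference(uint32_t a, uint32_t b, uint32_t c)
{
    return (a & b) ^ (a & c) ^ (b & c);
}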
// Add two 32-bit words and normalise the result back into the range [0, 2^32).
template <typename Builder>
field_ct add_normalize(const field_ct& a, const field_ct& b)
{
    Builder* ctx = a.get_context() ? a.get_context() : b.get_context();
    // ... (elided: compute the native sum of the two witness values)
    uint256_t normalized_sum = static_cast<uint32_t>(sum.data[0]);
    // Fast path: both operands are circuit constants, so no constraints are needed.
    if (a.witness_index == IS_CONSTANT && b.witness_index == IS_CONSTANT) {
        return field_pt(ctx, normalized_sum);
    }
    // ... (elided: constrained witness path)
}
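// Illustrative native analogue of the normalisation: keep only the low 32 bits of the sum.
static inline uint32_t add_normalize_reference(uint32_t a, uint32_t b)
{
    return static_cast<uint32_t>((static_cast<uint64_t>(a) + b) & 0xffffffffULL);
}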
// Run the 64-round SHA-256 compression function on one 512-bit block.
template <typename Builder>
std::array<field_ct, 8> sha256_block(const std::array<field_ct, 8>& h_init,
                                     const std::array<field_ct, 16>& input)
{
    // ... (elided: message-schedule extension and remaining state initialisation)
    auto b = map_into_maj_sparse_form(h_init[1]);
    auto c = map_into_maj_sparse_form(h_init[2]);
    // ...
    auto f = map_into_choose_sparse_form(h_init[5]);
    auto g = map_into_choose_sparse_form(h_init[6]);
    // ...
    for (size_t i = 0; i < 64; ++i) {
        auto ch = choose(e, f, g);
        auto maj = majority(a, b, c);
        auto temp1 = ch.add_two(h.normal, w[i] + fr(round_constants[i]));
        // ... (elided: state rotation for the next round)
    }
    // ... (elided: fold h_init back into the working state to form the output)
    for (size_t i = 0; i < 8; i++) {
        output[i].create_range_constraint(32);
    }
    // ...
}
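// Native reference for one compression round (FIPS 180-4; illustrative, reuses
// rotr32 / ch_reference / maj_reference from the sketches above; s[0..7] map to a..h):
static void sha256_round_reference(uint32_t s[8], uint32_t k, uint32_t w)
{
    const uint32_t S1 = rotr32(s[4], 6) ^ rotr32(s[4], 11) ^ rotr32(s[4], 25);
    const uint32_t temp1 = s[7] + S1 + ch_reference(s[4], s[5], s[6]) + k + w;
    const uint32_t S0 = rotr32(s[0], 2) ^ rotr32(s[0], 13) ^ rotr32(s[0], 22);
    const uint32_t temp2 = S0 + maj_reference(s[0], s[1], s[2]);
    s[7] = s[6]; s[6] = s[5]; s[5] = s[4]; s[4] = s[3] + temp1;
    s[3] = s[2]; s[2] = s[1]; s[1] = s[0]; s[0] = temp1 + temp2;
}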
// Top-level hash: pad the input, pack it into 32-bit words, and fold each
// 512-bit block through sha256_block. (A native sketch of the padding rule
// follows after this function.)
template <typename Builder>
byte_array<Builder> hash(const byte_array_ct& input)
{
    // ... (elided: context extraction)
    const size_t message_length_bytes = input.size();
    // Copy the message bytes into the schedule.
    for (size_t idx = 0; idx < message_length_bytes; idx++) {
        message_schedule.push_back(input[idx]);
    }
    // Append the mandatory 0x80 marker byte.
    message_schedule.push_back(field_ct(ctx, 128));
    constexpr size_t bytes_per_block = 64;
    // Reserve 8 bytes for the 64-bit message-length suffix, then round up to a block.
    const size_t num_bytes = message_schedule.size() + 8;
    const size_t num_blocks = num_bytes / bytes_per_block + (num_bytes % bytes_per_block != 0);
    const size_t num_total_bytes = num_blocks * bytes_per_block;
    // Zero-pad up to the block boundary.
    for (size_t i = num_bytes; i < num_total_bytes; ++i) {
        message_schedule.push_back(field_ct(ctx, 0));
    }
    const size_t message_bits = message_length_bytes * 8;
    // ... (elided: byte decomposition of message_bits)
    for (size_t idx = 0; idx < 8; idx++) {
        message_schedule.push_back(message_length_byte_decomposition[idx]);
    }
    // Pack the byte schedule into big-endian 32-bit words.
    for (size_t i = 0; i < message_schedule.size(); i += 4) {
        // ...
        for (size_t j = 0; j < 4; ++j) {
            const size_t shift = 8 * (3 - j);
            // ...
        }
    }
    constexpr size_t slices_per_block = 16;
    // ... (elided: rolling hash state declaration)
    prepare_constants(rolling_hash);
    for (size_t i = 0; i < num_blocks; ++i) {
        // ...
        for (size_t j = 0; j < 16; ++j) {
            hash_input[j] = slices[i * slices_per_block + j];
        }
        rolling_hash = sha256_block(rolling_hash, hash_input);
    }
    // Decompose each 32-bit state word back into four big-endian output bytes.
    for (const auto& word : rolling_hash) {
        // ... (elided: byte decomposition of word)
        for (size_t i = 0; i < 4; i++) {
            output.push_back(word_byte_decomposition[i]);
        }
    }
    // ...
}
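// Native sketch of the padding rule used above (illustrative, not from the source):
//   padded = message || 0x80 || 0x00 ... || uint64(8 * message_length_bytes),
// rounded up to a whole number of 64-byte blocks.
static size_t padded_length_reference(size_t message_length_bytes)
{
    const size_t with_marker_and_length = message_length_bytes + 1 + 8;
    return ((with_marker_and_length + 63) / 64) * 64; // equals num_blocks * bytes_per_block
}

// Big-endian word packing, mirroring the shift = 8 * (3 - j) loop above:
static uint32_t pack_word_reference(const uint8_t bytes[4])
{
    return (uint32_t(bytes[0]) << 24) | (uint32_t(bytes[1]) << 16) |
           (uint32_t(bytes[2]) << 8) | uint32_t(bytes[3]);
}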