Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
biggroup_bn254.hpp
Go to the documentation of this file.
1// === AUDIT STATUS ===
2// internal: { status: not started, auditors: [], date: YYYY-MM-DD }
3// external_1: { status: not started, auditors: [], date: YYYY-MM-DD }
4// external_2: { status: not started, auditors: [], date: YYYY-MM-DD }
5// =====================
6
7#pragma once
22
// Fused multi-scalar multiplication over BN254 combining three classes of terms in a
// single shared double-and-add ladder:
//   * four "big" points with full-width scalars, each split via the GLV endomorphism
//     into two half-width scalars (quad lookup table + its endomorphism twin),
//   * a variable number of "small" points whose scalars fit in `max_num_small_bits` bits,
//   * one fixed-generator term, evaluated with 8-bit fixed-base WNAF tables.
// Returns the accumulated sum with the offset generator removed.
//
// NOTE(review): this Doxygen listing elides several original source lines (36, 68, 71,
// 138, 169, ...). Comments below that depend on elided lines are marked as assumptions
// to be confirmed against the full header.
33template <class C, class Fq, class Fr, class G>
34template <typename, typename>
35    requires(IsNotMegaBuilder<C>)
37    const std::vector<element>& big_points,
38    const std::vector<Fr>& big_scalars,
39    const std::vector<element>& small_points,
40    const std::vector<Fr>& small_scalars,
41    const Fr& generator_scalar,
42    const size_t max_num_small_bits)
43{
    // Find a builder context from the first big point that carries one; stays nullptr
    // if every input is a circuit constant.
44    C* ctx = nullptr;
45    for (auto element : big_points) {
46        if (element.get_context()) {
47            ctx = element.get_context();
48            break;
49        }
50    }
51
52    constexpr size_t NUM_BIG_POINTS = 4;
53    // TODO: handle situation where big points size is not 4 :/
54
    // Quad lookup table over the 4 big points, paired with the table over their
    // endomorphism images.
55    auto big_table_pair =
56        create_endo_pair_quad_lookup_table({ big_points[0], big_points[1], big_points[2], big_points[3] });
57    auto& big_table = big_table_pair.first;
58    auto& endo_table = big_table_pair.second;
59    batch_lookup_table small_table(small_points);
    // Per-point NAF digit vectors; index [i][round], with the final slot used as the skew bit.
60    std::vector<std::vector<bool_ct>> big_naf_entries;
61    std::vector<std::vector<bool_ct>> endo_naf_entries;
62    std::vector<std::vector<bool_ct>> small_naf_entries;
63
    // Split a full-width witness scalar into two half-width scalars (k1, k2) and
    // constrain scalar == k1 - k2 * beta in-circuit.
64    const auto split_into_endomorphism_scalars = [ctx](const Fr& scalar) {
65        bb::fr k = scalar.get_value();
66        bb::fr k1(0);
67        bb::fr k2(0);
        // NOTE(review): the elided original line 68 presumably calls
        // bb::fr::split_into_endomorphism_scalars(k, k1, k2) to populate k1/k2
        // (k1/k2 are otherwise left at 0) — confirm against the full source.
69        Fr scalar_k1 = Fr::from_witness(ctx, k1.to_montgomery_form());
70        Fr scalar_k2 = Fr::from_witness(ctx, k2.to_montgomery_form());
        // NOTE(review): `beta` here is a scalar-field constant declared on an elided
        // line (likely the cube root of unity in bb::fr) — confirm.
72        scalar.assert_equal(scalar_k1 - scalar_k2 * beta);
73        return std::make_pair<Fr, Fr>((Fr)scalar_k1, (Fr)scalar_k2);
74    };
75
    // NAF-decompose the two half-width scalars of each big point.
76    for (size_t i = 0; i < NUM_BIG_POINTS; ++i) {
77        const auto [scalar_k1, scalar_k2] = split_into_endomorphism_scalars(big_scalars[i]);
78        big_naf_entries.emplace_back(compute_naf(scalar_k1, max_num_small_bits));
79        endo_naf_entries.emplace_back(compute_naf(scalar_k2, max_num_small_bits));
80    }
81
    // Generator term uses 8-bit windowed NAF over 128 bits with precomputed
    // fixed-base tables (plain and endomorphism-twisted).
82    const auto [generator_k1, generator_k2] = split_into_endomorphism_scalars(generator_scalar);
83    const std::vector<field_t<C>> generator_wnaf = compute_wnaf<128, 8>(generator_k1);
84    const std::vector<field_t<C>> generator_endo_wnaf = compute_wnaf<128, 8>(generator_k2);
85    const auto generator_table =
86        element::eight_bit_fixed_base_table(element::eight_bit_fixed_base_table::CurveType::BN254, false);
87    const auto generator_endo_table =
88        element::eight_bit_fixed_base_table(element::eight_bit_fixed_base_table::CurveType::BN254, true);
89
90    for (size_t i = 0; i < small_points.size(); ++i) {
91        small_naf_entries.emplace_back(compute_naf(small_scalars[i], max_num_small_bits));
92    }
93
94    const size_t num_rounds = max_num_small_bits;
95
    // Offset generators keep the accumulator away from the point at infinity / edge
    // cases; .first is added up-front, .second is subtracted at the end.
96    const auto offset_generators = compute_offset_generators(num_rounds);
97
    // Seed the accumulator with the round-0 table entries of every term.
98    auto init_point = element::chain_add(offset_generators.first, small_table.get_chain_initial_entry());
99    init_point = element::chain_add(endo_table[0], init_point);
100    init_point = element::chain_add(big_table[0], init_point);
101
102    element accumulator = element::chain_add_end(init_point);
103
    // Gather, for a given NAF round, the combined table contribution of all small,
    // big, and endo points as one chain-add accumulator.
104    const auto get_point_to_add = [&](size_t naf_index) {
105        std::vector<bool_ct> small_nafs;
106        std::vector<bool_ct> big_nafs;
107        std::vector<bool_ct> endo_nafs;
108        for (size_t i = 0; i < small_points.size(); ++i) {
109            small_nafs.emplace_back(small_naf_entries[i][naf_index]);
110        }
111        for (size_t i = 0; i < NUM_BIG_POINTS; ++i) {
112            big_nafs.emplace_back(big_naf_entries[i][naf_index]);
113            endo_nafs.emplace_back(endo_naf_entries[i][naf_index]);
114        }
115
116        auto to_add = small_table.get_chain_add_accumulator(small_nafs);
117        to_add = element::chain_add(big_table.get({ big_nafs[0], big_nafs[1], big_nafs[2], big_nafs[3] }), to_add);
118        to_add = element::chain_add(endo_table.get({ endo_nafs[0], endo_nafs[1], endo_nafs[2], endo_nafs[3] }), to_add);
119        return to_add;
120    };
121
122    // Perform multiple rounds of the montgomery ladder algorithm per "iteration" of our main loop.
123    // This is in order to reduce the number of field reductions required when calling `multiple_montgomery_ladder`
124    constexpr size_t num_rounds_per_iteration = 4;
125
126    // we require that we perform max of one generator per iteration
127    static_assert(num_rounds_per_iteration < 8);
128
    // Ceiling-divide the rounds into iterations; the final iteration handles the
    // remainder (round 0 was consumed by the accumulator seed, hence the -1).
129    size_t num_iterations = num_rounds / num_rounds_per_iteration;
130    num_iterations += ((num_iterations * num_rounds_per_iteration) == num_rounds) ? 0 : 1;
131    const size_t num_rounds_per_final_iteration = (num_rounds - 1) - ((num_iterations - 1) * num_rounds_per_iteration);
132
133    size_t generator_idx = 0;
134    for (size_t i = 0; i < num_iterations; ++i) {
135
136        const size_t inner_num_rounds =
137            (i != num_iterations - 1) ? num_rounds_per_iteration : num_rounds_per_final_iteration;
        // NOTE(review): the elided original line 138 presumably declares the
        // per-iteration vector `to_add` (of element::chain_add_accumulator) — confirm.
139
140        for (size_t j = 0; j < inner_num_rounds; ++j) {
141            to_add.emplace_back(get_point_to_add(i * num_rounds_per_iteration + j + 1));
142        }
143
        // Every 8th global round (offset 6) we fold in the next 8-bit generator WNAF
        // window; the static_assert above guarantees at most one such round per iteration.
144        bool add_generator_this_round = false;
145        size_t add_idx = 0;
146        for (size_t j = 0; j < inner_num_rounds; ++j) {
147            add_generator_this_round = ((i * num_rounds_per_iteration + j) % 8) == 6;
148            if (add_generator_this_round) {
149                add_idx = j;
150                break;
151            }
152        }
153        if (add_generator_this_round) {
154            to_add[add_idx] = element::chain_add(generator_table[generator_wnaf[generator_idx]], to_add[add_idx]);
155            to_add[add_idx] =
156                element::chain_add(generator_endo_table[generator_endo_wnaf[generator_idx]], to_add[add_idx]);
157            generator_idx++;
158        }
        // One ladder call per iteration performs all inner_num_rounds double-and-adds.
159        accumulator = accumulator.multiple_montgomery_ladder(to_add);
160    }
161
    // Skew correction for small points: NAF slot [num_rounds] flags an odd/even
    // adjustment; conditionally subtract the base point.
162    for (size_t i = 0; i < small_points.size(); ++i) {
163        element skew = accumulator - small_points[i];
164        Fq out_x = accumulator.x.conditional_select(skew.x, small_naf_entries[i][num_rounds]);
165        Fq out_y = accumulator.y.conditional_select(skew.y, small_naf_entries[i][num_rounds]);
166        accumulator = element(out_x, out_y);
167    }
168
    // NOTE(review): the elided original line 169 presumably defines `beta_val`
    // (the base-field cube root of unity as a uint256_t) — confirm. It is split
    // into two limbs (bits 0-135 and 136-255) to build the non-normalized bigfield
    // constant below.
170    Fq beta(bb::fr(beta_val.slice(0, 136)), bb::fr(beta_val.slice(136, 256)), false);
171
    // Skew correction for the endo terms: the endo image of P is (beta * P.x, P.y),
    // and since endo scalars enter negated, the skew here ADDS the endo point.
172    for (size_t i = 0; i < NUM_BIG_POINTS; ++i) {
173        element skew_point = big_points[i];
174        skew_point.x *= beta;
175        element skew = accumulator + skew_point;
176        Fq out_x = accumulator.x.conditional_select(skew.x, endo_naf_entries[i][num_rounds]);
177        Fq out_y = accumulator.y.conditional_select(skew.y, endo_naf_entries[i][num_rounds]);
178        accumulator = element(out_x, out_y);
179    }
    // Generator skew: the last WNAF slot is the skew flag; table entry [128] is the
    // base point of the fixed-base table.
180    {
181        element skew = accumulator - generator_table[128];
182        Fq out_x = accumulator.x.conditional_select(skew.x, bool_ct(generator_wnaf[generator_wnaf.size() - 1]));
183        Fq out_y = accumulator.y.conditional_select(skew.y, bool_ct(generator_wnaf[generator_wnaf.size() - 1]));
184        accumulator = element(out_x, out_y);
185    }
186    {
187        element skew = accumulator - generator_endo_table[128];
        // NOTE(review): the skew flag is indexed with generator_wnaf.size() - 1 rather
        // than generator_endo_wnaf.size() - 1; both vectors come from the same
        // compute_wnaf<128, 8> so the sizes should match, but confirm intent.
188        Fq out_x = accumulator.x.conditional_select(skew.x, bool_ct(generator_endo_wnaf[generator_wnaf.size() - 1]));
189        Fq out_y = accumulator.y.conditional_select(skew.y, bool_ct(generator_endo_wnaf[generator_wnaf.size() - 1]));
190        accumulator = element(out_x, out_y);
191    }
192
    // Skew correction for the big points themselves.
193    for (size_t i = 0; i < NUM_BIG_POINTS; ++i) {
194        element skew = accumulator - big_points[i];
195        Fq out_x = accumulator.x.conditional_select(skew.x, big_naf_entries[i][num_rounds]);
196        Fq out_y = accumulator.y.conditional_select(skew.y, big_naf_entries[i][num_rounds]);
197        accumulator = element(out_x, out_y);
198    }
    // Undo the offset generator added at initialization.
199    accumulator = accumulator - offset_generators.second;
200
201    return accumulator;
202}
203
// Batch scalar multiplication over BN254: every "big" scalar is split in two via the
// GLV endomorphism (doubling the point count but halving the scalar width), the
// resulting big/small/endo terms are merged into one point list, and a single shared
// NAF double-and-add ladder (two rounds per multiple_montgomery_ladder call) computes
// the sum. Origin tags are unioned across all inputs and reattached to the result.
//
// NOTE(review): this Doxygen listing elides several original source lines (218, 237,
// 264, 336, 371, 400, and the comment blocks between them). Comments that depend on
// elided lines are marked as assumptions to be confirmed against the full header.
215template <typename C, class Fq, class Fr, class G>
216template <typename, typename>
217    requires(IsNotMegaBuilder<C>)
219    const std::vector<Fr>& big_scalars,
220    const std::vector<element>& small_points,
221    const std::vector<Fr>& small_scalars,
222    const size_t max_num_small_bits)
223{
224
    // The main loop below consumes NAF digits two per iteration, so the round count
    // must be even.
225    BB_ASSERT_EQ(max_num_small_bits % 2, 0U);
226
227    const size_t num_big_points = big_points.size();
228    const size_t num_small_points = small_points.size();
    // Find a builder context from the first big point that carries one; stays nullptr
    // if every input is a circuit constant.
229    C* ctx = nullptr;
230    for (auto element : big_points) {
231        if (element.get_context()) {
232            ctx = element.get_context();
233            break;
234        }
235    }
236
    // NOTE(review): the elided original line 237 presumably declares the merged
    // point list `std::vector<element> points;` used throughout below — confirm.
238    std::vector<Fr> scalars;
239    std::vector<element> endo_points;
240    std::vector<Fr> endo_scalars;
241
    // NOTE(review): the elided lines 242-254 presumably define the endomorphism
    // constants `lambda` (scalar-field cube root of unity) and `beta` (base-field
    // cube root of unity) referenced below — confirm.
    // Split each big scalar k into (k1, k2) with k = k1 - k2 * lambda, and pair k2
    // with the endomorphism image of the point.
255    for (size_t i = 0; i < num_big_points; ++i) {
256        Fr scalar = big_scalars[i];
257        // Q: is it a problem if wraps? get_value is 512 bits
258        // A: it can't wrap, this method only compiles if the Fr type is a field_t<bb::fr> type
259
260        // Split k into short scalars (scalar_k1, scalar_k2) using bn254 endomorphism.
261        bb::fr k = uint256_t(scalar.get_value());
262        bb::fr k1(0);
263        bb::fr k2(0);
        // NOTE(review): the elided original line 264 presumably calls
        // bb::fr::split_into_endomorphism_scalars(k, k1, k2) to populate k1/k2
        // (k1/k2 are otherwise left at 0) — confirm.
265        Fr scalar_k1 = Fr::from_witness(ctx, k1.to_montgomery_form());
266        Fr scalar_k2 = Fr::from_witness(ctx, k2.to_montgomery_form());
267
268        // Propagate tags
269        scalar_k1.set_origin_tag(scalar.get_origin_tag());
270        scalar_k2.set_origin_tag(scalar.get_origin_tag());
271
272        // Add copy constraint that validates k1 = scalar_k1 - scalar_k2 * \lambda
273        scalar.assert_equal(scalar_k1 - scalar_k2 * lambda);
274        scalars.push_back(scalar_k1);
275        endo_scalars.push_back(scalar_k2);
276        element point = big_points[i];
277        points.push_back(point);
278
279        // negate the point that maps to the endo scalar `scalar_k2`
280        // instead of computing scalar_k1 * [P] - scalar_k2 * [P], we compute scalar_k1 * [P] + scalar_k2 * [-P]
281        point.y = -point.y;
282        point.x = point.x * Fq(ctx, uint256_t(beta));
283        point.y.self_reduce();
284        endo_points.push_back(point);
285    }
    // Small points/scalars are used as-is; endo terms are appended after them so the
    // final list is [big..., small..., endo...].
286    for (size_t i = 0; i < num_small_points; ++i) {
287        points.push_back(small_points[i]);
288        scalars.push_back(small_scalars[i]);
289    }
290    std::copy(endo_points.begin(), endo_points.end(), std::back_inserter(points));
291    std::copy(endo_scalars.begin(), endo_scalars.end(), std::back_inserter(scalars));
292
293    // Compute the tag of the result
294    OriginTag union_tag{};
295    for (size_t i = 0; i < points.size(); i++) {
296        union_tag = OriginTag(union_tag, OriginTag(points[i].get_origin_tag(), scalars[i].get_origin_tag()));
297
298        // Remove tags so they don't interfere during computation
299        points[i].set_origin_tag(OriginTag());
300        scalars[i].set_origin_tag(OriginTag());
301    }
    // Sanity: scalar lists must pair 1:1 with their point lists.
302    BB_ASSERT_EQ(big_scalars.size(), num_big_points);
303    BB_ASSERT_EQ(small_scalars.size(), num_small_points);
304
    // Lookup table covering every merged point; queried per-round with the NAF bits.
320    batch_lookup_table point_table(points);
321
334    const size_t num_rounds = max_num_small_bits;
335    const size_t num_points = points.size();
    // NOTE(review): the elided original line 336 presumably declares
    // `std::vector<std::vector<bool_ct>> naf_entries;` — confirm.
    // NAF digits per point, indexed [point][round]; the extra slot [num_rounds] is
    // the skew bit consumed after the main loop.
337    for (size_t i = 0; i < num_points; ++i) {
338        naf_entries.emplace_back(compute_naf(scalars[i], max_num_small_bits));
339    }
340
    // Offset generators keep the accumulator away from edge cases; .first is added
    // up-front, .second subtracted at the end.
345    const auto offset_generators = compute_offset_generators(num_rounds);
346
    // Seed with round-0 table entries (rounds consumed here explain starting i at 1).
352    element accumulator = offset_generators.first + point_table.get_initial_entry();
353
    // Main loop: two NAF rounds per iteration via a double montgomery ladder.
369    for (size_t i = 1; i < num_rounds / 2; ++i) {
370        // `nafs` tracks the naf value for each point for the current round
        // NOTE(review): the elided original line 371 presumably declares
        // `std::vector<bool_ct> nafs;` — confirm.
372        for (size_t j = 0; j < points.size(); ++j) {
373            nafs.emplace_back(naf_entries[j][i * 2 - 1]);
374        }
375
388        element::chain_add_accumulator add_1 = point_table.get_chain_add_accumulator(nafs);
        // Reuse the same vector for the second round of this iteration.
389        for (size_t j = 0; j < points.size(); ++j) {
390            nafs[j] = (naf_entries[j][i * 2]);
391        }
392        element::chain_add_accumulator add_2 = point_table.get_chain_add_accumulator(nafs);
393
394        // Perform the double montgomery ladder.
395        accumulator = accumulator.multiple_montgomery_ladder({ add_1, add_2 });
396    }
397
398    // we need to iterate 1 more time if the number of rounds is even
399    if ((num_rounds & 0x01ULL) == 0x00ULL) {
        // NOTE(review): the elided original line 400 presumably (re)declares or
        // clears the local `nafs` vector for this final round — confirm.
401        for (size_t j = 0; j < points.size(); ++j) {
402            nafs.emplace_back(naf_entries[j][num_rounds - 1]);
403        }
404        element::chain_add_accumulator add_1 = point_table.get_chain_add_accumulator(nafs);
405        accumulator = accumulator.multiple_montgomery_ladder({ add_1 });
406    }
407
    // Skew correction: NAF slot [num_rounds] flags an odd/even adjustment per point;
    // conditionally subtract the base point.
426    for (size_t i = 0; i < num_points; ++i) {
427        element skew = accumulator - points[i];
428        Fq out_x = accumulator.x.conditional_select(skew.x, naf_entries[i][num_rounds]);
429        Fq out_y = accumulator.y.conditional_select(skew.y, naf_entries[i][num_rounds]);
430        accumulator = element(out_x, out_y);
431    }
432
433    // Remove the offset generator point!
434    accumulator = accumulator - offset_generators.second;
435
    // Reattach the unioned origin tag computed before the tags were stripped.
436    accumulator.set_origin_tag(union_tag);
437    // Return our scalar mul output
438    return accumulator;
439}
440} // namespace bb::stdlib::element_default
#define BB_ASSERT_EQ(actual, expected,...)
Definition assert.hpp:59
constexpr uint256_t slice(uint64_t start, uint64_t end) const
Implements boolean logic in-circuit.
Definition bool.hpp:59
element multiple_montgomery_ladder(const std::vector< chain_add_accumulator > &to_add) const
Perform repeated iterations of the montgomery ladder algorithm.
void set_origin_tag(OriginTag tag) const
Definition biggroup.hpp:376
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
This file contains part of the logic for the Origin Tag mechanism that tracks the use of in-circuit p...
static constexpr field cube_root_of_unity()
BB_INLINE constexpr field to_montgomery_form() const noexcept
static void split_into_endomorphism_scalars(const field &k, field &k1, field &k2)
BB_INLINE constexpr field from_montgomery_form() const noexcept
element::chain_add_accumulator get_chain_add_accumulator(std::vector< bool_ct > &naf_entries) const
Definition biggroup.hpp:741
bb::fq Fq