Barretenberg
The ZK-SNARK library at the core of Aztec
biggroup_nafs.hpp
// === AUDIT STATUS ===
// internal: { status: not started, auditors: [], date: YYYY-MM-DD }
// external_1: { status: not started, auditors: [], date: YYYY-MM-DD }
// external_2: { status: not started, auditors: [], date: YYYY-MM-DD }
// =====================

#pragma once

namespace bb::stdlib::element_default {

template <typename C, class Fq, class Fr, class G>
template <size_t wnaf_size, size_t lo_stagger, size_t hi_stagger>
typename element<C, Fq, Fr, G>::secp256k1_wnaf_pair element<C, Fq, Fr, G>::compute_secp256k1_endo_wnaf(
    const Fr& scalar)
{
    C* ctx = scalar.context;

    constexpr size_t num_bits = 129;
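    // Note: the endomorphism split below produces two half-scalars of roughly 128 bits; after the sign
    // adjustment applied further down, their magnitudes fit in 129 bits, hence num_bits = 129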

    const auto compute_single_wnaf = [ctx](const secp256k1::fr& k,
                                           const auto stagger,
                                           const bool is_negative,
                                           const bool is_lo = false) {
        // The number of rounds is the minimum required to cover the whole scalar with wnaf_size windows
        constexpr size_t num_rounds = ((num_bits + wnaf_size - 1) / wnaf_size);
        // The stagger mask is needed to retrieve the lowest bits, which will not be used in the Montgomery ladder
        // directly
        const uint64_t stagger_mask = (1ULL << stagger) - 1;
        // The stagger scalar represents the lower "staggered" bits that are not used in the ladder
        const uint64_t stagger_scalar = k.data[0] & stagger_mask;
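        // e.g. with stagger = 2 and k ending in bits ...10110, stagger_mask = 0b11 and stagger_scalar = 0b10;
        // the wnaf below is then computed over k >> 2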

        uint64_t wnaf_values[num_rounds] = { 0 };
        bool skew_without_stagger;
        uint256_t k_u256{ k.data[0], k.data[1], k.data[2], k.data[3] };
        k_u256 = k_u256 >> stagger;
        if (is_lo) {
            bb::wnaf::fixed_wnaf<num_bits - lo_stagger, 1, wnaf_size>(
                &k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
        } else {
            bb::wnaf::fixed_wnaf<num_bits - hi_stagger, 1, wnaf_size>(
                &k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
        }

        // The number of rounds needed to reconstruct the scalar without the staggered bits
        const size_t num_rounds_excluding_stagger_bits = ((num_bits + wnaf_size - 1 - stagger) / wnaf_size);

        const auto compute_staggered_wnaf_fragment =
            [](const uint64_t fragment_u64, const uint64_t stagger, bool is_negative, bool wnaf_skew) {
                // If there is no stagger, then there is no need to change anything
                if (stagger == 0) {
                    return std::make_pair<uint64_t, bool>((uint64_t)0, (bool)wnaf_skew);
                }
                int fragment = static_cast<int>(fragment_u64);
                // Negate the fragment if the scalar is negative
                if (is_negative) {
                    fragment = -fragment;
                }
                // If the value is positive and there is a skew in the wnaf, subtract 2ˢᵗᵃᵍᵍᵉʳ. If it is negative
                // and there is a skew, then add 2ˢᵗᵃᵍᵍᵉʳ
                if (!is_negative && wnaf_skew) {
                    fragment -= (1 << stagger);
                } else if (is_negative && wnaf_skew) {
                    fragment += (1 << stagger);
                }
                // If the lowest bit is zero, then set the final skew to 1 and add 1 to the absolute value of the
                // fragment
                bool output_skew = (fragment_u64 % 2) == 0;
                if (!is_negative && output_skew) {
                    fragment += 1;
                } else if (is_negative && output_skew) {
                    fragment -= 1;
                }

                uint64_t output_fragment;
                if (fragment < 0) {
                    output_fragment = static_cast<uint64_t>((int)((1ULL << (wnaf_size - 1))) + (fragment / 2 - 1));
                } else {
                    output_fragment = static_cast<uint64_t>((1ULL << (wnaf_size - 1)) - 1ULL +
                                                            (uint64_t)((uint64_t)fragment / 2 + 1));
                }

                return std::make_pair<uint64_t, bool>((uint64_t)output_fragment, (bool)output_skew);
            };
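        // Note: for the odd adjusted fragment f, both branches above compute the same offset encoding
        // e = (f + 2^wnaf_size - 1) / 2, i.e. f = 2e - (2^wnaf_size - 1). For example, with wnaf_size = 4,
        // f = +3 maps to e = 9 and f = -3 maps to e = 6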

        // Compute the lowest fragment and final skew
        const auto [first_fragment, skew] =
            compute_staggered_wnaf_fragment(stagger_scalar, stagger, is_negative, skew_without_stagger);

        constexpr uint64_t wnaf_window_size = (1ULL << (wnaf_size - 1));
        const auto get_wnaf_wires = [ctx](uint64_t* wnaf_values, bool is_negative, size_t rounds) {
            std::vector<field_t<C>> wnaf_entries;
            for (size_t i = 0; i < rounds; ++i) {
                // Predicate == sign of the current wnaf value
                bool predicate = bool((wnaf_values[i] >> 31U) & 1U);
                uint64_t offset_entry;
                // If the signs of the current entry and the whole scalar are the same, add the lowest bits of the
                // current wnaf value to the window size to form an entry. Otherwise, subtract the lowest bits along
                // with 1
                if ((!predicate && !is_negative) || (predicate && is_negative)) {
                    // TODO: Why is this mask fixed?
                    offset_entry = wnaf_window_size + (wnaf_values[i] & 0xffffff);
                } else {
                    offset_entry = wnaf_window_size - 1 - (wnaf_values[i] & 0xffffff);
                }
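                // e.g. with wnaf_size = 4 (window size 8) and a positive scalar: magnitude bits m = 3 with
                // positive sign encode digit +7 as entry 8 + 3 = 11, while m = 3 with negative sign encodes
                // digit -7 as entry 8 - 1 - 3 = 4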
                field_t<C> entry(witness_t<C>(ctx, offset_entry));

                // TODO: Do these need to be range constrained? we use these witnesses
                // to index a size-16 ROM lookup table, which performs an implicit range constraint
                entry.create_range_constraint(wnaf_size);
                wnaf_entries.emplace_back(entry);
            }
            return wnaf_entries;
        };

        // Get wnaf witnesses
        std::vector<field_t<C>> wnaf =
            get_wnaf_wires(&wnaf_values[0], is_negative, num_rounds_excluding_stagger_bits);
        // Compute and constrain skews
        field_t<C> negative_skew = witness_t<C>(ctx, is_negative ? 0 : skew);
        field_t<C> positive_skew = witness_t<C>(ctx, is_negative ? skew : 0);
        ctx->create_new_range_constraint(negative_skew.witness_index, 1, "biggroup_nafs");
        ctx->create_new_range_constraint(positive_skew.witness_index, 1, "biggroup_nafs");
        ctx->create_new_range_constraint((negative_skew + positive_skew).witness_index, 1, "biggroup_nafs");
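        // The three range constraints above make each skew a single bit and, via the constraint on their sum,
        // ensure that at most one of them is set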

        const auto reconstruct_bigfield_from_wnaf = [ctx](const std::vector<field_t<C>>& wnaf,
                                                          const field_t<C>& positive_skew,
                                                          const field_t<C>& stagger_fragment,
                                                          const size_t stagger,
                                                          const size_t rounds) {
            std::vector<field_t<C>> accumulator;
            // Collect positive wnaf entries for accumulation
            for (size_t i = 0; i < rounds; ++i) {
                field_t<C> entry = wnaf[rounds - 1 - i];
                entry *= static_cast<field_t<C>>(uint256_t(1) << (i * wnaf_size));
                accumulator.emplace_back(entry);
            }
            // Accumulate the entries, shift by stagger and add the stagger fragment itself
            field_t<C> sum = field_t<C>::accumulate(accumulator);
            sum = sum * field_t<C>(bb::fr(1ULL << stagger));
            sum += (stagger_fragment);
            sum = sum.normalize();
            // TODO: improve efficiency by creating a constructor that does NOT require us to range constrain
            // limbs (we already know (sum < 2^{130}))
            // Convert this value to a bigfield element
            Fr reconstructed = Fr(sum, field_t<C>::from_witness_index(ctx, ctx->zero_idx), false);
            // Double the final value and add the skew
            reconstructed = (reconstructed + reconstructed).add_to_lower_limb(positive_skew, uint256_t(1));
            return reconstructed;
        };
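        // Note: each wnaf entry encodes (digit + 2^wnaf_size - 1) / 2, so the doubling inside
        // reconstruct_bigfield_from_wnaf yields x_pos = scalar + a constant offset; the matching constant
        // x_neg is built and subtracted below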

        // Initialize the stagger witness
        field_t<C> stagger_fragment = witness_t<C>(ctx, first_fragment);

        // Reconstruct bigfield x_pos
        Fr wnaf_sum = reconstruct_bigfield_from_wnaf(
            wnaf, positive_skew, stagger_fragment, stagger, num_rounds_excluding_stagger_bits);

        // Start reconstructing x_neg
        uint256_t negative_constant_wnaf_offset(0);

        // Construct 0xF..F
        for (size_t i = 0; i < num_rounds_excluding_stagger_bits; ++i) {
            negative_constant_wnaf_offset += uint256_t(wnaf_window_size * 2 - 1) * (uint256_t(1) << (i * wnaf_size));
        }
        // Shift by stagger
        negative_constant_wnaf_offset = negative_constant_wnaf_offset << stagger;
        // Account for the stagger fragment
        if (stagger > 0) {
            negative_constant_wnaf_offset += ((1ULL << wnaf_size) - 1ULL); // FROM STAGGER FRAGMENT
        }

        // TODO: improve efficiency by removing the range constraints on lo_offset and hi_offset (we already know
        // they are boolean)
        // Add the skew to the bigfield constant
        Fr offset = Fr(nullptr, negative_constant_wnaf_offset).add_to_lower_limb(negative_skew, uint256_t(1));
        // x_pos - x_neg
        Fr reconstructed = wnaf_sum - offset;

        secp256k1_wnaf wnaf_out{ .wnaf = wnaf,
                                 .positive_skew = positive_skew,
                                 .negative_skew = negative_skew,
                                 .least_significant_wnaf_fragment = stagger_fragment,
                                 .has_wnaf_fragment = (stagger > 0) };

        return std::make_pair<Fr, secp256k1_wnaf>((Fr)reconstructed, (secp256k1_wnaf)wnaf_out);
    };

    secp256k1::fr k(scalar.get_value().lo);
    secp256k1::fr klo(0);
    secp256k1::fr khi(0);
    bool klo_negative = false;
    bool khi_negative = false;
    secp256k1::fr::split_into_endomorphism_scalars(k, klo, khi);

    /* AUDITNOTE: it has been observed in testing that klo_negative is always false.
       On the other hand, khi_negative is sometimes true (e.g., in test_wnaf_secp256k1, take
       scalar_a = 0x3e3e7e9628094ee8942358f6daa1130790f5165d55705d83dad745c85f36807a). So it may be
       that this block is not needed. I could not quickly determine why this might be the case,
       so I leave it to the auditor to check whether the following if block is needed. */
    if (klo.uint256_t_no_montgomery_conversion().get_msb() > 128) {
        klo_negative = true;
        klo = -klo;
    }
    if (khi.uint256_t_no_montgomery_conversion().get_msb() > 128) {
        khi_negative = true;
        khi = -khi;
    }
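    // With the sign flags folded back in, the split gives scalar = klo - lambda * khi (mod n), where lambda is
    // the secp256k1 cube root of unity; this identity is re-checked below via khi * (-lambda) + klo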

    const auto [klo_reconstructed, klo_out] = compute_single_wnaf(klo, lo_stagger, klo_negative, true);
    const auto [khi_reconstructed, khi_out] = compute_single_wnaf(khi, hi_stagger, khi_negative, false);

    uint256_t minus_lambda_val(-secp256k1::fr::cube_root_of_unity());
    Fr minus_lambda(bb::fr(minus_lambda_val.slice(0, 136)), bb::fr(minus_lambda_val.slice(136, 256)), false);

    Fr reconstructed_scalar = khi_reconstructed.madd(minus_lambda, { klo_reconstructed });

    if (reconstructed_scalar.get_value() != scalar.get_value()) {
        std::cerr << "biggroup_nafs: secp256k1 reconstructed wnaf does not match input! " << reconstructed_scalar
                  << " vs " << scalar << std::endl;
    }
    scalar.binary_basis_limbs[0].element.assert_equal(reconstructed_scalar.binary_basis_limbs[0].element);
    scalar.binary_basis_limbs[1].element.assert_equal(reconstructed_scalar.binary_basis_limbs[1].element);
    scalar.binary_basis_limbs[2].element.assert_equal(reconstructed_scalar.binary_basis_limbs[2].element);
    scalar.binary_basis_limbs[3].element.assert_equal(reconstructed_scalar.binary_basis_limbs[3].element);
    scalar.prime_basis_limb.assert_equal(reconstructed_scalar.prime_basis_limb);

    return { .klo = klo_out, .khi = khi_out };
}

template <typename C, class Fq, class Fr, class G>
template <size_t max_num_bits, size_t WNAF_SIZE>
std::vector<field_t<C>> element<C, Fq, Fr, G>::compute_wnaf(const Fr& scalar)
{
    C* ctx = scalar.context;
    uint512_t scalar_multiplier_512 = uint512_t(uint256_t(scalar.get_value()) % Fr::modulus);
    uint256_t scalar_multiplier = scalar_multiplier_512.lo;

    constexpr size_t num_bits = (max_num_bits == 0) ? (Fr::modulus.get_msb() + 1) : (max_num_bits);
    constexpr size_t num_rounds = ((num_bits + WNAF_SIZE - 1) / WNAF_SIZE);

    uint64_t wnaf_values[num_rounds] = { 0 };
    bool skew = false;
    bb::wnaf::fixed_wnaf<num_bits, 1, WNAF_SIZE>(&scalar_multiplier.data[0], &wnaf_values[0], skew, 0);
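    // Each wnaf_values[i] stores one window: bit 31 holds the sign and the low bits the magnitude m, with the
    // window representing the odd digit ±(2m + 1) under the offset encoding below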

    std::vector<field_t<C>> wnaf_entries;
    for (size_t i = 0; i < num_rounds; ++i) {
        bool predicate = bool((wnaf_values[i] >> 31U) & 1U);
        uint64_t offset_entry;
        if (!predicate) {
            offset_entry = (1ULL << (WNAF_SIZE - 1)) + (wnaf_values[i] & 0xffffff);
        } else {
            offset_entry = (1ULL << (WNAF_SIZE - 1)) - 1 - (wnaf_values[i] & 0xffffff);
        }
        field_t<C> entry(witness_t<C>(ctx, offset_entry));
        ctx->create_new_range_constraint(entry.witness_index, 1ULL << (WNAF_SIZE), "biggroup_nafs");

        wnaf_entries.emplace_back(entry);
    }
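    // With WNAF_SIZE = 4 each entry e lies in [0, 15] and encodes the odd signed digit d = 2e - 15:
    // e.g. e = 11 encodes +7 and e = 4 encodes -7 (the "value * 2 - 15" map described in the composite
    // branch below)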

    // add skew
    wnaf_entries.emplace_back(witness_t<C>(ctx, skew));
    ctx->create_new_range_constraint(wnaf_entries[wnaf_entries.size() - 1].witness_index, 1, "biggroup_nafs");

    // TODO(https://github.com/AztecProtocol/barretenberg/issues/664)
    // VALIDATE SUM DOES NOT OVERFLOW P

    // validate correctness of wNAF
    if constexpr (!Fr::is_composite) {
        std::vector<Fr> accumulators;
        for (size_t i = 0; i < num_rounds; ++i) {
            Fr entry = wnaf_entries[wnaf_entries.size() - 2 - i];
            entry *= 2;
            // entry -= 15;
            entry *= static_cast<Fr>(uint256_t(1) << (i * WNAF_SIZE));
            accumulators.emplace_back(entry);
        }
        accumulators.emplace_back(wnaf_entries[wnaf_entries.size() - 1] * -1);
        uint256_t negative_offset(0);
        for (size_t i = 0; i < num_rounds; ++i) {
            negative_offset += uint256_t((1ULL << WNAF_SIZE) - 1) * (uint256_t(1) << (i * WNAF_SIZE));
        }
        accumulators.emplace_back(-Fr(negative_offset));
        Fr accumulator_result = Fr::accumulate(accumulators);
        scalar.assert_equal(accumulator_result);
    } else {
        // If Fr is a non-native field element, we can't just accumulate the wnaf entries into a single value,
        // as we could overflow the circuit modulus
        //
        // We add the first 34 wnaf entries into a 'low' 136-bit accumulator (136 = 2 * 68-bit limbs)
        // We add the remaining wnaf entries into a 'high' accumulator
        // We can then directly construct an Fr element from the accumulators.
        // However we cannot underflow our accumulators, and our wnafs represent negative and positive values
        // The raw value of each wnaf entry is contained in the range [0, 15], however these values represent the
        // odd integers [-15, -13, -11, ..., 13, 15]
        //
        // To map from the raw value to the actual value, we must compute `value * 2 - 15`
        // However, we do not subtract off the -15 term when constructing our low and high accumulators. Instead of
        // multiplying by two when accumulating we simply add the accumulated value to itself. This way it
        // automatically updates multiplicative constants without computing new witnesses. This ensures the low
        // accumulator will not underflow
        //
        // Once we have reconstructed an Fr element out of our accumulators,
        // we ALSO construct an Fr element from the constant offset terms we left out
        // We then subtract off the constant term and call `Fr::assert_is_in_field` to reduce the value modulo
        // Fr::modulus
        const auto reconstruct_half_wnaf = [](field_t<C>* wnafs, const size_t half_round_length) {
            std::vector<field_t<C>> half_accumulators;
            for (size_t i = 0; i < half_round_length; ++i) {
                field_t<C> entry = wnafs[half_round_length - 1 - i];
                entry *= static_cast<field_t<C>>(uint256_t(1) << (i * 4));
                half_accumulators.emplace_back(entry);
            }
            return field_t<C>::accumulate(half_accumulators);
        };
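        // reconstruct_half_wnaf treats its input as most-significant-first, returning
        // sum_j wnafs[j] * 2^(4 * (half_round_length - 1 - j))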
        const size_t midpoint = num_rounds - (Fr::NUM_LIMB_BITS * 2) / WNAF_SIZE;
        auto hi_accumulators = reconstruct_half_wnaf(&wnaf_entries[0], midpoint);
        auto lo_accumulators = reconstruct_half_wnaf(&wnaf_entries[midpoint], num_rounds - midpoint);
        uint256_t negative_lo(0);
        uint256_t negative_hi(0);
        for (size_t i = 0; i < midpoint; ++i) {
            negative_hi += uint256_t(15) * (uint256_t(1) << (i * 4));
        }
        for (size_t i = 0; i < (num_rounds - midpoint); ++i) {
            negative_lo += uint256_t(15) * (uint256_t(1) << (i * 4));
        }
        BB_ASSERT_EQ((num_rounds - midpoint) * 4, 136U);
        // If skew == 1, lo_offset = 0, else lo_offset = 0xf...f
        field_t<C> lo_offset = (-field_t<C>(bb::fr(negative_lo)))
                                   .madd(wnaf_entries[wnaf_entries.size() - 1], field_t<C>(bb::fr(negative_lo)))
                                   .normalize();
        Fr offset = Fr(lo_offset, field_t<C>(bb::fr(negative_hi)) + wnaf_entries[wnaf_entries.size() - 1], true);
        Fr reconstructed = Fr(lo_accumulators, hi_accumulators, true);
        reconstructed = (reconstructed + reconstructed) - offset;
        reconstructed.assert_is_in_field();
        reconstructed.assert_equal(scalar);
    }

    // Set the tags of wnaf_entries to the original scalar tag
    const auto original_tag = scalar.get_origin_tag();
    for (auto& entry : wnaf_entries) {
        entry.set_origin_tag(original_tag);
    }
    return wnaf_entries;
}

template <typename C, class Fq, class Fr, class G>
std::vector<bool_t<C>> element<C, Fq, Fr, G>::compute_naf(const Fr& scalar, const size_t max_num_bits)
{
    // We are not handling the case of odd bit lengths here.
    BB_ASSERT_EQ(max_num_bits % 2, 0U);

    C* ctx = scalar.context;
    uint512_t scalar_multiplier_512 = uint512_t(uint256_t(scalar.get_value()) % Fr::modulus);
    uint256_t scalar_multiplier = scalar_multiplier_512.lo;
    // NAF can't handle 0
    if (scalar_multiplier == 0) {
        scalar_multiplier = Fr::modulus;
    }

    const size_t num_rounds = (max_num_bits == 0) ? Fr::modulus.get_msb() + 1 : max_num_bits;
    std::vector<bool_ct> naf_entries(num_rounds + 1);

    // if boolean is false => do NOT flip y
    // if boolean is true => DO flip y
    // first entry is the skew, i.e. do we subtract one from the final result or not
    if (scalar_multiplier.get_bit(0) == false) {
        // add skew
        naf_entries[num_rounds] = bool_ct(witness_t(ctx, true));
        scalar_multiplier += uint256_t(1);
    } else {
        naf_entries[num_rounds] = bool_ct(witness_t(ctx, false));
    }
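    // NAF digits here are all ±1, which can only represent odd values; an even scalar is made odd by adding 1
    // and recording the skew, which is subtracted again during reconstruction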
    // We need to manually propagate the origin tag
    naf_entries[num_rounds].set_origin_tag(scalar.get_origin_tag());

    for (size_t i = 0; i < num_rounds - 1; ++i) {
        bool next_entry = scalar_multiplier.get_bit(i + 1);
        // if the next entry is false, we need to flip the sign of the current entry, i.e. make it negative
        // This is a VERY hacky workaround to ensure that UltraBuilder will apply a basic
        // range constraint per bool, and not a full 1-bit range gate
        if (next_entry == false) {
            bool_ct bit(ctx, true);
            bit.context = ctx;
            bit.witness_index = witness_t<C>(ctx, true).witness_index; // flip sign
            bit.witness_bool = true;
            ctx->create_new_range_constraint(
                bit.witness_index, 1, "biggroup_nafs: compute_naf extracted too many bits in non-next_entry case");

            naf_entries[num_rounds - i - 1] = bit;
        } else {
            bool_ct bit(ctx, false);
            bit.witness_index = witness_t<C>(ctx, false).witness_index; // don't flip sign
            bit.witness_bool = false;
            ctx->create_new_range_constraint(
                bit.witness_index, 1, "biggroup_nafs: compute_naf extracted too many bits in next_entry case");

            naf_entries[num_rounds - i - 1] = bit;
        }
        // We need to manually propagate the origin tag
        naf_entries[num_rounds - i - 1].set_origin_tag(scalar.get_origin_tag());
    }
    naf_entries[0] = bool_ct(ctx, false); // most significant naf entry is always positive, i.e. the flip flag is false
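    // Each entry contributes digit (1 - 2 * naf_i) * 2^(num_rounds - 1 - i), and the skew subtracts 1:
    // e.g. with num_rounds = 2, scalar 1 is encoded as [false, true] = 2 - 1 and scalar 3 as [false, false] = 2 + 1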

    // validate correctness of NAF
    if constexpr (!Fr::is_composite) {
        std::vector<Fr> accumulators;
        for (size_t i = 0; i < num_rounds; ++i) {
            // digit = 1 - 2 * naf
            Fr entry(naf_entries[naf_entries.size() - 2 - i]);
            entry *= -2;
            entry += 1;
            entry *= static_cast<Fr>(uint256_t(1) << (i));
            accumulators.emplace_back(entry);
        }
        accumulators.emplace_back(Fr(naf_entries[naf_entries.size() - 1]) * -1); // skew
        Fr accumulator_result = Fr::accumulate(accumulators);
        scalar.assert_equal(accumulator_result);
    } else {
        const auto reconstruct_half_naf = [](bool_ct* nafs, const size_t half_round_length) {
            // Q: need constraint to start from zero?
            field_t<C> negative_accumulator(0);
            field_t<C> positive_accumulator(0);
            for (size_t i = 0; i < half_round_length; ++i) {
                negative_accumulator = negative_accumulator + negative_accumulator + field_t<C>(nafs[i]);
                positive_accumulator =
                    positive_accumulator + positive_accumulator + field_t<C>(1) - field_t<C>(nafs[i]);
            }
            return std::make_pair(positive_accumulator, negative_accumulator);
        };
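        // Note: positive_accumulator - negative_accumulator = sum_i (1 - 2 * nafs[i]) * 2^(half_round_length - 1 - i),
        // matching the single-field accumulation above while keeping both accumulators non-negative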
        const size_t midpoint =
            (num_rounds > Fr::NUM_LIMB_BITS * 2) ? num_rounds - Fr::NUM_LIMB_BITS * 2 : num_rounds / 2;

        std::pair<field_t<C>, field_t<C>> hi_accumulators;
        std::pair<field_t<C>, field_t<C>> lo_accumulators;

        if (num_rounds > Fr::NUM_LIMB_BITS * 2) {
            hi_accumulators = reconstruct_half_naf(&naf_entries[0], midpoint);
            lo_accumulators = reconstruct_half_naf(&naf_entries[midpoint], num_rounds - midpoint);

        } else {
            // If the number of rounds is smaller than Fr::NUM_LIMB_BITS * 2, the high bits of the resulting Fr
            // element are 0.
            const field_t<C> zero = field_t<C>::from_witness_index(ctx, 0);
            lo_accumulators = reconstruct_half_naf(&naf_entries[0], num_rounds);
            hi_accumulators = std::make_pair(zero, zero);
        }

        lo_accumulators.second = lo_accumulators.second + field_t<C>(naf_entries[num_rounds]);

        Fr reconstructed_positive = Fr(lo_accumulators.first, hi_accumulators.first);
        Fr reconstructed_negative = Fr(lo_accumulators.second, hi_accumulators.second);
        Fr accumulator = reconstructed_positive - reconstructed_negative;
        accumulator.assert_equal(scalar);
    }
    // Propagate tags to the naf entries
    const auto original_tag = scalar.get_origin_tag();
    for (auto& naf_entry : naf_entries) {
        naf_entry.set_origin_tag(original_tag);
    }
    return naf_entries;
}
} // namespace bb::stdlib::element_default