    C* ctx = scalar.context;

    constexpr size_t num_bits = 129;
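    // 129 bits: the secp256k1 GLV endomorphism splits a 256-bit scalar into two half-width
    // scalars, each of which fits in at most 129 bits (see the endomorphism split further below).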
    const auto compute_single_wnaf = [ctx](const secp256k1::fr& k,
                                           const size_t stagger,
                                           const bool is_negative,
                                           const bool is_lo = false) {
        constexpr size_t num_rounds = ((num_bits + wnaf_size - 1) / wnaf_size);
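        // The low `stagger` bits of the scalar are excluded from the main wnaf ladder and
        // handled separately as a single fragment (see compute_staggered_wnaf_fragment below).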
        const uint64_t stagger_mask = (1ULL << stagger) - 1;
        const uint64_t stagger_scalar = k.data[0] & stagger_mask;

        uint64_t wnaf_values[num_rounds] = { 0 };
        bool skew_without_stagger;
        uint256_t k_u256{ k.data[0], k.data[1], k.data[2], k.data[3] };
        k_u256 = k_u256 >> stagger;
        // fixed_wnaf needs the bit-length as a compile-time constant, so the two branches
        // select the lo/hi stagger constants
        if (is_lo) {
            bb::wnaf::fixed_wnaf<num_bits - lo_stagger, 1, wnaf_size>(
                &k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
        } else {
            bb::wnaf::fixed_wnaf<num_bits - hi_stagger, 1, wnaf_size>(
                &k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
        }
        const size_t num_rounds_excluding_stagger_bits = ((num_bits + wnaf_size - 1 - stagger) / wnaf_size);
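        // compute_staggered_wnaf_fragment folds the `stagger` low bits of the scalar into one
        // signed, odd fragment plus a skew bit. The fragment absorbs the skew of the wnaf computed
        // over the remaining high bits (a skew there means the high wnaf over-counts by 1, i.e. by
        // 2^stagger once shifted back up), and its own evenness is fixed by emitting an output skew.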
        const auto compute_staggered_wnaf_fragment =
            [](const uint64_t fragment_u64, const uint64_t stagger, bool is_negative, bool wnaf_skew) {
                int fragment = static_cast<int>(fragment_u64);
                // The fragment carries the sign of the parent scalar
                if (is_negative) {
                    fragment = -fragment;
                }
                // Compensate for the skew of the high-bit wnaf
                if (!is_negative && wnaf_skew) {
                    fragment -= (1 << stagger);
                } else if (is_negative && wnaf_skew) {
                    fragment += (1 << stagger);
                }
                // Wnaf fragments must be odd: if the fragment is even, adjust it by 1 and
                // record the adjustment in the output skew
                bool output_skew = (fragment_u64 % 2) == 0;
                if (!is_negative && output_skew) {
                    fragment += 1;
                } else if (is_negative && output_skew) {
                    fragment -= 1;
                }
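                // Map the signed odd fragment f into the same offset form as the wnaf entries:
                // the stored value v satisfies f = 2v - (2^wnaf_size - 1). E.g. for wnaf_size = 4,
                // f = 3 is stored as v = 9 and f = -3 as v = 6.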
                uint64_t output_fragment;
                if (fragment < 0) {
                    output_fragment = static_cast<uint64_t>((int)((1ULL << (wnaf_size - 1))) + (fragment / 2 - 1));
                } else {
                    output_fragment = static_cast<uint64_t>((1ULL << (wnaf_size - 1)) - 1ULL +
                                                            (uint64_t)((uint64_t)fragment / 2 + 1));
                }
                return std::make_pair(output_fragment, output_skew);
            };
        const auto [first_fragment, skew] =
            compute_staggered_wnaf_fragment(stagger_scalar, stagger, is_negative, skew_without_stagger);

        constexpr uint64_t wnaf_window_size = (1ULL << (wnaf_size - 1));
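        // get_wnaf_wires turns the raw wnaf output into circuit witnesses. fixed_wnaf packs each
        // digit as a sign bit (bit 31) plus the magnitude m = (|d| - 1) / 2 in the low bits; we
        // store the digit as the offset value v = W + m for positive digits and v = W - 1 - m for
        // negative ones (W = wnaf_window_size), so that d = 2v - (2W - 1) and v is range-constrainable.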
        const auto get_wnaf_wires = [ctx](uint64_t* wnaf_values, bool is_negative, size_t rounds) {
            std::vector<field_t<C>> wnaf_entries;
            for (size_t i = 0; i < rounds; ++i) {
                // Bit 31 of a fixed_wnaf entry holds the sign of the digit
                bool predicate = bool((wnaf_values[i] >> 31U) & 1U);
                uint64_t offset_entry;
                // A digit counts as "positive" when its sign matches the sign of the whole scalar
                if ((!predicate && !is_negative) || (predicate && is_negative)) {
                    offset_entry = wnaf_window_size + (wnaf_values[i] & 0xffffff);
                } else {
                    offset_entry = wnaf_window_size - 1 - (wnaf_values[i] & 0xffffff);
                }
                field_t<C> entry(witness_t<C>(ctx, offset_entry));
                ctx->create_new_range_constraint(entry.witness_index, wnaf_window_size * 2 - 1, "biggroup_nafs");
                wnaf_entries.emplace_back(entry);
            }
            return wnaf_entries;
        };
        std::vector<field_t<C>> wnaf = get_wnaf_wires(&wnaf_values[0], is_negative, num_rounds_excluding_stagger_bits);
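        // The skew is split into two boolean witnesses: negative_skew corrects a positive scalar
        // (it is folded into the subtracted offset below), positive_skew corrects a negative one
        // (it is added back in reconstruct_bigfield_from_wnaf). The third constraint forces their
        // sum to be a bit as well, i.e. at most one of them is set.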
        field_t<C> negative_skew = witness_t<C>(ctx, is_negative ? 0 : skew);
        field_t<C> positive_skew = witness_t<C>(ctx, is_negative ? skew : 0);
        ctx->create_new_range_constraint(negative_skew.witness_index, 1, "biggroup_nafs");
        ctx->create_new_range_constraint(positive_skew.witness_index, 1, "biggroup_nafs");
        ctx->create_new_range_constraint((negative_skew + positive_skew).witness_index, 1, "biggroup_nafs");
        field_t<C> stagger_fragment = witness_t<C>(ctx, first_fragment);

        const auto reconstruct_bigfield_from_wnaf = [ctx](const std::vector<field_t<C>>& wnaf,
                                                          const field_t<C>& positive_skew,
                                                          const field_t<C>& stagger_fragment,
                                                          const size_t stagger,
                                                          const size_t rounds) {
            std::vector<field_t<C>> accumulator;
            for (size_t i = 0; i < rounds; ++i) {
                // Entries are ordered most significant first; scale each by its window's power of two
                field_t<C> entry = wnaf[rounds - 1 - i] * field_t<C>(uint256_t(1) << (i * wnaf_size));
                accumulator.emplace_back(entry);
            }
            field_t<C> sum = field_t<C>::accumulate(accumulator);
            // Shift the ladder sum above the stagger bits, then add the stagger fragment
            sum = sum * field_t<C>(uint256_t(1) << stagger);
            sum += (stagger_fragment);
            // (the split of `sum` into the limbs of the bigfield element `reconstructed` is elided)
            reconstructed = (reconstructed + reconstructed).add_to_lower_limb(positive_skew, uint256_t(1));
            return reconstructed;
        };
        Fr wnaf_sum = reconstruct_bigfield_from_wnaf(
            wnaf, positive_skew, stagger_fragment, stagger, num_rounds_excluding_stagger_bits);
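        // Every wnaf entry was stored with a constant additive offset (d = 2v - (2W - 1)), and the
        // stagger fragment with offset (2^wnaf_size - 1). Accumulate all of these constants so they
        // can be subtracted in one go, together with the negative skew.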
        uint256_t negative_constant_wnaf_offset(0);
        for (size_t i = 0; i < num_rounds_excluding_stagger_bits; ++i) {
            negative_constant_wnaf_offset +=
                uint256_t(wnaf_window_size * 2 - 1) * (uint256_t(1) << (i * wnaf_size));
        }
        negative_constant_wnaf_offset = negative_constant_wnaf_offset << stagger;
        if (stagger > 0) {
            // The stagger fragment carries its own offset of (2^wnaf_size - 1)
            negative_constant_wnaf_offset += ((1ULL << wnaf_size) - 1ULL);
        }
        Fr offset = Fr(nullptr, negative_constant_wnaf_offset).add_to_lower_limb(negative_skew, uint256_t(1));

        Fr reconstructed = wnaf_sum - offset;
        secp256k1_wnaf wnaf_out{ .wnaf = wnaf,
                                 .positive_skew = positive_skew,
                                 .negative_skew = negative_skew,
                                 .least_significant_wnaf_fragment = stagger_fragment,
                                 .has_wnaf_fragment = (stagger > 0) };
        return std::make_pair(reconstructed, wnaf_out);
    };
    bool klo_negative = false;
    bool khi_negative = false;
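    // klo/khi are produced by the GLV split of `scalar` (the split itself is not shown here):
    // scalar = klo - lambda * khi (mod n), where lambda is the endomorphism eigenvalue, so
    // recomputing khi * (-lambda) + klo below should return the original scalar.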
    const auto [klo_reconstructed, klo_out] = compute_single_wnaf(klo, lo_stagger, klo_negative, true);
    const auto [khi_reconstructed, khi_out] = compute_single_wnaf(khi, hi_stagger, khi_negative, false);

    Fr reconstructed_scalar = khi_reconstructed.madd(minus_lambda, { klo_reconstructed });
    if (reconstructed_scalar.get_value() != scalar.get_value()) {
        std::cerr << "biggroup_nafs: secp256k1 reconstructed wnaf does not match input! "
                  << reconstructed_scalar.get_value() << " vs " << scalar.get_value() << std::endl;
    }
    scalar.binary_basis_limbs[0].element.assert_equal(reconstructed_scalar.binary_basis_limbs[0].element);
    scalar.binary_basis_limbs[1].element.assert_equal(reconstructed_scalar.binary_basis_limbs[1].element);
    scalar.binary_basis_limbs[2].element.assert_equal(reconstructed_scalar.binary_basis_limbs[2].element);
    scalar.binary_basis_limbs[3].element.assert_equal(reconstructed_scalar.binary_basis_limbs[3].element);
    scalar.prime_basis_limb.assert_equal(reconstructed_scalar.prime_basis_limb);

    return { .klo = klo_out, .khi = khi_out };
}
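
/**
 * The next routine computes a constrained wnaf decomposition of a generic scalar: each window
 * entry is committed as an offset witness, the skew as a boolean, and the decomposition is
 * proven by recomposing the entries and asserting equality with the original scalar.
 */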
    C* ctx = scalar.context;

    uint256_t scalar_multiplier = scalar_multiplier_512.lo;

    constexpr size_t num_bits = (max_num_bits == 0) ? (Fr::modulus.get_msb() + 1) : (max_num_bits);
    constexpr size_t num_rounds = ((num_bits + WNAF_SIZE - 1) / WNAF_SIZE);
    uint64_t wnaf_values[num_rounds] = { 0 };
    bool skew = false;
    bb::wnaf::fixed_wnaf<num_bits, 1, WNAF_SIZE>(&scalar_multiplier.data[0], &wnaf_values[0], skew, 0);
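    // fixed_wnaf encodes each digit as a sign bit (bit 31) plus the magnitude (|d| - 1) / 2 in
    // the low bits; `skew` is set when the scalar is even (the wnaf then represents scalar + 1).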
    std::vector<field_t<C>> wnaf_entries;
    for (size_t i = 0; i < num_rounds; ++i) {
        bool predicate = bool((wnaf_values[i] >> 31U) & 1U);
        uint64_t offset_entry;
        if (!predicate) {
            offset_entry = (1ULL << (WNAF_SIZE - 1)) + (wnaf_values[i] & 0xffffff);
        } else {
            offset_entry = (1ULL << (WNAF_SIZE - 1)) - 1 - (wnaf_values[i] & 0xffffff);
        }
        field_t<C> entry(witness_t<C>(ctx, offset_entry));
        ctx->create_new_range_constraint(entry.witness_index, (1ULL << WNAF_SIZE) - 1, "biggroup_nafs");
        wnaf_entries.emplace_back(entry);
    }
    // The final entry is the skew, constrained to be a single bit
    wnaf_entries.emplace_back(field_t<C>(witness_t<C>(ctx, skew)));
    ctx->create_new_range_constraint(wnaf_entries[wnaf_entries.size() - 1].witness_index, 1, "biggroup_nafs");
    if constexpr (!Fr::is_composite) {
        std::vector<Fr> accumulators;
        for (size_t i = 0; i < num_rounds; ++i) {
            // Entries are ordered most significant first; the factor 2^(i * WNAF_SIZE + 1)
            // both positions the digit and supplies the doubling in d = 2v - (2W - 1)
            Fr entry = wnaf_entries[wnaf_entries.size() - 2 - i];
            entry *= Fr(uint256_t(1) << (i * WNAF_SIZE + 1));
            accumulators.emplace_back(entry);
        }
        // Subtract the skew
        accumulators.emplace_back(wnaf_entries[wnaf_entries.size() - 1] * -1);
        // Subtract the accumulated constant offsets of all the entries
        uint256_t negative_offset(0);
        for (size_t i = 0; i < num_rounds; ++i) {
            negative_offset += uint256_t((1ULL << WNAF_SIZE) - 1) << (i * WNAF_SIZE);
        }
        accumulators.emplace_back(-Fr(negative_offset));
        Fr accumulator_result = Fr::accumulate(accumulators);
        scalar.assert_equal(accumulator_result);
    } else {
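        // Composite (bigfield) case: the sum no longer fits in a native field element, so
        // recompose the high and low halves separately and feed them into the bigfield
        // (lo, hi) constructor. The midpoint is chosen so the low half spans exactly the
        // lower two bigfield limbs.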
        const auto reconstruct_half_wnaf = [](field_t<C>* wnafs, const size_t half_round_length) {
            std::vector<field_t<C>> half_accumulators;
            for (size_t i = 0; i < half_round_length; ++i) {
                field_t<C> entry = wnafs[half_round_length - 1 - i];
                entry *= field_t<C>(uint256_t(1) << (i * WNAF_SIZE));
                half_accumulators.emplace_back(entry);
            }
            return field_t<C>::accumulate(half_accumulators);
        };
        const size_t midpoint = num_rounds - (Fr::NUM_LIMB_BITS * 2) / WNAF_SIZE;
        auto hi_accumulators = reconstruct_half_wnaf(&wnaf_entries[0], midpoint);
        auto lo_accumulators = reconstruct_half_wnaf(&wnaf_entries[midpoint], num_rounds - midpoint);

        // Constant offsets carried by the entries of each half
        uint256_t negative_lo(0);
        uint256_t negative_hi(0);
        for (size_t i = 0; i < midpoint; ++i) {
            negative_hi += uint256_t((1ULL << WNAF_SIZE) - 1) << (i * WNAF_SIZE);
        }
        for (size_t i = 0; i < (num_rounds - midpoint); ++i) {
            negative_lo += uint256_t((1ULL << WNAF_SIZE) - 1) << (i * WNAF_SIZE);
        }
        // Fold the boolean skew into the low-half offset: offset_lo = skew + negative_lo
        field_t<C> offset_lo =
            field_t<C>(1).madd(wnaf_entries[wnaf_entries.size() - 1], field_t<C>(bb::fr(negative_lo)));
        Fr offset = Fr(offset_lo, field_t<C>(bb::fr(negative_hi)), true);
        Fr reconstructed = Fr(lo_accumulators, hi_accumulators, true);
        reconstructed = (reconstructed + reconstructed) - offset;
        reconstructed.assert_is_in_field();
        reconstructed.assert_equal(scalar);
    }
    const auto original_tag = scalar.get_origin_tag();
    // Propagate the origin tag of the scalar to all wnaf entries
    for (auto& entry : wnaf_entries) {
        entry.set_origin_tag(original_tag);
    }
    return wnaf_entries;
}
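
/**
 * compute_naf: constrained non-adjacent-form decomposition. Every bit position holds a digit
 * of +1 or -1 (encoded as a boolean: false = +1, true = -1), which can only represent odd
 * values; even scalars are adjusted by 1 and the correction recorded in a final skew entry.
 */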
    C* ctx = scalar.context;

    uint256_t scalar_multiplier = scalar_multiplier_512.lo;

    if (scalar_multiplier == 0) {
        // (handling of the zero scalar elided)
    }

    const size_t num_rounds = (max_num_bits == 0) ? Fr::modulus.get_msb() + 1 : max_num_bits;

    std::vector<bool_ct> naf_entries(num_rounds + 1);
    // If the scalar is even, add 1 so the naf digits can represent it, and record the
    // correction in the final (skew) entry, which is subtracted on reconstruction
    if (scalar_multiplier.get_bit(0) == false) {
        scalar_multiplier += 1;
        naf_entries[num_rounds] = bool_ct(witness_t<C>(ctx, true));
    } else {
        naf_entries[num_rounds] = bool_ct(witness_t<C>(ctx, false));
    }
    naf_entries[num_rounds].set_origin_tag(scalar.get_origin_tag());

    for (size_t i = 0; i < num_rounds - 1; ++i) {
        // A digit is +1 when the next-higher bit of the (odd) scalar is set, -1 otherwise
        bool next_entry = scalar_multiplier.get_bit(i + 1);
        if (next_entry == false) {
            // Build the bool_ct from a raw witness (a bool_ct constructor would add its own
            // boolean gate); booleanity is enforced by the explicit range constraint instead
            bool_ct bit;
            bit.context = ctx;
            bit.witness_index = witness_t<C>(ctx, true).witness_index;
            bit.witness_bool = true;
            ctx->create_new_range_constraint(
                bit.witness_index, 1, "biggroup_nafs: compute_naf extracted too many bits in non-next_entry case");
            naf_entries[num_rounds - i - 1] = bit;
        } else {
            bool_ct bit;
            bit.context = ctx;
            bit.witness_index = witness_t<C>(ctx, false).witness_index;
            bit.witness_bool = false;
            ctx->create_new_range_constraint(
                bit.witness_index, 1, "biggroup_nafs: compute_naf extracted too many bits in next_entry case");
            naf_entries[num_rounds - i - 1] = bit;
        }
        naf_entries[num_rounds - i - 1].set_origin_tag(scalar.get_origin_tag());
    }

    // The most significant digit is always +1
    naf_entries[0] = bool_ct(ctx, false);
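    // Recomposition: a naf boolean b at bit position i contributes (1 - 2b) * 2^i, and the skew
    // entry subtracts 1. As with the wnaf above, the native-field case is a single accumulation.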
    if constexpr (!Fr::is_composite) {
        std::vector<Fr> accumulators;
        for (size_t i = 0; i < num_rounds; ++i) {
            // The digit at bit position i is (1 - 2b) (least significant entry is at size - 2)
            Fr entry(naf_entries[naf_entries.size() - 2 - i]);
            entry = Fr(uint256_t(1) << i) - entry * Fr(uint256_t(1) << (i + 1));
            accumulators.emplace_back(entry);
        }
        // Subtract the skew
        accumulators.emplace_back(Fr(naf_entries[naf_entries.size() - 1]) * -1);
        Fr accumulator_result = Fr::accumulate(accumulators);
        scalar.assert_equal(accumulator_result);
    } else {
        const auto reconstruct_half_naf = [](bool_ct* nafs, const size_t half_round_length) {
            // Accumulate the positive (+1 digit) and negative (-1 digit) contributions separately
            field_t<C> negative_accumulator(bb::fr(0));
            field_t<C> positive_accumulator(bb::fr(0));
            for (size_t i = 0; i < half_round_length; ++i) {
                negative_accumulator = negative_accumulator + negative_accumulator + field_t<C>(nafs[i]);
                positive_accumulator =
                    positive_accumulator + positive_accumulator + 1 - field_t<C>(nafs[i]);
            }
            return std::make_pair(positive_accumulator, negative_accumulator);
        };
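        // Split the naf along the bigfield limb boundary when there are enough rounds to
        // overflow a native field element, and recombine through the (lo, hi) constructor.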
        const size_t midpoint =
            (num_rounds > Fr::NUM_LIMB_BITS * 2) ? num_rounds - Fr::NUM_LIMB_BITS * 2 : num_rounds / 2;
        std::pair<field_t<C>, field_t<C>> hi_accumulators(field_t<C>(bb::fr(0)), field_t<C>(bb::fr(0)));
        std::pair<field_t<C>, field_t<C>> lo_accumulators;
        if (num_rounds > Fr::NUM_LIMB_BITS * 2) {
            hi_accumulators = reconstruct_half_naf(&naf_entries[0], midpoint);
            lo_accumulators = reconstruct_half_naf(&naf_entries[midpoint], num_rounds - midpoint);
        } else {
            // Everything fits into the low half; the high half stays zero
            lo_accumulators = reconstruct_half_naf(&naf_entries[0], num_rounds);
        }
        // The skew entry contributes -1: add it to the negative accumulator of the low half
        lo_accumulators.second = lo_accumulators.second + field_t<C>(naf_entries[num_rounds]);
        Fr reconstructed_positive = Fr(lo_accumulators.first, hi_accumulators.first);
        Fr reconstructed_negative = Fr(lo_accumulators.second, hi_accumulators.second);
        Fr accumulator = reconstructed_positive - reconstructed_negative;
        accumulator.assert_equal(scalar);
    }
    const auto original_tag = scalar.get_origin_tag();
    // Propagate the origin tag of the scalar to all naf entries
    for (auto& naf_entry : naf_entries) {
        naf_entry.set_origin_tag(original_tag);
    }
    return naf_entries;
}