constexpr size_t get_optimal_bucket_width (const size_t num_points)

constexpr size_t get_num_buckets (const size_t num_points)

constexpr size_t get_num_rounds (const size_t num_points)

template<size_t bits, size_t bit_position>
uint64_t get_wnaf_bits_const (const uint64_t *scalar) noexcept

uint64_t get_wnaf_bits (const uint64_t *scalar, const uint64_t bits, const uint64_t bit_position) noexcept

void fixed_wnaf_packed (const uint64_t *scalar, uint64_t *wnaf, bool &skew_map, const uint64_t point_index, const size_t wnaf_bits) noexcept

void fixed_wnaf (const uint64_t *scalar, uint64_t *wnaf, bool &skew_map, const uint64_t point_index, const uint64_t num_points, const size_t wnaf_bits) noexcept
    Performs fixed-window non-adjacent form (WNAF) computation for scalar multiplication.

uint64_t get_num_scalar_bits (const uint64_t *scalar)

void fixed_wnaf_with_counts (const uint64_t *scalar, uint64_t *wnaf, bool &skew_map, uint64_t *wnaf_round_counts, const uint64_t point_index, const uint64_t num_points, const size_t wnaf_bits) noexcept

template<size_t num_points, size_t wnaf_bits, size_t round_i>
void wnaf_round (uint64_t *scalar, uint64_t *wnaf, const uint64_t point_index, const uint64_t previous) noexcept

template<size_t scalar_bits, size_t num_points, size_t wnaf_bits, size_t round_i>
void wnaf_round (uint64_t *scalar, uint64_t *wnaf, const uint64_t point_index, const uint64_t previous) noexcept

template<size_t wnaf_bits, size_t round_i>
void wnaf_round_packed (const uint64_t *scalar, uint64_t *wnaf, const uint64_t point_index, const uint64_t previous) noexcept

template<size_t num_points, size_t wnaf_bits>
void fixed_wnaf (uint64_t *scalar, uint64_t *wnaf, bool &skew_map, const size_t point_index) noexcept

template<size_t num_bits, size_t num_points, size_t wnaf_bits>
void fixed_wnaf (uint64_t *scalar, uint64_t *wnaf, bool &skew_map, const size_t point_index) noexcept

template<size_t scalar_bits, size_t num_points, size_t wnaf_bits, size_t round_i>
void wnaf_round_with_restricted_first_slice (uint64_t *scalar, uint64_t *wnaf, const uint64_t point_index, const uint64_t previous) noexcept

template<size_t num_bits, size_t num_points, size_t wnaf_bits>
void fixed_wnaf_with_restricted_first_slice (uint64_t *scalar, uint64_t *wnaf, bool &skew_map, const size_t point_index) noexcept
|
inline void bb::wnaf::fixed_wnaf_with_counts(const uint64_t *scalar,
                                             uint64_t *wnaf,
                                             bool &skew_map,
                                             uint64_t *wnaf_round_counts,
                                             const uint64_t point_index,
                                             const uint64_t num_points,
                                             const size_t wnaf_bits) noexcept
How do we compute an x-bit wnaf slice?
Iterate over the slices of the scalar. For each slice, if the slice is even, ADD +1 to the current slice and SUBTRACT 2^x from the previous slice. (For the 1st slice we instead add +1 and set the scalar's 'skew' value to 'true', i.e. we need to subtract 1 from the result at the end of our scalar multiplication algorithm.)
In *wnaf we store the following:
- bits 0-30: the ABSOLUTE value of the wnaf slice (i.e. -3 is stored as 3)
- bit 31: a 'predicate' bool (i.e. does the wnaf value need to be negated?)
- bits 32-63: the position, in the point array, of the elliptic curve point this wnaf slice references
N.B. IN OUR STDLIB ALGORITHMS THE SKEW VALUE REPRESENTS AN ADDITION, NOT A SUBTRACTION (i.e. we add +1 at the end of the scalar multiplication algorithm, we don't subtract 1). This eliminates situations that could produce the point at infinity as an output, an edge case our circuit logic cannot accommodate.
Credits: Zac W.
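As a concrete illustration of the 64-bit entry layout described above, here is a minimal, hypothetical pair of pack/unpack helpers. The names and helper structure are assumptions for illustration, not the library's API; only the bit layout comes from the documentation above.

    #include <cstdint>

    // Hypothetical helpers illustrating the wnaf entry layout (not bb's API).
    constexpr uint64_t WNAF_VALUE_MASK = 0x7fffffffULL; // bits 0-30: |wnaf value|
    constexpr uint64_t NEGATION_BIT = 1ULL << 31;       // bit 31: negate on use?

    // Pack a signed wnaf slice and its point index into one 64-bit entry.
    inline uint64_t pack_wnaf_entry(int32_t wnaf_value, uint64_t point_index)
    {
        const bool is_negative = wnaf_value < 0;
        const uint64_t abs_value = static_cast<uint64_t>(is_negative ? -wnaf_value : wnaf_value);
        return (point_index << 32) | (is_negative ? NEGATION_BIT : 0ULL) | (abs_value & WNAF_VALUE_MASK);
    }

    // Recover the signed wnaf value: an entry packed from -3 yields -3.
    inline int32_t unpack_wnaf_value(uint64_t entry)
    {
        const auto abs_value = static_cast<int32_t>(entry & WNAF_VALUE_MASK);
        return (entry & NEGATION_BIT) ? -abs_value : abs_value;
    }

For example, pack_wnaf_entry(-3, 7) stores 3 in bits 0-30, sets the predicate bit 31, and stores the point index 7 in bits 32-63.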
Parameters
    scalar             Pointer to the 128-bit non-Montgomery scalar that is to be transformed into WNAF form
    wnaf               Pointer to an output array with room for the required 64-bit WNAF entries
    skew_map           Reference to the output skew value; if true, the point should be added once at the end of the computation
    wnaf_round_counts  Pointer to an output array recording the number of points participating in each round
    point_index        Index, within the point array, of the point that should be multiplied by this scalar
    num_points         Total number of points in the MSM (2 * num_initial_points)
    wnaf_bits          Number of bits per WNAF slice (window width)
Definition at line 274 of file wnaf.hpp.
inline uint64_t bb::wnaf::get_num_scalar_bits(const uint64_t *scalar)
Current flow...
If a wnaf entry is even, we add +1 to it and subtract 32 from the previous entry. This works if the previous entry is odd, and if we apply this process recursively, starting at the least significant window, the previous entry always will be.
However, we want to skip over windows that are 0, which poses a problem.
Scenario 1: an even window, followed by a 0 window, followed by any window 'x'
We can't add 1 to the even window and subtract 32 from the 0 window, as we don't have a bucket that maps to -32. This means we have to identify whether we are going to borrow 32 from 'x', requiring us to look at least 2 steps ahead.
Scenario 2: <even> <0> <0> <x>
This problem extends indefinitely: if we have adjacent 0 windows, we do not know whether we need to track a borrow flag until we identify the next non-zero window.
Scenario 3: <odd> <0>
This one works.
So we should be more conservative about when we omit window entries. The goal here is to identify short scalars, so we want to identify the most significant non-zero window.
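Under that goal, a minimal sketch of the intent is to report the bit length of the two-limb scalar, i.e. the position of its most significant set bit. This is an assumed reading of the semantics, with an illustrative name; it requires C++20 for std::countl_zero.

    #include <bit>
    #include <cstdint>

    // Sketch: bit length of a 128-bit scalar held as two 64-bit limbs,
    // i.e. the index of the most significant set bit, plus one. A short
    // result lets the caller skip the all-zero high windows safely.
    inline uint64_t num_scalar_bits_sketch(const uint64_t *scalar)
    {
        if (scalar[1] != 0) {
            return 128ULL - static_cast<uint64_t>(std::countl_zero(scalar[1]));
        }
        // std::countl_zero(0) == 64, so a zero scalar correctly reports 0 bits.
        return 64ULL - static_cast<uint64_t>(std::countl_zero(scalar[0]));
    }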
Definition at line 234 of file wnaf.hpp.
inline uint64_t bb::wnaf::get_wnaf_bits(const uint64_t *scalar,
                                        const uint64_t bits,
                                        const uint64_t bit_position) noexcept
We want to take a 128-bit scalar, shift it down by (bit_position), and then mask out `bits` number of bits. The low limb contains the first 64 bits, so we shift this limb by (bit_position mod 64), which is also (bit_position & 63). If we require bits from the high limb, these need to be shifted left, not right: a bit at position `b` in the high limb must land at position 64 - (amount we shifted the low limb by) = 64 - (bit_position & 63).
So, step 1: take the low limb and shift it right by (bit_position & 63); take the high limb and shift it left by (64 - (bit_position & 63)).
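A minimal sketch of that two-step limb shift (illustrative name; assumes 1 <= bits < 64 and a two-limb little-endian scalar, and is not necessarily the library's exact code):

    #include <cstdint>

    // Extract `bits` bits from a two-limb scalar, starting at `bit_position`.
    inline uint64_t get_bits_sketch(const uint64_t *scalar, uint64_t bits, uint64_t bit_position)
    {
        const auto lo_idx = static_cast<size_t>(bit_position >> 6);
        const auto hi_idx = static_cast<size_t>((bit_position + bits - 1) >> 6);
        const uint64_t shift = bit_position & 63ULL;

        // Step 1: shift the low limb right to drop the bits below bit_position.
        uint64_t slice = scalar[lo_idx] >> shift;
        if (hi_idx != lo_idx) {
            // Step 2: the slice straddles a limb boundary, so shift the high
            // limb left by (64 - shift) to line its low bits up. This branch
            // is never taken when shift == 0, so the shift stays in range.
            slice |= scalar[hi_idx] << (64ULL - shift);
        }
        // Step 3: mask out the requested number of bits.
        return slice & ((1ULL << bits) - 1ULL);
    }

For example, get_bits_sketch(scalar, 5, 60) stitches together the top 4 bits of the low limb and the lowest bit of the high limb.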
Definition at line 113 of file wnaf.hpp.
template<size_t bits, size_t bit_position>
inline uint64_t bb::wnaf::get_wnaf_bits_const(const uint64_t *scalar) noexcept
Compile-time variant of get_wnaf_bits, with `bits` and `bit_position` supplied as template parameters; the limb-shift logic is identical to the runtime version described above.
Definition at line 79 of file wnaf.hpp.