Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
cycle_scalar.cpp
Go to the documentation of this file.
1// === AUDIT STATUS ===
2// internal: { status: not started, auditors: [], date: YYYY-MM-DD }
3// external_1: { status: not started, auditors: [], date: YYYY-MM-DD }
4// external_2: { status: not started, auditors: [], date: YYYY-MM-DD }
5// =====================
6
7#include "./cycle_scalar.hpp"
8#include "./cycle_group.hpp"
11
12namespace bb::stdlib {
13
// Constructor from two pre-existing limb field elements.
// NOTE(review): the extraction is missing internal line 15, which should be the definition signature —
// presumably `cycle_scalar<Builder>::cycle_scalar(const field_t& _lo, const field_t& _hi)`; confirm
// against the header before editing. The limbs are stored as-is with no constraints added.
14template <typename Builder>
16 : lo(_lo)
17 , hi(_hi)
18{}
19
20template <typename Builder> cycle_scalar<Builder>::cycle_scalar(const field_t& in)
21{
22 const uint256_t value(in.get_value());
23 const uint256_t lo_v = value.slice(0, LO_BITS);
24 const uint256_t hi_v = value.slice(LO_BITS, HI_BITS);
25 constexpr uint256_t shift = uint256_t(1) << LO_BITS;
26 if (in.is_constant()) {
27 lo = lo_v;
28 hi = hi_v;
29 } else {
30 lo = witness_t<Builder>(in.get_context(), lo_v);
31 hi = witness_t<Builder>(in.get_context(), hi_v);
32 (lo + hi * shift).assert_equal(in);
33 // TODO(https://github.com/AztecProtocol/barretenberg/issues/1022): ensure lo and hi are in bb::fr modulus not
34 // bb::fq modulus otherwise we could have two representations for in
35 validate_scalar_is_in_field();
36 }
37 // We need to manually propagate the origin tag
38 lo.set_origin_tag(in.get_origin_tag());
39 hi.set_origin_tag(in.get_origin_tag());
40}
41
42template <typename Builder> cycle_scalar<Builder>::cycle_scalar(const ScalarField& in)
43{
44 const uint256_t value(in);
45 const uint256_t lo_v = value.slice(0, LO_BITS);
46 const uint256_t hi_v = value.slice(LO_BITS, HI_BITS);
47 lo = lo_v;
48 hi = hi_v;
49}
50
// Creates a cycle_scalar witness from a native ScalarField value.
// NOTE(review): the extraction is missing internal lines 52 and 57-60. Line 52 is the definition
// signature (per the declaration: `static cycle_scalar from_witness(Builder* context, const ScalarField& value)`);
// lines 57-60 presumably instantiate `lo`/`hi` as witnesses from `lo_v`/`hi_v` (and possibly set tags) —
// confirm against the repository before editing this block.
51template <typename Builder>
53{
54 const uint256_t value_u256(value);
// Split the native value into a LO_BITS low slice and the remaining high slice.
55 const uint256_t lo_v = value_u256.slice(0, LO_BITS);
56 const uint256_t hi_v = value_u256.slice(LO_BITS, HI_BITS);
// `lo`/`hi` here are locals created on the missing lines, then packed into the result.
61 return cycle_scalar(lo, hi);
62}
63
// Creates a cycle_scalar from a bitstring of known size, for multiplying a group element by a
// fixed-width scalar. The `true` flag in the result skips the primality test (the value is bounded
// by num_bits, not by a field modulus).
// NOTE(review): the extraction is missing internal lines 75 and 82-85. Line 75 is the start of the
// signature (per the declaration: `static cycle_scalar from_witness_bitstring(Builder* context, ...`);
// lines 82-85 presumably create the `lo`/`hi` witnesses from `lo_v`/`hi_v` — confirm before editing.
74template <typename Builder>
76 const uint256_t& bitstring,
77 const size_t num_bits)
78{
// Guard: the bitstring must actually fit in the declared bit width.
79 BB_ASSERT_LT(bitstring.get_msb(), num_bits);
80 const uint256_t lo_v = bitstring.slice(0, LO_BITS);
81 const uint256_t hi_v = bitstring.slice(LO_BITS, HI_BITS);
// Pack as { lo, hi, num_bits, skip_primality_test = true, use_bn254_scalar_field... = false }.
86 cycle_scalar result{ lo, hi, num_bits, true, false };
87 return result;
88}
89
// Creates a cycle_scalar from a field element interpreted as a BN254 scalar; the result uses the
// bn254 scalar field for its primality test (final flag `true`).
// NOTE(review): the extraction is missing internal lines 101 and 115-116. Line 101 is the definition
// signature (per the declaration: `static cycle_scalar create_from_bn254_scalar(const field_t& _in,
// bool skip_primality_test = false)`); lines 115-116 presumably propagate the origin tag from `in`
// onto `lo`/`hi` (matching the comment on line 114) — confirm before editing.
100template <typename Builder>
102{
103 const uint256_t value_u256(in.get_value());
104 const uint256_t lo_v = value_u256.slice(0, LO_BITS);
105 const uint256_t hi_v = value_u256.slice(LO_BITS, HI_BITS);
106 if (in.is_constant()) {
// Constant path: no witnesses or constraints; primality test not skipped per-value flag.
107 cycle_scalar result{ field_t(lo_v), field_t(hi_v), NUM_BITS, false, true };
108 return result;
109 }
110 field_t lo = witness_t<Builder>(in.get_context(), lo_v);
111 field_t hi = witness_t<Builder>(in.get_context(), hi_v);
// Constrain lo + hi * 2^LO_BITS - in == 0 using a single big-add gate.
112 lo.add_two(hi * (uint256_t(1) << LO_BITS), -in).assert_equal(0);
113
114 // We need to manually propagate the origin tag
117
118 cycle_scalar result{ lo, hi, NUM_BITS, skip_primality_test, true };
119 return result;
120}
// Converts a bigfield (4 x NUM_LIMB_BITS binary-basis limbs) into a cycle_scalar (2 x ~128-bit limbs).
// Constants are reduced modulo the scalar field and sliced directly; witnesses are recombined from the
// bigfield's limbs with range constraints proving the recombination is sound. NOTE: takes `scalar` by
// non-const reference because it may call `scalar.self_reduce()`.
130template <typename Builder> cycle_scalar<Builder>::cycle_scalar(BigScalarField& scalar)
131{
132 auto* ctx = get_context() ? get_context() : scalar.get_context();
133
134 if (scalar.is_constant()) {
135 const uint256_t value((scalar.get_value() % uint512_t(ScalarField::modulus)).lo);
136 const uint256_t value_lo = value.slice(0, LO_BITS);
137 const uint256_t value_hi = value.slice(LO_BITS, HI_BITS);
138
139 lo = value_lo;
140 hi = value_hi;
141 // N.B. to be able to call assert equal, these cannot be constants
142 } else {
143 // To efficiently convert a bigfield into a cycle scalar,
144 // we are going to explicitly rely on the fact that `scalar.lo` and `scalar.hi`
145 // are implicitly range-constrained to be 128 bits when they are converted into 4-bit lookup window slices
146
147 // First check: can the scalar actually fit into LO_BITS + HI_BITS?
148 // If it can, we can tolerate the scalar being > ScalarField::modulus, because performing a scalar mul
149 // implicitly performs a modular reduction
150 // If not, call `self_reduce` to cut enough modulus multiples until the above condition is met
151 if (scalar.get_maximum_value() >= (uint512_t(1) << (LO_BITS + HI_BITS))) {
152 scalar.self_reduce();
153 }
154
155 field_t limb0 = scalar.binary_basis_limbs[0].element;
156 field_t limb1 = scalar.binary_basis_limbs[1].element;
157 field_t limb2 = scalar.binary_basis_limbs[2].element;
158 field_t limb3 = scalar.binary_basis_limbs[3].element;
159
160 // The general plan is as follows:
161 // 1. ensure limb0 contains no more than BigScalarField::NUM_LIMB_BITS
162 // 2. define limb1_lo = limb1.slice(0, LO_BITS - BigScalarField::NUM_LIMB_BITS)
163 // 3. define limb1_hi = limb1.slice(LO_BITS - BigScalarField::NUM_LIMB_BITS, <whatever maximum bound of limb1
164 // is>)
165 // 4. construct *this.lo out of limb0 and limb1_lo
166 // 5. construct *this.hi out of limb1_hi, limb2 and limb3
167 // This is a lot of logic, but very cheap on constraints.
168 // For fresh bignums that have come out of a MUL operation,
169 // the only "expensive" part is a size (LO_BITS - BigScalarField::NUM_LIMB_BITS) range check
170
171 // to convert into a cycle_scalar, we need to convert 4*68 bit limbs into 2*128 bit limbs
172 // we also need to ensure that the number of bits in cycle_scalar is < LO_BITS + HI_BITS
173 // note: we do not need to validate that the scalar is within the field modulus
174 // because performing a scalar multiplication implicitly performs a modular reduction (ecc group is
175 // multiplicative modulo BigField::modulus)
176
177 uint256_t limb1_max = scalar.binary_basis_limbs[1].maximum_value;
178
179 // Ensure that limb0 only contains at most NUM_LIMB_BITS. If it exceeds this value, slice off the excess and add
180 // it into limb1
181 if (scalar.binary_basis_limbs[0].maximum_value > BigScalarField::DEFAULT_MAXIMUM_LIMB) {
182 const uint256_t limb = limb0.get_value();
183 const uint256_t lo_v = limb.slice(0, BigScalarField::NUM_LIMB_BITS);
184 const uint256_t hi_v = limb >> BigScalarField::NUM_LIMB_BITS;
185 field_t lo = field_t::from_witness(ctx, lo_v);
186 field_t hi = field_t::from_witness(ctx, hi_v);
187
188 uint256_t hi_max = (scalar.binary_basis_limbs[0].maximum_value >> BigScalarField::NUM_LIMB_BITS);
189 const uint64_t hi_bits = hi_max.get_msb() + 1;
190 lo.create_range_constraint(BigScalarField::NUM_LIMB_BITS);
191 hi.create_range_constraint(static_cast<size_t>(hi_bits));
// Prove the slice is a valid decomposition of the original limb0 witness.
192 limb0.assert_equal(lo + hi * BigScalarField::shift_1);
193
// Fold limb0's overflow into limb1 and track limb1's new maximum bound accordingly.
194 limb1 += hi;
195 limb1_max += hi_max;
196 limb0 = lo;
197 }
198
199 // sanity check that limb[1] is the limb that contributes both to *this.lo and *this.hi
200 BB_ASSERT_GT(BigScalarField::NUM_LIMB_BITS * 2, LO_BITS);
201 BB_ASSERT_LT(BigScalarField::NUM_LIMB_BITS, LO_BITS);
202
203 // limb1 is the tricky one as it contributes to both *this.lo and *this.hi
204 // By this point, we know that limb1 fits in the range `1 << BigScalarField::NUM_LIMB_BITS to (1 <<
205 // BigScalarField::NUM_LIMB_BITS) + limb1_max.get_maximum_value() we need to slice this limb into 2. The first
206 // is LO_BITS - BigScalarField::NUM_LIMB_BITS (which represents its contribution to *this.lo) and the second
207 // represents the limbs contribution to *this.hi Step 1: compute the max bit sizes of both slices
208 const size_t lo_bits_in_limb_1 = LO_BITS - BigScalarField::NUM_LIMB_BITS;
209 const size_t hi_bits_in_limb_1 = (static_cast<size_t>(limb1_max.get_msb()) + 1) - lo_bits_in_limb_1;
210
211 // Step 2: compute the witness values of both slices
212 const uint256_t limb_1 = limb1.get_value();
213 const uint256_t limb_1_hi_multiplicand = (uint256_t(1) << lo_bits_in_limb_1);
214 const uint256_t limb_1_hi_v = limb_1 >> lo_bits_in_limb_1;
215 const uint256_t limb_1_lo_v = limb_1 - (limb_1_hi_v << lo_bits_in_limb_1);
216
217 // Step 3: instantiate both slices as witnesses and validate their sum equals limb1
218 field_t limb_1_lo = field_t::from_witness(ctx, limb_1_lo_v);
219 field_t limb_1_hi = field_t::from_witness(ctx, limb_1_hi_v);
220
221 // We need to propagate the origin tag to the chunks of limb1
222 limb_1_lo.set_origin_tag(limb1.get_origin_tag());
223 limb_1_hi.set_origin_tag(limb1.get_origin_tag());
224 limb1.assert_equal(limb_1_hi * limb_1_hi_multiplicand + limb_1_lo);
225
226 // Step 4: apply range constraints to validate both slices represent the expected contributions to *this.lo and
227 // *this.hi
228 limb_1_lo.create_range_constraint(lo_bits_in_limb_1);
229 limb_1_hi.create_range_constraint(hi_bits_in_limb_1);
230
231 // construct *this.lo out of:
232 // a. `limb0` (the first NUM_LIMB_BITS bits of scalar)
233 // b. `limb_1_lo` (the first LO_BITS - NUM_LIMB_BITS) of limb1
234 lo = limb0 + (limb_1_lo * BigScalarField::shift_1);
235
236 const uint256_t limb_2_shift = uint256_t(1) << (BigScalarField::NUM_LIMB_BITS - lo_bits_in_limb_1);
237 const uint256_t limb_3_shift =
238 uint256_t(1) << ((BigScalarField::NUM_LIMB_BITS - lo_bits_in_limb_1) + BigScalarField::NUM_LIMB_BITS);
239
240 // construct *this.hi out of limb2, limb3 and the remaining term from limb1 not contributing to `lo`
241 hi = limb_1_hi.add_two(limb2 * limb_2_shift, limb3 * limb_3_shift);
242 }
243 // We need to manually propagate the origin tag
244 lo.set_origin_tag(scalar.get_origin_tag());
245 hi.set_origin_tag(scalar.get_origin_tag());
246};
247
248template <typename Builder> bool cycle_scalar<Builder>::is_constant() const
249{
250 return (lo.is_constant() && hi.is_constant());
251}
252
// Checks that hi * 2^LO_BITS + lo < modulus when evaluated over the INTEGERS, where the modulus is
// either the bn254 scalar field (if the flag is set) or the cycle curve's scalar field. Implemented
// as a schoolbook subtraction (modulus - value) with an explicit borrow bit, followed by range checks
// proving both limb differences are non-negative. No-op for constants or when the test is skipped.
266template <typename Builder> void cycle_scalar<Builder>::validate_scalar_is_in_field() const
267{
268 using FF = typename field_t::native;
269 constexpr bool IS_ULTRA = Builder::CIRCUIT_TYPE == CircuitType::ULTRA;
270
271 // AUDITTODO: Investigate using field_t::split_at here per Sergei's suggestion
272 if (!is_constant() && !skip_primality_test()) {
273 // Check that scalar.hi * 2^LO_BITS + scalar.lo < cycle_group_modulus when evaluated over the integers
274 const uint256_t cycle_group_modulus =
275 use_bn254_scalar_field_for_primality_test() ? FF::modulus : ScalarField::modulus;
276 const uint256_t r_lo = cycle_group_modulus.slice(0, LO_BITS);
277 const uint256_t r_hi = cycle_group_modulus.slice(LO_BITS, HI_BITS);
278
// A borrow is needed when the low limb exceeds the modulus' low slice (r_lo - lo would underflow).
279 bool need_borrow = uint256_t(lo.get_value()) > r_lo;
280 field_t borrow = lo.is_constant() ? need_borrow : field_t::from_witness(get_context(), need_borrow);
281
282 // directly call `create_new_range_constraint` to avoid creating an arithmetic gate
283 if (!lo.is_constant()) {
284 // We need to manually propagate the origin tag
285 borrow.set_origin_tag(lo.get_origin_tag());
286 if constexpr (IS_ULTRA) {
287 get_context()->create_new_range_constraint(borrow.get_witness_index(), 1, "borrow");
288 } else {
// Non-ultra fallback: borrow * borrow == borrow forces borrow to be boolean.
289 borrow.assert_equal(borrow * borrow);
290 }
291 }
292 // Hi range check = r_hi - y_hi - borrow
293 // Lo range check = r_lo - y_lo + borrow * 2^{LO_BITS}
294 field_t hi_diff = (-hi + r_hi) - borrow;
295 field_t lo_diff = (-lo + r_lo) + (borrow * (uint256_t(1) << LO_BITS));
296
// Both differences being in range proves the subtraction did not underflow, i.e. value < modulus.
297 hi_diff.create_range_constraint(HI_BITS);
298 lo_diff.create_range_constraint(LO_BITS);
299 }
300}
301
// Returns the native scalar value represented by this cycle_scalar, recomposed as lo + hi * 2^LO_BITS.
// NOTE(review): the extraction is missing internal line 302, which should be the definition signature —
// per the declaration: `typename cycle_scalar<Builder>::ScalarField cycle_scalar<Builder>::get_value() const`;
// confirm against the header before editing.
303{
304 uint256_t lo_v(lo.get_value());
305 uint256_t hi_v(hi.get_value());
306 return ScalarField(lo_v + (hi_v << LO_BITS));
307}
308
311
312} // namespace bb::stdlib
#define BB_ASSERT_GT(left, right,...)
Definition assert.hpp:87
#define BB_ASSERT_LT(left, right,...)
Definition assert.hpp:115
constexpr uint256_t slice(uint64_t start, uint64_t end) const
constexpr uint64_t get_msb() const
Builder * get_context() const
Definition bigfield.hpp:701
uint512_t get_value() const
uint512_t get_maximum_value() const
bb::OriginTag get_origin_tag() const
Definition bigfield.hpp:711
bool is_constant() const
Check if the bigfield is constant, i.e. its prime limb is constant.
Definition bigfield.hpp:615
std::array< Limb, NUM_LIMBS > binary_basis_limbs
Represents a bigfield element in the binary basis. A bigfield element is represented as a combination...
Definition bigfield.hpp:80
cycle_scalar represents a member of the cycle curve SCALAR FIELD. This is NOT the native circuit fiel...
typename Curve::ScalarField ScalarField
static cycle_scalar create_from_bn254_scalar(const field_t &_in, bool skip_primality_test=false)
Use when we want to multiply a group element by a string of bits of known size. N....
ScalarField get_value() const
cycle_scalar(const field_t &_lo, const field_t &_hi, const size_t bits, const bool skip_primality_test, const bool use_bn254_scalar_field_for_primality_test)
static cycle_scalar from_witness(Builder *context, const ScalarField &value)
static cycle_scalar from_witness_bitstring(Builder *context, const uint256_t &bitstring, size_t num_bits)
Use when we want to multiply a group element by a string of bits of known size. N....
void validate_scalar_is_in_field() const
Checks that a cycle_scalar value is smaller than a prime field modulus when evaluated over the INTEGE...
void assert_equal(const field_t &rhs, std::string const &msg="field_t::assert_equal") const
Copy constraint: constrain that *this field is equal to rhs element.
Definition field.cpp:929
void create_range_constraint(size_t num_bits, std::string const &msg="field_t::range_constraint") const
Let x = *this.normalize(), constrain x.v < 2^{num_bits}.
Definition field.cpp:908
Builder * get_context() const
Definition field.hpp:389
OriginTag get_origin_tag() const
Definition field.hpp:333
bb::fr get_value() const
Given a := *this, compute its value given by a.v * a.mul + a.add.
Definition field.cpp:827
static field_t from_witness(Builder *ctx, const bb::fr &input)
Definition field.hpp:424
bool is_constant() const
Definition field.hpp:399
void set_free_witness_tag()
Set the free witness flag for the field element's tag.
Definition field.hpp:338
void set_origin_tag(const OriginTag &new_tag) const
Definition field.hpp:332
field_t add_two(const field_t &add_b, const field_t &add_c) const
Efficiently compute (this + a + b) using big_mul gate.
Definition field.cpp:572
uint32_t get_witness_index() const
Get the witness index of the current field element.
Definition field.hpp:461
StrictMock< MockContext > context
typename Flavor::FF FF
static constexpr uint256_t modulus