Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
cycle_group.fuzzer.hpp
Go to the documentation of this file.
1// === AUDIT STATUS ===
2// internal: { status: not started, auditors: [], date: YYYY-MM-DD }
3// external_1: { status: not started, auditors: [], date: YYYY-MM-DD }
4// external_2: { status: not started, auditors: [], date: YYYY-MM-DD }
5// =====================
6
7#pragma once
12#pragma clang diagnostic push
13
14// -Wc99-designator prevents us from using designators and nested designators
15// in struct initializations
16// such as {.in.first = a, .out = b}, since it's not a part of c++17 standard
17// However the use of them in this particular file heavily increases
18// the readability and conciseness of the CycleGroupBase::Instruction initializations
19#pragma clang diagnostic ignored "-Wc99-designator"
20
21#define HAVOC_TESTING
22
23// This is a global variable, so that the execution handling class could alter it and signal to the input tester
24// that the input should fail
26
28
29// #define FUZZING_SHOW_INFORMATION
30
31// #define DISABLE_MULTIPLICATION
32// #define DISABLE_BATCH_MUL
33
34#ifdef FUZZING_SHOW_INFORMATION
// Debug-trace helper: builds a printable operand name for one input and the
// output. An element prints as "c<idx>" when its cycle_group is a circuit
// constant and "w<idx>" when it is a witness; an output index past the end of
// the stack prints with an "auto " prefix to mimic a fresh declaration.
35#define PREP_SINGLE_ARG(stack, first_index, output_index) \
36 std::string rhs = stack[first_index].cycle_group.is_constant() ? "c" : "w"; \
37 std::string out = rhs; \
38 rhs += std::to_string(first_index); \
39 out += std::to_string(output_index >= stack.size() ? stack.size() : output_index); \
40 out = (output_index >= stack.size() ? "auto " : "") + out;
41
// Same as PREP_SINGLE_ARG but for two inputs; the output counts as a
// constant ("c") only if both inputs are constant.
42#define PREP_TWO_ARG(stack, first_index, second_index, output_index) \
43 std::string lhs = stack[first_index].cycle_group.is_constant() ? "c" : "w"; \
44 std::string rhs = stack[second_index].cycle_group.is_constant() ? "c" : "w"; \
45 std::string out = \
46 (stack[first_index].cycle_group.is_constant() && stack[second_index].cycle_group.is_constant()) ? "c" : "w"; \
47 lhs += std::to_string(first_index); \
48 rhs += std::to_string(second_index); \
49 out += std::to_string(output_index >= stack.size() ? stack.size() : output_index); \
50 out = (output_index >= stack.size() ? "auto " : "") + out;
51#endif
52
54
// Bounds on how many (point, scalar) pairs a single BATCH_MUL instruction may
// carry; the actual count is sampled as the sum of two uniform draws over half
// the range (see generateRandom), giving a binomial-like distribution.
55constexpr size_t MINIMUM_MUL_ELEMENTS = 0;
56constexpr size_t MAXIMUM_MUL_ELEMENTS = 8;
57
58// This is an external function in Libfuzzer used internally by custom mutators
59extern "C" size_t LLVMFuzzerMutate(uint8_t* Data, size_t Size, size_t MaxSize);
60
64template <typename Builder> class CycleGroupBase {
65 private:
73 using GroupElement = typename Curve::Element;
76 using BaseField = typename Curve::BaseField;
77
78 public:
80 public:
// Opcodes of the fuzzer's virtual instruction set. _LAST is a sentinel used
// only for uniform opcode sampling (rng.next() % _LAST).
// NOTE(review): the individual enumerators (CONSTANT, WITNESS, DBL, NEG, ADD,
// SUBTRACT, SET, SET_INF, MULTIPLY, BATCH_MUL, RANDOMSEED, ...) are elided in
// this listing — verify the full list against the original source.
81 enum OPCODE {
93#ifndef DISABLE_MULTIPLICATION
95#endif
96#ifndef DISABLE_BATCH_MUL
98#endif
100 _LAST
101 };
102
// A (scalar, scalar*G) pair pushed on the stack by value-producing opcodes.
// NOTE(review): the member declarations (presumably `ScalarField scalar;` and
// `GroupElement value;`) are elided in this listing.
103 struct Element {
104 Element(ScalarField s = ScalarField::one(), GroupElement g = GroupElement::one())
105 : scalar(s)
106 , value(g) {};
109 };
110
// One input stack index and one output index (DBL, NEG, SET, SET_INF, ...).
// Indices are raw bytes; range reduction happens at execution time.
111 struct TwoArgs {
112 uint8_t in;
113 uint8_t out;
114 };
115
// MULTIPLY arguments. NOTE(review): the `ScalarField scalar` member is elided
// in this listing (it is referenced as mulArgs.scalar elsewhere in this file).
116 struct MulArgs {
117 uint8_t in;
118 uint8_t out;
120 };
121
// Two input indices and an output index (ADD, SUBTRACT).
122 struct ThreeArgs {
123 uint8_t in1;
124 uint8_t in2;
125 uint8_t out;
126 };
127
// Three input indices and an output index (four-argument opcodes).
128 struct FourArgs {
129 uint8_t in1;
130 uint8_t in2;
131 uint8_t in3;
132 uint8_t out;
133 };
134
141
153
154 // The type of the instruction
156
157 // Instruction arguments
159
// Samples a fully random instruction: picks an opcode uniformly, then fills
// its arguments with random bytes / log-distributed scalars. Stack indices
// are raw bytes; the executor presumably reduces them modulo the stack size
// — TODO confirm against the execution handler.
167 template <typename T>
168 inline static Instruction generateRandom(T& rng)
169 requires SimpleRng<T>
170 {
171 OPCODE instruction_opcode = static_cast<OPCODE>(rng.next() % (OPCODE::_LAST));
172 uint8_t in, in1, in2, in3, out;
173 Instruction instr;
174
175 switch (instruction_opcode) {
176 case OPCODE::CONSTANT:
177 case OPCODE::WITNESS:
// NOTE(review): an additional case label / opening brace (original line 178)
// is elided in this listing.
179 auto scalar = ScalarField(static_cast<uint64_t>(fast_log_distributed_uint256(rng)));
180 auto el = GroupElement::one() * scalar;
181 return { .id = instruction_opcode, .arguments.element = Element(scalar, el) };
182 }
183 case OPCODE::DBL:
184 case OPCODE::NEG:
186 case OPCODE::SET:
187 case OPCODE::SET_INF:
188 in = static_cast<uint8_t>(rng.next() & 0xff);
189 out = static_cast<uint8_t>(rng.next() & 0xff);
190 return { .id = instruction_opcode, .arguments.twoArgs = { .in = in, .out = out } };
191 case OPCODE::ADD:
192 case OPCODE::SUBTRACT:
193 in1 = static_cast<uint8_t>(rng.next() & 0xff);
194 in2 = static_cast<uint8_t>(rng.next() & 0xff);
195 out = static_cast<uint8_t>(rng.next() & 0xff);
196 return { .id = instruction_opcode,
197 .arguments.threeArgs.in1 = in1,
198 .arguments.threeArgs.in2 = in2,
199 .arguments.threeArgs.out = out };
// NOTE(review): the four-argument case label (original line 200, presumably
// COND_ASSIGN) is elided in this listing.
201 in1 = static_cast<uint8_t>(rng.next() & 0xff);
202 in2 = static_cast<uint8_t>(rng.next() & 0xff);
203 in3 = static_cast<uint8_t>(rng.next() & 0xff);
204 out = static_cast<uint8_t>(rng.next() & 0xff);
205 return { .id = instruction_opcode,
206 .arguments.fourArgs.in1 = in1,
207 .arguments.fourArgs.in2 = in2,
208 .arguments.fourArgs.in3 = in3,
209 .arguments.fourArgs.out = out };
210#ifndef DISABLE_MULTIPLICATION
211 case OPCODE::MULTIPLY:
212 in = static_cast<uint8_t>(rng.next() & 0xff);
213 out = static_cast<uint8_t>(rng.next() & 0xff);
214 return { .id = instruction_opcode,
215 .arguments.mulArgs.scalar = ScalarField(fast_log_distributed_uint256(rng)),
216 .arguments.mulArgs.in = in,
217 .arguments.mulArgs.out = out };
218#endif
219#ifndef DISABLE_BATCH_MUL
220 case OPCODE::BATCH_MUL: {
221 uint8_t mult_size0 =
223 static_cast<uint8_t>(rng.next() % ((MAXIMUM_MUL_ELEMENTS - MINIMUM_MUL_ELEMENTS) / 2));
224 uint8_t mult_size1 =
226 static_cast<uint8_t>(rng.next() % ((MAXIMUM_MUL_ELEMENTS - MINIMUM_MUL_ELEMENTS) / 2));
227 uint8_t mult_size =
228 mult_size0 +
229 mult_size1; // Sample the amount of batch mul participants from the binomial distribution
230 instr.id = instruction_opcode;
231 instr.arguments.batchMulArgs.add_elements_count = mult_size;
232 for (size_t i = 0; i < mult_size; i++) {
233 instr.arguments.batchMulArgs.inputs[i] = static_cast<uint8_t>(rng.next() & 0xff);
234 }
235 for (size_t i = 0; i < mult_size; i++) {
236 instr.arguments.batchMulArgs.scalars[i] = ScalarField(fast_log_distributed_uint256(rng));
237 }
238 instr.arguments.batchMulArgs.output_index = static_cast<uint8_t>(rng.next() & 0xff);
239 return instr;
240 }
241#endif
// NOTE(review): the RANDOMSEED case label (original line 242) is elided in
// this listing.
243 return { .id = instruction_opcode, .arguments.randomseed = rng.next() * rng.next() };
244
245 default:
246 abort(); // We missed some instructions in switch
247 }
248 }
249
// Mutates a stack Element (scalar plus matching curve point), keeping the
// invariant value == scalar * G: every scalar change is mirrored on the point.
259 template <typename T>
260 inline static Element mutateGroupElement(Element e, T& rng, HavocSettings& havoc_config)
261 requires SimpleRng<T>
262 {
263 // We can't just randomly modify a point on a curve
264 // But we can modify its scalar
265 // With a certain probability, we apply changes to the Montgomery form, rather than the plain form. This
266 // has merit, since the computation is performed in montgomery form and comparisons are often performed
267 // in it, too.
268 // By the same logic we can switch between Jacobian and Affine coordinates.
269 // Libfuzzer comparison tracing logic can then be enabled in Montgomery form
270 bool convert_to_montgomery = (rng.next() % (havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY +
271 havoc_config.VAL_MUT_NON_MONTGOMERY_PROBABILITY)) <
272 havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY;
273 bool normalize = (rng.next() % (havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY +
274 havoc_config.VAL_MUT_NON_MONTGOMERY_PROBABILITY)) <
275 havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY;
276 uint256_t value_data;
277 // Conversion at the start
278#define MONT_CONVERSION \
279 if (convert_to_montgomery) { \
280 value_data = uint256_t(e.scalar.to_montgomery_form()); \
281 } else { \
282 value_data = uint256_t(e.scalar); \
283 }
284 // Inverse conversion at the end
285#define INV_MONT_CONVERSION \
286 if (convert_to_montgomery) { \
287 e.scalar = ScalarField(value_data).from_montgomery_form(); \
288 } else { \
289 e.scalar = ScalarField(value_data); \
290 }
291
292 // Pick the last value from the mutation distribution vector
293 const size_t mutation_type_count = havoc_config.value_mutation_distribution.size();
294 // Choose mutation
295 const size_t choice = rng.next() % havoc_config.value_mutation_distribution[mutation_type_count - 1];
296 if (choice < havoc_config.value_mutation_distribution[0]) {
297 // Delegate mutation to libfuzzer (bit/byte mutations, autodictionary, etc)
// NOTE(review): the MONT_CONVERSION / INV_MONT_CONVERSION invocations around
// the LLVMFuzzerMutate call (original lines 298 and 300) are elided in this
// listing; they shuttle e.scalar through value_data.
299 LLVMFuzzerMutate((uint8_t*)&value_data, sizeof(uint256_t), sizeof(uint256_t));
301 e.value = GroupElement::one() * e.scalar;
302 } else if (choice < havoc_config.value_mutation_distribution[1]) {
303 // Small addition/subtraction
304 if (convert_to_montgomery) {
305 e.scalar = e.scalar.to_montgomery_form();
306 }
307 auto extra = ScalarField(rng.next() & 0xff);
308
309 // With 50% probability we add/sub a small value
310 if (rng.next() & 1) {
311 auto switch_sign = static_cast<bool>(rng.next() & 1);
312 if (!switch_sign) {
313 e.scalar += extra;
314 e.value += GroupElement::one() * extra;
315 } else {
316 e.scalar -= extra;
317 e.value -= GroupElement::one() * extra;
318 }
319 } else {
320 // otherwise we multiply by a small value
321 e.scalar *= extra;
322 e.value *= extra;
323 }
324 if (normalize) {
325 e.value = e.value.normalize();
326 }
327 if (convert_to_montgomery) {
328 e.scalar = e.scalar.from_montgomery_form();
329 }
330 } else if (choice < havoc_config.value_mutation_distribution[2]) {
331 if (convert_to_montgomery) {
332 e.scalar = e.scalar.to_montgomery_form();
333 }
334 // Substitute scalar element with a special value
335 switch (rng.next() % 8) {
336 case 0:
337 e.scalar = ScalarField::zero();
338 break;
339 case 1:
340 e.scalar = ScalarField::one();
341 break;
342 case 2:
343 e.scalar = -ScalarField::one();
344 break;
345 case 3:
346 e.scalar = ScalarField::one().sqrt().second;
347 break;
348 case 4:
349 e.scalar = ScalarField::one().sqrt().second.invert();
350 break;
351 case 5:
352 e.scalar = ScalarField::get_root_of_unity(13);
353 break;
354 case 6:
355 e.scalar = ScalarField(2);
356 break;
357 case 7:
358 e.scalar = ScalarField((ScalarField::modulus - 1) / 2);
359 break;
360 default:
361 abort();
362 break;
363 }
364 if (convert_to_montgomery) {
365 e.scalar = e.scalar.to_montgomery_form();
366 }
367 e.value = GroupElement::one() * e.scalar;
368 }
369 // Return value
370 return e;
371 }
// Mutates a bare ScalarField value (used for MULTIPLY / BATCH_MUL scalars);
// same strategy as mutateGroupElement but with no paired curve point.
381 template <typename T>
382 inline static ScalarField mutateScalarElement(ScalarField e, T& rng, HavocSettings& havoc_config)
383 requires SimpleRng<T>
384 {
385 // With a certain probability, we apply changes to the Montgomery form, rather than the plain form. This
386 // has merit, since the computation is performed in montgomery form and comparisons are often performed
387 // in it, too.
388 // Libfuzzer comparison tracing logic can then be enabled in Montgomery form
389 bool convert_to_montgomery = (rng.next() % (havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY +
390 havoc_config.VAL_MUT_NON_MONTGOMERY_PROBABILITY)) <
391 havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY;
392 uint256_t value_data;
393 // Conversion at the start
394#define MONT_CONVERSION_SCALAR \
395 if (convert_to_montgomery) { \
396 value_data = uint256_t(e.to_montgomery_form()); \
397 } else { \
398 value_data = uint256_t(e); \
399 }
400 // Inverse conversion at the end
401#define INV_MONT_CONVERSION_SCALAR \
402 if (convert_to_montgomery) { \
403 e = ScalarField(value_data).from_montgomery_form(); \
404 } else { \
405 e = ScalarField(value_data); \
406 }
407
408 // Pick the last value from the mutation distribution vector
409 const size_t mutation_type_count = havoc_config.value_mutation_distribution.size();
410 // Choose mutation
411 const size_t choice = rng.next() % havoc_config.value_mutation_distribution[mutation_type_count - 1];
412 if (choice < havoc_config.value_mutation_distribution[0]) {
413 // Delegate mutation to libfuzzer (bit/byte mutations, autodictionary, etc)
// NOTE(review): the MONT_CONVERSION_SCALAR / INV_MONT_CONVERSION_SCALAR
// invocations around this call (original lines 414 and 416) are elided in
// this listing.
415 LLVMFuzzerMutate((uint8_t*)&value_data, sizeof(uint256_t), sizeof(uint256_t));
417 } else if (choice < havoc_config.value_mutation_distribution[1]) {
418 // Small addition/subtraction
419 if (convert_to_montgomery) {
420 e = e.to_montgomery_form();
421 }
422 auto extra = ScalarField(rng.next() & 0xff);
423
424 // With 50% probability we add/sub a small value
425 if (rng.next() & 1) {
426 auto switch_sign = static_cast<bool>(rng.next() & 1);
427 if (!switch_sign) {
428 e += extra;
429 } else {
430 e -= extra;
431 }
432 } else {
433 // otherwise we multiply by a small value
434 e *= extra;
435 }
436 if (convert_to_montgomery) {
437 e = e.from_montgomery_form();
438 }
439 } else if (choice < havoc_config.value_mutation_distribution[2]) {
440 if (convert_to_montgomery) {
441 e = e.to_montgomery_form();
442 }
443 // Substitute scalar element with a special value
444 // I think that zeros from mutateGroupElement are enough zeros produced
445 switch (rng.next() % 7) {
446 case 0:
447 e = ScalarField::one();
448 break;
449 case 1:
450 e = -ScalarField::one();
451 break;
452 case 2:
453 e = ScalarField::one().sqrt().second;
454 break;
455 case 3:
456 e = ScalarField::one().sqrt().second.invert();
457 break;
458 case 4:
459 e = ScalarField::get_root_of_unity(13);
460 break;
461 case 5:
462 e = ScalarField(2);
463 break;
464 case 6:
465 e = ScalarField((ScalarField::modulus - 1) / 2);
466 break;
467 default:
468 abort();
469 break;
470 }
471 if (convert_to_montgomery) {
472 e = e.to_montgomery_form();
473 }
474 }
475 // Return value
476 return e;
477 }
// Mutates an existing instruction in place: flips argument bytes with 50%
// probability each and delegates value mutation to the element/scalar
// mutators. NOTE(review): the function signature (original line 488,
// presumably `inline static Instruction mutateInstruction(Instruction
// instruction, T& rng, HavocSettings& havoc_config)`) is elided in this
// listing — verify against the full source.
487 template <typename T>
489 requires SimpleRng<T>
490 {
491#define PUT_RANDOM_BYTE_IF_LUCKY(variable) \
492 if (rng.next() & 1) { \
493 variable = rng.next() & 0xff; \
494 }
495 // Depending on instruction type...
496 switch (instruction.id) {
497 case OPCODE::CONSTANT:
498 case OPCODE::WITNESS:
500 // If it represents pushing a value on the stack with a 50% probability randomly sample a bit_range
501 // Maybe mutate the value
502 if (rng.next() & 1) {
503 instruction.arguments.element =
504 mutateGroupElement(instruction.arguments.element, rng, havoc_config);
505 }
506 break;
507
508 case OPCODE::DBL:
509 case OPCODE::NEG:
511 case OPCODE::SET:
512 case OPCODE::SET_INF:
513 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.twoArgs.in);
514 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.twoArgs.out);
515 break;
516#ifndef DISABLE_MULTIPLICATION
517 case OPCODE::MULTIPLY:
518 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.mulArgs.in);
519 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.mulArgs.out);
520 if (rng.next() & 1) {
521 instruction.arguments.mulArgs.scalar =
522 mutateScalarElement(instruction.arguments.mulArgs.scalar, rng, havoc_config);
523 }
524 break;
525#endif
526 case OPCODE::ADD:
527 case OPCODE::SUBTRACT:
528 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.threeArgs.in1);
529 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.threeArgs.in2);
530 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.threeArgs.out);
531 break;
// NOTE(review): the four-argument case label (original line 532) is elided in
// this listing.
533 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.fourArgs.in1);
534 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.fourArgs.in2);
535 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.fourArgs.in3);
536 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.fourArgs.out);
537 break;
538#ifndef DISABLE_BATCH_MUL
// NOTE(review): the BATCH_MUL case label (original line 539) is elided in
// this listing. With 50% probability the participant count is re-sampled,
// then random subsets of inputs and scalars are mutated.
540 if (rng.next() & 1) {
541 uint8_t mult_size0 =
543 static_cast<uint8_t>(rng.next() % ((MAXIMUM_MUL_ELEMENTS - MINIMUM_MUL_ELEMENTS) / 2));
544 uint8_t mult_size1 =
546 static_cast<uint8_t>(rng.next() % ((MAXIMUM_MUL_ELEMENTS - MINIMUM_MUL_ELEMENTS) / 2));
547 uint8_t mult_size =
548 mult_size0 +
549 mult_size1; // Sample the amount of batch mul participants from the binomial distribution
550 instruction.arguments.batchMulArgs.add_elements_count = mult_size;
551 }
552 if (instruction.arguments.batchMulArgs.add_elements_count && (rng.next() & 1)) {
553 size_t mut_count =
554 static_cast<uint8_t>(rng.next() % (instruction.arguments.batchMulArgs.add_elements_count));
555 for (size_t i = 0; i < mut_count; i++) {
556 size_t ind =
557 rng.next() % static_cast<size_t>(instruction.arguments.batchMulArgs.add_elements_count);
558 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.batchMulArgs.inputs[ind]);
559 }
560 }
561 if (instruction.arguments.batchMulArgs.add_elements_count && (rng.next() & 1)) {
562 size_t mut_count =
563 static_cast<uint8_t>(rng.next() % (instruction.arguments.batchMulArgs.add_elements_count));
564 for (size_t i = 0; i < mut_count; i++) {
565 size_t ind =
566 rng.next() % static_cast<size_t>(instruction.arguments.batchMulArgs.add_elements_count);
567 instruction.arguments.batchMulArgs.scalars[ind] =
568 mutateScalarElement(instruction.arguments.batchMulArgs.scalars[ind], rng, havoc_config);
569 }
570 }
571 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.batchMulArgs.output_index);
572 break;
573#endif
// NOTE(review): the RANDOMSEED case label (original line 574) is elided in
// this listing.
575 instruction.arguments.randomseed = rng.next();
576 break;
577 default:
578 abort(); // We missed some instructions in switch
579 return instruction;
580 }
581 return instruction;
582 }
583 };
584 // We use argsizes to both specify the size of data needed to parse the instruction and to signal that the
585 // instruction is enabled (if it is -1,it's disabled )
586 class ArgSizes {
587 public:
// Sizes are in bytes of the serialized argument payload; the opcode byte
// itself is written separately (see Parser::writeInstruction, which stores
// the id at Data[0] and the arguments from Data+1 onward).
588 static constexpr size_t CONSTANT = sizeof(typename Instruction::Element);
589 static constexpr size_t WITNESS = sizeof(typename Instruction::Element);
590 static constexpr size_t CONSTANT_WITNESS = sizeof(typename Instruction::Element);
591 static constexpr size_t DBL = 2;
592 static constexpr size_t NEG = 2;
593 static constexpr size_t ASSERT_EQUAL = 2;
594 static constexpr size_t SET = 2;
595 static constexpr size_t SET_INF = 2;
596 static constexpr size_t ADD = 3;
597 static constexpr size_t SUBTRACT = 3;
598 static constexpr size_t COND_ASSIGN = 4;
599#ifndef DISABLE_MULTIPLICATION
600 static constexpr size_t MULTIPLY = sizeof(typename Instruction::MulArgs);
601#endif
602#ifndef DISABLE_BATCH_MUL
603 static constexpr size_t BATCH_MUL = sizeof(typename Instruction::BatchMulArgs);
604#endif
605 static constexpr size_t RANDOMSEED = sizeof(uint32_t);
606 };
607
// NOTE(review): the class header (original lines ~607-613) is elided in this
// listing — these constants are presumably the per-opcode selection weights
// for instruction generation (class InstructionWeights), with _LIMIT capping
// the total. Verify against the full source.
614 public:
615 static constexpr size_t SET = 0;
616 static constexpr size_t RANDOMSEED = 0;
617
618 static constexpr size_t CONSTANT = 1;
619 static constexpr size_t WITNESS = 1;
620 static constexpr size_t CONSTANT_WITNESS = 1;
621 static constexpr size_t ADD = 1;
622 static constexpr size_t SUBTRACT = 1;
623 static constexpr size_t DBL = 1;
624 static constexpr size_t NEG = 1;
625 static constexpr size_t COND_ASSIGN = 1;
626
627#ifndef DISABLE_MULTIPLICATION
628 static constexpr size_t MULTIPLY = 2;
629#endif
630 static constexpr size_t ASSERT_EQUAL = 2;
631 static constexpr size_t SET_INF = 2;
632
633#ifndef DISABLE_BATCH_MUL
634 static constexpr size_t BATCH_MUL = 4;
635#endif
636 static constexpr size_t _LIMIT = 64;
637 };
642 class Parser {
643 public:
// Deserializes one instruction's arguments from a raw byte buffer (the opcode
// is a template parameter; Data points at the argument bytes only).
// NOTE(review): most case labels and the batch-mul element-count read/clamp
// (original lines 655-658, 664-668, 671-672, 675, 680, 687-691, 695, 705)
// are elided in this listing — verify against the full source.
651 template <typename Instruction::OPCODE opcode> inline static Instruction parseInstructionArgs(uint8_t* Data)
652 {
653 Instruction instr;
654 instr.id = static_cast<typename Instruction::OPCODE>(opcode);
655 switch (opcode) {
659 auto scalar = ScalarField::serialize_from_buffer(Data);
660 auto el = GroupElement::one() * scalar;
661 instr.arguments.element = typename Instruction::Element(scalar, el);
662 break;
663 }
669 instr.arguments.twoArgs = { .in = *Data, .out = *(Data + 1) };
670 break;
673 instr.arguments.threeArgs = { .in1 = *Data, .in2 = *(Data + 1), .out = *(Data + 2) };
674 break;
676 instr.arguments.fourArgs = { .in1 = *Data, .in2 = *(Data + 1), .in3 = *(Data + 2), .out = *(Data + 3) };
677 break;
678
679#ifndef DISABLE_MULTIPLICATION
681 instr.arguments.mulArgs.in = *Data;
682 instr.arguments.mulArgs.out = *(Data + 1);
683 instr.arguments.mulArgs.scalar = ScalarField::serialize_from_buffer(Data + 2);
684 break;
685#endif
686#ifndef DISABLE_BATCH_MUL
688 // In case of LLVM native instruction mutator
692 }
693 instr.arguments.batchMulArgs.output_index = *(Data + 1);
694
// `n` is the (clamped) add_elements_count; its declaration is elided here.
696 memcpy(instr.arguments.batchMulArgs.inputs, Data + 2, n);
697
698 size_t offset = n + 2;
699 for (size_t i = 0; i < n; i++) {
700 instr.arguments.batchMulArgs.scalars[i] = ScalarField::serialize_from_buffer(Data + offset);
701 offset += sizeof(ScalarField);
702 }
703 }
704#endif
706 memcpy(&instr.arguments.randomseed, Data, sizeof(uint32_t));
707 break;
708 default:
709 abort(); // We missed some instructions in switch
710 }
711 return instr;
712 }
// Serializes an instruction into a byte buffer: opcode id at Data[0], then
// the arguments from Data+1 onward (mirror image of parseInstructionArgs,
// which receives the argument bytes with the opcode already stripped —
// NOTE(review): confirm the caller-side offset convention).
// NOTE(review): the case labels (original lines 725-727, 730-734, 738-739,
// 744, 751, 758, 773) are elided in this listing.
720 template <typename Instruction::OPCODE instruction_opcode>
721 inline static void writeInstruction(Instruction& instruction, uint8_t* Data)
722 {
723 *Data = instruction.id;
724 switch (instruction_opcode) {
728 ScalarField::serialize_to_buffer(instruction.arguments.element.scalar, Data + 1);
729 break;
735 *(Data + 1) = instruction.arguments.twoArgs.in;
736 *(Data + 2) = instruction.arguments.twoArgs.out;
737 break;
740 *(Data + 1) = instruction.arguments.threeArgs.in1;
741 *(Data + 2) = instruction.arguments.threeArgs.in2;
742 *(Data + 3) = instruction.arguments.threeArgs.out;
743 break;
745 *(Data + 1) = instruction.arguments.fourArgs.in1;
746 *(Data + 2) = instruction.arguments.fourArgs.in2;
747 *(Data + 3) = instruction.arguments.fourArgs.in3;
748 *(Data + 4) = instruction.arguments.fourArgs.out;
749 break;
750#ifndef DISABLE_MULTIPLICATION
752 *(Data + 1) = instruction.arguments.mulArgs.in;
753 *(Data + 2) = instruction.arguments.mulArgs.out;
754 ScalarField::serialize_to_buffer(instruction.arguments.mulArgs.scalar, Data + 3);
755 break;
756#endif
757#ifndef DISABLE_BATCH_MUL
759 *(Data + 1) = instruction.arguments.batchMulArgs.add_elements_count;
760 *(Data + 2) = instruction.arguments.batchMulArgs.output_index;
761
762 size_t n = instruction.arguments.batchMulArgs.add_elements_count;
763
764 memcpy(Data + 3, instruction.arguments.batchMulArgs.inputs, n);
765 size_t offset = n + 3;
766 for (size_t i = 0; i < n; i++) {
767 ScalarField::serialize_to_buffer(instruction.arguments.batchMulArgs.scalars[i], Data + offset);
768 offset += sizeof(ScalarField);
769 }
770 break;
771 }
772#endif
774 memcpy(Data + 1, &instruction.arguments.randomseed, sizeof(uint32_t));
775 break;
776 default:
777 abort(); // We missed some instructions in switch
778 }
779 return;
780 };
781 };
787 private:
788 static bool_t construct_predicate(Builder* builder, const bool predicate)
789 {
790 /* The context field of a predicate can be nullptr;
791 * in that case, the function that handles the predicate
792 * will use the context of another input parameter
793 */
794 const bool predicate_is_const = static_cast<bool>(VarianceRNG.next() & 1);
795 if (predicate_is_const) {
796 const bool predicate_has_ctx = static_cast<bool>(VarianceRNG.next() % 2);
797#ifdef FUZZING_SHOW_INFORMATION
798 std::cout << "bool_t(" << (predicate_has_ctx ? "&builder," : "nullptr,")
799 << (predicate ? "true)" : "false)");
800#endif
801 return bool_t(predicate_has_ctx ? builder : nullptr, predicate);
802 }
803#ifdef FUZZING_SHOW_INFORMATION
804 std::cout << "bool_t(witness_t(&builder, " << (predicate ? "true));" : "false))");
805#endif
806 return bool_t(witness_t(builder, predicate));
807 }
808
// Accessor for the stored cycle_group that randomly routes through a fresh
// cycle_group_t copy-construction, so both the direct and the copied paths
// get exercised. NOTE(review): the signature line (original line 809,
// presumably `cycle_group_t cg()`) is elided in this listing.
810 {
811 const bool reconstruct = static_cast<bool>(VarianceRNG.next() % 2);
812 if (!reconstruct) {
813 return this->cycle_group;
814 }
815 return cycle_group_t(this->cycle_group);
816 }
817
818 public:
822
823 ExecutionHandler() = default;
825 : base_scalar(s)
826 , base(g)
827 , cycle_group(w_g)
828 {}
829
// Addition handler: computes the reference result (base_scalar/base) with
// native arithmetic, then randomly picks among equivalent circuit formulas —
// dbl() when the operands are equal, set_point_at_infinity() when they are
// opposite, and (checked_)unconditional_add / operator+ otherwise — to widen
// coverage. When either operand is already at infinity only the safe
// operator+ variants (options 4/5) are used, since unconditional_add assumes
// finite points. NOTE(review): the signature line (original line 830) is
// elided in this listing.
831 {
832 ScalarField base_scalar_res = this->base_scalar + other.base_scalar;
833 GroupElement base_res = this->base + other.base;
834
835 if (other.cg().get_value() == this->cg().get_value()) {
836 uint8_t dbl_path = VarianceRNG.next() % 4;
837 switch (dbl_path) {
838 case 0:
839#ifdef FUZZING_SHOW_INFORMATION
840 std::cout << "left.dbl" << std::endl;
841#endif
842 return ExecutionHandler(base_scalar_res, base_res, this->cg().dbl());
843 case 1:
844#ifdef FUZZING_SHOW_INFORMATION
845 std::cout << "right.dbl" << std::endl;
846#endif
847 return ExecutionHandler(base_scalar_res, base_res, other.cg().dbl());
848 case 2:
849 return ExecutionHandler(base_scalar_res, base_res, this->cg() + other.cg());
850 case 3:
851 return ExecutionHandler(base_scalar_res, base_res, other.cg() + this->cg());
852 }
853 } else if (other.cg().get_value() == -this->cg().get_value()) {
854 uint8_t inf_path = VarianceRNG.next() % 4;
855 cycle_group_t res;
856 switch (inf_path) {
857 case 0:
858#ifdef FUZZING_SHOW_INFORMATION
859 std::cout << "left.set_point_at_infinity(";
860#endif
861 res = this->cg();
862 res.set_point_at_infinity(this->construct_predicate(builder, true));
863#ifdef FUZZING_SHOW_INFORMATION
864 std::cout << ");" << std::endl;
865#endif
866 return ExecutionHandler(base_scalar_res, base_res, res);
867 case 1:
868#ifdef FUZZING_SHOW_INFORMATION
869 std::cout << "right.set_point_at_infinity(";
870#endif
871 res = other.cg();
872 res.set_point_at_infinity(this->construct_predicate(builder, true));
873#ifdef FUZZING_SHOW_INFORMATION
874 std::cout << ");" << std::endl;
875#endif
876 return ExecutionHandler(base_scalar_res, base_res, res);
877 case 2:
878 return ExecutionHandler(base_scalar_res, base_res, this->cg() + other.cg());
879 case 3:
880 return ExecutionHandler(base_scalar_res, base_res, other.cg() + this->cg());
881 }
882 }
883 bool smth_inf = this->cycle_group.is_point_at_infinity().get_value() ||
884 other.cycle_group.is_point_at_infinity().get_value();
885 uint8_t add_option = smth_inf ? 4 + (VarianceRNG.next() % 2) : VarianceRNG.next() % 6;
886
887 switch (add_option) {
888 case 0:
889#ifdef FUZZING_SHOW_INFORMATION
890 std::cout << "left.unconditional_add(right);" << std::endl;
891#endif
892 return ExecutionHandler(base_scalar_res, base_res, this->cg().unconditional_add(other.cg()));
893 case 1:
894#ifdef FUZZING_SHOW_INFORMATION
895 std::cout << "right.unconditional_add(left);" << std::endl;
896#endif
897 return ExecutionHandler(base_scalar_res, base_res, other.cg().unconditional_add(this->cg()));
898 case 2:
899#ifdef FUZZING_SHOW_INFORMATION
900 std::cout << "left.checked_unconditional_add(right);" << std::endl;
901#endif
902 return ExecutionHandler(base_scalar_res, base_res, this->cg().checked_unconditional_add(other.cg()));
903 case 3:
904#ifdef FUZZING_SHOW_INFORMATION
905 std::cout << "right.checked_unconditional_add(left);" << std::endl;
906#endif
907 return ExecutionHandler(base_scalar_res, base_res, other.cg().checked_unconditional_add(this->cg()));
908 case 4:
909 return ExecutionHandler(base_scalar_res, base_res, this->cg() + other.cg());
910 case 5:
911 return ExecutionHandler(base_scalar_res, base_res, other.cg() + this->cg());
912 }
913 return {};
914 }
915
// Subtraction handler, mirroring the addition handler: dbl-based formulas
// when the operands are opposite (this - (-this) == 2*this), explicit
// point-at-infinity when they are equal, and (checked_)unconditional_subtract
// / operator- otherwise; only operator- (option 2) is used when either point
// is already at infinity. NOTE(review): the signature line (original line
// 916) is elided in this listing.
917 {
918 ScalarField base_scalar_res = this->base_scalar - other.base_scalar;
919 GroupElement base_res = this->base - other.base;
920
921 if (other.cg().get_value() == -this->cg().get_value()) {
922 uint8_t dbl_path = VarianceRNG.next() % 3;
923
924 switch (dbl_path) {
925 case 0:
926#ifdef FUZZING_SHOW_INFORMATION
927 std::cout << "left.dbl();" << std::endl;
928#endif
929 return ExecutionHandler(base_scalar_res, base_res, this->cg().dbl());
930 case 1:
931#ifdef FUZZING_SHOW_INFORMATION
932 std::cout << "-right.dbl();" << std::endl;
933#endif
934 return ExecutionHandler(base_scalar_res, base_res, -other.cg().dbl());
935 case 2:
936 return ExecutionHandler(base_scalar_res, base_res, this->cg() - other.cg());
937 }
938 } else if (other.cg().get_value() == this->cg().get_value()) {
939 uint8_t inf_path = VarianceRNG.next() % 3;
940 cycle_group_t res;
941
942 switch (inf_path) {
943 case 0:
944#ifdef FUZZING_SHOW_INFORMATION
945 std::cout << "left.set_point_at_infinity(";
946#endif
947 res = this->cg();
948 res.set_point_at_infinity(this->construct_predicate(builder, true));
949#ifdef FUZZING_SHOW_INFORMATION
950 std::cout << ");" << std::endl;
951#endif
952 return ExecutionHandler(base_scalar_res, base_res, res);
953 case 1:
954#ifdef FUZZING_SHOW_INFORMATION
955 std::cout << "right.set_point_at_infinity(";
956#endif
957 res = other.cg();
958 res.set_point_at_infinity(this->construct_predicate(builder, true));
959#ifdef FUZZING_SHOW_INFORMATION
960 std::cout << ");" << std::endl;
961#endif
962 return ExecutionHandler(base_scalar_res, base_res, res);
963 case 2:
964 return ExecutionHandler(base_scalar_res, base_res, this->cg() - other.cg());
965 }
966 }
967 bool smth_inf = this->cycle_group.is_point_at_infinity().get_value() ||
968 other.cycle_group.is_point_at_infinity().get_value();
969 uint8_t add_option = smth_inf ? 2 : VarianceRNG.next() % 3;
970
971 switch (add_option) {
972 case 0:
973#ifdef FUZZING_SHOW_INFORMATION
974 std::cout << "left.unconditional_subtract(right);" << std::endl;
975#endif
976 return ExecutionHandler(base_scalar_res, base_res, this->cg().unconditional_subtract(other.cg()));
977 case 1:
978#ifdef FUZZING_SHOW_INFORMATION
979 std::cout << "left.checked_unconditional_subtract(right);" << std::endl;
980#endif
981 return ExecutionHandler(
982 base_scalar_res, base_res, this->cg().checked_unconditional_subtract(other.cg()));
983 case 2:
984 return ExecutionHandler(base_scalar_res, base_res, this->cg() - other.cg());
985 }
986 return {};
987 }
988
// Scalar-multiplication handler: wraps the multiplier in a cycle_scalar_t
// (randomly as a witness or a constant) and multiplies both the reference
// values and the circuit value. NOTE(review): the signature line (original
// line 989, presumably taking `Builder* builder, const ScalarField&
// multiplier`) is elided in this listing.
990 {
991 bool is_witness = VarianceRNG.next() & 1;
992#ifdef FUZZING_SHOW_INFORMATION
993 std::cout << " * cycle_scalar_t" << (is_witness ? "::from_witness(&builder, " : "(") << "ScalarField(\""
994 << multiplier << "\"));";
995#endif
996 auto scalar = is_witness ? cycle_scalar_t::from_witness(builder, multiplier) : cycle_scalar_t(multiplier);
997 return ExecutionHandler(this->base_scalar * multiplier, this->base * multiplier, this->cg() * scalar);
998 }
999
// Batch-multiplication handler. NOTE(review): the function name line
// (original line 1000) and the declarations of the local vectors to_add_cg /
// to_mul_cs (original lines 1004 and 1006) are elided in this listing.
1001 const std::vector<ExecutionHandler>& to_add,
1002 const std::vector<ScalarField>& to_mul)
1003 {
1005 to_add_cg.reserve(to_add.size());
1007 to_mul_cs.reserve(to_mul.size());
1008
// The reference accumulator starts at G (not zero) and subtracts G at the
// end, so a zero-sum batch is detectable as the point at infinity below
// without tripping on intermediate infinities.
1009 GroupElement accumulator_cg = GroupElement::one();
1010 ScalarField accumulator_cs = ScalarField::zero();
1011
1012 for (size_t i = 0; i < to_add.size(); i++) {
1013 to_add_cg.push_back(to_add[i].cycle_group);
1014
1015 bool is_witness = VarianceRNG.next() & 1;
1016#ifdef FUZZING_SHOW_INFORMATION
1017 std::cout << "cycle_scalar_t" << (is_witness ? "::from_witness(&builder, " : "(") << "ScalarField(\""
1018 << to_mul[i] << "\")), ";
1019#endif
1020 auto scalar = is_witness ? cycle_scalar_t::from_witness(builder, to_mul[i]) : cycle_scalar_t(to_mul[i]);
1021 to_mul_cs.push_back(scalar);
1022
1023 accumulator_cg += to_add[i].base * to_mul[i];
1024 accumulator_cs += to_add[i].base_scalar * to_mul[i];
1025 }
1026 accumulator_cg -= GroupElement::one();
1027
1028 // Handle the linearly dependant case that is
1029 // assumed to not happen in real life
1030 if (accumulator_cg.is_point_at_infinity()) {
1031 to_add_cg.push_back(cycle_group_t(GroupElement::one()));
1032 to_mul_cs.push_back(cycle_scalar_t(ScalarField::one()));
1033 accumulator_cg += GroupElement::one();
1034 accumulator_cs += ScalarField::one();
1035 }
1036
1037 auto batch_mul_res = cycle_group_t::batch_mul(to_add_cg, to_mul_cs);
1038 return ExecutionHandler(accumulator_cs, accumulator_cg, batch_mul_res);
1039 }
1040
// In-place negation: negates the reference scalar, the reference point and
// the circuit point together. NOTE(review): the signature line (original
// line 1041) is elided in this listing.
1042 {
1043 this->base_scalar = -this->base_scalar;
1044 this->base = -this->base;
1045 this->cycle_group = -this->cycle_group;
1046 }
1047
// Doubling: 2*scalar for the reference scalar, dbl() on both the reference
// point and the circuit point. NOTE(review): the signature line (original
// line 1048) is elided in this listing.
1049 {
1050 return ExecutionHandler(this->base_scalar + this->base_scalar, this->base.dbl(), this->cg().dbl());
1051 }
1052
// Conditional assignment: the reference side selects with a plain ternary,
// the circuit side goes through cycle_group_t::conditional_assign with a
// randomly-constructed predicate. NOTE(review): the signature line (original
// line 1053) is elided in this listing.
1054 {
1055 ScalarField new_base_scalar = predicate ? other.base_scalar : this->base_scalar;
1056 GroupElement new_base = predicate ? other.base : this->base;
1057 cycle_group_t new_cycle_group =
1058 cycle_group_t::conditional_assign(construct_predicate(builder, predicate), other.cg(), this->cg());
1059 return ExecutionHandler(new_base_scalar, new_base, new_cycle_group);
1060 }
1061
// assert_equal check: skipped when both sides are circuit constants (nothing
// to constrain); otherwise adds the reference difference to `other` so the
// assertion compares equal values by construction. NOTE(review): the
// signature line (original line 1062) is elided in this listing.
1063 {
1064 if (other.cg().is_constant()) {
1065 if (this->cg().is_constant()) {
1066 // Assert equal does nothing in this case
1067 return;
1068 }
1069 }
1070 auto to_add = cycle_group_t::from_witness(builder, AffineElement(this->base - other.base));
1071 auto to_ae = other.cg() + to_add;
1072 this->cg().assert_equal(to_ae);
1073 }
1074
1075 /* Explicit re-instantiation using the various cycle_group_t constructors */
// NOTE(review): the signature line (original line 1076) is elided in this
// listing.
1077 {
1078 uint32_t switch_case = VarianceRNG.next() % 4;
1079 switch (switch_case) {
1080 case 0:
1081#ifdef FUZZING_SHOW_INFORMATION
1082 std::cout << "cycle_group_t(" << std::endl;
1083#endif
1084 /* construct via cycle_group_t */
1085 return ExecutionHandler(this->base_scalar, this->base, cycle_group_t(this->cycle_group));
1086 case 1: {
1087#ifdef FUZZING_SHOW_INFORMATION
// NOTE(review): this trace looks inverted — it prints "from_witness" when the
// group IS constant and "from_constant_witness" when it is a witness, while
// the code below does the opposite; debug output only, but verify.
1088 std::cout << "cycle_group_t::from" << (this->cycle_group.is_constant() ? "" : "_constant")
1089 << "_witness(&builder, e.get_value());";
1090#endif
1091 /* construct via AffineElement */
1092 AffineElement e = this->cycle_group.get_value();
1093 if (this->cycle_group.is_constant()) {
1094 return ExecutionHandler(
1095 this->base_scalar, this->base, cycle_group_t::from_constant_witness(builder, e));
1096 }
1097 return ExecutionHandler(this->base_scalar, this->base, cycle_group_t::from_witness(builder, e));
1098 }
1099 case 2: {
1100#ifdef FUZZING_SHOW_INFORMATION
1101 std::cout << "tmp = el;" << std::endl;
1102 std::cout << "res = cycle_group_t(tmp);" << std::endl;
1103#endif
1104 /* Invoke assignment operator */
1105 cycle_group_t cg_new(builder);
1106 cg_new = this->cg();
1107 return ExecutionHandler(this->base_scalar, this->base, cycle_group_t(cg_new));
1108 }
1109 case 3: {
1110#ifdef FUZZING_SHOW_INFORMATION
1111 std::cout << "tmp = el;" << std::endl;
1112 std::cout << "res = cycle_group_t(std::move(tmp));" << std::endl;
1113#endif
1114 /* Invoke move constructor */
1115 cycle_group_t cg_copy = this->cg();
1116 return ExecutionHandler(this->base_scalar, this->base, cycle_group_t(std::move(cg_copy)));
1117 }
1118 default:
1119 abort();
1120 }
1121 }
1122 /* Explicit re-instantiation using the various cycle_group_t constructors + set inf in the end*/
1124 {
1125 auto res = this->set(builder);
1126 const bool set_inf =
1127 res.cycle_group.is_point_at_infinity().get_value() ? true : static_cast<bool>(VarianceRNG.next() & 1);
1128#ifdef FUZZING_SHOW_INFORMATION
1129 std::cout << "el.set_point_at_infinty(";
1130#endif
1131 res.set_point_at_infinity(this->construct_predicate(builder, set_inf));
1132#ifdef FUZZING_SHOW_INFORMATION
1133 std::cout << ");" << std::endl;
1134#endif
1135 return res;
1136 }
1137
1146 static inline size_t execute_CONSTANT(Builder* builder,
1149 {
1150 (void)builder;
1151 stack.push_back(
1152 ExecutionHandler(instruction.arguments.element.scalar,
1153 instruction.arguments.element.value,
1154 cycle_group_t(static_cast<AffineElement>(instruction.arguments.element.value))));
1155#ifdef FUZZING_SHOW_INFORMATION
1156 std::cout << "auto c" << stack.size() - 1 << " = cycle_group_t(ae(\""
1157 << instruction.arguments.element.scalar << "\"));" << std::endl;
1158#endif
1159 return 0;
1160 };
1161
1170 static inline size_t execute_WITNESS(Builder* builder,
1173 {
1174 stack.push_back(ExecutionHandler(
1175 instruction.arguments.element.scalar,
1176 instruction.arguments.element.value,
1177 cycle_group_t::from_witness(builder, static_cast<AffineElement>(instruction.arguments.element.value))));
1178#ifdef FUZZING_SHOW_INFORMATION
1179 std::cout << "auto w" << stack.size() - 1 << " = cycle_group_t::from_witness(&builder, ae(\""
1180 << instruction.arguments.element.scalar << "\"));" << std::endl;
1181#endif
1182 return 0;
1183 }
1184
1197 {
1198 stack.push_back(
1199 ExecutionHandler(instruction.arguments.element.scalar,
1200 instruction.arguments.element.value,
1202 builder, static_cast<AffineElement>(instruction.arguments.element.value))));
1203#ifdef FUZZING_SHOW_INFORMATION
1204 std::cout << "auto cw" << stack.size() - 1 << " = cycle_group_t::from_constant_witness(&builder, ae(\""
1205 << instruction.arguments.element.scalar << "\"));" << std::endl;
1206#endif
1207 return 0;
1208 }
1209
1218 static inline size_t execute_DBL(Builder* builder,
1221 {
1222 (void)builder;
1223 if (stack.size() == 0) {
1224 return 1;
1225 }
1226 size_t first_index = instruction.arguments.twoArgs.in % stack.size();
1227 size_t output_index = instruction.arguments.twoArgs.out;
1228
1229#ifdef FUZZING_SHOW_INFORMATION
1230 PREP_SINGLE_ARG(stack, first_index, output_index)
1231 std::cout << out << " = " << rhs << ".dbl();" << std::endl;
1232#endif
1233 ExecutionHandler result;
1234 result = stack[first_index].dbl();
1235 // If the output index is larger than the number of elements in stack, append
1236 if (output_index >= stack.size()) {
1237 stack.push_back(result);
1238 } else {
1239 stack[output_index] = result;
1240 }
1241 return 0;
1242 };
1243
1252 static inline size_t execute_NEG(Builder* builder,
1255 {
1256 (void)builder;
1257 if (stack.size() == 0) {
1258 return 1;
1259 }
1260 size_t first_index = instruction.arguments.twoArgs.in % stack.size();
1261 size_t output_index = instruction.arguments.twoArgs.out;
1262
1263#ifdef FUZZING_SHOW_INFORMATION
1264 PREP_SINGLE_ARG(stack, first_index, output_index)
1265 std::cout << out << " = -" << rhs << ";" << std::endl;
1266#endif
1267 ExecutionHandler result;
1268 result = -stack[first_index];
1269 // If the output index is larger than the number of elements in stack, append
1270 if (output_index >= stack.size()) {
1271 stack.push_back(result);
1272 } else {
1273 stack[output_index] = result;
1274 }
1275 return 0;
1276 };
1277
1286 static inline size_t execute_ASSERT_EQUAL(Builder* builder,
1289 {
1290 if (stack.size() == 0) {
1291 return 1;
1292 }
1293 size_t first_index = instruction.arguments.twoArgs.in % stack.size();
1294 size_t second_index = instruction.arguments.twoArgs.out % stack.size();
1295
1296#ifdef FUZZING_SHOW_INFORMATION
1297 PREP_TWO_ARG(stack, first_index, second_index, 0)
1298 std::cout << "assert_equal(" << lhs << ", " << rhs << ", builder);" << std::endl;
1299#endif
1300 stack[first_index].assert_equal(builder, stack[second_index]);
1301 return 0;
1302 };
1303
1312 static inline size_t execute_SET(Builder* builder,
1315 {
1316 (void)builder;
1317 if (stack.size() == 0) {
1318 return 1;
1319 }
1320 size_t first_index = instruction.arguments.twoArgs.in % stack.size();
1321 size_t output_index = instruction.arguments.twoArgs.out;
1322 ExecutionHandler result;
1323
1324#ifdef FUZZING_SHOW_INFORMATION
1325 PREP_SINGLE_ARG(stack, first_index, output_index)
1326 std::cout << out << " = ";
1327#endif
1328 result = stack[first_index].set(builder);
1329#ifdef FUZZING_SHOW_INFORMATION
1330 std::cout << rhs << ");" << std::endl;
1331#endif
1332 // If the output index is larger than the number of elements in stack, append
1333 if (output_index >= stack.size()) {
1334 stack.push_back(result);
1335 } else {
1336 stack[output_index] = result;
1337 }
1338 return 0;
1339 };
1340
1349 static inline size_t execute_SET_INF(Builder* builder,
1352 {
1353 (void)builder;
1354 if (stack.size() == 0) {
1355 return 1;
1356 }
1357 size_t first_index = instruction.arguments.twoArgs.in % stack.size();
1358 size_t output_index = instruction.arguments.twoArgs.out;
1359 ExecutionHandler result;
1360
1361#ifdef FUZZING_SHOW_INFORMATION
1362 PREP_SINGLE_ARG(stack, first_index, output_index)
1363 std::cout << out << " = " << rhs << std::endl;
1364#endif
1365 result = stack[first_index].set_inf(builder);
1366 // If the output index is larger than the number of elements in stack, append
1367 if (output_index >= stack.size()) {
1368 stack.push_back(result);
1369 } else {
1370 stack[output_index] = result;
1371 }
1372 return 0;
1373 };
1374
1383 static inline size_t execute_ADD(Builder* builder,
1386 {
1387 (void)builder;
1388 if (stack.size() == 0) {
1389 return 1;
1390 }
1391 size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
1392 size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
1393 size_t output_index = instruction.arguments.threeArgs.out;
1394
1395#ifdef FUZZING_SHOW_INFORMATION
1396 PREP_TWO_ARG(stack, first_index, second_index, output_index)
1397 std::cout << out << " = " << lhs << " + " << rhs << ";" << std::endl;
1398#endif
1399 ExecutionHandler result;
1400 result = stack[first_index].operator_add(builder, stack[second_index]);
1401 // If the output index is larger than the number of elements in stack, append
1402 if (output_index >= stack.size()) {
1403 stack.push_back(result);
1404 } else {
1405 stack[output_index] = result;
1406 }
1407 return 0;
1408 };
1409
1418 static inline size_t execute_SUBTRACT(Builder* builder,
1421 {
1422 (void)builder;
1423 if (stack.size() == 0) {
1424 return 1;
1425 }
1426 size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
1427 size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
1428 size_t output_index = instruction.arguments.threeArgs.out;
1429
1430#ifdef FUZZING_SHOW_INFORMATION
1431 PREP_TWO_ARG(stack, first_index, second_index, output_index)
1432 std::cout << out << " = " << lhs << " - " << rhs << ";" << std::endl;
1433#endif
1434 ExecutionHandler result;
1435 result = stack[first_index].operator_sub(builder, stack[second_index]);
1436 // If the output index is larger than the number of elements in stack, append
1437 if (output_index >= stack.size()) {
1438 stack.push_back(result);
1439 } else {
1440 stack[output_index] = result;
1441 }
1442 return 0;
1443 };
1444
1453 static inline size_t execute_COND_ASSIGN(Builder* builder,
1456 {
1457 (void)builder;
1458 if (stack.size() == 0) {
1459 return 1;
1460 }
1461 size_t first_index = instruction.arguments.fourArgs.in1 % stack.size();
1462 size_t second_index = instruction.arguments.fourArgs.in2 % stack.size();
1463 size_t output_index = instruction.arguments.fourArgs.out % stack.size();
1464 bool predicate = instruction.arguments.fourArgs.in3 % 2;
1465
1466 ExecutionHandler result;
1467
1468#ifdef FUZZING_SHOW_INFORMATION
1469 PREP_TWO_ARG(stack, first_index, second_index, output_index)
1470 std::cout << out << " = cycle_group_t::conditional_assign(";
1471#endif
1472 result = stack[first_index].conditional_assign(builder, stack[second_index], predicate);
1473#ifdef FUZZING_SHOW_INFORMATION
1474 std::cout << rhs << ", " << lhs << ");" << std::endl;
1475#endif
1476 // If the output index is larger than the number of elements in stack, append
1477 if (output_index >= stack.size()) {
1478 stack.push_back(result);
1479 } else {
1480 stack[output_index] = result;
1481 }
1482 return 0;
1483 };
1484
1493 static inline size_t execute_MULTIPLY(Builder* builder,
1496 {
1497
1498 if (stack.size() == 0) {
1499 return 1;
1500 }
1501 size_t first_index = instruction.arguments.mulArgs.in % stack.size();
1502 size_t output_index = instruction.arguments.mulArgs.out;
1503 ScalarField scalar = instruction.arguments.mulArgs.scalar;
1504
1505#ifdef FUZZING_SHOW_INFORMATION
1506 PREP_SINGLE_ARG(stack, first_index, output_index)
1507 std::cout << out << " = " << rhs << std::endl;
1508#endif
1509 ExecutionHandler result;
1510 result = stack[first_index].mul(builder, scalar);
1511 // If the output index is larger than the number of elements in stack, append
1512 if (output_index >= stack.size()) {
1513 stack.push_back(result);
1514 } else {
1515 stack[output_index] = result;
1516 }
1517 return 0;
1518 };
1519
1528 static inline size_t execute_BATCH_MUL(Builder* builder,
1531 {
1532 (void)builder;
1533 if (stack.size() == 0) {
1534 return 1;
1535 }
1537 std::vector<ScalarField> to_mul;
1538 for (size_t i = 0; i < instruction.arguments.batchMulArgs.add_elements_count; i++) {
1539 to_add.push_back(stack[(size_t)instruction.arguments.batchMulArgs.inputs[i] % stack.size()]);
1540 to_mul.push_back(instruction.arguments.batchMulArgs.scalars[i]);
1541 }
1542 size_t output_index = (size_t)instruction.arguments.batchMulArgs.output_index;
1543
1544#ifdef FUZZING_SHOW_INFORMATION
1545 std::string res = "";
1546 bool is_const = true;
1547 for (size_t i = 0; i < instruction.arguments.batchMulArgs.add_elements_count; i++) {
1548 size_t idx = instruction.arguments.batchMulArgs.inputs[i] % stack.size();
1549 std::string el = stack[idx].cycle_group.is_constant() ? "c" : "w";
1550 el += std::to_string(idx);
1551 res += el + ", ";
1552 is_const &= stack[idx].cycle_group.is_constant();
1553 }
1554 std::string out = is_const ? "c" : "w";
1555 out = ((output_index >= stack.size()) ? "auto " : "") + out;
1556 out += std::to_string(output_index >= stack.size() ? stack.size() : output_index);
1557 std::cout << out << " = cycle_group_t::batch_mul({" << res << "}, {";
1558#endif
1559 auto result = ExecutionHandler::batch_mul(builder, to_add, to_mul);
1560
1561#ifdef FUZZING_SHOW_INFORMATION
1562 std::cout << "});" << std::endl;
1563#endif
1564 // If the output index is larger than the number of elements in stack, append
1565 if (output_index >= stack.size()) {
1566 stack.push_back(result);
1567 } else {
1568 stack[output_index] = result;
1569 }
1570 return 0;
1571 };
1572
1581 static inline size_t execute_RANDOMSEED(Builder* builder,
1584 {
1585 (void)builder;
1586 (void)stack;
1587
1588 VarianceRNG.reseed(instruction.arguments.randomseed);
1589 return 0;
1590 };
1591 };
1592
1597
1608 {
1609 (void)builder;
1610 for (size_t i = 0; i < stack.size(); i++) {
1611 auto element = stack[i];
1612 if (element.cycle_group.get_value() != AffineElement(element.base)) {
1613 std::cerr << "Failed at " << i << " with actual value " << AffineElement(element.base)
1614 << " and value in CycleGroup " << element.cycle_group.get_value() << std::endl;
1615 return false;
1616 }
1617 if ((AffineElement::one() * element.base_scalar) != AffineElement(element.base)) {
1618 std::cerr << "Failed at " << i << " with actual mul value " << element.base
1619 << " and value in scalar * CG " << element.cycle_group.get_value() * element.base_scalar
1620 << std::endl;
1621 return false;
1622 }
1623
1624 bool fake_standardized = element.cycle_group.is_standard();
1625 fake_standardized &= element.cycle_group.is_point_at_infinity().get_value();
1626 fake_standardized &= (element.cycle_group.x.get_value() != 0) || (element.cycle_group.y.get_value() != 0);
1627 if (fake_standardized) {
1628 std::cerr << "Failed at " << i << " with value claimed to be standard: (0, 0) but the actual value is ("
1629 << element.cycle_group.x.get_value() << ", " << element.cycle_group.y.get_value() << ")"
1630 << std::endl;
1631 return false;
1632 }
1633 }
1634 return true;
1635 }
1636};
1637
1638#ifdef HAVOC_TESTING
1639
1640extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv)
1641{
1642 (void)argc;
1643 (void)argv;
1644 // These are the settings, optimized for the safeuint class (under them, fuzzer reaches maximum expected
1645 // coverage in 40 seconds)
1646 fuzzer_havoc_settings = HavocSettings{ .GEN_LLVM_POST_MUTATION_PROB = 30, // Out of 200
1647 .GEN_MUTATION_COUNT_LOG = 5, // -Fully checked
1648 .GEN_STRUCTURAL_MUTATION_PROBABILITY = 300, // Fully checked
1649 .GEN_VALUE_MUTATION_PROBABILITY = 700, // Fully checked
1650 .ST_MUT_DELETION_PROBABILITY = 100, // Fully checked
1651 .ST_MUT_DUPLICATION_PROBABILITY = 80, // Fully checked
1652 .ST_MUT_INSERTION_PROBABILITY = 120, // Fully checked
1653 .ST_MUT_MAXIMUM_DELETION_LOG = 6, // 2 because of limit
1654 .ST_MUT_MAXIMUM_DUPLICATION_LOG = 2, // -Fully checked
1655 .ST_MUT_SWAP_PROBABILITY = 50, // Fully checked
1656 .VAL_MUT_LLVM_MUTATE_PROBABILITY = 250, // Fully checked
1657 .VAL_MUT_MONTGOMERY_PROBABILITY = 130, // Fully checked
1658 .VAL_MUT_NON_MONTGOMERY_PROBABILITY = 50, // Fully checked
1659 .VAL_MUT_SMALL_ADDITION_PROBABILITY = 110, // Fully checked
1660 .VAL_MUT_SPECIAL_VALUE_PROBABILITY = 130, // Fully checked
1661 .structural_mutation_distribution = {},
1662 .value_mutation_distribution = {} };
1668 /*
1669 std::random_device rd;
1670 std::uniform_int_distribution<uint64_t> dist(0, ~(uint64_t)(0));
1671 srandom(static_cast<unsigned int>(dist(rd)));
1672
1673 fuzzer_havoc_settings =
1674 HavocSettings{ .GEN_MUTATION_COUNT_LOG = static_cast<size_t>((random() % 8) + 1),
1675 .GEN_STRUCTURAL_MUTATION_PROBABILITY = static_cast<size_t>(random() % 100),
1676 .GEN_VALUE_MUTATION_PROBABILITY = static_cast<size_t>(random() % 100),
1677 .ST_MUT_DELETION_PROBABILITY = static_cast<size_t>(random() % 100),
1678 .ST_MUT_DUPLICATION_PROBABILITY = static_cast<size_t>(random() % 100),
1679 .ST_MUT_INSERTION_PROBABILITY = static_cast<size_t>((random() % 99) + 1),
1680 .ST_MUT_MAXIMUM_DELETION_LOG = static_cast<size_t>((random() % 8) + 1),
1681 .ST_MUT_MAXIMUM_DUPLICATION_LOG = static_cast<size_t>((random() % 8) + 1),
1682 .ST_MUT_SWAP_PROBABILITY = static_cast<size_t>(random() % 100),
1683 .VAL_MUT_LLVM_MUTATE_PROBABILITY = static_cast<size_t>(random() % 100),
1684 .VAL_MUT_MONTGOMERY_PROBABILITY = static_cast<size_t>(random() % 100),
1685 .VAL_MUT_NON_MONTGOMERY_PROBABILITY = static_cast<size_t>(random() % 100),
1686 .VAL_MUT_SMALL_ADDITION_PROBABILITY = static_cast<size_t>(random() % 100),
1687 .VAL_MUT_SPECIAL_VALUE_PROBABILITY = static_cast<size_t>(random() % 100)
1688
1689 };
1690 while (fuzzer_havoc_settings.GEN_STRUCTURAL_MUTATION_PROBABILITY == 0 &&
1691 fuzzer_havoc_settings.GEN_VALUE_MUTATION_PROBABILITY == 0) {
1692 fuzzer_havoc_settings.GEN_STRUCTURAL_MUTATION_PROBABILITY = static_cast<size_t>(random() % 8);
1693 fuzzer_havoc_settings.GEN_VALUE_MUTATION_PROBABILITY = static_cast<size_t>(random() % 8);
1694 }
1695 */
1696
1697 // fuzzer_havoc_settings.GEN_LLVM_POST_MUTATION_PROB = static_cast<size_t>(((random() % (20 - 1)) + 1) * 10);
1702 /*
1703 std::cerr << "CUSTOM MUTATOR SETTINGS:" << std::endl
1704 << "################################################################" << std::endl
1705 << "GEN_LLVM_POST_MUTATION_PROB: " << fuzzer_havoc_settings.GEN_LLVM_POST_MUTATION_PROB << std::endl
1706 << "GEN_MUTATION_COUNT_LOG: " << fuzzer_havoc_settings.GEN_MUTATION_COUNT_LOG << std::endl
1707 << "GEN_STRUCTURAL_MUTATION_PROBABILITY: " <<
1708 fuzzer_havoc_settings.GEN_STRUCTURAL_MUTATION_PROBABILITY
1709 << std::endl
1710 << "GEN_VALUE_MUTATION_PROBABILITY: " << fuzzer_havoc_settings.GEN_VALUE_MUTATION_PROBABILITY <<
1711 std::endl
1712 << "ST_MUT_DELETION_PROBABILITY: " << fuzzer_havoc_settings.ST_MUT_DELETION_PROBABILITY << std::endl
1713 << "ST_MUT_DUPLICATION_PROBABILITY: " << fuzzer_havoc_settings.ST_MUT_DUPLICATION_PROBABILITY <<
1714 std::endl
1715 << "ST_MUT_INSERTION_PROBABILITY: " << fuzzer_havoc_settings.ST_MUT_INSERTION_PROBABILITY << std::endl
1716 << "ST_MUT_MAXIMUM_DELETION_LOG: " << fuzzer_havoc_settings.ST_MUT_MAXIMUM_DELETION_LOG << std::endl
1717 << "ST_MUT_MAXIMUM_DUPLICATION_LOG: " << fuzzer_havoc_settings.ST_MUT_MAXIMUM_DUPLICATION_LOG <<
1718 std::endl
1719 << "ST_MUT_SWAP_PROBABILITY: " << fuzzer_havoc_settings.ST_MUT_SWAP_PROBABILITY << std::endl
1720 << "VAL_MUT_LLVM_MUTATE_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_LLVM_MUTATE_PROBABILITY
1721 << std::endl
1722 << "VAL_MUT_MONTGOMERY_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_MONTGOMERY_PROBABILITY <<
1723 std::endl
1724 << "VAL_MUT_NON_MONTGOMERY_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_NON_MONTGOMERY_PROBABILITY
1725 << std::endl
1726 << "VAL_MUT_SMALL_ADDITION_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_SMALL_ADDITION_PROBABILITY
1727 << std::endl
1728 << "VAL_MUT_SMALL_MULTIPLICATION_PROBABILITY: "
1729 << fuzzer_havoc_settings.VAL_MUT_SMALL_MULTIPLICATION_PROBABILITY << std::endl
1730 << "VAL_MUT_SPECIAL_VALUE_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_SPECIAL_VALUE_PROBABILITY
1731 << std::endl;
1732 */
1733 std::vector<size_t> structural_mutation_distribution;
1734 std::vector<size_t> value_mutation_distribution;
1735 size_t temp = 0;
1736 temp += fuzzer_havoc_settings.ST_MUT_DELETION_PROBABILITY;
1737 structural_mutation_distribution.push_back(temp);
1738 temp += fuzzer_havoc_settings.ST_MUT_DUPLICATION_PROBABILITY;
1739 structural_mutation_distribution.push_back(temp);
1740 temp += fuzzer_havoc_settings.ST_MUT_INSERTION_PROBABILITY;
1741 structural_mutation_distribution.push_back(temp);
1742 temp += fuzzer_havoc_settings.ST_MUT_SWAP_PROBABILITY;
1743 structural_mutation_distribution.push_back(temp);
1744 fuzzer_havoc_settings.structural_mutation_distribution = structural_mutation_distribution;
1745
1746 temp = 0;
1747 temp += fuzzer_havoc_settings.VAL_MUT_LLVM_MUTATE_PROBABILITY;
1748 value_mutation_distribution.push_back(temp);
1749 temp += fuzzer_havoc_settings.VAL_MUT_SMALL_ADDITION_PROBABILITY;
1750 value_mutation_distribution.push_back(temp);
1751 temp += fuzzer_havoc_settings.VAL_MUT_SPECIAL_VALUE_PROBABILITY;
1752 value_mutation_distribution.push_back(temp);
1753
1754 fuzzer_havoc_settings.value_mutation_distribution = value_mutation_distribution;
1755 return 0;
1756}
1757#endif
1758
1763extern "C" size_t LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size)
1764{
1765 RunWithBuilders<CycleGroupBase, FuzzerCircuitTypes>(Data, Size, VarianceRNG);
1766 return 0;
1767}
1768
1769#pragma clang diagnostic pop
static constexpr size_t CONSTANT_WITNESS
static constexpr size_t ADD
static constexpr size_t CONSTANT
static constexpr size_t MULTIPLY
static constexpr size_t COND_ASSIGN
static constexpr size_t ASSERT_EQUAL
static constexpr size_t RANDOMSEED
static constexpr size_t DBL
static constexpr size_t BATCH_MUL
static constexpr size_t SET
static constexpr size_t WITNESS
static constexpr size_t NEG
static constexpr size_t SUBTRACT
static constexpr size_t SET_INF
This class implements the execution of cycle group with an oracle to detect discrepancies.
static size_t execute_SET_INF(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the SET_INF instruction.
static size_t execute_SUBTRACT(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the subtraction operator instruction.
static size_t execute_COND_ASSIGN(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the COND_ASSIGN instruction.
static size_t execute_DBL(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the DBL instruction.
ExecutionHandler(ScalarField s, GroupElement g, cycle_group_t w_g)
static bool_t construct_predicate(Builder *builder, const bool predicate)
ExecutionHandler set_inf(Builder *builder)
static size_t execute_NEG(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the NEG instruction.
static size_t execute_WITNESS(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the witness instruction (push witness cycle group to the stack)
static size_t execute_BATCH_MUL(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the BATCH_MUL instruction.
static size_t execute_RANDOMSEED(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the RANDOMSEED instruction.
static size_t execute_ASSERT_EQUAL(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the ASSERT_EQUAL instruction.
static size_t execute_SET(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the SET instruction.
static size_t execute_CONSTANT_WITNESS(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the constant_witness instruction (push a safeuint witness equal to the constant to the stack)
static size_t execute_ADD(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the addition operator instruction.
ExecutionHandler mul(Builder *builder, const ScalarField &multiplier)
static ExecutionHandler batch_mul(Builder *builder, const std::vector< ExecutionHandler > &to_add, const std::vector< ScalarField > &to_mul)
ExecutionHandler operator_sub(Builder *builder, const ExecutionHandler &other)
static size_t execute_MULTIPLY(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the multiply instruction.
void assert_equal(Builder *builder, ExecutionHandler &other)
static size_t execute_CONSTANT(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the constant instruction (push constant cycle group to the stack)
ExecutionHandler operator_add(Builder *builder, const ExecutionHandler &other)
ExecutionHandler conditional_assign(Builder *builder, ExecutionHandler &other, const bool predicate)
ExecutionHandler set(Builder *builder)
static Instruction generateRandom(T &rng)
Generates a random instruction.
static Instruction mutateInstruction(Instruction instruction, T &rng, HavocSettings &havoc_config)
Mutate a single instruction.
static ScalarField mutateScalarElement(ScalarField e, T &rng, HavocSettings &havoc_config)
Mutate the value of a scalar element.
static Element mutateGroupElement(Element e, T &rng, HavocSettings &havoc_config)
Mutate the value of a group element.
Optional subclass that governs limits on the use of certain instructions, since some of them can be t...
Parser class handles the parsing and writing the instructions back to data buffer.
static void writeInstruction(Instruction &instruction, uint8_t *Data)
Write a single instruction to buffer.
static Instruction parseInstructionArgs(uint8_t *Data)
Parse a single instruction from data.
The class parametrizing CycleGroup fuzzing instructions, execution, etc.
typename bb::stdlib::witness_t< Builder > witness_t
typename bb::stdlib::cycle_group< Builder >::Curve Curve
typename bb::stdlib::public_witness_t< Builder > public_witness_t
typename bb::stdlib::cycle_group< Builder > cycle_group_t
typename bb::stdlib::field_t< Builder > field_t
static bool postProcess(Builder *builder, std::vector< CycleGroupBase::ExecutionHandler > &stack)
Check that the resulting values are equal to expected.
std::vector< ExecutionHandler > ExecutionState
typename bb::stdlib::bool_t< Builder > bool_t
typename cycle_group_t::cycle_scalar cycle_scalar_t
typename Curve::Element GroupElement
typename Curve::AffineElement AffineElement
typename Curve::ScalarField ScalarField
typename Curve::BaseField BaseField
Class for quickly deterministically creating new random values. We don't care about distribution much...
Definition fuzzer.hpp:63
void reseed(uint32_t seed)
Definition fuzzer.hpp:75
uint32_t next()
Definition fuzzer.hpp:68
typename Group::element Element
Definition bn254.hpp:21
bb::fq BaseField
Definition bn254.hpp:19
typename Group::affine_element AffineElement
Definition bn254.hpp:22
bb::fr ScalarField
Definition bn254.hpp:18
Implements boolean logic in-circuit.
Definition bool.hpp:59
cycle_group represents a group Element of the proving system's embedded curve i.e....
static cycle_group from_constant_witness(Builder *_context, const AffineElement &_in)
Converts a native AffineElement into a witness, but constrains the witness values to be known constan...
typename Builder::EmbeddedCurve Curve
static cycle_group conditional_assign(const bool_t &predicate, const cycle_group &lhs, const cycle_group &rhs)
static cycle_group from_witness(Builder *_context, const AffineElement &_in)
Converts an AffineElement into a circuit witness.
::bb::stdlib::cycle_scalar< Builder > cycle_scalar
static cycle_group batch_mul(const std::vector< cycle_group > &base_points, const std::vector< BigScalarField > &scalars, GeneratorContext context={})
Concept for a simple PRNG which returns a uint32_t when next is called.
Definition fuzzer.hpp:125
AluTraceBuilder builder
Definition alu.test.cpp:123
#define INV_MONT_CONVERSION_SCALAR
constexpr size_t MINIMUM_MUL_ELEMENTS
constexpr size_t MAXIMUM_MUL_ELEMENTS
#define INV_MONT_CONVERSION
size_t LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize)
FastRandom VarianceRNG(0)
bool circuit_should_fail
int LLVMFuzzerInitialize(int *argc, char ***argv)
#define MONT_CONVERSION_SCALAR
#define MONT_CONVERSION
size_t LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
Fuzzer entry function.
#define PUT_RANDOM_BYTE_IF_LUCKY(variable)
ssize_t offset
Definition engine.cpp:36
Instruction instruction
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
std::string to_string(bb::avm2::ValueTag tag)
ScalarField scalars[MAXIMUM_MUL_ELEMENTS]
Element(ScalarField s=ScalarField::one(), GroupElement g=GroupElement::one())
size_t GEN_LLVM_POST_MUTATION_PROB
Definition fuzzer.hpp:28