// Preprocessor FOR_EACH machinery (relies on C++20 __VA_OPT__).
// EXPAND re-scans its argument through four levels of 4-way fan-out
// (4^4 = 256 rescans), which is what drives the deferred recursion in
// FOR_EACH_HELPER and bounds how many list elements FOR_EACH can process.
#define EXPAND(x) EXPAND1(EXPAND1(EXPAND1(EXPAND1(x))))
#define EXPAND1(x) EXPAND2(EXPAND2(EXPAND2(EXPAND2(x))))
#define EXPAND2(x) EXPAND3(EXPAND3(EXPAND3(EXPAND3(x))))
#define EXPAND3(x) EXPAND4(EXPAND4(EXPAND4(EXPAND4(x))))
#define EXPAND4(x) x

// Apply `macro` once to each element of the variadic list.
// Empty list => expands to nothing (guarded by __VA_OPT__).
#define FOR_EACH(macro, ...) __VA_OPT__(EXPAND(FOR_EACH_HELPER(macro, __VA_ARGS__)))
// Emit macro(head); if a tail remains, leave a *deferred* call
// `FOR_EACH_AGAIN ()(macro, tail...)` that the next EXPAND rescan turns back
// into FOR_EACH_HELPER. (PARENS is defined earlier in the file as `()`.)
#define FOR_EACH_HELPER(macro, a1, ...) macro(a1) __VA_OPT__(FOR_EACH_AGAIN PARENS(macro, __VA_ARGS__))
#define FOR_EACH_AGAIN() FOR_EACH_HELPER
// Master list of every opcode the fuzzer understands. It is FOR_EACH-expanded
// by PARSE_ALL_OPCODES / WRITE_ALL_OPCODES / EXECUTE_ALL_OPCODES to generate
// the per-opcode parse/serialize/dispatch branches.
// Fix: the original list contained ASSERT_EQUAL, ASSERT_NOT_EQUAL and SQR_ADD
// twice, which made every expansion emit redundant identical branches; the
// duplicates are removed (the set of opcodes handled is unchanged).
#define ALL_POSSIBLE_OPCODES \
    CONSTANT, WITNESS, CONSTANT_WITNESS, ADD, SUBTRACT, MULTIPLY, DIVIDE, ADD_TWO, MADD, MULT_MADD, MSUB_DIV, SQR, \
        ASSERT_EQUAL, ASSERT_NOT_EQUAL, SQR_ADD, SUBTRACT_WITH_CONSTRAINT, \
        DIVIDE_WITH_CONSTRAINTS, SLICE, ASSERT_ZERO, ASSERT_NOT_ZERO, COND_NEGATE, ADD_MULTI, ASSERT_VALID, \
        COND_SELECT, DOUBLE, RANDOMSEED, SELECT_IF_ZERO, SELECT_IF_EQ, REVERSE, GET_BIT, SET_BIT, SET, INVERT, AND, \
        OR, XOR, MODULO, SHL, SHR, ROL, ROR, NOT, BATCH_MUL, COND_ASSIGN
70 state =
static_cast<uint32_t
>(
71 (
static_cast<uint64_t
>(
state) *
static_cast<uint64_t
>(363364578) +
static_cast<uint64_t
>(537)) %
72 static_cast<uint64_t
>(3758096939));
88template <
typename T>
static inline uint256_t fast_log_distributed_uint256(T& rng)
93 uint16_t* p = (uint16_t*)&temp;
94 uint8_t mask_size =
static_cast<uint8_t
>(rng.next() & 0xff);
95 for (
size_t i = 0; i < 16; i++) {
96 *(p + i) =
static_cast<uint16_t
>(rng.next() & 0xffff);
110 uint64_t parts[4] = { 0, 0, 0, 0 };
112 for (
size_t i = 0; i < (buffer_size + 7) / 8; i++) {
113 size_t to_read = (buffer_size - i * 8) < 8 ? buffer_size - i * 8 : 8;
116 return uint256_t(parts[0], parts[1], parts[2], parts[3]);
136 std::make_tuple(T::CONSTANT,
149 T::SUBTRACT_WITH_CONSTRAINT,
150 T::DIVIDE_WITH_CONSTRAINTS,
165 std::make_tuple(T::GEN_MUTATION_COUNT_LOG, T::GEN_STRUCTURAL_MUTATION_PROBABILITY)
167 T::GEN_MUTATION_COUNT_LOG <= 7;
176 typename T::ArgSizes;
177 typename T::Instruction;
178 typename T::ExecutionState;
179 typename T::ExecutionHandler;
200template <
typename T,
typename Composer,
typename Context>
213 typename T::InstructionWeights;
214 T::InstructionWeights::_LIMIT;
226template <
typename T,
typename FF>
227inline static FF mutateFieldElement(
FF e, T& rng)
233 bool convert_to_montgomery = (rng.next() & 1);
236#define MONT_CONVERSION_LOCAL \
237 if (convert_to_montgomery) { \
238 value_data = uint256_t(e.to_montgomery_form()); \
240 value_data = uint256_t(e); \
243#define INV_MONT_CONVERSION_LOCAL \
244 if (convert_to_montgomery) { \
245 e = FF(value_data).from_montgomery_form(); \
247 e = FF(value_data); \
252 const size_t choice = rng.next() % 4;
260 }
else if (choice < 3) {
263 if (convert_to_montgomery) {
264 e = e.to_montgomery_form();
266 if (rng.next() & 1) {
267 e +=
FF(rng.next() & 0xff);
269 e -=
FF(rng.next() & 0xff);
271 if (convert_to_montgomery) {
272 e = e.from_montgomery_form();
277 switch (rng.next() % 8) {
306 if (convert_to_montgomery) {
307 e = e.from_montgomery_form();
331 const size_t instructions_count = instructions.size();
332 if (instructions_count <= 2) {
335 const size_t first_element_index = rng.
next() % instructions_count;
336 size_t second_element_index = rng.
next() % instructions_count;
337 if (first_element_index == second_element_index) {
338 second_element_index = (second_element_index + 1) % instructions_count;
340 std::iter_swap(instructions.begin() +
static_cast<int>(first_element_index),
341 instructions.begin() +
static_cast<int>(second_element_index));
356 const size_t instructions_count = instructions.size();
357 if (instructions_count == 0) {
360 if ((rng.
next() & 1) != 0U) {
361 instructions.erase(instructions.begin() + (rng.
next() % instructions_count));
364 const size_t max_deletion_log =
365 std::min(
static_cast<size_t>(64 - __builtin_clzll(
static_cast<uint64_t
>(instructions_count)) - 1),
368 if (max_deletion_log == 0) {
371 const size_t deletion_size = 1 << (rng.
next() % max_deletion_log);
372 const size_t start = rng.
next() % (instructions_count + 1 - deletion_size);
373 instructions.erase(instructions.begin() +
static_cast<int>(start),
374 instructions.begin() +
static_cast<int>(start + deletion_size));
388 const size_t instructions_count = instructions.size();
389 if (instructions_count == 0) {
393 typename T::Instruction chosen_instruction = instructions[rng.
next() % instructions_count];
395 instructions.begin() + (rng.
next() % (instructions_count + 1)), duplication_size, chosen_instruction);
401 (void)havoc_settings;
402 instructions.insert(instructions.begin() +
static_cast<int>(rng.
next() % (instructions.size() + 1)),
403 T::Instruction::template generateRandom<FastRandom>(rng));
418 const size_t choice = rng.
next() % prob_pool;
443 const size_t instructions_count = instructions.size();
444 if (instructions_count == 0) {
447 const size_t chosen = rng.
next() % instructions_count;
448 instructions[chosen] =
449 T::Instruction::template mutateInstruction<FastRandom>(instructions[chosen], rng, havoc_settings);
458 const size_t mutation_count = 1 << T::HavocConfig::MUTATION_COUNT_LOG;
462 for (
size_t i = 0; i < mutation_count; i++) {
463 uint32_t val = rng.
next();
492 const size_t vecA_size = vecA.size();
493 const size_t vecB_size = vecB.size();
495 if (vecA_size == 0) {
498 if (vecB_size == 0) {
503 const size_t final_result_size = rng.
next() % (vecA_size + vecB_size) + 1;
506 size_t* inIndex = &indexA;
507 size_t inSize = vecA_size;
508 auto inIterator = vecA.begin();
509 size_t current_result_size = 0;
510 bool currentlyUsingA =
true;
512 while (current_result_size < final_result_size && (indexA < vecA_size || indexB < vecB_size)) {
514 size_t result_size_left = final_result_size - current_result_size;
516 if (*inIndex < inSize) {
518 size_t inSizeLeft = inSize - *inIndex;
519 size_t maxExtraSize = std::min(result_size_left, inSizeLeft);
520 if (maxExtraSize != 0) {
522 size_t copySize = (rng.
next() % maxExtraSize) + 1;
523 result.insert(result.begin() +
static_cast<long>(current_result_size),
524 inIterator +
static_cast<long>((*inIndex)),
526 inIterator +
static_cast<long>((*inIndex) + copySize));
528 *inIndex += copySize;
529 current_result_size += copySize;
533 inIndex = currentlyUsingA ? &indexB : &indexA;
534 inSize = currentlyUsingA ? vecB_size : vecA_size;
535 inIterator = currentlyUsingA ? vecB.begin() : vecA.begin();
536 currentlyUsingA = !currentlyUsingA;
552 auto* pData =
const_cast<uint8_t*
>(Data);
553 size_t size_left = Size;
554 while (size_left != 0) {
555 uint8_t chosen_operation = *pData;
560#define PARSE_OPCODE(name) \
561 if constexpr (requires { T::ArgSizes::name; }) \
562 if constexpr (T::ArgSizes::name != size_t(-1)) { \
563 if (chosen_operation == T::Instruction::OPCODE::name) { \
564 if (size_left < T::ArgSizes::name) { \
565 return fuzzingInstructions; \
567 fuzzingInstructions.push_back( \
568 T::Parser::template parseInstructionArgs<T::Instruction::OPCODE::name>(pData)); \
569 size_left -= T::ArgSizes::name; \
570 pData += T::ArgSizes::name; \
575#define PARSE_ALL_OPCODES(...) FOR_EACH(PARSE_OPCODE, __VA_ARGS__)
579 return fuzzingInstructions;
593 uint8_t* pData = Data;
594 size_t size_left = MaxSize;
597#define WRITE_OPCODE_IF(name) \
598 if constexpr (requires { T::ArgSizes::name; }) \
599 if constexpr (T::ArgSizes::name != (size_t)-1) { \
600 if (instruction.id == T::Instruction::OPCODE::name) { \
601 if (size_left >= (T::ArgSizes::name + 1)) { \
602 T::Parser::template writeInstruction<T::Instruction::OPCODE::name>(instruction, pData); \
603 size_left -= (T::ArgSizes::name + 1); \
604 pData += (T::ArgSizes::name + 1); \
606 return MaxSize - size_left; \
612#define WRITE_ALL_OPCODES(...) FOR_EACH(WRITE_OPCODE_IF, __VA_ARGS__)
616 return MaxSize - size_left;
625 template <
typename Composer>
631 typename T::ExecutionState state;
632 Composer composer = Composer();
635 size_t total_instruction_weight = 0;
636 (void)total_instruction_weight;
639#define EXECUTE_OPCODE_IF(name) \
640 if constexpr (requires { T::ArgSizes::name; }) \
641 if constexpr (T::ArgSizes::name != size_t(-1)) { \
642 if (instruction.id == T::Instruction::OPCODE::name) { \
643 if constexpr (InstructionWeightsEnabled<T>) { \
644 if (!((total_instruction_weight + T::InstructionWeights::name) > T::InstructionWeights::_LIMIT)) { \
645 total_instruction_weight += T::InstructionWeights::name; \
646 if (T::ExecutionHandler::execute_##name(&composer, state, instruction)) { \
654 if (T::ExecutionHandler::execute_##name(&composer, state, instruction)) { \
660#define EXECUTE_ALL_OPCODES(...) FOR_EACH(EXECUTE_OPCODE_IF, __VA_ARGS__)
664 bool final_value_check =
true;
667 final_value_check = T::postProcess(&composer, state);
669#ifdef FUZZING_SHOW_INFORMATION
670 if (!final_value_check) {
676#ifndef FUZZING_DISABLE_WARNINGS
678 info(
"circuit should fail");
687 if (!final_value_check) {
716template <
template <
typename>
class Fuzzer,
typename Composer>
724template <
template <
typename>
class Fuzzer, uint64_t Composers>
727 RunWithBuilder<Fuzzer, bb::UltraCircuitBuilder>(Data, Size,
VarianceRNG);
#define BB_ASSERT_LTE(left, right,...)
FastRandom VarianceRNG(0)
A templated class containing most of the fuzzing logic for a generic Arithmetic class.
static void mutateInstructionVector(std::vector< typename T::Instruction > &instructions, FastRandom &rng)
static size_t writeInstructionsToBuffer(std::vector< typename T::Instruction > &instructions, uint8_t *Data, size_t MaxSize)
Write instructions into the buffer until there are no instructions left or there is no more space.
static void duplicateInstruction(std::vector< typename T::Instruction > &instructions, FastRandom &rng, HavocSettings &havoc_settings)
Mutator duplicating an instruction.
static std::vector< typename T::Instruction > parseDataIntoInstructions(const uint8_t *Data, size_t Size)
Parses a given data buffer into a vector of instructions for testing the arithmetic.
static void swapTwoInstructions(std::vector< typename T::Instruction > &instructions, FastRandom &rng)
Mutator swapping two instructions together.
static size_t MutateInstructionBuffer(uint8_t *Data, size_t Size, size_t MaxSize, FastRandom &rng)
Interpret the data buffer as a series of arithmetic instructions and mutate it accordingly.
static std::vector< typename T::Instruction > crossoverInstructionVector(const std::vector< typename T::Instruction > &vecA, const std::vector< typename T::Instruction > &vecB, FastRandom &rng)
Splice two instruction vectors into one randomly.
static void deleteInstructions(std::vector< typename T::Instruction > &instructions, FastRandom &rng, HavocSettings &havoc_settings)
Mutator, deleting a sequence of instructions.
static void executeInstructions(std::vector< typename T::Instruction > &instructions)
Execute instructions in a loop.
static void insertRandomInstruction(std::vector< typename T::Instruction > &instructions, FastRandom &rng, HavocSettings &havoc_settings)
static void mutateInstructionValue(std::vector< typename T::Instruction > &instructions, FastRandom &rng, HavocSettings &havoc_settings)
Choose a random instruction from the vector and mutate it.
static void mutateInstructionStructure(std::vector< typename T::Instruction > &instructions, FastRandom &rng, HavocSettings &havoc_settings)
Mutator for instruction structure.
Class for quickly deterministically creating new random values. We don't care about distribution much...
FastRandom(uint32_t seed)
void reseed(uint32_t seed)
static bool check(const Builder &circuit)
Check that the witness satisfies the circuit.
Concept specifying the class used by the fuzzer.
Fuzzer uses only composers with check_circuit function.
Concept for Havoc Configurations.
Concept for forcing ArgumentSizes to be size_t.
This concept is used when we want to limit the number of executions of certain instructions (for exam...
The fuzzer can use a postprocessing function that is specific to the type being fuzzed.
Concept for a simple PRNG which returns a uint32_t when next is called.
const std::vector< FF > data
StrictMock< MockContext > context
#define INV_MONT_CONVERSION_LOCAL
#define WRITE_ALL_OPCODES(...)
uint256_t read_uint256(const uint8_t *data, size_t buffer_size=32)
size_t LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize)
constexpr void RunWithBuilder(const uint8_t *Data, const size_t Size, FastRandom &VarianceRNG)
#define EXECUTE_ALL_OPCODES(...)
#define PARSE_ALL_OPCODES(...)
#define ALL_POSSIBLE_OPCODES
#define MONT_CONVERSION_LOCAL
constexpr void RunWithBuilders(const uint8_t *Data, const size_t Size, FastRandom &VarianceRNG)
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
size_t GEN_VALUE_MUTATION_PROBABILITY
size_t ST_MUT_MAXIMUM_DELETION_LOG
size_t GEN_MUTATION_COUNT_LOG
size_t ST_MUT_MAXIMUM_DUPLICATION_LOG
size_t VAL_MUT_LLVM_MUTATE_PROBABILITY
size_t ST_MUT_DELETION_PROBABILITY
size_t VAL_MUT_SMALL_ADDITION_PROBABILITY
size_t ST_MUT_DUPLICATION_PROBABILITY
size_t VAL_MUT_SPECIAL_VALUE_PROBABILITY
std::vector< size_t > value_mutation_distribution
size_t GEN_STRUCTURAL_MUTATION_PROBABILITY
size_t VAL_MUT_MONTGOMERY_PROBABILITY
size_t VAL_MUT_NON_MONTGOMERY_PROBABILITY
size_t ST_MUT_SWAP_PROBABILITY
size_t GEN_LLVM_POST_MUTATION_PROB
std::vector< size_t > structural_mutation_distribution
size_t ST_MUT_INSERTION_PROBABILITY
static constexpr field get_root_of_unity(size_t subgroup_size) noexcept
static constexpr field one()
static constexpr uint256_t modulus
constexpr std::pair< bool, field > sqrt() const noexcept
Compute square root of the field element.
static constexpr field zero()