Barretenberg
The ZK-SNARK library at the core of Aztec
fuzzer.hpp
1#pragma once
5
6// NOLINTBEGIN(cppcoreguidelines-macro-usage, google-runtime-int)
7#define PARENS ()
8
9// Rescan macro tokens 256 times
10#define EXPAND(arg) EXPAND1(EXPAND1(EXPAND1(EXPAND1(arg))))
11#define EXPAND1(arg) EXPAND2(EXPAND2(EXPAND2(EXPAND2(arg))))
12#define EXPAND2(arg) EXPAND3(EXPAND3(EXPAND3(EXPAND3(arg))))
13#define EXPAND3(arg) EXPAND4(EXPAND4(EXPAND4(EXPAND4(arg))))
14#define EXPAND4(arg) arg
15
16#define FOR_EACH(macro, ...) __VA_OPT__(EXPAND(FOR_EACH_HELPER(macro, __VA_ARGS__)))
17#define FOR_EACH_HELPER(macro, a1, ...) macro(a1) __VA_OPT__(FOR_EACH_AGAIN PARENS(macro, __VA_ARGS__))
18#define FOR_EACH_AGAIN() FOR_EACH_HELPER
19
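// Example of how these macros unfold: FOR_EACH(F, a, b, c) expands to F(a) F(b) F(c).
// The PARENS trick keeps FOR_EACH_AGAIN from expanding immediately, so each EXPAND rescan
// peels off one more argument, giving bounded preprocessor "recursion" (up to 256 rescans).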
20#define ALL_POSSIBLE_OPCODES \
21 CONSTANT, WITNESS, CONSTANT_WITNESS, ADD, SUBTRACT, MULTIPLY, DIVIDE, ADD_TWO, MADD, MULT_MADD, MSUB_DIV, SQR, \
22    ASSERT_EQUAL, ASSERT_NOT_EQUAL, SQR_ADD, SUBTRACT_WITH_CONSTRAINT,                                              \
23 DIVIDE_WITH_CONSTRAINTS, SLICE, ASSERT_ZERO, ASSERT_NOT_ZERO, COND_NEGATE, ADD_MULTI, ASSERT_VALID, \
24 COND_SELECT, DOUBLE, RANDOMSEED, SELECT_IF_ZERO, SELECT_IF_EQ, REVERSE, GET_BIT, SET_BIT, SET, INVERT, AND, \
25 OR, XOR, MODULO, SHL, SHR, ROL, ROR, NOT, BATCH_MUL, COND_ASSIGN
26
27struct HavocSettings {
28    size_t GEN_LLVM_POST_MUTATION_PROB; // Controls frequency of additional mutation after structural ones
29 size_t GEN_MUTATION_COUNT_LOG; // This is the logarithm of the number of micromutations applied during mutation of a
30 // testcase
31 size_t GEN_STRUCTURAL_MUTATION_PROBABILITY; // The probability of applying a structural mutation
32 // (DELETION/DUPLICATION/INSERTION/SWAP)
33 size_t GEN_VALUE_MUTATION_PROBABILITY; // The probability of applying a value mutation
34 size_t ST_MUT_DELETION_PROBABILITY; // The probability of applying DELETION mutation
35 size_t ST_MUT_DUPLICATION_PROBABILITY; // The probability of applying DUPLICATION mutation
36 size_t ST_MUT_INSERTION_PROBABILITY; // The probability of applying INSERTION mutation
37    size_t ST_MUT_MAXIMUM_DELETION_LOG;    // The logarithm of the maximum number of deletions
38    size_t ST_MUT_MAXIMUM_DUPLICATION_LOG; // The logarithm of the maximum number of duplications
39 size_t ST_MUT_SWAP_PROBABILITY; // The probability of a SWAP mutation
40    size_t VAL_MUT_LLVM_MUTATE_PROBABILITY;    // The probability of using the LLVM mutator on a field element value
41    size_t VAL_MUT_MONTGOMERY_PROBABILITY;     // The probability of converting to Montgomery form before applying value
42                                               // mutations
43    size_t VAL_MUT_NON_MONTGOMERY_PROBABILITY; // The probability of not converting to Montgomery form before applying
44                                               // value mutations
45    size_t VAL_MUT_SMALL_ADDITION_PROBABILITY; // The probability of performing small additions
46    size_t VAL_MUT_SPECIAL_VALUE_PROBABILITY;  // The probability of assigning special values (0, 1, p-1, p-2, (p-1)/2)
47 std::vector<size_t> structural_mutation_distribution; // Holds the values to quickly select a structural mutation
48 // based on chosen probabilities
49 std::vector<size_t> value_mutation_distribution; // Holds the values to quickly select a value mutation based on
50 // chosen probabilities
51};
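// Note on the distribution vectors above: they hold cumulative probability thresholds, e.g.
// structural_mutation_distribution = {DEL, DEL+DUP, DEL+DUP+INS, DEL+DUP+INS+SWAP}; a mutator then
// draws rng.next() % last_element and picks the first bucket whose threshold exceeds the draw
// (see mutateInstructionStructure below).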
52#ifdef HAVOC_TESTING
53
54HavocSettings fuzzer_havoc_settings;
55#endif
56// This is an external function from libFuzzer that is used internally by custom mutators
57extern "C" size_t LLVMFuzzerMutate(uint8_t* Data, size_t Size, size_t MaxSize);
58
63class FastRandom {
64    uint32_t state;
65
66 public:
67 FastRandom(uint32_t seed) { reseed(seed); }
68 uint32_t next()
69 {
70 state = static_cast<uint32_t>(
71 (static_cast<uint64_t>(state) * static_cast<uint64_t>(363364578) + static_cast<uint64_t>(537)) %
72 static_cast<uint64_t>(3758096939));
73 return state;
74 }
75 void reseed(uint32_t seed)
76 {
77 if (seed == 0) {
78 seed = 1;
79 }
80 state = seed;
81 }
82};
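// FastRandom is a small linear congruential generator: state <- (state * 363364578 + 537) mod 3758096939.
// A minimal usage sketch:
//   FastRandom rng(42);
//   uint32_t percent = rng.next() % 100; // deterministic pseudo-random value in [0, 99]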
83
84// Sample a uint256_t value from a log distribution:
85// we first sample the bit count in [0..255],
86// then mask the random 256-bit value down to that many bits.
87// This makes smaller values appear more frequently.
88template <typename T> static inline uint256_t fast_log_distributed_uint256(T& rng)
89{
90 uint256_t temp;
91 // Generate a random mask_size-bit value
92 // We want to sample from log distribution instead of uniform
93 uint16_t* p = (uint16_t*)&temp;
94 uint8_t mask_size = static_cast<uint8_t>(rng.next() & 0xff);
95 for (size_t i = 0; i < 16; i++) {
96 *(p + i) = static_cast<uint16_t>(rng.next() & 0xffff);
97 }
98 uint256_t mask = (uint256_t(1) << mask_size) - 1;
99 temp &= mask;
100    temp += 1; // Avoid producing zero too often, since it tends to create degenerate cases (e.g. infinities)
101 return temp;
102}
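// For instance, if rng.next() yields mask_size = 5, the 256-bit sample is masked down to 5 bits,
// so the result lies in [1, 32]. Every bit length in [0..255] is equally likely, which makes small
// magnitudes far more frequent than under a uniform distribution over uint256_t.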
103
104// Read uint256_t from raw bytes.
105// Don't use dereferencing casts: the data may be unaligned, which can cause a segfault
106uint256_t read_uint256(const uint8_t* data, size_t buffer_size = 32)
107{
108 BB_ASSERT_LTE(buffer_size, 32U);
109
110 uint64_t parts[4] = { 0, 0, 0, 0 };
111
112 for (size_t i = 0; i < (buffer_size + 7) / 8; i++) {
113 size_t to_read = (buffer_size - (i * 8)) < 8 ? buffer_size - (i * 8) : 8;
114 std::memcpy(&parts[i], data + (i * 8), to_read);
115 }
116 return uint256_t(parts[0], parts[1], parts[2], parts[3]);
117}
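// Example: read_uint256(data, 3) copies just three bytes into the least-significant limb and leaves
// the remaining limbs zero; using memcpy avoids unaligned uint64_t loads on the input buffer.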
118
119// Convert preprocessor flag to constexpr for cleaner call sites
120#ifdef FUZZING_SHOW_INFORMATION
121constexpr bool SHOW_FUZZING_INFO = true;
122#else
123constexpr bool SHOW_FUZZING_INFO = false;
124#endif
125
127template <typename... Args> inline void debug_log(Args&&... args)
128{
129 if constexpr (SHOW_FUZZING_INFO) {
130 (std::cout << ... << std::forward<Args>(args));
131 }
132}
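// Example: debug_log("lhs = ", lhs, "\n"); is a no-op at runtime unless FUZZING_SHOW_INFORMATION is
// defined, so call sites don't need to be wrapped in #ifdef blocks.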
133
134#ifdef FUZZING_SHOW_INFORMATION
139struct FormattedArgs {
140 std::string lhs;
141 std::string rhs;
142 std::string out;
143};
144
152template <typename Stack>
153inline FormattedArgs format_single_arg(const Stack& stack, size_t first_index, size_t output_index)
154{
155 std::string rhs = stack[first_index].cycle_group.is_constant() ? "c" : "w";
156 std::string out = rhs;
157 rhs += std::to_string(first_index);
158 out += std::to_string(output_index >= stack.size() ? stack.size() : output_index);
159 out = (output_index >= stack.size() ? "auto " : "") + out;
160 return FormattedArgs{ .lhs = "", .rhs = rhs, .out = out };
161}
162
171template <typename Stack>
172inline FormattedArgs format_two_arg(const Stack& stack, size_t first_index, size_t second_index, size_t output_index)
173{
174 std::string lhs = stack[first_index].cycle_group.is_constant() ? "c" : "w";
175 std::string rhs = stack[second_index].cycle_group.is_constant() ? "c" : "w";
176 std::string out =
177 (stack[first_index].cycle_group.is_constant() && stack[second_index].cycle_group.is_constant()) ? "c" : "w";
178 lhs += std::to_string(first_index);
179 rhs += std::to_string(second_index);
180 out += std::to_string(output_index >= stack.size() ? stack.size() : output_index);
181 out = (output_index >= stack.size() ? "auto " : "") + out;
182 return FormattedArgs{ .lhs = lhs, .rhs = rhs, .out = out };
183}
184#endif
185
191template <typename T>
192concept SimpleRng = requires(T a) {
193 { a.next() } -> std::convertible_to<uint32_t>;
194};
200template <typename T>
201concept InstructionArgumentSizes = requires {
202 {
203 std::make_tuple(T::CONSTANT,
204 T::WITNESS,
205 T::CONSTANT_WITNESS,
206 T::ADD,
207 T::SUBTRACT,
208 T::MULTIPLY,
209 T::DIVIDE,
210 T::ADD_TWO,
211 T::MADD,
212 T::MULT_MADD,
213 T::MSUB_DIV,
214 T::SQR,
215 T::SQR_ADD,
216 T::SUBTRACT_WITH_CONSTRAINT,
217 T::DIVIDE_WITH_CONSTRAINTS,
218 T::SLICE,
219 T::ASSERT_ZERO,
220 T::ASSERT_NOT_ZERO)
222};
223
229template <typename T>
230concept HavocConfigConstraint = requires {
231 {
232 std::make_tuple(T::GEN_MUTATION_COUNT_LOG, T::GEN_STRUCTURAL_MUTATION_PROBABILITY)
234 T::GEN_MUTATION_COUNT_LOG <= 7;
235};
241template <typename T>
242concept ArithmeticFuzzHelperConstraint = requires {
243    typename T::ArgSizes;
244 typename T::Instruction;
245 typename T::ExecutionState;
246 typename T::ExecutionHandler;
248};
249
255template <typename T>
256concept CheckableComposer = requires(T a) {
258};
259
267template <typename T, typename Composer, typename Context>
268concept PostProcessingEnabled = requires(Composer composer, Context context) {
269 { T::postProcess(&composer, context) } -> std::same_as<bool>;
270};
271
278template <typename T>
279concept InstructionWeightsEnabled = requires {
280 typename T::InstructionWeights;
281 T::InstructionWeights::_LIMIT;
282};
283
293template <typename T, typename FF>
294inline static FF mutateFieldElement(FF e, T& rng)
295 requires SimpleRng<T>
296{
297    // With a certain probability, we apply changes to the Montgomery form, rather than the plain form. This
298    // has merit, since the computation is performed in Montgomery form and comparisons are often performed
299    // in it, too. libFuzzer's comparison tracing logic can then be enabled for the Montgomery form
300 bool convert_to_montgomery = (rng.next() & 1);
301 uint256_t value_data;
302 // Conversion at the start
303#define MONT_CONVERSION_LOCAL \
304 if (convert_to_montgomery) { \
305 value_data = uint256_t(e.to_montgomery_form()); \
306 } else { \
307 value_data = uint256_t(e); \
308 }
309 // Inverse conversion at the end
310#define INV_MONT_CONVERSION_LOCAL \
311 if (convert_to_montgomery) { \
312 e = FF(value_data).from_montgomery_form(); \
313 } else { \
314 e = FF(value_data); \
315 }
316
317    // Choose the mutation type
318    // (50% libFuzzer mutation, 25% small addition/subtraction, 25% special value)
319 const size_t choice = rng.next() % 4;
320 // 50% probability to use standard mutation
321 if (choice < 2) {
322 // Delegate mutation to libfuzzer (bit/byte mutations, autodictionary, etc)
323        MONT_CONVERSION_LOCAL
324        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
325        LLVMFuzzerMutate(reinterpret_cast<uint8_t*>(&value_data), sizeof(uint256_t), sizeof(uint256_t));
326        INV_MONT_CONVERSION_LOCAL
327    } else if (choice < 3) { // 25% to use small additions
328
329 // Small addition/subtraction
330 if (convert_to_montgomery) {
331 e = e.to_montgomery_form();
332 }
333 if (rng.next() & 1) {
334 e += FF(rng.next() & 0xff);
335 } else {
336 e -= FF(rng.next() & 0xff);
337 }
338 if (convert_to_montgomery) {
339 e = e.from_montgomery_form();
340 }
341 } else { // 25% to use special values
342
343 // Substitute field element with a special value
344 switch (rng.next() % 8) {
345 case 0:
346 e = FF::zero();
347 break;
348 case 1:
349 e = FF::one();
350 break;
351 case 2:
352 e = -FF::one();
353 break;
354 case 3:
355 e = FF::one().sqrt().second;
356 break;
357 case 4:
358 e = FF::one().sqrt().second.invert();
359 break;
360 case 5:
362 break;
363 case 6:
364 e = FF(2);
365 break;
366 case 7:
367 e = FF((FF::modulus - 1) / 2);
368 break;
369 default:
370 abort();
371 break;
372 }
373 if (convert_to_montgomery) {
374 e = e.from_montgomery_form();
375 }
376 }
377    // Return the mutated field element
378 return e;
379}
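// A minimal usage sketch (illustrative, assuming FF is one of barretenberg's field types):
//   FastRandom rng(seed);
//   FF element = FF(rng.next());
//   element = mutateFieldElement(element, rng); // returns the mutated element; `rng` drives all choices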
380
386template <typename T>
387    requires ArithmeticFuzzHelperConstraint<T>
388class ArithmeticFuzzHelper {
389  private:
396    static void swapTwoInstructions(std::vector<typename T::Instruction>& instructions, FastRandom& rng)
397 {
398 const size_t instructions_count = instructions.size();
399 if (instructions_count <= 2) {
400 return;
401 }
402 const size_t first_element_index = rng.next() % instructions_count;
403 size_t second_element_index = rng.next() % instructions_count;
404 if (first_element_index == second_element_index) {
405 second_element_index = (second_element_index + 1) % instructions_count;
406 }
407 std::iter_swap(instructions.begin() + static_cast<int>(first_element_index),
408 instructions.begin() + static_cast<int>(second_element_index));
409 }
410
418    static void deleteInstructions(std::vector<typename T::Instruction>& instructions,
419                                   FastRandom& rng,
420 HavocSettings& havoc_settings)
421 {
422
423 const size_t instructions_count = instructions.size();
424 if (instructions_count == 0) {
425 return;
426 }
427 if ((rng.next() & 1) != 0U) {
428 instructions.erase(instructions.begin() + (rng.next() % instructions_count));
429 } else {
430            // We take the logarithm of the number of instructions and subtract 1 so that at most half are deleted
431 const size_t max_deletion_log =
432 std::min(static_cast<size_t>(64 - __builtin_clzll(static_cast<uint64_t>(instructions_count)) - 1),
433 havoc_settings.ST_MUT_MAXIMUM_DELETION_LOG);
434
435 if (max_deletion_log == 0) {
436 return;
437 }
438 const size_t deletion_size = 1 << (rng.next() % max_deletion_log);
439 const size_t start = rng.next() % (instructions_count + 1 - deletion_size);
440 instructions.erase(instructions.begin() + static_cast<int>(start),
441 instructions.begin() + static_cast<int>(start + deletion_size));
442 }
443 }
451    static void duplicateInstruction(std::vector<typename T::Instruction>& instructions,
452                                     FastRandom& rng,
453 HavocSettings& havoc_settings)
454 {
455 const size_t instructions_count = instructions.size();
456 if (instructions_count == 0) {
457 return;
458 }
459 const size_t duplication_size = 1 << (rng.next() % havoc_settings.ST_MUT_MAXIMUM_DUPLICATION_LOG);
460 typename T::Instruction chosen_instruction = instructions[rng.next() % instructions_count];
461 instructions.insert(
462 instructions.begin() + (rng.next() % (instructions_count + 1)), duplication_size, chosen_instruction);
463 }
464    static void insertRandomInstruction(std::vector<typename T::Instruction>& instructions,
465                                        FastRandom& rng,
466 HavocSettings& havoc_settings)
467 {
468 (void)havoc_settings;
469 instructions.insert(instructions.begin() + static_cast<int>(rng.next() % (instructions.size() + 1)),
470 T::Instruction::template generateRandom<FastRandom>(rng));
471 }
479    static void mutateInstructionStructure(std::vector<typename T::Instruction>& instructions,
480                                           FastRandom& rng,
481 HavocSettings& havoc_settings)
482 {
483 const size_t structural_mutators_count = havoc_settings.structural_mutation_distribution.size();
484 const size_t prob_pool = havoc_settings.structural_mutation_distribution[structural_mutators_count - 1];
485 const size_t choice = rng.next() % prob_pool;
486 if (choice < havoc_settings.structural_mutation_distribution[0]) {
487 deleteInstructions(instructions, rng, havoc_settings);
488 } else if (choice < havoc_settings.structural_mutation_distribution[1]) {
489
490 duplicateInstruction(instructions, rng, havoc_settings);
491 } else if (choice < havoc_settings.structural_mutation_distribution[2]) {
492 insertRandomInstruction(instructions, rng, havoc_settings);
493 } else {
494
495 swapTwoInstructions(instructions, rng);
496 }
497 }
505    static void mutateInstructionValue(std::vector<typename T::Instruction>& instructions,
506                                       FastRandom& rng,
507 HavocSettings& havoc_settings)
508 {
509
510 const size_t instructions_count = instructions.size();
511 if (instructions_count == 0) {
512 return;
513 }
514 const size_t chosen = rng.next() % instructions_count;
515 instructions[chosen] =
516 T::Instruction::template mutateInstruction<FastRandom>(instructions[chosen], rng, havoc_settings);
517 }
518
519    static void mutateInstructionVector(std::vector<typename T::Instruction>& instructions, FastRandom& rng)
520    {
521#ifdef HAVOC_TESTING
522 // If we are testing which havoc settings are best, then we use global parameters
523 const size_t mutation_count = 1 << fuzzer_havoc_settings.GEN_MUTATION_COUNT_LOG;
524#else
525 const size_t mutation_count = 1 << T::HavocConfig::MUTATION_COUNT_LOG;
526 HavocSettings fuzzer_havoc_settings;
527 // FILL the values
528#endif
529 for (size_t i = 0; i < mutation_count; i++) {
530 uint32_t val = rng.next();
531 if ((val % (fuzzer_havoc_settings.GEN_STRUCTURAL_MUTATION_PROBABILITY +
532 fuzzer_havoc_settings.GEN_VALUE_MUTATION_PROBABILITY)) <
533 fuzzer_havoc_settings.GEN_STRUCTURAL_MUTATION_PROBABILITY) {
534 // mutate structure
535 mutateInstructionStructure(instructions, rng, fuzzer_havoc_settings);
536 } else {
537                // mutate the value of a single instruction
538
539 mutateInstructionValue(instructions, rng, fuzzer_havoc_settings);
540 }
541 }
542 }
543
544 public:
553    static std::vector<typename T::Instruction> crossoverInstructionVector(
554        const std::vector<typename T::Instruction>& vecA,
555        const std::vector<typename T::Instruction>& vecB,
556        FastRandom& rng)
557 {
558 // Get vector sizes
559 const size_t vecA_size = vecA.size();
560 const size_t vecB_size = vecB.size();
561 // If one of them is empty, just return the other one
562 if (vecA_size == 0) {
563 return vecB;
564 }
565 if (vecB_size == 0) {
566 return vecA;
567 }
568        std::vector<typename T::Instruction> result;
569        // Choose the size of the resulting vector
570 const size_t final_result_size = rng.next() % (vecA_size + vecB_size) + 1;
571 size_t indexA = 0;
572 size_t indexB = 0;
573 size_t* inIndex = &indexA;
574 size_t inSize = vecA_size;
575 auto inIterator = vecA.begin();
576 size_t current_result_size = 0;
577 bool currentlyUsingA = true;
578        // We alternately copy a randomly sized run of elements from one vector, then from the other
579 while (current_result_size < final_result_size && (indexA < vecA_size || indexB < vecB_size)) {
580 // Get the size left
581 size_t result_size_left = final_result_size - current_result_size;
582 // If we can still read from this vector
583 if (*inIndex < inSize) {
584 // Get the size left in this vector and in the output vector and pick the lowest
585 size_t inSizeLeft = inSize - *inIndex;
586 size_t maxExtraSize = std::min(result_size_left, inSizeLeft);
587 if (maxExtraSize != 0) {
588 // If not zero, get a random number of elements from input
589 size_t copySize = (rng.next() % maxExtraSize) + 1;
590 result.insert(result.begin() + static_cast<long>(current_result_size),
591 inIterator + static_cast<long>((*inIndex)),
592
593 inIterator + static_cast<long>((*inIndex) + copySize));
594 // Update indexes and sizes
595 *inIndex += copySize;
596 current_result_size += copySize;
597 }
598 }
599 // Switch input vector
600 inIndex = currentlyUsingA ? &indexB : &indexA;
601 inSize = currentlyUsingA ? vecB_size : vecA_size;
602 inIterator = currentlyUsingA ? vecB.begin() : vecA.begin();
603 currentlyUsingA = !currentlyUsingA;
604 }
605 // Return spliced vector
606 return result;
607 }
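// Example: with vecA = [A0, A1, A2] and vecB = [B0, B1], one possible splice (depending on the rng)
// is [A0, A1, B0, A2]: runs of random length are copied alternately from each parent until
// final_result_size elements have been gathered or both inputs are exhausted.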
615    static std::vector<typename T::Instruction> parseDataIntoInstructions(const uint8_t* Data, size_t Size)
616    {
617 std::vector<typename T::Instruction> fuzzingInstructions;
618 // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
619 auto* pData = const_cast<uint8_t*>(Data);
620 size_t size_left = Size;
621 while (size_left != 0) {
622 uint8_t chosen_operation = *pData;
623 size_left -= 1;
624 pData++;
625 // If the opcode is enabled (exists and arguments' size is not -1), check if it's the right opcode. If it
626 // is, parse it with a designated function
627#define PARSE_OPCODE(name) \
628 if constexpr (requires { T::ArgSizes::name; }) \
629 if constexpr (T::ArgSizes::name != size_t(-1)) { \
630 if (chosen_operation == T::Instruction::OPCODE::name) { \
631 if (size_left < T::ArgSizes::name) { \
632 return fuzzingInstructions; \
633 } \
634 fuzzingInstructions.push_back( \
635 T::Parser::template parseInstructionArgs<T::Instruction::OPCODE::name>(pData)); \
636 size_left -= T::ArgSizes::name; \
637 pData += T::ArgSizes::name; \
638 continue; \
639 } \
640 }
641        // Create handlers for all opcodes that are in ArgSizes
642#define PARSE_ALL_OPCODES(...) FOR_EACH(PARSE_OPCODE, __VA_ARGS__)
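// The PARSE_ALL_OPCODES(ALL_POSSIBLE_OPCODES) invocation below stamps out one PARSE_OPCODE block per
// opcode name; each block is compiled only if T::ArgSizes declares that opcode, checks the opcode byte,
// bounds-checks the remaining input and then parses the instruction arguments with T::Parser.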
643
644        PARSE_ALL_OPCODES(ALL_POSSIBLE_OPCODES)
645    }
646 return fuzzingInstructions;
647 }
656    static size_t writeInstructionsToBuffer(std::vector<typename T::Instruction>& instructions,
657                                            uint8_t* Data,
658 size_t MaxSize)
659 {
660 uint8_t* pData = Data;
661 size_t size_left = MaxSize;
662 for (auto& instruction : instructions) {
663 // If the opcode is enabled and it's this opcode, use a designated function to serialize it
664#define WRITE_OPCODE_IF(name) \
665 if constexpr (requires { T::ArgSizes::name; }) \
666 if constexpr (T::ArgSizes::name != (size_t)-1) { \
667 if (instruction.id == T::Instruction::OPCODE::name) { \
668 if (size_left >= (T::ArgSizes::name + 1)) { \
669 T::Parser::template writeInstruction<T::Instruction::OPCODE::name>(instruction, pData); \
670 size_left -= (T::ArgSizes::name + 1); \
671 pData += (T::ArgSizes::name + 1); \
672 } else { \
673 return MaxSize - size_left; \
674 } \
675 continue; \
676 } \
677 }
678        // Create handlers for all opcodes that are in ArgSizes
679#define WRITE_ALL_OPCODES(...) FOR_EACH(WRITE_OPCODE_IF, __VA_ARGS__)
680
681            WRITE_ALL_OPCODES(ALL_POSSIBLE_OPCODES)
682        }
683 return MaxSize - size_left;
684 }
685
692 template <typename Composer>
693 // TODO(@Rumata888)(from Zac: maybe try to fix? not comfortable refactoring this myself. Issue #1807)
694 // NOLINTNEXTLINE(readability-function-size, google-readability-function-size)
695    static void executeInstructions(std::vector<typename T::Instruction>& instructions)
696        requires CheckableComposer<Composer>
697    {
698 typename T::ExecutionState state;
699 Composer composer = Composer();
700 // This is a global variable, so that the execution handling class could alter it and signal to the input tester
701 circuit_should_fail = false;
702 size_t total_instruction_weight = 0;
703 (void)total_instruction_weight;
704 for (auto& instruction : instructions) {
705 // If instruction enabled and this is it, delegate to the handler
706#define EXECUTE_OPCODE_IF(name) \
707 if constexpr (requires { T::ArgSizes::name; }) \
708 if constexpr (T::ArgSizes::name != size_t(-1)) { \
709 if (instruction.id == T::Instruction::OPCODE::name) { \
710 if constexpr (InstructionWeightsEnabled<T>) { \
711 if (!((total_instruction_weight + T::InstructionWeights::name) > T::InstructionWeights::_LIMIT)) { \
712 total_instruction_weight += T::InstructionWeights::name; \
713 if (T::ExecutionHandler::execute_##name(&composer, state, instruction)) { \
714 return; \
715 } \
716 } else { \
717 return; \
718 } \
719 } else { \
720 \
721 if (T::ExecutionHandler::execute_##name(&composer, state, instruction)) { \
722 return; \
723 } \
724 } \
725 } \
726 }
727#define EXECUTE_ALL_OPCODES(...) FOR_EACH(EXECUTE_OPCODE_IF, __VA_ARGS__)
728
729            EXECUTE_ALL_OPCODES(ALL_POSSIBLE_OPCODES)
730        }
731 bool final_value_check = true;
732        // If there is a postprocessing function, use it
733        if constexpr (PostProcessingEnabled<T, Composer, typename T::ExecutionState>) {
734            final_value_check = T::postProcess(&composer, state);
735
736#ifdef FUZZING_SHOW_INFORMATION
737 if (!final_value_check) {
738 std::cerr << "Final value check failed" << std::endl;
739 }
740#endif
741 }
742 bool check_result = bb::CircuitChecker::check(composer) && final_value_check;
743#ifndef FUZZING_DISABLE_WARNINGS
744        if (circuit_should_fail) {
745            info("circuit should fail");
746 }
747#endif
748 // If the circuit is correct, but it should fail, abort
749 if (check_result && circuit_should_fail) {
750 abort();
751 }
753        // If the circuit is incorrect but was not flagged as expected to fail, abort
753 if ((!check_result) && (!circuit_should_fail)) {
754 if (!final_value_check) {
755 std::cerr << "Final value check failed" << std::endl;
756 } else {
757 std::cerr << "Circuit failed" << std::endl;
758 }
759
760 abort();
761 }
762 }
763
772 static size_t MutateInstructionBuffer(uint8_t* Data, size_t Size, size_t MaxSize, FastRandom& rng)
773 {
774        // Parse the buffer into a vector of instructions
775        std::vector<typename T::Instruction> instructions = parseDataIntoInstructions(Data, Size);
776        // Mutate the vector of instructions
777 mutateInstructionVector(instructions, rng);
778 // Serialize the vector of instructions back to buffer
779 return writeInstructionsToBuffer(instructions, Data, MaxSize);
780 }
781};
782
783template <template <typename> class Fuzzer, typename Composer>
784constexpr void RunWithBuilder(const uint8_t* Data, const size_t Size, FastRandom& VarianceRNG)
785{
787 auto instructions = ArithmeticFuzzHelper<Fuzzer<Composer>>::parseDataIntoInstructions(Data, Size);
788 ArithmeticFuzzHelper<Fuzzer<Composer>>::template executeInstructions<Composer>(instructions);
789}
790
791template <template <typename> class Fuzzer, uint64_t Composers>
792constexpr void RunWithBuilders(const uint8_t* Data, const size_t Size, FastRandom& VarianceRNG)
793{
794 RunWithBuilder<Fuzzer, bb::UltraCircuitBuilder>(Data, Size, VarianceRNG);
795}
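// A minimal harness sketch (an assumption about how these helpers are wired up, not part of this
// header; MyArithmeticFuzzer is a hypothetical class template satisfying ArithmeticFuzzHelperConstraint):
//   extern "C" int LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size)
//   {
//       RunWithBuilders<MyArithmeticFuzzer, 1>(Data, Size, VarianceRNG);
//       return 0;
//   }
//   extern "C" size_t LLVMFuzzerCustomMutator(uint8_t* Data, size_t Size, size_t MaxSize, unsigned int Seed)
//   {
//       FastRandom rng(Seed);
//       return ArithmeticFuzzHelper<MyArithmeticFuzzer<bb::UltraCircuitBuilder>>::MutateInstructionBuffer(
//           Data, Size, MaxSize, rng);
//   }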
796
797// NOLINTEND(cppcoreguidelines-macro-usage, google-runtime-int)
#define BB_ASSERT_LTE(left, right,...)
Definition assert.hpp:152
bb::field< bb::Bn254FrParams > FF
Definition field.cpp:22
FastRandom VarianceRNG(0)
bool circuit_should_fail
A templated class containing most of the fuzzing logic for a generic Arithmetic class.
Definition fuzzer.hpp:388
static void mutateInstructionVector(std::vector< typename T::Instruction > &instructions, FastRandom &rng)
Definition fuzzer.hpp:519
static size_t writeInstructionsToBuffer(std::vector< typename T::Instruction > &instructions, uint8_t *Data, size_t MaxSize)
Write instructions into the buffer until there are no instructions left or there is no more space.
Definition fuzzer.hpp:656
static void duplicateInstruction(std::vector< typename T::Instruction > &instructions, FastRandom &rng, HavocSettings &havoc_settings)
Mutator duplicating an instruction.
Definition fuzzer.hpp:451
static std::vector< typename T::Instruction > parseDataIntoInstructions(const uint8_t *Data, size_t Size)
Parses a given data buffer into a vector of instructions for testing the arithmetic.
Definition fuzzer.hpp:615
static void swapTwoInstructions(std::vector< typename T::Instruction > &instructions, FastRandom &rng)
Mutator swapping two instructions together.
Definition fuzzer.hpp:396
static size_t MutateInstructionBuffer(uint8_t *Data, size_t Size, size_t MaxSize, FastRandom &rng)
Interpret the data buffer as a series of arithmetic instructions and mutate it accordingly.
Definition fuzzer.hpp:772
static std::vector< typename T::Instruction > crossoverInstructionVector(const std::vector< typename T::Instruction > &vecA, const std::vector< typename T::Instruction > &vecB, FastRandom &rng)
Splice two instruction vectors into one randomly.
Definition fuzzer.hpp:553
static void deleteInstructions(std::vector< typename T::Instruction > &instructions, FastRandom &rng, HavocSettings &havoc_settings)
Mutator, deleting a sequence of instructions.
Definition fuzzer.hpp:418
static void executeInstructions(std::vector< typename T::Instruction > &instructions)
Execute instructions in a loop.
Definition fuzzer.hpp:695
static void insertRandomInstruction(std::vector< typename T::Instruction > &instructions, FastRandom &rng, HavocSettings &havoc_settings)
Definition fuzzer.hpp:464
static void mutateInstructionValue(std::vector< typename T::Instruction > &instructions, FastRandom &rng, HavocSettings &havoc_settings)
Choose a random instruction from the vector and mutate it.
Definition fuzzer.hpp:505
static void mutateInstructionStructure(std::vector< typename T::Instruction > &instructions, FastRandom &rng, HavocSettings &havoc_settings)
Mutator for instruction structure.
Definition fuzzer.hpp:479
Class for quickly and deterministically creating new random values. We don't care much about the distribution...
Definition fuzzer.hpp:63
FastRandom(uint32_t seed)
Definition fuzzer.hpp:67
uint32_t state
Definition fuzzer.hpp:64
void reseed(uint32_t seed)
Definition fuzzer.hpp:75
uint32_t next()
Definition fuzzer.hpp:68
static bool check(const Builder &circuit)
Check that the witness satisfies the circuit.
void info(Args... args)
Definition log.hpp:75
Concept specifying the class used by the fuzzer.
Definition fuzzer.hpp:242
Fuzzer uses only composers with check_circuit function.
Definition fuzzer.hpp:256
Concept for Havoc Configurations.
Definition fuzzer.hpp:230
Concept for forcing ArgumentSizes to be size_t.
Definition fuzzer.hpp:201
This concept is used when we want to limit the number of executions of certain instructions (for exam...
Definition fuzzer.hpp:279
The fuzzer can use a postprocessing function that is specific to the type being fuzzed.
Definition fuzzer.hpp:268
Concept for a simple PRNG which returns a uint32_t when next is called.
Definition fuzzer.hpp:192
const std::vector< MemoryValue > data
StrictMock< MockContext > context
FF a
#define INV_MONT_CONVERSION_LOCAL
#define WRITE_ALL_OPCODES(...)
uint256_t read_uint256(const uint8_t *data, size_t buffer_size=32)
Definition fuzzer.hpp:106
size_t LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize)
void debug_log(Args &&... args)
Compile-time debug logging helper.
Definition fuzzer.hpp:127
constexpr void RunWithBuilder(const uint8_t *Data, const size_t Size, FastRandom &VarianceRNG)
Definition fuzzer.hpp:784
#define EXECUTE_ALL_OPCODES(...)
#define PARSE_ALL_OPCODES(...)
#define ALL_POSSIBLE_OPCODES
Definition fuzzer.hpp:20
constexpr bool SHOW_FUZZING_INFO
Definition fuzzer.hpp:123
#define MONT_CONVERSION_LOCAL
constexpr void RunWithBuilders(const uint8_t *Data, const size_t Size, FastRandom &VarianceRNG)
Definition fuzzer.hpp:792
Instruction instruction
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
std::string to_string(bb::avm2::ValueTag tag)
size_t GEN_VALUE_MUTATION_PROBABILITY
Definition fuzzer.hpp:33
size_t ST_MUT_MAXIMUM_DELETION_LOG
Definition fuzzer.hpp:37
size_t GEN_MUTATION_COUNT_LOG
Definition fuzzer.hpp:29
size_t ST_MUT_MAXIMUM_DUPLICATION_LOG
Definition fuzzer.hpp:38
size_t VAL_MUT_LLVM_MUTATE_PROBABILITY
Definition fuzzer.hpp:40
size_t ST_MUT_DELETION_PROBABILITY
Definition fuzzer.hpp:34
size_t VAL_MUT_SMALL_ADDITION_PROBABILITY
Definition fuzzer.hpp:45
size_t ST_MUT_DUPLICATION_PROBABILITY
Definition fuzzer.hpp:35
size_t VAL_MUT_SPECIAL_VALUE_PROBABILITY
Definition fuzzer.hpp:46
std::vector< size_t > value_mutation_distribution
Definition fuzzer.hpp:49
size_t GEN_STRUCTURAL_MUTATION_PROBABILITY
Definition fuzzer.hpp:31
size_t VAL_MUT_MONTGOMERY_PROBABILITY
Definition fuzzer.hpp:41
size_t VAL_MUT_NON_MONTGOMERY_PROBABILITY
Definition fuzzer.hpp:43
size_t ST_MUT_SWAP_PROBABILITY
Definition fuzzer.hpp:39
size_t GEN_LLVM_POST_MUTATION_PROB
Definition fuzzer.hpp:28
std::vector< size_t > structural_mutation_distribution
Definition fuzzer.hpp:47
size_t ST_MUT_INSERTION_PROBABILITY
Definition fuzzer.hpp:36
static constexpr field get_root_of_unity(size_t subgroup_size) noexcept
static constexpr uint256_t modulus
constexpr std::pair< bool, field > sqrt() const noexcept
Compute square root of the field element.