Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
cycle_group.fuzzer.hpp
Go to the documentation of this file.
1// === AUDIT STATUS ===
2// internal: { status: not started, auditors: [], date: YYYY-MM-DD }
3// external_1: { status: not started, auditors: [], date: YYYY-MM-DD }
4// external_2: { status: not started, auditors: [], date: YYYY-MM-DD }
5// =====================
6
35#pragma once
40#pragma clang diagnostic push
41
// -Wc99-designator warns about designated and nested designated initializers
// in struct initializations,
// such as {.in.first = a, .out = b}, since they are not part of the C++17 standard.
// However, using them in this particular file heavily increases
// the readability and conciseness of the CycleGroupBase::Instruction initializations
47#pragma clang diagnostic ignored "-Wc99-designator"
48
49#define HAVOC_TESTING
50
51// This is a global variable, so that the execution handling class could alter it and signal to the input tester
52// that the input should fail
54
56
57// #define FUZZING_SHOW_INFORMATION
58
59// #define DISABLE_MULTIPLICATION
60// #define DISABLE_BATCH_MUL
62
// Bounds on how many (point, scalar) pairs a single batch-mul instruction may contain
constexpr size_t MINIMUM_MUL_ELEMENTS = 0;
constexpr size_t MAXIMUM_MUL_ELEMENTS = 8;

// This is an external function in Libfuzzer used internally by custom mutators
extern "C" size_t LLVMFuzzerMutate(uint8_t* Data, size_t Size, size_t MaxSize);
68
76enum class SpecialScalarValue : uint8_t {
77 One = 0,
81 RootOfUnity13, // 13th root of unity (arbitrary small root)
82 Two, // Small even number
83 HalfModulus, // (p-1)/2
84 Zero,
85 COUNT // Sentinel value for total count
86};
87
88// Number of special values excluding Zero
89constexpr uint8_t SPECIAL_VALUE_COUNT_NO_ZERO = static_cast<uint8_t>(SpecialScalarValue::Zero);
90
91// Number of special values including Zero
92constexpr uint8_t SPECIAL_VALUE_COUNT = static_cast<uint8_t>(SpecialScalarValue::COUNT);
93
101{
102 switch (type) {
104 return FF::one();
106 return -FF::one();
108 return FF::one().sqrt().second;
110 return FF::one().sqrt().second.invert();
112 return FF::get_root_of_unity(13);
114 return FF(2);
116 return FF((FF::modulus - 1) / 2);
118 return FF::zero();
120 // Fallthrough to abort - COUNT should never be used as a value
121 default:
122 abort(); // Invalid enum value
123 }
124}
125
129template <typename Builder> class CycleGroupBase {
130 private:
138 using GroupElement = typename Curve::Element;
141 using BaseField = typename Curve::BaseField;
142
143 public:
145 public:
146 enum OPCODE {
157#ifndef DISABLE_MULTIPLICATION
159#endif
160#ifndef DISABLE_BATCH_MUL
162#endif
164 _LAST
165 };
166
167 struct Element {
168 Element(ScalarField s = ScalarField::one(), GroupElement g = GroupElement::one())
169 : scalar(s)
170 , value(g) {};
173 };
174
        // Arguments for single-input/single-output instructions (e.g. DBL, NEG, SET)
        struct TwoArgs {
            uint8_t in;  // stack index of the input element
            uint8_t out; // stack index of the output (appended if past the end of the stack)
        };
179
180 struct MulArgs {
181 uint8_t in;
182 uint8_t out;
184 };
185
        // Arguments for two-input/single-output instructions (ADD, SUBTRACT)
        struct ThreeArgs {
            uint8_t in1; // stack index of the first input
            uint8_t in2; // stack index of the second input
            uint8_t out; // stack index of the output
        };
191
        // Arguments for three-input/single-output instructions (COND_ASSIGN)
        struct FourArgs {
            uint8_t in1; // stack index of the first input
            uint8_t in2; // stack index of the second input
            uint8_t in3; // stack index of the third input
            uint8_t out; // stack index of the output
        };
198
205
217
218 // The type of the instruction
220
221 // Instruction arguments
223
231 template <typename T>
232 inline static Instruction generateRandom(T& rng)
233 requires SimpleRng<T>
234 {
235 OPCODE instruction_opcode = static_cast<OPCODE>(rng.next() % (OPCODE::_LAST));
236 uint8_t in, in1, in2, in3, out;
237 Instruction instr;
238
239 switch (instruction_opcode) {
240 case OPCODE::CONSTANT:
241 case OPCODE::WITNESS:
243 auto scalar = ScalarField(static_cast<uint64_t>(fast_log_distributed_uint256(rng)));
244 auto el = GroupElement::one() * scalar;
245 return { .id = instruction_opcode, .arguments.element = Element(scalar, el) };
246 }
247 case OPCODE::DBL:
248 case OPCODE::NEG:
250 case OPCODE::SET:
251 in = static_cast<uint8_t>(rng.next() & 0xff);
252 out = static_cast<uint8_t>(rng.next() & 0xff);
253 return { .id = instruction_opcode, .arguments.twoArgs = { .in = in, .out = out } };
254 case OPCODE::ADD:
255 case OPCODE::SUBTRACT:
256 in1 = static_cast<uint8_t>(rng.next() & 0xff);
257 in2 = static_cast<uint8_t>(rng.next() & 0xff);
258 out = static_cast<uint8_t>(rng.next() & 0xff);
259 return { .id = instruction_opcode,
260 .arguments.threeArgs.in1 = in1,
261 .arguments.threeArgs.in2 = in2,
262 .arguments.threeArgs.out = out };
264 in1 = static_cast<uint8_t>(rng.next() & 0xff);
265 in2 = static_cast<uint8_t>(rng.next() & 0xff);
266 in3 = static_cast<uint8_t>(rng.next() & 0xff);
267 out = static_cast<uint8_t>(rng.next() & 0xff);
268 return { .id = instruction_opcode,
269 .arguments.fourArgs.in1 = in1,
270 .arguments.fourArgs.in2 = in2,
271 .arguments.fourArgs.in3 = in3,
272 .arguments.fourArgs.out = out };
273#ifndef DISABLE_MULTIPLICATION
274 case OPCODE::MULTIPLY:
275 in = static_cast<uint8_t>(rng.next() & 0xff);
276 out = static_cast<uint8_t>(rng.next() & 0xff);
277 return { .id = instruction_opcode,
278 .arguments.mulArgs.scalar = ScalarField(fast_log_distributed_uint256(rng)),
279 .arguments.mulArgs.in = in,
280 .arguments.mulArgs.out = out };
281#endif
282#ifndef DISABLE_BATCH_MUL
283 case OPCODE::BATCH_MUL: {
284 uint8_t mult_size0 =
286 static_cast<uint8_t>(rng.next() % ((MAXIMUM_MUL_ELEMENTS - MINIMUM_MUL_ELEMENTS) / 2));
287 uint8_t mult_size1 =
289 static_cast<uint8_t>(rng.next() % ((MAXIMUM_MUL_ELEMENTS - MINIMUM_MUL_ELEMENTS) / 2));
290 uint8_t mult_size =
291 mult_size0 +
292 mult_size1; // Sample the amount of batch mul participants from the binomial distribution
293 instr.id = instruction_opcode;
294 instr.arguments.batchMulArgs.add_elements_count = mult_size;
295 for (size_t i = 0; i < mult_size; i++) {
296 instr.arguments.batchMulArgs.inputs[i] = static_cast<uint8_t>(rng.next() & 0xff);
297 }
298 for (size_t i = 0; i < mult_size; i++) {
299 instr.arguments.batchMulArgs.scalars[i] = ScalarField(fast_log_distributed_uint256(rng));
300 }
301 instr.arguments.batchMulArgs.output_index = static_cast<uint8_t>(rng.next() & 0xff);
302 return instr;
303 }
304#endif
306 return { .id = instruction_opcode, .arguments.randomseed = rng.next() * rng.next() };
307
308 default:
309 abort(); // We missed some instructions in switch
310 }
311 }
312
316 template <typename FF> inline static uint256_t to_uint256_montgomery(const FF& value, bool as_montgomery)
317 {
318 if (as_montgomery) {
320 }
321 return uint256_t(value);
322 }
323
327 template <typename FF> inline static FF from_uint256_montgomery(const uint256_t& data, bool from_montgomery)
328 {
329 if (from_montgomery) {
330 return FF(data).from_montgomery_form();
331 }
332 return FF(data);
333 }
334
        /**
         * @brief Mutate an Element (a scalar together with its matching group element)
         *
         * The point itself cannot be mutated arbitrarily (it must remain on the curve),
         * so the scalar is mutated and the point is updated to stay consistent with it.
         *
         * @param e            The element being mutated (taken by value, mutated copy returned)
         * @param rng          Pseudorandom number generator
         * @param havoc_config Mutation probabilities and the mutation-type distribution
         * @return The mutated element
         */
        template <typename T>
        inline static Element mutateGroupElement(Element e, T& rng, HavocSettings& havoc_config)
            requires SimpleRng<T>
        {
            // We can't just randomly modify a point on a curve
            // But we can modify its scalar
            // With a certain probability, we apply changes to the Montgomery form, rather than the plain form. This
            // has merit, since the computation is performed in montgomery form and comparisons are often performed
            // in it, too.
            // By the same logic we can switch between Jacobian and Affine coordinates.
            // Libfuzzer comparison tracing logic can then be enabled in Montgomery form
            bool convert_to_montgomery = (rng.next() % (havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY +
                                                        havoc_config.VAL_MUT_NON_MONTGOMERY_PROBABILITY)) <
                                         havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY;
            bool normalize = (rng.next() % (havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY +
                                            havoc_config.VAL_MUT_NON_MONTGOMERY_PROBABILITY)) <
                             havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY;
            uint256_t value_data;

            // Pick the last value from the mutation distribution vector (the cumulative total weight)
            const size_t mutation_type_count = havoc_config.value_mutation_distribution.size();
            // Choose mutation
            const size_t choice = rng.next() % havoc_config.value_mutation_distribution[mutation_type_count - 1];
            if (choice < havoc_config.value_mutation_distribution[0]) {
                // Delegate mutation to libfuzzer (bit/byte mutations, autodictionary, etc)
                value_data = to_uint256_montgomery(e.scalar, convert_to_montgomery);
                LLVMFuzzerMutate((uint8_t*)&value_data, sizeof(uint256_t), sizeof(uint256_t));
                e.scalar = from_uint256_montgomery<ScalarField>(value_data, convert_to_montgomery);
                // Recompute the point so it matches the mutated scalar
                e.value = GroupElement::one() * e.scalar;
            } else if (choice < havoc_config.value_mutation_distribution[1]) {
                // Small addition/subtraction
                if (convert_to_montgomery) {
                    e.scalar = e.scalar.to_montgomery_form();
                }
                auto extra = ScalarField(rng.next() & 0xff);

                // With 50% probability we add/sub a small value
                if (rng.next() & 1) {
                    auto switch_sign = static_cast<bool>(rng.next() & 1);
                    if (!switch_sign) {
                        e.scalar += extra;
                        e.value += GroupElement::one() * extra;
                    } else {
                        e.scalar -= extra;
                        e.value -= GroupElement::one() * extra;
                    }
                } else {
                    // otherwise we multiply by a small value
                    e.scalar *= extra;
                    e.value *= extra;
                }
                if (normalize) {
                    e.value = e.value.normalize();
                }
                if (convert_to_montgomery) {
                    e.scalar = e.scalar.from_montgomery_form();
                }
            } else if (choice < havoc_config.value_mutation_distribution[2]) {
                if (convert_to_montgomery) {
                    e.scalar = e.scalar.to_montgomery_form();
                }
                // Substitute scalar element with a special value
                auto special_value = static_cast<SpecialScalarValue>(rng.next() % SPECIAL_VALUE_COUNT);
                e.scalar = get_special_scalar_value<ScalarField>(special_value);
                // NOTE(review): unlike the other branches, this converts *to* Montgomery form again
                // (rather than back from it), so the stored plain-form scalar is the Montgomery image
                // of the special value — presumably intentional for fuzzing coverage; confirm.
                if (convert_to_montgomery) {
                    e.scalar = e.scalar.to_montgomery_form();
                }
                e.value = GroupElement::one() * e.scalar;
            }
            // Return value
            return e;
        }
        /**
         * @brief Mutate a raw ScalarField value
         *
         * Used for standalone multiplier scalars that have no associated group element
         * (MULTIPLY and BATCH_MUL arguments).
         *
         * @param e            The scalar being mutated (taken by value, mutated copy returned)
         * @param rng          Pseudorandom number generator
         * @param havoc_config Mutation probabilities and the mutation-type distribution
         * @return The mutated scalar
         */
        template <typename T>
        inline static ScalarField mutateScalarElement(ScalarField e, T& rng, HavocSettings& havoc_config)
            requires SimpleRng<T>
        {
            // With a certain probability, we apply changes to the Montgomery form, rather than the plain form. This
            // has merit, since the computation is performed in montgomery form and comparisons are often performed
            // in it, too.
            // Libfuzzer comparison tracing logic can then be enabled in Montgomery form
            bool convert_to_montgomery = (rng.next() % (havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY +
                                                        havoc_config.VAL_MUT_NON_MONTGOMERY_PROBABILITY)) <
                                         havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY;
            uint256_t value_data;

            // Pick the last value from the mutation distribution vector (the cumulative total weight)
            const size_t mutation_type_count = havoc_config.value_mutation_distribution.size();
            // Choose mutation
            const size_t choice = rng.next() % havoc_config.value_mutation_distribution[mutation_type_count - 1];
            if (choice < havoc_config.value_mutation_distribution[0]) {
                // Delegate mutation to libfuzzer (bit/byte mutations, autodictionary, etc)
                value_data = to_uint256_montgomery(e, convert_to_montgomery);
                LLVMFuzzerMutate((uint8_t*)&value_data, sizeof(uint256_t), sizeof(uint256_t));
                e = from_uint256_montgomery<ScalarField>(value_data, convert_to_montgomery);
            } else if (choice < havoc_config.value_mutation_distribution[1]) {
                // Small addition/subtraction
                if (convert_to_montgomery) {
                    e = e.to_montgomery_form();
                }
                auto extra = ScalarField(rng.next() & 0xff);

                // With 50% probability we add/sub a small value
                if (rng.next() & 1) {
                    auto switch_sign = static_cast<bool>(rng.next() & 1);
                    if (!switch_sign) {
                        e += extra;
                    } else {
                        e -= extra;
                    }
                } else {
                    // otherwise we multiply by a small value
                    e *= extra;
                }
                if (convert_to_montgomery) {
                    e = e.from_montgomery_form();
                }
            } else if (choice < havoc_config.value_mutation_distribution[2]) {
                if (convert_to_montgomery) {
                    e = e.to_montgomery_form();
                }
                // Substitute scalar element with a special value excluding zero
                // I think that zeros from mutateGroupElement are enough zeros produced
                auto special_value = static_cast<SpecialScalarValue>(rng.next() % SPECIAL_VALUE_COUNT_NO_ZERO);
                e = get_special_scalar_value<ScalarField>(special_value);
                // NOTE(review): unlike the other branches, this converts *to* Montgomery form again
                // (rather than back from it) — presumably intentional for fuzzing coverage; confirm.
                if (convert_to_montgomery) {
                    e = e.to_montgomery_form();
                }
            }
            // Return value
            return e;
        }
493 template <typename T>
495 requires SimpleRng<T>
496 {
497#define PUT_RANDOM_BYTE_IF_LUCKY(variable) \
498 if (rng.next() & 1) { \
499 variable = rng.next() & 0xff; \
500 }
501 // Depending on instruction type...
502 switch (instruction.id) {
503 case OPCODE::CONSTANT:
504 case OPCODE::WITNESS:
506 // If it represents pushing a value on the stack with a 50% probability randomly sample a bit_range
507 // Maybe mutate the value
508 if (rng.next() & 1) {
509 instruction.arguments.element =
510 mutateGroupElement(instruction.arguments.element, rng, havoc_config);
511 }
512 break;
513
514 case OPCODE::DBL:
515 case OPCODE::NEG:
517 case OPCODE::SET:
518 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.twoArgs.in);
519 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.twoArgs.out);
520 break;
521#ifndef DISABLE_MULTIPLICATION
522 case OPCODE::MULTIPLY:
523 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.mulArgs.in);
524 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.mulArgs.out);
525 if (rng.next() & 1) {
526 instruction.arguments.mulArgs.scalar =
527 mutateScalarElement(instruction.arguments.mulArgs.scalar, rng, havoc_config);
528 }
529 break;
530#endif
531 case OPCODE::ADD:
532 case OPCODE::SUBTRACT:
533 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.threeArgs.in1);
534 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.threeArgs.in2);
535 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.threeArgs.out);
536 break;
538 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.fourArgs.in1);
539 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.fourArgs.in2);
540 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.fourArgs.in3);
541 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.fourArgs.out);
542 break;
543#ifndef DISABLE_BATCH_MUL
545 if (rng.next() & 1) {
546 uint8_t mult_size0 =
548 static_cast<uint8_t>(rng.next() % ((MAXIMUM_MUL_ELEMENTS - MINIMUM_MUL_ELEMENTS) / 2));
549 uint8_t mult_size1 =
551 static_cast<uint8_t>(rng.next() % ((MAXIMUM_MUL_ELEMENTS - MINIMUM_MUL_ELEMENTS) / 2));
552 uint8_t mult_size =
553 mult_size0 +
554 mult_size1; // Sample the amount of batch mul participants from the binomial distribution
555 instruction.arguments.batchMulArgs.add_elements_count = mult_size;
556 }
557 if (instruction.arguments.batchMulArgs.add_elements_count && (rng.next() & 1)) {
558 size_t mut_count =
559 static_cast<uint8_t>(rng.next() % (instruction.arguments.batchMulArgs.add_elements_count));
560 for (size_t i = 0; i < mut_count; i++) {
561 size_t ind =
562 rng.next() % static_cast<size_t>(instruction.arguments.batchMulArgs.add_elements_count);
563 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.batchMulArgs.inputs[ind]);
564 }
565 }
566 if (instruction.arguments.batchMulArgs.add_elements_count && (rng.next() & 1)) {
567 size_t mut_count =
568 static_cast<uint8_t>(rng.next() % (instruction.arguments.batchMulArgs.add_elements_count));
569 for (size_t i = 0; i < mut_count; i++) {
570 size_t ind =
571 rng.next() % static_cast<size_t>(instruction.arguments.batchMulArgs.add_elements_count);
572 instruction.arguments.batchMulArgs.scalars[ind] =
573 mutateScalarElement(instruction.arguments.batchMulArgs.scalars[ind], rng, havoc_config);
574 }
575 }
576 PUT_RANDOM_BYTE_IF_LUCKY(instruction.arguments.batchMulArgs.output_index);
577 break;
578#endif
580 instruction.arguments.randomseed = rng.next();
581 break;
582 default:
583 abort(); // We missed some instructions in switch
584 return instruction;
585 }
586 return instruction;
587 }
588 };
    // We use argsizes to both specify the size of data needed to parse the instruction and to signal that the
    // instruction is enabled (if it is -1, it is disabled)
    class ArgSizes {
      public:
        // Element-pushing instructions carry a full serialized Element
        static constexpr size_t CONSTANT = sizeof(typename Instruction::Element);
        static constexpr size_t WITNESS = sizeof(typename Instruction::Element);
        static constexpr size_t CONSTANT_WITNESS = sizeof(typename Instruction::Element);
        // Two-index instructions: one input index and one output index
        static constexpr size_t DBL = 2;
        static constexpr size_t NEG = 2;
        static constexpr size_t ASSERT_EQUAL = 2;
        static constexpr size_t SET = 2;
        // Three-index instructions: two input indices and one output index
        static constexpr size_t ADD = 3;
        static constexpr size_t SUBTRACT = 3;
        // Four-index instruction: three input indices and one output index
        static constexpr size_t COND_ASSIGN = 4;
#ifndef DISABLE_MULTIPLICATION
        static constexpr size_t MULTIPLY = sizeof(typename Instruction::MulArgs);
#endif
#ifndef DISABLE_BATCH_MUL
        static constexpr size_t BATCH_MUL = sizeof(typename Instruction::BatchMulArgs);
#endif
        // RANDOMSEED carries a raw 32-bit seed
        static constexpr size_t RANDOMSEED = sizeof(uint32_t);
    };
611
618 public:
619 static constexpr size_t SET = 0;
620 static constexpr size_t RANDOMSEED = 0;
621
622 static constexpr size_t CONSTANT = 1;
623 static constexpr size_t WITNESS = 1;
624 static constexpr size_t CONSTANT_WITNESS = 1;
625 static constexpr size_t ADD = 1;
626 static constexpr size_t SUBTRACT = 1;
627 static constexpr size_t DBL = 1;
628 static constexpr size_t NEG = 1;
629 static constexpr size_t COND_ASSIGN = 1;
630
631#ifndef DISABLE_MULTIPLICATION
632 static constexpr size_t MULTIPLY = 2;
633#endif
634 static constexpr size_t ASSERT_EQUAL = 2;
635
636#ifndef DISABLE_BATCH_MUL
637 static constexpr size_t BATCH_MUL = 4;
638#endif
639 static constexpr size_t _LIMIT = 64;
640 };
645 class Parser {
646 public:
654 template <typename Instruction::OPCODE opcode> inline static Instruction parseInstructionArgs(uint8_t* Data)
655 {
656 Instruction instr;
657 instr.id = static_cast<typename Instruction::OPCODE>(opcode);
658 switch (opcode) {
662 auto scalar = ScalarField::serialize_from_buffer(Data);
663 auto el = GroupElement::one() * scalar;
664 instr.arguments.element = typename Instruction::Element(scalar, el);
665 break;
666 }
671 instr.arguments.twoArgs = { .in = *Data, .out = *(Data + 1) };
672 break;
675 instr.arguments.threeArgs = { .in1 = *Data, .in2 = *(Data + 1), .out = *(Data + 2) };
676 break;
678 instr.arguments.fourArgs = { .in1 = *Data, .in2 = *(Data + 1), .in3 = *(Data + 2), .out = *(Data + 3) };
679 break;
680
681#ifndef DISABLE_MULTIPLICATION
683 instr.arguments.mulArgs.in = *Data;
684 instr.arguments.mulArgs.out = *(Data + 1);
685 instr.arguments.mulArgs.scalar = ScalarField::serialize_from_buffer(Data + 2);
686 break;
687#endif
688#ifndef DISABLE_BATCH_MUL
690 // In case of LLVM native instruction mutator
694 }
695 instr.arguments.batchMulArgs.output_index = *(Data + 1);
696
698 memcpy(instr.arguments.batchMulArgs.inputs, Data + 2, n);
699
700 size_t offset = n + 2;
701 for (size_t i = 0; i < n; i++) {
702 instr.arguments.batchMulArgs.scalars[i] = ScalarField::serialize_from_buffer(Data + offset);
703 offset += sizeof(ScalarField);
704 }
705 }
706#endif
708 memcpy(&instr.arguments.randomseed, Data, sizeof(uint32_t));
709 break;
710 default:
711 abort(); // We missed some instructions in switch
712 }
713 return instr;
714 }
722 template <typename Instruction::OPCODE instruction_opcode>
723 inline static void writeInstruction(Instruction& instruction, uint8_t* Data)
724 {
725 *Data = instruction.id;
726 switch (instruction_opcode) {
730 ScalarField::serialize_to_buffer(instruction.arguments.element.scalar, Data + 1);
731 break;
736 *(Data + 1) = instruction.arguments.twoArgs.in;
737 *(Data + 2) = instruction.arguments.twoArgs.out;
738 break;
741 *(Data + 1) = instruction.arguments.threeArgs.in1;
742 *(Data + 2) = instruction.arguments.threeArgs.in2;
743 *(Data + 3) = instruction.arguments.threeArgs.out;
744 break;
746 *(Data + 1) = instruction.arguments.fourArgs.in1;
747 *(Data + 2) = instruction.arguments.fourArgs.in2;
748 *(Data + 3) = instruction.arguments.fourArgs.in3;
749 *(Data + 4) = instruction.arguments.fourArgs.out;
750 break;
751#ifndef DISABLE_MULTIPLICATION
753 *(Data + 1) = instruction.arguments.mulArgs.in;
754 *(Data + 2) = instruction.arguments.mulArgs.out;
755 ScalarField::serialize_to_buffer(instruction.arguments.mulArgs.scalar, Data + 3);
756 break;
757#endif
758#ifndef DISABLE_BATCH_MUL
760 *(Data + 1) = instruction.arguments.batchMulArgs.add_elements_count;
761 *(Data + 2) = instruction.arguments.batchMulArgs.output_index;
762
763 size_t n = instruction.arguments.batchMulArgs.add_elements_count;
764
765 memcpy(Data + 3, instruction.arguments.batchMulArgs.inputs, n);
766 size_t offset = n + 3;
767 for (size_t i = 0; i < n; i++) {
768 ScalarField::serialize_to_buffer(instruction.arguments.batchMulArgs.scalars[i], Data + offset);
769 offset += sizeof(ScalarField);
770 }
771 break;
772 }
773#endif
775 memcpy(Data + 1, &instruction.arguments.randomseed, sizeof(uint32_t));
776 break;
777 default:
778 abort(); // We missed some instructions in switch
779 }
780 return;
781 };
782 };
788 private:
789 static bool_t construct_predicate(Builder* builder, const bool predicate)
790 {
791 /* The context field of a predicate can be nullptr;
792 * in that case, the function that handles the predicate
793 * will use the context of another input parameter
794 */
795 const bool predicate_is_const = static_cast<bool>(VarianceRNG.next() & 1);
796 if (predicate_is_const) {
797 const bool predicate_has_ctx = static_cast<bool>(VarianceRNG.next() % 2);
798 debug_log("bool_t(", (predicate_has_ctx ? "&builder," : "nullptr,"), (predicate ? "true)" : "false)"));
799 return bool_t(predicate_has_ctx ? builder : nullptr, predicate);
800 }
801 debug_log("bool_t(witness_t(&builder, ", (predicate ? "true));" : "false))"));
802 return bool_t(witness_t(builder, predicate));
803 }
804
806 {
807 const bool reconstruct = static_cast<bool>(VarianceRNG.next() % 2);
808 if (!reconstruct) {
809 return this->cycle_group;
810 }
811 return cycle_group_t(this->cycle_group);
812 }
813
814 public:
818
819 ExecutionHandler() = default;
821 : base_scalar(s)
822 , base(g)
823 , cycle_group(w_g)
824 {}
825
826 private:
836 const ExecutionHandler& other,
837 const ScalarField& base_scalar_res,
838 const GroupElement& base_res)
839 {
840 uint8_t dbl_path = VarianceRNG.next() % 4;
841 switch (dbl_path) {
842 case 0:
843 debug_log("left.dbl", "\n");
844 return ExecutionHandler(base_scalar_res, base_res, this->cg().dbl());
845 case 1:
846 debug_log("right.dbl", "\n");
847 return ExecutionHandler(base_scalar_res, base_res, other.cg().dbl());
848 case 2:
849 return ExecutionHandler(base_scalar_res, base_res, this->cg() + other.cg());
850 case 3:
851 return ExecutionHandler(base_scalar_res, base_res, other.cg() + this->cg());
852 }
853 return {};
854 }
855
865 const ScalarField& base_scalar_res,
866 const GroupElement& base_res)
867 {
868 uint8_t inf_path = VarianceRNG.next() % 2;
869 cycle_group_t res;
870 switch (inf_path) {
871 case 0:
872 return ExecutionHandler(base_scalar_res, base_res, this->cg() + other.cg());
873 case 1:
874 return ExecutionHandler(base_scalar_res, base_res, other.cg() + this->cg());
875 }
876 return {};
877 }
878
887 const ScalarField& base_scalar_res,
888 const GroupElement& base_res)
889 {
890 bool smth_inf = this->cycle_group.is_point_at_infinity().get_value() ||
891 other.cycle_group.is_point_at_infinity().get_value();
892 uint8_t add_option = smth_inf ? 4 + (VarianceRNG.next() % 2) : VarianceRNG.next() % 6;
893
894 switch (add_option) {
895 case 0:
896 debug_log("left.unconditional_add(right);", "\n");
897 return ExecutionHandler(base_scalar_res, base_res, this->cg().unconditional_add(other.cg()));
898 case 1:
899 debug_log("right.unconditional_add(left);", "\n");
900 return ExecutionHandler(base_scalar_res, base_res, other.cg().unconditional_add(this->cg()));
901 case 2:
902 debug_log("left.checked_unconditional_add(right);", "\n");
903 return ExecutionHandler(base_scalar_res, base_res, this->cg().checked_unconditional_add(other.cg()));
904 case 3:
905 debug_log("right.checked_unconditional_add(left);", "\n");
906 return ExecutionHandler(base_scalar_res, base_res, other.cg().checked_unconditional_add(this->cg()));
907 case 4:
908 return ExecutionHandler(base_scalar_res, base_res, this->cg() + other.cg());
909 case 5:
910 return ExecutionHandler(base_scalar_res, base_res, other.cg() + this->cg());
911 }
912 return {};
913 }
914
915 public:
917 {
918 ScalarField base_scalar_res = this->base_scalar + other.base_scalar;
919 GroupElement base_res = this->base + other.base;
920
921 // Test doubling path when points are equal
922 if (other.cg().get_value() == this->cg().get_value()) {
923 return handle_add_doubling_case(builder, other, base_scalar_res, base_res);
924 }
925
926 // Test infinity path when points are negations
927 if (other.cg().get_value() == -this->cg().get_value()) {
928 return handle_add_infinity_case(other, base_scalar_res, base_res);
929 }
930
931 // Test normal addition paths
932 return handle_add_normal_case(other, base_scalar_res, base_res);
933 }
934
935 private:
940 const ExecutionHandler& other,
941 const ScalarField& base_scalar_res,
942 const GroupElement& base_res)
943 {
944 uint8_t dbl_path = VarianceRNG.next() % 3;
945
946 switch (dbl_path) {
947 case 0:
948 debug_log("left.dbl();", "\n");
949 return ExecutionHandler(base_scalar_res, base_res, this->cg().dbl());
950 case 1:
951 debug_log("-right.dbl();", "\n");
952 return ExecutionHandler(base_scalar_res, base_res, -other.cg().dbl());
953 case 2:
954 return ExecutionHandler(base_scalar_res, base_res, this->cg() - other.cg());
955 }
956 return {};
957 }
958
963 const ScalarField& base_scalar_res,
964 const GroupElement& base_res)
965 {
966 return ExecutionHandler(base_scalar_res, base_res, this->cg() - other.cg());
967 }
968
973 const ScalarField& base_scalar_res,
974 const GroupElement& base_res)
975 {
976 bool smth_inf = this->cycle_group.is_point_at_infinity().get_value() ||
977 other.cycle_group.is_point_at_infinity().get_value();
978 uint8_t add_option = smth_inf ? 2 : VarianceRNG.next() % 3;
979
980 switch (add_option) {
981 case 0:
982 debug_log("left.unconditional_subtract(right);", "\n");
983 return ExecutionHandler(base_scalar_res, base_res, this->cg().unconditional_subtract(other.cg()));
984 case 1:
985 debug_log("left.checked_unconditional_subtract(right);", "\n");
986 return ExecutionHandler(
987 base_scalar_res, base_res, this->cg().checked_unconditional_subtract(other.cg()));
988 case 2:
989 return ExecutionHandler(base_scalar_res, base_res, this->cg() - other.cg());
990 }
991 return {};
992 }
993
994 public:
999 {
1000 ScalarField base_scalar_res = this->base_scalar - other.base_scalar;
1001 GroupElement base_res = this->base - other.base;
1002
1003 if (other.cg().get_value() == -this->cg().get_value()) {
1004 return handle_sub_doubling_case(builder, other, base_scalar_res, base_res);
1005 }
1006 if (other.cg().get_value() == this->cg().get_value()) {
1007 return handle_sub_infinity_case(other, base_scalar_res, base_res);
1008 }
1009 return handle_sub_normal_case(other, base_scalar_res, base_res);
1010 }
1011
1013 {
1014 bool is_witness = VarianceRNG.next() & 1;
1015 debug_log(" * cycle_scalar_t",
1016 (is_witness ? "::from_witness(&builder, " : "("),
1017 "ScalarField(\"",
1018 multiplier,
1019 "\"));");
1020 auto scalar = is_witness ? cycle_scalar_t::from_witness(builder, multiplier) : cycle_scalar_t(multiplier);
1021 return ExecutionHandler(this->base_scalar * multiplier, this->base * multiplier, this->cg() * scalar);
1022 }
1023
1025 const std::vector<ExecutionHandler>& to_add,
1026 const std::vector<ScalarField>& to_mul)
1027 {
1029 to_add_cg.reserve(to_add.size());
1031 to_mul_cs.reserve(to_mul.size());
1032
1033 GroupElement accumulator_cg = GroupElement::one();
1034 ScalarField accumulator_cs = ScalarField::zero();
1035
1036 for (size_t i = 0; i < to_add.size(); i++) {
1037 to_add_cg.push_back(to_add[i].cycle_group);
1038
1039 bool is_witness = VarianceRNG.next() & 1;
1040 debug_log("cycle_scalar_t",
1041 (is_witness ? "::from_witness(&builder, " : "("),
1042 "ScalarField(\"",
1043 to_mul[i],
1044 "\")), ");
1045 auto scalar = is_witness ? cycle_scalar_t::from_witness(builder, to_mul[i]) : cycle_scalar_t(to_mul[i]);
1046 to_mul_cs.push_back(scalar);
1047
1048 accumulator_cg += to_add[i].base * to_mul[i];
1049 accumulator_cs += to_add[i].base_scalar * to_mul[i];
1050 }
1051 accumulator_cg -= GroupElement::one();
1052
1053 // Handle the linearly dependant case that is
1054 // assumed to not happen in real life
1055 if (accumulator_cg.is_point_at_infinity()) {
1056 to_add_cg.push_back(cycle_group_t(GroupElement::one()));
1057 to_mul_cs.push_back(cycle_scalar_t(ScalarField::one()));
1058 accumulator_cg += GroupElement::one();
1059 accumulator_cs += ScalarField::one();
1060 }
1061
1062 auto batch_mul_res = cycle_group_t::batch_mul(to_add_cg, to_mul_cs);
1063 return ExecutionHandler(accumulator_cs, accumulator_cg, batch_mul_res);
1064 }
1065
1067 {
1068 this->base_scalar = -this->base_scalar;
1069 this->base = -this->base;
1070 this->cycle_group = -this->cycle_group;
1071 }
1072
1074 {
1075 return ExecutionHandler(this->base_scalar + this->base_scalar, this->base.dbl(), this->cg().dbl());
1076 }
1077
1079 {
1080 ScalarField new_base_scalar = predicate ? other.base_scalar : this->base_scalar;
1081 GroupElement new_base = predicate ? other.base : this->base;
1082 cycle_group_t new_cycle_group =
1083 cycle_group_t::conditional_assign(construct_predicate(builder, predicate), other.cg(), this->cg());
1084 return ExecutionHandler(new_base_scalar, new_base, new_cycle_group);
1085 }
1086
1088 {
1089 if (other.cg().is_constant()) {
1090 if (this->cg().is_constant()) {
1091 // Assert equal does nothing in this case
1092 return;
1093 }
1094 }
1095 auto to_add = cycle_group_t::from_witness(builder, AffineElement(this->base - other.base));
1096 auto to_ae = other.cg() + to_add;
1097 this->cg().assert_equal(to_ae);
1098 }
1099
1100 /* Explicit re-instantiation using the various cycle_group_t constructors */
1102 {
1103 uint32_t switch_case = VarianceRNG.next() % 4;
1104 switch (switch_case) {
1105 case 0:
1106 debug_log("cycle_group_t(", "\n");
1107 /* construct via cycle_group_t */
1108 return ExecutionHandler(this->base_scalar, this->base, cycle_group_t(this->cycle_group));
1109 case 1: {
1110 debug_log("cycle_group_t::from",
1111 (this->cycle_group.is_constant() ? "" : "_constant"),
1112 "_witness(&builder, e.get_value());");
1113 /* construct via AffineElement */
1114 AffineElement e = this->cycle_group.get_value();
1115 if (this->cycle_group.is_constant()) {
1116 return ExecutionHandler(
1117 this->base_scalar, this->base, cycle_group_t::from_constant_witness(builder, e));
1118 }
1119 return ExecutionHandler(this->base_scalar, this->base, cycle_group_t::from_witness(builder, e));
1120 }
1121 case 2: {
1122 debug_log("tmp = el;", "\n");
1123 debug_log("res = cycle_group_t(tmp);", "\n");
1124 /* Invoke assigment operator */
1125 cycle_group_t cg_new(builder);
1126 cg_new = this->cg();
1127 return ExecutionHandler(this->base_scalar, this->base, cycle_group_t(cg_new));
1128 }
1129 case 3: {
1130 debug_log("tmp = el;", "\n");
1131 debug_log("res = cycle_group_t(std::move(tmp));", "\n");
1132 /* Invoke move constructor */
1133 cycle_group_t cg_copy = this->cg();
1134 return ExecutionHandler(this->base_scalar, this->base, cycle_group_t(std::move(cg_copy)));
1135 }
1136 default:
1137 abort();
1138 }
1139 }
1148 static inline size_t execute_CONSTANT(Builder* builder,
1151 {
1152 (void)builder;
1153 stack.push_back(
1154 ExecutionHandler(instruction.arguments.element.scalar,
1155 instruction.arguments.element.value,
1156 cycle_group_t(static_cast<AffineElement>(instruction.arguments.element.value))));
1157 debug_log(
1158 "auto c", stack.size() - 1, " = cycle_group_t(ae(\"", instruction.arguments.element.scalar, "\"));\n");
1159 return 0;
1160 };
1161
// Execute the WITNESS instruction: push a witness cycle_group_t created via
// cycle_group_t::from_witness from the instruction's AffineElement, together
// with its oracle (scalar, value) pair. Always returns 0.
// NOTE(review): the remaining parameter lines (stack, instruction) are absent
// from this rendered listing; see the member index for the full signature.
1170 static inline size_t execute_WITNESS(Builder* builder,
1173 {
1174 stack.push_back(ExecutionHandler(
1175 instruction.arguments.element.scalar,
1176 instruction.arguments.element.value,
1177 cycle_group_t::from_witness(builder, static_cast<AffineElement>(instruction.arguments.element.value))));
1178 debug_log("auto w",
1179 stack.size() - 1,
1180 " = cycle_group_t::from_witness(&builder, ae(\"",
1181 instruction.arguments.element.scalar,
1182 "\"));\n");
1183 return 0;
1184 }
1185
// Execute the CONSTANT_WITNESS instruction: push a cycle_group_t created via
// from_constant_witness (a witness constrained to equal the constant value),
// together with its oracle (scalar, value) pair. Always returns 0.
// NOTE(review): the signature (per the member index:
// execute_CONSTANT_WITNESS(Builder*, std::vector<ExecutionHandler>&,
// Instruction&)) and the `cycle_group_t::from_constant_witness(` call line
// (original line 1202) are missing from this rendered listing.
1198 {
1199 stack.push_back(
1200 ExecutionHandler(instruction.arguments.element.scalar,
1201 instruction.arguments.element.value,
1203 builder, static_cast<AffineElement>(instruction.arguments.element.value))));
1204 debug_log("auto cw",
1205 stack.size() - 1,
1206 " = cycle_group_t::from_constant_witness(&builder, ae(\"",
1207 instruction.arguments.element.scalar,
1208 "\"));\n");
1209 return 0;
1210 }
1211
// Execute the DBL instruction: double stack[in] and store the result at
// stack[out]. Returns 1 (instruction skipped) when the stack is empty,
// 0 otherwise.
// The input index is reduced modulo stack.size(); the output index is
// deliberately left unreduced so that out-of-range values append instead.
1220 static inline size_t execute_DBL(Builder* builder,
1223 {
1224 (void)builder;
1225 if (stack.size() == 0) {
1226 return 1;
1227 }
1228 size_t first_index = instruction.arguments.twoArgs.in % stack.size();
1229 size_t output_index = instruction.arguments.twoArgs.out;
1230
1231 if constexpr (SHOW_FUZZING_INFO) {
1232 auto args = format_single_arg(stack, first_index, output_index);
1233 debug_log(args.out, " = ", args.rhs, ".dbl();", "\n");
1234 }
1235 ExecutionHandler result = stack[first_index].dbl();
1236 // If the output index is larger than the number of elements in stack, append
1237 if (output_index >= stack.size()) {
1238 stack.push_back(result);
1239 } else {
1240 stack[output_index] = result;
1241 }
1242 return 0;
1243 };
1244
// Execute the NEG instruction: negate stack[in] and store the result at
// stack[out] (appending when `out` is past the end). Returns 1 when the
// stack is empty, 0 otherwise.
1253 static inline size_t execute_NEG(Builder* builder,
1256 {
1257 (void)builder;
1258 if (stack.size() == 0) {
1259 return 1;
1260 }
1261 size_t first_index = instruction.arguments.twoArgs.in % stack.size();
1262 size_t output_index = instruction.arguments.twoArgs.out;
1263
1264 if constexpr (SHOW_FUZZING_INFO) {
1265 auto args = format_single_arg(stack, first_index, output_index);
1266 debug_log(args.out, " = -", args.rhs, ";", "\n");
1267 }
1268 ExecutionHandler result = -stack[first_index];
1269 // If the output index is larger than the number of elements in stack, append
1270 if (output_index >= stack.size()) {
1271 stack.push_back(result);
1272 } else {
1273 stack[output_index] = result;
1274 }
1275 return 0;
1276 };
1277
// Execute the ASSERT_EQUAL instruction: constrain stack[first] == stack[second]
// in-circuit. Returns 1 when the stack is empty, 0 otherwise.
// Note: unlike the other twoArgs handlers, `twoArgs.out` is reused here as the
// SECOND INPUT index (both indices are reduced modulo stack.size()); nothing
// is written back to the stack.
1286 static inline size_t execute_ASSERT_EQUAL(Builder* builder,
1289 {
1290 if (stack.size() == 0) {
1291 return 1;
1292 }
1293 size_t first_index = instruction.arguments.twoArgs.in % stack.size();
1294 size_t second_index = instruction.arguments.twoArgs.out % stack.size();
1295
1296 if constexpr (SHOW_FUZZING_INFO) {
1297 auto args = format_two_arg(stack, first_index, second_index, 0);
1298 debug_log("assert_equal(", args.lhs, ", ", args.rhs, ", builder);", "\n");
1299 }
1300 stack[first_index].assert_equal(builder, stack[second_index]);
1301 return 0;
1302 };
1303
// Execute the SET instruction: copy stack[in] through ExecutionHandler::set
// (which rebuilds the element against the builder) into stack[out], appending
// when `out` is past the end. Returns 1 when the stack is empty, 0 otherwise.
1312 static inline size_t execute_SET(Builder* builder,
1315 {
1316 (void)builder;
1317 if (stack.size() == 0) {
1318 return 1;
1319 }
1320 size_t first_index = instruction.arguments.twoArgs.in % stack.size();
1321 size_t output_index = instruction.arguments.twoArgs.out;
1322
1323 ExecutionHandler result;
1324 if constexpr (SHOW_FUZZING_INFO) {
1325 auto args = format_single_arg(stack, first_index, output_index);
1326 debug_log(args.out, " = ");
1327 // Need to split logs here, since `set` produces extra logs
1328 result = stack[first_index].set(builder);
1329 debug_log(args.rhs, ");", "\n");
1330 } else {
1331 result = stack[first_index].set(builder);
1332 }
1333 // If the output index is larger than the number of elements in stack, append
1334 if (output_index >= stack.size()) {
1335 stack.push_back(result);
1336 } else {
1337 stack[output_index] = result;
1338 }
1339 return 0;
1340 };
1341
// Execute the ADD instruction: result = stack[in1] + stack[in2] (via
// ExecutionHandler::operator_add, which also exercises the edge-case paths),
// stored at stack[out] or appended when `out` is past the end.
// Returns 1 when the stack is empty, 0 otherwise.
1350 static inline size_t execute_ADD(Builder* builder,
1353 {
1354 (void)builder;
1355 if (stack.size() == 0) {
1356 return 1;
1357 }
1358 size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
1359 size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
1360 size_t output_index = instruction.arguments.threeArgs.out;
1361
1362 if constexpr (SHOW_FUZZING_INFO) {
1363 auto args = format_two_arg(stack, first_index, second_index, output_index);
1364 debug_log(args.out, " = ", args.lhs, " + ", args.rhs, ";", "\n");
1365 }
1366 ExecutionHandler result = stack[first_index].operator_add(builder, stack[second_index]);
1367 // If the output index is larger than the number of elements in stack, append
1368 if (output_index >= stack.size()) {
1369 stack.push_back(result);
1370 } else {
1371 stack[output_index] = result;
1372 }
1373 return 0;
1374 };
1375
// Execute the SUBTRACT instruction: result = stack[in1] - stack[in2] (via
// ExecutionHandler::operator_sub, which also exercises the edge-case paths),
// stored at stack[out] or appended when `out` is past the end.
// Returns 1 when the stack is empty, 0 otherwise.
1384 static inline size_t execute_SUBTRACT(Builder* builder,
1387 {
1388 (void)builder;
1389 if (stack.size() == 0) {
1390 return 1;
1391 }
1392 size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
1393 size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
1394 size_t output_index = instruction.arguments.threeArgs.out;
1395
1396 if constexpr (SHOW_FUZZING_INFO) {
1397 auto args = format_two_arg(stack, first_index, second_index, output_index);
1398 debug_log(args.out, " = ", args.lhs, " - ", args.rhs, ";", "\n");
1399 }
1400 ExecutionHandler result = stack[first_index].operator_sub(builder, stack[second_index]);
1401 // If the output index is larger than the number of elements in stack, append
1402 if (output_index >= stack.size()) {
1403 stack.push_back(result);
1404 } else {
1405 stack[output_index] = result;
1406 }
1407 return 0;
1408 };
1409
// Execute the COND_ASSIGN instruction: result = predicate ? other : self via
// ExecutionHandler::conditional_assign, written to stack[out].
// Returns 1 when the stack is empty, 0 otherwise.
// NOTE(review): unlike the other handlers, the output index here IS reduced
// modulo stack.size(), so the `output_index >= stack.size()` append branch
// below can never be taken — confirm whether the modulo (or the dead branch)
// is intentional.
1418 static inline size_t execute_COND_ASSIGN(Builder* builder,
1421 {
1422 (void)builder;
1423 if (stack.size() == 0) {
1424 return 1;
1425 }
1426 size_t first_index = instruction.arguments.fourArgs.in1 % stack.size();
1427 size_t second_index = instruction.arguments.fourArgs.in2 % stack.size();
1428 size_t output_index = instruction.arguments.fourArgs.out % stack.size();
1429 bool predicate = instruction.arguments.fourArgs.in3 % 2;
1430
1431 ExecutionHandler result;
1432 if constexpr (SHOW_FUZZING_INFO) {
1433 auto args = format_two_arg(stack, first_index, second_index, output_index);
1434 debug_log(args.out, " = cycle_group_t::conditional_assign(");
1435 // Need to split logs here, since `conditional_assign` produces extra logs
1436 result = stack[first_index].conditional_assign(builder, stack[second_index], predicate);
1437 debug_log(args.rhs, ", ", args.lhs, ");", "\n");
1438 } else {
1439 result = stack[first_index].conditional_assign(builder, stack[second_index], predicate);
1440 }
1441
1442 // If the output index is larger than the number of elements in stack, append
1443 if (output_index >= stack.size()) {
1444 stack.push_back(result);
1445 } else {
1446 stack[output_index] = result;
1447 }
1448 return 0;
1449 };
1450
// Execute the MULTIPLY instruction: result = stack[in] * scalar via
// ExecutionHandler::mul, stored at stack[out] or appended when `out` is past
// the end. Returns 1 when the stack is empty, 0 otherwise.
// NOTE(review): the debug_log below has no closing counterpart after the mul
// call — presumably ExecutionHandler::mul emits the remainder of the log line
// itself (as `set`/`conditional_assign` do); confirm against its definition.
1459 static inline size_t execute_MULTIPLY(Builder* builder,
1462 {
1463
1464 if (stack.size() == 0) {
1465 return 1;
1466 }
1467 size_t first_index = instruction.arguments.mulArgs.in % stack.size();
1468 size_t output_index = instruction.arguments.mulArgs.out;
1469 ScalarField scalar = instruction.arguments.mulArgs.scalar;
1470
1471 if constexpr (SHOW_FUZZING_INFO) {
1472 auto args = format_single_arg(stack, first_index, output_index);
1473 debug_log(args.out, " = ", args.rhs);
1474 }
1475 ExecutionHandler result = stack[first_index].mul(builder, scalar);
1476 // If the output index is larger than the number of elements in stack, append
1477 if (output_index >= stack.size()) {
1478 stack.push_back(result);
1479 } else {
1480 stack[output_index] = result;
1481 }
1482 return 0;
1483 };
1484
// Execute the BATCH_MUL instruction: gather `add_elements_count` (point,
// scalar) pairs from the stack/instruction and compute their multi-scalar
// multiplication via ExecutionHandler::batch_mul, storing the result at
// stack[output_index] or appending when it is past the end.
// Returns 1 when the stack is empty, 0 otherwise.
// NOTE(review): the declaration of `to_add` (original line 1501, presumably
// `std::vector<ExecutionHandler> to_add;`) is missing from this rendered
// listing. add_elements_count is assumed bounded by MAXIMUM_MUL_ELEMENTS by
// the instruction parser — confirm.
1493 static inline size_t execute_BATCH_MUL(Builder* builder,
1496 {
1497 (void)builder;
1498 if (stack.size() == 0) {
1499 return 1;
1500 }
1502 std::vector<ScalarField> to_mul;
1503 for (size_t i = 0; i < instruction.arguments.batchMulArgs.add_elements_count; i++) {
1504 to_add.push_back(stack[(size_t)instruction.arguments.batchMulArgs.inputs[i] % stack.size()]);
1505 to_mul.push_back(instruction.arguments.batchMulArgs.scalars[i]);
1506 }
1507 size_t output_index = (size_t)instruction.arguments.batchMulArgs.output_index;
1508
1509 ExecutionHandler result;
1510 if constexpr (SHOW_FUZZING_INFO) {
1511 std::string res = "";
1512 bool is_const = true;
1513 for (size_t i = 0; i < instruction.arguments.batchMulArgs.add_elements_count; i++) {
1514 size_t idx = instruction.arguments.batchMulArgs.inputs[i] % stack.size();
1515 std::string el = stack[idx].cycle_group.is_constant() ? "c" : "w";
1516 el += std::to_string(idx);
1517 res += el + ", ";
1518 is_const &= stack[idx].cycle_group.is_constant();
1519 }
1520 std::string out = is_const ? "c" : "w";
1521 out = ((output_index >= stack.size()) ? "auto " : "") + out;
1522 out += std::to_string(output_index >= stack.size() ? stack.size() : output_index);
1523 debug_log(out, " = cycle_group_t::batch_mul({", res, "}, {");
1524 // Need to split logs here, since `batch_mul` produces extra logs
1525 result = ExecutionHandler::batch_mul(builder, to_add, to_mul);
1526 debug_log("});", "\n");
1527 } else {
1528 result = ExecutionHandler::batch_mul(builder, to_add, to_mul);
1529 }
1530 // If the output index is larger than the number of elements in stack, append
1531 if (output_index >= stack.size()) {
1532 stack.push_back(result);
1533 } else {
1534 stack[output_index] = result;
1535 }
1536 return 0;
1537 };
1538
// Execute the RANDOMSEED instruction: reseed the global VarianceRNG so that
// subsequent randomized choices in the handlers are reproducible from the
// fuzz input. The stack is untouched. Always returns 0.
1547 static inline size_t execute_RANDOMSEED(Builder* builder,
1550 {
1551 (void)builder;
1552 (void)stack;
1553
1554 VarianceRNG.reseed(instruction.arguments.randomseed);
1555 return 0;
1556 };
1557 };
1558
1563
// postProcess body: verify every stack element against its native oracle —
// (1) the in-circuit cycle_group value must equal the tracked base
//     GroupElement, and
// (2) generator * base_scalar must also equal the tracked base GroupElement.
// Returns false (failing the input) on the first mismatch, true otherwise.
// NOTE(review): the signature (per the member index:
// postProcess(Builder*, std::vector<CycleGroupBase::ExecutionHandler>&)) is
// missing from this rendered listing.
// NOTE(review): the second error message prints
// `element.cycle_group.get_value() * element.base_scalar`, which is not the
// quantity actually compared (AffineElement::one() * base_scalar) — the log
// text looks stale; confirm.
1574 {
1575 (void)builder;
1576 for (size_t i = 0; i < stack.size(); i++) {
1577 auto element = stack[i];
1578 if (element.cycle_group.get_value() != AffineElement(element.base)) {
1579 std::cerr << "Failed at " << i << " with actual value " << AffineElement(element.base)
1580 << " and value in CycleGroup " << element.cycle_group.get_value() << std::endl;
1581 return false;
1582 }
1583 if ((AffineElement::one() * element.base_scalar) != AffineElement(element.base)) {
1584 std::cerr << "Failed at " << i << " with actual mul value " << element.base
1585 << " and value in scalar * CG " << element.cycle_group.get_value() * element.base_scalar
1586 << std::endl;
1587 return false;
1588 }
1589 }
1590 return true;
1591 }
1592};
1593
1594#ifdef HAVOC_TESTING
1595
// libFuzzer one-time initialization hook: populate the global
// fuzzer_havoc_settings with fixed mutation probabilities and build the
// cumulative (prefix-sum) distributions the custom mutator samples from.
// Always returns 0 per the libFuzzer contract.
// NOTE(review): the header comment below says these settings were tuned for
// the safeuint fuzzer — they appear to be inherited here unchanged; confirm
// they are still appropriate for cycle_group.
1596extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv)
1597{
1598 (void)argc;
1599 (void)argv;
1600 // These are the settings, optimized for the safeuint class (under them, fuzzer reaches maximum expected
1601 // coverage in 40 seconds)
1602 fuzzer_havoc_settings = HavocSettings{ .GEN_LLVM_POST_MUTATION_PROB = 30, // Out of 200
1603 .GEN_MUTATION_COUNT_LOG = 5, // -Fully checked
1604 .GEN_STRUCTURAL_MUTATION_PROBABILITY = 300, // Fully checked
1605 .GEN_VALUE_MUTATION_PROBABILITY = 700, // Fully checked
1606 .ST_MUT_DELETION_PROBABILITY = 100, // Fully checked
1607 .ST_MUT_DUPLICATION_PROBABILITY = 80, // Fully checked
1608 .ST_MUT_INSERTION_PROBABILITY = 120, // Fully checked
1609 .ST_MUT_MAXIMUM_DELETION_LOG = 6, // 2 because of limit
1610 .ST_MUT_MAXIMUM_DUPLICATION_LOG = 2, // -Fully checked
1611 .ST_MUT_SWAP_PROBABILITY = 50, // Fully checked
1612 .VAL_MUT_LLVM_MUTATE_PROBABILITY = 250, // Fully checked
1613 .VAL_MUT_MONTGOMERY_PROBABILITY = 130, // Fully checked
1614 .VAL_MUT_NON_MONTGOMERY_PROBABILITY = 50, // Fully checked
1615 .VAL_MUT_SMALL_ADDITION_PROBABILITY = 110, // Fully checked
1616 .VAL_MUT_SPECIAL_VALUE_PROBABILITY = 130, // Fully checked
1617 .structural_mutation_distribution = {},
1618 .value_mutation_distribution = {} };
1624 /*
1625 std::random_device rd;
1626 std::uniform_int_distribution<uint64_t> dist(0, ~(uint64_t)(0));
1627 srandom(static_cast<unsigned int>(dist(rd)));
1628
1629 fuzzer_havoc_settings =
1630 HavocSettings{ .GEN_MUTATION_COUNT_LOG = static_cast<size_t>((random() % 8) + 1),
1631 .GEN_STRUCTURAL_MUTATION_PROBABILITY = static_cast<size_t>(random() % 100),
1632 .GEN_VALUE_MUTATION_PROBABILITY = static_cast<size_t>(random() % 100),
1633 .ST_MUT_DELETION_PROBABILITY = static_cast<size_t>(random() % 100),
1634 .ST_MUT_DUPLICATION_PROBABILITY = static_cast<size_t>(random() % 100),
1635 .ST_MUT_INSERTION_PROBABILITY = static_cast<size_t>((random() % 99) + 1),
1636 .ST_MUT_MAXIMUM_DELETION_LOG = static_cast<size_t>((random() % 8) + 1),
1637 .ST_MUT_MAXIMUM_DUPLICATION_LOG = static_cast<size_t>((random() % 8) + 1),
1638 .ST_MUT_SWAP_PROBABILITY = static_cast<size_t>(random() % 100),
1639 .VAL_MUT_LLVM_MUTATE_PROBABILITY = static_cast<size_t>(random() % 100),
1640 .VAL_MUT_MONTGOMERY_PROBABILITY = static_cast<size_t>(random() % 100),
1641 .VAL_MUT_NON_MONTGOMERY_PROBABILITY = static_cast<size_t>(random() % 100),
1642 .VAL_MUT_SMALL_ADDITION_PROBABILITY = static_cast<size_t>(random() % 100),
1643 .VAL_MUT_SPECIAL_VALUE_PROBABILITY = static_cast<size_t>(random() % 100)
1644
1645 };
1646 while (fuzzer_havoc_settings.GEN_STRUCTURAL_MUTATION_PROBABILITY == 0 &&
1647 fuzzer_havoc_settings.GEN_VALUE_MUTATION_PROBABILITY == 0) {
1648 fuzzer_havoc_settings.GEN_STRUCTURAL_MUTATION_PROBABILITY = static_cast<size_t>(random() % 8);
1649 fuzzer_havoc_settings.GEN_VALUE_MUTATION_PROBABILITY = static_cast<size_t>(random() % 8);
1650 }
1651 */
1652
1653 // fuzzer_havoc_settings.GEN_LLVM_POST_MUTATION_PROB = static_cast<size_t>(((random() % (20 - 1)) + 1) * 10);
1658 /*
1659 std::cerr << "CUSTOM MUTATOR SETTINGS:" << std::endl
1660 << "################################################################" << std::endl
1661 << "GEN_LLVM_POST_MUTATION_PROB: " << fuzzer_havoc_settings.GEN_LLVM_POST_MUTATION_PROB << std::endl
1662 << "GEN_MUTATION_COUNT_LOG: " << fuzzer_havoc_settings.GEN_MUTATION_COUNT_LOG << std::endl
1663 << "GEN_STRUCTURAL_MUTATION_PROBABILITY: " <<
1664 fuzzer_havoc_settings.GEN_STRUCTURAL_MUTATION_PROBABILITY
1665 << std::endl
1666 << "GEN_VALUE_MUTATION_PROBABILITY: " << fuzzer_havoc_settings.GEN_VALUE_MUTATION_PROBABILITY <<
1667 std::endl
1668 << "ST_MUT_DELETION_PROBABILITY: " << fuzzer_havoc_settings.ST_MUT_DELETION_PROBABILITY << std::endl
1669 << "ST_MUT_DUPLICATION_PROBABILITY: " << fuzzer_havoc_settings.ST_MUT_DUPLICATION_PROBABILITY <<
1670 std::endl
1671 << "ST_MUT_INSERTION_PROBABILITY: " << fuzzer_havoc_settings.ST_MUT_INSERTION_PROBABILITY << std::endl
1672 << "ST_MUT_MAXIMUM_DELETION_LOG: " << fuzzer_havoc_settings.ST_MUT_MAXIMUM_DELETION_LOG << std::endl
1673 << "ST_MUT_MAXIMUM_DUPLICATION_LOG: " << fuzzer_havoc_settings.ST_MUT_MAXIMUM_DUPLICATION_LOG <<
1674 std::endl
1675 << "ST_MUT_SWAP_PROBABILITY: " << fuzzer_havoc_settings.ST_MUT_SWAP_PROBABILITY << std::endl
1676 << "VAL_MUT_LLVM_MUTATE_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_LLVM_MUTATE_PROBABILITY
1677 << std::endl
1678 << "VAL_MUT_MONTGOMERY_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_MONTGOMERY_PROBABILITY <<
1679 std::endl
1680 << "VAL_MUT_NON_MONTGOMERY_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_NON_MONTGOMERY_PROBABILITY
1681 << std::endl
1682 << "VAL_MUT_SMALL_ADDITION_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_SMALL_ADDITION_PROBABILITY
1683 << std::endl
1684 << "VAL_MUT_SMALL_MULTIPLICATION_PROBABILITY: "
1685 << fuzzer_havoc_settings.VAL_MUT_SMALL_MULTIPLICATION_PROBABILITY << std::endl
1686 << "VAL_MUT_SPECIAL_VALUE_PROBABILITY: " << fuzzer_havoc_settings.VAL_MUT_SPECIAL_VALUE_PROBABILITY
1687 << std::endl;
1688 */
 // Build cumulative (prefix-sum) distributions: the mutator samples a uniform
 // value and picks the first bucket whose cumulative weight exceeds it.
1689 std::vector<size_t> structural_mutation_distribution;
1690 std::vector<size_t> value_mutation_distribution;
1691 size_t temp = 0;
1692 temp += fuzzer_havoc_settings.ST_MUT_DELETION_PROBABILITY;
1693 structural_mutation_distribution.push_back(temp);
1694 temp += fuzzer_havoc_settings.ST_MUT_DUPLICATION_PROBABILITY;
1695 structural_mutation_distribution.push_back(temp);
1696 temp += fuzzer_havoc_settings.ST_MUT_INSERTION_PROBABILITY;
1697 structural_mutation_distribution.push_back(temp);
1698 temp += fuzzer_havoc_settings.ST_MUT_SWAP_PROBABILITY;
1699 structural_mutation_distribution.push_back(temp);
1700 fuzzer_havoc_settings.structural_mutation_distribution = structural_mutation_distribution;
1701
 // NOTE(review): VAL_MUT_MONTGOMERY / VAL_MUT_NON_MONTGOMERY are configured
 // above but not folded into value_mutation_distribution — presumably the
 // value mutator reads those fields directly; confirm.
1702 temp = 0;
1703 temp += fuzzer_havoc_settings.VAL_MUT_LLVM_MUTATE_PROBABILITY;
1704 value_mutation_distribution.push_back(temp);
1705 temp += fuzzer_havoc_settings.VAL_MUT_SMALL_ADDITION_PROBABILITY;
1706 value_mutation_distribution.push_back(temp);
1707 temp += fuzzer_havoc_settings.VAL_MUT_SPECIAL_VALUE_PROBABILITY;
1708 value_mutation_distribution.push_back(temp);
1709
1710 fuzzer_havoc_settings.value_mutation_distribution = value_mutation_distribution;
1711 return 0;
1712}
1713#endif
1714
// libFuzzer entry point: parse `Data` into cycle_group instructions and run
// them through RunWithBuilders across every builder in FuzzerCircuitTypes,
// cross-checking the circuit against the native oracle.
// Always returns 0 per the libFuzzer contract.
1719extern "C" size_t LLVMFuzzerTestOneInput(const uint8_t* Data, size_t Size)
1720{
1721 RunWithBuilders<CycleGroupBase, FuzzerCircuitTypes>(Data, Size, VarianceRNG);
1722 return 0;
1723}
1724
1725#pragma clang diagnostic pop
bb::field< bb::Bn254FrParams > FF
Definition field.cpp:22
static constexpr size_t CONSTANT_WITNESS
static constexpr size_t ADD
static constexpr size_t CONSTANT
static constexpr size_t MULTIPLY
static constexpr size_t COND_ASSIGN
static constexpr size_t ASSERT_EQUAL
static constexpr size_t RANDOMSEED
static constexpr size_t DBL
static constexpr size_t BATCH_MUL
static constexpr size_t SET
static constexpr size_t WITNESS
static constexpr size_t NEG
static constexpr size_t SUBTRACT
This class implements the execution of cycle group with an oracle to detect discrepancies.
static size_t execute_SUBTRACT(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the subtraction operator instruction.
static size_t execute_COND_ASSIGN(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the COND_ASSIGN instruction.
static size_t execute_DBL(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the DBL instruction.
ExecutionHandler(ScalarField s, GroupElement g, cycle_group_t w_g)
ExecutionHandler handle_add_doubling_case(Builder *builder, const ExecutionHandler &other, const ScalarField &base_scalar_res, const GroupElement &base_res)
Handle addition when points are equal (requires doubling)
static bool_t construct_predicate(Builder *builder, const bool predicate)
ExecutionHandler handle_sub_normal_case(const ExecutionHandler &other, const ScalarField &base_scalar_res, const GroupElement &base_res)
Handle normal subtraction case (no special edge cases)
static size_t execute_NEG(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the NEG instruction.
static size_t execute_WITNESS(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the witness instruction (push witness cycle group to the stack)
ExecutionHandler handle_sub_doubling_case(Builder *builder, const ExecutionHandler &other, const ScalarField &base_scalar_res, const GroupElement &base_res)
Handle subtraction when points are negations: x - (-x) = 2x (doubling case)
static size_t execute_BATCH_MUL(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the BATCH_MUL instruction.
static size_t execute_RANDOMSEED(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the RANDOMSEED instruction.
static size_t execute_ASSERT_EQUAL(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the ASSERT_EQUAL instruction.
static size_t execute_SET(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the SET instruction.
static size_t execute_CONSTANT_WITNESS(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the constant_witness instruction (push a safeuint witness equal to the constant to the stack)
static size_t execute_ADD(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the addition operator instruction.
ExecutionHandler mul(Builder *builder, const ScalarField &multiplier)
static ExecutionHandler batch_mul(Builder *builder, const std::vector< ExecutionHandler > &to_add, const std::vector< ScalarField > &to_mul)
ExecutionHandler handle_sub_infinity_case(const ExecutionHandler &other, const ScalarField &base_scalar_res, const GroupElement &base_res)
Handle subtraction when points are equal: x - x = 0 (point at infinity)
ExecutionHandler operator_sub(Builder *builder, const ExecutionHandler &other)
Subtract two ExecutionHandlers, exploring different code paths for edge cases.
static size_t execute_MULTIPLY(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the multiply instruction.
void assert_equal(Builder *builder, ExecutionHandler &other)
ExecutionHandler handle_add_infinity_case(const ExecutionHandler &other, const ScalarField &base_scalar_res, const GroupElement &base_res)
Handle addition when points are negations (result is point at infinity)
ExecutionHandler handle_add_normal_case(const ExecutionHandler &other, const ScalarField &base_scalar_res, const GroupElement &base_res)
Handle normal addition (no special edge cases)
static size_t execute_CONSTANT(Builder *builder, std::vector< ExecutionHandler > &stack, Instruction &instruction)
Execute the constant instruction (push constant cycle group to the stack)
ExecutionHandler operator_add(Builder *builder, const ExecutionHandler &other)
ExecutionHandler conditional_assign(Builder *builder, ExecutionHandler &other, const bool predicate)
ExecutionHandler set(Builder *builder)
static uint256_t to_uint256_montgomery(const FF &value, bool as_montgomery)
Convert a scalar field element to uint256_t, optionally using Montgomery form.
static Instruction generateRandom(T &rng)
Generates a random instruction.
static Instruction mutateInstruction(Instruction instruction, T &rng, HavocSettings &havoc_config)
Mutate a single instruction.
static FF from_uint256_montgomery(const uint256_t &data, bool from_montgomery)
Convert uint256_t back to scalar field element, optionally from Montgomery form.
static ScalarField mutateScalarElement(ScalarField e, T &rng, HavocSettings &havoc_config)
Mutate the value of a scalar element.
static Element mutateGroupElement(Element e, T &rng, HavocSettings &havoc_config)
Mutate the value of a group element.
Optional subclass that governs limits on the use of certain instructions, since some of them can be t...
Parser class handles the parsing and writing the instructions back to data buffer.
static void writeInstruction(Instruction &instruction, uint8_t *Data)
Write a single instruction to buffer.
static Instruction parseInstructionArgs(uint8_t *Data)
Parse a single instruction from data.
The class parametrizing CycleGroup fuzzing instructions, execution, etc.
typename bb::stdlib::witness_t< Builder > witness_t
typename bb::stdlib::cycle_group< Builder >::Curve Curve
typename bb::stdlib::public_witness_t< Builder > public_witness_t
typename bb::stdlib::cycle_group< Builder > cycle_group_t
typename bb::stdlib::field_t< Builder > field_t
static bool postProcess(Builder *builder, std::vector< CycleGroupBase::ExecutionHandler > &stack)
Check that the resulting values are equal to expected.
std::vector< ExecutionHandler > ExecutionState
typename bb::stdlib::bool_t< Builder > bool_t
typename cycle_group_t::cycle_scalar cycle_scalar_t
typename Curve::Element GroupElement
typename Curve::AffineElement AffineElement
typename Curve::ScalarField ScalarField
typename Curve::BaseField BaseField
Class for quickly deterministically creating new random values. We don't care about distribution much...
Definition fuzzer.hpp:63
void reseed(uint32_t seed)
Definition fuzzer.hpp:75
uint32_t next()
Definition fuzzer.hpp:68
typename Group::element Element
Definition bn254.hpp:21
bb::fq BaseField
Definition bn254.hpp:19
typename Group::affine_element AffineElement
Definition bn254.hpp:22
bb::fr ScalarField
Definition bn254.hpp:18
Implements boolean logic in-circuit.
Definition bool.hpp:59
cycle_group represents a group Element of the proving system's embedded curve, i.e....
static cycle_group from_constant_witness(Builder *_context, const AffineElement &_in)
Converts a native AffineElement into a witness, but constrains the witness values to be known constan...
static cycle_group conditional_assign(const bool_t &predicate, const cycle_group &lhs, const cycle_group &rhs)
static cycle_group from_witness(Builder *_context, const AffineElement &_in)
Converts an AffineElement into a circuit witness.
::bb::stdlib::cycle_scalar< Builder > cycle_scalar
static cycle_group batch_mul(const std::vector< cycle_group > &base_points, const std::vector< BigScalarField > &scalars, GeneratorContext context={})
Concept for a simple PRNG which returns a uint32_t when next is called.
Definition fuzzer.hpp:192
AluTraceBuilder builder
Definition alu.test.cpp:124
const std::vector< MemoryValue > data
constexpr size_t MINIMUM_MUL_ELEMENTS
constexpr uint8_t SPECIAL_VALUE_COUNT_NO_ZERO
constexpr size_t MAXIMUM_MUL_ELEMENTS
constexpr uint8_t SPECIAL_VALUE_COUNT
SpecialScalarValue
Special scalar field values used for mutation testing.
size_t LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize)
FastRandom VarianceRNG(0)
bool circuit_should_fail
int LLVMFuzzerInitialize(int *argc, char ***argv)
FF get_special_scalar_value(SpecialScalarValue type)
Generate a special scalar field value for testing.
size_t LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
Fuzzer entry function.
#define PUT_RANDOM_BYTE_IF_LUCKY(variable)
ssize_t offset
Definition engine.cpp:36
void debug_log(Args &&... args)
Compile-time debug logging helper.
Definition fuzzer.hpp:127
constexpr bool SHOW_FUZZING_INFO
Definition fuzzer.hpp:123
Instruction instruction
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
std::string to_string(bb::avm2::ValueTag tag)
ScalarField scalars[MAXIMUM_MUL_ELEMENTS]
Element(ScalarField s=ScalarField::one(), GroupElement g=GroupElement::one())
size_t GEN_LLVM_POST_MUTATION_PROB
Definition fuzzer.hpp:28
static constexpr field get_root_of_unity(size_t subgroup_size) noexcept
static constexpr uint256_t modulus
BB_INLINE constexpr field to_montgomery_form() const noexcept
constexpr std::pair< bool, field > sqrt() const noexcept
Compute square root of the field element.
BB_INLINE constexpr field from_montgomery_form() const noexcept