#pragma clang diagnostic push
// ...
#pragma clang diagnostic ignored "-Wc99-designator"
// ...
#ifndef DISABLE_MULTIPLICATION
// ...
#ifndef DISABLE_BATCH_MUL
template <typename T>
    // ...
    uint8_t in, in1, in2, in3, out;
    // ...
    switch (instruction_opcode) {
        // ...
        auto scalar = ScalarField(static_cast<uint64_t>(fast_log_distributed_uint256(rng)));
        auto el = GroupElement::one() * scalar;
        return { .id = instruction_opcode, .arguments.element = Element(scalar, el) };
        // ...
        in = static_cast<uint8_t>(rng.next() & 0xff);
        out = static_cast<uint8_t>(rng.next() & 0xff);
        return { .id = instruction_opcode, .arguments.twoArgs = { .in = in, .out = out } };
        // ...
        in1 = static_cast<uint8_t>(rng.next() & 0xff);
        in2 = static_cast<uint8_t>(rng.next() & 0xff);
        out = static_cast<uint8_t>(rng.next() & 0xff);
        return { .id = instruction_opcode,
                 .arguments.threeArgs.in1 = in1,
                 .arguments.threeArgs.in2 = in2,
                 .arguments.threeArgs.out = out };
        in1 = static_cast<uint8_t>(rng.next() & 0xff);
        in2 = static_cast<uint8_t>(rng.next() & 0xff);
        in3 = static_cast<uint8_t>(rng.next() & 0xff);
        out = static_cast<uint8_t>(rng.next() & 0xff);
        return { .id = instruction_opcode,
                 .arguments.fourArgs.in1 = in1,
                 .arguments.fourArgs.in2 = in2,
                 .arguments.fourArgs.in3 = in3,
                 .arguments.fourArgs.out = out };
#ifndef DISABLE_MULTIPLICATION
        in = static_cast<uint8_t>(rng.next() & 0xff);
        out = static_cast<uint8_t>(rng.next() & 0xff);
        return { .id = instruction_opcode,
                 .arguments.mulArgs.in = in,
                 .arguments.mulArgs.out = out };
#ifndef DISABLE_BATCH_MUL
        // ...
        instr.id = instruction_opcode;
        for (size_t i = 0; i < mult_size; i++) {
            // ...
        for (size_t i = 0; i < mult_size; i++) {
            // ...
        return { .id = instruction_opcode, .arguments.randomseed = rng.next() * rng.next() };
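// The constant-generation path above draws its scalar from fast_log_distributed_uint256(rng).
// Below is a minimal standalone sketch of the general idea behind a "log-distributed" random
// integer (illustrative only, not barretenberg's implementation): pick a bit-length uniformly
// first, then fill that many bits, so small and very large magnitudes are hit about equally often.
#include <cstdint>
#include <random>

static uint64_t log_distributed_u64(std::mt19937_64& rng)
{
    const unsigned bits = static_cast<unsigned>(rng() % 64) + 1; // 1..64 significant bits
    const uint64_t raw = rng();
    return bits == 64 ? raw : (raw & ((uint64_t(1) << bits) - 1)); // keep only `bits` low bits
}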
    if (from_montgomery) {
// ...
template <typename T>
    // ...
    bool convert_to_montgomery = (rng.next() % (havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY +
                                                havoc_config.VAL_MUT_NON_MONTGOMERY_PROBABILITY)) <
                                 havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY;
    bool normalize = (rng.next() % (havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY +
                                    havoc_config.VAL_MUT_NON_MONTGOMERY_PROBABILITY)) <
                     havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY;
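// Both flags above use the same weighted-coin idiom: draw rng % (A + B) and compare against A,
// which is true with probability A / (A + B), up to modulo bias. A standalone sketch under
// illustrative names:
#include <cstdint>

static bool weighted_coin(uint32_t draw, uint32_t weight_true, uint32_t weight_false)
{
    // map the draw into [0, weight_true + weight_false) and select the "true" slice
    return (draw % (weight_true + weight_false)) < weight_true;
}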
    const size_t mutation_type_count = havoc_config.value_mutation_distribution.size();
    const size_t choice = rng.next() % havoc_config.value_mutation_distribution[mutation_type_count - 1];
    if (choice < havoc_config.value_mutation_distribution[0]) {
        // ...
        e.scalar = from_uint256_montgomery<ScalarField>(value_data, convert_to_montgomery);
        e.value = GroupElement::one() * e.scalar;
    } else if (choice < havoc_config.value_mutation_distribution[1]) {
        if (convert_to_montgomery) {
            e.scalar = e.scalar.to_montgomery_form();
        // ...
        if (rng.next() & 1) {
            auto switch_sign = static_cast<bool>(rng.next() & 1);
            // ...
            e.value += GroupElement::one() * extra;
            // ...
            e.value -= GroupElement::one() * extra;
        // ...
        e.value = e.value.normalize();
        if (convert_to_montgomery) {
            e.scalar = e.scalar.from_montgomery_form();
    } else if (choice < havoc_config.value_mutation_distribution[2]) {
        if (convert_to_montgomery) {
            e.scalar = e.scalar.to_montgomery_form();
        // ...
        e.scalar = get_special_scalar_value<ScalarField>(special_value);
        if (convert_to_montgomery) {
            e.scalar = e.scalar.to_montgomery_form();
        e.value = GroupElement::one() * e.scalar;
template <typename T>
    // ...
    bool convert_to_montgomery = (rng.next() % (havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY +
                                                havoc_config.VAL_MUT_NON_MONTGOMERY_PROBABILITY)) <
                                 havoc_config.VAL_MUT_MONTGOMERY_PROBABILITY;
    // ...
    const size_t mutation_type_count = havoc_config.value_mutation_distribution.size();
    const size_t choice = rng.next() % havoc_config.value_mutation_distribution[mutation_type_count - 1];
    if (choice < havoc_config.value_mutation_distribution[0]) {
        // ...
        e = from_uint256_montgomery<ScalarField>(value_data, convert_to_montgomery);
    } else if (choice < havoc_config.value_mutation_distribution[1]) {
        if (convert_to_montgomery) {
            e = e.to_montgomery_form();
        // ...
        if (rng.next() & 1) {
            auto switch_sign = static_cast<bool>(rng.next() & 1);
            // ...
        if (convert_to_montgomery) {
            e = e.from_montgomery_form();
    } else if (choice < havoc_config.value_mutation_distribution[2]) {
        if (convert_to_montgomery) {
            e = e.to_montgomery_form();
        // ...
        e = get_special_scalar_value<ScalarField>(special_value);
        if (convert_to_montgomery) {
            e = e.to_montgomery_form();
template <typename T>
    // ...
#define PUT_RANDOM_BYTE_IF_LUCKY(variable) \
    if (rng.next() & 1) { \
        variable = rng.next() & 0xff; \
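// Illustrative standalone sketch of what the macro above is used for: each operand byte of an
// instruction is independently replaced with a fresh random byte with probability 1/2. The struct
// and function names below are placeholders, not the fuzzer's own types.
#include <cstdint>

struct ExampleThreeArgs {
    uint8_t in1, in2, out;
};

template <typename Rng> static void havoc_three_args(ExampleThreeArgs& args, Rng& rng)
{
    auto put_random_byte_if_lucky = [&](uint8_t& b) {
        if (rng.next() & 1) {      // flip a fair coin
            b = rng.next() & 0xff; // overwrite with a random byte
        }
    };
    put_random_byte_if_lucky(args.in1);
    put_random_byte_if_lucky(args.in2);
    put_random_byte_if_lucky(args.out);
}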
    if (rng.next() & 1) {
// ...
#ifndef DISABLE_MULTIPLICATION
    // ...
    if (rng.next() & 1) {
// ...
#ifndef DISABLE_BATCH_MUL
    if (rng.next() & 1) {
        // ...
        instruction.arguments.batchMulArgs.add_elements_count = mult_size;
        if (instruction.arguments.batchMulArgs.add_elements_count && (rng.next() & 1)) {
            // ...
            static_cast<uint8_t>(rng.next() % (instruction.arguments.batchMulArgs.add_elements_count));
            for (size_t i = 0; i < mut_count; i++) {
                // ...
                rng.next() % static_cast<size_t>(instruction.arguments.batchMulArgs.add_elements_count);
        // ...
        if (instruction.arguments.batchMulArgs.add_elements_count && (rng.next() & 1)) {
            // ...
            static_cast<uint8_t>(rng.next() % (instruction.arguments.batchMulArgs.add_elements_count));
            for (size_t i = 0; i < mut_count; i++) {
                // ...
                rng.next() % static_cast<size_t>(instruction.arguments.batchMulArgs.add_elements_count);
        static constexpr size_t DBL = 2;
        static constexpr size_t NEG = 2;
        static constexpr size_t SET = 2;
        static constexpr size_t ADD = 3;
        // ...
#ifndef DISABLE_MULTIPLICATION
        // ...
#ifndef DISABLE_BATCH_MUL
// ...
        static constexpr size_t SET = 0;
        // ...
        static constexpr size_t ADD = 1;
        static constexpr size_t DBL = 1;
        static constexpr size_t NEG = 1;
        // ...
#ifndef DISABLE_MULTIPLICATION
        // ...
#ifndef DISABLE_BATCH_MUL
        auto scalar = ScalarField::serialize_from_buffer(Data);
        auto el = GroupElement::one() * scalar;
        // ...
        instr.arguments.fourArgs = { .in1 = *Data, .in2 = *(Data + 1), .in3 = *(Data + 2), .out = *(Data + 3) };
#ifndef DISABLE_MULTIPLICATION
        // ...
#ifndef DISABLE_BATCH_MUL
        // ...
        for (size_t i = 0; i < n; i++) {
template <typename Instruction::OPCODE instruction_opcode>
    // ...
    switch (instruction_opcode) {
        // ...
        ScalarField::serialize_to_buffer(instruction.arguments.element.scalar, Data + 1);
        // ...
#ifndef DISABLE_MULTIPLICATION
        // ...
        ScalarField::serialize_to_buffer(instruction.arguments.mulArgs.scalar, Data + 3);
        // ...
#ifndef DISABLE_BATCH_MUL
        *(Data + 1) = instruction.arguments.batchMulArgs.add_elements_count;
        *(Data + 2) = instruction.arguments.batchMulArgs.output_index;
        size_t n = instruction.arguments.batchMulArgs.add_elements_count;
        memcpy(Data + 3, instruction.arguments.batchMulArgs.inputs, n);
        for (size_t i = 0; i < n; i++) {
            ScalarField::serialize_to_buffer(instruction.arguments.batchMulArgs.scalars[i], Data + offset);
        // ...
        memcpy(Data + 1, &instruction.arguments.randomseed, sizeof(uint32_t));
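// Standalone sketch (illustrative names, not the fuzzer's real Parser) of the tag-plus-payload
// layout the parse/write pair above relies on: byte 0 carries the opcode, and the opcode decides
// how many argument bytes follow, so parsing what was written recovers the same instruction.
#include <cstdint>

struct MiniTwoArgs {
    uint8_t in, out;
};
struct MiniInstruction {
    uint8_t id;
    MiniTwoArgs twoArgs;
};

static void write_mini(const MiniInstruction& instr, uint8_t* Data)
{
    Data[0] = instr.id;          // opcode tag
    Data[1] = instr.twoArgs.in;  // payload bytes
    Data[2] = instr.twoArgs.out;
}

static MiniInstruction parse_mini(const uint8_t* Data)
{
    return MiniInstruction{ Data[0], { Data[1], Data[2] } };
}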
    const bool predicate_is_const = static_cast<bool>(VarianceRNG.next() & 1);
    if (predicate_is_const) {
        const bool predicate_has_ctx = static_cast<bool>(VarianceRNG.next() % 2);
        debug_log("bool_t(", (predicate_has_ctx ? "&builder," : "nullptr,"), (predicate ? "true)" : "false)"));
        return bool_t(predicate_has_ctx ? builder : nullptr, predicate);
    debug_log("bool_t(witness_t(&builder, ", (predicate ? "true));" : "false))"));
    bool smth_inf = this->cycle_group.is_point_at_infinity().get_value() ||
                    other.cycle_group.is_point_at_infinity().get_value();
    // ...
    switch (add_option) {
        debug_log("left.unconditional_add(right);", "\n");
        // ...
        debug_log("right.unconditional_add(left);", "\n");
        return ExecutionHandler(base_scalar_res, base_res, other.cg().unconditional_add(this->cg()));
        debug_log("left.checked_unconditional_add(right);", "\n");
        return ExecutionHandler(base_scalar_res, base_res, this->cg().checked_unconditional_add(other.cg()));
        debug_log("right.checked_unconditional_add(left);", "\n");
        return ExecutionHandler(base_scalar_res, base_res, other.cg().checked_unconditional_add(this->cg()));
    // ...
    if (other.cg().get_value() == this->cg().get_value()) {
        // ...
    if (other.cg().get_value() == -this->cg().get_value()) {
    bool smth_inf = this->cycle_group.is_point_at_infinity().get_value() ||
                    other.cycle_group.is_point_at_infinity().get_value();
    // ...
    switch (add_option) {
        debug_log("left.unconditional_subtract(right);", "\n");
        return ExecutionHandler(base_scalar_res, base_res, this->cg().unconditional_subtract(other.cg()));
        debug_log("left.checked_unconditional_subtract(right);", "\n");
        // ...
            base_scalar_res, base_res, this->cg().checked_unconditional_subtract(other.cg()));
    // ...
    if (other.cg().get_value() == -this->cg().get_value()) {
        // ...
    if (other.cg().get_value() == this->cg().get_value()) {
    // ...
        (is_witness ? "::from_witness(&builder, " : "("),
        // ...
    auto scalar = is_witness ? cycle_scalar_t::from_witness(builder, multiplier) : cycle_scalar_t(multiplier);
    return ExecutionHandler(this->base_scalar * multiplier, this->base * multiplier, this->cg() * scalar);
    // ...
                                       const std::vector<ScalarField>& to_mul)
        // ...
        to_add_cg.reserve(to_add.size());
        to_mul_cs.reserve(to_mul.size());
        // ...
        for (size_t i = 0; i < to_add.size(); i++) {
            // ...
            (is_witness ? "::from_witness(&builder, " : "("),
            // ...
            auto scalar = is_witness ? cycle_scalar_t::from_witness(builder, to_mul[i]) : cycle_scalar_t(to_mul[i]);
            to_mul_cs.push_back(scalar);
            accumulator_cg += to_add[i].base * to_mul[i];
            accumulator_cs += to_add[i].base_scalar * to_mul[i];
        // ...
        accumulator_cg -= GroupElement::one();
        // ...
        if (accumulator_cg.is_point_at_infinity()) {
            // ...
            accumulator_cg += GroupElement::one();
            accumulator_cs += ScalarField::one();
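// Illustrative standalone sketch (placeholder template parameters, not the fuzzer's types) of the
// native reference computation kept alongside the in-circuit batch multiplication above: the
// expected result is simply the sum over i of point[i] * scalar[i], accumulated pairwise.
#include <cstddef>
#include <vector>

// Point must support operator+(Point) and operator*(Scalar); `accumulator` starts at the group
// identity supplied by the caller.
template <typename Point, typename Scalar>
static Point naive_msm(const std::vector<Point>& points, const std::vector<Scalar>& scalars, Point accumulator)
{
    for (size_t i = 0; i < points.size() && i < scalars.size(); i++) {
        accumulator = accumulator + points[i] * scalars[i];
    }
    return accumulator;
}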
    this->base = -this->base;
    // ...
    return ExecutionHandler(this->base_scalar + this->base_scalar, this->base.dbl(), this->cg().dbl());
// ...
    if (other.cg().is_constant()) {
        if (this->cg().is_constant()) {
            // ...
    auto to_ae = other.cg() + to_add;
    this->cg().assert_equal(to_ae);
    switch (switch_case) {
        // ...
        (this->cycle_group.is_constant() ? "" : "_constant"),
        "_witness(&builder, e.get_value());");
        // ...
        if (this->cycle_group.is_constant()) {
            // ...
            debug_log("res = cycle_group_t(tmp);", "\n");
            // ...
            cg_new = this->cg();
            // ...
            debug_log("res = cycle_group_t(std::move(tmp));", "\n");
1158 "auto c", stack.size() - 1,
" = cycle_group_t(ae(\"",
instruction.arguments.element.scalar,
"\"));\n");
1180 " = cycle_group_t::from_witness(&builder, ae(\"",
1206 " = cycle_group_t::from_constant_witness(&builder, ae(\"",
        if (stack.size() == 0) {
            // ...
        size_t first_index = instruction.arguments.twoArgs.in % stack.size();
        size_t output_index = instruction.arguments.twoArgs.out;
        // ...
        auto args = format_single_arg(stack, first_index, output_index);
        debug_log(args.out, " = ", args.rhs, ".dbl();", "\n");
        // ...
        if (output_index >= stack.size()) {
            stack.push_back(result);
            // ...
            stack[output_index] = result;
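// Illustrative standalone sketch (placeholder element type) of the stack convention the execute_*
// handlers above follow: input indices are reduced modulo the current stack size, and a result is
// appended when the requested output slot does not exist yet, otherwise overwritten in place.
#include <cstddef>
#include <vector>

template <typename T>
static void store_result(std::vector<T>& stack, size_t output_index, const T& result)
{
    if (output_index >= stack.size()) {
        stack.push_back(result); // grow the stack: the slot does not exist yet
    } else {
        stack[output_index] = result; // reuse the existing slot
    }
}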
        if (stack.size() == 0) {
            // ...
        size_t first_index = instruction.arguments.twoArgs.in % stack.size();
        size_t output_index = instruction.arguments.twoArgs.out;
        // ...
        auto args = format_single_arg(stack, first_index, output_index);
        debug_log(args.out, " = -", args.rhs, ";", "\n");
        // ...
        if (output_index >= stack.size()) {
            stack.push_back(result);
            // ...
            stack[output_index] = result;
        if (stack.size() == 0) {
            // ...
        size_t first_index = instruction.arguments.twoArgs.in % stack.size();
        size_t second_index = instruction.arguments.twoArgs.out % stack.size();
        // ...
        auto args = format_two_arg(stack, first_index, second_index, 0);
        debug_log("assert_equal(", args.lhs, ", ", args.rhs, ", builder);", "\n");
        stack[first_index].assert_equal(builder, stack[second_index]);
        if (stack.size() == 0) {
            // ...
        size_t first_index = instruction.arguments.twoArgs.in % stack.size();
        size_t output_index = instruction.arguments.twoArgs.out;
        // ...
        auto args = format_single_arg(stack, first_index, output_index);
        // ...
        if (output_index >= stack.size()) {
            stack.push_back(result);
            // ...
            stack[output_index] = result;
        if (stack.size() == 0) {
            // ...
        size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
        size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
        size_t output_index = instruction.arguments.threeArgs.out;
        // ...
        auto args = format_two_arg(stack, first_index, second_index, output_index);
        debug_log(args.out, " = ", args.lhs, " + ", args.rhs, ";", "\n");
        // ...
        if (output_index >= stack.size()) {
            stack.push_back(result);
            // ...
            stack[output_index] = result;
        if (stack.size() == 0) {
            // ...
        size_t first_index = instruction.arguments.threeArgs.in1 % stack.size();
        size_t second_index = instruction.arguments.threeArgs.in2 % stack.size();
        size_t output_index = instruction.arguments.threeArgs.out;
        // ...
        auto args = format_two_arg(stack, first_index, second_index, output_index);
        debug_log(args.out, " = ", args.lhs, " - ", args.rhs, ";", "\n");
        // ...
        if (output_index >= stack.size()) {
            stack.push_back(result);
            // ...
            stack[output_index] = result;
        if (stack.size() == 0) {
            // ...
        size_t first_index = instruction.arguments.fourArgs.in1 % stack.size();
        size_t second_index = instruction.arguments.fourArgs.in2 % stack.size();
        size_t output_index = instruction.arguments.fourArgs.out % stack.size();
        bool predicate = instruction.arguments.fourArgs.in3 % 2;
        // ...
        auto args = format_two_arg(stack, first_index, second_index, output_index);
        debug_log(args.out, " = cycle_group_t::conditional_assign(");
        // ...
        debug_log(args.rhs, ", ", args.lhs, ");", "\n");
        // ...
        if (output_index >= stack.size()) {
            stack.push_back(result);
            // ...
            stack[output_index] = result;
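// Illustrative sketch of the selection semantics being exercised above, computed natively so it
// can be compared against the in-circuit cycle_group_t::conditional_assign: the result is lhs when
// the predicate holds and rhs otherwise. Placeholder template type only, not the fuzzer's code.
template <typename T>
static T conditional_assign_reference(bool predicate, const T& lhs, const T& rhs)
{
    return predicate ? lhs : rhs;
}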
        if (stack.size() == 0) {
            // ...
        size_t first_index = instruction.arguments.mulArgs.in % stack.size();
        size_t output_index = instruction.arguments.mulArgs.out;
        // ...
        auto args = format_single_arg(stack, first_index, output_index);
        // ...
        if (output_index >= stack.size()) {
            stack.push_back(result);
            // ...
            stack[output_index] = result;
        if (stack.size() == 0) {
            // ...
        std::vector<ScalarField> to_mul;
        for (size_t i = 0; i < instruction.arguments.batchMulArgs.add_elements_count; i++) {
            to_add.push_back(stack[(size_t)instruction.arguments.batchMulArgs.inputs[i] % stack.size()]);
            to_mul.push_back(instruction.arguments.batchMulArgs.scalars[i]);
        size_t output_index = (size_t)instruction.arguments.batchMulArgs.output_index;
        // ...
        std::string res = "";
        bool is_const = true;
        for (size_t i = 0; i < instruction.arguments.batchMulArgs.add_elements_count; i++) {
            size_t idx = instruction.arguments.batchMulArgs.inputs[i] % stack.size();
            std::string el = stack[idx].cycle_group.is_constant() ? "c" : "w";
            // ...
            is_const &= stack[idx].cycle_group.is_constant();
        std::string out = is_const ? "c" : "w";
        out = ((output_index >= stack.size()) ? "auto " : "") + out;
        out += std::to_string(output_index >= stack.size() ? stack.size() : output_index);
        debug_log(out, " = cycle_group_t::batch_mul({", res, "}, {");
        // ...
        if (output_index >= stack.size()) {
            stack.push_back(result);
            // ...
            stack[output_index] = result;
    for (size_t i = 0; i < stack.size(); i++) {
        auto element = stack[i];
        if (element.cycle_group.get_value() != AffineElement(element.base)) {
            // ...
                      << " and value in CycleGroup " << element.cycle_group.get_value() << std::endl;
        // ...
        if ((AffineElement::one() * element.base_scalar) != AffineElement(element.base)) {
            std::cerr << "Failed at " << i << " with actual mul value " << element.base
                      << " and value in scalar * CG " << element.cycle_group.get_value() * element.base_scalar
    .GEN_MUTATION_COUNT_LOG = 5,
    .GEN_STRUCTURAL_MUTATION_PROBABILITY = 300,
    .GEN_VALUE_MUTATION_PROBABILITY = 700,
    .ST_MUT_DELETION_PROBABILITY = 100,
    .ST_MUT_DUPLICATION_PROBABILITY = 80,
    .ST_MUT_INSERTION_PROBABILITY = 120,
    .ST_MUT_MAXIMUM_DELETION_LOG = 6,
    .ST_MUT_MAXIMUM_DUPLICATION_LOG = 2,
    .ST_MUT_SWAP_PROBABILITY = 50,
    .VAL_MUT_LLVM_MUTATE_PROBABILITY = 250,
    .VAL_MUT_MONTGOMERY_PROBABILITY = 130,
    .VAL_MUT_NON_MONTGOMERY_PROBABILITY = 50,
    .VAL_MUT_SMALL_ADDITION_PROBABILITY = 110,
    .VAL_MUT_SPECIAL_VALUE_PROBABILITY = 130,
    .structural_mutation_distribution = {},
    .value_mutation_distribution = {} };
    std::vector<size_t> structural_mutation_distribution;
    std::vector<size_t> value_mutation_distribution;
    temp += fuzzer_havoc_settings.ST_MUT_DELETION_PROBABILITY;
    structural_mutation_distribution.push_back(temp);
    temp += fuzzer_havoc_settings.ST_MUT_DUPLICATION_PROBABILITY;
    structural_mutation_distribution.push_back(temp);
    temp += fuzzer_havoc_settings.ST_MUT_INSERTION_PROBABILITY;
    structural_mutation_distribution.push_back(temp);
    temp += fuzzer_havoc_settings.ST_MUT_SWAP_PROBABILITY;
    structural_mutation_distribution.push_back(temp);
    fuzzer_havoc_settings.structural_mutation_distribution = structural_mutation_distribution;
    // ...
    temp += fuzzer_havoc_settings.VAL_MUT_LLVM_MUTATE_PROBABILITY;
    value_mutation_distribution.push_back(temp);
    temp += fuzzer_havoc_settings.VAL_MUT_SMALL_ADDITION_PROBABILITY;
    value_mutation_distribution.push_back(temp);
    temp += fuzzer_havoc_settings.VAL_MUT_SPECIAL_VALUE_PROBABILITY;
    value_mutation_distribution.push_back(temp);
    fuzzer_havoc_settings.value_mutation_distribution = value_mutation_distribution;
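// Standalone sketch (illustrative names only) of the cumulative-weight scheme built above and
// consumed by the mutators: prefix sums of the individual weights are stored, a draw is reduced
// modulo the final total, and the selected bucket is the first prefix sum exceeding the draw, so
// each bucket is chosen with probability proportional to its weight. Assumes a non-empty vector
// with a non-zero total.
#include <cstddef>
#include <cstdint>
#include <vector>

static size_t pick_weighted_bucket(const std::vector<size_t>& cumulative_weights, uint32_t draw)
{
    const size_t choice = draw % cumulative_weights.back();
    for (size_t i = 0; i < cumulative_weights.size(); i++) {
        if (choice < cumulative_weights[i]) {
            return i;
        }
    }
    return cumulative_weights.size() - 1; // unreachable when the total weight is non-zero
}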
    RunWithBuilders<CycleGroupBase, FuzzerCircuitTypes>(Data, Size, VarianceRNG);
// ...
#pragma clang diagnostic pop