    const uint64_t stagger,
    /* ... */

    BB_ASSERT_LT(fragment_u64, (1ULL << stagger), "biggroup_nafs: fragment value ≥ 2^{stagger}");

    int fragment = static_cast<int>(fragment_u64);
    // Fold the wnaf skew of the remaining (shifted) scalar into the fragment.
    if (!is_negative && wnaf_skew) {
        fragment -= (1 << stagger);
    }
    if (is_negative && wnaf_skew) {
        fragment += (1 << stagger);
    }

    // The fragment must be odd; an even fragment produces an output skew of its own,
    // corrected by a parity adjustment of +-1 (bodies reconstructed from context).
    bool output_skew = (fragment_u64 & 1) == 0;
    if (!is_negative && output_skew) {
        fragment -= 1;
    } else if (is_negative && output_skew) {
        fragment += 1;
    }
    // Map the signed odd fragment onto an unsigned "offset" entry in [0, 2^{wnaf_size}).
    // The branch condition is reconstructed from the encoding arithmetic.
    const int signed_wnaf_value = (fragment / 2);
    constexpr int wnaf_window_size = (1 << (wnaf_size - 1));
    uint64_t output_fragment = 0;
    if (fragment < 0) {
        output_fragment = static_cast<uint64_t>(wnaf_window_size + signed_wnaf_value - 1);
    } else {
        output_fragment = static_cast<uint64_t>(wnaf_window_size + signed_wnaf_value);
    }
    C* builder,
    const uint64_t* wnaf_values,
    bool is_negative,
    size_t rounds,
    const bool range_constrain_wnaf)
{
    constexpr uint64_t wnaf_window_size = (1ULL << (wnaf_size - 1));
    std::vector<field_ct> wnaf_entries;
    for (size_t i = 0; i < rounds; ++i) {
        // fixed_wnaf packs each round as sign-magnitude: bit 31 is the sign, the low 31 bits the magnitude.
        const bool predicate = ((wnaf_values[i] >> 31U) & 1U) == 1U;
        const uint64_t wnaf_magnitude = (wnaf_values[i] & 0x7fffffffU);

        // Shift the signed entry into the non-negative range [0, 2^{wnaf_size}).
        uint64_t offset_wnaf_entry = 0;
        if ((!predicate && !is_negative) || (predicate && is_negative)) {
            offset_wnaf_entry = wnaf_window_size + wnaf_magnitude;
        } else {
            offset_wnaf_entry = wnaf_window_size - wnaf_magnitude - 1;
        }
        field_ct wnaf_entry = field_ct(witness_ct(builder, offset_wnaf_entry)); // assumption: witness creation
        if (range_constrain_wnaf) {
            wnaf_entry.create_range_constraint(wnaf_size, "biggroup_nafs"); // assumption: constrained to wnaf_size bits
        }
        wnaf_entries.emplace_back(wnaf_entry);
    }
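    // Packing note (assumption, matching fixed_wnaf's output format): a round value of 0x80000003
    // has sign bit 31 set and magnitude 3, i.e. it encodes the odd digit -(2 * 3 + 1) = -7.
    // Offset entries land in [0, 2^{wnaf_size}) by construction, so range_constrain_wnaf lets
    // callers skip the explicit range check when the entries are already constrained elsewhere.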
    const size_t stagger,
    /* ... */

    for (size_t i = 0; i < rounds; ++i) {
        field_ct entry = wnaf[rounds - 1 - i];
        /* ... */
        accumulator.emplace_back(entry);
    }
    /* ... */
    sum += stagger_fragment;
    Fr reconstructed_positive_part = /* ... */;
    // Doubling implements the factor of 2 in the digit decoding identity (see the note below).
    reconstructed_positive_part =
        (reconstructed_positive_part + reconstructed_positive_part) /* ... */;
    // Constant that undoes the per-digit offset: each round contributes (2W - 1) * 2^{i * wnaf_size},
    // where W = 2^{wnaf_size - 1} is the window size.
    constexpr uint64_t wnaf_window_size = (1ULL << (wnaf_size - 1));
    uint256_t negative_constant_wnaf_offset(0);
    for (size_t i = 0; i < rounds; ++i) {
        negative_constant_wnaf_offset +=
            uint256_t((wnaf_window_size * 2) - 1) * (uint256_t(1) << (i * wnaf_size));
    }
    /* ... */
    // The wnaf rounds sit above the stagger bits; the stagger fragment contributes its own offset.
    negative_constant_wnaf_offset = negative_constant_wnaf_offset << stagger;
    /* ... */
    negative_constant_wnaf_offset += ((1ULL << wnaf_size) - 1ULL);
    Fr reconstructed_negative_part = /* ... */;

    Fr reconstructed = reconstructed_positive_part - reconstructed_negative_part;
    return reconstructed;
}
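// Reconstruction identity (sketch): each offset entry e_i encodes the signed odd digit
//   v_i = 2 * e_i - (2 * wnaf_window_size - 1),
// so the scalar is
//   k = 2 * sum_i e_i * 2^{i * wnaf_size + stagger} + (fragment and skew terms) - negative_constant_wnaf_offset,
// which is why the positive part is doubled above while the per-digit offsets are batched into one
// precomputed constant that is subtracted once.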
    const bool range_constrain_wnaf,
    /* ... */

    constexpr size_t num_rounds = ((num_bits + wnaf_size - 1) / wnaf_size);

    // Split off the low `stagger` bits of the scalar; the remaining bits get a regular fixed-window wnaf.
    const uint64_t stagger_mask = (1ULL << stagger) - 1;
    const uint64_t stagger_scalar = scalar.data[0] & stagger_mask;

    bool skew_without_stagger = false;
    /* ... */
    k_u256 = k_u256 >> stagger;
    // Assumption: both calls are bb::wnaf::fixed_wnaf (documented at the end of this file); the
    // elided branch and template arguments select the bit width of the wnaf being computed.
    if (/* ... */) {
        bb::wnaf::fixed_wnaf</* ... */>(&k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
    } else {
        bb::wnaf::fixed_wnaf</* ... */>(&k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
    }
    // Rounds needed once the stagger bits are stripped.
    const size_t num_rounds_excluding_stagger_bits = ((num_bits + wnaf_size - 1 - stagger) / wnaf_size);

    const auto [first_fragment, skew] =
        get_staggered_wnaf_fragment_value<wnaf_size>(stagger_scalar, stagger, is_negative, skew_without_stagger);
    /* ... */
    // Build the constrained circuit wnaf entries (callee elided; its parameter list matches the
    // entry-building helper above).
    const std::vector<field_ct> wnaf = /* ... */ (
        builder, &wnaf_values[0], is_negative, num_rounds_excluding_stagger_bits, range_constrain_wnaf);
    // The two skews are mutually exclusive.
    bool_ct both_skews_cannot_be_one = !(positive_skew & negative_skew);
    both_skews_cannot_be_one.assert_equal(
        bool_ct(builder, true), "biggroup_nafs: both positive and negative skews cannot be set at the same time");
    if (range_constrain_wnaf) {
        /* ... */
    }

    // Recompose the bigfield scalar from the wnaf pieces for the consistency check.
    Fr reconstructed = reconstruct_bigfield_from_wnaf<wnaf_size>(
        builder, wnaf, positive_skew, negative_skew, stagger_fragment, stagger, num_rounds_excluding_stagger_bits);
    /* ... */
    return { /* ... */
             .positive_skew = positive_skew,
             .negative_skew = negative_skew,
             .least_significant_wnaf_fragment = stagger_fragment,
             .has_wnaf_fragment = (stagger > 0) };
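    // The returned record carries everything downstream multiplication code needs: the two mutually
    // exclusive skews and, when stagger > 0 (signalled by has_wnaf_fragment), the least-significant
    // stagger fragment that must be folded back into the accumulated result.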
    const Fr& scalar, const bool range_constrain_wnaf)
{
    /* ... */
    // Each endomorphism half-scalar fits in 129 bits.
    constexpr size_t num_bits = 129;
    /* ... */

    bool klo_negative = false;
    bool khi_negative = false;
    /* ... */
    // Compute a staggered wnaf for each 129-bit half-scalar; the trailing flag marks the low (true)
    // versus high (false) half.
    const auto [klo_reconstructed, klo_out] = /* ... */ (
        builder, klo, lo_stagger, klo_negative, range_constrain_wnaf, true);
    /* ... */
    const auto [khi_reconstructed, khi_out] = /* ... */ (
        builder, khi, hi_stagger, khi_negative, range_constrain_wnaf, false);
    // Recompose the original scalar from its endomorphism halves: scalar = klo - lambda * khi.
    Fr reconstructed_scalar = khi_reconstructed.madd(minus_lambda, { klo_reconstructed });

    if (scalar.is_constant()) {
        reconstructed_scalar.self_reduce();
    }
    scalar.assert_equal(reconstructed_scalar, "biggroup_nafs: reconstructed scalar does not match reduced input");

    return { .klo = klo_out, .khi = khi_out };
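    // Note: the endomorphism split itself is computed natively (out of circuit); in circuit only the
    // GLV identity scalar = klo - lambda * khi is enforced, via the single madd and assert_equal above.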
    uint256_t scalar_multiplier = scalar_multiplier_512.lo;

    // Default to one naf round per bit of the scalar field modulus.
    const size_t num_rounds =
        (max_num_bits == 0 || scalar_multiplier == 0) ? Fr::modulus.get_msb() + 1 : max_num_bits;
    /* ... */
    if (scalar_multiplier == 0) {
        /* ... */
    }
    // A naf digit decomposition exists only for odd values: if the scalar is even, record a skew
    // and add 1; the skew is subtracted again during reconstruction.
    const bool skew_value = !scalar_multiplier.get_bit(0);
    scalar_multiplier += uint256_t(static_cast<uint64_t>(skew_value));
    /* ... */
    naf_entries[num_rounds].set_origin_tag(scalar.get_origin_tag());
    for (size_t i = 0; i < num_rounds - 1; ++i) {
        // The sign of naf digit i is determined by the next-higher bit of the adjusted scalar.
        const bool next_entry = scalar_multiplier.get_bit(i + 1);
        /* ... */
        naf_entries[num_rounds - i - 1].set_origin_tag(scalar.get_origin_tag());
    }
    /* ... */
    naf_entries[0].set_origin_tag(scalar.get_origin_tag());
    // Native-field case: recompose the scalar directly from the naf digits and compare.
    if constexpr (!Fr::is_composite) {
        std::vector<Fr> accumulators;
        for (size_t i = 0; i < num_rounds; ++i) {
            Fr entry(naf_entries[num_rounds - i - 1]);
            /* ... */
            accumulators.emplace_back(entry);
        }
        // The skew entry contributes -1, undoing the +1 parity adjustment above.
        accumulators.emplace_back(-Fr(naf_entries[num_rounds]));
        Fr accumulator_result = Fr::accumulate(accumulators);
        scalar.assert_equal(accumulator_result);
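        // Recomposition identity (with b_i the naf bit for weight 2^i and skew in {0, 1}):
        //   scalar = sum_{i=0}^{num_rounds-1} (1 - 2 * b_i) * 2^i - skew,
        // i.e. every naf digit is +-1 and the skew corrects the parity adjustment made earlier.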
    } else {
        // Composite (bigfield) case: reconstruct each half of the scalar as the difference of a
        // positive and a negative binary accumulator.
        const auto reconstruct_half_naf = [](bool_ct* nafs, const size_t half_round_length) {
            field_ct positive_accumulator(0);
            field_ct negative_accumulator(0);
            for (size_t i = 0; i < half_round_length; ++i) {
                negative_accumulator = negative_accumulator + negative_accumulator + field_ct(nafs[i]);
                positive_accumulator =
                    positive_accumulator + positive_accumulator + field_ct(1) - field_ct(nafs[i]);
            }
            return std::make_pair(positive_accumulator, negative_accumulator);
        };
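        // Each iteration doubles both accumulators and absorbs one naf bit b: the positive side adds
        // (1 - b), the negative side adds b. After the loop, positive - negative equals
        //   sum_i (1 - 2 * b_i) * 2^{half_round_length - 1 - i},
        // the same +-1 digit convention as the native-field branch, evaluated MSB-first.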
        std::pair<field_ct, field_ct> lo_accumulators;
        std::pair<field_ct, field_ct> hi_accumulators;
        // Split the naf at the limb boundary so each half fits in a bigfield limb pair.
        if (num_rounds > Fr::NUM_LIMB_BITS * 2) {
            const size_t midpoint = num_rounds - (Fr::NUM_LIMB_BITS * 2);
            hi_accumulators = reconstruct_half_naf(&naf_entries[0], midpoint);
            lo_accumulators = reconstruct_half_naf(&naf_entries[midpoint], num_rounds - midpoint);
        } else {
            lo_accumulators = reconstruct_half_naf(&naf_entries[0], num_rounds);
        }

        // The skew (an extra -1) belongs to the negative accumulator of the low half.
        lo_accumulators.second = lo_accumulators.second + field_ct(naf_entries[num_rounds]);
        // Assemble bigfield elements from the (lo, hi) accumulator halves and compare.
        Fr reconstructed_positive = Fr(lo_accumulators.first, hi_accumulators.first);
        Fr reconstructed_negative = Fr(lo_accumulators.second, hi_accumulators.second);
        Fr accumulator = reconstructed_positive - reconstructed_negative;
        /* ... */
        if (scalar.is_constant()) {
            accumulator.self_reduce();
        }
        /* ... */
        accumulator.assert_equal(scalar);
    }
    // Propagate the input scalar's origin tag to every naf entry.
    const auto original_tag = scalar.get_origin_tag();
    for (auto& naf_entry : naf_entries) {
        naf_entry.set_origin_tag(original_tag);
    }
// Native helper used above to produce the raw wnaf rounds:
//   void fixed_wnaf(const uint64_t* scalar, uint64_t* wnaf, bool& skew_map,
//                   const uint64_t point_index, const uint64_t num_points, const size_t wnaf_bits) noexcept
// Performs fixed-window non-adjacent form (wnaf) computation for scalar multiplication.
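//
// Minimal native usage sketch (illustrative assumptions: the runtime overload above operates on a
// 128-bit scalar held in two 64-bit limbs, so a 4-bit window yields 32 rounds):
//
//   uint64_t scalar[2] = { 0x123456789abcdef1ULL, 0x0fedcba987654321ULL };
//   uint64_t wnaf[32] = { 0 };
//   bool skew = false;
//   fixed_wnaf(&scalar[0], &wnaf[0], skew, /*point_index=*/0, /*num_points=*/1, /*wnaf_bits=*/4);
//   // Each wnaf[i] is sign-magnitude packed (bit 31 = sign, low bits = magnitude m encoding the
//   // odd digit 2m + 1), which is the format the in-circuit entry loop above consumes; `skew`
//   // reports whether the scalar was even and needs the trailing -1 correction.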