constexpr size_t get_num_blocks(const size_t num_bits)
{
    // 65 extra bits: the mandatory 1-bit of padding plus the 64-bit length field.
    constexpr size_t extra_bits = 65UL;
    return ((num_bits + extra_bits) / 512UL) + ((num_bits + extra_bits) % 512UL > 0);
}
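// A quick sanity check of the block count (illustrative only, not part of the
// library source; valid because get_num_blocks is constexpr):
static_assert(get_num_blocks(440) == 1, "440 + 65 = 505 bits fit in one 512-bit block");
static_assert(get_num_blocks(448) == 2, "448 + 65 = 513 bits spill into a second block");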
void prepare_constants(std::array<field_pt, 8>& input)
{
    // Seed the working state with the SHA-256 initial hash values.
    for (size_t i = 0; i < 8; i++) {
        input[i] = init_constants[i];
    }
}
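// For reference, the standard SHA-256 initial hash values that init_constants
// presumably holds (FIPS 180-4: the first 32 bits of the fractional parts of
// the square roots of the first eight primes):
constexpr uint32_t sha256_iv[8] = {
    0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
    0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
};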
template <typename Builder>
sparse_witness_limbs<Builder> convert_witness(const field_pt& w)
{
    // ... (lookup holds the plookup accumulators for w; setup elided in this excerpt)
    // Column C2 of the lookup yields the four sparse-form limbs of w:
        lookup[ColumnIdx::C2][0],
        lookup[ColumnIdx::C2][1],
        lookup[ColumnIdx::C2][2],
        lookup[ColumnIdx::C2][3],
    // Column C3 yields the matching rotated limbs:
        lookup[ColumnIdx::C3][0],
        lookup[ColumnIdx::C3][1],
        lookup[ColumnIdx::C3][2],
        lookup[ColumnIdx::C3][3],
    // ...
}
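// Why sparse limbs help, as a toy base-4 sketch (my own illustration; the
// library's actual bases and limb splits differ): give each bit of a 32-bit
// word its own base-4 digit. Adding two sparse values then never carries
// between digits, and the XOR of the inputs is the parity of each digit.
#include <cstdint>

uint64_t to_sparse_base4(uint32_t w)
{
    uint64_t sparse = 0;
    for (int i = 0; i < 32; ++i) {
        sparse |= static_cast<uint64_t>((w >> i) & 1) << (2 * i); // digit i = bit i
    }
    return sparse;
}

uint32_t xor_from_sparse_sum(uint64_t sparse_sum)
{
    uint32_t result = 0;
    for (int i = 0; i < 32; ++i) {
        result |= static_cast<uint32_t>((sparse_sum >> (2 * i)) & 1) << i; // digit parity
    }
    return result;
}
// xor_from_sparse_sum(to_sparse_base4(x) + to_sparse_base4(y)) == (x ^ y).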
template <typename Builder>
std::array<field_pt, 64> extend_witness(const std::array<field_pt, 16>& w_in)
{
    Builder* ctx = w_in[0].get_context();
    // ...
    for (size_t i = 0; i < 16; ++i) {
        // ...
        if (!ctx && w_in[i].get_context()) {
            ctx = w_in[i].get_context();
        }
    }
    for (size_t i = 16; i < 64; ++i) {
        auto& w_left = w_sparse[i - 15];
        auto& w_right = w_sparse[i - 2];
        // Decompose each operand into sparse limbs the first time it is needed.
        if (!w_left.has_sparse_limbs) {
            w_left = convert_witness(w_left.normal);
        }
        if (!w_right.has_sparse_limbs) {
            w_right = convert_witness(w_right.normal);
        }
        // Weight the sparse limbs to assemble the sigma_0 / sigma_1 rotations
        // (array declarations reconstructed around the surviving initializers):
        const std::array<field_pt, 4> left{
            w_left.sparse_limbs[0] * left_multipliers[0],
            w_left.sparse_limbs[1] * left_multipliers[1],
            w_left.sparse_limbs[2] * left_multipliers[2],
            w_left.sparse_limbs[3] * left_multipliers[3],
        };
        const std::array<field_pt, 4> right{
            w_right.sparse_limbs[0] * right_multipliers[0],
            w_right.sparse_limbs[1] * right_multipliers[1],
            w_right.sparse_limbs[2] * right_multipliers[2],
            w_right.sparse_limbs[3] * right_multipliers[3],
        };
        // add_two folds (this + x + y) into a single big-mul gate.
        const field_pt left_xor_sparse =
            left[0].add_two(left[1], left[2]).add_two(left[3], w_left.rotated_limbs[1]) * fr(4);
        const field_pt xor_result_sparse = right[0]
                                               // (one accumulation step elided in this excerpt)
                                               .add_two(right[3], w_right.rotated_limbs[2])
                                               .add_two(w_right.rotated_limbs[3], left_xor_sparse);
        // ... (xor_result is the normalized form of xor_result_sparse)
        // Schedule recurrence: the sigma contributions plus w[i - 16] and w[i - 7].
        field_pt w_out_raw = xor_result.add_two(w_sparse[i - 16].normal, w_sparse[i - 7].normal);
        // ...
        // w_out must agree with w_out_raw mod 2^32: scaling both by inv_pow_two
        // (the field inverse of 2^32) exposes the quotient in divisor.
        field_pt w_out_raw_inv_pow_two = w_out_raw * inv_pow_two;
        field_pt w_out_inv_pow_two = w_out * inv_pow_two;
        field_pt divisor = w_out_raw_inv_pow_two - w_out_inv_pow_two;
        // ...
    }
    // ...
    for (size_t i = 0; i < 64; ++i) {
        w_extended[i] = w_sparse[i].normal;
    }
    return w_extended;
}
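// For reference, the computation the circuit above reproduces is the standard
// SHA-256 message schedule (FIPS 180-4), shown here in plain C++:
#include <cstdint>

static inline uint32_t rotr(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }

void extend_message_schedule(uint32_t w[64])
{
    for (int i = 16; i < 64; ++i) {
        uint32_t s0 = rotr(w[i - 15], 7) ^ rotr(w[i - 15], 18) ^ (w[i - 15] >> 3);
        uint32_t s1 = rotr(w[i - 2], 17) ^ rotr(w[i - 2], 19) ^ (w[i - 2] >> 10);
        w[i] = w[i - 16] + s0 + w[i - 7] + s1; // unsigned addition wraps mod 2^32
    }
}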
template <typename Builder>
// ... (body elided in this excerpt)

template <typename Builder>
// ... (body elided in this excerpt; these two helpers are presumably the
// sparse-form mappings map_into_maj_sparse_form / map_into_choose_sparse_form
// called from sha256_block below)
template <typename Builder>
field_pt choose(sparse_value& e, const sparse_value& f, const sparse_value& g)
{
    // ...
    field_pt rotation_result = lookup[ColumnIdx::C3][0];
    // ...
    e.sparse = lookup[ColumnIdx::C2][0];
    // ...
    field_pt sparse_limb_3 = lookup[ColumnIdx::C2][2];
    // ...
    // Fold e's rotation accumulator and sparse limbs into a single sparse sum;
    // the fr(7) weighting keeps the Sigma_1 XOR digits separable from the
    // choose digits during normalization.
    field_pt xor_result = (rotation_result * fr(7))
                              .add_two(e.sparse * (rotation_coefficients[0] * fr(7) + fr(1)),
                                       sparse_limb_3 * (rotation_coefficients[2] * fr(7)));
    // ...
    return choose_result;
}
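// Plain-C++ reference for what the gadget evaluates: Ch(e, f, g) picks bits
// of f where e is 1 and bits of g where e is 0, and the rotation terms above
// assemble Sigma_1(e) = ROTR^6(e) ^ ROTR^11(e) ^ ROTR^25(e).
uint32_t ch(uint32_t e, uint32_t f, uint32_t g) { return (e & f) ^ (~e & g); }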
template <typename Builder>
field_pt majority(sparse_value& a, const sparse_value& b, const sparse_value& c)
{
    // ...
    field_pt rotation_result = lookup[ColumnIdx::C3][0];
    a.sparse = lookup[ColumnIdx::C2][0];
    // ...
    field_pt sparse_accumulator_2 = lookup[ColumnIdx::C2][1];
    // ...
    // Digits of the three-way majority sum lie in 0..3, so the fr(4) weighting
    // keeps the Sigma_0 XOR digits separable during normalization.
    field_pt xor_result = (rotation_result * fr(4))
                              .add_two(a.sparse * (rotation_coefficients[0] * fr(4) + fr(1)),
                                       sparse_accumulator_2 * (rotation_coefficients[1] * fr(4)));
    // ...
    return majority_result;
}
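// Plain-C++ reference: Maj(a, b, c) is the bitwise majority vote, and the
// rotation terms above assemble Sigma_0(a) = ROTR^2(a) ^ ROTR^13(a) ^ ROTR^22(a).
uint32_t maj(uint32_t a, uint32_t b, uint32_t c) { return (a & b) ^ (a & c) ^ (b & c); }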
template <typename Builder>
field_pt add_normalize(const field_pt& a, const field_pt& b)
{
    Builder* ctx = a.get_context() ? a.get_context() : b.get_context();
    // ...
    // SHA-256 arithmetic is mod 2^32: truncate the native sum to 32 bits.
    uint256_t normalized_sum = static_cast<uint32_t>(sum.data[0]);
    // ...
    if (a.is_constant() && b.is_constant()) {
        return field_pt(ctx, normalized_sum);
    }
    // ...
}
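// What the constrained (non-constant) path has to prove, sketched natively:
// a + b splits into a 32-bit result plus a small overflow, i.e.
// a + b == result + overflow * 2^32 with result < 2^32.
#include <cassert>
#include <cstdint>

uint32_t add_mod_2_32(uint32_t a, uint32_t b)
{
    uint64_t sum = uint64_t(a) + uint64_t(b);
    uint32_t result = static_cast<uint32_t>(sum); // sum mod 2^32
    uint64_t overflow = sum >> 32;                // 0 or 1 for two 32-bit summands
    assert(sum == result + (overflow << 32));     // shape of the enforced relation
    return result;
}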
// Apply the SHA-256 compression function to a single 512-bit message block.
template <typename Builder>
std::array<field_pt, 8> sha256_block(const std::array<field_pt, 8>& h_init,
                                     const std::array<field_pt, 16>& input)
{
    // Map the state words consumed by majority/choose into their sparse forms.
    // ...
    auto b = map_into_maj_sparse_form(h_init[1]);
    auto c = map_into_maj_sparse_form(h_init[2]);
    // ...
    auto f = map_into_choose_sparse_form(h_init[5]);
    auto g = map_into_choose_sparse_form(h_init[6]);
    // ...
    // Expand the 16 input words into the full 64-word message schedule.
    const auto w = extend_witness(input);
    // ...
    for (size_t i = 0; i < 64; ++i) {
        auto ch = choose(e, f, g);
        auto maj = majority(a, b, c);
        auto temp1 = ch.add_two(h.normal, w[i] + fr(round_constants[i]));
        // ...
    }
    // ...
    // Constrain every output word to 32 bits.
    for (size_t i = 0; i < 8; i++) {
        output[i].create_range_constraint(32);
    }
    // ...
    return output;
}
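// For orientation, the round loop above mirrors the standard SHA-256
// compression function (FIPS 180-4). A native sketch, reusing rotr/ch/maj
// from the earlier reference snippets, with k[] the 64 round constants:
void compress_rounds(uint32_t s[8], const uint32_t w[64], const uint32_t k[64])
{
    uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
    uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
    for (int i = 0; i < 64; ++i) {
        uint32_t S1 = rotr(e, 6) ^ rotr(e, 11) ^ rotr(e, 25);
        uint32_t temp1 = h + S1 + ch(e, f, g) + k[i] + w[i];
        uint32_t S0 = rotr(a, 2) ^ rotr(a, 13) ^ rotr(a, 22);
        uint32_t temp2 = S0 + maj(a, b, c);
        h = g; g = f; f = e; e = d + temp1;
        d = c; c = b; b = a; a = temp1 + temp2;
    }
    s[0] += a; s[1] += b; s[2] += c; s[3] += d;
    s[4] += e; s[5] += f; s[6] += g; s[7] += h;
}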