diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dc694cb8c1d8..73dc79e18afd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -48,7 +48,7 @@ jobs: # prepare images locally, tagged by commit hash - name: "Build E2E Image" timeout-minutes: 40 - run: earthly ./yarn-project+export-end-to-end + run: earthly-ci ./yarn-project+export-end-to-end # We base our e2e list used in e2e-x86 off the targets in ./yarn-project/end-to-end # (Note ARM uses just 2 tests as a smoketest) - name: Create list of end-to-end jobs @@ -76,7 +76,7 @@ jobs: - name: Test working-directory: ./yarn-project/end-to-end/ timeout-minutes: 25 - run: earthly -P --no-output +${{ matrix.test }} --e2e_mode=cache + run: earthly-ci -P --no-output +${{ matrix.test }} --e2e_mode=cache # TODO # - name: Upload logs # run: BRANCH=${{ github.ref_name }} PULL_REQUEST=${{ github.event.number }} scripts/ci/upload_logs_to_s3 ./yarn-project/end-to-end/log @@ -101,7 +101,7 @@ jobs: working-directory: ./barretenberg/cpp/ timeout-minutes: 25 # limit our parallelism to half our cores - run: earthly --no-output +test --hardware_concurrency=64 + run: earthly-ci --no-output +test --hardware_concurrency=64 noir-projects: needs: setup @@ -117,7 +117,7 @@ jobs: concurrency_key: noir-projects-${{ inputs.username || github.actor }}-x86 - name: "Noir Projects" timeout-minutes: 25 - run: earthly --no-output ./noir-projects/+test + run: earthly-ci --no-output ./noir-projects/+test yarn-project-formatting: needs: setup @@ -134,7 +134,7 @@ jobs: concurrency_key: yarn-project-formatting-${{ github.actor }}-x86 - name: "Yarn Project Tests" timeout-minutes: 25 - run: earthly --no-output ./yarn-project/+format-check + run: earthly-ci --no-output ./yarn-project/+format-check yarn-project-test: needs: noir-projects @@ -151,7 +151,7 @@ jobs: concurrency_key: yarn-project-test-${{ github.actor }}-x86 - name: "Yarn Project Tests" timeout-minutes: 25 - run: earthly --no-output ./yarn-project/+test + 
run: earthly-ci --no-output ./yarn-project/+test # push benchmarking binaries to dockerhub registry bb-bench-binaries: @@ -169,7 +169,7 @@ jobs: - name: Build and Push Binaries timeout-minutes: 15 working-directory: ./barretenberg/cpp/ - run: earthly --push +bench-binaries + run: earthly-ci --push +bench-binaries setup-bench: uses: ./.github/workflows/setup-runner.yml @@ -200,25 +200,34 @@ jobs: - name: Client IVC Bench working-directory: ./barretenberg/cpp/ timeout-minutes: 15 - run: earthly --no-output +bench-client-ivc --bench_mode=cache + run: earthly-ci --no-output +bench-client-ivc --bench_mode=cache - name: Ultrahonk Bench working-directory: ./barretenberg/cpp/ timeout-minutes: 15 - run: earthly --no-output +bench-ultra-honk --bench_mode=cache + run: earthly-ci --no-output +bench-ultra-honk --bench_mode=cache merge-check: runs-on: ubuntu-latest needs: - [ - e2e, - bb-native-tests, - bb-bench, - yarn-project-formatting, - yarn-project-test, - ] + - e2e + - bb-native-tests + - bb-bench + - yarn-project-formatting + - yarn-project-test + if: always() steps: - - run: echo Pull request merging now allowed. + - run: | + echo "e2e status: ${{ needs.e2e.result }}" + echo "bb-native-tests status: ${{ needs.bb-native-tests.result }}" + echo "bb-bench status: ${{ needs.bb-bench.result }}" + echo "yarn-project-formatting status: ${{ needs.yarn-project-formatting.result }}" + echo "yarn-project-test status: ${{ needs.yarn-project-test.result }}" + if [[ "${{ needs.e2e.result }}" != 'success' || "${{ needs.bb-native-tests.result }}" != 'success' || "${{ needs.bb-bench.result }}" != 'success' || "${{ needs.yarn-project-formatting.result }}" != 'success' || "${{ needs.yarn-project-test.result }}" != 'success' ]]; then + echo "Pull request merging not allowed due to failures." + exit 1 + fi + echo "Pull request merging now allowed." 
notify: needs: diff --git a/.github/workflows/setup-runner.yml b/.github/workflows/setup-runner.yml index 748ab5edeab8..a29ccc6d5e5f 100644 --- a/.github/workflows/setup-runner.yml +++ b/.github/workflows/setup-runner.yml @@ -62,6 +62,30 @@ jobs: with: ref: ${{ github.event.pull_request.head.sha }} + - name: Checkout Merge Pipeline Files + uses: actions/checkout@v4 + # Only check PRs for consistency (not master) + if: ${{ github.event.pull_request.head.sha != '' }} + with: + path: merge-commit-pipeline-files + sparse-checkout: | + .github/workflows/ci.yml + .github/workflows/setup-runner.yml + + - name: Ensure CI Consistency + # Only check PRs for consistency (not master) + if: ${{ github.event.pull_request.head.sha != '' }} + run: | + # Compare the checked-out CI configuration files with the reference files + if ! git diff --no-index .github/workflows/ci.yml merge-commit-pipeline-files/.github/workflows/ci.yml; then + echo "Error: ci.yml changes in master (or PR base). Please merge these changes." + exit 1 + fi + if ! git diff --no-index .github/workflows/setup-runner.yml merge-commit-pipeline-files/.github/workflows/setup-runner.yml; then + echo "Error: setup-runner.yml changes in master (or PR base). Please merge these changes." 
+ exit 1 + fi + - name: Start EC2 runner uses: ./.github/spot-runner-action with: diff --git a/.noir-sync-commit b/.noir-sync-commit index 19e9a0d30124..78cfb1ccf975 100644 --- a/.noir-sync-commit +++ b/.noir-sync-commit @@ -1 +1 @@ -0cf2e2a1b8d247bed03ba5b7b1be5cd30f0d51b2 +1ec9cdc7013e867db3672d27e3a6104e4b7e7eef \ No newline at end of file diff --git a/avm-transpiler/Cargo.lock b/avm-transpiler/Cargo.lock index e922fb18971c..7e530d6cc7b4 100644 --- a/avm-transpiler/Cargo.lock +++ b/avm-transpiler/Cargo.lock @@ -159,10 +159,6 @@ dependencies = [ "windows-sys", ] -[[package]] -name = "arena" -version = "0.28.0" - [[package]] name = "ark-bn254" version = "0.4.0" @@ -1211,6 +1207,10 @@ dependencies = [ "toml", ] +[[package]] +name = "noirc_arena" +version = "0.28.0" + [[package]] name = "noirc_driver" version = "0.28.0" @@ -1272,13 +1272,13 @@ name = "noirc_frontend" version = "0.28.0" dependencies = [ "acvm", - "arena", "chumsky", "fm", "im", "iter-extended", "lalrpop", "lalrpop-util", + "noirc_arena", "noirc_errors", "noirc_printable_type", "petgraph", diff --git a/barretenberg/.gitrepo b/barretenberg/.gitrepo index 5ade9c0d671d..3232d03252c3 100644 --- a/barretenberg/.gitrepo +++ b/barretenberg/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/AztecProtocol/barretenberg branch = master - commit = 2edbb89f3954bccf7cff3d338abca9ac22216bed - parent = 99e12b1abd7e66e871b41572a54cee63b5300d96 + commit = 795c999a3b7fe8d85af05ffc09ddfd349c00e5a4 + parent = 0a64279ba1b2b3bb6627c675b8a0b116be17f579 method = merge cmdver = 0.4.6 diff --git a/barretenberg/acir_tests/Dockerfile.bb b/barretenberg/acir_tests/Dockerfile.bb index cda8d054ca38..20cc12846dfc 100644 --- a/barretenberg/acir_tests/Dockerfile.bb +++ b/barretenberg/acir_tests/Dockerfile.bb @@ -10,8 +10,12 @@ COPY . . # Run every acir test through native bb build prove_then_verify flow for UltraPlonk. # This ensures we test independent pk construction through real/garbage witness data paths. 
RUN FLOW=prove_then_verify ./run_acir_tests.sh -# Construct and verify a UltraHonk proof for all acir programs -RUN FLOW=prove_and_verify_ultra_honk ./run_acir_tests.sh +# Construct and separately verify a UltraHonk proof for a single program +RUN FLOW=prove_then_verify_ultra_honk ./run_acir_tests.sh double_verify_nested_proof +# Construct and separately verify a GoblinUltraHonk proof for all acir programs +RUN FLOW=prove_then_verify_goblin_ultra_honk ./run_acir_tests.sh +# Construct and verify a UltraHonk proof for a single program +RUN FLOW=prove_and_verify_ultra_honk ./run_acir_tests.sh double_verify_nested_proof # Construct and verify a Goblin UltraHonk (GUH) proof for a single arbitrary program RUN FLOW=prove_and_verify_goblin_ultra_honk ./run_acir_tests.sh 6_array # Construct and verify a UltraHonk proof for all ACIR programs using the new witness stack workflow diff --git a/barretenberg/acir_tests/flows/prove_then_verify_goblin_ultra_honk.sh b/barretenberg/acir_tests/flows/prove_then_verify_goblin_ultra_honk.sh new file mode 100755 index 000000000000..9586e6841ebc --- /dev/null +++ b/barretenberg/acir_tests/flows/prove_then_verify_goblin_ultra_honk.sh @@ -0,0 +1,12 @@ +#!/bin/sh +set -eu + +VFLAG=${VERBOSE:+-v} +BFLAG="-b ./target/acir.gz" +FLAGS="-c $CRS_PATH $VFLAG" + +# Test we can perform the proof/verify flow. +# This ensures we test independent pk construction through real/garbage witness data paths. 
+$BIN prove_goblin_ultra_honk -o proof $FLAGS $BFLAG +$BIN write_vk_goblin_ultra_honk -o vk $FLAGS $BFLAG +$BIN verify_goblin_ultra_honk -k vk -p proof $FLAGS diff --git a/barretenberg/acir_tests/flows/prove_then_verify_ultra_honk.sh b/barretenberg/acir_tests/flows/prove_then_verify_ultra_honk.sh new file mode 100755 index 000000000000..bfb20c27cf09 --- /dev/null +++ b/barretenberg/acir_tests/flows/prove_then_verify_ultra_honk.sh @@ -0,0 +1,12 @@ +#!/bin/sh +set -eu + +VFLAG=${VERBOSE:+-v} +BFLAG="-b ./target/acir.gz" +FLAGS="-c $CRS_PATH $VFLAG" + +# Test we can perform the proof/verify flow. +# This ensures we test independent pk construction through real/garbage witness data paths. +$BIN prove_ultra_honk -o proof $FLAGS $BFLAG +$BIN write_vk_ultra_honk -o vk $FLAGS $BFLAG +$BIN verify_ultra_honk -k vk -p proof $FLAGS diff --git a/barretenberg/cpp/src/barretenberg/bb/main.cpp b/barretenberg/cpp/src/barretenberg/bb/main.cpp index e40f358fc326..8ff5c5ba3381 100644 --- a/barretenberg/cpp/src/barretenberg/bb/main.cpp +++ b/barretenberg/cpp/src/barretenberg/bb/main.cpp @@ -579,6 +579,116 @@ bool avm_verify(const std::filesystem::path& proof_path) return true; } +/** + * @brief Creates a proof for an ACIR circuit + * + * Communication: + * - stdout: The proof is written to stdout as a byte array + * - Filesystem: The proof is written to the path specified by outputPath + * + * @param bytecodePath Path to the file containing the serialized circuit + * @param witnessPath Path to the file containing the serialized witness + * @param outputPath Path to write the proof to + */ +template +void prove_honk(const std::string& bytecodePath, const std::string& witnessPath, const std::string& outputPath) +{ + using Builder = Flavor::CircuitBuilder; + using Prover = UltraProver_; + + auto constraint_system = get_constraint_system(bytecodePath); + auto witness = get_witness(witnessPath); + + auto builder = acir_format::create_circuit(constraint_system, 0, witness); + + const size_t 
additional_gates_buffer = 15; // conservatively large to be safe + size_t srs_size = builder.get_circuit_subgroup_size(builder.get_total_circuit_size() + additional_gates_buffer); + init_bn254_crs(srs_size); + + // Construct Honk proof + Prover prover{ builder }; + auto proof = prover.construct_proof(); + + if (outputPath == "-") { + writeRawBytesToStdout(to_buffer(proof)); + vinfo("proof written to stdout"); + } else { + write_file(outputPath, to_buffer(proof)); + vinfo("proof written to: ", outputPath); + } +} + +/** + * @brief Verifies a proof for an ACIR circuit + * + * Note: The fact that the proof was computed originally by parsing an ACIR circuit is not of importance + * because this method uses the verification key to verify the proof. + * + * Communication: + * - proc_exit: A boolean value is returned indicating whether the proof is valid. + * an exit code of 0 will be returned for success and 1 for failure. + * + * @param proof_path Path to the file containing the serialized proof + * @param vk_path Path to the file containing the serialized verification key + * @return true If the proof is valid + * @return false If the proof is invalid + */ +template bool verify_honk(const std::string& proof_path, const std::string& vk_path) +{ + using VerificationKey = Flavor::VerificationKey; + using Verifier = UltraVerifier_; + using VerifierCommitmentKey = bb::VerifierCommitmentKey; + + auto g2_data = get_bn254_g2_data(CRS_PATH); + srs::init_crs_factory({}, g2_data); + auto proof = from_buffer>(read_file(proof_path)); + auto verification_key = std::make_shared(from_buffer(read_file(vk_path))); + verification_key->pcs_verification_key = std::make_shared(); + + Verifier verifier{ verification_key }; + + bool verified = verifier.verify_proof(proof); + + vinfo("verified: ", verified); + return verified; +} + +/** + * @brief Writes a verification key for an ACIR circuit to a file + * + * Communication: + * - stdout: The verification key is written to stdout as a byte 
array + * - Filesystem: The verification key is written to the path specified by outputPath + * + * @param bytecodePath Path to the file containing the serialized circuit + * @param outputPath Path to write the verification key to + */ +template void write_vk_honk(const std::string& bytecodePath, const std::string& outputPath) +{ + using Builder = Flavor::CircuitBuilder; + using ProverInstance = ProverInstance_; + using VerificationKey = Flavor::VerificationKey; + + auto constraint_system = get_constraint_system(bytecodePath); + auto builder = acir_format::create_circuit(constraint_system, 0, {}); + + const size_t additional_gates_buffer = 15; // conservatively large to be safe + size_t srs_size = builder.get_circuit_subgroup_size(builder.get_total_circuit_size() + additional_gates_buffer); + init_bn254_crs(srs_size); + + ProverInstance prover_inst(builder); + VerificationKey vk( + prover_inst.proving_key); // uses a partial form of the proving key which only has precomputed entities + + auto serialized_vk = to_buffer(vk); + if (outputPath == "-") { + writeRawBytesToStdout(serialized_vk); + vinfo("vk written to stdout"); + } else { + write_file(outputPath, serialized_vk); + vinfo("vk written to: ", outputPath); + } +} /** * @brief Creates a proof for an ACIR circuit, outputs the proof and verification key in binary and 'field' format * @@ -721,6 +831,22 @@ int main(int argc, char* argv[]) } else if (command == "avm_verify") { std::filesystem::path proof_path = get_option(args, "-p", "./proofs/avm_proof"); return avm_verify(proof_path) ? 0 : 1; + } else if (command == "prove_ultra_honk") { + std::string output_path = get_option(args, "-o", "./proofs/proof"); + prove_honk(bytecode_path, witness_path, output_path); + } else if (command == "verify_ultra_honk") { + return verify_honk(proof_path, vk_path) ? 
0 : 1; + } else if (command == "write_vk_ultra_honk") { + std::string output_path = get_option(args, "-o", "./target/vk"); + write_vk_honk(bytecode_path, output_path); + } else if (command == "prove_goblin_ultra_honk") { + std::string output_path = get_option(args, "-o", "./proofs/proof"); + prove_honk(bytecode_path, witness_path, output_path); + } else if (command == "verify_goblin_ultra_honk") { + return verify_honk(proof_path, vk_path) ? 0 : 1; + } else if (command == "write_vk_goblin_ultra_honk") { + std::string output_path = get_option(args, "-o", "./target/vk"); + write_vk_honk(bytecode_path, output_path); } else { std::cerr << "Unknown command: " << command << "\n"; return 1; diff --git a/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/ultra_honk_rounds.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/ultra_honk_rounds.bench.cpp index 7c35497b1f52..79b53abb683c 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/ultra_honk_rounds.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/ultra_bench/ultra_honk_rounds.bench.cpp @@ -55,7 +55,6 @@ BB_PROFILE static void test_round_inner(State& state, GoblinUltraProver& prover, // we need to get the relation_parameters and prover_polynomials from the oink_prover prover.instance->proving_key = std::move(oink_prover.proving_key); prover.instance->relation_parameters = oink_prover.relation_parameters; - prover.instance->prover_polynomials = GoblinUltraFlavor::ProverPolynomials(prover.instance->proving_key); time_if_index(RELATION_CHECK, [&] { prover.execute_relation_check_rounds(); }); time_if_index(ZEROMORPH, [&] { prover.execute_zeromorph_rounds(); }); } diff --git a/barretenberg/cpp/src/barretenberg/common/serialize.hpp b/barretenberg/cpp/src/barretenberg/common/serialize.hpp index 2489b3703b11..3f3cd3778e4b 100644 --- a/barretenberg/cpp/src/barretenberg/common/serialize.hpp +++ b/barretenberg/cpp/src/barretenberg/common/serialize.hpp @@ -371,7 +371,8 @@ template 
inline void read(B& it, std::optional& opt_ } template -concept HasGetAll = requires(T t) { t.get_all(); }; +concept HasGetAll = requires(T t) { t.get_all(); } && ! +msgpack_concepts::HasMsgPack; // Write out a struct that defines get_all() template inline void write(B& buf, T const& value) diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp index c3c26a6ef701..e1828ca8fe4d 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_flavor.hpp @@ -234,32 +234,32 @@ class ECCVMFlavor { static auto get_to_be_shifted(PrecomputedAndWitnessEntitiesSuperset& entities) { // NOTE: must match order of ShiftedEntities above! - return RefArray{ entities.transcript_mul, - entities.transcript_msm_count, - entities.transcript_accumulator_x, - entities.transcript_accumulator_y, - entities.precompute_scalar_sum, - entities.precompute_s1hi, - entities.precompute_dx, - entities.precompute_dy, - entities.precompute_tx, - entities.precompute_ty, - entities.msm_transition, - entities.msm_add, - entities.msm_double, - entities.msm_skew, - entities.msm_accumulator_x, - entities.msm_accumulator_y, - entities.msm_count, - entities.msm_round, - entities.msm_add1, - entities.msm_pc, - entities.precompute_pc, - entities.transcript_pc, - entities.precompute_round, - entities.transcript_accumulator_empty, - entities.precompute_select, - entities.z_perm }; + return RefArray{ entities.transcript_mul, // column 0 + entities.transcript_msm_count, // column 1 + entities.transcript_accumulator_x, // column 2 + entities.transcript_accumulator_y, // column 3 + entities.precompute_scalar_sum, // column 4 + entities.precompute_s1hi, // column 5 + entities.precompute_dx, // column 6 + entities.precompute_dy, // column 7 + entities.precompute_tx, // column 8 + entities.precompute_ty, // column 9 + entities.msm_transition, // column 10 + entities.msm_add, // column 11 + 
entities.msm_double, // column 12 + entities.msm_skew, // column 13 + entities.msm_accumulator_x, // column 14 + entities.msm_accumulator_y, // column 15 + entities.msm_count, // column 16 + entities.msm_round, // column 17 + entities.msm_add1, // column 18 + entities.msm_pc, // column 19 + entities.precompute_pc, // column 20 + entities.transcript_pc, // column 21 + entities.precompute_round, // column 22 + entities.transcript_accumulator_empty, // column 23 + entities.precompute_select, // column 24 + entities.z_perm }; // column 25 } /** * @brief A base class labelling all entities (for instance, all of the polynomials used by the prover during @@ -293,72 +293,10 @@ class ECCVMFlavor { auto get_to_be_shifted() { return ECCVMFlavor::get_to_be_shifted(*this); } auto get_shifted() { return ShiftedEntities::get_all(); }; + auto get_precomputed() { return PrecomputedEntities::get_all(); }; }; public: - /** - * @brief The proving key is responsible for storing the polynomials used by the prover. - * @note TODO(Cody): Maybe multiple inheritance is the right thing here. In that case, nothing should eve - * inherit from ProvingKey. - */ - class ProvingKey : public ProvingKey_, WitnessEntities, CommitmentKey> { - public: - // Expose constructors on the base class - using Base = ProvingKey_, WitnessEntities, CommitmentKey>; - using Base::Base; - - ProvingKey(const CircuitBuilder& builder) - : ProvingKey_, WitnessEntities, CommitmentKey>( - builder.get_circuit_subgroup_size(builder.get_num_gates()), 0) - { - const auto [_lagrange_first, _lagrange_last] = - compute_first_and_last_lagrange_polynomials(circuit_size); - lagrange_first = _lagrange_first; - lagrange_last = _lagrange_last; - { - Polynomial _lagrange_second(circuit_size); - _lagrange_second[1] = 1; - lagrange_second = _lagrange_second.share(); - } - } - - auto get_to_be_shifted() { return ECCVMFlavor::get_to_be_shifted(*this); } - // The plookup wires that store plookup read data. 
- RefArray get_table_column_wires() { return {}; }; - }; - - /** - * @brief The verification key is responsible for storing the the commitments to the precomputed (non-witnessk) - * polynomials used by the verifier. - * - * @note Note the discrepancy with what sort of data is stored here vs in the proving key. We may want to - * resolve that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for - * portability of our circuits. - */ - class VerificationKey : public VerificationKey_, VerifierCommitmentKey> { - public: - std::vector public_inputs; - - VerificationKey(const size_t circuit_size, const size_t num_public_inputs) - : VerificationKey_(circuit_size, num_public_inputs) - {} - - VerificationKey(const std::shared_ptr& proving_key) - : public_inputs(proving_key->public_inputs) - { - this->pcs_verification_key = std::make_shared(proving_key->circuit_size); - this->circuit_size = proving_key->circuit_size; - this->log_circuit_size = numeric::get_msb(this->circuit_size); - this->num_public_inputs = proving_key->num_public_inputs; - this->pub_inputs_offset = proving_key->pub_inputs_offset; - - for (auto [polynomial, commitment] : - zip_view(proving_key->get_precomputed_polynomials(), this->get_all())) { - commitment = proving_key->commitment_key->commit(polynomial); - } - } - }; - /** * @brief A container for polynomials produced after the first round of sumcheck. * @todo TODO(#394) Use polynomial classes for guaranteed memory alignment. 
@@ -432,6 +370,13 @@ class ECCVMFlavor { } return result; } + // Set all shifted polynomials based on their to-be-shifted counterpart + void set_shifted() + { + for (auto [shifted, to_be_shifted] : zip_view(get_shifted(), get_to_be_shifted())) { + shifted = to_be_shifted.shifted(); + } + } /** * @brief Compute the ECCVM flavor polynomial data required to generate an ECCVM Proof @@ -513,7 +458,7 @@ class ECCVMFlavor { table (reads come from msm_x/y3, msm_x/y4) * @return ProverPolynomials */ - ProverPolynomials(CircuitBuilder& builder) + ProverPolynomials(const CircuitBuilder& builder) { const auto msms = builder.get_msms(); const auto flattened_muls = builder.get_flattened_scalar_muls(msms); @@ -652,31 +597,57 @@ class ECCVMFlavor { msm_slice4[i] = msm_state[i].add_state[3].slice; } }); - transcript_mul_shift = transcript_mul.shifted(); - transcript_msm_count_shift = transcript_msm_count.shifted(); - transcript_accumulator_x_shift = transcript_accumulator_x.shifted(); - transcript_accumulator_y_shift = transcript_accumulator_y.shifted(); - precompute_scalar_sum_shift = precompute_scalar_sum.shifted(); - precompute_s1hi_shift = precompute_s1hi.shifted(); - precompute_dx_shift = precompute_dx.shifted(); - precompute_dy_shift = precompute_dy.shifted(); - precompute_tx_shift = precompute_tx.shifted(); - precompute_ty_shift = precompute_ty.shifted(); - msm_transition_shift = msm_transition.shifted(); - msm_add_shift = msm_add.shifted(); - msm_double_shift = msm_double.shifted(); - msm_skew_shift = msm_skew.shifted(); - msm_accumulator_x_shift = msm_accumulator_x.shifted(); - msm_accumulator_y_shift = msm_accumulator_y.shifted(); - msm_count_shift = msm_count.shifted(); - msm_round_shift = msm_round.shifted(); - msm_add1_shift = msm_add1.shifted(); - msm_pc_shift = msm_pc.shifted(); - precompute_pc_shift = precompute_pc.shifted(); - transcript_pc_shift = transcript_pc.shifted(); - precompute_round_shift = precompute_round.shifted(); - transcript_accumulator_empty_shift = 
transcript_accumulator_empty.shifted(); - precompute_select_shift = precompute_select.shifted(); + this->set_shifted(); + } + }; + + /** + * @brief The proving key is responsible for storing the polynomials used by the prover. + * + */ + class ProvingKey : public ProvingKey_ { + public: + // Expose constructors on the base class + using Base = ProvingKey_; + using Base::Base; + + ProverPolynomials polynomials; // storage for all polynomials evaluated by the prover + + ProvingKey(const CircuitBuilder& builder) + : Base(builder.get_circuit_subgroup_size(builder.get_num_gates()), 0) + , polynomials(builder) + {} + }; + + /** + * @brief The verification key is responsible for storing the the commitments to the precomputed (non-witnessk) + * polynomials used by the verifier. + * + * @note Note the discrepancy with what sort of data is stored here vs in the proving key. We may want to + * resolve that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for + * portability of our circuits. 
+ */ + class VerificationKey : public VerificationKey_, VerifierCommitmentKey> { + public: + std::vector public_inputs; + + VerificationKey(const size_t circuit_size, const size_t num_public_inputs) + : VerificationKey_(circuit_size, num_public_inputs) + {} + + VerificationKey(const std::shared_ptr& proving_key) + : public_inputs(proving_key->public_inputs) + { + this->pcs_verification_key = std::make_shared(proving_key->circuit_size); + this->circuit_size = proving_key->circuit_size; + this->log_circuit_size = numeric::get_msb(this->circuit_size); + this->num_public_inputs = proving_key->num_public_inputs; + this->pub_inputs_offset = proving_key->pub_inputs_offset; + + for (auto [polynomial, commitment] : + zip_view(proving_key->polynomials.get_precomputed(), this->get_all())) { + commitment = proving_key->commitment_key->commit(polynomial); + } } }; diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp index 0f5066d8e2be..ea860e1e4c71 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp @@ -14,7 +14,6 @@ namespace bb { ECCVMProver::ECCVMProver(CircuitBuilder& builder, const std::shared_ptr& transcript) : transcript(transcript) - , prover_polynomials(builder) { BB_OP_COUNT_TIME_NAME("ECCVMProver(CircuitBuilder&)"); @@ -24,13 +23,6 @@ ECCVMProver::ECCVMProver(CircuitBuilder& builder, const std::shared_ptr(builder); - // Share all unshifted polys from the prover polynomials to the proving key. Note: this means that updating a - // polynomial in one container automatically updates it in the other via the shared memory. 
- for (auto [prover_poly, key_poly] : zip_view(prover_polynomials.get_unshifted(), key->get_all())) { - ASSERT(flavor_get_label(prover_polynomials, prover_poly) == flavor_get_label(*key, key_poly)); - key_poly = prover_poly.share(); - } - commitment_key = std::make_shared(key->circuit_size); } @@ -51,7 +43,7 @@ void ECCVMProver::execute_preamble_round() */ void ECCVMProver::execute_wire_commitments_round() { - auto wire_polys = key->get_wires(); + auto wire_polys = key->polynomials.get_wires(); auto labels = commitment_labels.get_wires(); for (size_t idx = 0; idx < wire_polys.size(); ++idx) { transcript->send_to_verifier(labels[idx], commitment_key->commit(wire_polys[idx])); @@ -78,8 +70,9 @@ void ECCVMProver::execute_log_derivative_commitments_round() relation_parameters.eccvm_set_permutation_delta = relation_parameters.eccvm_set_permutation_delta.invert(); // Compute inverse polynomial for our logarithmic-derivative lookup method compute_logderivative_inverse( - prover_polynomials, relation_parameters, key->circuit_size); - transcript->send_to_verifier(commitment_labels.lookup_inverses, commitment_key->commit(key->lookup_inverses)); + key->polynomials, relation_parameters, key->circuit_size); + transcript->send_to_verifier(commitment_labels.lookup_inverses, + commitment_key->commit(key->polynomials.lookup_inverses)); } /** @@ -89,9 +82,9 @@ void ECCVMProver::execute_log_derivative_commitments_round() void ECCVMProver::execute_grand_product_computation_round() { // Compute permutation grand product and their commitments - compute_permutation_grand_products(key, prover_polynomials, relation_parameters); + compute_grand_products(key->polynomials, relation_parameters); - transcript->send_to_verifier(commitment_labels.z_perm, commitment_key->commit(key->z_perm)); + transcript->send_to_verifier(commitment_labels.z_perm, commitment_key->commit(key->polynomials.z_perm)); } /** @@ -108,7 +101,7 @@ void ECCVMProver::execute_relation_check_rounds() for (size_t idx = 0; idx 
< gate_challenges.size(); idx++) { gate_challenges[idx] = transcript->template get_challenge("Sumcheck:gate_challenge_" + std::to_string(idx)); } - sumcheck_output = sumcheck.prove(prover_polynomials, relation_parameters, alpha, gate_challenges); + sumcheck_output = sumcheck.prove(key->polynomials, relation_parameters, alpha, gate_challenges); } /** @@ -118,8 +111,8 @@ void ECCVMProver::execute_relation_check_rounds() * */ void ECCVMProver::execute_zeromorph_rounds() { - ZeroMorph::prove(prover_polynomials.get_unshifted(), - prover_polynomials.get_to_be_shifted(), + ZeroMorph::prove(key->polynomials.get_unshifted(), + key->polynomials.get_to_be_shifted(), sumcheck_output.claimed_evaluations.get_unshifted(), sumcheck_output.claimed_evaluations.get_shifted(), sumcheck_output.challenge, @@ -146,11 +139,11 @@ void ECCVMProver::execute_transcript_consistency_univariate_opening_round() // Get the challenge at which we evaluate the polynomials as univariates evaluation_challenge_x = transcript->template get_challenge("Translation:evaluation_challenge_x"); - translation_evaluations.op = key->transcript_op.evaluate(evaluation_challenge_x); - translation_evaluations.Px = key->transcript_Px.evaluate(evaluation_challenge_x); - translation_evaluations.Py = key->transcript_Py.evaluate(evaluation_challenge_x); - translation_evaluations.z1 = key->transcript_z1.evaluate(evaluation_challenge_x); - translation_evaluations.z2 = key->transcript_z2.evaluate(evaluation_challenge_x); + translation_evaluations.op = key->polynomials.transcript_op.evaluate(evaluation_challenge_x); + translation_evaluations.Px = key->polynomials.transcript_Px.evaluate(evaluation_challenge_x); + translation_evaluations.Py = key->polynomials.transcript_Py.evaluate(evaluation_challenge_x); + translation_evaluations.z1 = key->polynomials.transcript_z1.evaluate(evaluation_challenge_x); + translation_evaluations.z2 = key->polynomials.transcript_z2.evaluate(evaluation_challenge_x); // Add the univariate evaluations 
to the transcript transcript->send_to_verifier("Translation:op", translation_evaluations.op); @@ -164,8 +157,9 @@ void ECCVMProver::execute_transcript_consistency_univariate_opening_round() FF ipa_batching_challenge = transcript->template get_challenge("Translation:ipa_batching_challenge"); // Collect the polynomials and evaluations to be batched - RefArray univariate_polynomials{ key->transcript_op, key->transcript_Px, key->transcript_Py, - key->transcript_z1, key->transcript_z2, hack }; + RefArray univariate_polynomials{ key->polynomials.transcript_op, key->polynomials.transcript_Px, + key->polynomials.transcript_Py, key->polynomials.transcript_z1, + key->polynomials.transcript_z2, hack }; std::array univariate_evaluations; for (auto [eval, polynomial] : zip_view(univariate_evaluations, univariate_polynomials)) { eval = polynomial.evaluate(evaluation_challenge_x); diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.hpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.hpp index b91b6d851e20..a4f99d8cde0e 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.hpp @@ -3,6 +3,7 @@ #include "barretenberg/eccvm/eccvm_flavor.hpp" #include "barretenberg/goblin/translation_evaluations.hpp" #include "barretenberg/honk/proof_system/types/proof.hpp" +#include "barretenberg/plonk_honk_shared/library/grand_product_library.hpp" #include "barretenberg/relations/relation_parameters.hpp" #include "barretenberg/sumcheck/sumcheck_output.hpp" #include "barretenberg/transcript/transcript.hpp" @@ -18,7 +19,6 @@ class ECCVMProver { using CommitmentKey = typename Flavor::CommitmentKey; using ProvingKey = typename Flavor::ProvingKey; using Polynomial = typename Flavor::Polynomial; - using ProverPolynomials = typename Flavor::ProverPolynomials; using CommitmentLabels = typename Flavor::CommitmentLabels; using Transcript = typename Flavor::Transcript; using TranslationEvaluations = 
bb::TranslationEvaluations; @@ -50,9 +50,6 @@ class ECCVMProver { std::shared_ptr key; - // Container for spans of all polynomials required by the prover (i.e. all multivariates evaluated by Sumcheck). - ProverPolynomials prover_polynomials; - CommitmentLabels commitment_labels; // Container for d + 1 Fold polynomials produced by Gemini diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_trace_checker.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_trace_checker.cpp index 349ec65c675c..c8acc2400bea 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_trace_checker.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_trace_checker.cpp @@ -1,5 +1,6 @@ #include "eccvm_trace_checker.hpp" #include "barretenberg/eccvm/eccvm_flavor.hpp" +#include "barretenberg/plonk_honk_shared/library/grand_product_library.hpp" using namespace bb; @@ -31,7 +32,7 @@ bool ECCVMTraceChecker::check(Builder& builder) ProverPolynomials polynomials(builder); const size_t num_rows = polynomials.get_polynomial_size(); compute_logderivative_inverse>(polynomials, params, num_rows); - compute_permutation_grand_product>(num_rows, polynomials, params); + compute_grand_product>(polynomials, params); polynomials.z_perm_shift = Polynomial(polynomials.z_perm.shifted()); diff --git a/barretenberg/cpp/src/barretenberg/execution_trace/execution_trace.cpp b/barretenberg/cpp/src/barretenberg/execution_trace/execution_trace.cpp index b232fa00799e..d582089921ec 100644 --- a/barretenberg/cpp/src/barretenberg/execution_trace/execution_trace.cpp +++ b/barretenberg/cpp/src/barretenberg/execution_trace/execution_trace.cpp @@ -6,10 +6,10 @@ namespace bb { template -void ExecutionTrace_::populate(Builder& builder, typename Flavor::ProvingKey& proving_key) +void ExecutionTrace_::populate(Builder& builder, typename Flavor::ProvingKey& proving_key, bool is_structured) { // Construct wire polynomials, selector polynomials, and copy cycles from raw circuit data - auto trace_data = 
construct_trace_data(builder, proving_key.circuit_size); + auto trace_data = construct_trace_data(builder, proving_key.circuit_size, is_structured); add_wires_and_selectors_to_proving_key(trace_data, builder, proving_key); @@ -31,10 +31,12 @@ void ExecutionTrace_::add_wires_and_selectors_to_proving_key(TraceData& typename Flavor::ProvingKey& proving_key) { if constexpr (IsHonkFlavor) { - for (auto [pkey_wire, trace_wire] : zip_view(proving_key.get_wires(), trace_data.wires)) { + for (auto [pkey_wire, trace_wire] : zip_view(proving_key.polynomials.get_wires(), trace_data.wires)) { pkey_wire = trace_wire.share(); } - for (auto [pkey_selector, trace_selector] : zip_view(proving_key.get_selectors(), trace_data.selectors)) { + proving_key.polynomials.set_shifted(); // Ensure shifted wires are set correctly + for (auto [pkey_selector, trace_selector] : + zip_view(proving_key.polynomials.get_selectors(), trace_data.selectors)) { pkey_selector = trace_selector.share(); } proving_key.pub_inputs_offset = trace_data.pub_inputs_offset; @@ -69,7 +71,8 @@ void ExecutionTrace_::add_memory_records_to_proving_key(TraceData& trace template typename ExecutionTrace_::TraceData ExecutionTrace_::construct_trace_data(Builder& builder, - size_t dyadic_circuit_size) + size_t dyadic_circuit_size, + bool is_structured) { TraceData trace_data{ dyadic_circuit_size, builder }; @@ -113,7 +116,12 @@ typename ExecutionTrace_::TraceData ExecutionTrace_::construct_t trace_data.pub_inputs_offset = offset; } - offset += block_size; + // If the trace is structured, we populate the data from the next block at a fixed block size offset + if (is_structured) { + offset += builder.FIXED_BLOCK_SIZE; + } else { // otherwise, the next block starts immediately following the previous one + offset += block_size; + } } return trace_data; } @@ -140,28 +148,17 @@ void ExecutionTrace_::add_ecc_op_wires_to_proving_key(Builder& builder, typename Flavor::ProvingKey& proving_key) requires IsGoblinFlavor { - // Initialize 
the ecc op wire polynomials to zero on the whole domain - std::array op_wire_polynomials; - for (auto& poly : op_wire_polynomials) { - poly = Polynomial{ proving_key.circuit_size }; - } - Polynomial ecc_op_selector{ proving_key.circuit_size }; - // Copy the ecc op data from the conventional wires into the op wires over the range of ecc op gates + auto& ecc_op_selector = proving_key.polynomials.lagrange_ecc_op; const size_t op_wire_offset = Flavor::has_zero_row ? 1 : 0; - for (auto [ecc_op_wire, wire] : zip_view(op_wire_polynomials, proving_key.get_wires())) { + for (auto [ecc_op_wire, wire] : + zip_view(proving_key.polynomials.get_ecc_op_wires(), proving_key.polynomials.get_wires())) { for (size_t i = 0; i < builder.blocks.ecc_op.size(); ++i) { size_t idx = i + op_wire_offset; ecc_op_wire[idx] = wire[idx]; - ecc_op_selector[idx] = 1; // construct the selector as the indicator on the ecc op block + ecc_op_selector[idx] = 1; // construct selector as the indicator on the ecc op block } } - - proving_key.ecc_op_wire_1 = op_wire_polynomials[0].share(); - proving_key.ecc_op_wire_2 = op_wire_polynomials[1].share(); - proving_key.ecc_op_wire_3 = op_wire_polynomials[2].share(); - proving_key.ecc_op_wire_4 = op_wire_polynomials[3].share(); - proving_key.lagrange_ecc_op = ecc_op_selector.share(); } template class ExecutionTrace_; diff --git a/barretenberg/cpp/src/barretenberg/execution_trace/execution_trace.hpp b/barretenberg/cpp/src/barretenberg/execution_trace/execution_trace.hpp index 3dd3899242e5..ddbc4babffee 100644 --- a/barretenberg/cpp/src/barretenberg/execution_trace/execution_trace.hpp +++ b/barretenberg/cpp/src/barretenberg/execution_trace/execution_trace.hpp @@ -39,10 +39,16 @@ template class ExecutionTrace_ { /** * @brief Given a circuit, populate a proving key with wire polys, selector polys, and sigma/id polys + * @note By default, this method constructs an exectution trace that is sorted by gate type. 
Optionally, it + * constructs a trace that is both sorted and "structured" in the sense that each block/gate-type has a fixed amount + * of space within the wire polynomials, regardless of how many actual constraints of each type exist. This is + * useful primarily for folding since it guarantees that the set of relations that must be executed at each row is + * consistent across all instances. * * @param builder + * @param is_structured whether or not the trace is to be structured with a fixed block size */ - static void populate(Builder& builder, ProvingKey&); + static void populate(Builder& builder, ProvingKey&, bool is_structured = false); private: /** @@ -78,9 +84,10 @@ template class ExecutionTrace_ { * * @param builder * @param dyadic_circuit_size + * @param is_structured whether or not the trace is to be structured with a fixed block size * @return TraceData */ - static TraceData construct_trace_data(Builder& builder, size_t dyadic_circuit_size); + static TraceData construct_trace_data(Builder& builder, size_t dyadic_circuit_size, bool is_structured = false); /** * @brief Populate the public inputs block diff --git a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp index d2a847a0dbcc..18eda8ecd0cf 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp @@ -98,8 +98,35 @@ class PrecomputedEntitiesBase { * @tparam FF The scalar field on which we will encode our polynomial data. When instantiating, this may be extractable * from the other template paramter. 
*/ +template class ProvingKey_ { + public: + size_t circuit_size; + bool contains_recursive_proof; + std::vector recursive_proof_public_input_indices; + bb::EvaluationDomain evaluation_domain; + std::shared_ptr commitment_key; + size_t num_public_inputs; + size_t log_circuit_size; + + // Offset off the public inputs from the start of the execution trace + size_t pub_inputs_offset = 0; + + // The number of public inputs has to be the same for all instances because they are + // folded element by element. + std::vector public_inputs; + + ProvingKey_() = default; + ProvingKey_(const size_t circuit_size, const size_t num_public_inputs) + { + this->commitment_key = std::make_shared(circuit_size + 1); + this->evaluation_domain = bb::EvaluationDomain(circuit_size, circuit_size); + this->circuit_size = circuit_size; + this->log_circuit_size = numeric::get_msb(circuit_size); + this->num_public_inputs = num_public_inputs; + }; +}; template -class ProvingKey_ : public PrecomputedPolynomials, public WitnessPolynomials { +class ProvingKeyAvm_ : public PrecomputedPolynomials, public WitnessPolynomials { public: using Polynomial = typename PrecomputedPolynomials::DataType; using FF = typename Polynomial::FF; @@ -110,9 +137,7 @@ class ProvingKey_ : public PrecomputedPolynomials, public WitnessPolynomials { bb::EvaluationDomain evaluation_domain; std::shared_ptr commitment_key; - // offset due to placing zero wires at the start of execution trace - // non-zero for Instances constructed from circuits, this concept doesn't exist for accumulated - // instances + // Offset off the public inputs from the start of the execution trace size_t pub_inputs_offset = 0; // The number of public inputs has to be the same for all instances because they are @@ -128,8 +153,8 @@ class ProvingKey_ : public PrecomputedPolynomials, public WitnessPolynomials { auto get_witness_polynomials() { return WitnessPolynomials::get_all(); } auto get_precomputed_polynomials() { return 
PrecomputedPolynomials::get_all(); } auto get_selectors() { return PrecomputedPolynomials::get_selectors(); } - ProvingKey_() = default; - ProvingKey_(const size_t circuit_size, const size_t num_public_inputs) + ProvingKeyAvm_() = default; + ProvingKeyAvm_(const size_t circuit_size, const size_t num_public_inputs) { this->commitment_key = std::make_shared(circuit_size + 1); this->evaluation_domain = bb::EvaluationDomain(circuit_size, circuit_size); diff --git a/barretenberg/cpp/src/barretenberg/flavor/flavor.test.cpp b/barretenberg/cpp/src/barretenberg/flavor/flavor.test.cpp index 889f4f9504cf..06d24472a9a3 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/flavor.test.cpp +++ b/barretenberg/cpp/src/barretenberg/flavor/flavor.test.cpp @@ -17,7 +17,7 @@ TEST(Flavor, Getters) // set size_t coset_idx = 0; - for (auto& id_poly : proving_key.get_id_polynomials()) { + for (auto& id_poly : proving_key.polynomials.get_ids()) { typename Flavor::Polynomial new_poly(proving_key.circuit_size); for (size_t i = 0; i < proving_key.circuit_size; ++i) { id_poly[i] = coset_idx * proving_key.circuit_size + i; @@ -26,9 +26,9 @@ TEST(Flavor, Getters) } // Polynomials in the proving key can be set through loops over subsets produced by the getters - EXPECT_EQ(proving_key.id_1[0], FF(0)); - EXPECT_EQ(proving_key.id_2[0], FF(4)); - EXPECT_EQ(proving_key.id_3[0], FF(8)); + EXPECT_EQ(proving_key.polynomials.id_1[0], FF(0)); + EXPECT_EQ(proving_key.polynomials.id_2[0], FF(4)); + EXPECT_EQ(proving_key.polynomials.id_3[0], FF(8)); Flavor::ProverPolynomials prover_polynomials; Flavor::CommitmentLabels commitment_labels; diff --git a/barretenberg/cpp/src/barretenberg/flavor/relation_definitions.hpp b/barretenberg/cpp/src/barretenberg/flavor/relation_definitions.hpp index 203542d91275..0b1fed6342ee 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/relation_definitions.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/relation_definitions.hpp @@ -31,9 +31,9 @@ #define 
SUMCHECK_PERMUTATION_CLASS(...) _SUMCHECK_PERMUTATION_CLASS(__VA_ARGS__) #define DEFINE_SUMCHECK_PERMUTATION_CLASS(RelationImpl, Flavor) \ - PERMUTATION_METHOD(compute_permutation_numerator, RelationImpl, Flavor, UnivariateAccumulator0, ExtendedEdge) \ - PERMUTATION_METHOD(compute_permutation_numerator, RelationImpl, Flavor, ValueAccumulator0, EvaluationEdge) \ - PERMUTATION_METHOD(compute_permutation_numerator, RelationImpl, Flavor, ValueAccumulator0, EntityEdge) \ - PERMUTATION_METHOD(compute_permutation_denominator, RelationImpl, Flavor, UnivariateAccumulator0, ExtendedEdge) \ - PERMUTATION_METHOD(compute_permutation_denominator, RelationImpl, Flavor, ValueAccumulator0, EvaluationEdge) \ - PERMUTATION_METHOD(compute_permutation_denominator, RelationImpl, Flavor, ValueAccumulator0, EntityEdge) + PERMUTATION_METHOD(compute_grand_product_numerator, RelationImpl, Flavor, UnivariateAccumulator0, ExtendedEdge) \ + PERMUTATION_METHOD(compute_grand_product_numerator, RelationImpl, Flavor, ValueAccumulator0, EvaluationEdge) \ + PERMUTATION_METHOD(compute_grand_product_numerator, RelationImpl, Flavor, ValueAccumulator0, EntityEdge) \ + PERMUTATION_METHOD(compute_grand_product_denominator, RelationImpl, Flavor, UnivariateAccumulator0, ExtendedEdge) \ + PERMUTATION_METHOD(compute_grand_product_denominator, RelationImpl, Flavor, ValueAccumulator0, EvaluationEdge) \ + PERMUTATION_METHOD(compute_grand_product_denominator, RelationImpl, Flavor, ValueAccumulator0, EntityEdge) diff --git a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp index cb87096063ee..e6457060f6f2 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp +++ b/barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp @@ -116,7 +116,7 @@ class GoblinMockCircuits { * * @param builder */ - static void construct_simple_circuit(GoblinUltraBuilder& builder) + static void add_some_ecc_op_gates(GoblinUltraBuilder& builder) { // 
Add some arbitrary ecc op gates for (size_t i = 0; i < 3; ++i) { @@ -127,7 +127,16 @@ class GoblinMockCircuits { } // queues the result of the preceding ECC builder.queue_ecc_eq(); // should be eq and reset + } + /** + * @brief Generate a simple test circuit with some ECC op gates and conventional arithmetic gates + * + * @param builder + */ + static void construct_simple_circuit(GoblinUltraBuilder& builder) + { + add_some_ecc_op_gates(builder); MockCircuits::construct_arithmetic_circuit(builder); } diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/permutation_library.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/permutation_library.hpp index a061412d85d4..8ecb9e376ce4 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/permutation_library.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/permutation_library.hpp @@ -7,166 +7,6 @@ namespace bb { -/** - * @brief Compute a permutation grand product polynomial Z_perm(X) - * * - * @details - * Z_perm may be defined in terms of its values on X_i = 0,1,...,n-1 as Z_perm[0] = 1 and for i = 1:n-1 - * relation::numerator(j) - * Z_perm[i] = ∏ -------------------------------------------------------------------------------- - * relation::denominator(j) - * - * where ∏ := ∏_{j=0:i-1} - * - * The specific algebraic relation used by Z_perm is defined by Flavor::GrandProductRelations - * - * For example, in Flavor::Standard the relation describes: - * - * (w_1(j) + β⋅id_1(j) + γ) ⋅ (w_2(j) + β⋅id_2(j) + γ) ⋅ (w_3(j) + β⋅id_3(j) + γ) - * Z_perm[i] = ∏ -------------------------------------------------------------------------------- - * (w_1(j) + β⋅σ_1(j) + γ) ⋅ (w_2(j) + β⋅σ_2(j) + γ) ⋅ (w_3(j) + β⋅σ_3(j) + γ) - * where ∏ := ∏_{j=0:i-1} and id_i(X) = id(X) + n*(i-1) - * - * For Flavor::Ultra both the UltraPermutation and Lookup grand products are computed by this method. - * - * The grand product is constructed over the course of three steps. 
- * - * For expositional simplicity, write Z_perm[i] as - * - * A(j) - * Z_perm[i] = ∏ -------------------------- - * B(h) - * - * Step 1) Compute 2 length-n polynomials A, B - * Step 2) Compute 2 length-n polynomials numerator = ∏ A(j), nenominator = ∏ B(j) - * Step 3) Compute Z_perm[i + 1] = numerator[i] / denominator[i] (recall: Z_perm[0] = 1) - * - * Note: Step (3) utilizes Montgomery batch inversion to replace n-many inversions with - */ -template -void compute_permutation_grand_product(const size_t circuit_size, - auto& full_polynomials, - bb::RelationParameters& relation_parameters) -{ - using FF = typename Flavor::FF; - using Polynomial = typename Flavor::Polynomial; - using Accumulator = std::tuple_element_t<0, typename GrandProdRelation::SumcheckArrayOfValuesOverSubrelations>; - - // Allocate numerator/denominator polynomials that will serve as scratch space - // TODO(zac) we can re-use the permutation polynomial as the numerator polynomial. - // Reduces readability (issue #2215) - Polynomial numerator = Polynomial{ circuit_size }; - Polynomial denominator = Polynomial{ circuit_size }; - - // Step (1) - // Populate `numerator` and `denominator` with the algebra described by GrandProdRelation - static constexpr size_t MIN_CIRCUIT_SIZE_TO_MULTITHREAD = 64; - const size_t num_threads = circuit_size >= MIN_CIRCUIT_SIZE_TO_MULTITHREAD - ? (circuit_size >= get_num_cpus_pow2() ? get_num_cpus_pow2() : 1) - : 1; - const size_t block_size = circuit_size / num_threads; - parallel_for(num_threads, [&](size_t thread_idx) { - const size_t start = thread_idx * block_size; - const size_t end = (thread_idx + 1) * block_size; - for (size_t i = start; i < end; ++i) { - - typename Flavor::AllValues evaluations; - for (auto [eval, poly] : zip_view(evaluations.get_all(), full_polynomials.get_all())) { - eval = poly.size() > i ? 
poly[i] : 0; - } - numerator[i] = GrandProdRelation::template compute_permutation_numerator(evaluations, - relation_parameters); - denominator[i] = GrandProdRelation::template compute_permutation_denominator( - evaluations, relation_parameters); - } - }); - - // Step (2) - // Compute the accumulating product of the numerator and denominator terms. - // This step is split into three parts for efficient multithreading: - // (i) compute ∏ A(j), ∏ B(j) subproducts for each thread - // (ii) compute scaling factor required to convert each subproduct into a single running product - // (ii) combine subproducts into a single running product - // - // For example, consider 4 threads and a size-8 numerator { a0, a1, a2, a3, a4, a5, a6, a7 } - // (i) Each thread computes 1 element of N = {{ a0, a0a1 }, { a2, a2a3 }, { a4, a4a5 }, { a6, a6a7 }} - // (ii) Take partial products P = { 1, a0a1, a2a3, a4a5 } - // (iii) Each thread j computes N[i][j]*P[j]= - // {{a0,a0a1},{a0a1a2,a0a1a2a3},{a0a1a2a3a4,a0a1a2a3a4a5},{a0a1a2a3a4a5a6,a0a1a2a3a4a5a6a7}} - std::vector partial_numerators(num_threads); - std::vector partial_denominators(num_threads); - - parallel_for(num_threads, [&](size_t thread_idx) { - const size_t start = thread_idx * block_size; - const size_t end = (thread_idx + 1) * block_size; - for (size_t i = start; i < end - 1; ++i) { - numerator[i + 1] *= numerator[i]; - denominator[i + 1] *= denominator[i]; - } - partial_numerators[thread_idx] = numerator[end - 1]; - partial_denominators[thread_idx] = denominator[end - 1]; - }); - - parallel_for(num_threads, [&](size_t thread_idx) { - const size_t start = thread_idx * block_size; - const size_t end = (thread_idx + 1) * block_size; - if (thread_idx > 0) { - FF numerator_scaling = 1; - FF denominator_scaling = 1; - - for (size_t j = 0; j < thread_idx; ++j) { - numerator_scaling *= partial_numerators[j]; - denominator_scaling *= partial_denominators[j]; - } - for (size_t i = start; i < end; ++i) { - numerator[i] *= 
numerator_scaling; - denominator[i] *= denominator_scaling; - } - } - - // Final step: invert denominator - FF::batch_invert(std::span{ &denominator[start], block_size }); - }); - - // Step (3) Compute z_perm[i] = numerator[i] / denominator[i] - auto& grand_product_polynomial = GrandProdRelation::get_grand_product_polynomial(full_polynomials); - grand_product_polynomial[0] = 0; - parallel_for(num_threads, [&](size_t thread_idx) { - const size_t start = thread_idx * block_size; - const size_t end = (thread_idx == num_threads - 1) ? circuit_size - 1 : (thread_idx + 1) * block_size; - for (size_t i = start; i < end; ++i) { - grand_product_polynomial[i + 1] = numerator[i] * denominator[i]; - } - }); -} - -template -void compute_permutation_grand_products(std::shared_ptr& key, - typename Flavor::ProverPolynomials& full_polynomials, - bb::RelationParameters& relation_parameters) -{ - using GrandProductRelations = typename Flavor::GrandProductRelations; - using FF = typename Flavor::FF; - - constexpr size_t NUM_RELATIONS = std::tuple_size{}; - bb::constexpr_for<0, NUM_RELATIONS, 1>([&]() { - using PermutationRelation = typename std::tuple_element::type; - - // Assign the grand product polynomial to the relevant std::span member of `full_polynomials` (and its shift) - // For example, for UltraPermutationRelation, this will be `full_polynomials.z_perm` - // For example, for LookupRelation, this will be `full_polynomials.z_lookup` - bb::Polynomial& full_polynomial = PermutationRelation::get_grand_product_polynomial(full_polynomials); - bb::Polynomial& key_polynomial = PermutationRelation::get_grand_product_polynomial(*key); - full_polynomial = key_polynomial.share(); - - compute_permutation_grand_product( - key->circuit_size, full_polynomials, relation_parameters); - bb::Polynomial& full_polynomial_shift = - PermutationRelation::get_shifted_grand_product_polynomial(full_polynomials); - full_polynomial_shift = key_polynomial.shifted(); - }); -} - /** * @brief Compute new 
polynomials which are the concatenated versions of other polynomials * @@ -181,16 +21,15 @@ void compute_permutation_grand_products(std::shared_ptr void compute_concatenated_polynomials(StorageHandle* proving_key) +template void compute_concatenated_polynomials(typename Flavor::ProverPolynomials& polynomials) { // Concatenation groups are vectors of polynomials that are concatenated together - auto concatenation_groups = proving_key->get_concatenation_groups(); + auto concatenation_groups = polynomials.get_concatenation_groups(); // Resulting concatenated polynomials - auto targets = proving_key->get_concatenated_constraints(); + auto targets = polynomials.get_concatenated_constraints(); // Targets have to be full-sized polynomials. We can compute the mini circuit size from them by dividing by // concatenation index @@ -236,11 +75,10 @@ template void compute_concatenated_pol * can construct a proof when ( k + 1 ) ⋅ ( max_range/ 3 + 1 ) < concatenated size * * @tparam Flavor - * @tparam StorageHandle * @param proving_key */ -template -void compute_goblin_translator_range_constraint_ordered_polynomials(StorageHandle* proving_key, +template +void compute_goblin_translator_range_constraint_ordered_polynomials(typename Flavor::ProverPolynomials& polynomials, size_t mini_circuit_dyadic_size) { @@ -271,14 +109,14 @@ void compute_goblin_translator_range_constraint_ordered_polynomials(StorageHandl } std::vector> ordered_vectors_uint(num_concatenated_wires); - auto ordered_constraint_polynomials = std::vector{ &proving_key->ordered_range_constraints_0, - &proving_key->ordered_range_constraints_1, - &proving_key->ordered_range_constraints_2, - &proving_key->ordered_range_constraints_3 }; + auto ordered_constraint_polynomials = std::vector{ &polynomials.ordered_range_constraints_0, + &polynomials.ordered_range_constraints_1, + &polynomials.ordered_range_constraints_2, + &polynomials.ordered_range_constraints_3 }; std::vector extra_denominator_uint(full_circuit_size); // Get 
information which polynomials need to be concatenated - auto concatenation_groups = proving_key->get_concatenation_groups(); + auto concatenation_groups = polynomials.get_concatenation_groups(); // A function that transfers elements from each of the polynomials in the chosen concatenation group in the uint // ordered polynomials @@ -352,7 +190,7 @@ void compute_goblin_translator_range_constraint_ordered_polynomials(StorageHandl // And copy it to the actual polynomial std::transform(extra_denominator_uint.cbegin(), extra_denominator_uint.cend(), - proving_key->ordered_range_constraints_4.begin(), + polynomials.ordered_range_constraints_4.begin(), [](uint32_t in) { return FF(in); }); } diff --git a/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp b/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp index bc856916cb83..d72ec79e4ee3 100644 --- a/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp +++ b/barretenberg/cpp/src/barretenberg/plonk/composer/ultra_composer.cpp @@ -242,7 +242,18 @@ void UltraComposer::add_table_column_selector_poly_to_proving_key(polynomial& se void UltraComposer::construct_table_polynomials(CircuitBuilder& circuit, size_t subgroup_size) { size_t additional_offset = s_randomness + 1; - auto table_polynomials = construct_lookup_table_polynomials(circuit, subgroup_size, additional_offset); + + using Polynomial = typename Flavor::Polynomial; + std::array table_polynomials; + for (auto& poly : table_polynomials) { + poly = Polynomial(subgroup_size); + } + + construct_lookup_table_polynomials( + { table_polynomials[0], table_polynomials[1], table_polynomials[2], table_polynomials[3] }, + circuit, + subgroup_size, + additional_offset); // // In the case of using UltraPlonkComposer for a circuit which does _not_ make use of any lookup tables, all four // // table columns would be all zeros. 
This would result in these polys' commitments all being the point at diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/arithmetization/arithmetization.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/arithmetization/arithmetization.hpp index 21490b283cf2..c864af55e7c9 100644 --- a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/arithmetization/arithmetization.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/arithmetization/arithmetization.hpp @@ -119,6 +119,7 @@ template class UltraArith { public: static constexpr size_t NUM_WIRES = 4; static constexpr size_t NUM_SELECTORS = 11; + static constexpr size_t FIXED_BLOCK_SIZE = 1 << 10; // Size of each block in a structured trace (arbitrary for now) using FF = FF_; class UltraTraceBlock : public ExecutionTraceBlock { @@ -165,7 +166,7 @@ template class UltraArith { auto get() { return RefArray{ pub_inputs, arithmetic, delta_range, elliptic, aux, lookup }; } - void summarize() + void summarize() const { info("Gate blocks summary:"); info("pub inputs:\t", pub_inputs.size()); @@ -196,6 +197,8 @@ template class UltraHonkArith { public: static constexpr size_t NUM_WIRES = 4; static constexpr size_t NUM_SELECTORS = 14; + static constexpr size_t FIXED_BLOCK_SIZE = 1 << 10; // Size of each block in a structured trace (arbitrary for now) + using FF = FF_; class UltraHonkTraceBlock : public ExecutionTraceBlock { @@ -279,7 +282,7 @@ template class UltraHonkArith { aux, lookup, busread, poseidon_external, poseidon_internal }; } - void summarize() + void summarize() const { info("Gate blocks summary:"); info("goblin ecc op:\t", ecc_op.size()); diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/composer/composer_lib.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/composer/composer_lib.hpp index b4ab7818f40e..06a901d7425f 100644 --- a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/composer/composer_lib.hpp +++ 
b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/composer/composer_lib.hpp @@ -1,4 +1,5 @@ #pragma once +#include "barretenberg/common/ref_array.hpp" #include "barretenberg/flavor/flavor.hpp" #include "barretenberg/polynomials/polynomial_store.hpp" @@ -7,15 +8,11 @@ namespace bb { template -std::array construct_lookup_table_polynomials( - const typename Flavor::CircuitBuilder& circuit, size_t dyadic_circuit_size, size_t additional_offset = 0) +void construct_lookup_table_polynomials(RefArray table_polynomials, + const typename Flavor::CircuitBuilder& circuit, + size_t dyadic_circuit_size, + size_t additional_offset = 0) { - using Polynomial = typename Flavor::Polynomial; - std::array table_polynomials; - for (auto& poly : table_polynomials) { - poly = Polynomial(dyadic_circuit_size); - } - // Create lookup selector polynomials which interpolate each table column. // Our selector polys always need to interpolate the full subgroup size, so here we offset so as to // put the table column's values at the end. (The first gates are for non-lookup constraints). @@ -23,6 +20,7 @@ std::array construct_lookup_table_polynomials( // ^^^^^^^^^ ^^^^^^^^ ^^^^^^^ ^nonzero to ensure uniqueness and to avoid infinity commitments // | table randomness // ignored, as used for regular constraints and padding to the next power of 2. 
+ ASSERT(dyadic_circuit_size > circuit.get_tables_size() + additional_offset); size_t offset = dyadic_circuit_size - circuit.get_tables_size() - additional_offset; for (const auto& table : circuit.lookup_tables) { @@ -36,7 +34,6 @@ std::array construct_lookup_table_polynomials( ++offset; } } - return table_polynomials; } /** diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/composer/permutation_lib.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/composer/permutation_lib.hpp index 04629363042a..52bd04ec97df 100644 --- a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/composer/permutation_lib.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/composer/permutation_lib.hpp @@ -180,7 +180,7 @@ PermutationMapping compute_permutation_mapping( template void compute_honk_style_permutation_lagrange_polynomials_from_mapping( const RefSpan& permutation_polynomials, // sigma or ID poly - std::array, Flavor::NUM_WIRES>& permutation_mappings, + const std::array, Flavor::NUM_WIRES>& permutation_mappings, typename Flavor::ProvingKey* proving_key) { using FF = typename Flavor::FF; @@ -384,9 +384,9 @@ void compute_permutation_argument_polynomials(const typename Flavor::CircuitBuil } else if constexpr (IsUltraFlavor) { // any UltraHonk flavor // Compute Honk-style sigma and ID polynomials from the corresponding mappings compute_honk_style_permutation_lagrange_polynomials_from_mapping( - key->get_sigma_polynomials(), mapping.sigmas, key); + key->polynomials.get_sigmas(), mapping.sigmas, key); compute_honk_style_permutation_lagrange_polynomials_from_mapping( - key->get_id_polynomials(), mapping.ids, key); + key->polynomials.get_ids(), mapping.ids, key); } } diff --git a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/library/grand_product_library.hpp b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/library/grand_product_library.hpp index 78899bc1a226..43d01587541d 100644 --- 
a/barretenberg/cpp/src/barretenberg/plonk_honk_shared/library/grand_product_library.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk_honk_shared/library/grand_product_library.hpp @@ -48,8 +48,7 @@ namespace bb { * Note: Step (3) utilizes Montgomery batch inversion to replace n-many inversions with */ template -void compute_grand_product(const size_t circuit_size, - typename Flavor::ProverPolynomials& full_polynomials, +void compute_grand_product(typename Flavor::ProverPolynomials& full_polynomials, bb::RelationParameters& relation_parameters) { using FF = typename Flavor::FF; @@ -58,6 +57,7 @@ void compute_grand_product(const size_t circuit_size, // Allocate numerator/denominator polynomials that will serve as scratch space // TODO(zac) we can re-use the permutation polynomial as the numerator polynomial. Reduces readability + size_t circuit_size = full_polynomials.get_polynomial_size(); Polynomial numerator{ circuit_size }; Polynomial denominator{ circuit_size }; @@ -65,16 +65,14 @@ void compute_grand_product(const size_t circuit_size, // Populate `numerator` and `denominator` with the algebra described by Relation const size_t num_threads = circuit_size >= get_num_cpus_pow2() ? get_num_cpus_pow2() : 1; const size_t block_size = circuit_size / num_threads; - auto full_polynomials_view = full_polynomials.get_all(); parallel_for(num_threads, [&](size_t thread_idx) { const size_t start = thread_idx * block_size; const size_t end = (thread_idx + 1) * block_size; typename Flavor::AllValues evaluations; - auto evaluations_view = evaluations.get_all(); // TODO(https://github.com/AztecProtocol/barretenberg/issues/940): construction of evaluations is equivalent to // calling get_row which creates full copies. avoid? for (size_t i = start; i < end; ++i) { - for (auto [eval, full_poly] : zip_view(evaluations_view, full_polynomials_view)) { + for (auto [eval, full_poly] : zip_view(evaluations.get_all(), full_polynomials.get_all())) { eval = full_poly.size() > i ? 
full_poly[i] : 0; } numerator[i] = GrandProdRelation::template compute_grand_product_numerator( @@ -143,29 +141,21 @@ void compute_grand_product(const size_t circuit_size, }); } +/** + * @brief Compute the grand product corresponding to each grand-product relation defined in the Flavor + * + */ template -void compute_grand_products(const typename Flavor::ProvingKey& key, - typename Flavor::ProverPolynomials& full_polynomials, +void compute_grand_products(typename Flavor::ProverPolynomials& full_polynomials, bb::RelationParameters& relation_parameters) { using GrandProductRelations = typename Flavor::GrandProductRelations; - using FF = typename Flavor::FF; constexpr size_t NUM_RELATIONS = std::tuple_size{}; bb::constexpr_for<0, NUM_RELATIONS, 1>([&]() { using GrandProdRelation = typename std::tuple_element::type; - // Assign the grand product polynomial to the relevant std::span member of `full_polynomials` (and its shift) - // For example, for UltraPermutationRelation, this will be `full_polynomials.z_perm` - // For example, for LookupRelation, this will be `full_polynomials.z_lookup` - bb::Polynomial& full_polynomial = GrandProdRelation::get_grand_product_polynomial(full_polynomials); - auto& key_polynomial = GrandProdRelation::get_grand_product_polynomial(key); - full_polynomial = key_polynomial.share(); - - compute_grand_product(key.circuit_size, full_polynomials, relation_parameters); - bb::Polynomial& full_polynomial_shift = - GrandProdRelation::get_shifted_grand_product_polynomial(full_polynomials); - full_polynomial_shift = key_polynomial.shifted(); + compute_grand_product(full_polynomials, relation_parameters); }); } diff --git a/barretenberg/cpp/src/barretenberg/polynomials/polynomial.test.cpp b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.test.cpp new file mode 100644 index 000000000000..59e4d5fa7474 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/polynomials/polynomial.test.cpp @@ -0,0 +1,61 @@ +#include +#include + +#include 
"barretenberg/polynomials/polynomial.hpp" + +using namespace bb; + +// Simple test/demonstration of shifted functionality +TEST(Polynomial, Shifted) +{ + using FF = bb::fr; + using Polynomial = Polynomial; + const size_t SIZE = 10; + auto poly = Polynomial::random(SIZE); + poly[0] = 0; // make it shiftable + + // Instantiate the shift via the shited method + auto poly_shifted = poly.shifted(); + + EXPECT_EQ(poly_shifted.size(), poly.size()); + + // The shift is indeed the shift + for (size_t i = 0; i < poly_shifted.size(); ++i) { + EXPECT_EQ(poly_shifted.at(i), poly.at(i + 1)); + } + + // If I change the original polynomial, the shift is updated accordingly + poly[3] = 25; + for (size_t i = 0; i < poly_shifted.size(); ++i) { + EXPECT_EQ(poly_shifted.at(i), poly.at(i + 1)); + } +} + +// Simple test/demonstration of share functionality +TEST(Polynomial, Share) +{ + using FF = bb::fr; + using Polynomial = Polynomial; + const size_t SIZE = 10; + auto poly = Polynomial::random(SIZE); + + // "clone" the poly via the share method + auto poly_clone = poly.share(); + + // The two are indeed equal + EXPECT_EQ(poly_clone, poly); + + // Changing one changes the other + poly[3] = 25; + EXPECT_EQ(poly_clone, poly); + + poly_clone[2] = 13; + EXPECT_EQ(poly_clone, poly); + + // If reset the original poly, it will no longer be equal to the clone made earlier + // Note: if we had not made a clone, the memory from the original poly would be leaked + auto poly2 = Polynomial::random(SIZE); + poly = poly2.share(); + + EXPECT_NE(poly_clone, poly); +} diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/combiner.test.cpp b/barretenberg/cpp/src/barretenberg/protogalaxy/combiner.test.cpp index 2e6f87f73f99..72a6fa53233c 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/combiner.test.cpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/combiner.test.cpp @@ -44,8 +44,7 @@ TEST(Protogalaxy, CombinerOn2Instances) auto prover_polynomials = get_sequential_prover_polynomials( 
/*log_circuit_size=*/1, idx * 128); restrict_to_standard_arithmetic_relation(prover_polynomials); - instance->prover_polynomials = std::move(prover_polynomials); - instance->proving_key = Flavor::ProvingKey(); + instance->proving_key.polynomials = std::move(prover_polynomials); instance->proving_key.circuit_size = 2; instance_data[idx] = instance; } @@ -78,8 +77,7 @@ TEST(Protogalaxy, CombinerOn2Instances) auto prover_polynomials = get_zero_prover_polynomials( /*log_circuit_size=*/1); restrict_to_standard_arithmetic_relation(prover_polynomials); - instance->prover_polynomials = std::move(prover_polynomials); - instance->proving_key = Flavor::ProvingKey(); + instance->proving_key.polynomials = std::move(prover_polynomials); instance->proving_key.circuit_size = 2; instance_data[idx] = instance; } @@ -104,13 +102,13 @@ TEST(Protogalaxy, CombinerOn2Instances) polys.q_o[idx] = -1; }; - create_add_gate(instances[0]->prover_polynomials, 0, 1, 2); - create_add_gate(instances[0]->prover_polynomials, 1, 0, 4); - create_add_gate(instances[1]->prover_polynomials, 0, 3, 4); - create_mul_gate(instances[1]->prover_polynomials, 1, 1, 4); + create_add_gate(instances[0]->proving_key.polynomials, 0, 1, 2); + create_add_gate(instances[0]->proving_key.polynomials, 1, 0, 4); + create_add_gate(instances[1]->proving_key.polynomials, 0, 3, 4); + create_mul_gate(instances[1]->proving_key.polynomials, 1, 1, 4); - restrict_to_standard_arithmetic_relation(instances[0]->prover_polynomials); - restrict_to_standard_arithmetic_relation(instances[1]->prover_polynomials); + restrict_to_standard_arithmetic_relation(instances[0]->proving_key.polynomials); + restrict_to_standard_arithmetic_relation(instances[1]->proving_key.polynomials); /* Instance 0 Instance 1 w_l w_r w_o q_m q_l q_r q_o q_c w_l w_r w_o q_m q_l q_r q_o q_c @@ -169,8 +167,7 @@ TEST(Protogalaxy, CombinerOn4Instances) auto instance = std::make_shared(); auto prover_polynomials = get_zero_prover_polynomials( /*log_circuit_size=*/1); - 
instance->prover_polynomials = std::move(prover_polynomials); - instance->proving_key = Flavor::ProvingKey(); + instance->proving_key.polynomials = std::move(prover_polynomials); instance->proving_key.circuit_size = 2; instance_data[idx] = instance; } @@ -178,10 +175,10 @@ TEST(Protogalaxy, CombinerOn4Instances) ProverInstances instances{ instance_data }; instances.alphas.fill(bb::Univariate(FF(0))); // focus on the arithmetic relation only - zero_all_selectors(instances[0]->prover_polynomials); - zero_all_selectors(instances[1]->prover_polynomials); - zero_all_selectors(instances[2]->prover_polynomials); - zero_all_selectors(instances[3]->prover_polynomials); + zero_all_selectors(instances[0]->proving_key.polynomials); + zero_all_selectors(instances[1]->proving_key.polynomials); + zero_all_selectors(instances[2]->proving_key.polynomials); + zero_all_selectors(instances[3]->proving_key.polynomials); auto pow_polynomial = PowPolynomial(std::vector{ 2 }); auto result = prover.compute_combiner(instances, pow_polynomial); diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/decider_prover.cpp b/barretenberg/cpp/src/barretenberg/protogalaxy/decider_prover.cpp index 1175e6d7f8f7..7c17c906600e 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/decider_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/decider_prover.cpp @@ -40,8 +40,8 @@ template void DeciderProver_::execute_relation_ch * */ template void DeciderProver_::execute_zeromorph_rounds() { - ZeroMorph::prove(accumulator->prover_polynomials.get_unshifted(), - accumulator->prover_polynomials.get_to_be_shifted(), + ZeroMorph::prove(accumulator->proving_key.polynomials.get_unshifted(), + accumulator->proving_key.polynomials.get_to_be_shifted(), sumcheck_output.claimed_evaluations.get_unshifted(), sumcheck_output.claimed_evaluations.get_shifted(), sumcheck_output.challenge, diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy.test.cpp 
b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy.test.cpp index bf72f4ca5eba..0b51c91f57b4 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy.test.cpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy.test.cpp @@ -4,6 +4,7 @@ #include "barretenberg/protogalaxy/decider_verifier.hpp" #include "barretenberg/protogalaxy/protogalaxy_prover.hpp" #include "barretenberg/protogalaxy/protogalaxy_verifier.hpp" +#include "barretenberg/stdlib_circuit_builders/mock_circuits.hpp" #include @@ -43,24 +44,27 @@ template class ProtoGalaxyTests : public testing::Test { static void construct_circuit(Builder& builder) { + MockCircuits::add_arithmetic_gates(builder); if constexpr (IsGoblinFlavor) { - GoblinMockCircuits::construct_simple_circuit(builder); - } else { - FF a = FF::random_element(); - FF b = FF::random_element(); - FF c = FF::random_element(); - FF d = a + b + c; - uint32_t a_idx = builder.add_public_variable(a); - uint32_t b_idx = builder.add_variable(b); - uint32_t c_idx = builder.add_variable(c); - uint32_t d_idx = builder.add_variable(d); - - builder.create_big_add_gate({ a_idx, b_idx, c_idx, d_idx, FF(1), FF(1), FF(1), FF(-1), FF(0) }); + GoblinMockCircuits::add_some_ecc_op_gates(builder); } } + // Construct prover and verifier instance for a provided circuit and add to tuple + static void construct_prover_and_verifier_instance(TupleOfInstances& instances, + Builder& builder, + bool structured = false) + { + + auto prover_instance = std::make_shared(builder, structured); + auto verification_key = std::make_shared(prover_instance->proving_key); + auto verifier_instance = std::make_shared(verification_key); + get<0>(instances).emplace_back(prover_instance); + get<1>(instances).emplace_back(verifier_instance); + } + // constructs num_insts number of prover and verifier instances - static TupleOfInstances construct_instances(size_t num_insts) + static TupleOfInstances construct_instances(size_t num_insts, bool structured = false) 
{ TupleOfInstances instances; // TODO(https://github.com/AztecProtocol/barretenberg/issues/938): Parallelize this loop @@ -68,24 +72,11 @@ template class ProtoGalaxyTests : public testing::Test { auto builder = typename Flavor::CircuitBuilder(); construct_circuit(builder); - auto prover_instance = std::make_shared(builder); - auto verification_key = std::make_shared(prover_instance->proving_key); - auto verifier_instance = std::make_shared(verification_key); - get<0>(instances).emplace_back(prover_instance); - get<1>(instances).emplace_back(verifier_instance); + construct_prover_and_verifier_instance(instances, builder, structured); } return instances; } - static ProverPolynomials construct_full_prover_polynomials(auto& input_polynomials) - { - ProverPolynomials full_polynomials; - for (auto [prover_poly, input_poly] : zip_view(full_polynomials.get_all(), input_polynomials)) { - prover_poly = input_poly.share(); - } - return full_polynomials; - } - static std::tuple, std::shared_ptr> fold_and_verify( const std::vector>& prover_instances, const std::vector>& verifier_instances) @@ -102,7 +93,7 @@ template class ProtoGalaxyTests : public testing::Test { { auto instance_size = accumulator->proving_key.circuit_size; auto expected_honk_evals = ProtoGalaxyProver::compute_full_honk_evaluations( - accumulator->prover_polynomials, accumulator->alphas, accumulator->relation_parameters); + accumulator->proving_key.polynomials, accumulator->alphas, accumulator->relation_parameters); // Construct pow(\vec{betas*}) as in the paper auto expected_pows = PowPolynomial(accumulator->gate_challenges); expected_pows.compute_values(); @@ -152,13 +143,12 @@ template class ProtoGalaxyTests : public testing::Test { instance->proving_key.compute_logderivative_inverse(instance->relation_parameters); } instance->proving_key.compute_grand_product_polynomials(instance->relation_parameters); - instance->prover_polynomials = ProverPolynomials(instance->proving_key); for (auto& alpha : 
instance->alphas) { alpha = FF::random_element(); } auto full_honk_evals = ProtoGalaxyProver::compute_full_honk_evaluations( - instance->prover_polynomials, instance->alphas, instance->relation_parameters); + instance->proving_key.polynomials, instance->alphas, instance->relation_parameters); // Evaluations should be 0 for valid circuit for (const auto& eval : full_honk_evals) { @@ -194,11 +184,12 @@ template class ProtoGalaxyTests : public testing::Test { using RelationSeparator = typename Flavor::RelationSeparator; const size_t log_instance_size(3); const size_t instance_size(1 << log_instance_size); - std::array, Flavor::NUM_ALL_ENTITIES> random_polynomials; - for (auto& poly : random_polynomials) { + // Construct fully random prover polynomials + ProverPolynomials full_polynomials; + for (auto& poly : full_polynomials.get_all()) { poly = bb::Polynomial::random(instance_size); } - auto full_polynomials = construct_full_prover_polynomials(random_polynomials); + auto relation_parameters = bb::RelationParameters::get_random(); RelationSeparator alphas; for (auto& alpha : alphas) { @@ -223,7 +214,7 @@ template class ProtoGalaxyTests : public testing::Test { } auto accumulator = std::make_shared(); - accumulator->prover_polynomials = std::move(full_polynomials); + accumulator->proving_key.polynomials = std::move(full_polynomials); accumulator->gate_challenges = betas; accumulator->target_sum = target_sum; accumulator->relation_parameters = relation_parameters; @@ -332,6 +323,73 @@ template class ProtoGalaxyTests : public testing::Test { decide_and_verify(prover_accumulator_2, verifier_accumulator_2, true); } + /** + * @brief Testing two valid rounds of folding followed by the decider for a structured trace. 
+ * + */ + static void test_full_protogalaxy_structured_trace() + { + bool structured = true; + TupleOfInstances instances = construct_instances(2, structured); + + auto [prover_accumulator, verifier_accumulator] = fold_and_verify(get<0>(instances), get<1>(instances)); + check_accumulator_target_sum_manual(prover_accumulator, true); + + TupleOfInstances instances_2 = construct_instances(1, structured); // just one set of prover/verifier instances + + auto [prover_accumulator_2, verifier_accumulator_2] = fold_and_verify( + { prover_accumulator, get<0>(instances_2)[0] }, { verifier_accumulator, get<1>(instances_2)[0] }); + check_accumulator_target_sum_manual(prover_accumulator_2, true); + info(prover_accumulator_2->proving_key.circuit_size); + decide_and_verify(prover_accumulator_2, verifier_accumulator_2, true); + } + + /** + * @brief Testing two valid rounds of folding followed by the decider for a structured trace. + * @details Here we're interested in folding inhomogeneous circuits, i.e. 
circuits with different numbers of + * constraints, which should be automatically handled by the structured trace + * + */ + static void test_full_protogalaxy_structured_trace_inhomogeneous_circuits() + { + bool structured = true; + + // Construct three circuits to be folded, each with a different number of constraints + Builder builder1; + Builder builder2; + Builder builder3; + construct_circuit(builder1); + construct_circuit(builder2); + construct_circuit(builder3); + + // Create inhomogenous circuits by adding a different number of add gates to each + MockCircuits::add_arithmetic_gates(builder1, 10); + MockCircuits::add_arithmetic_gates(builder2, 100); + MockCircuits::add_arithmetic_gates(builder3, 1000); + + // Construct the Prover/Verifier instances for the first two circuits + TupleOfInstances instances; + construct_prover_and_verifier_instance(instances, builder1, structured); + construct_prover_and_verifier_instance(instances, builder2, structured); + + // Fold the first two instances + auto [prover_accumulator, verifier_accumulator] = fold_and_verify(get<0>(instances), get<1>(instances)); + check_accumulator_target_sum_manual(prover_accumulator, true); + + // Construct the Prover/Verifier instance for the third circuit + TupleOfInstances instances_2; + construct_prover_and_verifier_instance(instances_2, builder3, structured); + + // Fold 3rd instance into accumulator + auto [prover_accumulator_2, verifier_accumulator_2] = fold_and_verify( + { prover_accumulator, get<0>(instances_2)[0] }, { verifier_accumulator, get<1>(instances_2)[0] }); + check_accumulator_target_sum_manual(prover_accumulator_2, true); + info(prover_accumulator_2->proving_key.circuit_size); + + // Decide on final accumulator + decide_and_verify(prover_accumulator_2, verifier_accumulator_2, true); + } + /** * @brief Ensure tampering a commitment and then calling the decider causes the decider verification to fail. 
* @@ -365,7 +423,7 @@ template class ProtoGalaxyTests : public testing::Test { check_accumulator_target_sum_manual(prover_accumulator, true); // Tamper with an accumulator polynomial - prover_accumulator->prover_polynomials.w_l[1] = FF::random_element(); + prover_accumulator->proving_key.polynomials.w_l[1] = FF::random_element(); check_accumulator_target_sum_manual(prover_accumulator, false); TupleOfInstances insts_2 = construct_instances(1); // just one set of prover/verifier instances @@ -431,6 +489,15 @@ TYPED_TEST(ProtoGalaxyTests, FullProtogalaxyTest) TestFixture::test_full_protogalaxy(); } +TYPED_TEST(ProtoGalaxyTests, FullProtogalaxyStructuredTrace) +{ + TestFixture::test_full_protogalaxy_structured_trace(); +} +TYPED_TEST(ProtoGalaxyTests, FullProtogalaxyStructuredTraceInhomogeneous) +{ + TestFixture::test_full_protogalaxy_structured_trace_inhomogeneous_circuits(); +} + TYPED_TEST(ProtoGalaxyTests, TamperedCommitment) { TestFixture::test_tampered_commitment(); diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.cpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.cpp index ccafde8cc61e..6d67ab105764 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.cpp @@ -11,7 +11,6 @@ void ProtoGalaxyProver_::finalise_and_send_instance(std::shared auto [proving_key, relation_params, alphas] = oink_prover.prove(); instance->proving_key = std::move(proving_key); instance->relation_parameters = std::move(relation_params); - instance->prover_polynomials = ProverPolynomials(instance->proving_key); instance->alphas = std::move(alphas); } @@ -80,7 +79,7 @@ std::shared_ptr ProtoGalaxyProver_gate_challenges = instances.next_gate_challenges; // Initialize accumulator proving key polynomials - auto accumulator_polys = next_accumulator->proving_key.get_all(); + auto accumulator_polys = next_accumulator->proving_key.polynomials.get_all(); 
run_loop_in_parallel(Flavor::NUM_FOLDED_ENTITIES, [&](size_t start_idx, size_t end_idx) { for (size_t poly_idx = start_idx; poly_idx < end_idx; poly_idx++) { auto& acc_poly = accumulator_polys[poly_idx]; @@ -92,7 +91,7 @@ std::shared_ptr ProtoGalaxyProver_proving_key.get_all(); + auto input_polys = instances[inst_idx]->proving_key.polynomials.get_all(); run_loop_in_parallel(Flavor::NUM_FOLDED_ENTITIES, [&](size_t start_idx, size_t end_idx) { for (size_t poly_idx = start_idx; poly_idx < end_idx; poly_idx++) { auto& acc_poly = accumulator_polys[poly_idx]; @@ -141,10 +140,6 @@ std::shared_ptr ProtoGalaxyProver_relation_parameters = folded_relation_parameters; next_accumulator->proving_key = std::move(instances[0]->proving_key); - // Derive the prover polynomials from the proving key polynomials since we only fold the unshifted polynomials. This - // is extremely cheap since we only call .share() and .shifted() polynomial functions. We need the folded prover - // polynomials for the decider. 
- next_accumulator->prover_polynomials = ProverPolynomials(next_accumulator->proving_key); return next_accumulator; } diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.hpp index 470d8f110b5f..d9cffcca9c70 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.hpp @@ -255,7 +255,7 @@ template class ProtoGalaxyProver_ { { BB_OP_COUNT_TIME(); auto full_honk_evaluations = compute_full_honk_evaluations( - accumulator->prover_polynomials, accumulator->alphas, accumulator->relation_parameters); + accumulator->proving_key.polynomials, accumulator->alphas, accumulator->relation_parameters); const auto betas = accumulator->gate_challenges; assert(betas.size() == deltas.size()); auto coeffs = construct_perturbator_coefficients(betas, deltas, full_honk_evaluations); diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation.cpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation.cpp index 852ceded6990..3fa0e512058e 100644 --- a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation.cpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation.cpp @@ -34,7 +34,7 @@ namespace bb { */ template template -Accumulator ECCVMSetRelationImpl::compute_permutation_numerator(const AllEntities& in, const Parameters& params) +Accumulator ECCVMSetRelationImpl::compute_grand_product_numerator(const AllEntities& in, const Parameters& params) { using View = typename Accumulator::View; @@ -227,7 +227,7 @@ Accumulator ECCVMSetRelationImpl::compute_permutation_numerator(const AllEnt template template -Accumulator ECCVMSetRelationImpl::compute_permutation_denominator(const AllEntities& in, const Parameters& params) +Accumulator ECCVMSetRelationImpl::compute_grand_product_denominator(const AllEntities& in, const Parameters& params) { 
using View = typename Accumulator::View; @@ -244,7 +244,7 @@ Accumulator ECCVMSetRelationImpl::compute_permutation_denominator(const AllE /** * @brief First term: tuple of (pc, round, wnaf_slice), used to determine which points we extract from lookup tables * when evaluaing MSMs in ECCVMMsmRelation. - * These values must be equivalent to the values computed in the 1st term of `compute_permutation_numerator` + * These values must be equivalent to the values computed in the 1st term of `compute_grand_product_numerator` */ Accumulator denominator(1); // degree-0 { @@ -283,7 +283,7 @@ Accumulator ECCVMSetRelationImpl::compute_permutation_denominator(const AllE * @brief Second term: tuple of (transcript_pc, transcript_Px, transcript_Py, z1) OR (transcript_pc, \lambda * * transcript_Px, -transcript_Py, z2) for each scalar multiplication in ECCVMTranscriptRelation columns. (the latter * term uses the curve endomorphism: \lambda = cube root of unity). These values must be equivalent to the second - * term values in `compute_permutation_numerator` + * term values in `compute_grand_product_numerator` */ { const auto& transcript_pc = View(in.transcript_pc); @@ -373,10 +373,10 @@ void ECCVMSetRelationImpl::accumulate(ContainerOverSubrelations& accumulator using View = typename Accumulator::View; // degree-11 - Accumulator numerator_evaluation = compute_permutation_numerator(in, params); + Accumulator numerator_evaluation = compute_grand_product_numerator(in, params); // degree-17 - Accumulator denominator_evaluation = compute_permutation_denominator(in, params); + Accumulator denominator_evaluation = compute_grand_product_denominator(in, params); const auto& lagrange_first = View(in.lagrange_first); const auto& lagrange_last = View(in.lagrange_last); diff --git a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation.hpp b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation.hpp index 41043a881342..44fd632dee9a 100644 --- 
a/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation.hpp +++ b/barretenberg/cpp/src/barretenberg/relations/ecc_vm/ecc_set_relation.hpp @@ -32,10 +32,10 @@ template class ECCVMSetRelationImpl { inline static auto& get_shifted_grand_product_polynomial(auto& input) { return input.z_perm_shift; } template - static Accumulator compute_permutation_numerator(const AllEntities& in, const Parameters& params); + static Accumulator compute_grand_product_numerator(const AllEntities& in, const Parameters& params); template - static Accumulator compute_permutation_denominator(const AllEntities& in, const Parameters& params); + static Accumulator compute_grand_product_denominator(const AllEntities& in, const Parameters& params); template static void accumulate(ContainerOverSubrelations& accumulator, diff --git a/barretenberg/cpp/src/barretenberg/serialize/msgpack_impl/name_value_pair_macro.hpp b/barretenberg/cpp/src/barretenberg/serialize/msgpack_impl/name_value_pair_macro.hpp index 7e4207f87fa6..8cd422e7aac1 100644 --- a/barretenberg/cpp/src/barretenberg/serialize/msgpack_impl/name_value_pair_macro.hpp +++ b/barretenberg/cpp/src/barretenberg/serialize/msgpack_impl/name_value_pair_macro.hpp @@ -35,12 +35,22 @@ _28, \ _29, \ _30, \ + _31, \ + _32, \ + _33, \ + _34, \ + _35, \ N, \ ...) \ N -// AD: support for 30 fields!? one may ask. Well, after 20 not being enough... +// AD: support for 40 fields!? one may ask. Well, after 30 not being enough... #define VA_NARGS(...) \ VA_NARGS_IMPL(__VA_ARGS__, \ + 35, \ + 34, \ + 33, \ + 32, \ + 31, \ 30, \ 29, \ 28, \ @@ -106,6 +116,11 @@ #define _NVP28(x, ...) _NVP1(x), _NVP27(__VA_ARGS__) #define _NVP29(x, ...) _NVP1(x), _NVP28(__VA_ARGS__) #define _NVP30(x, ...) _NVP1(x), _NVP29(__VA_ARGS__) +#define _NVP31(x, ...) _NVP1(x), _NVP30(__VA_ARGS__) +#define _NVP32(x, ...) _NVP1(x), _NVP31(__VA_ARGS__) +#define _NVP33(x, ...) _NVP1(x), _NVP32(__VA_ARGS__) +#define _NVP34(x, ...) 
_NVP1(x), _NVP33(__VA_ARGS__) +#define _NVP35(x, ...) _NVP1(x), _NVP34(__VA_ARGS__) #define CONCAT(a, b) a##b #define _NVP_N(n) CONCAT(_NVP, n) diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_recursion/verifier/goblin_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_recursion/verifier/goblin_verifier.test.cpp deleted file mode 100644 index 84e0d6e0807a..000000000000 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_recursion/verifier/goblin_verifier.test.cpp +++ /dev/null @@ -1,279 +0,0 @@ -#include "barretenberg/circuit_checker/circuit_checker.hpp" -#include "barretenberg/common/test.hpp" -#include "barretenberg/stdlib/hash/blake3s/blake3s.hpp" -#include "barretenberg/stdlib/hash/pedersen/pedersen.hpp" -#include "barretenberg/stdlib/honk_recursion/verifier/ultra_recursive_verifier.hpp" -#include "barretenberg/stdlib/primitives/curves/bn254.hpp" -#include "barretenberg/stdlib_circuit_builders/ultra_recursive_flavor.hpp" -#include "barretenberg/ultra_honk/ultra_prover.hpp" -#include "barretenberg/ultra_honk/ultra_verifier.hpp" - -namespace bb::stdlib::recursion::honk { - -/** - * @brief Test suite for recursive verification of Goblin Ultra Honk proofs - * @details The recursive verification circuit is arithmetized in two different ways: 1) using the conventional Ultra - * arithmetization (UltraCircuitBuilder), or 2) a Goblin-style Ultra arithmetization (GoblinUltraCircuitBuilder). - * - * @tparam Builder Circuit builder for the recursive verifier circuit - */ -template class GoblinRecursiveVerifierTest : public testing::Test { - - // Define types for the inner circuit, i.e. 
the circuit whose proof will be recursively verified - using InnerFlavor = GoblinUltraFlavor; - using InnerProver = GoblinUltraProver; - using InnerVerifier = GoblinUltraVerifier; - using InnerBuilder = typename InnerFlavor::CircuitBuilder; - using InnerProverInstance = ProverInstance_; - using InnerCurve = bn254; - using InnerCommitment = InnerFlavor::Commitment; - using InnerFF = InnerFlavor::FF; - - // Types for recursive verifier circuit - using OuterBuilder = typename OuterFlavor::CircuitBuilder; - using OuterProver = UltraProver_; - using OuterVerifier = UltraVerifier_; - using OuterProverInstance = ProverInstance_; - using RecursiveFlavor = GoblinUltraRecursiveFlavor_; - using RecursiveVerifier = UltraRecursiveVerifier_; - using VerificationKey = typename RecursiveVerifier::VerificationKey; - - /** - * @brief Create a non-trivial arbitrary inner circuit, the proof of which will be recursively verified - * - * @param builder - * @param public_inputs - * @param log_num_gates - */ - static InnerBuilder create_inner_circuit(size_t log_num_gates = 10) - { - using fr_ct = InnerCurve::ScalarField; - using fq_ct = InnerCurve::BaseField; - using point_ct = InnerCurve::AffineElement; - using public_witness_ct = InnerCurve::public_witness_ct; - using witness_ct = InnerCurve::witness_ct; - using byte_array_ct = InnerCurve::byte_array_ct; - using fr = typename InnerCurve::ScalarFieldNative; - using point = typename InnerCurve::GroupNative::affine_element; - - // Instantiate ECC op queue and add mock data to simulate interaction with a previous circuit - auto op_queue = std::make_shared(); - - InnerBuilder builder(op_queue); - // Add a mul accum op and an equality op - auto p = point::one() * fr::random_element(); - auto scalar = fr::random_element(); - builder.queue_ecc_mul_accum(p, scalar); - builder.queue_ecc_eq(); - - // Create 2^log_n many add gates based on input log num gates - const size_t num_gates = 1 << log_num_gates; - for (size_t i = 0; i < num_gates; ++i) { 
- fr a = fr::random_element(); - uint32_t a_idx = builder.add_variable(a); - - fr b = fr::random_element(); - fr c = fr::random_element(); - fr d = a + b + c; - uint32_t b_idx = builder.add_variable(b); - uint32_t c_idx = builder.add_variable(c); - uint32_t d_idx = builder.add_variable(d); - - builder.create_big_add_gate({ a_idx, b_idx, c_idx, d_idx, fr(1), fr(1), fr(1), fr(-1), fr(0) }); - } - - // Add some arbitrary goblin-style ECC op gates via a batch mul - size_t num_points = 5; - std::vector circuit_points; - std::vector circuit_scalars; - for (size_t i = 0; i < num_points; ++i) { - circuit_points.push_back(point_ct::from_witness(&builder, point::random_element())); - circuit_scalars.push_back(fr_ct::from_witness(&builder, fr::random_element())); - } - point_ct::batch_mul(circuit_points, circuit_scalars); - - // Define some additional arbitrary convetional circuit logic - fr_ct a(public_witness_ct(&builder, fr::random_element())); - fr_ct b(public_witness_ct(&builder, fr::random_element())); - fr_ct c(public_witness_ct(&builder, fr::random_element())); - - for (size_t i = 0; i < 32; ++i) { - a = (a * b) + b + a; - a = a.madd(b, c); - } - pedersen_hash::hash({ a, b }); - byte_array_ct to_hash(&builder, "nonsense test data"); - blake3s(to_hash); - - fr bigfield_data = fr::random_element(); - fr bigfield_data_a{ bigfield_data.data[0], bigfield_data.data[1], 0, 0 }; - fr bigfield_data_b{ bigfield_data.data[2], bigfield_data.data[3], 0, 0 }; - - fq_ct big_a(fr_ct(witness_ct(&builder, bigfield_data_a.to_montgomery_form())), fr_ct(witness_ct(&builder, 0))); - fq_ct big_b(fr_ct(witness_ct(&builder, bigfield_data_b.to_montgomery_form())), fr_ct(witness_ct(&builder, 0))); - - big_a* big_b; - - return builder; - }; - - public: - static void SetUpTestSuite() { bb::srs::init_crs_factory("../srs_db/ignition"); } - - /** - * @brief Create inner circuit and call check_circuit on it - * - */ - static void test_inner_circuit() - { - auto inner_circuit = create_inner_circuit(); 
- - bool result = CircuitChecker::check(inner_circuit); - - EXPECT_EQ(result, true); - } - - /** - * @brief Instantiate a recursive verification key from the native verification key produced by the inner cicuit - * builder. Check consistency beteen the native and stdlib types. - * - */ - static void test_recursive_verification_key_creation() - { - // Create an arbitrary inner circuit - auto inner_circuit = create_inner_circuit(); - OuterBuilder outer_circuit; - - // Compute native verification key - auto instance = std::make_shared(inner_circuit); - InnerProver prover(instance); // A prerequisite for computing VK - auto verification_key = std::make_shared(instance->proving_key); - // Instantiate the recursive verifier using the native verification key - RecursiveVerifier verifier{ &outer_circuit, verification_key }; - - // Spot check some values in the recursive VK to ensure it was constructed correctly - EXPECT_EQ(verifier.key->circuit_size, verification_key->circuit_size); - EXPECT_EQ(verifier.key->log_circuit_size, verification_key->log_circuit_size); - EXPECT_EQ(verifier.key->num_public_inputs, verification_key->num_public_inputs); - EXPECT_EQ(verifier.key->q_m.get_value(), verification_key->q_m); - EXPECT_EQ(verifier.key->q_r.get_value(), verification_key->q_r); - EXPECT_EQ(verifier.key->sigma_1.get_value(), verification_key->sigma_1); - EXPECT_EQ(verifier.key->id_3.get_value(), verification_key->id_3); - EXPECT_EQ(verifier.key->lagrange_ecc_op.get_value(), verification_key->lagrange_ecc_op); - } - - /** - * @brief Construct a recursive verification circuit for the proof of an inner circuit then call check_circuit on - it - * - */ - static void test_recursive_verification() - { - // Create an arbitrary inner circuit - auto inner_circuit = create_inner_circuit(); - - // Generate a proof over the inner circuit - auto instance = std::make_shared(inner_circuit); - InnerProver inner_prover(instance); - auto verification_key = 
std::make_shared(instance->proving_key); - auto inner_proof = inner_prover.construct_proof(); - - // Create a recursive verification circuit for the proof of the inner circuit - OuterBuilder outer_circuit; - RecursiveVerifier verifier{ &outer_circuit, verification_key }; - auto pairing_points = verifier.verify_proof(inner_proof); - info("Recursive Verifier Goblin: num gates = ", outer_circuit.num_gates); - - // Check for a failure flag in the recursive verifier circuit - EXPECT_EQ(outer_circuit.failed(), false) << outer_circuit.err(); - - // Check 1: Perform native verification then perform the pairing on the outputs of the recursive - // verifier and check that the result agrees. - InnerVerifier native_verifier(verification_key); - auto native_result = native_verifier.verify_proof(inner_proof); - auto recursive_result = native_verifier.key->pcs_verification_key->pairing_check(pairing_points[0].get_value(), - pairing_points[1].get_value()); - EXPECT_EQ(recursive_result, native_result); - - // Check 2: Ensure that the underlying native and recursive verification algorithms agree by ensuring - // the manifests produced by each agree. - auto recursive_manifest = verifier.transcript->get_manifest(); - auto native_manifest = native_verifier.transcript->get_manifest(); - for (size_t i = 0; i < recursive_manifest.size(); ++i) { - EXPECT_EQ(recursive_manifest[i], native_manifest[i]); - } - - // Check 3: Construct and verify a proof of the recursive verifier circuit - { - auto instance = std::make_shared(outer_circuit); - OuterProver prover(instance); - auto verification_key = std::make_shared(instance->proving_key); - OuterVerifier verifier(verification_key); - auto proof = prover.construct_proof(); - bool verified = verifier.verify_proof(proof); - - ASSERT(verified); - } - } - - /** - * @brief Construct a verifier circuit for a proof whose data has been tampered with. 
Expect failure - * TODO(bberg #656): For now we get a "bad" proof by arbitrarily tampering with bits in a valid proof. It would be - * much nicer to explicitly change meaningful components, e.g. such that one of the multilinear evaluations is - * wrong. This is difficult now but should be straightforward if the proof is a struct. - */ - static void test_recursive_verification_fails() - { - // Create an arbitrary inner circuit - auto inner_circuit = create_inner_circuit(); - - // Generate a proof over the inner circuit - auto instance = std::make_shared(inner_circuit); - InnerProver inner_prover(instance); - auto inner_proof = inner_prover.construct_proof(); - - // Arbitrarily tamper with the proof to be verified - inner_prover.transcript->deserialize_full_transcript(); - inner_prover.transcript->sorted_accum_comm = InnerCommitment::one() * InnerFF::random_element(); - inner_prover.transcript->serialize_full_transcript(); - inner_proof = inner_prover.export_proof(); - - // Generate the corresponding inner verification key - auto inner_verification_key = std::make_shared(instance->proving_key); - - // Create a recursive verification circuit for the proof of the inner circuit - OuterBuilder outer_circuit; - RecursiveVerifier verifier{ &outer_circuit, inner_verification_key }; - verifier.verify_proof(inner_proof); - - // We expect the circuit check to fail due to the bad proof - EXPECT_FALSE(CircuitChecker::check(outer_circuit)); - } -}; - -// Run the recursive verifier tests with conventional Ultra builder and Goblin builder -using Flavors = testing::Types; - -TYPED_TEST_SUITE(GoblinRecursiveVerifierTest, Flavors); - -HEAVY_TYPED_TEST(GoblinRecursiveVerifierTest, InnerCircuit) -{ - TestFixture::test_inner_circuit(); -} - -HEAVY_TYPED_TEST(GoblinRecursiveVerifierTest, RecursiveVerificationKey) -{ - TestFixture::test_recursive_verification_key_creation(); -} - -HEAVY_TYPED_TEST(GoblinRecursiveVerifierTest, DISABLED_SingleRecursiveVerification) -{ - 
TestFixture::test_recursive_verification(); -}; - -HEAVY_TYPED_TEST(GoblinRecursiveVerifierTest, DISABLED_SingleRecursiveVerificationFailure) -{ - TestFixture::test_recursive_verification_fails(); -}; - -} // namespace bb::stdlib::recursion::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_recursion/verifier/protogalaxy_recursive_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_recursion/verifier/protogalaxy_recursive_verifier.test.cpp index 22c9b7f29f4a..ebe037413566 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_recursion/verifier/protogalaxy_recursive_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_recursion/verifier/protogalaxy_recursive_verifier.test.cpp @@ -346,7 +346,7 @@ template class ProtoGalaxyRecursiveTests : public tes auto verification_key = std::make_shared(prover_inst->proving_key); auto verifier_inst = std::make_shared(verification_key); - prover_accumulator->prover_polynomials.w_l[1] = FF::random_element(); + prover_accumulator->proving_key.polynomials.w_l[1] = FF::random_element(); // Generate a folding proof with the incorrect polynomials which would result in the prover having the wrong // target sum diff --git a/barretenberg/cpp/src/barretenberg/stdlib/honk_recursion/verifier/verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/honk_recursion/verifier/verifier.test.cpp index d7af13251960..8fe40431d422 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/honk_recursion/verifier/verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/honk_recursion/verifier/verifier.test.cpp @@ -5,40 +5,40 @@ #include "barretenberg/stdlib/honk_recursion/verifier/ultra_recursive_verifier.hpp" #include "barretenberg/stdlib/primitives/curves/bn254.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_recursive_flavor.hpp" - #include "barretenberg/ultra_honk/ultra_prover.hpp" #include "barretenberg/ultra_honk/ultra_verifier.hpp" namespace 
bb::stdlib::recursion::honk { /** - * @brief Test suite for recursive verification of conventional Ultra Honk proofs - * @details The recursive verification circuit is arithmetized in two different ways: 1) using the conventional Ultra - * arithmetization (UltraCircuitBuilder), or 2) a Goblin-style Ultra arithmetization (GoblinUltraCircuitBuilder). + * @brief Test suite for recursive verification of Honk proofs for both Ultra and GoblinUltra arithmetisation. + * @details `Inner*` types describe the type of circuits (and everything else required to generate a proof) that we aim + * to recursively verify. `Outer*` describes the arithmetisation of the recursive verifier circuit and the types + * required to ensure the recursive verifier circuit is correct (i.e. by producing a proof and verifying it). * - * @tparam Builder + * @tparam RecursiveFlavor defines the recursive verifier, what the arithmetisation of its circuit should be and what + * types of proofs it recursively verifies. */ -template class HonkRecursiveVerifierTest : public testing::Test { - - // Define types relevant for testing +template class RecursiveVerifierTest : public testing::Test { - using InnerFlavor = UltraFlavor; - using InnerProverInstance = ProverInstance_; - using InnerProver = UltraProver; - using InnerVerifier = UltraVerifier; + // Define types for the inner circuit, i.e. 
the circuit whose proof will be recursively verified + using InnerFlavor = typename RecursiveFlavor::NativeFlavor; + using InnerProver = UltraProver_; + using InnerVerifier = UltraVerifier_; using InnerBuilder = typename InnerFlavor::CircuitBuilder; + using InnerProverInstance = ProverInstance_; using InnerCurve = bn254; - using Commitment = InnerFlavor::Commitment; - using FF = InnerFlavor::FF; + using InnerCommitment = InnerFlavor::Commitment; + using InnerFF = InnerFlavor::FF; - // Types for recursive verifier circuit - using OuterBuilder = typename OuterFlavor::CircuitBuilder; - using RecursiveFlavor = UltraRecursiveFlavor_; - using RecursiveVerifier = UltraRecursiveVerifier_; + // Defines types for the outer circuit, i.e. the circuit of the recursive verifier + using OuterBuilder = typename RecursiveFlavor::CircuitBuilder; + using OuterFlavor = std::conditional_t, GoblinUltraFlavor, UltraFlavor>; using OuterProver = UltraProver_; using OuterVerifier = UltraVerifier_; using OuterProverInstance = ProverInstance_; + using RecursiveVerifier = UltraRecursiveVerifier_; using VerificationKey = typename RecursiveVerifier::VerificationKey; /** @@ -48,14 +48,18 @@ template class HonkRecursiveVerifierTest : public testing * @param public_inputs * @param log_num_gates */ - static void create_inner_circuit(InnerBuilder& builder, size_t log_num_gates = 10) + static InnerBuilder create_inner_circuit(size_t log_num_gates = 10) { using fr_ct = InnerCurve::ScalarField; using fq_ct = InnerCurve::BaseField; + using point_ct = InnerCurve::AffineElement; using public_witness_ct = InnerCurve::public_witness_ct; using witness_ct = InnerCurve::witness_ct; using byte_array_ct = InnerCurve::byte_array_ct; using fr = typename InnerCurve::ScalarFieldNative; + using point = typename InnerCurve::GroupNative::affine_element; + + InnerBuilder builder; // Create 2^log_n many add gates based on input log num gates const size_t num_gates = 1 << log_num_gates; @@ -73,7 +77,18 @@ template class 
HonkRecursiveVerifierTest : public testing builder.create_big_add_gate({ a_idx, b_idx, c_idx, d_idx, fr(1), fr(1), fr(1), fr(-1), fr(0) }); } - // Define some additional non-trivial but arbitrary circuit logic + // Perform a batch mul which will add some arbitrary goblin-style ECC op gates if the circuit arithmetic is + // goblinisied otherwise it will add the conventional nonnative gates + size_t num_points = 5; + std::vector circuit_points; + std::vector circuit_scalars; + for (size_t i = 0; i < num_points; ++i) { + circuit_points.push_back(point_ct::from_witness(&builder, point::random_element())); + circuit_scalars.push_back(fr_ct::from_witness(&builder, fr::random_element())); + } + point_ct::batch_mul(circuit_points, circuit_scalars); + + // Define some additional arbitrary convetional circuit logic fr_ct a(public_witness_ct(&builder, fr::random_element())); fr_ct b(public_witness_ct(&builder, fr::random_element())); fr_ct c(public_witness_ct(&builder, fr::random_element())); @@ -94,6 +109,8 @@ template class HonkRecursiveVerifierTest : public testing fq_ct big_b(fr_ct(witness_ct(&builder, bigfield_data_b.to_montgomery_form())), fr_ct(witness_ct(&builder, 0))); big_a* big_b; + + return builder; }; public: @@ -105,11 +122,10 @@ template class HonkRecursiveVerifierTest : public testing */ static void test_inner_circuit() { - InnerBuilder builder; + auto inner_circuit = create_inner_circuit(); - create_inner_circuit(builder); + bool result = CircuitChecker::check(inner_circuit); - bool result = CircuitChecker::check(builder); EXPECT_EQ(result, true); } @@ -120,11 +136,9 @@ template class HonkRecursiveVerifierTest : public testing */ static void test_recursive_verification_key_creation() { - InnerBuilder inner_circuit; - OuterBuilder outer_circuit; - // Create an arbitrary inner circuit - create_inner_circuit(inner_circuit); + auto inner_circuit = create_inner_circuit(); + OuterBuilder outer_circuit; // Compute native verification key auto instance = 
std::make_shared(inner_circuit); @@ -137,34 +151,31 @@ template class HonkRecursiveVerifierTest : public testing EXPECT_EQ(verifier.key->circuit_size, verification_key->circuit_size); EXPECT_EQ(verifier.key->log_circuit_size, verification_key->log_circuit_size); EXPECT_EQ(verifier.key->num_public_inputs, verification_key->num_public_inputs); - EXPECT_EQ(verifier.key->q_m.get_value(), verification_key->q_m); - EXPECT_EQ(verifier.key->q_r.get_value(), verification_key->q_r); - EXPECT_EQ(verifier.key->sigma_1.get_value(), verification_key->sigma_1); - EXPECT_EQ(verifier.key->id_3.get_value(), verification_key->id_3); + for (auto [vk_poly, native_vk_poly] : zip_view(verifier.key->get_all(), verification_key->get_all())) { + EXPECT_EQ(vk_poly.get_value(), native_vk_poly); + } } /** - * @brief Construct a recursive verification circuit for the proof of an inner circuit then call check_circuit on it - * + * @brief Construct a recursive verification circuit for the proof of an inner circuit then call check_circuit on + * it. 
*/ static void test_recursive_verification() { // Create an arbitrary inner circuit - InnerBuilder inner_circuit; - create_inner_circuit(inner_circuit); + auto inner_circuit = create_inner_circuit(); // Generate a proof over the inner circuit auto instance = std::make_shared(inner_circuit); InnerProver inner_prover(instance); - auto inner_proof = inner_prover.construct_proof(); - auto verification_key = std::make_shared(instance->proving_key); + auto inner_proof = inner_prover.construct_proof(); // Create a recursive verification circuit for the proof of the inner circuit OuterBuilder outer_circuit; RecursiveVerifier verifier{ &outer_circuit, verification_key }; auto pairing_points = verifier.verify_proof(inner_proof); - info("Recursive Verifier Ultra: num gates = ", outer_circuit.num_gates); + info("Recursive Verifier: num gates = ", outer_circuit.num_gates); // Check for a failure flag in the recursive verifier circuit EXPECT_EQ(outer_circuit.failed(), false) << outer_circuit.err(); @@ -207,8 +218,7 @@ template class HonkRecursiveVerifierTest : public testing static void test_recursive_verification_fails() { // Create an arbitrary inner circuit - InnerBuilder inner_circuit; - create_inner_circuit(inner_circuit); + auto inner_circuit = create_inner_circuit(); // Generate a proof over the inner circuit auto instance = std::make_shared(inner_circuit); @@ -217,15 +227,16 @@ template class HonkRecursiveVerifierTest : public testing // Arbitrarily tamper with the proof to be verified inner_prover.transcript->deserialize_full_transcript(); - inner_prover.transcript->sorted_accum_comm = Commitment::one() * FF::random_element(); + inner_prover.transcript->sorted_accum_comm = InnerCommitment::one() * InnerFF::random_element(); inner_prover.transcript->serialize_full_transcript(); inner_proof = inner_prover.export_proof(); - auto verification_key = std::make_shared(instance->proving_key); + // Generate the corresponding inner verification key + auto inner_verification_key = 
std::make_shared(instance->proving_key); // Create a recursive verification circuit for the proof of the inner circuit OuterBuilder outer_circuit; - RecursiveVerifier verifier{ &outer_circuit, verification_key }; + RecursiveVerifier verifier{ &outer_circuit, inner_verification_key }; verifier.verify_proof(inner_proof); // We expect the circuit check to fail due to the bad proof @@ -234,26 +245,29 @@ template class HonkRecursiveVerifierTest : public testing }; // Run the recursive verifier tests with conventional Ultra builder and Goblin builder -using Flavors = testing::Types; +using Flavors = testing::Types, + GoblinUltraRecursiveFlavor_, + UltraRecursiveFlavor_, + UltraRecursiveFlavor_>; -TYPED_TEST_SUITE(HonkRecursiveVerifierTest, Flavors); +TYPED_TEST_SUITE(RecursiveVerifierTest, Flavors); -HEAVY_TYPED_TEST(HonkRecursiveVerifierTest, InnerCircuit) +HEAVY_TYPED_TEST(RecursiveVerifierTest, InnerCircuit) { TestFixture::test_inner_circuit(); } -HEAVY_TYPED_TEST(HonkRecursiveVerifierTest, RecursiveVerificationKey) +HEAVY_TYPED_TEST(RecursiveVerifierTest, RecursiveVerificationKey) { TestFixture::test_recursive_verification_key_creation(); } -HEAVY_TYPED_TEST(HonkRecursiveVerifierTest, DISABLED_SingleRecursiveVerification) +HEAVY_TYPED_TEST(RecursiveVerifierTest, SingleRecursiveVerification) { TestFixture::test_recursive_verification(); }; -HEAVY_TYPED_TEST(HonkRecursiveVerifierTest, DISABLED_SingleRecursiveVerificationFailure) +HEAVY_TYPED_TEST(RecursiveVerifierTest, SingleRecursiveVerificationFailure) { TestFixture::test_recursive_verification_fails(); }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/goblin_ultra_flavor.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/goblin_ultra_flavor.hpp index 677749016b5e..49b0e6791699 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/goblin_ultra_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/goblin_ultra_flavor.hpp @@ -240,6 +240,10 @@ 
class GoblinUltraFlavor { DEFINE_COMPOUND_GET_ALL(PrecomputedEntities, WitnessEntities, ShiftedEntities) auto get_wires() { return RefArray{ this->w_l, this->w_r, this->w_o, this->w_4 }; }; + auto get_selectors() { return PrecomputedEntities::get_selectors(); } + auto get_sigmas() { return RefArray{ this->sigma_1, this->sigma_2, this->sigma_3, this->sigma_4 }; }; + auto get_ids() { return RefArray{ this->id_1, this->id_2, this->id_3, this->id_4 }; }; + auto get_tables() { return RefArray{ this->table_1, this->table_2, this->table_3, this->table_4 }; }; auto get_ecc_op_wires() { return RefArray{ this->ecc_op_wire_1, this->ecc_op_wire_2, this->ecc_op_wire_3, this->ecc_op_wire_4 }; @@ -260,28 +264,71 @@ class GoblinUltraFlavor { auto get_shifted() { return ShiftedEntities::get_all(); }; }; + /** + * @brief A field element for each entity of the flavor. These entities represent the prover polynomials evaluated + * at one point. + */ + class AllValues : public AllEntities { + public: + using Base = AllEntities; + using Base::Base; + }; + + /** + * @brief A container for the prover polynomials handles. 
+ */ + class ProverPolynomials : public AllEntities { + public: + // Define all operations as default, except copy construction/assignment + ProverPolynomials() = default; + ProverPolynomials(size_t circuit_size) + { // Initialize all unshifted polynomials to the zero polynomial and initialize the shifted polys + for (auto& poly : get_unshifted()) { + poly = Polynomial{ circuit_size }; + } + set_shifted(); + } + ProverPolynomials& operator=(const ProverPolynomials&) = delete; + ProverPolynomials(const ProverPolynomials& o) = delete; + ProverPolynomials(ProverPolynomials&& o) noexcept = default; + ProverPolynomials& operator=(ProverPolynomials&& o) noexcept = default; + ~ProverPolynomials() = default; + [[nodiscard]] size_t get_polynomial_size() const { return q_c.size(); } + [[nodiscard]] AllValues get_row(size_t row_idx) const + { + AllValues result; + for (auto [result_field, polynomial] : zip_view(result.get_all(), this->get_all())) { + result_field = polynomial[row_idx]; + } + return result; + } + + void set_shifted() + { + for (auto [shifted, to_be_shifted] : zip_view(get_shifted(), get_to_be_shifted())) { + shifted = to_be_shifted.shifted(); + } + } + }; + /** * @brief The proving key is responsible for storing the polynomials used by the prover. - * @note TODO(Cody): Maybe multiple inheritance is the right thing here. In that case, nothing should eve inherit - * from ProvingKey. 
+ * */ - class ProvingKey : public ProvingKey_, WitnessEntities, CommitmentKey> { + class ProvingKey : public ProvingKey_ { public: // Expose constructors on the base class - using Base = ProvingKey_, WitnessEntities, CommitmentKey>; + using Base = ProvingKey_; using Base::Base; + ProvingKey(const size_t circuit_size, const size_t num_public_inputs) + : Base(circuit_size, num_public_inputs) + , polynomials(circuit_size){}; + std::vector memory_read_records; std::vector memory_write_records; std::array sorted_polynomials; - - auto get_to_be_shifted() - { - return RefArray{ this->table_1, this->table_2, this->table_3, this->table_4, this->w_l, this->w_r, - this->w_o, this->w_4, this->sorted_accum, this->z_perm, this->z_lookup }; - }; - // The plookup wires that store plookup read data. - auto get_table_column_wires() { return RefArray{ w_l, w_r, w_o }; }; + ProverPolynomials polynomials; // storage for all polynomials evaluated by the prover void compute_sorted_accumulator_polynomials(const FF& eta, const FF& eta_two, const FF& eta_three) { @@ -306,7 +353,7 @@ class GoblinUltraFlavor { void compute_sorted_list_accumulator(const FF& eta, const FF& eta_two, const FF& eta_three) { - auto sorted_list_accumulator = Polynomial{ this->circuit_size }; + auto& sorted_list_accumulator = polynomials.sorted_accum; // Construct s via Horner, i.e. 
s = s_1 + η(s_2 + η(s_3 + η*s_4)) for (size_t i = 0; i < this->circuit_size; ++i) { @@ -316,7 +363,6 @@ class GoblinUltraFlavor { T0 += sorted_polynomials[0][i]; sorted_list_accumulator[i] = T0; } - sorted_accum = sorted_list_accumulator.share(); } /** @@ -333,7 +379,7 @@ class GoblinUltraFlavor { // The plookup memory record values are computed at the indicated indices as // w4 = w3 * eta^3 + w2 * eta^2 + w1 * eta + read_write_flag; // (See plookup_auxiliary_widget.hpp for details) - auto wires = get_wires(); + auto wires = polynomials.get_wires(); // Compute read record values for (const auto& gate_idx : memory_read_records) { @@ -360,17 +406,13 @@ class GoblinUltraFlavor { */ void compute_logderivative_inverse(const RelationParameters& relation_parameters) { - auto prover_polynomials = ProverPolynomials(*this); - // Compute inverses for calldata reads DatabusLookupRelation::compute_logderivative_inverse( - prover_polynomials, relation_parameters, this->circuit_size); - this->calldata_inverses = prover_polynomials.calldata_inverses; + this->polynomials, relation_parameters, this->circuit_size); // Compute inverses for return data reads DatabusLookupRelation::compute_logderivative_inverse( - prover_polynomials, relation_parameters, this->circuit_size); - this->return_data_inverses = prover_polynomials.return_data_inverses; + this->polynomials, relation_parameters, this->circuit_size); } /** @@ -391,10 +433,7 @@ class GoblinUltraFlavor { relation_parameters.lookup_grand_product_delta = lookup_grand_product_delta; // Compute permutation and lookup grand product polynomials - auto prover_polynomials = ProverPolynomials(*this); - compute_grand_products(*this, prover_polynomials, relation_parameters); - this->z_perm = prover_polynomials.z_perm; - this->z_lookup = prover_polynomials.z_lookup; + compute_grand_products(this->polynomials, relation_parameters); } }; @@ -423,10 +462,113 @@ class GoblinUltraFlavor { this->num_public_inputs = proving_key.num_public_inputs; 
this->pub_inputs_offset = proving_key.pub_inputs_offset; - for (auto [polynomial, commitment] : zip_view(proving_key.get_precomputed_polynomials(), this->get_all())) { + for (auto [polynomial, commitment] : zip_view(proving_key.polynomials.get_precomputed(), this->get_all())) { commitment = proving_key.commitment_key->commit(polynomial); } } + // TODO(https://github.com/AztecProtocol/barretenberg/issues/964): Clean the boilerplate up. + VerificationKey(const size_t circuit_size, + const size_t num_public_inputs, + const size_t pub_inputs_offset, + const Commitment& q_m, + const Commitment& q_c, + const Commitment& q_l, + const Commitment& q_r, + const Commitment& q_o, + const Commitment& q_4, + const Commitment& q_arith, + const Commitment& q_delta_range, + const Commitment& q_elliptic, + const Commitment& q_aux, + const Commitment& q_lookup, + const Commitment& q_busread, + const Commitment& q_poseidon2_external, + const Commitment& q_poseidon2_internal, + const Commitment& sigma_1, + const Commitment& sigma_2, + const Commitment& sigma_3, + const Commitment& sigma_4, + const Commitment& id_1, + const Commitment& id_2, + const Commitment& id_3, + const Commitment& id_4, + const Commitment& table_1, + const Commitment& table_2, + const Commitment& table_3, + const Commitment& table_4, + const Commitment& lagrange_first, + const Commitment& lagrange_last, + const Commitment& lagrange_ecc_op, + const Commitment& databus_id) + { + this->circuit_size = circuit_size; + this->log_circuit_size = numeric::get_msb(this->circuit_size); + this->num_public_inputs = num_public_inputs; + this->pub_inputs_offset = pub_inputs_offset; + this->q_m = q_m; + this->q_c = q_c; + this->q_l = q_l; + this->q_r = q_r; + this->q_o = q_o; + this->q_4 = q_4; + this->q_arith = q_arith; + this->q_delta_range = q_delta_range; + this->q_elliptic = q_elliptic; + this->q_aux = q_aux; + this->q_lookup = q_lookup; + this->q_busread = q_busread; + this->q_poseidon2_external = q_poseidon2_external; + 
this->q_poseidon2_internal = q_poseidon2_internal; + this->sigma_1 = sigma_1; + this->sigma_2 = sigma_2; + this->sigma_3 = sigma_3; + this->sigma_4 = sigma_4; + this->id_1 = id_1; + this->id_2 = id_2; + this->id_3 = id_3; + this->id_4 = id_4; + this->table_1 = table_1; + this->table_2 = table_2; + this->table_3 = table_3; + this->table_4 = table_4; + this->lagrange_first = lagrange_first; + this->lagrange_last = lagrange_last; + this->lagrange_ecc_op = lagrange_ecc_op; + this->databus_id = databus_id; + } + MSGPACK_FIELDS(circuit_size, + num_public_inputs, + pub_inputs_offset, + q_m, + q_c, + q_l, + q_r, + q_o, + q_4, + q_arith, + q_delta_range, + q_elliptic, + q_aux, + q_lookup, + q_busread, + q_poseidon2_external, + q_poseidon2_internal, + sigma_1, + sigma_2, + sigma_3, + sigma_4, + id_1, + id_2, + id_3, + id_4, + table_1, + table_2, + table_3, + table_4, + lagrange_first, + lagrange_last, + lagrange_ecc_op, + databus_id); }; /** * @brief A container for storing the partially evaluated multivariates produced by sumcheck. @@ -455,51 +597,6 @@ class GoblinUltraFlavor { */ using ExtendedEdges = ProverUnivariates; - /** - * @brief A field element for each entity of the flavor. These entities represent the prover polynomials evaluated - * at one point. - */ - class AllValues : public AllEntities { - public: - using Base = AllEntities; - using Base::Base; - }; - - /** - * @brief A container for the prover polynomials handles. 
- */ - class ProverPolynomials : public AllEntities { - public: - // TODO(https://github.com/AztecProtocol/barretenberg/issues/925), proving_key could be const ref - ProverPolynomials(ProvingKey& proving_key) - { - for (auto [prover_poly, key_poly] : zip_view(this->get_unshifted(), proving_key.get_all())) { - ASSERT(flavor_get_label(*this, prover_poly) == flavor_get_label(proving_key, key_poly)); - prover_poly = key_poly.share(); - } - for (auto [prover_poly, key_poly] : zip_view(this->get_shifted(), proving_key.get_to_be_shifted())) { - ASSERT(flavor_get_label(*this, prover_poly) == (flavor_get_label(proving_key, key_poly) + "_shift")); - prover_poly = key_poly.shifted(); - } - } - // Define all operations as default, except copy construction/assignment - ProverPolynomials() = default; - ProverPolynomials& operator=(const ProverPolynomials&) = delete; - ProverPolynomials(const ProverPolynomials& o) = delete; - ProverPolynomials(ProverPolynomials&& o) noexcept = default; - ProverPolynomials& operator=(ProverPolynomials&& o) noexcept = default; - ~ProverPolynomials() = default; - [[nodiscard]] size_t get_polynomial_size() const { return q_c.size(); } - [[nodiscard]] AllValues get_row(size_t row_idx) const - { - AllValues result; - for (auto [result_field, polynomial] : zip_view(result.get_all(), this->get_all())) { - result_field = polynomial[row_idx]; - } - return result; - } - }; - /** * @brief A container for the witness commitments. 
*/ diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/grand_product_library.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/grand_product_library.test.cpp index 7a2ef577349b..9ab7c789f67b 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/grand_product_library.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/grand_product_library.test.cpp @@ -47,33 +47,15 @@ template class GrandProductTests : public testing::Test { */ template static void test_permutation_grand_product_construction() { - // Define some mock inputs for proving key constructor - static const size_t num_gates = 8; - static const size_t num_public_inputs = 0; - - // Instatiate a proving_key and make a pointer to it. This will be used to instantiate a Prover. - auto proving_key = std::make_shared(num_gates, num_public_inputs); - - // static const size_t program_width = StandardProver::settings_::program_width; - - // Construct mock wire and permutation polynomials. - // Note: for the purpose of checking the consistency between two methods of computing z_perm, these polynomials - // can simply be random. We're not interested in the particular properties of the result. 
- std::vector wires; - std::vector sigmas; - std::vector ids; - - auto wire_polynomials = proving_key->get_wires(); - auto sigma_polynomials = proving_key->get_sigma_polynomials(); - auto id_polynomials = proving_key->get_id_polynomials(); - for (size_t i = 0; i < Flavor::NUM_WIRES; ++i) { - wires.emplace_back(get_random_polynomial(num_gates)); - sigmas.emplace_back(get_random_polynomial(num_gates)); - ids.emplace_back(get_random_polynomial(num_gates)); - - populate_span(wire_polynomials[i], wires[i]); - populate_span(sigma_polynomials[i], sigmas[i]); - populate_span(id_polynomials[i], ids[i]); + using ProverPolynomials = typename Flavor::ProverPolynomials; + + // Set a mock circuit size + static const size_t circuit_size = 8; + + // Construct a ProverPolynomials object with completely random polynomials + ProverPolynomials prover_polynomials; + for (auto& poly : prover_polynomials.get_all()) { + poly = get_random_polynomial(circuit_size); } // Get random challenges @@ -88,12 +70,6 @@ template class GrandProductTests : public testing::Test { .lookup_grand_product_delta = 1, }; - typename Flavor::ProverPolynomials prover_polynomials; - for (auto [prover_poly, key_poly] : zip_view(prover_polynomials.get_unshifted(), proving_key->get_all())) { - ASSERT(flavor_get_label(prover_polynomials, prover_poly) == flavor_get_label(*proving_key, key_poly)); - prover_poly = key_poly.share(); - } - // Method 1: Compute z_perm using 'compute_grand_product_polynomial' as the prover would in practice constexpr size_t PERMUTATION_RELATION_INDEX = 0; using LHS = @@ -101,7 +77,7 @@ template class GrandProductTests : public testing::Test { ASSERT(Flavor::NUM_WIRES == 4); using RHS = typename bb::UltraPermutationRelation; static_assert(std::same_as); - compute_grand_product(proving_key->circuit_size, prover_polynomials, params); + compute_grand_product(prover_polynomials, params); // Method 2: Compute z_perm locally using the simplest non-optimized syntax possible. 
The comment below, // which describes the computation in 4 steps, is adapted from a similar comment in @@ -129,11 +105,14 @@ template class GrandProductTests : public testing::Test { */ // Make scratch space for the numerator and denominator accumulators. - std::array, Flavor::NUM_WIRES> numerator_accum; - std::array, Flavor::NUM_WIRES> denominator_accum; + std::array, Flavor::NUM_WIRES> numerator_accum; + std::array, Flavor::NUM_WIRES> denominator_accum; + auto wires = prover_polynomials.get_wires(); + auto sigmas = prover_polynomials.get_sigmas(); + auto ids = prover_polynomials.get_ids(); // Step (1) - for (size_t i = 0; i < proving_key->circuit_size; ++i) { + for (size_t i = 0; i < circuit_size; ++i) { for (size_t k = 0; k < Flavor::NUM_WIRES; ++k) { numerator_accum[k][i] = wires[k][i] + (ids[k][i] * beta) + gamma; // w_k(i) + β.id_k(i) + γ denominator_accum[k][i] = wires[k][i] + (sigmas[k][i] * beta) + gamma; // w_k(i) + β.σ_k(i) + γ @@ -142,14 +121,14 @@ template class GrandProductTests : public testing::Test { // Step (2) for (size_t k = 0; k < Flavor::NUM_WIRES; ++k) { - for (size_t i = 0; i < proving_key->circuit_size - 1; ++i) { + for (size_t i = 0; i < circuit_size - 1; ++i) { numerator_accum[k][i + 1] *= numerator_accum[k][i]; denominator_accum[k][i + 1] *= denominator_accum[k][i]; } } // Step (3) - for (size_t i = 0; i < proving_key->circuit_size; ++i) { + for (size_t i = 0; i < circuit_size; ++i) { for (size_t k = 1; k < Flavor::NUM_WIRES; ++k) { numerator_accum[0][i] *= numerator_accum[k][i]; denominator_accum[0][i] *= denominator_accum[k][i]; @@ -157,15 +136,15 @@ template class GrandProductTests : public testing::Test { } // Step (4) - Polynomial z_permutation_expected(proving_key->circuit_size); + Polynomial z_permutation_expected(circuit_size); z_permutation_expected[0] = FF::zero(); // Z_0 = 1 // Note: in practice, we replace this expensive element-wise division with Montgomery batch inversion - for (size_t i = 0; i < proving_key->circuit_size - 
1; ++i) { + for (size_t i = 0; i < circuit_size - 1; ++i) { z_permutation_expected[i + 1] = numerator_accum[0][i] / denominator_accum[0][i]; } // Check consistency between locally computed z_perm and the one computed by the prover library - EXPECT_EQ(proving_key->z_perm, z_permutation_expected); + EXPECT_EQ(prover_polynomials.z_perm, z_permutation_expected); }; /** @@ -178,51 +157,19 @@ template class GrandProductTests : public testing::Test { */ static void test_lookup_grand_product_construction() { - // Define some mock inputs for proving key constructor - static const size_t circuit_size = 8; - static const size_t num_public_inputs = 0; - - // Instatiate a proving_key and make a pointer to it. This will be used to instantiate a Prover. using Flavor = UltraFlavor; - auto proving_key = std::make_shared(circuit_size, num_public_inputs); - - // Construct mock wire and permutation polynomials. - // Note: for the purpose of checking the consistency between two methods of computing z_lookup, these - // polynomials can simply be random. We're not interested in the particular properties of the result. - std::vector wires; - auto wire_polynomials = proving_key->get_wires(); - // Note(luke): Use of 3 wires is fundamental to the structure of the tables and should not be tied to NUM_WIRES - // for now - for (size_t i = 0; i < 3; ++i) { // TODO(Cody): will this test ever generalize? 
- Polynomial random_polynomial = get_random_polynomial(circuit_size); - random_polynomial[0] = 0; // when computing shifts, 1st element needs to be 0 - wires.emplace_back(random_polynomial); - populate_span(wire_polynomials[i], random_polynomial); - } + using ProverPolynomials = typename Flavor::ProverPolynomials; - std::vector tables; - auto table_polynomials = proving_key->get_table_polynomials(); - for (auto& table_polynomial : table_polynomials) { - Polynomial random_polynomial = get_random_polynomial(circuit_size); - random_polynomial[0] = 0; // when computing shifts, 1st element needs to be 0 - tables.emplace_back(random_polynomial); - populate_span(table_polynomial, random_polynomial); - } + // Set a mock circuit size + static const size_t circuit_size = 8; - auto sorted_batched = get_random_polynomial(circuit_size); - sorted_batched[0] = 0; // when computing shifts, 1st element needs to be 0 - auto column_1_step_size = get_random_polynomial(circuit_size); - auto column_2_step_size = get_random_polynomial(circuit_size); - auto column_3_step_size = get_random_polynomial(circuit_size); - auto lookup_index_selector = get_random_polynomial(circuit_size); - auto lookup_selector = get_random_polynomial(circuit_size); - - proving_key->sorted_accum = sorted_batched.share(); - populate_span(proving_key->q_r, column_1_step_size); - populate_span(proving_key->q_m, column_2_step_size); - populate_span(proving_key->q_c, column_3_step_size); - populate_span(proving_key->q_o, lookup_index_selector); - populate_span(proving_key->q_lookup, lookup_selector); + // Construct a ProverPolynomials object with completely random polynomials + ProverPolynomials prover_polynomials; + for (auto& poly : prover_polynomials.get_unshifted()) { + poly = get_random_polynomial(circuit_size); + poly[0] = 0; // for shiftability + } + prover_polynomials.set_shifted(); // Get random challenges auto beta = FF::random_element(); @@ -241,28 +188,12 @@ template class GrandProductTests : public 
testing::Test { .lookup_grand_product_delta = 1, }; - typename Flavor::ProverPolynomials prover_polynomials; - for (auto [prover_poly, key_poly] : zip_view(prover_polynomials.get_unshifted(), proving_key->get_all())) { - ASSERT(flavor_get_label(prover_polynomials, prover_poly) == flavor_get_label(*proving_key, key_poly)); - prover_poly = key_poly.share(); - } - for (auto [prover_poly, key_poly] : - zip_view(prover_polynomials.get_shifted(), proving_key->get_to_be_shifted())) { - ASSERT(flavor_get_label(prover_polynomials, prover_poly) == - flavor_get_label(*proving_key, key_poly) + "_shift"); - prover_poly = key_poly.shifted(); - } - // Test a few assignments - EXPECT_EQ(&proving_key->z_lookup[0], &prover_polynomials.z_lookup[0]); - EXPECT_EQ(&proving_key->sigma_1[0], &prover_polynomials.sigma_1[0]); - EXPECT_EQ(&proving_key->lagrange_last[0], &prover_polynomials.lagrange_last[0]); - // Method 1: Compute z_lookup using the prover library method constexpr size_t LOOKUP_RELATION_INDEX = 1; using LHS = typename std::tuple_element::type; using RHS = LookupRelation; static_assert(std::same_as); - compute_grand_product(proving_key->circuit_size, prover_polynomials, params); + compute_grand_product(prover_polynomials, params); // Method 2: Compute the lookup grand product polynomial Z_lookup: // @@ -279,6 +210,15 @@ template class GrandProductTests : public testing::Test { // Step (1) + auto wires = prover_polynomials.get_wires(); + auto tables = prover_polynomials.get_tables(); + auto sorted_batched = prover_polynomials.sorted_accum; + auto column_1_step_size = prover_polynomials.q_r; + auto column_2_step_size = prover_polynomials.q_m; + auto column_3_step_size = prover_polynomials.q_c; + auto lookup_index_selector = prover_polynomials.q_o; + auto lookup_selector = prover_polynomials.q_lookup; + // Note: block_mask is used for efficient modulus, i.e. 
i % N := i & (N-1), for N = 2^k const size_t block_mask = circuit_size - 1; // Initialize 't(X)' to be used in an expression of the form t(X) + β*t(Xω) @@ -332,7 +272,7 @@ template class GrandProductTests : public testing::Test { z_lookup_expected[i + 1] = accumulators[0][i] / accumulators[3][i]; } - EXPECT_EQ(proving_key->z_lookup, z_lookup_expected); + EXPECT_EQ(prover_polynomials.z_lookup, z_lookup_expected); }; }; diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mock_circuits.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mock_circuits.hpp index cb523ce7186a..36937d8c3476 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mock_circuits.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/mock_circuits.hpp @@ -10,20 +10,16 @@ class MockCircuits { using Point = Curve::AffineElement; /** - * @brief Populate a builder with a specified number of arithmetic gates; includes a PI + * @brief Add a specified number of arithmetic gates (with public inputs) to the provided circuit * * @param builder * @param num_gates */ template - static void construct_arithmetic_circuit(Builder& builder, const size_t target_log2_dyadic_size = 4) + static void add_arithmetic_gates_with_public_inputs(Builder& builder, const size_t num_gates = 4) { - const size_t target_dyadic_size = 1 << target_log2_dyadic_size; - const size_t num_preamble_gates = builder.num_gates; - ASSERT(target_dyadic_size >= num_preamble_gates); - // For good measure, include a gate with some public inputs - if (target_dyadic_size > num_preamble_gates) { + for (size_t i = 0; i < num_gates; ++i) { FF a = FF::random_element(); FF b = FF::random_element(); FF c = FF::random_element(); @@ -35,6 +31,48 @@ class MockCircuits { builder.create_big_add_gate({ a_idx, b_idx, c_idx, d_idx, FF(1), FF(1), FF(1), FF(-1), FF(0) }); } + } + + /** + * @brief Add a specified number of arithmetic gates to the provided circuit + * + * @param builder + * @param 
num_gates + */ + template static void add_arithmetic_gates(Builder& builder, const size_t num_gates = 4) + { + // For good measure, include a gate with some public inputs + for (size_t i = 0; i < num_gates; ++i) { + FF a = FF::random_element(); + FF b = FF::random_element(); + FF c = FF::random_element(); + FF d = a + b + c; + uint32_t a_idx = builder.add_variable(a); + uint32_t b_idx = builder.add_variable(b); + uint32_t c_idx = builder.add_variable(c); + uint32_t d_idx = builder.add_variable(d); + + builder.create_big_add_gate({ a_idx, b_idx, c_idx, d_idx, FF(1), FF(1), FF(1), FF(-1), FF(0) }); + } + } + + /** + * @brief Populate a builder with a specified number of arithmetic gates; includes a PI + * + * @param builder + * @param num_gates + */ + template + static void construct_arithmetic_circuit(Builder& builder, const size_t target_log2_dyadic_size = 4) + { + const size_t target_dyadic_size = 1 << target_log2_dyadic_size; + const size_t num_preamble_gates = builder.num_gates; + ASSERT(target_dyadic_size >= num_preamble_gates); + + // For good measure, include a gate with some public inputs + if (target_dyadic_size > num_preamble_gates) { + add_arithmetic_gates_with_public_inputs(builder, 1); + } // A proper treatment of this would dynamically calculate how many gates to add given static information about // Builder, but a major overhaul of the execution trace is underway, so we just elect to use a hack. 
Namely, for @@ -46,19 +84,10 @@ class MockCircuits { // to prevent underflow of the loop upper limit; target size >= 16 should suffice ASSERT(target_dyadic_size > OFFSET_HACK + num_preamble_gates); - // Add arbitrary arithmetic gates to obtain a total of num_gates-many gates - FF a = FF::random_element(); - FF b = FF::random_element(); - FF c = FF::random_element(); - FF d = a + b + c; - uint32_t a_idx = builder.add_variable(a); - uint32_t b_idx = builder.add_variable(b); - uint32_t c_idx = builder.add_variable(c); - uint32_t d_idx = builder.add_variable(d); + size_t num_gates_to_add = target_dyadic_size - OFFSET_HACK - 1 - num_preamble_gates; - for (size_t i = 0; i < target_dyadic_size - OFFSET_HACK - 1 - num_preamble_gates; ++i) { - builder.create_big_add_gate({ a_idx, b_idx, c_idx, d_idx, FF(1), FF(1), FF(1), FF(-1), FF(0) }); - } + // Add arbitrary arithmetic gates to obtain a total of num_gates-many gates + add_arithmetic_gates(builder, num_gates_to_add); } /** diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/permutation_lib.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/permutation_lib.test.cpp index a47fdc8cbc71..18eddd125e2f 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/permutation_lib.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/permutation_lib.test.cpp @@ -76,7 +76,7 @@ TEST_F(PermutationHelperTests, ComputeHonkStyleSigmaLagrangePolynomialsFromMappi auto mapping = compute_permutation_mapping(circuit_constructor, proving_key.get(), {}); compute_honk_style_permutation_lagrange_polynomials_from_mapping( - proving_key->get_sigma_polynomials(), mapping.sigmas, proving_key.get()); + proving_key->polynomials.get_sigmas(), mapping.sigmas, proving_key.get()); } TEST_F(PermutationHelperTests, ComputeStandardAuxPolynomials) diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/standard_circuit_builder.hpp 
b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/standard_circuit_builder.hpp index e6ede171ffa8..a421a10adaa1 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/standard_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/standard_circuit_builder.hpp @@ -15,6 +15,7 @@ template class StandardCircuitBuilder_ : public CircuitBuilderBase using Arithmetization = StandardArith; using GateBlocks = typename Arithmetization::TraceBlocks; static constexpr size_t NUM_WIRES = Arithmetization::NUM_WIRES; + static constexpr size_t FIXED_BLOCK_SIZE = 0; // not used, for compatibility only // Keeping NUM_WIRES, at least temporarily, for backward compatibility static constexpr size_t program_width = Arithmetization::NUM_WIRES; static constexpr size_t num_selectors = Arithmetization::NUM_SELECTORS; diff --git a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp index feecbf0938d4..90dde82d76c0 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp @@ -33,6 +33,7 @@ class UltraCircuitBuilder_ : public CircuitBuilderBase { + public: + using Base = AllEntities; + using Base::Base; + }; + + /** + * @brief A container for polynomials handles. 
+ */ + // TODO(https://github.com/AztecProtocol/barretenberg/issues/966): use inheritance + class ProverPolynomials : public AllEntities { + public: + // Define all operations as default, except copy construction/assignment + ProverPolynomials() = default; + ProverPolynomials(size_t circuit_size) + { // Initialize all unshifted polynomials to the zero polynomial and initialize the shifted polys + for (auto& poly : get_unshifted()) { + poly = Polynomial{ circuit_size }; + } + set_shifted(); + } + ProverPolynomials& operator=(const ProverPolynomials&) = delete; + ProverPolynomials(const ProverPolynomials& o) = delete; + ProverPolynomials(ProverPolynomials&& o) noexcept = default; + ProverPolynomials& operator=(ProverPolynomials&& o) noexcept = default; + ~ProverPolynomials() = default; + [[nodiscard]] size_t get_polynomial_size() const { return q_c.size(); } + [[nodiscard]] AllValues get_row(const size_t row_idx) const + { + AllValues result; + for (auto [result_field, polynomial] : zip_view(result.get_all(), get_all())) { + result_field = polynomial[row_idx]; + } + return result; + } + + // Set all shifted polynomials based on their to-be-shifted counterpart + void set_shifted() + { + for (auto [shifted, to_be_shifted] : zip_view(get_shifted(), get_to_be_shifted())) { + shifted = to_be_shifted.shifted(); + } + } + }; /** * @brief The proving key is responsible for storing the polynomials used by the prover. - * @note TODO(Cody): Maybe multiple inheritance is the right thing here. In that case, nothing should eve inherit - * from ProvingKey. 
+ * */ - class ProvingKey : public ProvingKey_, WitnessEntities, CommitmentKey> { + class ProvingKey : public ProvingKey_ { public: // Expose constructors on the base class - using Base = ProvingKey_, WitnessEntities, CommitmentKey>; + using Base = ProvingKey_; using Base::Base; + ProvingKey(const size_t circuit_size, const size_t num_public_inputs) + : Base(circuit_size, num_public_inputs) + , polynomials(circuit_size){}; + std::vector memory_read_records; std::vector memory_write_records; std::array sorted_polynomials; - - auto get_to_be_shifted() - { - return RefArray{ this->table_1, this->table_2, this->table_3, this->table_4, this->w_l, this->w_r, - this->w_o, this->w_4, this->sorted_accum, this->z_perm, this->z_lookup }; - }; - // The plookup wires that store plookup read data. - auto get_table_column_wires() { return RefArray{ w_l, w_r, w_o }; }; + ProverPolynomials polynomials; // storage for all polynomials evaluated by the prover void compute_sorted_accumulator_polynomials(const FF& eta, const FF& eta_two, const FF& eta_three) { @@ -309,7 +360,7 @@ class UltraFlavor { */ void compute_sorted_list_accumulator(const FF& eta, const FF& eta_two, const FF& eta_three) { - auto sorted_list_accumulator = Polynomial{ this->circuit_size }; + auto& sorted_list_accumulator = polynomials.sorted_accum; // Construct s via Horner, i.e. 
s = s_1 + η(s_2 + η(s_3 + η*s_4)) for (size_t i = 0; i < this->circuit_size; ++i) { @@ -319,7 +370,6 @@ class UltraFlavor { T0 += sorted_polynomials[0][i]; sorted_list_accumulator[i] = T0; } - sorted_accum = sorted_list_accumulator.share(); } /** @@ -336,7 +386,7 @@ class UltraFlavor { // The plookup memory record values are computed at the indicated indices as // w4 = w3 * eta^3 + w2 * eta^2 + w1 * eta + read_write_flag; // (See plookup_auxiliary_widget.hpp for details) - auto wires = get_wires(); + auto wires = polynomials.get_wires(); // Compute read record values for (const auto& gate_idx : memory_read_records) { @@ -372,10 +422,7 @@ class UltraFlavor { relation_parameters.lookup_grand_product_delta = lookup_grand_product_delta; // Compute permutation and lookup grand product polynomials - auto prover_polynomials = ProverPolynomials(*this); - compute_grand_products(*this, prover_polynomials, relation_parameters); - this->z_perm = prover_polynomials.z_perm; - this->z_lookup = prover_polynomials.z_lookup; + compute_grand_products(this->polynomials, relation_parameters); } }; @@ -402,53 +449,98 @@ class UltraFlavor { this->num_public_inputs = proving_key.num_public_inputs; this->pub_inputs_offset = proving_key.pub_inputs_offset; - for (auto [polynomial, commitment] : zip_view(proving_key.get_precomputed_polynomials(), this->get_all())) { + for (auto [polynomial, commitment] : zip_view(proving_key.polynomials.get_precomputed(), this->get_all())) { commitment = proving_key.commitment_key->commit(polynomial); } } - }; - /** - * @brief A field element for each entity of the flavor. These entities represent the prover polynomials - * evaluated at one point. - */ - class AllValues : public AllEntities { - public: - using Base = AllEntities; - using Base::Base; - }; - - /** - * @brief A container for polynomials handles. 
- */ - class ProverPolynomials : public AllEntities { - public: - ProverPolynomials(ProvingKey& proving_key) - { - for (auto [prover_poly, key_poly] : zip_view(this->get_unshifted(), proving_key.get_all())) { - ASSERT(flavor_get_label(*this, prover_poly) == flavor_get_label(proving_key, key_poly)); - prover_poly = key_poly.share(); - } - for (auto [prover_poly, key_poly] : zip_view(this->get_shifted(), proving_key.get_to_be_shifted())) { - ASSERT(flavor_get_label(*this, prover_poly) == (flavor_get_label(proving_key, key_poly) + "_shift")); - prover_poly = key_poly.shifted(); - } - } - // Define all operations as default, except copy construction/assignment - ProverPolynomials() = default; - ProverPolynomials& operator=(const ProverPolynomials&) = delete; - ProverPolynomials(const ProverPolynomials& o) = delete; - ProverPolynomials(ProverPolynomials&& o) noexcept = default; - ProverPolynomials& operator=(ProverPolynomials&& o) noexcept = default; - ~ProverPolynomials() = default; - [[nodiscard]] size_t get_polynomial_size() const { return q_c.size(); } - [[nodiscard]] AllValues get_row(const size_t row_idx) const + // TODO(https://github.com/AztecProtocol/barretenberg/issues/964): Clean the boilerplate up. 
+ VerificationKey(const size_t circuit_size, + const size_t num_public_inputs, + const size_t pub_inputs_offset, + const Commitment& q_m, + const Commitment& q_c, + const Commitment& q_l, + const Commitment& q_r, + const Commitment& q_o, + const Commitment& q_4, + const Commitment& q_arith, + const Commitment& q_delta_range, + const Commitment& q_elliptic, + const Commitment& q_aux, + const Commitment& q_lookup, + const Commitment& sigma_1, + const Commitment& sigma_2, + const Commitment& sigma_3, + const Commitment& sigma_4, + const Commitment& id_1, + const Commitment& id_2, + const Commitment& id_3, + const Commitment& id_4, + const Commitment& table_1, + const Commitment& table_2, + const Commitment& table_3, + const Commitment& table_4, + const Commitment& lagrange_first, + const Commitment& lagrange_last) { - AllValues result; - for (auto [result_field, polynomial] : zip_view(result.get_all(), get_all())) { - result_field = polynomial[row_idx]; - } - return result; + this->circuit_size = circuit_size; + this->log_circuit_size = numeric::get_msb(this->circuit_size); + this->num_public_inputs = num_public_inputs; + this->pub_inputs_offset = pub_inputs_offset; + this->q_m = q_m; + this->q_c = q_c; + this->q_l = q_l; + this->q_r = q_r; + this->q_o = q_o; + this->q_4 = q_4; + this->q_arith = q_arith; + this->q_delta_range = q_delta_range; + this->q_elliptic = q_elliptic; + this->q_aux = q_aux; + this->q_lookup = q_lookup; + this->sigma_1 = sigma_1; + this->sigma_2 = sigma_2; + this->sigma_3 = sigma_3; + this->sigma_4 = sigma_4; + this->id_1 = id_1; + this->id_2 = id_2; + this->id_3 = id_3; + this->id_4 = id_4; + this->table_1 = table_1; + this->table_2 = table_2; + this->table_3 = table_3; + this->table_4 = table_4; + this->lagrange_first = lagrange_first; + this->lagrange_last = lagrange_last; } + MSGPACK_FIELDS(circuit_size, + num_public_inputs, + pub_inputs_offset, + q_m, + q_c, + q_l, + q_r, + q_o, + q_4, + q_arith, + q_delta_range, + q_elliptic, + q_aux, + 
q_lookup, + sigma_1, + sigma_2, + sigma_3, + sigma_4, + id_1, + id_2, + id_3, + id_4, + table_1, + table_2, + table_3, + table_4, + lagrange_first, + lagrange_last); }; /** diff --git a/barretenberg/cpp/src/barretenberg/sumcheck/instance/instances.hpp b/barretenberg/cpp/src/barretenberg/sumcheck/instance/instances.hpp index dcfc88830983..50e1848a0a4d 100644 --- a/barretenberg/cpp/src/barretenberg/sumcheck/instance/instances.hpp +++ b/barretenberg/cpp/src/barretenberg/sumcheck/instance/instances.hpp @@ -76,9 +76,9 @@ template struct ProverInstances_ { auto get_polynomials_views() const { // As a practical measure, get the first instance's view to deduce the array type - std::arrayprover_polynomials.get_all()), NUM> views; + std::arrayproving_key.polynomials.get_all()), NUM> views; for (size_t i = 0; i < NUM; i++) { - views[i] = _data[i]->prover_polynomials.get_all(); + views[i] = _data[i]->proving_key.polynomials.get_all(); } return views; } diff --git a/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.cpp b/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.cpp index 6be3a9f42d47..3b42eab9d0fb 100644 --- a/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.cpp +++ b/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.cpp @@ -41,10 +41,10 @@ template void ProverInstance_::construct_databus_polynomials(Circuit& circuit) requires IsGoblinFlavor { - Polynomial public_calldata{ dyadic_circuit_size }; - Polynomial calldata_read_counts{ dyadic_circuit_size }; - Polynomial public_return_data{ dyadic_circuit_size }; - Polynomial return_data_read_counts{ dyadic_circuit_size }; + auto& public_calldata = proving_key.polynomials.calldata; + auto& calldata_read_counts = proving_key.polynomials.calldata_read_counts; + auto& public_return_data = proving_key.polynomials.return_data; + auto& return_data_read_counts = proving_key.polynomials.return_data_read_counts; auto calldata = circuit.get_calldata(); auto 
return_data = circuit.get_return_data(); @@ -59,27 +59,11 @@ void ProverInstance_::construct_databus_polynomials(Circuit& circuit) return_data_read_counts[idx] = return_data.get_read_count(idx); } - Polynomial databus_id{ dyadic_circuit_size }; + auto& databus_id = proving_key.polynomials.databus_id; // Compute a simple identity polynomial for use in the databus lookup argument for (size_t i = 0; i < databus_id.size(); ++i) { databus_id[i] = i; } - - proving_key.calldata = public_calldata.share(); - proving_key.calldata_read_counts = calldata_read_counts.share(); - proving_key.return_data = public_return_data.share(); - proving_key.return_data_read_counts = return_data_read_counts.share(); - proving_key.databus_id = databus_id.share(); -} - -template -void ProverInstance_::construct_table_polynomials(Circuit& circuit, size_t dyadic_circuit_size) -{ - auto table_polynomials = construct_lookup_table_polynomials(circuit, dyadic_circuit_size); - proving_key.table_1 = table_polynomials[0].share(); - proving_key.table_2 = table_polynomials[1].share(); - proving_key.table_3 = table_polynomials[2].share(); - proving_key.table_4 = table_polynomials[3].share(); } template class ProverInstance_; diff --git a/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.hpp b/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.hpp index d17f45ce5956..48b5c8e4ff3d 100644 --- a/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.hpp +++ b/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.hpp @@ -31,7 +31,6 @@ template class ProverInstance_ { public: ProvingKey proving_key; - ProverPolynomials prover_polynomials; RelationSeparator alphas; bb::RelationParameters relation_parameters; @@ -42,21 +41,35 @@ template class ProverInstance_ { std::vector gate_challenges; FF target_sum; - ProverInstance_(Circuit& circuit) + ProverInstance_(Circuit& circuit, bool is_structured = false) { 
BB_OP_COUNT_TIME_NAME("ProverInstance(Circuit&)"); circuit.add_gates_to_ensure_all_polys_are_non_zero(); circuit.finalize_circuit(); + // If using a structured trace, ensure that no block exceeds the fixed size + if (is_structured) { + for (auto& block : circuit.blocks.get()) { + ASSERT(block.size() <= circuit.FIXED_BLOCK_SIZE); + } + } + + // TODO(https://github.com/AztecProtocol/barretenberg/issues/905): This is adding ops to the op queue but NOT to + // the circuit, meaning the ECCVM/Translator will use different ops than the main circuit. This will lead to + // failure once https://github.com/AztecProtocol/barretenberg/issues/746 is resolved. if constexpr (IsGoblinFlavor) { circuit.op_queue->append_nonzero_ops(); } - dyadic_circuit_size = compute_dyadic_size(circuit); + if (is_structured) { // Compute dyadic size based on a structured trace with fixed block size + dyadic_circuit_size = compute_structured_dyadic_size(circuit); + } else { // Otherwise, compute conventional dyadic circuit size + dyadic_circuit_size = compute_dyadic_size(circuit); + } - proving_key = std::move(ProvingKey(dyadic_circuit_size, circuit.public_inputs.size())); + proving_key = ProvingKey(dyadic_circuit_size, circuit.public_inputs.size()); // Construct and add to proving key the wire, selector and copy constraint polynomials - Trace::populate(circuit, proving_key); + Trace::populate(circuit, proving_key, is_structured); // If Goblin, construct the databus polynomials if constexpr (IsGoblinFlavor) { @@ -64,16 +77,14 @@ template class ProverInstance_ { } // First and last lagrange polynomials (in the full circuit size) - const auto [lagrange_first, lagrange_last] = - compute_first_and_last_lagrange_polynomials(dyadic_circuit_size); - proving_key.lagrange_first = lagrange_first; - proving_key.lagrange_last = lagrange_last; + proving_key.polynomials.lagrange_first[0] = 1; + proving_key.polynomials.lagrange_last[dyadic_circuit_size - 1] = 1; - construct_table_polynomials(circuit, 
dyadic_circuit_size); + construct_lookup_table_polynomials(proving_key.polynomials.get_tables(), circuit, dyadic_circuit_size); proving_key.sorted_polynomials = construct_sorted_list_polynomials(circuit, dyadic_circuit_size); - std::span public_wires_source = proving_key.w_r; + std::span public_wires_source = proving_key.polynomials.w_r; // Construct the public inputs array for (size_t i = 0; i < proving_key.num_public_inputs; ++i) { @@ -85,9 +96,6 @@ template class ProverInstance_ { ProverInstance_() = default; ~ProverInstance_() = default; - void compute_databus_id() - requires IsGoblinFlavor; - private: static constexpr size_t num_zero_rows = Flavor::has_zero_row ? 1 : 0; static constexpr size_t NUM_WIRES = Circuit::NUM_WIRES; @@ -95,10 +103,19 @@ template class ProverInstance_ { size_t compute_dyadic_size(Circuit&); + /** + * @brief Compute dyadic size based on a structured trace with fixed block size + * + */ + size_t compute_structured_dyadic_size(Circuit& builder) + { + size_t num_blocks = builder.blocks.get().size(); + size_t minimum_size = num_blocks * builder.FIXED_BLOCK_SIZE; + return builder.get_circuit_subgroup_size(minimum_size); + } + void construct_databus_polynomials(Circuit&) requires IsGoblinFlavor; - - void construct_table_polynomials(Circuit&, size_t); }; } // namespace bb diff --git a/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.test.cpp b/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.test.cpp index 55834fac7b33..98a1f836f2ba 100644 --- a/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.test.cpp +++ b/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.test.cpp @@ -66,7 +66,7 @@ template class InstanceTests : public testing::Test { // Method 1: computed sorted list accumulator polynomial using prover library method instance.proving_key.compute_sorted_list_accumulator(eta, eta_two, eta_three); - auto sorted_list_accumulator = instance.proving_key.sorted_accum; + 
auto sorted_list_accumulator = instance.proving_key.polynomials.sorted_accum; // Compute s = s_1 + η*s_2 + η²*s_3 + η³*s_4 Polynomial sorted_list_accumulator_expected{ sorted_list_polynomials[0] }; diff --git a/barretenberg/cpp/src/barretenberg/sumcheck/sumcheck.hpp b/barretenberg/cpp/src/barretenberg/sumcheck/sumcheck.hpp index bbf104425d6b..56a4fc3e9b16 100644 --- a/barretenberg/cpp/src/barretenberg/sumcheck/sumcheck.hpp +++ b/barretenberg/cpp/src/barretenberg/sumcheck/sumcheck.hpp @@ -71,8 +71,10 @@ template class SumcheckProver { */ SumcheckOutput prove(std::shared_ptr instance) { - return prove( - instance->prover_polynomials, instance->relation_parameters, instance->alphas, instance->gate_challenges); + return prove(instance->proving_key.polynomials, + instance->relation_parameters, + instance->alphas, + instance->gate_challenges); }; /** diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_flavor.hpp b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_flavor.hpp index 46eac85ce3ad..ff7cc5fffa8c 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_flavor.hpp @@ -131,24 +131,14 @@ class GoblinTranslatorFlavor { inline void compute_lagrange_polynomials(const CircuitBuilder& builder) { - const size_t circuit_size = compute_dyadic_circuit_size(builder); const size_t mini_circuit_dyadic_size = compute_mini_circuit_dyadic_size(builder); - Polynomial lagrange_polynomial_odd_in_minicircuit(circuit_size); - Polynomial lagrange_polynomial_even_in_minicircut(circuit_size); - Polynomial lagrange_polynomial_second(circuit_size); - Polynomial lagrange_polynomial_second_to_last_in_minicircuit(circuit_size); - for (size_t i = 1; i < mini_circuit_dyadic_size - 1; i += 2) { - lagrange_polynomial_odd_in_minicircuit[i] = 1; - lagrange_polynomial_even_in_minicircut[i + 1] = 1; + this->lagrange_odd_in_minicircuit[i] = 1; + 
this->lagrange_even_in_minicircuit[i + 1] = 1; } - this->lagrange_odd_in_minicircuit = lagrange_polynomial_odd_in_minicircuit.share(); - this->lagrange_even_in_minicircuit = lagrange_polynomial_even_in_minicircut.share(); - lagrange_polynomial_second[1] = 1; - lagrange_polynomial_second_to_last_in_minicircuit[mini_circuit_dyadic_size - 2] = 1; - this->lagrange_second_to_last_in_minicircuit = lagrange_polynomial_second_to_last_in_minicircuit.share(); - this->lagrange_second = lagrange_polynomial_second.share(); + this->lagrange_second[1] = 1; + this->lagrange_second_to_last_in_minicircuit[mini_circuit_dyadic_size - 2] = 1; } /** @@ -287,13 +277,22 @@ class GoblinTranslatorFlavor { relation_wide_limbs_range_constraint_0, // column 76 relation_wide_limbs_range_constraint_1, // column 77 relation_wide_limbs_range_constraint_2, // column 78 - relation_wide_limbs_range_constraint_3, // column 79 - ordered_range_constraints_0, // column 80 - ordered_range_constraints_1, // column 81 - ordered_range_constraints_2, // column 82 - ordered_range_constraints_3, // column 83 - ordered_range_constraints_4); // column 84 + relation_wide_limbs_range_constraint_3); // column 79 + }; + + // TODO(https://github.com/AztecProtocol/barretenberg/issues/907) + // Note: These are technically derived from wires but do not depend on challenges (like z_perm). They are committed + // to in the wires commitment round. 
+ template class OrderedRangeConstraints { + public: + DEFINE_FLAVOR_MEMBERS(DataType, + ordered_range_constraints_0, // column 0 + ordered_range_constraints_1, // column 1 + ordered_range_constraints_2, // column 2 + ordered_range_constraints_3, // column 3 + ordered_range_constraints_4); // column 4 }; + template class WireNonshiftedEntities { public: DEFINE_FLAVOR_MEMBERS(DataType, @@ -311,26 +310,38 @@ class GoblinTranslatorFlavor { template class WitnessEntities : public WireNonshiftedEntities, public WireToBeShiftedEntities, + public OrderedRangeConstraints, public DerivedWitnessEntities, public ConcatenatedRangeConstraints { public: DEFINE_COMPOUND_GET_ALL(WireNonshiftedEntities, WireToBeShiftedEntities, + OrderedRangeConstraints, DerivedWitnessEntities, ConcatenatedRangeConstraints) + // Used when populating wire polynomials directly from circuit data auto get_wires() { return concatenate(WireNonshiftedEntities::get_all(), WireToBeShiftedEntities::get_all()); }; + // Used when computing commitments to wires + ordered range constraints during proof consrtuction + auto get_wires_and_ordered_range_constraints() + { + return concatenate(WireNonshiftedEntities::get_all(), + WireToBeShiftedEntities::get_all(), + OrderedRangeConstraints::get_all()); + }; + // everything but ConcatenatedRangeConstraints (used for ZeroMorph input since concatenated handled separately) // TODO(https://github.com/AztecProtocol/barretenberg/issues/810) auto get_unshifted_without_concatenated() { return concatenate(WireNonshiftedEntities::get_all(), WireToBeShiftedEntities::get_all(), + OrderedRangeConstraints::get_all(), DerivedWitnessEntities::get_all()); } @@ -338,6 +349,7 @@ class GoblinTranslatorFlavor { { return concatenate(WireNonshiftedEntities::get_all(), WireToBeShiftedEntities::get_all(), + OrderedRangeConstraints::get_all(), DerivedWitnessEntities::get_all(), ConcatenatedRangeConstraints::get_all()); } @@ -345,12 +357,14 @@ class GoblinTranslatorFlavor { { return 
concatenate(WireNonshiftedEntities::get_labels(), WireToBeShiftedEntities::get_labels(), + OrderedRangeConstraints::get_labels(), DerivedWitnessEntities::get_labels(), ConcatenatedRangeConstraints::get_labels()); } auto get_to_be_shifted() { return concatenate(WireToBeShiftedEntities::get_all(), + OrderedRangeConstraints::get_all(), DerivedWitnessEntities::get_all()); }; @@ -559,6 +573,9 @@ class GoblinTranslatorFlavor { {} DEFINE_COMPOUND_GET_ALL(PrecomputedEntities, WitnessEntities, ShiftedEntities) + + auto get_precomputed() { return PrecomputedEntities::get_all(); }; + /** * @brief Get the polynomials that are concatenated for the permutation relation * @@ -722,8 +739,8 @@ class GoblinTranslatorFlavor { // Next power of 2 const size_t mini_circuit_dyadic_size = builder.get_circuit_subgroup_size(total_num_gates); - // The actual circuit size is several times bigger than the trace in the builder, because we use - // concatenation to bring the degree of relations down, while extending the length. + // The actual circuit size is several times bigger than the trace in the builder, because we use concatenation + // to bring the degree of relations down, while extending the length. return mini_circuit_dyadic_size * CONCATENATION_GROUP_SIZE; } @@ -732,53 +749,89 @@ class GoblinTranslatorFlavor { return builder.get_circuit_subgroup_size(compute_total_num_gates(builder)); } + /** + * @brief A field element for each entity of the flavor. These entities represent the prover polynomials + * evaluated at one point. + */ + class AllValues : public AllEntities { + public: + using Base = AllEntities; + using Base::Base; + }; + /** + * @brief A container for the prover polynomials handles. 
+ */ + class ProverPolynomials : public AllEntities { + public: + // Define all operations as default, except copy construction/assignment + ProverPolynomials() = default; + // Constructor to init all unshifted polys to the zero polynomial and set the shifted poly data + ProverPolynomials(size_t circuit_size) + { + for (auto& poly : get_unshifted()) { + poly = Polynomial{ circuit_size }; + } + set_shifted(); + } + ProverPolynomials& operator=(const ProverPolynomials&) = delete; + ProverPolynomials(const ProverPolynomials& o) = delete; + ProverPolynomials(ProverPolynomials&& o) noexcept = default; + ProverPolynomials& operator=(ProverPolynomials&& o) noexcept = default; + ~ProverPolynomials() = default; + [[nodiscard]] size_t get_polynomial_size() const { return this->op.size(); } + /** + * @brief Returns the evaluations of all prover polynomials at one point on the boolean + * hypercube, which represents one row in the execution trace. + */ + [[nodiscard]] AllValues get_row(size_t row_idx) const + { + AllValues result; + for (auto [result_field, polynomial] : zip_view(result.get_all(), this->get_all())) { + result_field = polynomial[row_idx]; + } + return result; + } + // Set all shifted polynomials based on their to-be-shifted counterpart + void set_shifted() + { + for (auto [shifted, to_be_shifted] : zip_view(get_shifted(), get_to_be_shifted())) { + shifted = to_be_shifted.shifted(); + } + } + }; + /** * @brief The proving key is responsible for storing the polynomials used by the prover. - * @note TODO(Cody): Maybe multiple inheritance is the right thing here. In that case, nothing should eve - * inherit from ProvingKey. 
+ * */ - class ProvingKey : public ProvingKey_, WitnessEntities, CommitmentKey> { + class ProvingKey : public ProvingKey_ { public: BF batching_challenge_v = { 0 }; BF evaluation_input_x = { 0 }; + ProverPolynomials polynomials; // storage for all polynomials evaluated by the prover // Expose constructors on the base class - using Base = ProvingKey_, WitnessEntities, CommitmentKey>; + using Base = ProvingKey_; using Base::Base; ProvingKey() = default; ProvingKey(const CircuitBuilder& builder) - : ProvingKey_, WitnessEntities, CommitmentKey>( - compute_dyadic_circuit_size(builder), 0) + : Base(compute_dyadic_circuit_size(builder), 0) , batching_challenge_v(builder.batching_challenge_v) , evaluation_input_x(builder.evaluation_input_x) + , polynomials(this->circuit_size) { // First and last lagrange polynomials (in the full circuit size) - const auto [lagrange_first, lagrange_last] = - compute_first_and_last_lagrange_polynomials(compute_dyadic_circuit_size(builder)); - this->lagrange_first = lagrange_first; - this->lagrange_last = lagrange_last; + polynomials.lagrange_first[0] = 1; + polynomials.lagrange_last[circuit_size - 1] = 1; // Compute polynomials with odd and even indices set to 1 up to the minicircuit margin + lagrange // polynomials at second and second to last indices in the minicircuit - compute_lagrange_polynomials(builder); + polynomials.compute_lagrange_polynomials(builder); // Compute the numerator for the permutation argument with several repetitions of steps bridging 0 and - // maximum range constraint - compute_extra_range_constraint_numerator(); - } - - // TODO(https://github.com/AztecProtocol/barretenberg/issues/810): get around this by properly having - // concatenated range be a concept outside of witnessentities - std::vector get_labels() - { - return concatenate(PrecomputedEntities::get_labels(), - WitnessEntities::get_unshifted_labels()); - } - auto get_all() - { - return concatenate(PrecomputedEntities::get_all(), - 
WitnessEntities::get_unshifted()); + // maximum range constraint compute_extra_range_constraint_numerator(); + polynomials.compute_extra_range_constraint_numerator(); } }; @@ -807,46 +860,11 @@ class GoblinTranslatorFlavor { this->pub_inputs_offset = proving_key->pub_inputs_offset; for (auto [polynomial, commitment] : - zip_view(proving_key->get_precomputed_polynomials(), this->get_all())) { + zip_view(proving_key->polynomials.get_precomputed(), this->get_all())) { commitment = proving_key->commitment_key->commit(polynomial); } } }; - /** - * @brief A field element for each entity of the flavor. These entities represent the prover polynomials - * evaluated at one point. - */ - class AllValues : public AllEntities { - public: - using Base = AllEntities; - using Base::Base; - }; - /** - * @brief A container for the prover polynomials handles. - */ - class ProverPolynomials : public AllEntities { - public: - // Define all operations as default, except copy construction/assignment - ProverPolynomials() = default; - ProverPolynomials& operator=(const ProverPolynomials&) = delete; - ProverPolynomials(const ProverPolynomials& o) = delete; - ProverPolynomials(ProverPolynomials&& o) noexcept = default; - ProverPolynomials& operator=(ProverPolynomials&& o) noexcept = default; - ~ProverPolynomials() = default; - [[nodiscard]] size_t get_polynomial_size() const { return this->op.size(); } - /** - * @brief Returns the evaluations of all prover polynomials at one point on the boolean hypercube, which - * represents one row in the execution trace. 
- */ - [[nodiscard]] AllValues get_row(size_t row_idx) const - { - AllValues result; - for (auto [result_field, polynomial] : zip_view(result.get_all(), this->get_all())) { - result_field = polynomial[row_idx]; - } - return result; - } - }; /** * @brief A container for easier mapping of polynomials diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.cpp index ae7f49569481..52776acbe42d 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.cpp @@ -18,52 +18,8 @@ GoblinTranslatorProver::GoblinTranslatorProver(CircuitBuilder& circuit_builder, // Compute total number of gates, dyadic circuit size, etc. key = std::make_shared(circuit_builder); - dyadic_circuit_size = key->circuit_size; compute_witness(circuit_builder); compute_commitment_key(key->circuit_size); - - for (auto [prover_poly, key_poly] : zip_view(prover_polynomials.get_unshifted(), key->get_all())) { - ASSERT(flavor_get_label(prover_polynomials, prover_poly) == flavor_get_label(*key, key_poly)); - prover_poly = key_poly.share(); - } - for (auto [prover_poly, key_poly] : zip_view(prover_polynomials.get_shifted(), key->get_to_be_shifted())) { - ASSERT(flavor_get_label(prover_polynomials, prover_poly) == flavor_get_label(*key, key_poly) + "_shift"); - prover_poly = key_poly.shifted(); - } -} - -/** - * @brief Construct the witness polynomials from the witness vectors in the circuit constructor. - * - * @details In goblin translator wires come as is, since they have to reflect the structure of polynomials in the first - * 4 wires, which we've commited to - * - * @tparam Flavor provides the circuit constructor type and the number of wires. 
- * @param circuit_builder - * @param dyadic_circuit_size Power of 2 circuit size - * @todo TODO(https://github.com/AztecProtocol/barretenberg/issues/783) Optimize memory operations. - * @return std::vector - * */ -std::vector construct_wire_polynomials( - const GoblinTranslatorProver::CircuitBuilder& circuit_builder, const size_t dyadic_circuit_size) -{ - const size_t num_gates = circuit_builder.num_gates; - - std::vector wire_polynomials; - // Populate the wire polynomials with values from conventional wires - for (size_t wire_idx = 0; wire_idx < GoblinTranslatorFlavor::NUM_WIRES; ++wire_idx) { - // Expect all values to be set to 0 initially - GoblinTranslatorProver::Polynomial w_lagrange(dyadic_circuit_size); - - // Insert conventional gate wire values into the wire polynomial - for (size_t i = 0; i < num_gates; ++i) { - auto& wire = circuit_builder.wires[wire_idx]; - w_lagrange[i] = circuit_builder.get_variable(wire[i]); - } - - wire_polynomials.push_back(std::move(w_lagrange)); - } - return wire_polynomials; } /** @@ -76,103 +32,22 @@ void GoblinTranslatorProver::compute_witness(CircuitBuilder& circuit_builder) return; } - // Construct the conventional wire polynomials - auto wire_polynomials = construct_wire_polynomials(circuit_builder, dyadic_circuit_size); - - // TODO(https://github.com/AztecProtocol/barretenberg/issues/907) - // In order: - // wire_polynomials - // = WireEntities::get_wires - concatenated - // = WireNonShiftedEntities + WireToBeShiftedEntities - concatenated - key->op = wire_polynomials[0]; - key->x_lo_y_hi = wire_polynomials[1]; - key->x_hi_z_1 = wire_polynomials[2]; - key->y_lo_z_2 = wire_polynomials[3]; - key->p_x_low_limbs = wire_polynomials[4]; - key->p_x_low_limbs_range_constraint_0 = wire_polynomials[5]; - key->p_x_low_limbs_range_constraint_1 = wire_polynomials[6]; - key->p_x_low_limbs_range_constraint_2 = wire_polynomials[7]; - key->p_x_low_limbs_range_constraint_3 = wire_polynomials[8]; - 
key->p_x_low_limbs_range_constraint_4 = wire_polynomials[9]; - key->p_x_low_limbs_range_constraint_tail = wire_polynomials[10]; - key->p_x_high_limbs = wire_polynomials[11]; - key->p_x_high_limbs_range_constraint_0 = wire_polynomials[12]; - key->p_x_high_limbs_range_constraint_1 = wire_polynomials[13]; - key->p_x_high_limbs_range_constraint_2 = wire_polynomials[14]; - key->p_x_high_limbs_range_constraint_3 = wire_polynomials[15]; - key->p_x_high_limbs_range_constraint_4 = wire_polynomials[16]; - key->p_x_high_limbs_range_constraint_tail = wire_polynomials[17]; - key->p_y_low_limbs = wire_polynomials[18]; - key->p_y_low_limbs_range_constraint_0 = wire_polynomials[19]; - key->p_y_low_limbs_range_constraint_1 = wire_polynomials[20]; - key->p_y_low_limbs_range_constraint_2 = wire_polynomials[21]; - key->p_y_low_limbs_range_constraint_3 = wire_polynomials[22]; - key->p_y_low_limbs_range_constraint_4 = wire_polynomials[23]; - key->p_y_low_limbs_range_constraint_tail = wire_polynomials[24]; - key->p_y_high_limbs = wire_polynomials[25]; - key->p_y_high_limbs_range_constraint_0 = wire_polynomials[26]; - key->p_y_high_limbs_range_constraint_1 = wire_polynomials[27]; - key->p_y_high_limbs_range_constraint_2 = wire_polynomials[28]; - key->p_y_high_limbs_range_constraint_3 = wire_polynomials[29]; - key->p_y_high_limbs_range_constraint_4 = wire_polynomials[30]; - key->p_y_high_limbs_range_constraint_tail = wire_polynomials[31]; - key->z_low_limbs = wire_polynomials[32]; - key->z_low_limbs_range_constraint_0 = wire_polynomials[33]; - key->z_low_limbs_range_constraint_1 = wire_polynomials[34]; - key->z_low_limbs_range_constraint_2 = wire_polynomials[35]; - key->z_low_limbs_range_constraint_3 = wire_polynomials[36]; - key->z_low_limbs_range_constraint_4 = wire_polynomials[37]; - key->z_low_limbs_range_constraint_tail = wire_polynomials[38]; - key->z_high_limbs = wire_polynomials[39]; - key->z_high_limbs_range_constraint_0 = wire_polynomials[40]; - 
key->z_high_limbs_range_constraint_1 = wire_polynomials[41]; - key->z_high_limbs_range_constraint_2 = wire_polynomials[42]; - key->z_high_limbs_range_constraint_3 = wire_polynomials[43]; - key->z_high_limbs_range_constraint_4 = wire_polynomials[44]; - key->z_high_limbs_range_constraint_tail = wire_polynomials[45]; - key->accumulators_binary_limbs_0 = wire_polynomials[46]; - key->accumulators_binary_limbs_1 = wire_polynomials[47]; - key->accumulators_binary_limbs_2 = wire_polynomials[48]; - key->accumulators_binary_limbs_3 = wire_polynomials[49]; - key->accumulator_low_limbs_range_constraint_0 = wire_polynomials[50]; - key->accumulator_low_limbs_range_constraint_1 = wire_polynomials[51]; - key->accumulator_low_limbs_range_constraint_2 = wire_polynomials[52]; - key->accumulator_low_limbs_range_constraint_3 = wire_polynomials[53]; - key->accumulator_low_limbs_range_constraint_4 = wire_polynomials[54]; - key->accumulator_low_limbs_range_constraint_tail = wire_polynomials[55]; - key->accumulator_high_limbs_range_constraint_0 = wire_polynomials[56]; - key->accumulator_high_limbs_range_constraint_1 = wire_polynomials[57]; - key->accumulator_high_limbs_range_constraint_2 = wire_polynomials[58]; - key->accumulator_high_limbs_range_constraint_3 = wire_polynomials[59]; - key->accumulator_high_limbs_range_constraint_4 = wire_polynomials[60]; - key->accumulator_high_limbs_range_constraint_tail = wire_polynomials[61]; - key->quotient_low_binary_limbs = wire_polynomials[62]; - key->quotient_high_binary_limbs = wire_polynomials[63]; - key->quotient_low_limbs_range_constraint_0 = wire_polynomials[64]; - key->quotient_low_limbs_range_constraint_1 = wire_polynomials[65]; - key->quotient_low_limbs_range_constraint_2 = wire_polynomials[66]; - key->quotient_low_limbs_range_constraint_3 = wire_polynomials[67]; - key->quotient_low_limbs_range_constraint_4 = wire_polynomials[68]; - key->quotient_low_limbs_range_constraint_tail = wire_polynomials[69]; - 
key->quotient_high_limbs_range_constraint_0 = wire_polynomials[70]; - key->quotient_high_limbs_range_constraint_1 = wire_polynomials[71]; - key->quotient_high_limbs_range_constraint_2 = wire_polynomials[72]; - key->quotient_high_limbs_range_constraint_3 = wire_polynomials[73]; - key->quotient_high_limbs_range_constraint_4 = wire_polynomials[74]; - key->quotient_high_limbs_range_constraint_tail = wire_polynomials[75]; - key->relation_wide_limbs = wire_polynomials[76]; - key->relation_wide_limbs_range_constraint_0 = wire_polynomials[77]; - key->relation_wide_limbs_range_constraint_1 = wire_polynomials[78]; - key->relation_wide_limbs_range_constraint_2 = wire_polynomials[79]; - key->relation_wide_limbs_range_constraint_3 = wire_polynomials[80]; + // Populate the wire polynomials from the wire vectors in the circuit constructor. Note: In goblin translator wires + // come as is, since they have to reflect the structure of polynomials in the first 4 wires, which we've commited to + for (auto [wire_poly, wire] : zip_view(key->polynomials.get_wires(), circuit_builder.wires)) { + for (size_t i = 0; i < circuit_builder.num_gates; ++i) { + wire_poly[i] = circuit_builder.get_variable(wire[i]); + } + } // We construct concatenated versions of range constraint polynomials, where several polynomials are concatenated // into one. These polynomials are not commited to. - bb::compute_concatenated_polynomials(key.get()); + bb::compute_concatenated_polynomials(key->polynomials); // We also contruct ordered polynomials, which have the same values as concatenated ones + enough values to bridge // the range from 0 to maximum range defined by the range constraint. 
- bb::compute_goblin_translator_range_constraint_ordered_polynomials(key.get(), mini_circuit_dyadic_size); + bb::compute_goblin_translator_range_constraint_ordered_polynomials(key->polynomials, + mini_circuit_dyadic_size); computed_witness = true; } @@ -198,10 +73,10 @@ void GoblinTranslatorProver::execute_preamble_round() const auto SHIFT = uint256_t(1) << Flavor::NUM_LIMB_BITS; const auto SHIFTx2 = uint256_t(1) << (Flavor::NUM_LIMB_BITS * 2); const auto SHIFTx3 = uint256_t(1) << (Flavor::NUM_LIMB_BITS * 3); - const auto accumulated_result = - BF(uint256_t(key->accumulators_binary_limbs_0[1]) + uint256_t(key->accumulators_binary_limbs_1[1]) * SHIFT + - uint256_t(key->accumulators_binary_limbs_2[1]) * SHIFTx2 + - uint256_t(key->accumulators_binary_limbs_3[1]) * SHIFTx3); + const auto accumulated_result = BF(uint256_t(key->polynomials.accumulators_binary_limbs_0[1]) + + uint256_t(key->polynomials.accumulators_binary_limbs_1[1]) * SHIFT + + uint256_t(key->polynomials.accumulators_binary_limbs_2[1]) * SHIFTx2 + + uint256_t(key->polynomials.accumulators_binary_limbs_3[1]) * SHIFTx3); transcript->send_to_verifier("circuit_size", circuit_size); transcript->send_to_verifier("evaluation_input_x", key->evaluation_input_x); transcript->send_to_verifier("accumulated_result", accumulated_result); @@ -213,9 +88,9 @@ void GoblinTranslatorProver::execute_preamble_round() */ void GoblinTranslatorProver::execute_wire_and_sorted_constraints_commitments_round() { - // Commit to all wire polynomials - auto wire_polys = key->get_wires(); - auto labels = commitment_labels.get_wires(); + // Commit to all wire polynomials and ordered range constraint polynomials + auto wire_polys = key->polynomials.get_wires_and_ordered_range_constraints(); + auto labels = commitment_labels.get_wires_and_ordered_range_constraints(); for (size_t idx = 0; idx < wire_polys.size(); ++idx) { transcript->send_to_verifier(labels[idx], commitment_key->commit(wire_polys[idx])); } @@ -241,10 +116,10 @@ void 
GoblinTranslatorProver::execute_grand_product_computation_round() uint_evaluation_input.slice(NUM_LIMB_BITS * 3, NUM_LIMB_BITS * 4), uint_evaluation_input }; - relation_parameters.accumulated_result = { key->accumulators_binary_limbs_0[1], - key->accumulators_binary_limbs_1[1], - key->accumulators_binary_limbs_2[1], - key->accumulators_binary_limbs_3[1] }; + relation_parameters.accumulated_result = { key->polynomials.accumulators_binary_limbs_0[1], + key->polynomials.accumulators_binary_limbs_1[1], + key->polynomials.accumulators_binary_limbs_2[1], + key->polynomials.accumulators_binary_limbs_3[1] }; std::vector uint_batching_challenge_powers; auto batching_challenge_v = key->batching_challenge_v; @@ -266,9 +141,9 @@ void GoblinTranslatorProver::execute_grand_product_computation_round() }; } // Compute constraint permutation grand product - compute_grand_products(*key, prover_polynomials, relation_parameters); + compute_grand_products(key->polynomials, relation_parameters); - transcript->send_to_verifier(commitment_labels.z_perm, commitment_key->commit(key->z_perm)); + transcript->send_to_verifier(commitment_labels.z_perm, commitment_key->commit(key->polynomials.z_perm)); } /** @@ -285,7 +160,7 @@ void GoblinTranslatorProver::execute_relation_check_rounds() for (size_t idx = 0; idx < gate_challenges.size(); idx++) { gate_challenges[idx] = transcript->template get_challenge("Sumcheck:gate_challenge_" + std::to_string(idx)); } - sumcheck_output = sumcheck.prove(prover_polynomials, relation_parameters, alpha, gate_challenges); + sumcheck_output = sumcheck.prove(key->polynomials, relation_parameters, alpha, gate_challenges); } /** @@ -296,16 +171,16 @@ void GoblinTranslatorProver::execute_relation_check_rounds() void GoblinTranslatorProver::execute_zeromorph_rounds() { using ZeroMorph = ZeroMorphProver_; - ZeroMorph::prove(prover_polynomials.get_unshifted_without_concatenated(), - prover_polynomials.get_to_be_shifted(), + 
ZeroMorph::prove(key->polynomials.get_unshifted_without_concatenated(), + key->polynomials.get_to_be_shifted(), sumcheck_output.claimed_evaluations.get_unshifted_without_concatenated(), sumcheck_output.claimed_evaluations.get_shifted(), sumcheck_output.challenge, commitment_key, transcript, - prover_polynomials.get_concatenated_constraints(), + key->polynomials.get_concatenated_constraints(), sumcheck_output.claimed_evaluations.get_concatenated_constraints(), - prover_polynomials.get_concatenation_groups()); + key->polynomials.get_concatenation_groups()); } HonkProof& GoblinTranslatorProver::export_proof() diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.hpp b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.hpp index 7fdc6228baab..077632dfc044 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.hpp @@ -18,7 +18,6 @@ class GoblinTranslatorProver { using CommitmentKey = typename Flavor::CommitmentKey; using ProvingKey = typename Flavor::ProvingKey; using Polynomial = typename Flavor::Polynomial; - using ProverPolynomials = typename Flavor::ProverPolynomials; using CommitmentLabels = typename Flavor::CommitmentLabels; using PCS = typename Flavor::PCS; using Transcript = typename Flavor::Transcript; @@ -47,9 +46,6 @@ class GoblinTranslatorProver { std::shared_ptr key; - // Container for spans of all polynomials required by the prover (i.e. all multivariates evaluated by Sumcheck). 
- ProverPolynomials prover_polynomials; - CommitmentLabels commitment_labels; std::shared_ptr commitment_key; diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/relation_correctness.test.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/relation_correctness.test.cpp index 1d15be935fbb..c1b1cca37788 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/relation_correctness.test.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/relation_correctness.test.cpp @@ -137,17 +137,16 @@ TEST_F(GoblinTranslatorRelationCorrectnessTests, Permutation) fill_polynomial_with_random_14_bit_values(prover_polynomials.relation_wide_limbs_range_constraint_3); // Compute ordered range constraint polynomials that go in the denominator of the grand product polynomial - compute_goblin_translator_range_constraint_ordered_polynomials(&prover_polynomials, mini_circuit_size); + compute_goblin_translator_range_constraint_ordered_polynomials(prover_polynomials, mini_circuit_size); // Compute the fixed numerator (part of verification key) prover_polynomials.compute_extra_range_constraint_numerator(); // Compute concatenated polynomials (4 polynomials produced from other constraint polynomials by concatenation) - compute_concatenated_polynomials(&prover_polynomials); + compute_concatenated_polynomials(prover_polynomials); // Compute the grand product polynomial - compute_grand_product>( - full_circuit_size, prover_polynomials, params); + compute_grand_product>(prover_polynomials, params); prover_polynomials.z_perm_shift = prover_polynomials.z_perm.shifted(); using Relations = typename Flavor::Relations; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/goblin_ultra_composer.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/goblin_ultra_composer.test.cpp index eabc2edbb271..dc5f8e76dc08 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/goblin_ultra_composer.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/goblin_ultra_composer.test.cpp @@ 
-59,6 +59,43 @@ class GoblinUltraHonkComposerTests : public ::testing::Test { }; } // namespace +/** + * @brief Test proof construction/verification for a circuit with ECC op gates, public inputs, and basic arithmetic + * gates + * + */ +TEST_F(GoblinUltraHonkComposerTests, Basic) +{ + GoblinUltraCircuitBuilder builder; + + GoblinMockCircuits::construct_simple_circuit(builder); + + // Construct and verify Honk proof + bool honk_verified = construct_and_verify_honk_proof(builder); + EXPECT_TRUE(honk_verified); +} + +/** + * @brief Test proof construction/verification for a structured execution trace + * + */ +TEST_F(GoblinUltraHonkComposerTests, BasicStructured) +{ + GoblinUltraCircuitBuilder builder; + + GoblinMockCircuits::construct_simple_circuit(builder); + + // Construct and verify Honk proof using a structured trace + bool structured = true; + auto instance = std::make_shared>(builder, structured); + builder.blocks.summarize(); + GoblinUltraProver prover(instance); + auto verification_key = std::make_shared(instance->proving_key); + GoblinUltraVerifier verifier(verification_key); + auto proof = prover.construct_proof(); + EXPECT_TRUE(verifier.verify_proof(proof)); +} + /** * @brief Test proof construction/verification for a circuit with ECC op gates, public inputs, and basic arithmetic * gates diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp index 700ece5181f2..b34fd9588d45 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/oink_prover.cpp @@ -65,9 +65,9 @@ template void OinkProver::execute_wire_commitment { // Commit to the first three wire polynomials of the instance // We only commit to the fourth wire polynomial after adding memory recordss - witness_commitments.w_l = commitment_key->commit(proving_key.w_l); - witness_commitments.w_r = commitment_key->commit(proving_key.w_r); - 
witness_commitments.w_o = commitment_key->commit(proving_key.w_o); + witness_commitments.w_l = commitment_key->commit(proving_key.polynomials.w_l); + witness_commitments.w_r = commitment_key->commit(proving_key.polynomials.w_r); + witness_commitments.w_o = commitment_key->commit(proving_key.polynomials.w_o); auto wire_comms = witness_commitments.get_wires(); auto wire_labels = commitment_labels.get_wires(); @@ -77,10 +77,10 @@ template void OinkProver::execute_wire_commitment if constexpr (IsGoblinFlavor) { // Commit to Goblin ECC op wires - witness_commitments.ecc_op_wire_1 = commitment_key->commit(proving_key.ecc_op_wire_1); - witness_commitments.ecc_op_wire_2 = commitment_key->commit(proving_key.ecc_op_wire_2); - witness_commitments.ecc_op_wire_3 = commitment_key->commit(proving_key.ecc_op_wire_3); - witness_commitments.ecc_op_wire_4 = commitment_key->commit(proving_key.ecc_op_wire_4); + witness_commitments.ecc_op_wire_1 = commitment_key->commit(proving_key.polynomials.ecc_op_wire_1); + witness_commitments.ecc_op_wire_2 = commitment_key->commit(proving_key.polynomials.ecc_op_wire_2); + witness_commitments.ecc_op_wire_3 = commitment_key->commit(proving_key.polynomials.ecc_op_wire_3); + witness_commitments.ecc_op_wire_4 = commitment_key->commit(proving_key.polynomials.ecc_op_wire_4); auto op_wire_comms = witness_commitments.get_ecc_op_wires(); auto labels = commitment_labels.get_ecc_op_wires(); @@ -89,13 +89,14 @@ template void OinkProver::execute_wire_commitment } // Commit to DataBus columns and corresponding read counts - witness_commitments.calldata = commitment_key->commit(proving_key.calldata); - witness_commitments.calldata_read_counts = commitment_key->commit(proving_key.calldata_read_counts); + witness_commitments.calldata = commitment_key->commit(proving_key.polynomials.calldata); + witness_commitments.calldata_read_counts = commitment_key->commit(proving_key.polynomials.calldata_read_counts); transcript->send_to_verifier(domain_separator + 
commitment_labels.calldata, witness_commitments.calldata); transcript->send_to_verifier(domain_separator + commitment_labels.calldata_read_counts, witness_commitments.calldata_read_counts); - witness_commitments.return_data = commitment_key->commit(proving_key.return_data); - witness_commitments.return_data_read_counts = commitment_key->commit(proving_key.return_data_read_counts); + witness_commitments.return_data = commitment_key->commit(proving_key.polynomials.return_data); + witness_commitments.return_data_read_counts = + commitment_key->commit(proving_key.polynomials.return_data_read_counts); transcript->send_to_verifier(domain_separator + commitment_labels.return_data, witness_commitments.return_data); transcript->send_to_verifier(domain_separator + commitment_labels.return_data_read_counts, witness_commitments.return_data_read_counts); @@ -119,8 +120,8 @@ template void OinkProver::execute_sorted_list_acc relation_parameters.eta, relation_parameters.eta_two, relation_parameters.eta_three); // Commit to the sorted witness-table accumulator and the finalized (i.e. 
with memory records) fourth wire // polynomial - witness_commitments.sorted_accum = commitment_key->commit(proving_key.sorted_accum); - witness_commitments.w_4 = commitment_key->commit(proving_key.w_4); + witness_commitments.sorted_accum = commitment_key->commit(proving_key.polynomials.sorted_accum); + witness_commitments.w_4 = commitment_key->commit(proving_key.polynomials.w_4); transcript->send_to_verifier(domain_separator + commitment_labels.sorted_accum, witness_commitments.sorted_accum); transcript->send_to_verifier(domain_separator + commitment_labels.w_4, witness_commitments.w_4); @@ -138,8 +139,9 @@ template void OinkProver::execute_log_derivative_ if constexpr (IsGoblinFlavor) { // Compute and commit to the logderivative inverse used in DataBus proving_key.compute_logderivative_inverse(relation_parameters); - witness_commitments.calldata_inverses = commitment_key->commit(proving_key.calldata_inverses); - witness_commitments.return_data_inverses = commitment_key->commit(proving_key.return_data_inverses); + + witness_commitments.calldata_inverses = commitment_key->commit(proving_key.polynomials.calldata_inverses); + witness_commitments.return_data_inverses = commitment_key->commit(proving_key.polynomials.return_data_inverses); transcript->send_to_verifier(domain_separator + commitment_labels.calldata_inverses, witness_commitments.calldata_inverses); transcript->send_to_verifier(domain_separator + commitment_labels.return_data_inverses, @@ -153,11 +155,10 @@ template void OinkProver::execute_log_derivative_ */ template void OinkProver::execute_grand_product_computation_round() { - proving_key.compute_grand_product_polynomials(relation_parameters); - witness_commitments.z_perm = commitment_key->commit(proving_key.z_perm); - witness_commitments.z_lookup = commitment_key->commit(proving_key.z_lookup); + witness_commitments.z_perm = commitment_key->commit(proving_key.polynomials.z_perm); + witness_commitments.z_lookup = 
commitment_key->commit(proving_key.polynomials.z_lookup); transcript->send_to_verifier(domain_separator + commitment_labels.z_perm, witness_commitments.z_perm); transcript->send_to_verifier(domain_separator + commitment_labels.z_lookup, witness_commitments.z_lookup); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/relation_correctness.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/relation_correctness.test.cpp index ac3ca627ce51..12ac51adf353 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/relation_correctness.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/relation_correctness.test.cpp @@ -276,19 +276,18 @@ TEST_F(UltraRelationCorrectnessTests, Ultra) instance->relation_parameters.eta_two, instance->relation_parameters.eta_three); instance->proving_key.compute_grand_product_polynomials(instance->relation_parameters); - instance->prover_polynomials = Flavor::ProverPolynomials(instance->proving_key); // Check that selectors are nonzero to ensure corresponding relation has nontrivial contribution - ensure_non_zero(proving_key.q_arith); - ensure_non_zero(proving_key.q_delta_range); - ensure_non_zero(proving_key.q_lookup); - ensure_non_zero(proving_key.q_elliptic); - ensure_non_zero(proving_key.q_aux); + ensure_non_zero(proving_key.polynomials.q_arith); + ensure_non_zero(proving_key.polynomials.q_delta_range); + ensure_non_zero(proving_key.polynomials.q_lookup); + ensure_non_zero(proving_key.polynomials.q_elliptic); + ensure_non_zero(proving_key.polynomials.q_aux); // Construct the round for applying sumcheck relations and results for storing computed results using Relations = typename Flavor::Relations; - auto& prover_polynomials = instance->prover_polynomials; + auto& prover_polynomials = instance->proving_key.polynomials; auto params = instance->relation_parameters; // Check that each relation is satisfied across each row of the prover polynomials check_relation>(circuit_size, prover_polynomials, params); @@ -333,28 +332,27 
@@ TEST_F(UltraRelationCorrectnessTests, GoblinUltra) instance->relation_parameters.eta_three); instance->proving_key.compute_logderivative_inverse(instance->relation_parameters); instance->proving_key.compute_grand_product_polynomials(instance->relation_parameters); - instance->prover_polynomials = Flavor::ProverPolynomials(instance->proving_key); // Check that selectors are nonzero to ensure corresponding relation has nontrivial contribution - ensure_non_zero(proving_key.q_arith); - ensure_non_zero(proving_key.q_delta_range); - ensure_non_zero(proving_key.q_lookup); - ensure_non_zero(proving_key.q_elliptic); - ensure_non_zero(proving_key.q_aux); - ensure_non_zero(proving_key.q_busread); - ensure_non_zero(proving_key.q_poseidon2_external); - ensure_non_zero(proving_key.q_poseidon2_internal); - - ensure_non_zero(proving_key.calldata); - ensure_non_zero(proving_key.calldata_read_counts); - ensure_non_zero(proving_key.calldata_inverses); - ensure_non_zero(proving_key.return_data); - ensure_non_zero(proving_key.return_data_read_counts); - ensure_non_zero(proving_key.return_data_inverses); + ensure_non_zero(proving_key.polynomials.q_arith); + ensure_non_zero(proving_key.polynomials.q_delta_range); + ensure_non_zero(proving_key.polynomials.q_lookup); + ensure_non_zero(proving_key.polynomials.q_elliptic); + ensure_non_zero(proving_key.polynomials.q_aux); + ensure_non_zero(proving_key.polynomials.q_busread); + ensure_non_zero(proving_key.polynomials.q_poseidon2_external); + ensure_non_zero(proving_key.polynomials.q_poseidon2_internal); + + ensure_non_zero(proving_key.polynomials.calldata); + ensure_non_zero(proving_key.polynomials.calldata_read_counts); + ensure_non_zero(proving_key.polynomials.calldata_inverses); + ensure_non_zero(proving_key.polynomials.return_data); + ensure_non_zero(proving_key.polynomials.return_data_read_counts); + ensure_non_zero(proving_key.polynomials.return_data_inverses); // Construct the round for applying sumcheck relations and results for 
storing computed results using Relations = typename Flavor::Relations; - auto& prover_polynomials = instance->prover_polynomials; + auto& prover_polynomials = instance->proving_key.polynomials; auto params = instance->relation_parameters; // Check that each relation is satisfied across each row of the prover polynomials diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/sumcheck.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/sumcheck.test.cpp index 9c267bb72b5a..5962b8ba212f 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/sumcheck.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/sumcheck.test.cpp @@ -161,7 +161,6 @@ TEST_F(SumcheckTestsRealCircuit, Ultra) instance->relation_parameters.eta_two, instance->relation_parameters.eta_three); instance->proving_key.compute_grand_product_polynomials(instance->relation_parameters); - instance->prover_polynomials = Flavor::ProverPolynomials(instance->proving_key); auto prover_transcript = Transcript::prover_init_empty(); auto circuit_size = instance->proving_key.circuit_size; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_composer.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_composer.test.cpp index c6015ca160ce..564afd23c4a7 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_composer.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_composer.test.cpp @@ -4,6 +4,7 @@ #include "barretenberg/plonk_honk_shared/library/grand_product_delta.hpp" #include "barretenberg/relations/permutation_relation.hpp" #include "barretenberg/relations/relation_parameters.hpp" +#include "barretenberg/stdlib_circuit_builders/mock_circuits.hpp" #include "barretenberg/stdlib_circuit_builders/plookup_tables/fixed_base/fixed_base.hpp" #include "barretenberg/stdlib_circuit_builders/plookup_tables/types.hpp" #include "barretenberg/stdlib_circuit_builders/ultra_circuit_builder.hpp" @@ -70,21 +71,44 @@ TEST_F(UltraHonkComposerTests, 
ANonZeroPolynomialIsAGoodPolynomial) auto instance = std::make_shared(circuit_builder); UltraProver prover(instance); auto proof = prover.construct_proof(); - auto& proving_key = instance->proving_key; + auto& polynomials = instance->proving_key.polynomials; - for (auto& poly : proving_key.get_selectors()) { + for (auto& poly : polynomials.get_selectors()) { ensure_non_zero(poly); } - for (auto& poly : proving_key.get_table_polynomials()) { + for (auto& poly : polynomials.get_tables()) { ensure_non_zero(poly); } - for (auto& poly : proving_key.get_wires()) { + for (auto& poly : polynomials.get_wires()) { ensure_non_zero(poly); } } +/** + * @brief Test proof construction/verification for a structured execution trace + * + */ +TEST_F(UltraHonkComposerTests, StructuredTrace) +{ + auto builder = UltraCircuitBuilder(); + size_t num_gates = 3; + + // Add some arbitrary arithmetic gates that utilize public inputs + MockCircuits::add_arithmetic_gates_with_public_inputs(builder, num_gates); + + // Construct an instance with a structured execution trace + bool structured = true; + auto instance = std::make_shared(builder, structured); + info(instance->proving_key.circuit_size); + UltraProver prover(instance); + auto verification_key = std::make_shared(instance->proving_key); + UltraVerifier verifier(verification_key); + auto proof = prover.construct_proof(); + EXPECT_TRUE(verifier.verify_proof(proof)); +} + /** * @brief Test simple circuit with public inputs * @@ -95,19 +119,7 @@ TEST_F(UltraHonkComposerTests, PublicInputs) size_t num_gates = 10; // Add some arbitrary arithmetic gates that utilize public inputs - for (size_t i = 0; i < num_gates; ++i) { - fr a = fr::random_element(); - uint32_t a_idx = builder.add_public_variable(a); - - fr b = fr::random_element(); - fr c = fr::random_element(); - fr d = a + b + c; - uint32_t b_idx = builder.add_variable(b); - uint32_t c_idx = builder.add_variable(c); - uint32_t d_idx = builder.add_variable(d); - - 
builder.create_big_add_gate({ a_idx, b_idx, c_idx, d_idx, fr(1), fr(1), fr(1), fr(-1), fr(0) }); - } + MockCircuits::add_arithmetic_gates_with_public_inputs(builder, num_gates); prove_and_verify(builder, /*expected_result=*/true); } @@ -195,15 +207,8 @@ TEST_F(UltraHonkComposerTests, create_gates_from_plookup_accumulators) expected_scalar >>= table_bits; } } - auto instance = std::make_shared(circuit_builder); - UltraProver prover(instance); - auto verification_key = std::make_shared(instance->proving_key); - UltraVerifier verifier(verification_key); - auto proof = prover.construct_proof(); - - bool result = verifier.verify_proof(proof); - EXPECT_EQ(result, true); + prove_and_verify(circuit_builder, /*expected_result=*/true); } TEST_F(UltraHonkComposerTests, test_no_lookup_proof) diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.cpp index f3fc6b0e5be8..92166a00142e 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.cpp @@ -57,8 +57,8 @@ template void UltraProver_::execute_relation_chec * */ template void UltraProver_::execute_zeromorph_rounds() { - ZeroMorph::prove(instance->prover_polynomials.get_unshifted(), - instance->prover_polynomials.get_to_be_shifted(), + ZeroMorph::prove(instance->proving_key.polynomials.get_unshifted(), + instance->proving_key.polynomials.get_to_be_shifted(), sumcheck_output.claimed_evaluations.get_unshifted(), sumcheck_output.claimed_evaluations.get_shifted(), sumcheck_output.challenge, @@ -79,7 +79,6 @@ template HonkProof& UltraProver_::construct_proof instance->proving_key = std::move(proving_key); instance->relation_parameters = std::move(relation_params); instance->alphas = alphas; - instance->prover_polynomials = ProverPolynomials(instance->proving_key); // Fiat-Shamir: alpha // Run sumcheck subprotocol. 
diff --git a/barretenberg/cpp/src/barretenberg/vm/generated/avm_flavor.hpp b/barretenberg/cpp/src/barretenberg/vm/generated/avm_flavor.hpp index e55fbc4dc80f..fdafe0ffaac9 100644 --- a/barretenberg/cpp/src/barretenberg/vm/generated/avm_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/vm/generated/avm_flavor.hpp @@ -1566,10 +1566,11 @@ class AvmFlavor { }; public: - class ProvingKey : public ProvingKey_, WitnessEntities, CommitmentKey> { + class ProvingKey + : public ProvingKeyAvm_, WitnessEntities, CommitmentKey> { public: // Expose constructors on the base class - using Base = ProvingKey_, WitnessEntities, CommitmentKey>; + using Base = ProvingKeyAvm_, WitnessEntities, CommitmentKey>; using Base::Base; RefVector get_to_be_shifted() diff --git a/docs/docs/protocol-specs/gas-and-fees/fee-payment-asset.md b/docs/docs/protocol-specs/gas-and-fees/fee-payment-asset.md new file mode 100644 index 000000000000..d5dfb648f4b8 --- /dev/null +++ b/docs/docs/protocol-specs/gas-and-fees/fee-payment-asset.md @@ -0,0 +1,22 @@ +--- +title: Fee Payment Asset +--- + +# Fee Payment Asset + +The Fee Payment Asset (FPA) is an enshrined asset in the Aztec network that is used to pay fees. + +The FPA has several important properties: + +- It is fungible +- It cannot be transferred between accounts on the Aztec network +- It is obtained on Aztec via a bridge from Ethereum +- It only has public balances + +All transactions on the Aztec network have a [non-zero transaction_fee](./fee-schedule.md#da-gas), denominated in FPA, which must be paid for the transaction to be included in the block. + +When a block is successfully published on L1, the sequencer is paid on L1 the sum of all transaction fees in the block, denominated in FPA. + +:::danger +We need a definition of the L1 fee payment asset. 
+::: diff --git a/docs/docs/protocol-specs/gas-and-fees/fee-payments-and-metering.md b/docs/docs/protocol-specs/gas-and-fees/fee-payments-and-metering.md deleted file mode 100644 index 1e3f882b4b58..000000000000 --- a/docs/docs/protocol-specs/gas-and-fees/fee-payments-and-metering.md +++ /dev/null @@ -1,360 +0,0 @@ -# Fee Payments and Metering - -## Requirements - -Private state transition execution and proving is performed by the end user. However, once a transaction is submitted to the network, further resource is required to verify private kernel proofs, effect public state transitions and include the transaction within a rollup. This comes at the expense of the sequencer selected for the current slot. These resources include, but are not limited to: - -1. Transaction [validation](../transactions/validity.md) -1. Execution of public function bytecode -1. Generation of initial witnesses and proving of public and rollup circuits -1. Storage of world state and computation of merkle proofs -1. Finalization of state transition functions on Ethereum -1. Storage of private notes - -Sequencers will need compensating for their efforts, leading to requirements for the provision of payments to the sequencer. Note, some of the computation may be outsourced to third parties as part of the prover selection mechanism, the cost of this is borne by the sequencer outside of the protocol. - -We can define a number of requirements that serve to provide a transparent and fair mechanism of fee payments between transaction senders and sequencers. - -1. Senders need to accurately quantify the resource consumption of a transaction and generate an appropriate fee for it. -2. Senders need to be assured that they will be charged fees fairly and deterministically for execution of their transaction and inclusion in a rollup. -3. Senders need to be refunded for any unused fee resulting from processing their transaction. -4. 
Senders need to be able to successfully submit a transaction when they have not previously used Aztec before or possess any funds on the network. -5. Sequencers need to be fairly and deterministically compensated for their expense in including transactions in a rollup. -6. Sequencers require agency in accepting transactions based on the fee that is being paid. -7. Sequencers need certainty that they will be paid for their effort in executing transactions, even if any public component of the transaction fails or insufficient fees are provided for this execution. -8. Sequencers need protection against grief or DOS attacks. More specifically, sequencers need to be confident that they will not be required to expend an unreasonable amount of effort before being able to reliably determine the fee endowed to a transaction. -9. The network at large needs to be protected from situations where nodes are unable to sync blocks because of unconstrained state growth. - -## High Level Concepts and Design - -1. We will use concepts of L1, L2 and DA gas to universally define units of resource for the Ethereum and Aztec networks respectively. L1 gas directly mirrors the actual gas specification as defined by Ethereum, L2 gas covers all resource expended on the L2 network. Finally, DA gas accounts for the data stored on the network's Data Availability solution. -2. We will deterministically quantify all resource consumption of a transaction into 8 values. We will define these values later but essentially they represent the amortized and transaction-specific quantities of each of L1, L2 and DA gas. -3. The transaction sender will provide a single fee for the transaction. This will be split into 3 components to cover each of the L1, L2 and DA gas costs. The sender will specify `feePerGas` and `gasLimit` for each component. Doing so provides protection to the sender that the amount of fee applicable to any component is constrained. -4. 
We will constrain the sequencer to apply the correct amortized and transaction-specific fees; ensuring the sender can not be charged arbitrarily. -5. We will define a method by which fees are paid to the sequencer in a single asset, but where the fee payment mechanism enables transaction senders to pay in any asset. -6. Upon accepting a transaction, we will constrain the sequencer to receive payment and provide any refund owing via the methods specified by the sender. -7. We will define limits on the amount of L1, L2 and DA gas that can be consumed by a block. - -## Fee Payment Assets - -We will enshrine a single public asset to be deployed at genesis for fee payments. This asset will be deposited into the Aztec Network via a one-way bridge from L1 ( see [cross chain communication](../l1-smart-contracts/index.md) ) and can **only** be used for the payment of fees. No other asset can be accepted by a sequencer for fee payments. Ultimately, fees will be paid to the sequencer on L1, with the enshrined L2 asset being burnt as part of the production of a rollup. - -:::note -TODO: Section further down on 1559 burning. -::: - -A process will be enshrined by which users can have fees paid on their behalf as part of the same transaction. This facilitates the deployment of contracts designed for paying fees on behalf of transaction senders. - -## Transaction Phases - -Transactions will be divided into 3 phases: - -1. The fee preparation phase can contain both private and/or public functions. -2. The application logic phase can contain both private and/or public functions. -3. The fee distribution phase only contains a public function. - -All of these phases occur **within the same transaction**, ultimately resulting in 2 sets of public inputs being emitted from the private kernel circuits. Those related to the fee payment and those related to the application logic. State changes requested by the application logic are reverted if any component fails. 
State changes in the fee preparation and distribution components are only reverted if either of those components fail. - -![Transaction Components](/img/protocol-specs/gas-and-fees/Transaction.png) - -The fee preparation and fee distribution phases respectively are responsible for ensuring that sufficient quantity of the fee payment asset is made available for the transaction and that it is correctly distributed to the sequencer with any refund being returned to the transaction sender. The sequencer will have agency over which contract methods they are willing to accept for execution in these phases and will have visibility over the arguments passed to them. This is important as these functions must be successfully executed in order for the sequencer to be paid. It is assumed that the network will settle on a number of universally recognized fee payment contracts implementing fee preparation and distribution. - -## Gas Metering - -Broadly speaking, resource consumption incurred by the sequencer falls into categories of transaction-specific consumption and amortized, per-rollup consumption. Each operation performed by the sequencer can be attributed with a fixed amount of gas per unit, representing its level of resource consumption. The unit will differ between operations, for example in some operations it may be per-byte whilst in others it could be per-opcode. What matters is that we are able to determine the total gas consumption of any given transaction. - -Examples of operations for which we want to measure gas consumption are: - -1. Execution of public function bytecode and proving public function execution -2. Executing and proving the rollup circuits -3. Validating that a transaction is not attempting to double spend -4. Accessing and/or modifying contract storage -5. Executing L1 verification -6. Publishing to a data availability layer and verifying the published data - -Some operations are specific to a transaction, such as public function execution.
The quantity of gas consumed is solely determined by the nature of the requested function. Other costs such as L1 verification are amortized over all of the transactions within a rollup. These amortized gas values will be apportioned by the sequencer at the point of creating a rollup based on the rollup's size. - -Additionally, these gas consuming operations reflect 3 different domains, L1, L2 and DA (Data Availability). - -A comprehensive table of gas consuming operations can be found in the [fee schedule](./fee-schedule.md). - -## Paying Transaction Fees - -Transactions will need to be provided with sufficient fees to cover their gas consumption. The [private kernel circuits](../circuits/high-level-topology.md) understand a transaction's private execution as having 2 phases. The first phase is for the payment of fees. It is during this phase that the private execution must generate side-effects and enqueued function calls for the fee preparation and fee distribution phases of the transaction. These side-effects are deemed non-revertible. Typically, only contracts designed to be written as transaction entrypoints will need to be concerned with these phases and once the fee payment execution is complete, the transaction is moved to the second phase where all execution is considered the application logic. The [private kernel circuits](../circuits/high-level-topology.md) maintain a 'high water mark' of side effects below which those side effects are deemed non-revertible. - -Transaction senders will need to compute a sufficient fee for the transaction considering both the transaction specific and amortized gas consumption. Transaction specific L1, L2, and DA gas can be calculated via simulation whereas amortized gas will need to be calculated by using a transaction sender specified minimum amortization. This minimum amortization is simply the minimum sized rollup that the transaction sender is willing to be included in. 
From this value, the amortized L1, L2 and DA gas values can be determined. Finally, a fixed amount of gas for the execution of fee distribution will need to be specified. - -An example of L2 gas amortization could be the transaction sender specifying a minimum amortization of 1024 transactions. The transaction sender would then compute the amount of amortized gas required for a rollup of that size: - -``` -TotalGasToBeAmortized = (1024 - 2) * GMerge + GRoot -L2AmortizedGasLimit = TotalGasToBeAmortized / 1024 - -Where - GMerge = The gas cost of proving the merge rollup circuit. - GRoot = The gas cost of proving the root rollup circuit. -``` - -In this example, were the transaction to be included within a rollup larger than 1024 transactions, the transaction sender would be refunded this amortization difference. - -The private kernel circuits will output 8 `Gas` values. The 6 `GasLimit`'s represent maximum quantities of gas that the transaction sender permits to be consumed. Insufficient limits will cause the transaction to revert with an `OutOfGas` condition. Fees will be refunded to the transaction sender for unused quantities of gas, The `FeeDistributionGas` values are fixed amounts of gas effectively representing fixed fees that the transaction sender is willing to pay for their chosen fee distribution. 
- - -| Value | Description | -| -------- | -------- | -| `L1AmortizedGasLimit` | The maximum quantity of gas permitted for use in amortized L1 operations | -| `L1TxGasLimit` | The maximum quantity of gas permitted for use in transaction-specific L1 operations | -| `L2AmortizedGasLimit` | The maximum quantity of gas permitted for use in amortized L2 operations | -| `L2TxGasLimit` | The maximum quantity of gas permitted for use in transaction-specific L2 operations | -| `L2FeeDistributionGas` | The quantity of L2 gas the sequencer can charge for executing the fee distribution function | -| `DAFeeDistributionGas` | The quantity of DA gas the sequencer can charge for publishing state updates and events, which are produced as part of fee distribution | -| `DAAmortizedGasLimit` | The maximum quantity of gas permitted for use in amortized Data Availability operations | -| `DATxGasLimit` | The maximum quantity of DA gas permitted for use in transaction specific Data Availability functions | - -By constraining each of these values individually, the transaction sender is protected from a dishonest sequencer allocating an unfairly high amount of gas to one category and leaving insufficient gas for other categories causing a transaction to erroneously be deemed 'out of gas' and a fee taken for improper execution. - -Along with the transaction's `GasLimit`'s, the private kernel circuits will output `FeePerGas` values for each of L1, L2 and DA domains. - - -| Value | Description | -| -------- | -------- | -| `feePerL1Gas` | The per-gas quantity of fee payment asset used to pay for L1 gas | -| `feePerL2Gas` | The per-gas quantity of fee payment asset used to pay for L2 gas | -| `feePerDAGas` | The per-gas quantity of fee payment asset used to pay for DA gas | - -The total fees provided with a transaction can now be derived. 
- -``` -L1Fee = (L1AmortizedGasLimit + L1TxGasLimit) * feePerL1Gas -L2Fee = (L2AmortizedGasLimit + L2TxGasLimit + L2FeeDistributionGas) * feePerL2Gas -DAFee = (DAAmortizedGasLimit + DATxGasLimit + DAFeeDistributionGas) * feePerDAGas - -TotalFee = L1Fee + L2Fee + DAFee -``` - -## Executing Transactions and Collecting Fees - -Having selected a transaction, a sequencer executes its 3 phases, maintaining agency over the ability to select transactions that specify supported functions for fee preparation and distribution. In case of application logic failure, execution of fee preparation and distribution continues and only side-effects from application logic are reverted. - -The transaction's fee preparation and fee distribution functions must be called with the correct arguments, this will be constrained by the [public kernel circuits](../circuits/high-level-topology.md) in the same way as any other public function. The sequencer will need to provide correct inputs to the public VM circuit for the following values which may be required by fee distribution functions. - - -| Value | Description | -| -------- | -------- | -| `feePerL1Gas` | Taken from the transaction | -| `feePerL2Gas` | Taken from the transaction | -| `feePerDAGas` | Taken from the transaction | -| `totalFee` | Total fee provided, the product of all gas limits and fee per gas values specified in the transaction | -| `l1GasUsed` | The accumulated quantity of L1 gas used, both amortized and per-transaction | -| `l2GasUsed` | The accumulated quantity of L2 gas used, both amortized and per-transaction | -| `DAGasUsed` | The accumulated quantity of DA gas used, both amortized and per-transaction | -| `feeRecipient` | The aztec address designated as the recipient of fees for the current block | - -The values of gas used must be calculated and applied appropriately by the sequencer, a variety of constraints are in place for this. - -1.
The sequencer specifies the size of rollup being produced to the base rollup circuit and uses this value when calculating amortized gas consumption. This value is a public input of the base rollup circuit. -2. The sequencer specifies the fee recipient to the base rollup circuit and uses this value in fee distribution calls. This value is a public input of the base rollup circuit. -3. The sequencer calculates an initial set of values for consumed transaction specific and amortized gas. -4. All forms of gas usage are accumulated by the public VM circuit and form part of the public inputs for the public kernel circuit. -5. The public kernel circuit public inputs also include the gas and fee related inputs provided to the public VM circuit. -6. The base rollup circuit computes the total amount of L1, L2 and DA gas consumed by the transaction, considering both private and public execution and transaction specific and amortized gas. It also considers reverted public execution. These values are public inputs to the circuit. -7. The base rollup circuit verifies the fee distribution function was provided with correct values by constraining the appropriate public kernel public inputs. -8. The values of rollup size, fee recipient and gas used are propagated up the rollup tree. The values of gas used are accumulated at each merge circuit and verified at the root rollup as not breaching the block gas limits. Rollup size and fee recipient are constrained in every merge circuit to be equal in the public inputs of the circuit's children. - -:::note -TODO: Links to circuit pages and public inputs about the above -::: - -## Block Limits - -The root rollup circuit will constrain block-wide limits on the quantities of gas that can be consumed. This is primarily to prevent state growth and ensure that network participants can sync to the head of the chain. - -## EIP-1559 - -:::note -This is a big TODO.
We intend to implement some kind of 1559 system but it is still very much under discussion. -::: - -## Payment Flow Examples - -### Native Payment Asset Flow - -In this scenario, Alice wishes to pay for her transaction. She already has a balance of the fee payment asset (FPA) and wishes to pay for her transaction using some of this balance. This will be a public payment as that is all that is supported by the FPA. This scenario does not require a fee preparation step. Alice already has a balance of FPA and as this asset can **only** be used to pay for fees, there is no danger of it being spent within the application logic phase of Alice's transaction. Therefore, Alice can simply enqueue a fee payment method on the FPA contract as the fee distribution phase of this transaction. - -```mermaid -sequenceDiagram - box L2 Actors - participant Alice - participant Sequencer - end - - box L2 Contracts - participant AccountContract - participant App - participant FPA - end - - Alice->>AccountContract: run entrypoint - AccountContract->>FPA: enqueue FPA.pay_fee(max_fee) msg_sender == Alice as fee distribution function - AccountContract->>App: app logic - App->>AccountContract: response - AccountContract->>Alice: finished private execution - - Alice->>Sequencer: tx object - - Sequencer->>Sequencer: Recognize whitelisted function FPA.pay_fee(max_fee) and msg.sender == Alice - Sequencer->>FPA: verify that Alice has >= funds required from tx object - FPA->>Sequencer: Alice has >= funds required from tx object - Sequencer->>App: app logic - App->>Sequencer: response - Sequencer->>FPA: FPA.pay_fee(max_fee) - FPA->>FPA: calculate fee based on inputs to VM circuit - FPA->>Alice: Alice's balance is reduced by fee amount - FPA->>Sequencer: Sequencer's balance is increased by fee amount -``` - -### Non-Native Asset Private Payment Flow - -In this scenario, Alice doesn't have a balance of FPA, but does have a balance of an alternative asset (AST) that she wishes to provide as payment
instead. A third party has deployed a Fee Payment Contract (FPC) that has a balance of FPA and is willing to pay on behalf of Alice in exchange for AST plus a commission for doing so. This will be a private payment such that Alice will provide private funds to the FPC in return for the FPC making a public payment on Alice's behalf. - -```mermaid -sequenceDiagram - box L2 Actors - participant Alice - participant Sequencer - end - - box L2 Contracts - participant AccountContract - participant App - participant FPA - participant AST - participant FPC - end - - Alice->>Alice: transient auth witness for AST private transfer - Alice->>AccountContract: run entrypoint - AccountContract->>FPC: private_fee_entrypoint(AST, max_fee, nonce) - - FPC->>AST: AST.transfer(FPC, max_fee + commission, nonce) - AST->>AccountContract: check auth witness - FPC->>FPC: enqueue FPA.private_fee_payment(max_fee) msg_sender == FPC as fee distribution function - FPC->>AccountContract: response - AccountContract->>App: app logic - App->>AccountContract: response - AccountContract->>Alice: finished private execution - - Alice->>Sequencer: tx object - - Sequencer->>Sequencer: Recognize whitelisted function FPA.private_fee_payment(max_fee) and msg.sender == FPC - Sequencer->>FPA: verify that FPC has >= funds required from tx object - FPA->>Sequencer: FPC has >= funds required from tx object - - Sequencer->>App: app logic - App->>Sequencer: response - - Sequencer->>FPA: FPA.private_fee_payment(max_fee) - FPA->>FPA: calculate fee based on inputs to VM circuit - FPA->>FPC: FPC's balance is reduced by fee amount - FPA->>Sequencer: Sequencer's balance is increased by fee amount - FPA->>Alice: Alice is provided with a private refund -``` - -### Non-Native Asset Public Payment Flow - -This scenario is similar to the non-native asset private payment flow but here Alice owns a public balance of AST that she wishes to give to the Fee Payment Contract in return for it paying for Alice's transaction. 
- -```mermaid -sequenceDiagram - box L2 Actors - participant Alice - participant Sequencer - end - - box L2 Contracts - participant AccountContract - participant App - participant FPA - participant AST - participant FPC - end - - Alice->>AccountContract: run entrypoint - AccountContract->>AccountContract: public auth witness for AST transfer - AccountContract->>FPC: public_fee_entrypoint(AST, max_fee, nonce) - activate FPC - FPC->>FPC: enqueue FPC.public_fee_preparation(Alice, AST, max_fee, nonce) as fee preparation with msg_sender == FPC - FPC->>FPC: enqueue FPC.public_fee_payment(Alice, AST, max_fee) as fee distribution with msg_sender == FPC - FPC->>AccountContract: deactivate FPC - AccountContract->>App: app logic - App->>AccountContract: response - AccountContract->>Alice: finished private execution - - Alice->>Sequencer: tx object - - Sequencer->>Sequencer: Recognize whitelisted function FPC.public_fee_preparation(Alice, AST, max_fee, nonce) and msg.sender == FPC - Sequencer->>FPC: FPC.public_fee_preparation(Alice, AST, max_fee, nonce) - activate FPC - FPC->>AST: AST.transfer_public(Alice, FPC, max_fee + commission, nonce) - AST->>AccountContract: check auth witness - AccountContract->>AST: response - AST->>FPC: response - FPC->>FPA: FPA.check_balance(max_fee) - FPA->>FPC: response - FPC->>Sequencer: FPC has the funds - deactivate FPC - - Sequencer->>App: app logic - App->>Sequencer: response - - Sequencer->>Sequencer: Recognize whitelisted function FPC.public_fee_payment(Alice, AST, max_fee) and msg.sender == FPC - Sequencer->>FPC: FPC.public_fee_payment(Alice, AST, max_fee) - activate FPC - FPC->>FPA: FPA.pay_fee(max_fee) - FPA->>FPA: calculate fee based on inputs to VM circuit - FPA->>Sequencer: Sequencer's balance is increased by fee amount - FPA->>FPC: rebate value - FPC->>AST: AST.transfer_public(FPC, Alice, rebate, 0) - AST->>FPC: response - FPC->>Alice: Alice's balance is increased by rebate value - deactivate FPC -``` - -### DApp Sponsorship - -In 
this scenario a DApp wishes to pay the fee on behalf of a user for interacting with it. The DApp has a balance of FPA from which it wishes to pay for the transaction. It shares many similarities with the previous native asset fee payment scenario. - -```mermaid -sequenceDiagram - box L2 Actors - participant Alice - participant Sequencer - end - - box L2 Contracts - participant AccountContract - participant DApp - participant FPA - end - - Alice->>DApp: run entrypoint - DApp->>AccountContract: check auth witness - AccountContract->>DApp: app logic - DApp->>DApp: check if will sponsor action - DApp->>FPA: enqueue FPA.pay_fee(max_fee) and msg_sender == DApp as fee distribution - DApp->>DApp: app logic - DApp->>Alice: finished private execution - - Alice->>Sequencer: tx object - - Sequencer->>Sequencer: Recognize whitelisted function FPA.pay_fee(max_fee) and msg.sender == DApp - Sequencer->>FPA: verify that DApp has >= funds required from tx object - FPA->>Sequencer: DApp has >= funds required from tx object - - Sequencer->>DApp: app logic - DApp->>Sequencer: response - Sequencer->>FPA: FPA.pay_fee(max_fee) - FPA->>FPA: calculate fee based on inputs to VM circuit - FPA->>DApp: DApp's balance is reduced by fee amount - FPA->>Sequencer: Sequencer's balance is increased by fee amount -``` diff --git a/docs/docs/protocol-specs/gas-and-fees/fee-schedule.md b/docs/docs/protocol-specs/gas-and-fees/fee-schedule.md index 389df2949010..a7120a506129 100644 --- a/docs/docs/protocol-specs/gas-and-fees/fee-schedule.md +++ b/docs/docs/protocol-specs/gas-and-fees/fee-schedule.md @@ -1,26 +1,107 @@ # Fee Schedule -:::info -This section is a placeholder, we will flesh this out in much greater detail when we come to profile operations and assign gas costs +The [transaction fee](./specifying-gas-fee-info.md#transaction-fee) is comprised of a DA component, an L2 component, and an inclusion fee. 
The DA and L2 components are calculated by multiplying the gas consumed in each dimension by the respective `feePerGas` value. The inclusion fee is a fixed cost associated with the transaction, which is used to cover the cost of verifying the encompassing rollup proof on L1. + +# DA Gas + +DA gas is consumed to cover the costs associated with publishing data associated with a transaction. + +These data include: + - new note hashes + - new nullifiers + - new l2 -> l1 message hashes + - new public data writes + - new logs + - protocol metadata (e.g. the amount of gas consumed, revert code, etc.) + +The DA gas used is then calculated as: + +``` +DA_BYTES_PER_FIELD = 32 +DA_GAS_PER_BYTE = 16 +FIXED_DA_GAS = 512 + +# FIXED_DA_GAS covers the protocol metadata, +# which should remain less than 512/16 = 32 bytes + +da_gas_per_field = DA_BYTES_PER_FIELD * DA_GAS_PER_BYTE + +note_hash_gas = da_gas_per_field * (number of notes) +nullifier_gas = da_gas_per_field * (number of nullifiers) +l2_to_l1_message_gas = da_gas_per_field * (number of l2_to_l1_messages) + +# public data writes specify a value and index +public_data_writes_gas = 2 * da_gas_per_field * (number of public_data_writes) + +log_gas = DA_GAS_PER_BYTE * (unencrypted_log_preimages_length + encrypted_log_preimages_length) + +da_gas_used = FIXED_DA_GAS + + note_hash_gas + + nullifier_gas + + l2_to_l1_message_gas + + public_data_writes_gas + + log_gas + + teardown_da_gas +``` + +:::note Non-zero `transaction_fees` +A side effect of the above calculation is that all transactions will have a non-zero `transaction_fee`. 
::: - -| Action | Resource Domain | Consumption Calculation | Comment | -| -------- | -------- | -------- | ------- | -| Verifying the private kernel proof | L2 | Fixed L2/Transaction | | -| Verifying each nullifier against the world state | L2 | Fixed L2/Tx nullifier | | -| Verifying each nullifier against others in the same block | L2 | Fixed L2/Tx nullifier | Whilst not strictly a fixed cost, this would need to be allocated as a fixed cost as it depends on the composition of the rollup | -| Verifying log preimages against the sha256 log hashes contained in the private kernel public inputs | L2 | L2 gas per pre-image field | | -| Verifying contract deployment data against the sha256 hash of this data contained in the private kernel public inputs | L2 | L2 gas per hash | | -| Publishing contract data to DA | DA | DA gas per byte | | -| Publishing state updates to DA | DA | DA gas per byte | | -| Publishing notes/tags to DA | DA | DA gas per byte | | -| Publishing L2->L1 messages | L1 | Calldata gas per byte + processing & storing of the message | | -| Public function execution | L2 | L2 gas per function opcode | | -| Proving the public VM circuit for a public function | L2 | Fixed L2/Tx public function | | -| Proving the public kernel circuit for a public function | L2 | Fixed L2/Tx public function | | -| Proving the base rollup circuit | L2 | Fixed L2/Transaction | -| Proving the merge rollup circuit | L2 | Amortized L2/Transaction | -| Proving the root rollup circuit | L2 | Amortized L2/Transaction | -| Publishing the block header to L1 | L1 | Amortized L1/Transaction | -| Verifying the rollup proof | L1 | Amortized L1/Transaction | \ No newline at end of file +# L2 Gas + +L2 gas is consumed to cover the costs associated with executing the public VM, proving the public VM circuit, and proving the public kernel circuit. 
+ +The public VM has an [instruction set](../public-vm/instruction-set.mdx) with opcode level gas metering to cover the cost of actions performed within the public VM. + +Additionally, there is a fixed cost associated with each iteration of the public VM (i.e. the number of enqueued public function calls, plus 1 if there is a teardown function), which is used to cover the cost of proving the public VM circuit. + +The L2 gas used is then calculated as: + +``` +AVM_STARTUP_L2_GAS = 1024 + + +num_avm_invocations = (number of enqueued public function calls) + + (is there a teardown function ? 1 : 0) + +l2_gas_used = AVM_STARTUP_L2_GAS * num_avm_invocations + + teardown_l2_gas + + (gas reported as consumed by the public VM) +``` + +:::warning L2 Gas from Private +In the current implementation, private execution does not consume L2 gas. This will change in future versions of the protocol, because there is still work that needs to be performed by the sequencer corresponding to the private outputs, which is effectively L2 gas. The following operations performed in private execution will likely consume L2 gas in future versions of the protocol: +- emitting note hashes (due to tree insertion) +- emitting nullifiers (due to tree insertion) +- possibly emitting logs (due to validation checks) +::: + +# Max Inclusion Fee + +Each transaction, and each block, has inescapable overhead costs associated with it which are not directly related to the amount of data or computation performed.
+ +These costs include: +- verifying the private kernel proof of each transaction +- executing/proving the base/merge/root rollup circuits + - includes verifying that every new nullifier is unique across the tx/block + - includes processing l2->l1 messages of each transaction, even if they are empty (and thus have no DA gas cost) + - includes ingesting l1->l2 messages that were posted during the previous block + - injecting a public data write to levy the transaction fee on the `fee_payer` +- publishing the block header to the rollup contract on L1 + - includes verification of the rollup proof + - includes insertion of the new root of the l2->l1 message tree into the L1 Outbox + - consumes the pending messages in the L1 Inbox +- publishing the block header to DA + +See [the l1 contracts section](../l1-smart-contracts/index.md) for more information on the L1 Inbox and Outbox. + +Users cover these costs by [specifying an inclusion fee](./specifying-gas-fee-info.md#specifying-gas--fee-info), which is different from other parameters specified in that it is a fixed fee offered to the sequencer, denominated in [FPA](./fee-payment-asset.md). + +Even though these line items will be the same for every transaction in a block, the **cost** to the sequencer will vary, particularly based on: +- congestion on L1 +- prevailing price of proof generation + +A price discovery mechanism is being developed to help users set the inclusion fee appropriately. + + diff --git a/docs/docs/protocol-specs/gas-and-fees/index.md b/docs/docs/protocol-specs/gas-and-fees/index.md index 17647cd29590..55e2808eeefd 100644 --- a/docs/docs/protocol-specs/gas-and-fees/index.md +++ b/docs/docs/protocol-specs/gas-and-fees/index.md @@ -4,8 +4,16 @@ title: Gas & Fees # Gas & Fees -Private message delivery encompasses the encryption, tagging, and broadcasting of private messages on the Aztec Network. +The Aztec network uses a fee system to incentivize sequencers to process transactions and publish blocks. 
+ +This section breaks down: +- [the fee payment asset](./fee-payment-asset.md) +- [how users specify gas/fee parameters in their transactions](./specifying-gas-fee-info.md) +- [fee abstraction](./tx-setup-and-teardown.md) +- [tracking gas/fee information in the kernel circuits](./kernel-tracking.md) +- [how gas/fees cover the costs of transaction execution](./fee-schedule.md) +- [published data pertaining to gas/fees](./published-gas-and-fee-data.md) import DocCardList from '@theme/DocCardList'; - \ No newline at end of file + diff --git a/docs/docs/protocol-specs/gas-and-fees/kernel-tracking.md b/docs/docs/protocol-specs/gas-and-fees/kernel-tracking.md new file mode 100644 index 000000000000..bf678054ce55 --- /dev/null +++ b/docs/docs/protocol-specs/gas-and-fees/kernel-tracking.md @@ -0,0 +1,544 @@ +--- +title: Kernel Tracking +--- + +# Kernel Tracking + +Gas and fees are tracked throughout the kernel circuits to ensure that users are charged correctly for their transactions. + +# Private Kernel Circuits Overview + +On the private side, the ordering of the circuits is: + +1. PrivateKernelInit +2. PrivateKernelInner +3. 
PrivateKernelTail or PrivateKernelTailToPublic + +The structs are (irrelevant fields omitted): + +```mermaid +classDiagram + +class PrivateContextInputs { + +TxContext tx_context +} +PrivateContextInputs --> TxContext + +class PrivateCallData { + +PrivateCallStackItem call_stack_item +} +PrivateCallData --> PrivateCallStackItem + +class PrivateCallStackItem { + +AztecAddress contract_address + +PrivateCircuitPublicInputs public_inputs +} +PrivateCallStackItem --> PrivateCircuitPublicInputs + +class PrivateCircuitPublicInputs { + +TxContext tx_context + +bool is_fee_payer + +u32 min_revertible_side_effect_counter + +Field public_teardown_function_hash + +Header historical_header +} +PrivateCircuitPublicInputs --> TxContext +PrivateCircuitPublicInputs --> Header + +class PrivateKernelCircuitPublicInputs { + +u32 min_revertible_side_effect_counter + +AztecAddress fee_payer + +Field public_teardown_function_hash + +PrivateAccumulatedData end + +CombinedConstantData constants +} +PrivateKernelCircuitPublicInputs --> PrivateAccumulatedData +PrivateKernelCircuitPublicInputs --> CombinedConstantData + +class PrivateAccumulatedData { + +Field encrypted_log_preimages_length + +Field unencrypted_log_preimages_length + +Field[MAX_NEW_L2_TO_L1_MSGS_PER_TX] new_l2_to_l1_msgs + +SideEffect[MAX_ENCRYPTED_LOGS_PER_TX] encrypted_logs_hashes + +SideEffect[MAX_UNENCRYPTED_LOGS_PER_TX] unencrypted_logs_hashes + +SideEffect[MAX_NEW_NOTE_HASHES_PER_TX] new_note_hashes + +SideEffectLinkedToNoteHash[MAX_NEW_NULLIFIERS_PER_TX] new_nullifiers + +CallRequest[MAX_PRIVATE_CALL_STACK_LENGTH_PER_TX] private_call_stack + +CallRequest[MAX_PUBLIC_CALL_STACK_LENGTH_PER_TX] public_call_stack +} + +class CombinedConstantData { + +Header historical_header + +TxContext tx_context +} +CombinedConstantData --> Header +CombinedConstantData --> TxContext + +class Header { + +GlobalVariables global_variables +} +Header --> GlobalVariables + +class GlobalVariables { + +GasFees gas_fees +} +GlobalVariables --> 
GasFees + +class GasFees { + +Fr fee_per_da_gas + +Fr fee_per_l2_gas +} + +class TxContext { + +GasSettings gas_settings +} +TxContext --> GasSettings + +class GasSettings { + +Gas gas_limits + +Gas teardown_gas_allocations + +GasFees max_fees_per_gas + +Field max_inclusion_fee +} +GasSettings --> Gas +GasSettings --> GasFees + +class Gas { + +u32 l2_gas + +u32 da_gas +} + +class TxRequest { + +TxContext tx_context +} +TxRequest --> TxContext + +class PrivateKernelInitCircuitPrivateInputs { + +PrivateCallData private_call_data + +TxRequest tx_request +} +PrivateKernelInitCircuitPrivateInputs --> PrivateCallData +PrivateKernelInitCircuitPrivateInputs --> TxRequest + +class PrivateKernelInnerCircuitPrivateInputs { + +PrivateKernelData previous_kernel + +PrivateCallData private_call +} +PrivateKernelInnerCircuitPrivateInputs --> PrivateKernelData +PrivateKernelInnerCircuitPrivateInputs --> PrivateCallData + +class PrivateKernelData { + +PrivateKernelCircuitPublicInputs public_inputs +} +PrivateKernelData --> PrivateKernelCircuitPublicInputs + +class PrivateKernelTailCircuitPrivateInputs { + +PrivateKernelData previous_kernel +} +PrivateKernelTailCircuitPrivateInputs --> PrivateKernelData + +class PrivateKernelTailToPublicCircuitPrivateInputs { + +PrivateKernelData previous_kernel +} +PrivateKernelTailToPublicCircuitPrivateInputs --> PrivateKernelData + +``` + +## Private Context Initialization + +Whenever a private function is run, it has a `PrivateContext` associated with it, which is initialized in part from a `PrivateContextInputs` object. + +The [gas settings that users specify](./specifying-gas-fee-info.md) become part of the values in the `TxContext` within the `PrivateContextInputs` of the [entrypoint](./tx-setup-and-teardown.md#defining-setup). These values are copied to the `PrivateCircuitPublicInputs`. + +The same `TxContext` is provided as part of the `TxRequest` in the `PrivateKernelInitCircuitPrivateInputs`. 
This is done to ensure that the `TxContext` in the `PrivateCallData` (what was executed) matches the `TxContext` in the `TxRequest` (users' intent). + + +## Private Kernel Init + +The PrivateKernelInit circuit takes in a `PrivateCallData` and a `TxRequest` and outputs a `PrivateKernelCircuitPublicInputs`. + +It must: + +- check that the `TxContext` provided in the `TxRequest` input matches the `TxContext` in the `PrivateCallData` +- copy the `TxContext` from the `TxRequest` to the `PrivateKernelCircuitPublicInputs.constants.tx_context` +- copy the `Header` from the `PrivateCircuitPublicInputs` to the `PrivateKernelCircuitPublicInputs.constants.historical_header` +- set the min_revertible_side_effect_counter if it is present in the `PrivateCallData` +- set the `fee_payer` if the `is_fee_payer` flag is set in the `PrivateCircuitPublicInputs` +- set the `public_teardown_function_hash` if it is present in the `PrivateCircuitPublicInputs` + +## Private Kernel Inner + +The PrivateKernelInner circuit takes in a `PrivateKernelData` and a `PrivateCallData` and ultimately outputs a `PrivateKernelCircuitPublicInputs`. + +It must: + +- set the `fee_payer` if the `is_fee_payer` flag is set in the `PrivateCircuitPublicInputs` (and is not set in the input `PrivateKernelData`) +- set the `public_teardown_function_hash` if it is present in the `PrivateCircuitPublicInputs` (and is not set in the input `PrivateKernelData`) +- copy the constants from the `PrivateKernelData` to the `PrivateKernelCircuitPublicInputs.constants` + +## Private Kernel Tail + +The PrivateKernelTail circuit takes in a `PrivateKernelData` and outputs a `KernelCircuitPublicInputs` (see diagram below). + +This is only used when there are no enqueued public functions or public teardown functions.
+ +It must: + +- check that there are no enqueued public functions or public teardown function +- compute the gas used + - this will only include DA gas *and* any gas specified in the `teardown_gas_allocations` +- ensure the gas used is less than the gas limits +- ensure that `fee_payer` is set, and set it in the `KernelCircuitPublicInputs` +- copy the constants from the `PrivateKernelData` to the `KernelCircuitPublicInputs.constants` + +:::note +Transactions without a public component can safely set their teardown gas allocations to zero. They are included as part of the gas computation in the private kernel tail for consistency (limits always include teardown gas allocations) and future-compatibility if we have a need for private teardown functions. +::: + +## Private Kernel Tail to Public + +The PrivateKernelTailToPublic circuit takes in a `PrivateKernelData` and outputs a `PublicKernelCircuitPublicInputs` (see diagram below). + +This is only used when there are enqueued public functions or a public teardown function. + +It must: + +- check that there are enqueued public functions or a public teardown function +- partition the side effects produced during private execution into revertible and non-revertible sets of `PublicAccumulatedData` +- compute gas used for the revertible and non-revertible. Both sets can have a DA component, but the revertible set will also include the teardown gas allocations the user specified (if any). This ensures that the user effectively pre-pays for the gas consumed in teardown. +- ensure that `fee_payer` is set, and set it in the `PublicKernelCircuitPublicInputs` +- copy the constants from the `PrivateKernelData` to the `PublicKernelCircuitPublicInputs.constants` + +# Mempool/Node Validation + +A `Tx` broadcasted to the network has: + +``` +Tx { + /** + * Output of the private kernel circuit for this tx. + */ + data: PrivateKernelTailCircuitPublicInputs, + /** + * Proof from the private kernel circuit. 
+ */ + proof: Proof, + /** + * Encrypted logs generated by the tx. + */ + encryptedLogs: EncryptedTxL2Logs, + /** + * Unencrypted logs generated by the tx. + */ + unencryptedLogs: UnencryptedTxL2Logs, + /** + * Enqueued public functions from the private circuit to be run by the sequencer. + * Preimages of the public call stack entries from the private kernel circuit output. + */ + enqueuedPublicFunctionCalls: PublicCallRequest[], + /** + * Public teardown function from the private circuit to be run by the sequencer. + * Preimage of the public teardown function hash from the private kernel circuit output. + */ + publicTeardownFunctionCall: PublicCallRequest, +} +``` + +Where the `PrivateKernelTailCircuitPublicInputs` may be destined for the base rollup (if there is no public component), or the public kernel circuits (if there is a public component). + +Regardless, it has a `fee_payer` set. + +When a node receives a transaction, it must check that: +1. the `fee_payer` is set +2. the `fee_payer` has a balance of [FPA](./fee-payment-asset.md) greater than the computed [transaction fee](./specifying-gas-fee-info.md#transaction-fee) if the transaction has no public component +3. the `fee_payer` has a balance of FPA greater than the computed [max transaction fee](./specifying-gas-fee-info.md#maximum-transaction-fee) if the transaction has a public component + +See other [validity conditions](../transactions/validity.md). + +# Public Kernel Circuits + +On the public side, the order of the circuits is: +1. PublicKernelSetup +2. PublicKernelAppLogic +3. PublicKernelTeardown +4. 
PublicKernelTail + +The structs are (irrelevant fields omitted): + +```mermaid +classDiagram + +class Gas { + +u32 l2_gas + +u32 da_gas +} + +class PublicKernelSetupCircuitPrivateInputs { + +PublicKernelData previous_kernel + +PublicCallData public_call +} +PublicKernelSetupCircuitPrivateInputs --> PublicKernelData +PublicKernelSetupCircuitPrivateInputs --> PublicCallData + +class PublicKernelData { + +PublicKernelCircuitPublicInputs public_inputs +} +PublicKernelData --> PublicKernelCircuitPublicInputs + +class PublicKernelCircuitPublicInputs { + +PublicAccumulatedData end_non_revertible + +PublicAccumulatedData end + +CombinedConstantData constants + +PublicConstantData public_constants + +u8 revert_code +} +PublicKernelCircuitPublicInputs --> PublicAccumulatedData +PublicKernelCircuitPublicInputs --> CombinedConstantData + +class CombinedConstantData { + +Header historical_header + +TxContext tx_context +} + +class PublicConstantData { + +AztecAddress fee_payer + +Field public_teardown_function_hash +} + +class PublicAccumulatedData { + +Field encrypted_log_preimages_length + +Field unencrypted_log_preimages_length + +Field[MAX_NEW_L2_TO_L1_MSGS_PER_TX] new_l2_to_l1_msgs + +SideEffect[MAX_ENCRYPTED_LOGS_PER_TX] encrypted_logs_hashes + +SideEffect[MAX_UNENCRYPTED_LOGS_PER_TX] unencrypted_logs_hashes + +SideEffect[MAX_NEW_NOTE_HASHES_PER_TX] new_note_hashes + +SideEffectLinkedToNoteHash[MAX_NEW_NULLIFIERS_PER_TX] new_nullifiers + +CallRequest[MAX_PUBLIC_CALL_STACK_LENGTH_PER_TX] public_call_stack + +PublicDataUpdateRequest[MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX] public_data_update_requests + +Gas gas_used +} +PublicAccumulatedData --> Gas + +class PublicCallData { + +PublicCallStackItem call_stack_item +} +PublicCallData --> PublicCallStackItem + +class PublicCallStackItem { + +PublicCircuitPublicInputs public_inputs +} +PublicCallStackItem --> PublicCircuitPublicInputs + +class PublicCircuitPublicInputs { + +u8: revert_code + +Gas start_gas_left + +Gas 
end_gas_left + +Field transaction_fee + +GlobalVariables global_variables +} +PublicCircuitPublicInputs --> Gas +PublicCircuitPublicInputs --> GlobalVariables + +class PublicKernelAppLogicCircuitPrivateInputs { + +PublicKernelData previous_kernel + +PublicCallData public_call +} +PublicKernelAppLogicCircuitPrivateInputs --> PublicKernelData +PublicKernelAppLogicCircuitPrivateInputs --> PublicCallData + +class PublicKernelTeardownCircuitPrivateInputs { + +PublicKernelData previous_kernel + +PublicCallData public_call +} +PublicKernelTeardownCircuitPrivateInputs --> PublicKernelData +PublicKernelTeardownCircuitPrivateInputs --> PublicCallData + +class PublicKernelTailCircuitPrivateInputs { + +PublicKernelData previous_kernel +} +PublicKernelTailCircuitPrivateInputs --> PublicKernelData + +class KernelCircuitPublicInputs { + +CombinedAccumulatedData end + +CombinedConstantData constants + +PartialStateReference start_state + +u8 revert_code + +AztecAddress fee_payer +} +KernelCircuitPublicInputs --> CombinedAccumulatedData +KernelCircuitPublicInputs --> CombinedConstantData + + +class CombinedAccumulatedData { + +Field encrypted_log_preimages_length + +Field unencrypted_log_preimages_length + +Field encrypted_logs_hash + +Field unencrypted_logs_hash + +Field[MAX_NEW_L2_TO_L1_MSGS_PER_TX] new_l2_to_l1_msgs + +Field[MAX_NEW_NOTE_HASHES_PER_TX] new_note_hashes + +Field[MAX_NEW_NULLIFIERS_PER_TX] new_nullifiers + +PublicDataUpdateRequest[MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX] public_data_update_requests + +Gas gas_used +} +CombinedAccumulatedData --> Gas + +class PublicContextInputs { + +Header historical_header + +GlobalVariables public_global_variables + +Gas gas_left + +Field transaction_fee +} +PublicContextInputs --> Header +PublicContextInputs --> GlobalVariables +PublicContextInputs --> Gas + +class GlobalVariables { + +GasFees gas_fees +} +GlobalVariables --> GasFees + +class GasFees { + +Fr fee_per_da_gas + +Fr fee_per_l2_gas +} + +``` + +## Public Context 
Initialization + +Whenever a public function is run, it has a `PublicContext` associated with it, which is initialized in part from a `PublicContextInputs` object. + +The sequencer must provide information including the current `gas_fees`, the current `gas_left`, and the `transaction_fee`, but we cannot trust these values to be correct: we must compute the correct values in the public kernel circuits, and validate that the sequencer provided the correct values. + +Further, the sequencer is only obligated to provide the `transaction_fee` to the teardown function, as that is the only point at which the transaction fee can be known. + +## Public Circuit Public Inputs + +The "outputs" of the public functions are coming from the public VM. + +Therefore, once we verify that the `start_gas_left` which the sequencer provided is correct, we can trust the `end_gas_left` that the public VM reports. + +Further, we can trust that the `transaction_fee` the public VM reported is the one which was made available to the public functions during teardown (though we must verify that the sequencer provided the correct value). + +## Public Kernel Setup + +The PublicKernelSetup circuit takes in a `PublicKernelData` and a `PublicCallData` and outputs a `PublicKernelCircuitPublicInputs`. + +It must assert that the `revert_code` in the `PublicCircuitPublicInputs` is equal to zero. + +It must assert that the `public_call.call_stack_item.public_inputs.global_variables.gas_fees` are valid according to the [update rules defined](./published-gas-and-fee-data.md#updating-the-gasfees-object). + +It must compute the gas used in the `PublicKernelData` provided, and verify that the `gas_limits` in the `PublicKernelData`'s `TxContext` *minus* the computed `gas_used` is equal to the `start_gas_left` specified on the `PublicCircuitPublicInputs`. + +This ensures that the public VM was provided with the correct starting gas values. 
+ +It must update the gas used in `end_non_revertible` as: + +```rust +# assume the previous PublicKernelCircuitPublicInputs was copied to circuit_outputs +pub fn update_non_revertible_gas_used(public_call: PublicCallData, circuit_outputs: &mut PublicKernelCircuitPublicInputsBuilder) { + let tx_gas_limits = circuit_outputs.constants.tx_context.gas_settings.gas_limits; + let call_gas_left = public_call.call_stack_item.public_inputs.end_gas_left; + let accum_end_gas_used = circuit_outputs.end.gas_used; + + circuit_outputs.end_non_revertible.gas_used = tx_gas_limits + .sub(call_gas_left) + .sub(accum_end_gas_used); +} +``` + +:::note global gas limit for all enqueued public calls +**Within** the AVM, users may specify gas limits for each public function call. This **does not apply** to the "top-level" enqueued call: they all pull from the same global gas limit, and there is no way to "catch" an "out of gas" at this top-level apart from reverting. +::: + +## Public Kernel App Logic + +The PublicKernelAppLogic circuit takes in a `PublicKernelData` and a `PublicCallData` and outputs a `PublicKernelCircuitPublicInputs`. + +It must perform the same computation as the PublicKernelSetup regarding verification of the `start_gas_left` and the `gas_fees`. + +It must check the `revert_code` in the `PublicCircuitPublicInputs`. 
+ +### If the `revert_code` is zero + +Instead of updating `end_non_revertible`, it must update `end` as: + +```rust +# assume the previous PublicKernelCircuitPublicInputs was copied to circuit_outputs +pub fn update_revertible_gas_used(public_call: PublicCallData, circuit_outputs: &mut PublicKernelCircuitPublicInputsBuilder) { + let tx_gas_limits = circuit_outputs.constants.tx_context.gas_settings.gas_limits; + let call_gas_left = public_call.call_stack_item.public_inputs.end_gas_left; + let accum_end_non_revertible_gas_used = circuit_outputs.end_non_revertible.gas_used; + + circuit_outputs.end.gas_used = tx_gas_limits + .sub(call_gas_left) + .sub(accum_end_non_revertible_gas_used); +} +``` + +### If the `revert_code` is non-zero + +All side effects from the `revertible` set are discarded. + +It consumes all the gas left: + +```rust +# assume the previous PublicKernelCircuitPublicInputs was copied to circuit_outputs +pub fn update_revertible_gas_used(public_call: PublicCallData, circuit_outputs: &mut PublicKernelCircuitPublicInputsBuilder) { + let tx_gas_limits = circuit_outputs.constants.tx_context.gas_settings.gas_limits; + let accum_end_non_revertible_gas_used = circuit_outputs.end_non_revertible.gas_used; + + circuit_outputs.end.gas_used = tx_gas_limits + .sub(accum_end_non_revertible_gas_used); +} +``` + +It sets the `revert_code` in `PublicKernelCircuitPublicInputs` to `1`. + +:::note Gas reserved for public teardown +Recall in the [Private Kernel Tail to Public](#private-kernel-tail-to-public) circuit, the gas allocated for the public teardown function was included in the `end` gas used. This ensures that we have gas available for teardown even though app logic consumed all gas. +::: + +:::warning +Consuming all gas left in the event of revert creates incentives for the sequencer to arrange transactions such that they revert, which is suboptimal. 
Future improvements will likely address this by only consuming the gas that was actually used, even in the event of revert. +::: + +## Public Kernel Teardown + +The PublicKernelTeardown circuit takes in a `PublicKernelData` and a `PublicCallData` and outputs a `PublicKernelCircuitPublicInputs`. + +It must perform the same computation as the PublicKernelSetup regarding verification of the `gas_fees`. + +It must assert that the `start_gas_left` is equal to the PublicKernelData's `public_inputs.constants.tx_context.gas_settings.teardown_gas_allocations` + +It must also compute the gas used in the `PublicKernelData` provided, and the [transaction fee](./specifying-gas-fee-info.md#transaction-fee) using this computed value, then verify that the `transaction_fee` in the `PublicCircuitPublicInputs` is equal to the computed transaction fee. + +This ensures that the public VM was provided with the correct transaction fee, and that teardown did not exceed the gas limits. + +### Handling reverts + +Teardown is attempted even if the app logic failed. + +The teardown kernel can see if the app logic failed by checking if `revert_code` in the `PublicKernelCircuitPublicInputs` is set to `1`. + +It also has access to the `revert_code` reported by the AVM of the current call within `PublicCircuitPublicInputs`. + +The interplay between these two `revert_code`s is as follows: + +| Kernel `revert_code` | current AVM `revert_code` | Resulting Kernel `revert_code` | +| -------------------- | ------------------------- | ------------------------------ | +| 0 | 0 | 0 | +| 1 | 0 | 1 | +| 0 | 1 | 2 | +| 1 | 1 | 3 | +| 2 or 3 | (any) | (unchanged) | + + +# Base Rollup Kernel Circuit + +The base rollup kernel circuit takes in a `KernelData`, which contains a `KernelCircuitPublicInputs`, which it uses to compute the `transaction_fee`. + +Additionally, it verifies that the max fees per gas specified by the user are greater than the current block's fees per gas. 
+ +After the public data writes specific to this transaction have been processed, and a new tree root is produced, the kernel circuit injects an additional public data write based upon that root which deducts the transaction fee from the `fee_payer`'s balance. + diff --git a/docs/docs/protocol-specs/gas-and-fees/published-gas-and-fee-data.md b/docs/docs/protocol-specs/gas-and-fees/published-gas-and-fee-data.md new file mode 100644 index 000000000000..f46f80856225 --- /dev/null +++ b/docs/docs/protocol-specs/gas-and-fees/published-gas-and-fee-data.md @@ -0,0 +1,30 @@ +--- +title: Published Gas & Fee Data +--- + +# Published Gas & Fee Data + +When a block is published to L1, it includes information about the gas and fees at a block-level, and at a transaction-level. + +# Block-level Data + +The block header contains a `GlobalVariables`, which contains a `GasFees` object. This object contains the following fields: +- `feePerDaGas`: The fee in [FPA](./fee-payment-asset.md) per unit of DA gas consumed for transactions in the block. +- `feePerL2Gas`: The fee in FPA per unit of L2 gas consumed for transactions in the block. + +## Updating the `GasFees` Object + +Presently, the `feePerDaGas` and `feePerL2Gas` are fixed at `1` FPA per unit of DA gas and L2 gas consumed, respectively. + +In the future, these values may be updated dynamically based on network conditions. + +:::note Gas Targets +Should we move to a 1559-style fee market with block-level gas targets, there is an interesting point where gas "used" presently includes the entire [`teardown_gas_allocation`](./specifying-gas-fee-info.md) regardless of how much of that allocation was spent. In the future, if this becomes a concern, we can update our accounting to reflect the true gas used for the purposes of updating the `GasFees` object, though the user will be charged the full `teardown_gas_allocation` regardless. 
+::: + +# Transaction-level Data + +The transaction data which is published to L1 is a `TxEffects` object, which includes +- `transaction_fee`: the fee paid by the transaction in FPA + + diff --git a/docs/docs/protocol-specs/gas-and-fees/specifying-gas-fee-info.md b/docs/docs/protocol-specs/gas-and-fees/specifying-gas-fee-info.md new file mode 100644 index 000000000000..fca9c67c86f5 --- /dev/null +++ b/docs/docs/protocol-specs/gas-and-fees/specifying-gas-fee-info.md @@ -0,0 +1,138 @@ +--- +title: Specifying Gas & Fee Info +--- + +# Specifying Gas & Fee Info + +When users submit a `TxExecutionRequest` on the Aztec Network, they provide a `TxContext`, which holds `GasSettings` for the transaction. + +An abridged version of the class diagram is shown below: + +```mermaid +classDiagram +class TxExecutionRequest { + +TxContext txContext +} + +class TxContext { + +GasSettings gasSettings +} + +class GasSettings { + +Gas gasLimits + +Gas teardownGasLimits + +GasFees maxFeesPerGas + +Fr maxInclusionFee +} + +class Gas { + +UInt32 daGas + +UInt32 l2Gas +} + +class GasFees { + +Fr feePerDaGas + +Fr feePerL2Gas +} + +TxContext --> GasSettings +GasSettings --> Gas +GasSettings --> GasFees +``` + +:::note +All fees are denominated in the [Fee Payment Asset (FPA)](./fee-payment-asset.md). +::: + +# Gas Dimensions and Max Inclusion Fee + +Transactions are metered for their gas consumption across two dimensions: + +1. **Data Availability (DA) Gas**: This dimension measures data usage by the transaction, e.g. creating/spending notes, emitting logs, etc. +2. **Layer 2 (L2) Gas**: This dimension measures computation usage of the public VM. + +This is similar to the gas model in Ethereum, where transaction consume gas to perform operations, and may also consume blob gas for storing data. + +Separately, every transaction has overhead costs associated with it, e.g. 
verifying its encompassing rollup proof on L1. These costs are captured in the `maxInclusionFee`, which is not tied to the transaction's gas consumption, but is specified directly in FPA.
These are held in the L2 block's `Header`.
+ +When a call stack item is found with `is_fee_payer` set, the kernel circuit will set `fee_payer` in its `PrivateKernelCircuitPublicInputs` to be the `callStackItem.contractAddress`. + +This is subsequently passed through the `PublicKernelCircuitPublicInputs` to the `KernelCircuitPublicInputs`. + +If the `fee_payer` is not set, the transaction will be considered invalid. + +If a transaction attempts to set `fee_payer` multiple times, the transaction will be considered invalid. diff --git a/docs/docs/protocol-specs/gas-and-fees/tx-setup-and-teardown.md b/docs/docs/protocol-specs/gas-and-fees/tx-setup-and-teardown.md new file mode 100644 index 000000000000..08021198b375 --- /dev/null +++ b/docs/docs/protocol-specs/gas-and-fees/tx-setup-and-teardown.md @@ -0,0 +1,99 @@ +--- +title: Transaction Setup and Teardown +--- + +# Transaction Setup and Teardown + +All transactions on the Aztec network have a private component, which is processed locally, and optionally have a public component, which is processed by sequencers using the [Public VM (AVM)](../public-vm/intro.md). + +Transactions are broken into distinct phases: + +1. Private setup +2. Private app logic +3. Public setup +4. Public app logic +5. Public teardown +6. Base rollup + +The private setup phase is used to specify what public function will be called for public teardown, and what entity will pay the transaction fee (i.e. the `fee_payer`). + +The "setup" phases are "non-revertible", meaning that if execution fails, the transaction is considered invalid and cannot be included in a block. + +If execution fails in the private app logic phase, the user will not be able to generate a valid proof of their private computation, so the transaction will not be included in a block. + +If the execution fails in the public app logic the _side effects_ from private app logic and public app logic will be reverted, but the transaction can still be included in a block. Execution then proceeds to the public teardown phase. 
+ +If the execution fails in the public teardown phase, the _side effects_ from private app logic, public app logic, and public teardown will be reverted, but the transaction can still be included in a block. Execution then proceeds to the base rollup phase. + +In the event of a failure in public app logic or teardown, the user is charged their full [gas limit](./specifying-gas-fee-info.md#gaslimits-and-teardowngaslimits) for the transaction across all dimensions. + +The public teardown phase is the only phase where the final transaction fee is available to public functions. [See more](./specifying-gas-fee-info.md#gaslimits-and-teardowngaslimits). + +In the base rollup, the kernel circuit injects a public data write that levies the transaction fee on the `fee_payer`. + +# An example: Fee Abstraction + +Consider a user, Alice, who does not have FPA but wishes to interact with the network. Suppose she has a private balance of a fictitious asset "BananaCoin" that supports public and private balances. + +Suppose there is a Fee Payment Contract (FPC) that has been deployed by another user to the network. Alice can structure her transaction as follows: + +0. Before the transaction, Alice creates a private authwit in her wallet, allowing the FPC to unshield a specified amount of BananaCoin from Alice's private balance to the FPC's public balance. +1. Private setup: + - Alice calls a private function on the FPC which is exposed for public fee payment in BananaCoin. + - The FPC checks that the amount of teardown gas Alice has allocated is sufficient to cover the gas associated with the teardown function it will use to provide a refund to Alice. + - The FPC specifies its teardown function as the one the transaction will use. + - The FPC enqueues a public call to itself for the public setup phase. + - The FPC designates itself as the `fee_payer`. +2. Private app logic: + - Alice performs an arbitrary computation in private, potentially consuming DA gas. +3. 
Public setup: + - The FPC transfers the specified amount of BananaCoin from Alice to itself. +4. Public app logic: + - Alice performs an arbitrary computation in public, potentially consuming DA and L2 gas. +5. Public teardown: + - The FPC looks at `transaction_fee` to compute Alice's corresponding refund of BananaCoin. + - The FPC transfers the refund to Alice via a pending shield. +6. Base rollup: + - The Base rollup kernel circuit injects a public data write that levies the transaction fee on the `fee_payer`. + +This illustrates the utility of the various phases. In particular, we see why the setup phase must not be revertible: if Alice's public app logic fails, the FPC is still going to pay the fee in the base rollup; if public setup were revertible, the transfer of Alice's BananaCoin would revert so the FPC would be losing money. + +# Sequencer Whitelisting + +Because a transaction is invalid if it fails in the public setup phase, sequencers are taking a risk by processing them. To mitigate this risk, it is expected that sequencers will only process transactions that use public functions that they have whitelisted. + +# Defining Setup + +The private function that is executed first is referred to as the "entrypoint". + +Tracking which side effects belong to setup versus app logic is done by keeping track of [side effect counters](../circuits/private-kernel-initial.mdx#processing-a-private-function-call), and storing the value of the counter at which the setup phase ends within the private context. + +This value is stored in the `PrivateContext` as the `min_revertible_side_effect_counter`, and is set by calling `context.end_setup()`. + +This is converted into the `PrivateCircuitPublicInputs` as `min_revertible_side_effect_counter`. + +Execution of the entrypoint is always verified/processed by the `PrivateKernelInit` circuit. 
It is only the `PrivateKernelInit` circuit that looks at the `min_revertible_side_effect_counter` as reported by `PrivateCircuitPublicInputs`, and thus it is only the entrypoint that can effectively call `context.end_setup()`.
+ +If a transaction does not have any enqueued public functions, and does not have a public teardown function, then the `PrivateKernelTail` is used instead of the `PrivateKernelTailToPublic`, and no partitioning is done. diff --git a/docs/docs/protocol-specs/public-vm/gen/_instruction-set.mdx b/docs/docs/protocol-specs/public-vm/gen/_instruction-set.mdx index 0870b1400613..2b9540ac569a 100644 --- a/docs/docs/protocol-specs/public-vm/gen/_instruction-set.mdx +++ b/docs/docs/protocol-specs/public-vm/gen/_instruction-set.mdx @@ -1178,6 +1178,7 @@ context.machineState.pc = loc`} - **Details**: Target location is an immediate value (a constant in the bytecode). - **Bit-size**: 48 +[![](/img/protocol-specs/public-vm/bit-formats/INTERNALCALL.png)](/img/protocol-specs/public-vm/bit-formats/INTERNALCALL.png) ### `INTERNALRETURN` Return from an internal call. Pop from the internal call stack and jump to the popped location. diff --git a/docs/docs/protocol-specs/transactions/validity.md b/docs/docs/protocol-specs/transactions/validity.md index d109bded1692..5a179ee51840 100644 --- a/docs/docs/protocol-specs/transactions/validity.md +++ b/docs/docs/protocol-specs/transactions/validity.md @@ -20,5 +20,6 @@ Mike review: If we have written definitions for the various kinds of "`data`" de - The `unencryptedLogs` should match the `unencryptedLogsHash` and `unencryptedLogPreimagesLength` in the transaction `data`. - Each public call stack item in the transaction `data` should have a corresponding preimage in the `enqueuedPublicFunctionCalls`. - Each new contract data in transaction `data` should have a corresponding preimage in the `newContracts`. +- **Able to pay fee**: The [fee can be paid](../gas-and-fees/kernel-tracking.md#mempoolnode-validation). Note that all checks but the last one are enforced by the base rollup circuit when the transaction is included in a block. 
diff --git a/docs/sidebars.js b/docs/sidebars.js index 0884c74caeb0..defdf9972108 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -762,8 +762,12 @@ const sidebars = { type: "category", link: { type: "doc", id: "protocol-specs/gas-and-fees/index" }, items: [ - "protocol-specs/gas-and-fees/fee-payments-and-metering", + "protocol-specs/gas-and-fees/fee-payment-asset", + "protocol-specs/gas-and-fees/specifying-gas-fee-info", + "protocol-specs/gas-and-fees/tx-setup-and-teardown", + "protocol-specs/gas-and-fees/kernel-tracking", "protocol-specs/gas-and-fees/fee-schedule", + "protocol-specs/gas-and-fees/published-gas-and-fee-data", ], }, { diff --git a/l1-contracts/src/core/libraries/ConstantsGen.sol b/l1-contracts/src/core/libraries/ConstantsGen.sol index d75ec0cc5453..4ad6b9baab72 100644 --- a/l1-contracts/src/core/libraries/ConstantsGen.sol +++ b/l1-contracts/src/core/libraries/ConstantsGen.sol @@ -144,8 +144,9 @@ library Constants { + MAX_PUBLIC_CALL_STACK_LENGTH_PER_CALL + (SIDE_EFFECT_LENGTH * MAX_NEW_NOTE_HASHES_PER_CALL) + (SIDE_EFFECT_LINKED_TO_NOTE_HASH_LENGTH * MAX_NEW_NULLIFIERS_PER_CALL) + (L2_TO_L1_MESSAGE_LENGTH * MAX_NEW_L2_TO_L1_MSGS_PER_CALL) + 2 - + (SIDE_EFFECT_LENGTH * MAX_UNENCRYPTED_LOGS_PER_CALL) + 1 + HEADER_LENGTH + AZTEC_ADDRESS_LENGTH /* revert_code */ - + 1 + 2 * GAS_LENGTH /* transaction_fee */ + 1; + + (SIDE_EFFECT_LENGTH * MAX_UNENCRYPTED_LOGS_PER_CALL) + 1 + HEADER_LENGTH + + GLOBAL_VARIABLES_LENGTH + AZTEC_ADDRESS_LENGTH /* revert_code */ + 1 + 2 * GAS_LENGTH /* transaction_fee */ + + 1; uint256 internal constant PRIVATE_CALL_STACK_ITEM_LENGTH = AZTEC_ADDRESS_LENGTH + FUNCTION_DATA_LENGTH + PRIVATE_CIRCUIT_PUBLIC_INPUTS_LENGTH; uint256 internal constant ENQUEUE_PUBLIC_FUNCTION_CALL_RETURN_LENGTH = diff --git a/noir-projects/aztec-nr/.gitrepo b/noir-projects/aztec-nr/.gitrepo index a42b9dcbaede..6d9c9a29f805 100644 --- a/noir-projects/aztec-nr/.gitrepo +++ b/noir-projects/aztec-nr/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = 
https://github.com/AztecProtocol/aztec-nr branch = master - commit = 0ac78cba5344f4416a18540271033f9949a2e13a + commit = 071b146a0fa3951fdd05b2a2732bac331bc79f73 method = merge cmdver = 0.4.6 - parent = d919e2c74a2e5d6332a1ec8ab261ede3d9d204d4 + parent = 2a14f3b48f79177094fc77fd3cc22bf779515ad0 diff --git a/noir-projects/aztec-nr/aztec/src/context/private_context.nr b/noir-projects/aztec-nr/aztec/src/context/private_context.nr index 43006332b78c..184849fff95e 100644 --- a/noir-projects/aztec-nr/aztec/src/context/private_context.nr +++ b/noir-projects/aztec-nr/aztec/src/context/private_context.nr @@ -9,7 +9,7 @@ use crate::{ }; use dep::protocol_types::{ abis::{ - gas::Gas, call_context::CallContext, function_data::FunctionData, + global_variables::GlobalVariables, gas::Gas, call_context::CallContext, function_data::FunctionData, function_selector::FunctionSelector, max_block_number::MaxBlockNumber, nullifier_key_validation_request::NullifierKeyValidationRequest, private_call_stack_item::PrivateCallStackItem, @@ -500,6 +500,7 @@ impl PrivateContext { unencrypted_logs_hashes: [SideEffect::empty(); MAX_UNENCRYPTED_LOGS_PER_CALL], unencrypted_log_preimages_length: 0, historical_header: Header::empty(), + global_variables: GlobalVariables::empty(), prover_address: AztecAddress::zero(), revert_code: 0, start_gas_left: Gas::empty(), diff --git a/noir-projects/aztec-nr/aztec/src/context/public_context.nr b/noir-projects/aztec-nr/aztec/src/context/public_context.nr index acda01b17b6d..80182425b17f 100644 --- a/noir-projects/aztec-nr/aztec/src/context/public_context.nr +++ b/noir-projects/aztec-nr/aztec/src/context/public_context.nr @@ -167,6 +167,7 @@ impl PublicContext { unencrypted_logs_hashes: self.unencrypted_logs_hashes.storage, unencrypted_log_preimages_length, historical_header: self.inputs.historical_header, + global_variables: self.inputs.public_global_variables, prover_address: self.prover_address, revert_code: 0, start_gas_left: self.inputs.gas_left, diff 
--git a/noir-projects/noir-protocol-circuits/crates/private-kernel-lib/src/private_kernel_init.nr b/noir-projects/noir-protocol-circuits/crates/private-kernel-lib/src/private_kernel_init.nr index b348adc4ad04..f83df8a71aee 100644 --- a/noir-projects/noir-protocol-circuits/crates/private-kernel-lib/src/private_kernel_init.nr +++ b/noir-projects/noir-protocol-circuits/crates/private-kernel-lib/src/private_kernel_init.nr @@ -19,10 +19,10 @@ struct PrivateKernelInitCircuitPrivateInputs { impl PrivateKernelInitCircuitPrivateInputs { fn initialize_end_values(self, public_inputs: &mut PrivateKernelCircuitPublicInputsBuilder) { - public_inputs.constants = CombinedConstantData { - historical_header: self.private_call.call_stack_item.public_inputs.historical_header, - tx_context: self.tx_request.tx_context, - }; + public_inputs.constants = CombinedConstantData::private( + self.private_call.call_stack_item.public_inputs.historical_header, + self.tx_request.tx_context, + ); public_inputs.min_revertible_side_effect_counter = self.private_call.call_stack_item.public_inputs.min_revertible_side_effect_counter; } diff --git a/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/common.nr b/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/common.nr index 855da0a6ec4f..9004bbec8c8e 100644 --- a/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/common.nr +++ b/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/common.nr @@ -4,7 +4,8 @@ use dep::types::{ kernel_circuit_public_inputs::PublicKernelCircuitPublicInputsBuilder, kernel_data::PublicKernelData, public_call_data::PublicCallData, public_data_read::PublicDataRead, public_data_update_request::PublicDataUpdateRequest, read_request::ReadRequestContext, - side_effect::{SideEffect, SideEffectLinkedToNoteHash} + side_effect::{SideEffect, SideEffectLinkedToNoteHash}, global_variables::GlobalVariables, + combined_constant_data::CombinedConstantData }, address::AztecAddress, 
contrakt::{storage_read::StorageRead, storage_update_request::StorageUpdateRequest}, @@ -33,6 +34,34 @@ pub fn validate_inputs(public_call: PublicCallData) { assert(public_call.bytecode_hash != 0, "Bytecode hash cannot be zero"); } +pub fn validate_public_call_global_variables(public_call: PublicCallData, constants: CombinedConstantData) { + let public_call_globals = public_call.call_stack_item.public_inputs.global_variables; + assert( + public_call_globals == constants.global_variables, "Global variables injected into the public call do not match constants" + ); +} + +// Validates constants injected into the public call are correct. +// Note that the previous_kernel.public_inputs.constants returned from the private kernel tail +// will be empty, so in the first run on of this circuit we load them from the first public +// call, following the same pattern as in the private_kernel_init. +// TODO(@spalladino): This can be a security risk since it allows a sequencer to run public +// circuits with empty global variables. This must be patched by having a differentiated init public +// circuit that runs only once, or by having a way to differentiate when we're coming from a private +// kernel tail vs from another public run. 
+pub fn initialize_from_or_validate_public_call_variables( + previous_kernel: PublicKernelData, + public_call: PublicCallData, + public_inputs: &mut PublicKernelCircuitPublicInputsBuilder +) { + if public_inputs.constants.global_variables.is_empty() { + let public_call_global_variables = public_call.call_stack_item.public_inputs.global_variables; + public_inputs.constants.global_variables = public_call_global_variables; + } else { + validate_public_call_global_variables(public_call, previous_kernel.public_inputs.constants); + } +} + pub fn validate_public_call_non_revert(public_call: PublicCallData) { assert(public_call.call_stack_item.public_inputs.revert_code == 0, "Public call cannot be reverted"); } diff --git a/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/public_kernel_app_logic.nr b/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/public_kernel_app_logic.nr index e372e50f43d3..07beccedc3c8 100644 --- a/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/public_kernel_app_logic.nr +++ b/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/public_kernel_app_logic.nr @@ -30,6 +30,9 @@ impl PublicKernelAppLogicCircuitPrivateInputs { // validate the inputs common to all invocation circumstances common::validate_inputs(self.public_call); + // validate constants injected into the public call are correct or set them if this is the first public call + common::initialize_from_or_validate_public_call_variables(self.previous_kernel, self.public_call, &mut public_inputs); + // validate the inputs unique to having a previous public kernel self.validate_inputs(); @@ -485,4 +488,25 @@ mod tests { builder.failed(); } + + #[test] + fn propagates_global_variables_if_empty() { + let mut builder = PublicKernelAppLogicCircuitPrivateInputsBuilder::new(); + + builder.public_call.public_inputs.global_variables.block_number = 11; + + let public_inputs = builder.execute(); + + 
assert_eq(public_inputs.constants.global_variables.block_number, 11); + } + + #[test(should_fail_with="Global variables injected into the public call do not match constants")] + fn validates_global_variables() { + let mut builder = PublicKernelAppLogicCircuitPrivateInputsBuilder::new(); + + builder.previous_kernel.global_variables.block_number = 10; + builder.public_call.public_inputs.global_variables.block_number = 11; + + builder.failed(); + } } diff --git a/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/public_kernel_setup.nr b/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/public_kernel_setup.nr index e918d78ad64d..d81f22114b70 100644 --- a/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/public_kernel_setup.nr +++ b/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/public_kernel_setup.nr @@ -8,7 +8,8 @@ struct PublicKernelSetupCircuitPrivateInputs { // Note: One might think that our previous_kernel ought to be // a PrivateKernelTailData. However, we instead supply a PublicKernelData. // This is because PrivateKernelTailData is a subset of PublicKernelData. - // And we just initialize the missing values to zero in TS before passing it to the circuit. + // And we just initialize the missing values to zero in TS before passing it to the circuit, + // except for the constants.global_variables which we populate with the current block values. // This is a bit of a hack, but it allows us to reuse the setup circuit until // the setup phase of the public kernel is complete. 
Maybe in a perfect world we would // have a SetupInit, SetupInner, etc, but this will change anyway once the public VM is able to @@ -36,6 +37,9 @@ impl PublicKernelSetupCircuitPrivateInputs { // validate the inputs common to all invocation circumstances common::validate_inputs(self.public_call); + // validate constants injected into the public call are correct or set them if this is the first public call + common::initialize_from_or_validate_public_call_variables(self.previous_kernel, self.public_call, &mut public_inputs); + // validate the inputs unique to having a previous private kernel self.validate_inputs(); @@ -540,4 +544,25 @@ mod tests { builder.failed(); } + + #[test] + fn propagates_global_variables_if_empty() { + let mut builder = PublicKernelSetupCircuitPrivateInputsBuilder::new(); + + builder.public_call.public_inputs.global_variables.block_number = 11; + + let public_inputs = builder.execute(); + + assert_eq(public_inputs.constants.global_variables.block_number, 11); + } + + #[test(should_fail_with="Global variables injected into the public call do not match constants")] + fn validates_global_variables() { + let mut builder = PublicKernelSetupCircuitPrivateInputsBuilder::new(); + + builder.previous_kernel.global_variables.block_number = 10; + builder.public_call.public_inputs.global_variables.block_number = 11; + + builder.failed(); + } } diff --git a/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/public_kernel_teardown.nr b/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/public_kernel_teardown.nr index 9b19bb71dcc5..6b55d131ac22 100644 --- a/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/public_kernel_teardown.nr +++ b/noir-projects/noir-protocol-circuits/crates/public-kernel-lib/src/public_kernel_teardown.nr @@ -32,12 +32,11 @@ impl PublicKernelTeardownCircuitPrivateInputs { } // Validates the transaction fee injected into the app circuit is properly computed from gas_used and block gas_fees - 
fn validate_transaction_fee(self) { + fn validate_transaction_fee(self, public_inputs: PublicKernelCircuitPublicInputsBuilder) { let transaction_fee = self.public_call.call_stack_item.public_inputs.transaction_fee; // Note that teardown_gas is already included in end.gas_used as it was injected by the private kernel let total_gas_used = self.previous_kernel.public_inputs.end.gas_used.add(self.previous_kernel.public_inputs.end_non_revertible.gas_used); - // TODO(palla/gas): Load gas fees from a PublicConstantData struct that's currently missing - let block_gas_fees = GasFees::default(); + let block_gas_fees = public_inputs.constants.global_variables.gas_fees; let inclusion_fee = self.previous_kernel.public_inputs.constants.tx_context.gas_settings.inclusion_fee; let computed_transaction_fee = total_gas_used.compute_fee(block_gas_fees) + inclusion_fee; @@ -74,6 +73,9 @@ impl PublicKernelTeardownCircuitPrivateInputs { // validate the inputs common to all invocation circumstances common::validate_inputs(self.public_call); + // validate constants injected into the public call are correct or set them if this is the first public call + common::initialize_from_or_validate_public_call_variables(self.previous_kernel, self.public_call, &mut public_inputs); + // validate the inputs unique to having a previous private kernel self.validate_inputs(); @@ -82,7 +84,7 @@ impl PublicKernelTeardownCircuitPrivateInputs { common::validate_call_against_request(self.public_call, call_request); self.validate_start_gas(); - self.validate_transaction_fee(); + self.validate_transaction_fee(public_inputs); common::update_validation_requests(self.public_call, &mut public_inputs); @@ -425,4 +427,25 @@ mod tests { builder.failed(); } + + #[test] + fn propagates_global_variables_if_empty() { + let mut builder = PublicKernelTeardownCircuitPrivateInputsBuilder::new(); + + builder.public_call.public_inputs.global_variables.block_number = 11; + + let public_inputs = builder.execute(); + + 
assert_eq(public_inputs.constants.global_variables.block_number, 11); + } + + #[test(should_fail_with="Global variables injected into the public call do not match constants")] + fn validates_global_variables() { + let mut builder = PublicKernelTeardownCircuitPrivateInputsBuilder::new(); + + builder.previous_kernel.global_variables.block_number = 10; + builder.public_call.public_inputs.global_variables.block_number = 11; + + builder.failed(); + } } diff --git a/noir-projects/noir-protocol-circuits/crates/rollup-lib/src/base/base_rollup_inputs.nr b/noir-projects/noir-protocol-circuits/crates/rollup-lib/src/base/base_rollup_inputs.nr index db3a8cddfb0b..b6e59abff550 100644 --- a/noir-projects/noir-protocol-circuits/crates/rollup-lib/src/base/base_rollup_inputs.nr +++ b/noir-projects/noir-protocol-circuits/crates/rollup-lib/src/base/base_rollup_inputs.nr @@ -66,6 +66,14 @@ impl BaseRollupInputs { == self.constants.global_variables.version, "kernel version does not match the rollup version" ); + // Verify the kernel global variables if set, note these can be empty if this is a request coming directly from the private kernel tail. + // TODO(@spalladino) How can we check that this is a request coming from the private kernel tail? 
+ assert( + self.kernel_data.public_inputs.constants.global_variables.is_empty() + | (self.kernel_data.public_inputs.constants.global_variables + == self.constants.global_variables), "kernel global variables do not match the rollup global variables" + ); + self.validate_kernel_start_state(); let rollup_validation_requests = self.kernel_data.public_inputs.rollup_validation_requests; @@ -983,6 +991,14 @@ mod tests { builder.fails(); } + #[test(should_fail_with = "kernel global variables do not match the rollup global variables")] + unconstrained fn constants_global_variables_dont_match_kernels() { + let mut builder = BaseRollupInputsBuilder::new(); + builder.kernel_data.global_variables.block_number = 6; + builder.constants.global_variables.block_number = 7; + builder.fails(); + } + #[test(should_fail_with = "kernel max_block_number is smaller than block number")] unconstrained fn constants_dont_satisfy_smaller_max_block_number() { let mut builder = BaseRollupInputsBuilder::new(); diff --git a/noir-projects/noir-protocol-circuits/crates/types/src/abis/combined_constant_data.nr b/noir-projects/noir-protocol-circuits/crates/types/src/abis/combined_constant_data.nr index 09655af7058e..0d823df58d2a 100644 --- a/noir-projects/noir-protocol-circuits/crates/types/src/abis/combined_constant_data.nr +++ b/noir-projects/noir-protocol-circuits/crates/types/src/abis/combined_constant_data.nr @@ -1,6 +1,7 @@ use crate::transaction::tx_context::TxContext; use crate::header::Header; use crate::traits::Empty; +use crate::abis::global_variables::GlobalVariables; struct CombinedConstantData { historical_header: Header, @@ -9,6 +10,14 @@ struct CombinedConstantData { // a situation we could be using header from a block before the upgrade took place but be using the updated // protocol to execute and prove the transaction. 
tx_context: TxContext, + + global_variables: GlobalVariables, +} + +impl CombinedConstantData { + pub fn private(historical_header: Header, tx_context: TxContext) -> CombinedConstantData { + CombinedConstantData { historical_header, tx_context, global_variables: GlobalVariables::empty() } + } } impl Empty for CombinedConstantData { @@ -16,6 +25,7 @@ impl Empty for CombinedConstantData { CombinedConstantData { historical_header: Header::empty(), tx_context: TxContext::empty(), + global_variables: GlobalVariables::empty() } } } diff --git a/noir-projects/noir-protocol-circuits/crates/types/src/abis/gas_fees.nr b/noir-projects/noir-protocol-circuits/crates/types/src/abis/gas_fees.nr index 09d75aae0c8a..45e9ed3ab661 100644 --- a/noir-projects/noir-protocol-circuits/crates/types/src/abis/gas_fees.nr +++ b/noir-projects/noir-protocol-circuits/crates/types/src/abis/gas_fees.nr @@ -18,6 +18,10 @@ impl GasFees { pub fn default() -> Self { GasFees::new(1, 1, 1) } + + pub fn is_empty(self) -> bool { + (self.fee_per_da_gas == 0) & (self.fee_per_l1_gas == 0) & (self.fee_per_l2_gas == 0) + } } impl Serialize for GasFees { diff --git a/noir-projects/noir-protocol-circuits/crates/types/src/abis/global_variables.nr b/noir-projects/noir-protocol-circuits/crates/types/src/abis/global_variables.nr index 1a7a5a68315b..f08db75bb735 100644 --- a/noir-projects/noir-protocol-circuits/crates/types/src/abis/global_variables.nr +++ b/noir-projects/noir-protocol-circuits/crates/types/src/abis/global_variables.nr @@ -17,6 +17,18 @@ struct GlobalVariables { } // docs:end:global-variables +impl GlobalVariables { + fn is_empty(self) -> bool { + (self.chain_id == 0) + & (self.version == 0) + & (self.block_number == 0) + & (self.timestamp == 0) + & (self.coinbase.is_zero()) + & (self.fee_recipient.is_zero()) + & (self.gas_fees.is_empty()) + } +} + impl Serialize for GlobalVariables { fn serialize(self) -> [Field; GLOBAL_VARIABLES_LENGTH] { let mut serialized: BoundedVec = BoundedVec::new(); diff 
--git a/noir-projects/noir-protocol-circuits/crates/types/src/abis/public_call_stack_item.nr b/noir-projects/noir-protocol-circuits/crates/types/src/abis/public_call_stack_item.nr index d176bfa53e1b..1939b3034f0c 100644 --- a/noir-projects/noir-protocol-circuits/crates/types/src/abis/public_call_stack_item.nr +++ b/noir-projects/noir-protocol-circuits/crates/types/src/abis/public_call_stack_item.nr @@ -69,7 +69,7 @@ mod tests { let call_stack_item = PublicCallStackItem { contract_address, public_inputs, is_execution_request: true, function_data }; // Value from public_call_stack_item.test.ts "Computes a callstack item request hash" test - let test_data_call_stack_item_request_hash = 0x1b06f4a4960455e9f01c20d4cb01afbf8c8f39eb50094c5d1ad6725ced0f7d08; + let test_data_call_stack_item_request_hash = 0x22848497ff97ff3a4517aec32454059030fb5a3ef4f3ca533ee40132d7a63aea; assert_eq(call_stack_item.hash(), test_data_call_stack_item_request_hash); } @@ -87,7 +87,7 @@ mod tests { let call_stack_item = PublicCallStackItem { contract_address, public_inputs, is_execution_request: false, function_data }; // Value from public_call_stack_item.test.ts "Computes a callstack item hash" test - let test_data_call_stack_item_hash = 0x1f3f1902ca41ffd6fd7191fa5a52edd677444a9b6ae8f4448336fa71a4b2d5cc; + let test_data_call_stack_item_hash = 0x0e18ddd9aaddae02d45598f0278d925e289913384d6e15057ce5b4a9e8e7488d; assert_eq(call_stack_item.hash(), test_data_call_stack_item_hash); } } diff --git a/noir-projects/noir-protocol-circuits/crates/types/src/abis/public_circuit_public_inputs.nr b/noir-projects/noir-protocol-circuits/crates/types/src/abis/public_circuit_public_inputs.nr index 154b3e05a772..2dba18750fd8 100644 --- a/noir-projects/noir-protocol-circuits/crates/types/src/abis/public_circuit_public_inputs.nr +++ b/noir-projects/noir-protocol-circuits/crates/types/src/abis/public_circuit_public_inputs.nr @@ -1,7 +1,7 @@ use crate::{ abis::{ call_context::CallContext, read_request::ReadRequest, - 
side_effect::{SideEffect, SideEffectLinkedToNoteHash}, gas::Gas + side_effect::{SideEffect, SideEffectLinkedToNoteHash}, gas::Gas, global_variables::GlobalVariables }, address::AztecAddress, constants::{ @@ -46,6 +46,9 @@ struct PublicCircuitPublicInputs { // previous to the one in which the tx is included. historical_header: Header, + // Global variables injected into this circuit + global_variables: GlobalVariables, + prover_address: AztecAddress, revert_code: u8, @@ -99,6 +102,7 @@ impl Serialize for PublicCircuitPublicInput } fields.push(self.unencrypted_log_preimages_length); fields.extend_from_array(self.historical_header.serialize()); + fields.extend_from_array(self.global_variables.serialize()); fields.push(self.prover_address.to_field()); fields.push(self.revert_code as Field); fields.extend_from_array(self.start_gas_left.serialize()); @@ -129,6 +133,7 @@ impl Deserialize for PublicCircuitPublicInp unencrypted_logs_hashes: reader.read_struct_array(SideEffect::deserialize, [SideEffect::empty(); MAX_UNENCRYPTED_LOGS_PER_CALL]), unencrypted_log_preimages_length: reader.read(), historical_header: reader.read_struct(Header::deserialize), + global_variables: reader.read_struct(GlobalVariables::deserialize), prover_address: reader.read_struct(AztecAddress::deserialize), revert_code: reader.read() as u8, start_gas_left: reader.read_struct(Gas::deserialize), @@ -166,6 +171,7 @@ impl Empty for PublicCircuitPublicInputs { unencrypted_logs_hashes: [SideEffect::empty(); MAX_UNENCRYPTED_LOGS_PER_CALL], unencrypted_log_preimages_length: 0, historical_header: Header::empty(), + global_variables: GlobalVariables::empty(), prover_address: AztecAddress::zero(), revert_code: 0 as u8, start_gas_left: Gas::empty(), @@ -189,6 +195,6 @@ fn empty_hash() { let hash = inputs.hash(); // Value from public_circuit_public_inputs.test.ts "computes empty item hash" test - let test_data_empty_hash = 0x237c89f8b29c3fb169b889940a714b3c72017cb2941d0724d4668a030794d2fb; + let 
test_data_empty_hash = 0x2d91debc43bd6354caef4fd152975e7c6dd44e8623b6b62c21b9f547f2fabd32; assert_eq(hash, test_data_empty_hash); } diff --git a/noir-projects/noir-protocol-circuits/crates/types/src/constants.nr b/noir-projects/noir-protocol-circuits/crates/types/src/constants.nr index 1cc4dfdca859..aef7fa3e8a0b 100644 --- a/noir-projects/noir-protocol-circuits/crates/types/src/constants.nr +++ b/noir-projects/noir-protocol-circuits/crates/types/src/constants.nr @@ -165,7 +165,7 @@ global TX_CONTEXT_LENGTH: u64 = 2 + GAS_SETTINGS_LENGTH; global TX_REQUEST_LENGTH: u64 = 2 + TX_CONTEXT_LENGTH + FUNCTION_DATA_LENGTH; global HEADER_LENGTH: u64 = APPEND_ONLY_TREE_SNAPSHOT_LENGTH + CONTENT_COMMITMENT_LENGTH + STATE_REFERENCE_LENGTH + GLOBAL_VARIABLES_LENGTH; global PRIVATE_CIRCUIT_PUBLIC_INPUTS_LENGTH: u64 = CALL_CONTEXT_LENGTH + 3 + MAX_BLOCK_NUMBER_LENGTH + (SIDE_EFFECT_LENGTH * MAX_NOTE_HASH_READ_REQUESTS_PER_CALL) + (READ_REQUEST_LENGTH * MAX_NULLIFIER_READ_REQUESTS_PER_CALL) + (NULLIFIER_KEY_VALIDATION_REQUEST_LENGTH * MAX_NULLIFIER_KEY_VALIDATION_REQUESTS_PER_CALL) + (SIDE_EFFECT_LENGTH * MAX_NEW_NOTE_HASHES_PER_CALL) + (SIDE_EFFECT_LINKED_TO_NOTE_HASH_LENGTH * MAX_NEW_NULLIFIERS_PER_CALL) + MAX_PRIVATE_CALL_STACK_LENGTH_PER_CALL + MAX_PUBLIC_CALL_STACK_LENGTH_PER_CALL + (L2_TO_L1_MESSAGE_LENGTH * MAX_NEW_L2_TO_L1_MSGS_PER_CALL) + 2 + (SIDE_EFFECT_LENGTH * MAX_ENCRYPTED_LOGS_PER_CALL) + (SIDE_EFFECT_LENGTH * MAX_UNENCRYPTED_LOGS_PER_CALL) + 2 + HEADER_LENGTH + TX_CONTEXT_LENGTH; -global PUBLIC_CIRCUIT_PUBLIC_INPUTS_LENGTH: u64 = CALL_CONTEXT_LENGTH + 2 + (READ_REQUEST_LENGTH * MAX_NULLIFIER_READ_REQUESTS_PER_CALL) + (READ_REQUEST_LENGTH * MAX_NULLIFIER_NON_EXISTENT_READ_REQUESTS_PER_CALL) + (CONTRACT_STORAGE_UPDATE_REQUEST_LENGTH * MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_CALL) + (CONTRACT_STORAGE_READ_LENGTH * MAX_PUBLIC_DATA_READS_PER_CALL) + MAX_PUBLIC_CALL_STACK_LENGTH_PER_CALL + (SIDE_EFFECT_LENGTH * MAX_NEW_NOTE_HASHES_PER_CALL) + 
(SIDE_EFFECT_LINKED_TO_NOTE_HASH_LENGTH * MAX_NEW_NULLIFIERS_PER_CALL) + (L2_TO_L1_MESSAGE_LENGTH * MAX_NEW_L2_TO_L1_MSGS_PER_CALL) + 2 + (SIDE_EFFECT_LENGTH * MAX_UNENCRYPTED_LOGS_PER_CALL) + 1 + HEADER_LENGTH + AZTEC_ADDRESS_LENGTH + /* revert_code */ 1 + 2 * GAS_LENGTH + /* transaction_fee */ 1; +global PUBLIC_CIRCUIT_PUBLIC_INPUTS_LENGTH: u64 = CALL_CONTEXT_LENGTH + 2 + (READ_REQUEST_LENGTH * MAX_NULLIFIER_READ_REQUESTS_PER_CALL) + (READ_REQUEST_LENGTH * MAX_NULLIFIER_NON_EXISTENT_READ_REQUESTS_PER_CALL) + (CONTRACT_STORAGE_UPDATE_REQUEST_LENGTH * MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_CALL) + (CONTRACT_STORAGE_READ_LENGTH * MAX_PUBLIC_DATA_READS_PER_CALL) + MAX_PUBLIC_CALL_STACK_LENGTH_PER_CALL + (SIDE_EFFECT_LENGTH * MAX_NEW_NOTE_HASHES_PER_CALL) + (SIDE_EFFECT_LINKED_TO_NOTE_HASH_LENGTH * MAX_NEW_NULLIFIERS_PER_CALL) + (L2_TO_L1_MESSAGE_LENGTH * MAX_NEW_L2_TO_L1_MSGS_PER_CALL) + 2 + (SIDE_EFFECT_LENGTH * MAX_UNENCRYPTED_LOGS_PER_CALL) + 1 + HEADER_LENGTH + GLOBAL_VARIABLES_LENGTH + AZTEC_ADDRESS_LENGTH + /* revert_code */ 1 + 2 * GAS_LENGTH + /* transaction_fee */ 1; global PRIVATE_CALL_STACK_ITEM_LENGTH: u64 = AZTEC_ADDRESS_LENGTH + FUNCTION_DATA_LENGTH + PRIVATE_CIRCUIT_PUBLIC_INPUTS_LENGTH; global ENQUEUE_PUBLIC_FUNCTION_CALL_RETURN_LENGTH: u64 = 2 + FUNCTION_DATA_LENGTH + CALL_CONTEXT_LENGTH; diff --git a/noir-projects/noir-protocol-circuits/crates/types/src/tests/fixture_builder.nr b/noir-projects/noir-protocol-circuits/crates/types/src/tests/fixture_builder.nr index a70de06ebfad..4d02ae8cea09 100644 --- a/noir-projects/noir-protocol-circuits/crates/types/src/tests/fixture_builder.nr +++ b/noir-projects/noir-protocol-circuits/crates/types/src/tests/fixture_builder.nr @@ -6,7 +6,7 @@ use crate::{ CombinedAccumulatedData, PrivateAccumulatedData, PrivateAccumulatedDataBuilder, PublicAccumulatedData, PublicAccumulatedDataBuilder }, - combined_constant_data::CombinedConstantData, + global_variables::GlobalVariables, combined_constant_data::CombinedConstantData, 
kernel_circuit_public_inputs::{KernelCircuitPublicInputs, PrivateKernelCircuitPublicInputs, PublicKernelCircuitPublicInputs}, kernel_data::{PrivateKernelData, PublicKernelData, KernelData}, max_block_number::MaxBlockNumber, nullifier_key_validation_request::NullifierKeyValidationRequestContext, @@ -35,6 +35,7 @@ struct FixtureBuilder { // Constant data. historical_header: Header, tx_context: TxContext, + global_variables: GlobalVariables, // Accumulated data. new_note_hashes: BoundedVec, @@ -111,12 +112,17 @@ impl FixtureBuilder { counter: 0, start_state: PartialStateReference::empty(), gas_used: Gas::empty(), - non_revertible_gas_used: Gas::empty() + non_revertible_gas_used: Gas::empty(), + global_variables: GlobalVariables::empty() } } pub fn to_constant_data(self) -> CombinedConstantData { - CombinedConstantData { historical_header: self.historical_header, tx_context: self.tx_context } + CombinedConstantData { + historical_header: self.historical_header, + tx_context: self.tx_context, + global_variables: self.global_variables + } } pub fn to_private_accumulated_data(self) -> PrivateAccumulatedData { @@ -427,6 +433,7 @@ impl Empty for FixtureBuilder { storage_contract_address: AztecAddress::zero(), historical_header: Header::empty(), tx_context: TxContext::empty(), + global_variables: GlobalVariables::empty(), new_note_hashes: BoundedVec::new(), new_nullifiers: BoundedVec::new(), new_l2_to_l1_msgs: BoundedVec::new(), diff --git a/noir-projects/noir-protocol-circuits/crates/types/src/tests/public_circuit_public_inputs_builder.nr b/noir-projects/noir-protocol-circuits/crates/types/src/tests/public_circuit_public_inputs_builder.nr index f6d712bb814a..db56f3b08521 100644 --- a/noir-projects/noir-protocol-circuits/crates/types/src/tests/public_circuit_public_inputs_builder.nr +++ b/noir-projects/noir-protocol-circuits/crates/types/src/tests/public_circuit_public_inputs_builder.nr @@ -1,7 +1,8 @@ use crate::{ abis::{ gas::Gas, call_context::CallContext, 
public_circuit_public_inputs::PublicCircuitPublicInputs, - read_request::ReadRequest, side_effect::{SideEffect, SideEffectLinkedToNoteHash} + read_request::ReadRequest, side_effect::{SideEffect, SideEffectLinkedToNoteHash}, + global_variables::GlobalVariables }, address::AztecAddress, contrakt::{storage_read::StorageRead, storage_update_request::StorageUpdateRequest}, header::Header, @@ -34,6 +35,7 @@ struct PublicCircuitPublicInputsBuilder { unencrypted_logs_hashes: BoundedVec, unencrypted_log_preimages_length: Field, historical_header: Header, + global_variables: GlobalVariables, prover_address: AztecAddress, revert_code: u8, start_gas_left: Gas, @@ -67,6 +69,7 @@ impl PublicCircuitPublicInputsBuilder { unencrypted_logs_hashes: self.unencrypted_logs_hashes.storage, unencrypted_log_preimages_length: self.unencrypted_log_preimages_length, historical_header: self.historical_header, + global_variables: self.global_variables, prover_address: self.prover_address, revert_code: self.revert_code, start_gas_left: self.start_gas_left, @@ -95,6 +98,7 @@ impl Empty for PublicCircuitPublicInputsBuilder { unencrypted_logs_hashes: BoundedVec::new(), unencrypted_log_preimages_length: 0, historical_header: Header::empty(), + global_variables: GlobalVariables::empty(), prover_address: AztecAddress::zero(), revert_code: 0 as u8, start_gas_left: Gas::empty(), diff --git a/noir/Earthfile b/noir/Earthfile index 9e1f7610b82b..63f377746072 100644 --- a/noir/Earthfile +++ b/noir/Earthfile @@ -12,6 +12,7 @@ nargo: noir-repo/noir_stdlib \ noir-repo/tooling \ noir-repo/test_programs \ + noir-repo/utils \ noir-repo/Cargo.lock \ noir-repo/Cargo.toml \ noir-repo @@ -49,6 +50,7 @@ packages: noir-repo/scripts \ noir-repo/test_programs \ noir-repo/tooling \ + noir-repo/utils \ noir-repo/Cargo.lock \ noir-repo/.yarnrc.yml \ noir-repo/.yarn \ diff --git a/noir/noir-repo/Cargo.lock b/noir/noir-repo/Cargo.lock index 377e3339d7ef..2e31dee9a60b 100644 --- a/noir/noir-repo/Cargo.lock +++ 
b/noir/noir-repo/Cargo.lock @@ -233,10 +233,6 @@ version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" -[[package]] -name = "arena" -version = "0.28.0" - [[package]] name = "ark-bls12-381" version = "0.4.0" @@ -3100,6 +3096,10 @@ dependencies = [ "wasm-bindgen-test", ] +[[package]] +name = "noirc_arena" +version = "0.28.0" + [[package]] name = "noirc_driver" version = "0.28.0" @@ -3161,7 +3161,6 @@ name = "noirc_frontend" version = "0.28.0" dependencies = [ "acvm", - "arena", "base64 0.21.2", "chumsky", "fm", @@ -3169,6 +3168,7 @@ dependencies = [ "iter-extended", "lalrpop", "lalrpop-util", + "noirc_arena", "noirc_errors", "noirc_printable_type", "petgraph", diff --git a/noir/noir-repo/Cargo.toml b/noir/noir-repo/Cargo.toml index 8911f6bfccb5..6fe7f099e82c 100644 --- a/noir/noir-repo/Cargo.toml +++ b/noir/noir-repo/Cargo.toml @@ -4,6 +4,7 @@ members = [ # Aztec Macro crate for metaprogramming "aztec_macros", # Compiler crates + "compiler/noirc_arena", "compiler/noirc_evaluator", "compiler/noirc_frontend", "compiler/noirc_errors", @@ -11,10 +12,7 @@ members = [ "compiler/noirc_printable_type", "compiler/fm", "compiler/wasm", - # Utility crates used by the Noir compiler - "compiler/utils/arena", - "compiler/utils/iter-extended", - # Crates related to tooling built ontop of the Noir compiler + # Crates related to tooling built on top of the Noir compiler "tooling/backend_interface", "tooling/bb_abstraction_leaks", "tooling/lsp", @@ -35,6 +33,8 @@ members = [ "acvm-repo/brillig_vm", "acvm-repo/blackbox_solver", "acvm-repo/bn254_blackbox_solver", + # Utility crates + "utils/iter-extended", ] default-members = ["tooling/nargo_cli", "tooling/acvm_cli"] resolver = "2" @@ -61,9 +61,8 @@ acvm_blackbox_solver = { version = "0.44.0", path = "acvm-repo/blackbox_solver", bn254_blackbox_solver = { version = "0.44.0", path = "acvm-repo/bn254_blackbox_solver", 
default-features = false } # Noir compiler workspace dependencies -arena = { path = "compiler/utils/arena" } fm = { path = "compiler/fm" } -iter-extended = { path = "compiler/utils/iter-extended" } +noirc_arena = { path = "compiler/noirc_arena" } noirc_driver = { path = "compiler/noirc_driver" } noirc_errors = { path = "compiler/noirc_errors" } noirc_evaluator = { path = "compiler/noirc_evaluator" } @@ -80,6 +79,9 @@ noirc_abi = { path = "tooling/noirc_abi" } bb_abstraction_leaks = { path = "tooling/bb_abstraction_leaks" } acvm_cli = { path = "tooling/acvm_cli" } +# Misc utils crates +iter-extended = { path = "utils/iter-extended" } + # LSP async-lsp = { version = "0.1.0", default-features = false } lsp-types = "0.94.1" diff --git a/noir/noir-repo/acvm-repo/acvm_js/build.sh b/noir/noir-repo/acvm-repo/acvm_js/build.sh index ee93413ab85a..c07d2d8a4c1d 100755 --- a/noir/noir-repo/acvm-repo/acvm_js/build.sh +++ b/noir/noir-repo/acvm-repo/acvm_js/build.sh @@ -25,7 +25,7 @@ function run_if_available { require_command jq require_command cargo require_command wasm-bindgen -# require_command wasm-opt +#require_command wasm-opt self_path=$(dirname "$(readlink -f "$0")") pname=$(cargo read-manifest | jq -r '.name') diff --git a/noir/noir-repo/compiler/utils/arena/Cargo.toml b/noir/noir-repo/compiler/noirc_arena/Cargo.toml similarity index 86% rename from noir/noir-repo/compiler/utils/arena/Cargo.toml rename to noir/noir-repo/compiler/noirc_arena/Cargo.toml index f6bd764ee62b..b94f997b7b9d 100644 --- a/noir/noir-repo/compiler/utils/arena/Cargo.toml +++ b/noir/noir-repo/compiler/noirc_arena/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "arena" +name = "noirc_arena" version.workspace = true authors.workspace = true edition.workspace = true diff --git a/noir/noir-repo/compiler/utils/arena/src/lib.rs b/noir/noir-repo/compiler/noirc_arena/src/lib.rs similarity index 100% rename from noir/noir-repo/compiler/utils/arena/src/lib.rs rename to 
noir/noir-repo/compiler/noirc_arena/src/lib.rs diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa.rs index 92461e1a1b12..7a7ee24a6701 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa.rs @@ -9,10 +9,7 @@ use std::collections::{BTreeMap, BTreeSet}; -use crate::{ - brillig::Brillig, - errors::{RuntimeError, SsaReport}, -}; +use crate::errors::{RuntimeError, SsaReport}; use acvm::acir::{ circuit::{ brillig::BrilligBytecode, Circuit, ExpressionWidth, Program as AcirProgram, PublicInputs, @@ -334,10 +331,6 @@ impl SsaBuilder { Ok(self.print(msg)) } - fn to_brillig(&self, print_brillig_trace: bool) -> Brillig { - self.ssa.to_brillig(print_brillig_trace) - } - fn print(self, msg: &str) -> Self { if self.print_ssa_passes { println!("{msg}\n{}", self.ssa); diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs index 1d3bc3fce575..c084ba37fee6 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs @@ -419,55 +419,6 @@ impl GeneratedAcir { Ok(limb_witnesses) } - /// Returns an expression which represents `lhs * rhs` - /// - /// If one has multiplicative term and the other is of degree one or more, - /// the function creates [intermediate variables][`Witness`] accordingly. - /// There are two cases where we can optimize the multiplication between two expressions: - /// 1. If the sum of the degrees of both expressions is at most 2, then we can just multiply them - /// as each term in the result will be degree-2. - /// 2. 
If one expression is a constant, then we can just multiply the constant with the other expression - /// - /// (1) is because an [`Expression`] can hold at most a degree-2 univariate polynomial - /// which is what you get when you multiply two degree-1 univariate polynomials. - pub(crate) fn mul_with_witness(&mut self, lhs: &Expression, rhs: &Expression) -> Expression { - use std::borrow::Cow; - let lhs_is_linear = lhs.is_linear(); - let rhs_is_linear = rhs.is_linear(); - - // Case 1: The sum of the degrees of both expressions is at most 2. - // - // If one of the expressions is constant then it does not increase the degree when multiplying by another expression. - // If both of the expressions are linear (degree <=1) then the product will be at most degree 2. - let both_are_linear = lhs_is_linear && rhs_is_linear; - let either_is_const = lhs.is_const() || rhs.is_const(); - if both_are_linear || either_is_const { - return (lhs * rhs).expect("Both expressions are degree <= 1"); - } - - // Case 2: One or both of the sides needs to be reduced to a degree-1 univariate polynomial - let lhs_reduced = if lhs_is_linear { - Cow::Borrowed(lhs) - } else { - Cow::Owned(self.get_or_create_witness(lhs).into()) - }; - - // If the lhs and rhs are the same, then we do not need to reduce - // rhs, we only need to square the lhs. - if lhs == rhs { - return (&*lhs_reduced * &*lhs_reduced) - .expect("Both expressions are reduced to be degree <= 1"); - }; - - let rhs_reduced = if rhs_is_linear { - Cow::Borrowed(rhs) - } else { - Cow::Owned(self.get_or_create_witness(rhs).into()) - }; - - (&*lhs_reduced * &*rhs_reduced).expect("Both expressions are reduced to be degree <= 1") - } - /// Adds an inversion brillig opcode. 
/// /// This code will invert `expr` without applying constraints diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs index 80dcb5db3288..c5e4e88862f7 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs @@ -28,8 +28,8 @@ use crate::brillig::brillig_ir::artifact::{BrilligParameter, GeneratedBrillig}; use crate::brillig::brillig_ir::BrilligContext; use crate::brillig::{brillig_gen::brillig_fn::FunctionContext as BrilligFunctionContext, Brillig}; use crate::errors::{InternalError, InternalWarning, RuntimeError, SsaReport}; -use crate::ssa::ir::function::InlineType; pub(crate) use acir_ir::generated_acir::GeneratedAcir; +use noirc_frontend::monomorphization::ast::InlineType; use acvm::acir::circuit::brillig::BrilligBytecode; use acvm::acir::circuit::{AssertionPayload, OpcodeLocation}; @@ -389,12 +389,12 @@ impl<'a> Context<'a> { match function.runtime() { RuntimeType::Acir(inline_type) => { match inline_type { - InlineType::Fold => {} InlineType::Inline => { if function.id() != ssa.main_id { panic!("ACIR function should have been inlined earlier if not marked otherwise"); } } + InlineType::Fold | InlineType::Never => {} } // We only want to convert entry point functions. 
This being `main` and those marked with `InlineType::Fold` Ok(Some(self.convert_acir_main(function, ssa, brillig)?)) @@ -2629,25 +2629,22 @@ mod test { }, FieldElement, }; + use noirc_frontend::monomorphization::ast::InlineType; use crate::{ brillig::Brillig, ssa::{ acir_gen::acir_ir::generated_acir::BrilligStdlibFunc, function_builder::FunctionBuilder, - ir::{ - function::{FunctionId, InlineType}, - instruction::BinaryOp, - map::Id, - types::Type, - }, + ir::{function::FunctionId, instruction::BinaryOp, map::Id, types::Type}, }, }; fn build_basic_foo_with_return( builder: &mut FunctionBuilder, foo_id: FunctionId, - is_brillig_func: bool, + // `InlineType` can only exist on ACIR functions, so if the option is `None` we should generate a Brillig function + inline_type: Option, ) { // fn foo f1 { // b0(v0: Field, v1: Field): @@ -2655,10 +2652,10 @@ mod test { // constrain v2 == u1 0 // return v0 // } - if is_brillig_func { - builder.new_brillig_function("foo".into(), foo_id); + if let Some(inline_type) = inline_type { + builder.new_function("foo".into(), foo_id, inline_type); } else { - builder.new_function("foo".into(), foo_id, InlineType::Fold); + builder.new_brillig_function("foo".into(), foo_id); } let foo_v0 = builder.add_parameter(Type::field()); let foo_v1 = builder.add_parameter(Type::field()); @@ -2669,8 +2666,25 @@ mod test { builder.terminate_with_return(vec![foo_v0]); } + /// Check that each `InlineType` which prevents inlining functions generates code in the same manner + #[test] + fn basic_calls_fold() { + basic_call_with_outputs_assert(InlineType::Fold); + call_output_as_next_call_input(InlineType::Fold); + basic_nested_call(InlineType::Fold); + + call_output_as_next_call_input(InlineType::Never); + basic_nested_call(InlineType::Never); + basic_call_with_outputs_assert(InlineType::Never); + } + #[test] - fn basic_call_with_outputs_assert() { + #[should_panic] + fn call_without_inline_attribute() { + basic_call_with_outputs_assert(InlineType::Inline); + 
} + + fn basic_call_with_outputs_assert(inline_type: InlineType) { // acir(inline) fn main f0 { // b0(v0: Field, v1: Field): // v2 = call f1(v0, v1) @@ -2698,7 +2712,7 @@ mod test { builder.insert_constrain(main_call1_results[0], main_call2_results[0], None); builder.terminate_with_return(vec![]); - build_basic_foo_with_return(&mut builder, foo_id, false); + build_basic_foo_with_return(&mut builder, foo_id, Some(inline_type)); let ssa = builder.finish(); @@ -2764,8 +2778,7 @@ mod test { } } - #[test] - fn call_output_as_next_call_input() { + fn call_output_as_next_call_input(inline_type: InlineType) { // acir(inline) fn main f0 { // b0(v0: Field, v1: Field): // v3 = call f1(v0, v1) @@ -2794,7 +2807,7 @@ mod test { builder.insert_constrain(main_call1_results[0], main_call2_results[0], None); builder.terminate_with_return(vec![]); - build_basic_foo_with_return(&mut builder, foo_id, false); + build_basic_foo_with_return(&mut builder, foo_id, Some(inline_type)); let ssa = builder.finish(); @@ -2813,8 +2826,7 @@ mod test { check_call_opcode(&main_opcodes[1], 1, vec![Witness(2), Witness(1)], vec![Witness(3)]); } - #[test] - fn basic_nested_call() { + fn basic_nested_call(inline_type: InlineType) { // SSA for the following Noir program: // fn main(x: Field, y: pub Field) { // let z = func_with_nested_foo_call(x, y); @@ -2870,7 +2882,7 @@ mod test { builder.new_function( "func_with_nested_foo_call".into(), func_with_nested_foo_call_id, - InlineType::Fold, + inline_type, ); let func_with_nested_call_v0 = builder.add_parameter(Type::field()); let func_with_nested_call_v1 = builder.add_parameter(Type::field()); @@ -2885,7 +2897,7 @@ mod test { .to_vec(); builder.terminate_with_return(vec![foo_call[0]]); - build_basic_foo_with_return(&mut builder, foo_id, false); + build_basic_foo_with_return(&mut builder, foo_id, Some(inline_type)); let ssa = builder.finish(); @@ -2996,8 +3008,8 @@ mod test { builder.insert_call(bar, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); 
builder.terminate_with_return(vec![]); - build_basic_foo_with_return(&mut builder, foo_id, true); - build_basic_foo_with_return(&mut builder, bar_id, true); + build_basic_foo_with_return(&mut builder, foo_id, None); + build_basic_foo_with_return(&mut builder, bar_id, None); let ssa = builder.finish(); let brillig = ssa.to_brillig(false); @@ -3124,7 +3136,7 @@ mod test { builder.terminate_with_return(vec![]); - build_basic_foo_with_return(&mut builder, foo_id, true); + build_basic_foo_with_return(&mut builder, foo_id, None); let ssa = builder.finish(); // We need to generate Brillig artifacts for the regular Brillig function and pass them to the ACIR generation pass. @@ -3210,8 +3222,10 @@ mod test { builder.terminate_with_return(vec![]); - build_basic_foo_with_return(&mut builder, foo_id, true); - build_basic_foo_with_return(&mut builder, bar_id, false); + // Build a Brillig function + build_basic_foo_with_return(&mut builder, foo_id, None); + // Build an ACIR function which has the same logic as the Brillig function above + build_basic_foo_with_return(&mut builder, bar_id, Some(InlineType::Fold)); let ssa = builder.finish(); // We need to generate Brillig artifacts for the regular Brillig function and pass them to the ACIR generation pass. @@ -3295,18 +3309,15 @@ mod test { // This check right now expects to only call one Brillig function. let mut num_normal_brillig_calls = 0; for (i, opcode) in opcodes.iter().enumerate() { - match opcode { - Opcode::BrilligCall { id, .. } => { - if brillig_stdlib_function_locations.get(&OpcodeLocation::Acir(i)).is_some() { - // We should have already checked Brillig stdlib functions and only want to check normal Brillig calls here - continue; - } - // We only generate one normal Brillig call so we should expect a function ID of `0` - let expected_id = 0u32; - assert_eq!(*id, expected_id, "Expected an id of {expected_id} but got {id}"); - num_normal_brillig_calls += 1; + if let Opcode::BrilligCall { id, .. 
} = opcode { + if brillig_stdlib_function_locations.get(&OpcodeLocation::Acir(i)).is_some() { + // We should have already checked Brillig stdlib functions and only want to check normal Brillig calls here + continue; } - _ => {} + // We only generate one normal Brillig call so we should expect a function ID of `0` + let expected_id = 0u32; + assert_eq!(*id, expected_id, "Expected an id of {expected_id} but got {id}"); + num_normal_brillig_calls += 1; } } diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs index e149769f786d..c0aa86c89bba 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs @@ -4,6 +4,7 @@ use std::{borrow::Cow, collections::BTreeMap, rc::Rc}; use acvm::FieldElement; use noirc_errors::Location; +use noirc_frontend::monomorphization::ast::InlineType; use crate::ssa::ir::{ basic_block::BasicBlockId, @@ -17,7 +18,7 @@ use super::{ ir::{ basic_block::BasicBlock, dfg::{CallStack, InsertInstructionResult}, - function::{InlineType, RuntimeType}, + function::RuntimeType, instruction::{ConstrainError, ErrorSelector, ErrorType, InstructionId, Intrinsic}, }, ssa_gen::Ssa, diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/function.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/function.rs index 011bee36661b..057786bf5ec6 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/function.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ir/function.rs @@ -1,6 +1,7 @@ use std::collections::BTreeSet; use iter_extended::vecmap; +use noirc_frontend::monomorphization::ast::InlineType; use super::basic_block::BasicBlockId; use super::dfg::DataFlowGraph; @@ -17,18 +18,6 @@ pub(crate) enum RuntimeType { Brillig, } -/// Represents how a RuntimeType::Acir function should be inlined. 
-/// This type is only relevant for ACIR functions as we do not inline any Brillig functions -#[derive(Default, Clone, Copy, PartialEq, Eq, Debug, Hash)] -pub(crate) enum InlineType { - /// The most basic entry point can expect all its functions to be inlined. - /// All function calls are expected to be inlined into a single ACIR. - #[default] - Inline, - /// Functions marked as foldable will not be inlined and compiled separately into ACIR - Fold, -} - impl RuntimeType { /// Returns whether the runtime type represents an entry point. /// We return `false` for InlineType::Inline on default, which is true @@ -36,10 +25,7 @@ impl RuntimeType { /// handling in any places where this function determines logic. pub(crate) fn is_entry_point(&self) -> bool { match self { - RuntimeType::Acir(inline_type) => match inline_type { - InlineType::Inline => false, - InlineType::Fold => true, - }, + RuntimeType::Acir(inline_type) => inline_type.is_entry_point(), RuntimeType::Brillig => true, } } @@ -163,15 +149,6 @@ impl std::fmt::Display for RuntimeType { } } -impl std::fmt::Display for InlineType { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - InlineType::Inline => write!(f, "inline"), - InlineType::Fold => write!(f, "fold"), - } - } -} - /// FunctionId is a reference for a function /// /// This Id is how each function refers to other functions diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/inlining.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/inlining.rs index ead3cac071c9..0b78d47fbb12 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/inlining.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/opt/inlining.rs @@ -517,12 +517,12 @@ impl<'function> PerFunctionContext<'function> { #[cfg(test)] mod test { use acvm::FieldElement; + use noirc_frontend::monomorphization::ast::InlineType; use crate::ssa::{ function_builder::FunctionBuilder, ir::{ basic_block::BasicBlockId, - 
function::InlineType, instruction::{BinaryOp, Intrinsic, TerminatorInstruction}, map::Id, types::Type, diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs index 276a8247ceac..f2cc2ba53cc2 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs @@ -11,9 +11,8 @@ use noirc_frontend::monomorphization::ast::{FuncId, Program}; use crate::errors::RuntimeError; use crate::ssa::function_builder::FunctionBuilder; use crate::ssa::ir::basic_block::BasicBlockId; -use crate::ssa::ir::dfg::DataFlowGraph; +use crate::ssa::ir::function::FunctionId as IrFunctionId; use crate::ssa::ir::function::{Function, RuntimeType}; -use crate::ssa::ir::function::{FunctionId as IrFunctionId, InlineType}; use crate::ssa::ir::instruction::BinaryOp; use crate::ssa::ir::instruction::Instruction; use crate::ssa::ir::map::AtomicCounter; @@ -126,8 +125,7 @@ impl<'a> FunctionContext<'a> { if func.unconstrained { self.builder.new_brillig_function(func.name.clone(), id); } else { - let inline_type = if func.should_fold { InlineType::Fold } else { InlineType::Inline }; - self.builder.new_function(func.name.clone(), id, inline_type); + self.builder.new_function(func.name.clone(), id, func.inline_type); } self.add_parameters_to_scope(&func.parameters); } @@ -1005,72 +1003,6 @@ fn operator_requires_swapped_operands(op: BinaryOpKind) -> bool { matches!(op, Greater | LessEqual) } -/// If the operation requires its result to be truncated because it is an integer, the maximum -/// number of bits that result may occupy is returned. 
-fn operator_result_max_bit_size_to_truncate( - op: BinaryOpKind, - lhs: ValueId, - rhs: ValueId, - dfg: &DataFlowGraph, -) -> Option { - let lhs_type = dfg.type_of_value(lhs); - let rhs_type = dfg.type_of_value(rhs); - - let get_bit_size = |typ| match typ { - Type::Numeric(NumericType::Signed { bit_size } | NumericType::Unsigned { bit_size }) => { - Some(bit_size) - } - _ => None, - }; - - let lhs_bit_size = get_bit_size(lhs_type)?; - let rhs_bit_size = get_bit_size(rhs_type)?; - use BinaryOpKind::*; - match op { - Add => Some(std::cmp::max(lhs_bit_size, rhs_bit_size) + 1), - Subtract => Some(std::cmp::max(lhs_bit_size, rhs_bit_size) + 1), - Multiply => { - if lhs_bit_size == 1 || rhs_bit_size == 1 { - // Truncation is unnecessary as multiplication by a boolean value cannot cause an overflow. - None - } else { - Some(lhs_bit_size + rhs_bit_size) - } - } - - ShiftLeft => { - if let Some(rhs_constant) = dfg.get_numeric_constant(rhs) { - // Happy case is that we know precisely by how many bits the the integer will - // increase: lhs_bit_size + rhs - return Some(lhs_bit_size + (rhs_constant.to_u128() as u32)); - } - // Unhappy case is that we don't yet know the rhs value, (even though it will - // eventually have to resolve to a constant). The best we can is assume the value of - // rhs to be the maximum value of it's numeric type. If that turns out to be larger - // than the native field's bit size, we full back to using that. - - // The formula for calculating the max bit size of a left shift is: - // lhs_bit_size + 2^{rhs_bit_size} - 1 - // Inferring the max bit size of left shift from its operands can result in huge - // number, that might not only be larger than the native field's max bit size, but - // furthermore might not be representable as a u32. Hence we use overflow checks and - // fallback to the native field's max bits. 
- let field_max_bits = FieldElement::max_num_bits(); - let (rhs_bit_size_pow_2, overflows) = 2_u32.overflowing_pow(rhs_bit_size); - if overflows { - return Some(field_max_bits); - } - let (max_bits_plus_1, overflows) = rhs_bit_size_pow_2.overflowing_add(lhs_bit_size); - if overflows { - return Some(field_max_bits); - } - let max_bit_size = std::cmp::min(max_bits_plus_1 - 1, field_max_bits); - Some(max_bit_size) - } - _ => None, - } -} - /// Converts the given operator to the appropriate BinaryOp. /// Take care when using this to insert a binary instruction: this requires /// checking operator_requires_not and operator_requires_swapped_operands diff --git a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs index 8de09d4309bc..c121ac19ff97 100644 --- a/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs +++ b/noir/noir-repo/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs @@ -13,10 +13,7 @@ use noirc_frontend::monomorphization::ast::{self, Expression, Program}; use crate::{ errors::RuntimeError, - ssa::{ - function_builder::data_bus::DataBusBuilder, - ir::{function::InlineType, instruction::Intrinsic}, - }, + ssa::{function_builder::data_bus::DataBusBuilder, ir::instruction::Intrinsic}, }; use self::{ @@ -61,9 +58,7 @@ pub(crate) fn generate_ssa( if force_brillig_runtime || main.unconstrained { RuntimeType::Brillig } else { - let main_inline_type = - if main.should_fold { InlineType::Fold } else { InlineType::Inline }; - RuntimeType::Acir(main_inline_type) + RuntimeType::Acir(main.inline_type) }, &context, ); diff --git a/noir/noir-repo/compiler/noirc_frontend/Cargo.toml b/noir/noir-repo/compiler/noirc_frontend/Cargo.toml index 84c9393fa374..0430d214d53f 100644 --- a/noir/noir-repo/compiler/noirc_frontend/Cargo.toml +++ b/noir/noir-repo/compiler/noirc_frontend/Cargo.toml @@ -10,10 +10,10 @@ license.workspace = true [dependencies] acvm.workspace = true +noirc_arena.workspace = true 
noirc_errors.workspace = true noirc_printable_type.workspace = true fm.workspace = true -arena.workspace = true iter-extended.workspace = true chumsky.workspace = true thiserror.workspace = true diff --git a/noir/noir-repo/compiler/noirc_frontend/src/ast/function.rs b/noir/noir-repo/compiler/noirc_frontend/src/ast/function.rs index 9816218c5f78..9115178671ed 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/ast/function.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/ast/function.rs @@ -109,6 +109,7 @@ impl From for NoirFunction { Some(FunctionAttribute::Oracle(_)) => FunctionKind::Oracle, Some(FunctionAttribute::Recursive) => FunctionKind::Recursive, Some(FunctionAttribute::Fold) => FunctionKind::Normal, + Some(FunctionAttribute::Inline(_)) => FunctionKind::Normal, None => FunctionKind::Normal, }; diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/errors.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/errors.rs index 67d9a006b22f..8de444252aba 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/errors.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/errors.rs @@ -1,11 +1,11 @@ -use crate::Type; +use crate::{hir::def_collector::dc_crate::CompilationError, Type}; use acvm::FieldElement; -use noirc_errors::Location; +use noirc_errors::{CustomDiagnostic, Location}; use super::value::Value; /// The possible errors that can halt the interpreter. 
-#[derive(Debug)] +#[derive(Debug, Clone)] pub enum InterpreterError { ArgumentCountMismatch { expected: usize, actual: usize, call_location: Location }, TypeMismatch { expected: Type, value: Value, location: Location }, @@ -16,7 +16,7 @@ pub enum InterpreterError { NonBoolUsedInIf { value: Value, location: Location }, NonBoolUsedInConstrain { value: Value, location: Location }, FailingConstraint { message: Option, location: Location }, - NoMethodFound { object: Value, typ: Type, location: Location }, + NoMethodFound { name: String, typ: Type, location: Location }, NonIntegerUsedInLoop { value: Value, location: Location }, NonPointerDereferenced { value: Value, location: Location }, NonTupleOrStructInMemberAccess { value: Value, location: Location }, @@ -51,3 +51,221 @@ pub enum InterpreterError { #[allow(unused)] pub(super) type IResult = std::result::Result; + +impl InterpreterError { + pub fn into_compilation_error_pair(self) -> (CompilationError, fm::FileId) { + let location = self.get_location(); + (CompilationError::InterpreterError(self), location.file) + } + + pub fn get_location(&self) -> Location { + match self { + InterpreterError::ArgumentCountMismatch { call_location: location, .. } + | InterpreterError::TypeMismatch { location, .. } + | InterpreterError::NonComptimeVarReferenced { location, .. } + | InterpreterError::IntegerOutOfRangeForType { location, .. } + | InterpreterError::ErrorNodeEncountered { location, .. } + | InterpreterError::NonFunctionCalled { location, .. } + | InterpreterError::NonBoolUsedInIf { location, .. } + | InterpreterError::NonBoolUsedInConstrain { location, .. } + | InterpreterError::FailingConstraint { location, .. } + | InterpreterError::NoMethodFound { location, .. } + | InterpreterError::NonIntegerUsedInLoop { location, .. } + | InterpreterError::NonPointerDereferenced { location, .. } + | InterpreterError::NonTupleOrStructInMemberAccess { location, .. } + | InterpreterError::NonArrayIndexed { location, .. 
} + | InterpreterError::NonIntegerUsedAsIndex { location, .. } + | InterpreterError::NonIntegerIntegerLiteral { location, .. } + | InterpreterError::NonIntegerArrayLength { location, .. } + | InterpreterError::NonNumericCasted { location, .. } + | InterpreterError::IndexOutOfBounds { location, .. } + | InterpreterError::ExpectedStructToHaveField { location, .. } + | InterpreterError::TypeUnsupported { location, .. } + | InterpreterError::InvalidValueForUnary { location, .. } + | InterpreterError::InvalidValuesForBinary { location, .. } + | InterpreterError::CastToNonNumericType { location, .. } + | InterpreterError::QuoteInRuntimeCode { location, .. } + | InterpreterError::NonStructInConstructor { location, .. } + | InterpreterError::CannotInlineMacro { location, .. } + | InterpreterError::UnquoteFoundDuringEvaluation { location, .. } + | InterpreterError::Unimplemented { location, .. } + | InterpreterError::BreakNotInLoop { location, .. } + | InterpreterError::ContinueNotInLoop { location, .. 
} => *location, + InterpreterError::Break | InterpreterError::Continue => { + panic!("Tried to get the location of Break/Continue error!") + } + } + } +} + +impl From for CustomDiagnostic { + fn from(error: InterpreterError) -> Self { + match error { + InterpreterError::ArgumentCountMismatch { expected, actual, call_location } => { + let only = if expected > actual { "only " } else { "" }; + let plural = if expected == 1 { "" } else { "s" }; + let was_were = if actual == 1 { "was" } else { "were" }; + let msg = format!( + "Expected {expected} argument{plural}, but {only}{actual} {was_were} provided" + ); + + let few_many = if actual < expected { "few" } else { "many" }; + let secondary = format!("Too {few_many} arguments"); + CustomDiagnostic::simple_error(msg, secondary, call_location.span) + } + InterpreterError::TypeMismatch { expected, value, location } => { + let typ = value.get_type(); + let msg = format!("Expected `{expected}` but a value of type `{typ}` was given"); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::NonComptimeVarReferenced { name, location } => { + let msg = format!("Non-comptime variable `{name}` referenced in comptime code"); + let secondary = "Non-comptime variables can't be used in comptime code".to_string(); + CustomDiagnostic::simple_error(msg, secondary, location.span) + } + InterpreterError::IntegerOutOfRangeForType { value, typ, location } => { + let int = match value.try_into_u128() { + Some(int) => int.to_string(), + None => value.to_string(), + }; + let msg = format!("{int} is outside the range of the {typ} type"); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::ErrorNodeEncountered { location } => { + let msg = "Internal Compiler Error: Error node encountered".to_string(); + let secondary = "This is a bug, please report this if found!".to_string(); + CustomDiagnostic::simple_error(msg, secondary, location.span) + } + 
InterpreterError::NonFunctionCalled { value, location } => { + let msg = "Only functions may be called".to_string(); + let secondary = format!("Expression has type {}", value.get_type()); + CustomDiagnostic::simple_error(msg, secondary, location.span) + } + InterpreterError::NonBoolUsedInIf { value, location } => { + let msg = format!("Expected a `bool` but found `{}`", value.get_type()); + let secondary = "If conditions must be a boolean value".to_string(); + CustomDiagnostic::simple_error(msg, secondary, location.span) + } + InterpreterError::NonBoolUsedInConstrain { value, location } => { + let msg = format!("Expected a `bool` but found `{}`", value.get_type()); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::FailingConstraint { message, location } => { + let (primary, secondary) = match message { + Some(msg) => (format!("{msg:?}"), "Assertion failed".into()), + None => ("Assertion failed".into(), String::new()), + }; + CustomDiagnostic::simple_error(primary, secondary, location.span) + } + InterpreterError::NoMethodFound { name, typ, location } => { + let msg = format!("No method named `{name}` found for type `{typ}`"); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::NonIntegerUsedInLoop { value, location } => { + let typ = value.get_type(); + let msg = format!("Non-integer type `{typ}` used in for loop"); + let secondary = if matches!(typ.as_ref(), &Type::FieldElement) { + "`field` is not an integer type, try `u64` instead".to_string() + } else { + String::new() + }; + CustomDiagnostic::simple_error(msg, secondary, location.span) + } + InterpreterError::NonPointerDereferenced { value, location } => { + let typ = value.get_type(); + let msg = format!("Only references may be dereferenced, but found `{typ}`"); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::NonTupleOrStructInMemberAccess { value, location } => { + let msg = 
format!("The type `{}` has no fields to access", value.get_type()); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::NonArrayIndexed { value, location } => { + let msg = format!("Expected an array or slice but found a(n) {}", value.get_type()); + let secondary = "Only arrays or slices may be indexed".into(); + CustomDiagnostic::simple_error(msg, secondary, location.span) + } + InterpreterError::NonIntegerUsedAsIndex { value, location } => { + let msg = format!("Expected an integer but found a(n) {}", value.get_type()); + let secondary = + "Only integers may be indexed. Note that this excludes `field`s".into(); + CustomDiagnostic::simple_error(msg, secondary, location.span) + } + InterpreterError::NonIntegerIntegerLiteral { typ, location } => { + let msg = format!("This integer literal somehow has the type `{typ}`"); + let secondary = "This is likely a bug".into(); + CustomDiagnostic::simple_error(msg, secondary, location.span) + } + InterpreterError::NonIntegerArrayLength { typ, location } => { + let msg = format!("Non-integer array length: `{typ}`"); + let secondary = "Array lengths must be integers".into(); + CustomDiagnostic::simple_error(msg, secondary, location.span) + } + InterpreterError::NonNumericCasted { value, location } => { + let msg = "Only numeric types may be casted".into(); + let secondary = format!("`{}` is non-numeric", value.get_type()); + CustomDiagnostic::simple_error(msg, secondary, location.span) + } + InterpreterError::IndexOutOfBounds { index, length, location } => { + let msg = format!("{index} is out of bounds for the array of length {length}"); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::ExpectedStructToHaveField { value, field_name, location } => { + let typ = value.get_type(); + let msg = format!("The type `{typ}` has no field named `{field_name}`"); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + 
InterpreterError::TypeUnsupported { typ, location } => { + let msg = + format!("The type `{typ}` is currently unsupported in comptime expressions"); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::InvalidValueForUnary { value, operator, location } => { + let msg = format!("`{}` cannot be used with unary {operator}", value.get_type()); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::InvalidValuesForBinary { lhs, rhs, operator, location } => { + let lhs = lhs.get_type(); + let rhs = rhs.get_type(); + let msg = format!("No implementation for `{lhs}` {operator} `{rhs}`",); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::CastToNonNumericType { typ, location } => { + let msg = format!("Cannot cast to non-numeric type `{typ}`"); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::QuoteInRuntimeCode { location } => { + let msg = "`quote` may only be used in comptime code".into(); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::NonStructInConstructor { typ, location } => { + let msg = format!("`{typ}` is not a struct type"); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::CannotInlineMacro { value, location } => { + let msg = "Cannot inline value into runtime code if it contains references".into(); + let secondary = format!("Cannot inline value {value:?}"); + CustomDiagnostic::simple_error(msg, secondary, location.span) + } + InterpreterError::UnquoteFoundDuringEvaluation { location } => { + let msg = "Unquote found during comptime evaluation".into(); + let secondary = "This is a bug".into(); + CustomDiagnostic::simple_error(msg, secondary, location.span) + } + InterpreterError::Unimplemented { item, location } => { + let msg = format!("{item} is currently unimplemented"); + CustomDiagnostic::simple_error(msg, 
String::new(), location.span) + } + InterpreterError::BreakNotInLoop { location } => { + let msg = "There is no loop to break out of!".into(); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::ContinueNotInLoop { location } => { + let msg = "There is no loop to continue!".into(); + CustomDiagnostic::simple_error(msg, String::new(), location.span) + } + InterpreterError::Break => unreachable!("Uncaught InterpreterError::Break"), + InterpreterError::Continue => unreachable!("Uncaught InterpreterError::Continue"), + } + } +} diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/interpreter.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/interpreter.rs index c01c985a40cd..4bdd4eaec9a8 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/interpreter.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/interpreter.rs @@ -506,11 +506,8 @@ impl<'a> Interpreter<'a> { Value::U64(value) => Ok(Value::U64(0 - value)), value => { let location = self.interner.expr_location(&id); - Err(InterpreterError::InvalidValueForUnary { - value, - location, - operator: "minus", - }) + let operator = "minus"; + Err(InterpreterError::InvalidValueForUnary { value, location, operator }) } }, crate::ast::UnaryOp::Not => match rhs { @@ -880,7 +877,7 @@ impl<'a> Interpreter<'a> { if let Some(method) = method { self.call_function(method, arguments, location) } else { - Err(InterpreterError::NoMethodFound { object, typ, location }) + Err(InterpreterError::NoMethodFound { name: method_name.clone(), typ, location }) } } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/mod.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/mod.rs index 26e05d675b35..148fa56b4cb6 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/mod.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/comptime/mod.rs @@ -5,4 +5,5 @@ mod scan; mod tests; mod value; +pub use 
errors::InterpreterError; pub use interpreter::Interpreter; diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs index 7805f36cdb2b..d8839b33ff4e 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs @@ -1,7 +1,7 @@ use super::dc_mod::collect_defs; use super::errors::{DefCollectorErrorKind, DuplicateType}; use crate::graph::CrateId; -use crate::hir::comptime::Interpreter; +use crate::hir::comptime::{Interpreter, InterpreterError}; use crate::hir::def_map::{CrateDefMap, LocalModuleId, ModuleId}; use crate::hir::resolution::errors::ResolverError; @@ -155,6 +155,7 @@ pub enum CompilationError { DefinitionError(DefCollectorErrorKind), ResolverError(ResolverError), TypeError(TypeCheckError), + InterpreterError(InterpreterError), } impl From for CustomDiagnostic { @@ -164,6 +165,7 @@ impl From for CustomDiagnostic { CompilationError::DefinitionError(error) => error.into(), CompilationError::ResolverError(error) => error.into(), CompilationError::TypeError(error) => error.into(), + CompilationError::InterpreterError(error) => error.into(), } } } @@ -500,13 +502,15 @@ impl ResolvedModule { } /// Evaluate all `comptime` expressions in this module - fn evaluate_comptime(&self, interner: &mut NodeInterner) { + fn evaluate_comptime(&mut self, interner: &mut NodeInterner) { let mut interpreter = Interpreter::new(interner); for (_file, function) in &self.functions { - // .unwrap() is temporary here until we can convert - // from InterpreterError to (CompilationError, FileId) - interpreter.scan_function(*function).unwrap(); + // The file returned by the error may be different than the file the + // function is in so only use the error's file id. 
+ if let Err(error) = interpreter.scan_function(*function) { + self.errors.push(error.into_compilation_error_pair()); + } } } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/def_map/mod.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/def_map/mod.rs index 7c0090ff95b5..590c2e3d6b69 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/def_map/mod.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/def_map/mod.rs @@ -5,8 +5,8 @@ use crate::macros_api::MacroProcessor; use crate::node_interner::{FuncId, GlobalId, NodeInterner, StructId}; use crate::parser::{parse_program, ParsedModule, ParserError}; use crate::token::{FunctionAttribute, SecondaryAttribute, TestScope}; -use arena::{Arena, Index}; use fm::{FileId, FileManager}; +use noirc_arena::{Arena, Index}; use noirc_errors::Location; use std::collections::{BTreeMap, HashMap}; mod module_def; diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/resolution/errors.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/resolution/errors.rs index 0fac6f96086c..70e7a8e40f2a 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/resolution/errors.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/resolution/errors.rs @@ -86,6 +86,10 @@ pub enum ResolverError { JumpInConstrainedFn { is_break: bool, span: Span }, #[error("break/continue are only allowed within loops")] JumpOutsideLoop { is_break: bool, span: Span }, + #[error("#[inline(tag)] attribute is only allowed on constrained functions")] + InlineAttributeOnUnconstrained { ident: Ident }, + #[error("#[fold] attribute is only allowed on constrained functions")] + FoldAttributeOnUnconstrained { ident: Ident }, } impl ResolverError { @@ -340,6 +344,30 @@ impl From for Diagnostic { span, ) }, + ResolverError::InlineAttributeOnUnconstrained { ident } => { + let name = &ident.0.contents; + + let mut diag = Diagnostic::simple_error( + format!("misplaced #[inline(tag)] attribute on unconstrained function {name}. 
Only allowed on constrained functions"), + "misplaced #[inline(tag)] attribute".to_string(), + ident.0.span(), + ); + + diag.add_note("The `#[inline(tag)]` attribute specifies to the compiler whether it should diverge from auto-inlining constrained functions".to_owned()); + diag + } + ResolverError::FoldAttributeOnUnconstrained { ident } => { + let name = &ident.0.contents; + + let mut diag = Diagnostic::simple_error( + format!("misplaced #[fold] attribute on unconstrained function {name}. Only allowed on constrained functions"), + "misplaced #[fold] attribute".to_string(), + ident.0.span(), + ); + + diag.add_note("The `#[fold]` attribute specifies whether a constrained function should be treated as a separate circuit rather than inlined into the program entry point".to_owned()); + diag + } } } } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/resolution/resolver.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/resolution/resolver.rs index 2f2677def88a..b1f9e536aa5a 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/resolution/resolver.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/resolution/resolver.rs @@ -926,7 +926,23 @@ impl<'a> Resolver<'a> { let name_ident = HirIdent::non_trait_method(id, location); let attributes = func.attributes().clone(); + let has_inline_attribute = attributes.is_inline(); let should_fold = attributes.is_foldable(); + if !self.inline_attribute_allowed(func) { + if has_inline_attribute { + self.push_err(ResolverError::InlineAttributeOnUnconstrained { + ident: func.name_ident().clone(), + }); + } else if should_fold { + self.push_err(ResolverError::FoldAttributeOnUnconstrained { + ident: func.name_ident().clone(), + }); + } + } + // Both the #[fold] and #[inline(tag)] alter a function's inline type and code generation in similar ways. + // In certain cases such as type checking (for which the following flag will be used) both attributes + // indicate we should code generate in the same way. 
Thus, we unify the attributes into one flag here. + let has_inline_or_fold_attribute = has_inline_attribute || should_fold; let mut generics = vecmap(&self.generics, |(_, typevar, _)| typevar.clone()); let mut parameters = vec![]; @@ -1021,7 +1037,7 @@ impl<'a> Resolver<'a> { has_body: !func.def.body.is_empty(), trait_constraints: self.resolve_trait_constraints(&func.def.where_clause), is_entry_point: self.is_entry_point_function(func), - should_fold, + has_inline_or_fold_attribute, } } @@ -1057,6 +1073,12 @@ impl<'a> Resolver<'a> { } } + fn inline_attribute_allowed(&self, func: &NoirFunction) -> bool { + // Inline attributes are only relevant for constrained functions + // as all unconstrained functions are not inlined + !func.def.is_unconstrained + } + fn declare_numeric_generics(&mut self, params: &[Type], return_type: &Type) { if self.generics.is_empty() { return; diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir/type_check/mod.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir/type_check/mod.rs index 44dab6dee3de..03ebb44fa1f6 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir/type_check/mod.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir/type_check/mod.rs @@ -173,7 +173,7 @@ fn check_if_type_is_valid_for_program_input( ) { let meta = type_checker.interner.function_meta(&func_id); if (meta.is_entry_point && !param.1.is_valid_for_program_input()) - || (meta.should_fold && !param.1.is_valid_non_inlined_function_input()) + || (meta.has_inline_or_fold_attribute && !param.1.is_valid_non_inlined_function_input()) { let span = param.0.span(); errors.push(TypeCheckError::InvalidTypeForEntryPoint { span }); @@ -545,7 +545,7 @@ pub mod test { trait_constraints: Vec::new(), direct_generics: Vec::new(), is_entry_point: true, - should_fold: false, + has_inline_or_fold_attribute: false, }; interner.push_fn_meta(func_meta, func_id); @@ -672,7 +672,7 @@ pub mod test { } fn local_module_id(&self) -> LocalModuleId { - 
LocalModuleId(arena::Index::unsafe_zeroed()) + LocalModuleId(noirc_arena::Index::unsafe_zeroed()) } fn module_id(&self) -> ModuleId { @@ -724,7 +724,7 @@ pub mod test { let mut def_maps = BTreeMap::new(); let file = FileId::default(); - let mut modules = arena::Arena::default(); + let mut modules = noirc_arena::Arena::default(); let location = Location::new(Default::default(), file); modules.insert(ModuleData::new(None, location, false)); diff --git a/noir/noir-repo/compiler/noirc_frontend/src/hir_def/function.rs b/noir/noir-repo/compiler/noirc_frontend/src/hir_def/function.rs index 67b6412a21c5..57d3038a135d 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/hir_def/function.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/hir_def/function.rs @@ -126,8 +126,8 @@ pub struct FuncMeta { pub is_entry_point: bool, /// True if this function is marked with an attribute - /// that indicates it should not be inlined, such as for folding. - pub should_fold: bool, + /// that indicates it should not be inlined, such as `fold` or `inline(never)` + pub has_inline_or_fold_attribute: bool, } impl FuncMeta { diff --git a/noir/noir-repo/compiler/noirc_frontend/src/lexer/token.rs b/noir/noir-repo/compiler/noirc_frontend/src/lexer/token.rs index 0242fc7e7ff9..82e17ac3912d 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/lexer/token.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/lexer/token.rs @@ -581,6 +581,10 @@ impl Attributes { pub fn is_foldable(&self) -> bool { self.function.as_ref().map_or(false, |func_attribute| func_attribute.is_foldable()) } + + pub fn is_inline(&self) -> bool { + self.function.as_ref().map_or(false, |func_attribute| func_attribute.is_inline()) + } } /// An Attribute can be either a Primary Attribute or a Secondary Attribute @@ -641,6 +645,10 @@ impl Attribute { ["test"] => Attribute::Function(FunctionAttribute::Test(TestScope::None)), ["recursive"] => Attribute::Function(FunctionAttribute::Recursive), ["fold"] => 
Attribute::Function(FunctionAttribute::Fold), + ["inline", tag] => { + validate(tag)?; + Attribute::Function(FunctionAttribute::Inline(tag.to_string())) + } ["test", name] => { validate(name)?; let malformed_scope = @@ -693,6 +701,7 @@ pub enum FunctionAttribute { Test(TestScope), Recursive, Fold, + Inline(String), } impl FunctionAttribute { @@ -725,6 +734,13 @@ impl FunctionAttribute { pub fn is_foldable(&self) -> bool { matches!(self, FunctionAttribute::Fold) } + + /// Check whether we have an `inline` attribute + /// Although we also do not want to inline foldable functions, + /// we keep the two attributes distinct for clarity. + pub fn is_inline(&self) -> bool { + matches!(self, FunctionAttribute::Inline(_)) + } } impl fmt::Display for FunctionAttribute { @@ -736,6 +752,7 @@ impl fmt::Display for FunctionAttribute { FunctionAttribute::Oracle(ref k) => write!(f, "#[oracle({k})]"), FunctionAttribute::Recursive => write!(f, "#[recursive]"), FunctionAttribute::Fold => write!(f, "#[fold]"), + FunctionAttribute::Inline(ref k) => write!(f, "#[inline({k})]"), } } } @@ -781,6 +798,7 @@ impl AsRef for FunctionAttribute { FunctionAttribute::Test { .. 
} => "", FunctionAttribute::Recursive => "", FunctionAttribute::Fold => "", + FunctionAttribute::Inline(string) => string, } } } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/monomorphization/ast.rs b/noir/noir-repo/compiler/noirc_frontend/src/monomorphization/ast.rs index ef3af57d3030..468938ecf084 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/monomorphization/ast.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/monomorphization/ast.rs @@ -5,8 +5,11 @@ use noirc_errors::{ Location, }; -use crate::ast::{BinaryOpKind, Distinctness, IntegerBitSize, Signedness, Visibility}; use crate::hir_def::{function::FunctionSignature, types::Type as HirType}; +use crate::{ + ast::{BinaryOpKind, Distinctness, IntegerBitSize, Signedness, Visibility}, + token::{Attributes, FunctionAttribute}, +}; /// The monomorphized AST is expression-based, all statements are also /// folded into this expression enum. Compared to the HIR, the monomorphized @@ -200,6 +203,60 @@ pub enum LValue { pub type Parameters = Vec<(LocalId, /*mutable:*/ bool, /*name:*/ String, Type)>; +/// Represents how an Acir function should be inlined. +/// This type is only relevant for ACIR functions as we do not inline any Brillig functions +#[derive(Default, Clone, Copy, PartialEq, Eq, Debug, Hash)] +pub enum InlineType { + /// The most basic entry point can expect all its functions to be inlined. + /// All function calls are expected to be inlined into a single ACIR. + #[default] + Inline, + /// Functions marked as foldable will not be inlined and compiled separately into ACIR + Fold, + /// Similar to `Fold`, these functions will not be inlined and compile separately into ACIR. + /// They are different from `Fold` though as they are expected to be inlined into the program + /// entry point before being used in the backend. 
+ Never, +} + +impl From<&Attributes> for InlineType { + fn from(attributes: &Attributes) -> Self { + attributes.function.as_ref().map_or(InlineType::default(), |func_attribute| { + match func_attribute { + FunctionAttribute::Fold => InlineType::Fold, + FunctionAttribute::Inline(tag) => { + if tag == "never" { + InlineType::Never + } else { + InlineType::default() + } + } + _ => InlineType::default(), + } + }) + } +} + +impl InlineType { + pub fn is_entry_point(&self) -> bool { + match self { + InlineType::Inline => false, + InlineType::Fold => true, + InlineType::Never => true, + } + } +} + +impl std::fmt::Display for InlineType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + InlineType::Inline => write!(f, "inline"), + InlineType::Fold => write!(f, "fold"), + InlineType::Never => write!(f, "inline(never)"), + } + } +} + #[derive(Debug, Clone, Hash)] pub struct Function { pub id: FuncId, @@ -210,7 +267,7 @@ pub struct Function { pub return_type: Type, pub unconstrained: bool, - pub should_fold: bool, + pub inline_type: InlineType, pub func_sig: FunctionSignature, } diff --git a/noir/noir-repo/compiler/noirc_frontend/src/monomorphization/mod.rs b/noir/noir-repo/compiler/noirc_frontend/src/monomorphization/mod.rs index d7213667c48c..c831e4be2454 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/monomorphization/mod.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/monomorphization/mod.rs @@ -30,6 +30,7 @@ use std::{ unreachable, }; +use self::ast::InlineType; use self::debug_types::DebugTypeTracker; use self::{ ast::{Definition, FuncId, Function, LocalId, Program}, @@ -132,7 +133,7 @@ pub fn monomorphize_debug( .finished_functions .iter() .flat_map(|(_, f)| { - if f.should_fold || f.id == Program::main_id() { + if f.inline_type.is_entry_point() || f.id == Program::main_id() { Some(f.func_sig.clone()) } else { None @@ -304,7 +305,8 @@ impl<'interner> Monomorphizer<'interner> { let return_type = 
Self::convert_type(return_type, meta.location)?; let unconstrained = modifiers.is_unconstrained; - let should_fold = meta.should_fold; + let attributes = self.interner.function_attributes(&f); + let inline_type = InlineType::from(attributes); let parameters = self.parameters(&meta.parameters)?; let body = self.expr(body_expr_id)?; @@ -315,7 +317,7 @@ impl<'interner> Monomorphizer<'interner> { body, return_type, unconstrained, - should_fold, + inline_type, func_sig, }; @@ -1399,7 +1401,7 @@ impl<'interner> Monomorphizer<'interner> { body, return_type, unconstrained, - should_fold: false, + inline_type: InlineType::default(), func_sig: FunctionSignature::default(), }; self.push_function(id, function); @@ -1525,7 +1527,7 @@ impl<'interner> Monomorphizer<'interner> { body, return_type, unconstrained, - should_fold: false, + inline_type: InlineType::default(), func_sig: FunctionSignature::default(), }; self.push_function(id, function); @@ -1650,7 +1652,7 @@ impl<'interner> Monomorphizer<'interner> { body, return_type, unconstrained, - should_fold: false, + inline_type: InlineType::default(), func_sig: FunctionSignature::default(), }; self.push_function(id, function); diff --git a/noir/noir-repo/compiler/noirc_frontend/src/node_interner.rs b/noir/noir-repo/compiler/noirc_frontend/src/node_interner.rs index 9d3a79820dc0..5e4fa3df6c59 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/node_interner.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/node_interner.rs @@ -2,9 +2,9 @@ use std::borrow::Cow; use std::collections::HashMap; use std::ops::Deref; -use arena::{Arena, Index}; use fm::FileId; use iter_extended::vecmap; +use noirc_arena::{Arena, Index}; use noirc_errors::{Location, Span, Spanned}; use petgraph::algo::tarjan_scc; use petgraph::prelude::DiGraph; diff --git a/noir/noir-repo/compiler/noirc_frontend/src/resolve_locations.rs b/noir/noir-repo/compiler/noirc_frontend/src/resolve_locations.rs index b5f1b1d0c646..ac8c96a092ee 100644 --- 
a/noir/noir-repo/compiler/noirc_frontend/src/resolve_locations.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/resolve_locations.rs @@ -1,4 +1,4 @@ -use arena::Index; +use noirc_arena::Index; use noirc_errors::Location; use crate::hir_def::expr::HirExpression; diff --git a/noir/noir-repo/compiler/noirc_frontend/src/tests.rs b/noir/noir-repo/compiler/noirc_frontend/src/tests.rs index 31bf2245b1fb..5d0f2472a43f 100644 --- a/noir/noir-repo/compiler/noirc_frontend/src/tests.rs +++ b/noir/noir-repo/compiler/noirc_frontend/src/tests.rs @@ -31,8 +31,8 @@ mod test { hir::def_map::{CrateDefMap, LocalModuleId}, parse_program, }; - use arena::Arena; use fm::FileManager; + use noirc_arena::Arena; pub(crate) fn has_parser_error(errors: &[(CompilationError, FileId)]) -> bool { errors.iter().any(|(e, _f)| matches!(e, CompilationError::ParseError(_))) @@ -1282,4 +1282,36 @@ fn lambda$f1(mut env$l1: (Field)) -> Field { "#; assert_eq!(get_program_errors(src).len(), 0); } + + #[test] + fn deny_inline_attribute_on_unconstrained() { + let src = r#" + #[inline(never)] + unconstrained fn foo(x: Field, y: Field) { + assert(x != y); + } + "#; + let errors = get_program_errors(src); + assert_eq!(errors.len(), 1); + assert!(matches!( + errors[0].0, + CompilationError::ResolverError(ResolverError::InlineAttributeOnUnconstrained { .. }) + )); + } + + #[test] + fn deny_fold_attribute_on_unconstrained() { + let src = r#" + #[fold] + unconstrained fn foo(x: Field, y: Field) { + assert(x != y); + } + "#; + let errors = get_program_errors(src); + assert_eq!(errors.len(), 1); + assert!(matches!( + errors[0].0, + CompilationError::ResolverError(ResolverError::FoldAttributeOnUnconstrained { .. 
}) + )); + } } diff --git a/noir/noir-repo/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx b/noir/noir-repo/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx index 7329880c7a7a..efa52b2c3f20 100644 --- a/noir/noir-repo/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx +++ b/noir/noir-repo/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx @@ -13,18 +13,21 @@ import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; ## sha256 Given an array of bytes, returns the resulting sha256 hash. +Specify a message_size to hash only the first `message_size` bytes of the input. #include_code sha256 noir_stdlib/src/hash.nr rust example: +#include_code sha256_var test_programs/execution_success/sha256/src/main.nr rust ```rust fn main() { let x = [163, 117, 178, 149]; // some random bytes - let hash = std::hash::sha256(x); + let hash = std::sha256::sha256_var(x, 4); } ``` + ## blake2s diff --git a/noir/noir-repo/flake.lock b/noir/noir-repo/flake.lock deleted file mode 100644 index 5a9f9470a1fd..000000000000 --- a/noir/noir-repo/flake.lock +++ /dev/null @@ -1,170 +0,0 @@ -{ - "nodes": { - "crane": { - "inputs": { - "flake-compat": [ - "flake-compat" - ], - "flake-utils": [ - "flake-utils" - ], - "nixpkgs": [ - "nixpkgs" - ], - "rust-overlay": "rust-overlay" - }, - "locked": { - "lastModified": 1681177078, - "narHash": "sha256-ZNIjBDou2GOabcpctiQykEQVkI8BDwk7TyvlWlI4myE=", - "owner": "ipetkov", - "repo": "crane", - "rev": "0c9f468ff00576577d83f5019a66c557ede5acf6", - "type": "github" - }, - "original": { - "owner": "ipetkov", - "repo": "crane", - "type": "github" - } - }, - "fenix": { - "inputs": { - "nixpkgs": [ - "nixpkgs" - ], - "rust-analyzer-src": "rust-analyzer-src" - }, - "locked": { - "lastModified": 1694499657, - "narHash": "sha256-u/fZtLtN7VcDrMMVrdsFy93PEkaiK+tNpJT9on4SGdU=", - "owner": "nix-community", - "repo": "fenix", - "rev": "2895ff377cbb3cb6f5dd92066734b0447cb04e20", - "type": 
"github" - }, - "original": { - "owner": "nix-community", - "repo": "fenix", - "type": "github" - } - }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1681202837, - "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "cfacdce06f30d2b68473a46042957675eebb3401", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1695559356, - "narHash": "sha256-kXZ1pUoImD9OEbPCwpTz4tHsNTr4CIyIfXb3ocuR8sI=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "261abe8a44a7e8392598d038d2e01f7b33cf26d0", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.05", - "repo": "nixpkgs", - "type": "github" - } - }, - "root": { - "inputs": { - "crane": "crane", - "fenix": "fenix", - "flake-compat": "flake-compat", - "flake-utils": "flake-utils", - "nixpkgs": "nixpkgs" - } - }, - "rust-analyzer-src": { - "flake": false, - "locked": { - "lastModified": 1694421477, - "narHash": "sha256-df6YZzR57VFzkOPwIohJfC0fRwgq6yUPbMJkKAtQyAE=", - "owner": "rust-lang", - "repo": "rust-analyzer", - "rev": "cc6c8209cbaf7df55013977cf5cc8488d6b7ff1c", - "type": "github" - }, - "original": { - "owner": "rust-lang", - "ref": "nightly", - "repo": "rust-analyzer", - "type": "github" - } - }, - "rust-overlay": { - "inputs": { - "flake-utils": [ - "crane", - "flake-utils" - ], - "nixpkgs": [ - "crane", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1694484610, - "narHash": 
"sha256-aeSDkp7fkAqtVjW3QUn7vq7BKNlFul/BiGgdv7rK+mA=", - "owner": "oxalica", - "repo": "rust-overlay", - "rev": "c5b977a7e6a295697fa1f9c42174fd6313b38df4", - "type": "github" - }, - "original": { - "owner": "oxalica", - "repo": "rust-overlay", - "type": "github" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/noir/noir-repo/noir_stdlib/src/hash.nr b/noir/noir-repo/noir_stdlib/src/hash.nr index 26a9fa6c2c0f..ef1cb0889d74 100644 --- a/noir/noir-repo/noir_stdlib/src/hash.nr +++ b/noir/noir-repo/noir_stdlib/src/hash.nr @@ -4,6 +4,7 @@ mod poseidon2; use crate::default::Default; use crate::uint128::U128; +use crate::sha256::{digest, sha256_var}; #[foreign(sha256)] // docs:start:sha256 diff --git a/noir/noir-repo/noir_stdlib/src/sha256.nr b/noir/noir-repo/noir_stdlib/src/sha256.nr index 8ca6808568d2..d856043fcfa8 100644 --- a/noir/noir-repo/noir_stdlib/src/sha256.nr +++ b/noir/noir-repo/noir_stdlib/src/sha256.nr @@ -17,19 +17,42 @@ fn msg_u8_to_u32(msg: [u8; 64]) -> [u32; 16] { } // SHA-256 hash function pub fn digest(msg: [u8; N]) -> [u8; 32] { + sha256_var(msg, N) +} + +fn hash_final_block(msg_block: [u8; 64], mut state: [u32; 8]) -> [u8; 32] { + let mut out_h: [u8; 32] = [0; 32]; // Digest as sequence of bytes + + // Hash final padded block + state = crate::hash::sha256_compression(msg_u8_to_u32(msg_block), state); + + // Return final hash as byte array + for j in 0..8 { + let h_bytes = (state[7 - j] as Field).to_le_bytes(4); + for k in 0..4 { + out_h[31 - 4*j - k] = h_bytes[k]; + } + } + + out_h +} + +// Variable size SHA-256 hash +pub fn sha256_var(msg: [u8; N], message_size: u64) -> [u8; 32] { let mut msg_block: 
[u8; 64] = [0; 64]; let mut h: [u32; 8] = [1779033703, 3144134277, 1013904242, 2773480762, 1359893119, 2600822924, 528734635, 1541459225]; // Intermediate hash, starting with the canonical initial value - let mut out_h: [u8; 32] = [0; 32]; // Digest as sequence of bytes let mut i: u64 = 0; // Message byte pointer for k in 0..N { - // Populate msg_block - msg_block[i] = msg[k]; - i = i + 1; - if i == 64 { - // Enough to hash block - h = crate::hash::sha256_compression(msg_u8_to_u32(msg_block), h); + if k < message_size { + // Populate msg_block + msg_block[i] = msg[k]; + i = i + 1; + if i == 64 { + // Enough to hash block + h = crate::hash::sha256_compression(msg_u8_to_u32(msg_block), h); - i = 0; + i = 0; + } } } // Pad the rest such that we have a [u32; 2] block at the end representing the length @@ -53,7 +76,7 @@ pub fn digest(msg: [u8; N]) -> [u8; 32] { i = 0; } - let len = 8 * msg.len(); + let len = 8 * message_size; let len_bytes = (len as Field).to_le_bytes(8); for _i in 0..64 { // In any case, fill blocks up with zeros until the last 64 (i.e. until i = 56). 
@@ -67,16 +90,5 @@ pub fn digest(msg: [u8; N]) -> [u8; 32] { i += 8; } } - // Hash final padded block - h = crate::hash::sha256_compression(msg_u8_to_u32(msg_block), h); - - // Return final hash as byte array - for j in 0..8 { - let h_bytes = (h[7 - j] as Field).to_le_bytes(4); - for k in 0..4 { - out_h[31 - 4*j - k] = h_bytes[k]; - } - } - - out_h + hash_final_block(msg_block, h) } diff --git a/noir/noir-repo/test_programs/execution_success/inline_never_basic/Nargo.toml b/noir/noir-repo/test_programs/execution_success/inline_never_basic/Nargo.toml new file mode 100644 index 000000000000..16691770d764 --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/inline_never_basic/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "inline_never_basic" +type = "bin" +authors = [""] +compiler_version = ">=0.27.0" + +[dependencies] \ No newline at end of file diff --git a/noir/noir-repo/test_programs/execution_success/inline_never_basic/Prover.toml b/noir/noir-repo/test_programs/execution_success/inline_never_basic/Prover.toml new file mode 100644 index 000000000000..f28f2f8cc48f --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/inline_never_basic/Prover.toml @@ -0,0 +1,2 @@ +x = "5" +y = "10" diff --git a/noir/noir-repo/test_programs/execution_success/inline_never_basic/src/main.nr b/noir/noir-repo/test_programs/execution_success/inline_never_basic/src/main.nr new file mode 100644 index 000000000000..1922aaedb6c8 --- /dev/null +++ b/noir/noir-repo/test_programs/execution_success/inline_never_basic/src/main.nr @@ -0,0 +1,8 @@ +fn main(x: Field, y: pub Field) { + basic_check(x, y); +} + +#[inline(never)] +fn basic_check(x: Field, y: Field) { + assert(x != y); +} diff --git a/noir/noir-repo/test_programs/execution_success/sha256/src/main.nr b/noir/noir-repo/test_programs/execution_success/sha256/src/main.nr index fd5340e2384d..d4240ded8b38 100644 --- a/noir/noir-repo/test_programs/execution_success/sha256/src/main.nr +++ 
b/noir/noir-repo/test_programs/execution_success/sha256/src/main.nr @@ -14,6 +14,8 @@ use dep::std; fn main(x: Field, result: [u8; 32]) { // We use the `as` keyword here to denote the fact that we want to take just the first byte from the x Field // The padding is taken care of by the program - let digest = std::hash::sha256([x as u8]); + // docs:start:sha256_var + let digest = std::hash::sha256_var([x as u8], 1); + // docs:end:sha256_var assert(digest == result); } diff --git a/noir/noir-repo/tooling/debugger/ignored-tests.txt b/noir/noir-repo/tooling/debugger/ignored-tests.txt index 3b63f8d55427..8f49fe273e16 100644 --- a/noir/noir-repo/tooling/debugger/ignored-tests.txt +++ b/noir/noir-repo/tooling/debugger/ignored-tests.txt @@ -17,4 +17,4 @@ fold_basic_nested_call fold_call_witness_condition fold_after_inlined_calls fold_numeric_generic_poseidon - +inline_never_basic diff --git a/noir/noir-repo/tooling/noir_js_backend_barretenberg/package.json b/noir/noir-repo/tooling/noir_js_backend_barretenberg/package.json index 438e91ff3020..b57822696742 100644 --- a/noir/noir-repo/tooling/noir_js_backend_barretenberg/package.json +++ b/noir/noir-repo/tooling/noir_js_backend_barretenberg/package.json @@ -3,7 +3,7 @@ "contributors": [ "The Noir Team " ], - "version": "0.27.0", + "version": "0.28.0", "packageManager": "yarn@3.5.1", "license": "(MIT OR Apache-2.0)", "type": "module", diff --git a/noir/noir-repo/tooling/noirc_abi_wasm/build.sh b/noir/noir-repo/tooling/noirc_abi_wasm/build.sh index ee93413ab85a..c07d2d8a4c1d 100755 --- a/noir/noir-repo/tooling/noirc_abi_wasm/build.sh +++ b/noir/noir-repo/tooling/noirc_abi_wasm/build.sh @@ -25,7 +25,7 @@ function run_if_available { require_command jq require_command cargo require_command wasm-bindgen -# require_command wasm-opt +#require_command wasm-opt self_path=$(dirname "$(readlink -f "$0")") pname=$(cargo read-manifest | jq -r '.name') diff --git a/noir/noir-repo/compiler/utils/iter-extended/Cargo.toml 
b/noir/noir-repo/utils/iter-extended/Cargo.toml similarity index 100% rename from noir/noir-repo/compiler/utils/iter-extended/Cargo.toml rename to noir/noir-repo/utils/iter-extended/Cargo.toml diff --git a/noir/noir-repo/compiler/utils/iter-extended/src/lib.rs b/noir/noir-repo/utils/iter-extended/src/lib.rs similarity index 100% rename from noir/noir-repo/compiler/utils/iter-extended/src/lib.rs rename to noir/noir-repo/utils/iter-extended/src/lib.rs diff --git a/noir/scripts/test_native.sh b/noir/scripts/test_native.sh index 5b1bb1b180ae..1f6d633935db 100755 --- a/noir/scripts/test_native.sh +++ b/noir/scripts/test_native.sh @@ -14,4 +14,4 @@ RUSTFLAGS=-Dwarnings cargo clippy --workspace --locked --release ./.github/scripts/cargo-binstall-install.sh cargo-binstall cargo-nextest --version 0.9.67 -y --secure -cargo nextest run --locked --release -E '!test(hello_world_example) & !test(simple_verifier_codegen)' +cargo nextest run --workspace --locked --release -E '!test(hello_world_example) & !test(simple_verifier_codegen)' diff --git a/scripts/earthly-ci b/scripts/earthly-ci new file mode 100755 index 000000000000..43eeb9b17aa8 --- /dev/null +++ b/scripts/earthly-ci @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# A wrapper for Earthly that is meant to caught signs of known intermittent failures and continue. +# The silver lining is if Earthly does crash, the cache can pick up the build. +set -eu -o pipefail + +# Flag to determine if -i is present +INTERACTIVE=false +# Check for -i flag in the arguments +for arg in "$@"; do + if [ "$arg" == "-i" ] || [ "$arg" == "--interactive" ]; then + INTERACTIVE=true + break + fi +done + +OUTPUT_FILE=$(mktemp) +# capture output to handle earthly edge cases +if $INTERACTIVE ; then + # don't play nice with tee if interactive + earthly $@ +elif ! 
earthly $@ 2>&1 | tee $OUTPUT_FILE >&2 ; then + # we try earthly once, capturing output + # if we get one of our (unfortunate) known failures, handle retries + # TODO potentially handle other intermittent errors here + if grep 'failed to get edge: inconsistent graph state' $OUTPUT_FILE >/dev/null ; then + # TODO when earthly is overloaded we sometimes get + # 'failed to solve: failed to get edge: inconsistent graph state' + echo "Got 'inconsistent graph state'. Restarting earthly. See https://github.com/earthly/earthly/issues/2454'" + earthly $@ + # TODO handle + # could not configure satellite: failed getting org: unable to authenticate: failed to execute login request: Post + else + # otherwise, propagate error + exit 1 + fi +fi diff --git a/yarn-project/circuit-types/src/tx_effect.ts b/yarn-project/circuit-types/src/tx_effect.ts index 54664d104fb2..7d2e991602fc 100644 --- a/yarn-project/circuit-types/src/tx_effect.ts +++ b/yarn-project/circuit-types/src/tx_effect.ts @@ -33,7 +33,8 @@ export class TxEffect { */ public nullifiers: Fr[], /** - * The L2 to L1 messages to be inserted into the messagebox on L1. + * The hash of L2 to L1 messages to be inserted into the messagebox on L1. 
+ * TODO(just-mitch): rename to l2ToL1MsgHashes */ public l2ToL1Msgs: Fr[], /** diff --git a/yarn-project/circuits.js/src/constants.gen.ts b/yarn-project/circuits.js/src/constants.gen.ts index 096d8663dae1..62a986338e19 100644 --- a/yarn-project/circuits.js/src/constants.gen.ts +++ b/yarn-project/circuits.js/src/constants.gen.ts @@ -141,6 +141,7 @@ export const PUBLIC_CIRCUIT_PUBLIC_INPUTS_LENGTH = SIDE_EFFECT_LENGTH * MAX_UNENCRYPTED_LOGS_PER_CALL + 1 + HEADER_LENGTH + + GLOBAL_VARIABLES_LENGTH + AZTEC_ADDRESS_LENGTH + /* revert_code */ 1 + 2 * GAS_LENGTH + diff --git a/yarn-project/circuits.js/src/structs/__snapshots__/public_call_stack_item.test.ts.snap b/yarn-project/circuits.js/src/structs/__snapshots__/public_call_stack_item.test.ts.snap index 18794323156e..733ca2607b5b 100644 --- a/yarn-project/circuits.js/src/structs/__snapshots__/public_call_stack_item.test.ts.snap +++ b/yarn-project/circuits.js/src/structs/__snapshots__/public_call_stack_item.test.ts.snap @@ -1,9 +1,9 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`PublicCallStackItem Computes a callstack item hash 1`] = `"0x1f3f1902ca41ffd6fd7191fa5a52edd677444a9b6ae8f4448336fa71a4b2d5cc"`; +exports[`PublicCallStackItem Computes a callstack item hash 1`] = `"0x0e18ddd9aaddae02d45598f0278d925e289913384d6e15057ce5b4a9e8e7488d"`; -exports[`PublicCallStackItem Computes a callstack item request hash 1`] = `"0x1b06f4a4960455e9f01c20d4cb01afbf8c8f39eb50094c5d1ad6725ced0f7d08"`; +exports[`PublicCallStackItem Computes a callstack item request hash 1`] = `"0x22848497ff97ff3a4517aec32454059030fb5a3ef4f3ca533ee40132d7a63aea"`; -exports[`PublicCallStackItem computes empty item hash 1`] = `Fr<0x040c3667dd703bad4465ba5d12e7a422959395f76299794aa9eeaf5044d9e157>`; +exports[`PublicCallStackItem computes empty item hash 1`] = `Fr<0x004e1dc292cd5919dcea653efb6d791458a1eee853432e27d385ef56714edce9>`; -exports[`PublicCallStackItem computes hash 1`] = 
`Fr<0x0f3fde3c615e9d95337fbbf3f835b3e26187de0de9f199320b53355f4089bb88>`; +exports[`PublicCallStackItem computes hash 1`] = `Fr<0x1efb84fc01ae8d6e8c27826100655f2d24344fa6edef9907649243770fc0798d>`; diff --git a/yarn-project/circuits.js/src/structs/__snapshots__/public_circuit_public_inputs.test.ts.snap b/yarn-project/circuits.js/src/structs/__snapshots__/public_circuit_public_inputs.test.ts.snap index 55e7a2367875..7b63132cfd70 100644 --- a/yarn-project/circuits.js/src/structs/__snapshots__/public_circuit_public_inputs.test.ts.snap +++ b/yarn-project/circuits.js/src/structs/__snapshots__/public_circuit_public_inputs.test.ts.snap @@ -1,5 +1,5 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`PublicCircuitPublicInputs computes empty inputs hash 1`] = `Fr<0x237c89f8b29c3fb169b889940a714b3c72017cb2941d0724d4668a030794d2fb>`; +exports[`PublicCircuitPublicInputs computes empty inputs hash 1`] = `Fr<0x2d91debc43bd6354caef4fd152975e7c6dd44e8623b6b62c21b9f547f2fabd32>`; -exports[`PublicCircuitPublicInputs hash matches snapshot 1`] = `Fr<0x0d22cf387fb73386318033d92d07d203ad5c3d1e332734fa58b9ec08fbf0ceac>`; +exports[`PublicCircuitPublicInputs hash matches snapshot 1`] = `Fr<0x1710f7b36cf97619af020da867f7029d826d45b8237e6205981d1c15307875e0>`; diff --git a/yarn-project/circuits.js/src/structs/kernel/combined_constant_data.ts b/yarn-project/circuits.js/src/structs/kernel/combined_constant_data.ts index 189d6a277642..2fea7415bc7b 100644 --- a/yarn-project/circuits.js/src/structs/kernel/combined_constant_data.ts +++ b/yarn-project/circuits.js/src/structs/kernel/combined_constant_data.ts @@ -1,5 +1,7 @@ import { BufferReader, serializeToBuffer } from '@aztec/foundation/serialize'; +import { type FieldsOf } from '@aztec/foundation/types'; +import { GlobalVariables } from '../global_variables.js'; import { Header } from '../header.js'; import { TxContext } from '../tx_context.js'; @@ -8,9 +10,7 @@ import { TxContext } from '../tx_context.js'; */ export class 
CombinedConstantData { constructor( - /** - * Header of a block whose state is used during execution (not the block the transaction is included in). - */ + /** Header of a block whose state is used during execution (not the block the transaction is included in). */ public historicalHeader: Header, /** * Context of the transaction. @@ -21,10 +21,17 @@ export class CombinedConstantData { * protocol to execute and prove the transaction. */ public txContext: TxContext, + + /** Present when output by a public kernel, empty otherwise. */ + public globalVariables: GlobalVariables, ) {} toBuffer() { - return serializeToBuffer(this.historicalHeader, this.txContext); + return serializeToBuffer(this.historicalHeader, this.txContext, this.globalVariables); + } + + static from({ historicalHeader, txContext, globalVariables }: FieldsOf): CombinedConstantData { + return new CombinedConstantData(historicalHeader, txContext, globalVariables); } /** @@ -34,10 +41,14 @@ export class CombinedConstantData { */ static fromBuffer(buffer: Buffer | BufferReader): CombinedConstantData { const reader = BufferReader.asReader(buffer); - return new CombinedConstantData(reader.readObject(Header), reader.readObject(TxContext)); + return new CombinedConstantData( + reader.readObject(Header), + reader.readObject(TxContext), + reader.readObject(GlobalVariables), + ); } static empty() { - return new CombinedConstantData(Header.empty(), TxContext.empty()); + return new CombinedConstantData(Header.empty(), TxContext.empty(), GlobalVariables.empty()); } } diff --git a/yarn-project/circuits.js/src/structs/public_circuit_public_inputs.ts b/yarn-project/circuits.js/src/structs/public_circuit_public_inputs.ts index 220101ff45a7..90b2f337de6d 100644 --- a/yarn-project/circuits.js/src/structs/public_circuit_public_inputs.ts +++ b/yarn-project/circuits.js/src/structs/public_circuit_public_inputs.ts @@ -29,6 +29,7 @@ import { CallContext } from './call_context.js'; import { ContractStorageRead } from 
'./contract_storage_read.js'; import { ContractStorageUpdateRequest } from './contract_storage_update_request.js'; import { Gas } from './gas.js'; +import { GlobalVariables } from './global_variables.js'; import { Header } from './header.js'; import { L2ToL1Message } from './l2_to_l1_message.js'; import { ReadRequest } from './read_request.js'; @@ -112,6 +113,8 @@ export class PublicCircuitPublicInputs { * previous to the one in which the tx is included. */ public historicalHeader: Header, + /** Global variables for the block. */ + public globalVariables: GlobalVariables, /** * Address of the prover. */ @@ -163,6 +166,7 @@ export class PublicCircuitPublicInputs { makeTuple(MAX_UNENCRYPTED_LOGS_PER_CALL, SideEffect.empty), Fr.ZERO, Header.empty(), + GlobalVariables.empty(), AztecAddress.ZERO, RevertCode.OK, Gas.empty(), @@ -193,6 +197,7 @@ export class PublicCircuitPublicInputs { isArrayEmpty(this.unencryptedLogsHashes, item => item.isEmpty()) && this.unencryptedLogPreimagesLength.isZero() && this.historicalHeader.isEmpty() && + this.globalVariables.isEmpty() && this.proverAddress.isZero() && this.revertCode.isOK() && this.startGasLeft.isEmpty() && @@ -224,6 +229,7 @@ export class PublicCircuitPublicInputs { fields.unencryptedLogsHashes, fields.unencryptedLogPreimagesLength, fields.historicalHeader, + fields.globalVariables, fields.proverAddress, fields.revertCode, fields.startGasLeft, @@ -274,6 +280,7 @@ export class PublicCircuitPublicInputs { reader.readArray(MAX_UNENCRYPTED_LOGS_PER_CALL, SideEffect), reader.readObject(Fr), reader.readObject(Header), + reader.readObject(GlobalVariables), reader.readObject(AztecAddress), reader.readObject(RevertCode), reader.readObject(Gas), @@ -302,6 +309,7 @@ export class PublicCircuitPublicInputs { reader.readArray(MAX_UNENCRYPTED_LOGS_PER_CALL, SideEffect), reader.readField(), Header.fromFields(reader), + GlobalVariables.fromFields(reader), AztecAddress.fromFields(reader), RevertCode.fromFields(reader), 
Gas.fromFields(reader), diff --git a/yarn-project/circuits.js/src/tests/factories.ts b/yarn-project/circuits.js/src/tests/factories.ts index 7fdb5eb4f56e..4d22544afd8c 100644 --- a/yarn-project/circuits.js/src/tests/factories.ts +++ b/yarn-project/circuits.js/src/tests/factories.ts @@ -183,7 +183,7 @@ export function makeTxContext(seed: number = 1): TxContext { * @returns A constant data object. */ export function makeConstantData(seed = 1): CombinedConstantData { - return new CombinedConstantData(makeHeader(seed, undefined), makeTxContext(seed + 4)); + return new CombinedConstantData(makeHeader(seed, undefined), makeTxContext(seed + 4), makeGlobalVariables(seed + 5)); } /** @@ -296,7 +296,7 @@ export function makeRollupValidationRequests(seed = 1) { } export function makeCombinedConstantData(seed = 1): CombinedConstantData { - return new CombinedConstantData(makeHeader(seed), makeTxContext(seed + 0x100)); + return new CombinedConstantData(makeHeader(seed), makeTxContext(seed + 0x100), makeGlobalVariables(seed + 0x200)); } /** @@ -459,6 +459,7 @@ export function makePublicCircuitPublicInputs( tupleGenerator(MAX_UNENCRYPTED_LOGS_PER_CALL, sideEffectFromNumber, seed + 0x901, SideEffect.empty), fr(seed + 0x902), makeHeader(seed + 0xa00, undefined), + makeGlobalVariables(seed + 0xa01), makeAztecAddress(seed + 0xb01), RevertCode.OK, makeGas(seed + 0xc00), diff --git a/yarn-project/end-to-end/src/benchmarks/bench_tx_size_fees.test.ts b/yarn-project/end-to-end/src/benchmarks/bench_tx_size_fees.test.ts index 0ab7176c82c2..d20aa63ad5a9 100644 --- a/yarn-project/end-to-end/src/benchmarks/bench_tx_size_fees.test.ts +++ b/yarn-project/end-to-end/src/benchmarks/bench_tx_size_fees.test.ts @@ -61,12 +61,12 @@ describe('benchmarks/tx_size_fees', () => { await token.methods.mint_public(aliceWallet.getAddress(), 100e9).send().wait(); }); - it.each<() => Promise>([ - () => Promise.resolve(undefined), - () => NativeFeePaymentMethod.create(aliceWallet), - () => Promise.resolve(new 
PublicFeePaymentMethod(token.address, fpc.address, aliceWallet)), - () => Promise.resolve(new PrivateFeePaymentMethod(token.address, fpc.address, aliceWallet)), - ])('sends a tx with a fee', async createPaymentMethod => { + it.each<[string, () => Promise]>([ + ['no', () => Promise.resolve(undefined)], + ['native fee', () => NativeFeePaymentMethod.create(aliceWallet)], + ['public fee', () => Promise.resolve(new PublicFeePaymentMethod(token.address, fpc.address, aliceWallet))], + ['private fee', () => Promise.resolve(new PrivateFeePaymentMethod(token.address, fpc.address, aliceWallet))], + ] as const)('sends a tx with a fee with %s payment method', async (_name, createPaymentMethod) => { const paymentMethod = await createPaymentMethod(); const gasSettings = GasSettings.default(); const tx = await token.methods diff --git a/yarn-project/end-to-end/src/e2e_account_init_fees.test.ts b/yarn-project/end-to-end/src/flakey_e2e_account_init_fees.test.ts similarity index 100% rename from yarn-project/end-to-end/src/e2e_account_init_fees.test.ts rename to yarn-project/end-to-end/src/flakey_e2e_account_init_fees.test.ts diff --git a/yarn-project/noir-protocol-circuits-types/src/type_conversion.ts b/yarn-project/noir-protocol-circuits-types/src/type_conversion.ts index 818ea14d4d0e..1e758ef4c80f 100644 --- a/yarn-project/noir-protocol-circuits-types/src/type_conversion.ts +++ b/yarn-project/noir-protocol-circuits-types/src/type_conversion.ts @@ -1195,6 +1195,7 @@ export function mapCombinedConstantDataFromNoir(combinedConstantData: CombinedCo return new CombinedConstantData( mapHeaderFromNoir(combinedConstantData.historical_header), mapTxContextFromNoir(combinedConstantData.tx_context), + mapGlobalVariablesFromNoir(combinedConstantData.global_variables), ); } @@ -1207,6 +1208,7 @@ export function mapCombinedConstantDataToNoir(combinedConstantData: CombinedCons return { historical_header: mapHeaderToNoir(combinedConstantData.historicalHeader), tx_context: 
mapTxContextToNoir(combinedConstantData.txContext), + global_variables: mapGlobalVariablesToNoir(combinedConstantData.globalVariables), }; } @@ -1561,6 +1563,7 @@ export function mapPublicCircuitPublicInputsToNoir( unencrypted_logs_hashes: mapTuple(publicInputs.unencryptedLogsHashes, mapSideEffectToNoir), unencrypted_log_preimages_length: mapFieldToNoir(publicInputs.unencryptedLogPreimagesLength), historical_header: mapHeaderToNoir(publicInputs.historicalHeader), + global_variables: mapGlobalVariablesToNoir(publicInputs.globalVariables), prover_address: mapAztecAddressToNoir(publicInputs.proverAddress), revert_code: mapRevertCodeToNoir(publicInputs.revertCode), start_gas_left: mapGasToNoir(publicInputs.startGasLeft), diff --git a/yarn-project/sequencer-client/src/global_variable_builder/global_builder.ts b/yarn-project/sequencer-client/src/global_variable_builder/global_builder.ts index 3ec8a8eb64cd..2954703d6420 100644 --- a/yarn-project/sequencer-client/src/global_variable_builder/global_builder.ts +++ b/yarn-project/sequencer-client/src/global_variable_builder/global_builder.ts @@ -86,11 +86,17 @@ export class SimpleTestGlobalVariableBuilder implements GlobalVariableBuilder { lastTimestamp = new Fr(lastTimestamp.value + 1n); } - this.log.debug( - `Built global variables for block ${blockNumber}: (${chainId}, ${version}, ${blockNumber}, ${lastTimestamp}, ${coinbase}, ${feeRecipient})`, - ); - const gasFees = GasFees.default(); + const globalVariables = new GlobalVariables( + chainId, + version, + blockNumber, + lastTimestamp, + coinbase, + feeRecipient, + gasFees, + ); + this.log.debug(`Built global variables for block ${blockNumber}`, globalVariables.toJSON()); return new GlobalVariables(chainId, version, blockNumber, lastTimestamp, coinbase, feeRecipient, gasFees); } } diff --git a/yarn-project/simulator/src/public/abstract_phase_manager.ts b/yarn-project/simulator/src/public/abstract_phase_manager.ts index 27f513dd4d2a..f84298d6dea4 100644 --- 
a/yarn-project/simulator/src/public/abstract_phase_manager.ts +++ b/yarn-project/simulator/src/public/abstract_phase_manager.ts @@ -421,6 +421,7 @@ export abstract class AbstractPhaseManager { ), unencryptedLogPreimagesLength, historicalHeader: this.historicalHeader, + globalVariables: this.globalVariables, startGasLeft: Gas.from(result.startGasLeft), endGasLeft: Gas.from(result.endGasLeft), transactionFee: result.transactionFee, diff --git a/yarn-project/simulator/src/public/executor.ts b/yarn-project/simulator/src/public/executor.ts index 9e5c3a098c1b..a5fd6a081770 100644 --- a/yarn-project/simulator/src/public/executor.ts +++ b/yarn-project/simulator/src/public/executor.ts @@ -207,6 +207,8 @@ async function executePublicFunctionAcvm( const nestedExecutions = context.getNestedExecutions(); const unencryptedLogs = context.getUnencryptedLogs(); + + // TODO(palla/gas): We should be loading these values from the returned PublicCircuitPublicInputs const startGasLeft = context.availableGas; const endGasLeft = context.availableGas; // No gas consumption in non-AVM