From aecb979d98b490030e2eaa8d04680c96afddb771 Mon Sep 17 00:00:00 2001 From: porcuquine Date: Fri, 2 Jun 2023 14:39:44 -0700 Subject: [PATCH 1/4] Assign correct FIELD. --- src/field.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/field.rs b/src/field.rs index beab15436c..3d06836b31 100644 --- a/src/field.rs +++ b/src/field.rs @@ -215,11 +215,11 @@ impl LurkField for blstrs::Scalar { const FIELD: LanguageField = LanguageField::BLS12_381; } -impl LurkField for pasta_curves::Fp { +impl LurkField for pasta_curves::pallas::Scalar { const FIELD: LanguageField = LanguageField::Pallas; } -impl LurkField for pasta_curves::Fq { +impl LurkField for pasta_curves::vesta::Scalar { const FIELD: LanguageField = LanguageField::Vesta; } @@ -332,6 +332,7 @@ impl<'de, F: LurkField> Deserialize<'de> for FWrap { pub mod tests { use crate::light_data::Encodable; use blstrs::Scalar as Fr; + use pasta_curves::{pallas, vesta}; use super::*; @@ -347,11 +348,11 @@ pub mod tests { repr_bytes_consistency(f1) } #[test] - fn prop_pallas_repr_bytes_consistency(f1 in any::>()) { + fn prop_pallas_repr_bytes_consistency(f1 in any::>()) { repr_bytes_consistency(f1) } #[test] - fn prop_vesta_repr_bytes_consistency(f1 in any::>()) { + fn prop_vesta_repr_bytes_consistency(f1 in any::>()) { repr_bytes_consistency(f1) } } @@ -406,11 +407,11 @@ pub mod tests { repr_canonicity(f1) } #[test] - fn prop_pallas_repr_canonicity(f1 in any::>()) { + fn prop_pallas_repr_canonicity(f1 in any::>()) { repr_canonicity(f1) } #[test] - fn prop_vesta_repr_canonicity(f1 in any::>()) { + fn prop_vesta_repr_canonicity(f1 in any::>()) { repr_canonicity(f1) } #[test] @@ -437,7 +438,7 @@ pub mod tests { proptest! 
{ #[test] fn prop_pallas_tag_roundtrip(x in any::()){ - let f1 = pasta_curves::Fp::from(x); + let f1 = pallas::Scalar::from(x); let bytes = f1.to_repr().as_ref().to_vec(); let mut bytes_from_u64 = [0u8; 32]; bytes_from_u64[..8].copy_from_slice(&x.to_le_bytes()); @@ -446,7 +447,7 @@ pub mod tests { #[test] fn prop_vesta_tag_roundtrip(x in any::()){ - let f1 = pasta_curves::Fq::from(x); + let f1 = vesta::Scalar::from(x); let bytes = f1.to_repr().as_ref().to_vec(); let mut bytes_from_u64 = [0u8; 32]; bytes_from_u64[..8].copy_from_slice(&x.to_le_bytes()); From c9d5472a957a62040f33fc15755e09512ffd72a7 Mon Sep 17 00:00:00 2001 From: porcuquine Date: Fri, 2 Jun 2023 14:58:57 -0700 Subject: [PATCH 2/4] Use obviously-identifying Scalar type aliases. --- clutch/src/main.rs | 21 ++++++++++++++++----- src/main.rs | 18 +++++++++--------- 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/clutch/src/main.rs b/clutch/src/main.rs index 276f38cec0..fb575f2a63 100644 --- a/clutch/src/main.rs +++ b/clutch/src/main.rs @@ -4,8 +4,8 @@ use clutch::ClutchState; use lurk::eval::lang::{Coproc, Lang}; use lurk::field::LanguageField; -use lurk::proof::nova; use lurk::repl::repl_cli; +use pasta_curves::pallas; fn main() -> Result<()> { pretty_env_logger::init(); @@ -24,10 +24,21 @@ fn main() -> Result<()> { match field { LanguageField::Pallas => repl_cli::< - nova::S1, - ClutchState>, - Coproc, - >(Lang::>::new()), + pallas::Scalar, + ClutchState>, + Coproc, + >(Lang::>::new()), + // TODO: Support all LanguageFields. 
+ // LanguageField::BLS12_381 => repl_cli::< + // blstrs::Scalar, + // ClutchState>, + // Coproc, + // >(Lang::>::new()), + // LanguageField::Vesta => repl_cli::< + // vesta::Scalar, + // ClutchState>, + // Coproc, + // >(Lang::>::new()), _ => panic!("unsupported field"), } } diff --git a/src/main.rs b/src/main.rs index 826ee87377..9c7a66c6a4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,8 +2,8 @@ use anyhow::Result; use lurk::eval::lang::{Coproc, Lang}; use lurk::field::LanguageField; -use lurk::proof::nova; use lurk::repl::{repl_cli, ReplState}; +use pasta_curves::{pallas, vesta}; fn main() -> Result<()> { pretty_env_logger::init(); @@ -27,14 +27,14 @@ fn main() -> Result<()> { Coproc, >(Lang::>::new()), LanguageField::Pallas => repl_cli::< - nova::S1, - ReplState>, - Coproc, - >(Lang::>::new()), + pallas::Scalar, + ReplState>, + Coproc, + >(Lang::>::new()), LanguageField::Vesta => repl_cli::< - nova::S2, - ReplState>, - Coproc, - >(Lang::>::new()), + vesta::Scalar, + ReplState>, + Coproc, + >(Lang::>::new()), } } From 14bdbf062c956a6f897d344a9752abc6f77b55b8 Mon Sep 17 00:00:00 2001 From: porcuquine Date: Fri, 2 Jun 2023 15:02:18 -0700 Subject: [PATCH 3/4] Correct fields and use obvious aliases. 
--- benches/end2end.rs | 37 +++++++++++++++++-------------------- benches/fibonacci.rs | 16 ++++++++-------- 2 files changed, 25 insertions(+), 28 deletions(-) diff --git a/benches/end2end.rs b/benches/end2end.rs index 53b8742a30..97e6e965fa 100644 --- a/benches/end2end.rs +++ b/benches/end2end.rs @@ -15,6 +15,7 @@ use lurk::{ public_parameters, store::Store, }; +use pasta_curves::pallas; use std::sync::Arc; use std::time::Duration; @@ -46,8 +47,7 @@ fn end2end_benchmark(c: &mut Criterion) { .sample_size(10); let limit = 1_000_000_000; - let lang_pallas = - Lang::>::new(); + let lang_pallas = Lang::>::new(); let lang_pallas_rc = Arc::new(lang_pallas.clone()); let reduction_count = DEFAULT_REDUCTION_COUNT; @@ -64,7 +64,7 @@ fn end2end_benchmark(c: &mut Criterion) { group.bench_with_input(benchmark_id, &size, |b, &s| { b.iter(|| { - let ptr = go_base::(&mut store, s.0, s.1); + let ptr = go_base::(&mut store, s.0, s.1); let _result = prover .evaluate_and_prove(&pp, ptr, env, &mut store, limit, lang_pallas_rc.clone()) .unwrap(); @@ -81,7 +81,7 @@ fn store_benchmark(c: &mut Criterion) { .sample_size(60); let mut bls12_store = Store::::default(); - let mut pallas_store = Store::::default(); + let mut pallas_store = Store::::default(); // todo!() rfc out into more flexible test cases let sizes = vec![(10, 16), (10, 160)]; @@ -99,7 +99,7 @@ fn store_benchmark(c: &mut Criterion) { let pasta_id = BenchmarkId::new("store_go_base_pallas", ¶meter_string); group.bench_with_input(pasta_id, &size, |b, &s| { b.iter(|| { - let result = go_base::(&mut pallas_store, s.0, s.1); + let result = go_base::(&mut pallas_store, s.0, s.1); black_box(result) }) }); @@ -115,7 +115,7 @@ fn hydration_benchmark(c: &mut Criterion) { .sample_size(60); let mut bls12_store = Store::::default(); - let mut pallas_store = Store::::default(); + let mut pallas_store = Store::::default(); // todo!() rfc out into more flexible test cases let sizes = vec![(10, 16), (10, 160)]; @@ -133,7 +133,7 @@ fn 
hydration_benchmark(c: &mut Criterion) { { let benchmark_id = BenchmarkId::new("hydration_go_base_pallas", ¶meter_string); group.bench_with_input(benchmark_id, &size, |b, &s| { - let _ptr = go_base::(&mut pallas_store, s.0, s.1); + let _ptr = go_base::(&mut pallas_store, s.0, s.1); b.iter(|| pallas_store.hydrate_scalar_cache()) }); } @@ -150,9 +150,9 @@ fn eval_benchmark(c: &mut Criterion) { let limit = 1_000_000_000; let lang_bls12 = Lang::>::new(); - let lang_pallas = Lang::>::new(); + let lang_pallas = Lang::>::new(); let mut bls12_store = Store::::default(); - let mut pallas_store = Store::::default(); + let mut pallas_store = Store::::default(); // todo!() rfc out into more flexible test cases let sizes = vec![(10, 16), (10, 160)]; @@ -179,7 +179,7 @@ fn eval_benchmark(c: &mut Criterion) { { let benchmark_id = BenchmarkId::new("eval_go_base_pallas", ¶meter_string); group.bench_with_input(benchmark_id, &size, |b, &s| { - let ptr = go_base::(&mut pallas_store, s.0, s.1); + let ptr = go_base::(&mut pallas_store, s.0, s.1); b.iter(|| { Evaluator::new( ptr, @@ -205,15 +205,15 @@ fn eval_benchmark(c: &mut Criterion) { // let limit = 1_000_000_000; // let _lang_bls = Lang::>::new(); -// let _lang_pallas = Lang::>::new(); -// let lang_pallas = Lang::>::new(); +// let _lang_pallas = Lang::>::new(); +// let lang_pallas = Lang::>::new(); // let reduction_count = DEFAULT_REDUCTION_COUNT; // group.bench_function("circuit_generation_go_base_10_16_nova", |b| { // let mut store = Store::default(); // let env = empty_sym_env(&store); -// let ptr = go_base::(&mut store, black_box(10), black_box(16)); +// let ptr = go_base::(&mut store, black_box(10), black_box(16)); // let prover = NovaProver::new(reduction_count, lang_pallas.clone()); // let pp = public_parameters::public_params(reduction_count).unwrap(); @@ -238,8 +238,7 @@ fn prove_benchmark(c: &mut Criterion) { .sample_size(10); let limit = 1_000_000_000; - let lang_pallas = - Lang::>::new(); + let lang_pallas = 
Lang::>::new(); let lang_pallas_rc = Arc::new(lang_pallas.clone()); let mut store = Store::default(); let reduction_count = DEFAULT_REDUCTION_COUNT; @@ -248,7 +247,7 @@ fn prove_benchmark(c: &mut Criterion) { let benchmark_id = BenchmarkId::new("prove_go_base_nova", format!("_{}_{}", size.0, size.1)); group.bench_with_input(benchmark_id, &size, |b, &s| { - let ptr = go_base::(&mut store, s.0, s.1); + let ptr = go_base::(&mut store, s.0, s.1); let prover = NovaProver::new(reduction_count, lang_pallas.clone()); let pp = public_parameters::public_params(reduction_count, lang_pallas_rc.clone()).unwrap(); let frames = prover @@ -271,8 +270,7 @@ fn verify_benchmark(c: &mut Criterion) { .sample_size(10); let limit = 1_000_000_000; - let lang_pallas = - Lang::>::new(); + let lang_pallas = Lang::>::new(); let lang_pallas_rc = Arc::new(lang_pallas.clone()); let mut store = Store::default(); let reduction_count = DEFAULT_REDUCTION_COUNT; @@ -314,8 +312,7 @@ fn verify_compressed_benchmark(c: &mut Criterion) { .sample_size(10); let limit = 1_000_000_000; - let lang_pallas = - Lang::>::new(); + let lang_pallas = Lang::>::new(); let lang_pallas_rc = Arc::new(lang_pallas.clone()); let mut store = Store::default(); let reduction_count = DEFAULT_REDUCTION_COUNT; diff --git a/benches/fibonacci.rs b/benches/fibonacci.rs index b1841673ed..a4245f6215 100644 --- a/benches/fibonacci.rs +++ b/benches/fibonacci.rs @@ -5,6 +5,8 @@ use criterion::{ BenchmarkId, Criterion, SamplingMode, }; +use pasta_curves::pallas; + use lurk::{ eval::{ empty_sym_env, @@ -41,8 +43,7 @@ fn fib(store: &mut Store, a: u64) -> Ptr { #[allow(dead_code)] fn fibo_total(name: &str, iterations: u64, c: &mut BenchmarkGroup) { let limit: usize = 10_000_000_000; - let lang_pallas = - Lang::>::new(); + let lang_pallas = Lang::>::new(); let lang_rc = Arc::new(lang_pallas.clone()); let reduction_count = DEFAULT_REDUCTION_COUNT; @@ -55,7 +56,7 @@ fn fibo_total(name: &str, iterations: u64, c: &mut |b, iterations| { let mut 
store = Store::default(); let env = empty_sym_env(&store); - let ptr = fib::(&mut store, black_box(*iterations)); + let ptr = fib::(&mut store, black_box(*iterations)); let prover = NovaProver::new(reduction_count, lang_pallas.clone()); b.iter_batched( @@ -75,14 +76,14 @@ fn fibo_total(name: &str, iterations: u64, c: &mut #[allow(dead_code)] fn fibo_eval(name: &str, iterations: u64, c: &mut BenchmarkGroup) { let limit = 10_000_000_000; - let lang_pallas = Lang::>::new(); + let lang_pallas = Lang::>::new(); c.bench_with_input( BenchmarkId::new(name.to_string(), iterations), &(iterations), |b, iterations| { let mut store = Store::default(); - let ptr = fib::(&mut store, black_box(*iterations)); + let ptr = fib::(&mut store, black_box(*iterations)); b.iter(|| { let result = Evaluator::new(ptr, empty_sym_env(&store), &mut store, limit, &lang_pallas) @@ -95,8 +96,7 @@ fn fibo_eval(name: &str, iterations: u64, c: &mut B fn fibo_prove(name: &str, iterations: u64, c: &mut BenchmarkGroup) { let limit = 10_000_000_000; - let lang_pallas = - Lang::>::new(); + let lang_pallas = Lang::>::new(); let lang_rc = Arc::new(lang_pallas.clone()); let reduction_count = DEFAULT_REDUCTION_COUNT; let pp = public_params(reduction_count, lang_rc.clone()).unwrap(); @@ -107,7 +107,7 @@ fn fibo_prove(name: &str, iterations: u64, c: &mut |b, iterations| { let mut store = Store::default(); let env = empty_sym_env(&store); - let ptr = fib::(&mut store, black_box(*iterations)); + let ptr = fib::(&mut store, black_box(*iterations)); let prover = NovaProver::new(reduction_count, lang_pallas.clone()); let frames = prover From 5b3cb67e4560f8e74e01dc70dc8df1761cc78762 Mon Sep 17 00:00:00 2001 From: porcuquine Date: Fri, 2 Jun 2023 15:06:52 -0700 Subject: [PATCH 4/4] Document field names. 
--- src/field.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/field.rs b/src/field.rs index 3d06836b31..75bd751fba 100644 --- a/src/field.rs +++ b/src/field.rs @@ -22,6 +22,14 @@ use crate::tag::{ContTag, ExprTag, Op1, Op2}; /// The type of finite fields used in the language /// For Pallas/Vesta see https://electriccoin.co/blog/the-pasta-curves-for-halo-2-and-beyond/ +/// +/// Please note: +/// - pasta_curves::pallas::Scalar = pasta_curves::Fq +/// - pasta_curves::vesta::Scalar = pasta_curves::Fp +/// +/// Because confusion on this point, perhaps combined with cargo-cult copying of incorrect previous usage, has led to +/// inconsistencies and inaccuracies in the code base, please prefer the named Scalar forms when correspondence to a +/// named `LanguageField` is important. pub enum LanguageField { /// The Pallas field, Pallas,