Commit
feat: added conversions a<->b
0xThemis committed Oct 15, 2024
1 parent d9b8412 commit d1c806c
Showing 8 changed files with 515 additions and 208 deletions.
2 changes: 1 addition & 1 deletion mpc-core/src/protocols/rep3/rngs.rs
@@ -57,7 +57,7 @@ impl Rep3Rand {
    }

    pub fn random_biguint(&mut self, bitlen: usize) -> (BigUint, BigUint) {
        let limbsize = (bitlen + 31) / 32;
        let limbsize = bitlen.div_ceil(8);
        let a = BigUint::new((0..limbsize).map(|_| self.rng1.gen()).collect());
        let b = BigUint::new((0..limbsize).map(|_| self.rng2.gen()).collect());
        let mask = (BigUint::from(1u32) << bitlen) - BigUint::one();
84 changes: 81 additions & 3 deletions mpc-core/src/protocols/rep3new.rs
@@ -1,8 +1,86 @@
mod arithmetic;
mod binary;
mod a2b;
pub mod arithmetic;
pub mod binary;

pub use arithmetic::types::Rep3PrimeFieldShare;
pub use arithmetic::types::Rep3PrimeFieldShareVec;
pub use arithmetic::Arithmetic;

pub use binary::types::Rep3BigUintShare;

pub mod conversion {
    use ark_ff::PrimeField;
    use num_bigint::BigUint;

    use crate::protocols::rep3::{id::PartyID, network::Rep3Network};

    type IoResult<T> = std::io::Result<T>;
    use super::{a2b, arithmetic::IoContext, Rep3BigUintShare, Rep3PrimeFieldShare};

    // re-export a2b
    pub use super::a2b::a2b;

    /// Transforms the replicated shared value x from a binary sharing to an arithmetic sharing.
    /// I.e., x = x_1 xor x_2 xor x_3 gets transformed into x = x'_1 + x'_2 + x'_3.
    /// This implementation currently works only for a binary sharing of a valid field element,
    /// i.e., x = x_1 xor x_2 xor x_3 < p.
    // Keep in mind: Only works if the input is actually a binary sharing of a valid field element.
    // If the input has the correct number of bits, but is >= P, then either x can be reduced with
    // self.low_depth_sub_p_cmux(x) first, or self.low_depth_binary_add_2_mod_p(x, y) is extended
    // to subtract 2P in parallel as well. The second solution requires another multiplexer in the end.
    pub async fn b2a<F: PrimeField, N: Rep3Network>(
        x: Rep3BigUintShare,
        io_context: &mut IoContext<N>,
    ) -> IoResult<Rep3PrimeFieldShare<F>> {
        let mut y = Rep3BigUintShare::zero_share();
        let mut res = Rep3PrimeFieldShare::zero_share();

        let bitlen = usize::try_from(F::MODULUS_BIT_SIZE).expect("u32 fits into usize");
        let (mut r, r2) = io_context.rngs.rand.random_biguint(bitlen);
        r ^= r2;

        match io_context.id {
            PartyID::ID0 => {
                let k3 = io_context.rngs.bitcomp2.random_fes_3keys::<F>();

                res.b = (k3.0 + k3.1 + k3.2).neg();
                y.a = r;
            }
            PartyID::ID1 => {
                let k2 = io_context.rngs.bitcomp1.random_fes_3keys::<F>();

                res.a = (k2.0 + k2.1 + k2.2).neg();
                y.a = r;
            }
            PartyID::ID2 => {
                let k2 = io_context.rngs.bitcomp1.random_fes_3keys::<F>();
                let k3 = io_context.rngs.bitcomp2.random_fes_3keys::<F>();

                let k2_comp = k2.0 + k2.1 + k2.2;
                let k3_comp = k3.0 + k3.1 + k3.2;
                let val: BigUint = (k2_comp + k3_comp).into();
                y.a = val ^ r;
                res.a = k3_comp.neg();
                res.b = k2_comp.neg();
            }
        }

        // Reshare y
        io_context.network.send_next(y.a.to_owned())?;
        let local_b = io_context.network.recv_prev()?;
        y.b = local_b;

        let z = a2b::low_depth_binary_add_mod_p::<F, N>(x, y, io_context, bitlen).await?;

        match io_context.id {
            PartyID::ID0 => {
                io_context.network.send_next(z.b.to_owned())?;
                let rcv: BigUint = io_context.network.recv_prev()?;
                res.a = (z.a ^ z.b ^ rcv).into();
            }
            PartyID::ID1 => {
                let rcv: BigUint = io_context.network.recv_prev()?;
                res.b = (z.a ^ z.b ^ rcv).into();
            }
            PartyID::ID2 => {
                io_context.network.send_next(z.b)?;
            }
        }
        Ok(res)
    }
}
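
The conversions above translate a value between two sharings of the same field element: an arithmetic sharing satisfies x = x_1 + x_2 + x_3 mod p, a binary sharing satisfies x = x_1 xor x_2 xor x_3, and a2b/b2a switch representations without opening x. The following is only an illustrative clear-value sketch of that invariant (not part of this commit); it uses plain num-bigint with the rand feature and a toy modulus instead of the crate's share types.

use num_bigint::{BigUint, RandBigInt};

fn main() {
    let p = BigUint::from(65537u32); // toy modulus standing in for the field prime
    let mut rng = rand::thread_rng();
    let x = rng.gen_biguint_below(&p);

    // Arithmetic sharing: x = a1 + a2 + a3 mod p
    let a1 = rng.gen_biguint_below(&p);
    let a2 = rng.gen_biguint_below(&p);
    let a3 = (&p + &p + &x - &a1 - &a2) % &p; // solve for the third share
    assert_eq!((&a1 + &a2 + &a3) % &p, x);

    // Binary sharing of the same x: x = b1 ^ b2 ^ b3
    let b1 = rng.gen_biguint(17);
    let b2 = rng.gen_biguint(17);
    let b3 = &x ^ &b1 ^ &b2; // solve for the third share
    assert_eq!(&b1 ^ &b2 ^ &b3, x);
}
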
216 changes: 216 additions & 0 deletions mpc-core/src/protocols/rep3new/a2b.rs
@@ -0,0 +1,216 @@
use ark_ff::One;
use ark_ff::PrimeField;
use ark_ff::Zero;
use num_bigint::BigUint;

use crate::protocols::rep3::id::PartyID;
use crate::protocols::rep3::network::Rep3Network;

use super::arithmetic::IoContext;
use super::binary;
use super::Rep3BigUintShare;
use super::Rep3PrimeFieldShare;

type IoResult<T> = std::io::Result<T>;

/// Transforms the replicated shared value x from an arithmetic sharing to a binary sharing.
/// I.e., x = x_1 + x_2 + x_3 gets transformed into x = x'_1 xor x'_2 xor x'_3.
pub async fn a2b<F: PrimeField, N: Rep3Network>(
    x: &Rep3PrimeFieldShare<F>,
    io_context: &mut IoContext<N>,
) -> IoResult<Rep3BigUintShare> {
    let mut x01 = Rep3BigUintShare::zero_share();
    let mut x2 = Rep3BigUintShare::zero_share();

    let bitlen = usize::try_from(F::MODULUS_BIT_SIZE).expect("u32 fits into usize");

    let (mut r, r2) = io_context.rngs.rand.random_biguint(bitlen);
    r ^= r2;

    match io_context.id {
        PartyID::ID0 => {
            x01.a = r;
            x2.b = x.b.into();
        }
        PartyID::ID1 => {
            let val: BigUint = (x.a + x.b).into();
            x01.a = val ^ r;
        }
        PartyID::ID2 => {
            x01.a = r;
            x2.a = x.a.into();
        }
    }

    // Reshare x01
    io_context.network.send_next(x01.a.to_owned())?;
    let local_b = io_context.network.recv_prev()?;
    x01.b = local_b;

    low_depth_binary_add_mod_p::<F, N>(x01, x2, io_context, bitlen).await
}

pub(super) async fn low_depth_binary_add_mod_p<F: PrimeField, N: Rep3Network>(
    x1: Rep3BigUintShare,
    x2: Rep3BigUintShare,
    io_context: &mut IoContext<N>,
    bitlen: usize,
) -> IoResult<Rep3BigUintShare> {
    let x = low_depth_binary_add(x1, x2, io_context, bitlen).await?;
    low_depth_sub_p_cmux::<F, N>(x, io_context, bitlen).await
}
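
For orientation, a clear-value sketch (not part of this commit) of what low_depth_binary_add_mod_p computes once shares are recombined: add the two summands, then conditionally subtract the modulus. The shared version does the same two steps obliviously, using the Kogge-Stone adder below for the addition and a CMUX for the conditional subtraction; the function name add_mod_p_clear and the toy values are made up for this illustration.

use num_bigint::BigUint;

// Clear-value analogue: s = x1 + x2, then subtract p once if s >= p.
// The MPC code never branches on s; it computes both candidates and
// selects one with a multiplexer over XOR shares instead.
fn add_mod_p_clear(x1: &BigUint, x2: &BigUint, p: &BigUint) -> BigUint {
    let s = x1 + x2; // at most bitlen + 1 bits
    if &s >= p {
        s - p // the reduced candidate ("y" in the shared code)
    } else {
        s // the unreduced candidate ("x" in the shared code)
    }
}

fn main() {
    let p = BigUint::from(23u32);
    let res = add_mod_p_clear(&BigUint::from(20u32), &BigUint::from(7u32), &p);
    assert_eq!(res, BigUint::from(4u32)); // 27 mod 23
}
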

async fn low_depth_binary_add<N: Rep3Network>(
    x1: Rep3BigUintShare,
    x2: Rep3BigUintShare,
    io_context: &mut IoContext<N>,
    bitlen: usize,
) -> IoResult<Rep3BigUintShare> {
    // Add x1 + x2 via a packed Kogge-Stone adder
    let p = &x1 ^ &x2;
    let g = binary::and(&x1, &x2, io_context, bitlen).await?;
    kogge_stone_inner(p, g, io_context, bitlen).await
}

async fn kogge_stone_inner<N: Rep3Network>(
    mut p: Rep3BigUintShare,
    mut g: Rep3BigUintShare,
    io_context: &mut IoContext<N>,
    bitlen: usize,
) -> IoResult<Rep3BigUintShare> {
    let d = ceil_log2(bitlen);
    let s_ = p.to_owned();

    for i in 0..d {
        let shift = 1 << i;
        let mut p_ = p.to_owned();
        let mut g_ = g.to_owned();
        let mask = (BigUint::from(1u64) << (bitlen - shift)) - BigUint::one();
        p_ &= &mask;
        g_ &= &mask;
        let p_shift = &p >> shift;

        // TODO: Make the AND more communication-efficient. At the moment we send the full
        // element for each level, even though the operands shrink in size.
        // Maybe just pass the mask into AND?
        let (r1, r2) = and_twice(p_shift, g_, p_, io_context, bitlen - shift).await?;
        p = r2 << shift;
        g ^= &(r1 << shift);
    }
    g <<= 1;
    g ^= &s_;
    Ok(g)
}
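
For intuition, a hedged clear-text sketch (not part of this commit) of the same Kogge-Stone recursion on plain integers: p holds the propagate bits, g the generate bits, and each of the ceil(log2(bitlen)) rounds doubles the carry distance, exactly mirroring the loop above but without masking or resharing. The function name kogge_stone_add_clear is made up for this illustration.

fn kogge_stone_add_clear(x1: u128, x2: u128, bitlen: usize) -> u128 {
    debug_assert!(bitlen >= 1 && bitlen <= 126);
    let mut p = x1 ^ x2; // propagate bits
    let mut g = x1 & x2; // generate bits
    let s_ = p;
    let d = usize::BITS as usize - (bitlen - 1).leading_zeros() as usize; // ceil_log2(bitlen)
    for i in 0..d {
        let shift = 1usize << i;
        let p_shift = p >> shift;
        let r1 = p_shift & g; // carries generated `shift` positions below and propagated up
        let r2 = p_shift & p; // propagate across a window of 2 * shift bits
        p = r2 << shift;
        g ^= r1 << shift;
    }
    (g << 1) ^ s_ // sum bits, including the carry-out at position `bitlen`
}

fn main() {
    assert_eq!(kogge_stone_add_clear(13, 9, 5), 22);
    assert_eq!(kogge_stone_add_clear(31, 1, 5), 32); // carry-out into bit 5
}
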

async fn low_depth_sub_p_cmux<F: PrimeField, N: Rep3Network>(
    mut x: Rep3BigUintShare,
    io_context: &mut IoContext<N>,
    bitlen: usize,
) -> IoResult<Rep3BigUintShare> {
    let mask = (BigUint::from(1u64) << bitlen) - BigUint::one();
    let x_msb = &x >> bitlen;
    x &= &mask;
    let mut y = low_depth_binary_sub_p::<F, N>(&x, io_context, bitlen).await?;
    let y_msb = &y >> (bitlen + 1);
    y &= &mask;

    // Spread the ov share to the whole biguint
    let ov_a = (x_msb.a.iter_u64_digits().next().unwrap_or_default()
        ^ y_msb.a.iter_u64_digits().next().unwrap_or_default())
        & 1;
    let ov_b = (x_msb.b.iter_u64_digits().next().unwrap_or_default()
        ^ y_msb.b.iter_u64_digits().next().unwrap_or_default())
        & 1;

    let ov_a = if ov_a == 1 {
        mask.to_owned()
    } else {
        BigUint::zero()
    };
    let ov_b = if ov_b == 1 { mask } else { BigUint::zero() };
    let ov = Rep3BigUintShare::new(ov_a, ov_b);

    // one big multiplexer
    let res = binary::cmux(&ov, &y, &x, io_context, bitlen).await?;
    Ok(res)
}

// Calculates 2^k + x1 - x2
async fn low_depth_binary_sub<N: Rep3Network>(
    x1: Rep3BigUintShare,
    x2: Rep3BigUintShare,
    io_context: &mut IoContext<N>,
    bitlen: usize,
) -> IoResult<Rep3BigUintShare> {
    // Let x2' be the bit_not of x2.
    // Add x1 + x2' via a packed Kogge-Stone adder with carry_in = 1.
    // This is equivalent to x1 - x2 = x1 + two's complement of x2.
    let mask = (BigUint::from(1u64) << bitlen) - BigUint::one();
    // bitnot of x2
    let x2 = binary::xor_public(&x2, &mask, io_context.id);
    // Now start the Kogge-Stone adder
    let p = &x1 ^ &x2;
    let mut g = binary::and(&x1, &x2, io_context, bitlen).await?;
    // Since carry_in = 1, we need to XOR the LSB of x1 and x2 into g (i.e., XOR the LSB of p)
    g ^= &(&p & &BigUint::one());

    let res = kogge_stone_inner(p, g, io_context, bitlen).await?;
    let res = binary::xor_public(&res, &BigUint::one(), io_context.id); // cin = 1
    Ok(res)
}
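
A quick clear-value check (not part of this commit) of the identity this function relies on: within k bits, x1 + bitnot(x2) + 1 equals 2^k + x1 - x2, i.e. adding the one's complement with carry_in = 1 performs the subtraction. The concrete values are made up for illustration.

fn main() {
    let (x1, x2, k) = (13u32, 9u32, 5u32);
    let mask = (1u32 << k) - 1;      // k-bit mask
    let not_x2 = x2 ^ mask;          // bit-not of x2 within k bits
    let lhs = x1 + not_x2 + 1;       // adder input with carry_in = 1
    let rhs = (1u32 << k) + x1 - x2; // 2^k + x1 - x2
    assert_eq!(lhs, rhs);            // 13 + 22 + 1 == 32 + 13 - 9 == 36
}
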

fn ceil_log2(x: usize) -> usize {
    let mut y = 0;
    let mut x = x - 1;
    while x > 0 {
        x >>= 1;
        y += 1;
    }
    y
}

async fn and_twice<N: Rep3Network>(
    a: Rep3BigUintShare,
    b1: Rep3BigUintShare,
    b2: Rep3BigUintShare,
    io_context: &mut IoContext<N>,
    bitlen: usize,
) -> IoResult<(Rep3BigUintShare, Rep3BigUintShare)> {
    debug_assert!(a.a.bits() <= bitlen as u64);
    debug_assert!(b1.a.bits() <= bitlen as u64);
    debug_assert!(b2.a.bits() <= bitlen as u64);
    let (mut mask1, mask_b) = io_context.rngs.rand.random_biguint(bitlen);
    mask1 ^= mask_b;

    let (mut mask2, mask_b) = io_context.rngs.rand.random_biguint(bitlen);
    mask2 ^= mask_b;

    let local_a1 = (&b1 & &a) ^ mask1;
    let local_a2 = (&a & &b2) ^ mask2;
    io_context.network.send_next(local_a1.to_owned())?;
    io_context.network.send_next(local_a2.to_owned())?;
    let local_b1 = io_context.network.recv_prev()?;
    let local_b2 = io_context.network.recv_prev()?;

    let r1 = Rep3BigUintShare {
        a: local_a1,
        b: local_b1,
    };
    let r2 = Rep3BigUintShare {
        a: local_a2,
        b: local_b2,
    };

    Ok((r1, r2))
}

async fn low_depth_binary_sub_p<F: PrimeField, N: Rep3Network>(
    x: &Rep3BigUintShare,
    io_context: &mut IoContext<N>,
    bitlen: usize,
) -> IoResult<Rep3BigUintShare> {
    let p_ = (BigUint::from(1u64) << (bitlen + 1)) - F::MODULUS.into();

    // Add x1 + p_ via a packed Kogge-Stone adder
    let p = binary::xor_public(&x, &p_, io_context.id);
    let g = x & &p_;
    kogge_stone_inner(p, g, io_context, bitlen + 1).await
}
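
A clear-value sketch (not part of this commit) of what this subtraction sets up: adding p_ = 2^(bitlen+1) - P to x yields 2^(bitlen+1) + x - P, so the bit at position bitlen+1 of the result is set exactly when x >= P; that is the bit low_depth_sub_p_cmux reads back as y_msb. The function name sub_p_clear and the toy modulus are made up for this illustration.

use num_bigint::BigUint;
use num_traits::One;

// Returns (2^(bitlen+1) + x - p, x >= p) for x with at most bitlen + 1 bits.
fn sub_p_clear(x: &BigUint, p: &BigUint, bitlen: usize) -> (BigUint, bool) {
    let p_ = (BigUint::one() << (bitlen + 1)) - p; // same p_ as in the shared code
    let y = x + &p_;                               // = 2^(bitlen+1) + x - p
    let no_borrow = (&y >> (bitlen + 1)) == BigUint::one();
    (y, no_borrow)
}

fn main() {
    let p = BigUint::from(23u32); // toy 5-bit modulus
    assert!(sub_p_clear(&BigUint::from(30u32), &p, 5).1);  // 30 >= 23
    assert!(!sub_p_clear(&BigUint::from(11u32), &p, 5).1); // 11 <  23
}
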