From bdb27cc94b620dec4706887b1131ba0c8c21bbdb Mon Sep 17 00:00:00 2001 From: David Nevado Date: Mon, 4 Dec 2023 16:33:21 +0100 Subject: [PATCH] Add Pluto-Eris cycle of curves (#98) * add: Pluto/Eris curves Co-authored-by: Zhiyong-Gong <63758161+John-Gong-Math@users.noreply.github.com> address review comments Remove leftover add: test vectors for `test_from_u512` add and clean docs * Update Legendre impl Complete Pluto Eris docs --- src/lib.rs | 1 + src/pluto_eris/curve.rs | 287 +++++++++++ src/pluto_eris/engine.rs | 945 ++++++++++++++++++++++++++++++++++ src/pluto_eris/fields/fp.rs | 629 ++++++++++++++++++++++ src/pluto_eris/fields/fp12.rs | 666 ++++++++++++++++++++++++ src/pluto_eris/fields/fp2.rs | 777 ++++++++++++++++++++++++++++ src/pluto_eris/fields/fp6.rs | 786 ++++++++++++++++++++++++++++ src/pluto_eris/fields/fq.rs | 620 ++++++++++++++++++++++ src/pluto_eris/fields/mod.rs | 856 ++++++++++++++++++++++++++++++ src/pluto_eris/mod.rs | 15 + 10 files changed, 5582 insertions(+) create mode 100644 src/pluto_eris/curve.rs create mode 100644 src/pluto_eris/engine.rs create mode 100644 src/pluto_eris/fields/fp.rs create mode 100644 src/pluto_eris/fields/fp12.rs create mode 100644 src/pluto_eris/fields/fp2.rs create mode 100644 src/pluto_eris/fields/fp6.rs create mode 100644 src/pluto_eris/fields/fq.rs create mode 100644 src/pluto_eris/fields/mod.rs create mode 100644 src/pluto_eris/mod.rs diff --git a/src/lib.rs b/src/lib.rs index 0ef9d3e3..82613ca9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,6 +9,7 @@ pub mod serde; pub mod bn256; pub mod grumpkin; pub mod pasta; +pub mod pluto_eris; pub mod secp256k1; pub mod secp256r1; pub mod secq256k1; diff --git a/src/pluto_eris/curve.rs b/src/pluto_eris/curve.rs new file mode 100644 index 00000000..95b61adb --- /dev/null +++ b/src/pluto_eris/curve.rs @@ -0,0 +1,287 @@ +use super::fields::{fp::Fp, fp2::Fp2, fq::Fq}; +use crate::ff::WithSmallOrderMulGroup; +use crate::ff::{Field, PrimeField}; +use crate::group::{prime::PrimeCurveAffine, Curve, Group as _, GroupEncoding}; +use crate::hash_to_curve::svdw_hash_to_curve; +use crate::{Coordinates, CurveAffine, CurveExt}; +use core::cmp; +use core::fmt::Debug; +use core::iter::Sum; +use core::ops::{Add, Mul, Neg, Sub}; +use group::cofactor::CofactorGroup; +use rand::RngCore; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +#[cfg(feature = "derive_serde")] +use serde::{Deserialize, Serialize}; + +use crate::{ + impl_add_binop_specify_output, impl_binops_additive, impl_binops_additive_specify_output, + impl_binops_multiplicative, impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, + new_curve_impl, +}; + +const G1_GENERATOR_X: Fp = Fp::from_raw([ + 0x9ffffcd2ffffffff, + 0xa2a7e8c30006b945, + 0xe4a7a5fe8fadffd6, + 0x443f9a5cda8a6c7b, + 0xa803ca76f439266f, + 0x0130e0000d7f70e4, + 0x2400000000002400, +]); +const G1_GENERATOR_Y: Fp = Fp::from_raw([0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]); + +const PLUTO_A: Fp = Fp::ZERO; +const PLUTO_B: Fp = Fp::from_raw([0x39, 0, 0, 0, 0, 0, 0]); + +const ERIS_GENERATOR_X: Fq = Fq::from_raw([ + 0x1ffffcd2ffffffff, + 0x9ca7e85d60050af4, + 0xe4a775fe8e177fd6, + 0x443f9a5c7a8a6c7b, + 0xa803ca76f439266f, + 0x0130e0000d7f70e4, + 0x2400000000002400, +]); +const ERIS_GENERATOR_Y: Fq = Fq::from_raw([0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]); + +const ERIS_A: Fq = Fq::ZERO; +const ERIS_B: Fq = Fq::from_raw([0x39, 0, 0, 0, 0, 0, 0]); + +const G2_GENERATOR_X: Fp2 = Fp2 { + // 
0x13576c81faf3a13fd815d0e9bd54b845ee935948b84498b27ca972bfb93722e223c9e276a4ebe7559cfc86dd865f07d64f2b5fe6556f9066 + c0: Fp::from_raw([ + 0x4f2b5fe6556f9066, + 0x9cfc86dd865f07d6, + 0x23c9e276a4ebe755, + 0x7ca972bfb93722e2, + 0xee935948b84498b2, + 0xd815d0e9bd54b845, + 0x13576c81faf3a13f, + ]), + + //0x142164cb875db0465e5092f9380f44f555243d011699b7393029f2d201554727aeb383298fdf5847b9b3dff01bbe8d63fe7c781a8fd7bf21 + c1: Fp::from_raw([ + 0xfe7c781a8fd7bf21, + 0xb9b3dff01bbe8d63, + 0xaeb383298fdf5847, + 0x3029f2d201554727, + 0x55243d011699b739, + 0x5e5092f9380f44f5, + 0x142164cb875db046, + ]), +}; +const G2_GENERATOR_Y: Fp2 = Fp2 { + //0x2239f7408ead478c58e88d4df1e7418c42fdbb92e64ba85aa4dc17d7dace3f32eb471c004db774bfe78574aca67b3898cd1b78ad106ab9fe + c0: Fp::from_raw([ + 0xcd1b78ad106ab9fe, + 0xe78574aca67b3898, + 0xeb471c004db774bf, + 0xa4dc17d7dace3f32, + 0x42fdbb92e64ba85a, + 0x58e88d4df1e7418c, + 0x2239f7408ead478c, + ]), + + // 0x1260b04d51136590dbb53dfd7caf450aeca714555bbe4f079ca65d97eb28fc9fc697b4e10bbcd9e0539ef82a731fb88ed49e3c080e6d945d + c1: Fp::from_raw([ + 0xd49e3c080e6d945d, + 0x539ef82a731fb88e, + 0xc697b4e10bbcd9e0, + 0x9ca65d97eb28fc9f, + 0xeca714555bbe4f07, + 0xdbb53dfd7caf450a, + 0x1260b04d51136590, + ]), +}; + +const TRITON_A: Fp2 = Fp2::ZERO; + +// u + 3 +const TRITON_B: Fp2 = Fp2 { + c0: Fp::from_raw([0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + c1: Fp::ONE, +}; + +impl CofactorGroup for G1 { + type Subgroup = G1; + + fn clear_cofactor(&self) -> Self { + *self + } + + fn into_subgroup(self) -> CtOption { + CtOption::new(self, 1.into()) + } + + fn is_torsion_free(&self) -> Choice { + 1.into() + } +} + +new_curve_impl!( + (pub), + G1, + G1Affine, + false, + Fp, + Fq, + (G1_GENERATOR_X,G1_GENERATOR_Y), + PLUTO_A, + PLUTO_B, + "pluto", + |curve_id, domain_prefix| svdw_hash_to_curve(curve_id, domain_prefix, G1::SVDW_Z), +); + +impl group::cofactor::CofactorGroup for Eris { + type Subgroup = Eris; + + fn clear_cofactor(&self) -> Self { + *self + } + + fn into_subgroup(self) -> CtOption { + CtOption::new(self, 1.into()) + } + + fn is_torsion_free(&self) -> Choice { + 1.into() + } +} + +impl G1 { + /// Constant Z for the Shallue-van de Woestijne map. 
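+    /// (Pluto: y^2 = x^3 + 57 over Fp, i.e. `PLUTO_A` = 0 and `PLUTO_B` = 57.)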
+ /// Computed using https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-10.html#svdw-z-code + const SVDW_Z: Fp = Fp::ONE; +} + +new_curve_impl!( + (pub), + Eris, + ErisAffine, + false, + Fq, + Fp, + (ERIS_GENERATOR_X,ERIS_GENERATOR_Y), + ERIS_A, + ERIS_B, + "eris", + |curve_id, domain_prefix| svdw_hash_to_curve(curve_id, domain_prefix, Eris::SVDW_Z), +); + +impl CofactorGroup for G2 { + type Subgroup = G2; + + fn clear_cofactor(&self) -> Self { + // cofactor = 2*p - q + //0x24000000000024000130e0000d7f70e4a803ca76f439266f443f9a5d3a8a6c7be4a7d5fe91447fd6a8a7e928a00867971ffffcd300000001 + let e: [u8; 56] = [ + 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x01, 0x30, 0xe0, 0x00, 0x0d, 0x7f, + 0x70, 0xe4, 0xa8, 0x03, 0xca, 0x76, 0xf4, 0x39, 0x26, 0x6f, 0x44, 0x3f, 0x9a, 0x5d, + 0x3a, 0x8a, 0x6c, 0x7b, 0xe4, 0xa7, 0xd5, 0xfe, 0x91, 0x44, 0x7f, 0xd6, 0xa8, 0xa7, + 0xe9, 0x28, 0xa0, 0x08, 0x67, 0x97, 0x1f, 0xff, 0xfc, 0xd3, 0x00, 0x00, 0x00, 0x01, + ]; + + // self * TRITON_COFACTOR + let mut acc = G2::identity(); + for bit in e + .iter() + .flat_map(|byte| (0..8).rev().map(move |i| Choice::from((byte >> i) & 1u8))) + .skip(1) + { + acc = acc.double(); + acc = G2::conditional_select(&acc, &(acc + self), bit); + } + acc + } + + fn into_subgroup(self) -> CtOption { + CtOption::new(self.clear_cofactor(), 1.into()) + } + + fn is_torsion_free(&self) -> Choice { + // group order = p + let e: [u8; 56] = [ + 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x01, 0x30, 0xe0, 0x00, 0x0d, 0x7f, + 0x70, 0xe4, 0xa8, 0x03, 0xca, 0x76, 0xf4, 0x39, 0x26, 0x6f, 0x44, 0x3f, 0x9a, 0x5c, + 0xda, 0x8a, 0x6c, 0x7b, 0xe4, 0xa7, 0xa5, 0xfe, 0x8f, 0xad, 0xff, 0xd6, 0xa2, 0xa7, + 0xe8, 0xc3, 0x00, 0x06, 0xb9, 0x45, 0x9f, 0xff, 0xfc, 0xd3, 0x00, 0x00, 0x00, 0x01, + ]; + // self * GROUP_ORDER; + let mut acc = G2::identity(); + for bit in e + .iter() + .flat_map(|byte| (0..8).rev().map(move |i| Choice::from((byte >> i) & 1u8))) + .skip(1) + { + acc = acc.double(); + acc = G2::conditional_select(&acc, &(acc + self), bit); + } + acc.is_identity() + } +} + +impl Eris { + /// Constant Z for the Shallue-van de Woestijne map. 
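+    /// (Eris: y^2 = x^3 + 57 over Fq, i.e. `ERIS_A` = 0 and `ERIS_B` = 57.)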
+ /// Computed using https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-10.html#svdw-z-code + const SVDW_Z: Fq = Fq::ONE; +} + +new_curve_impl!( + (pub), + G2, + G2Affine, + false, + Fp2, + Fq, + (G2_GENERATOR_X,G2_GENERATOR_Y), + TRITON_A, + TRITON_B, + "triton", + |_, _| unimplemented!(), +); + +#[test] +fn test_curve_pluto() { + crate::tests::curve::curve_tests::(); +} +#[test] +fn test_curve_eris() { + crate::tests::curve::curve_tests::(); +} +#[test] +fn test_curve_triton() { + crate::tests::curve::curve_tests::(); +} + +#[test] +fn test_serialization() { + crate::tests::curve::random_serialization_test::(); + crate::tests::curve::random_serialization_test::(); + crate::tests::curve::random_serialization_test::(); + #[cfg(feature = "derive_serde")] + crate::tests::curve::random_serde_test::(); + #[cfg(feature = "derive_serde")] + crate::tests::curve::random_serde_test::(); + #[cfg(feature = "derive_serde")] + crate::tests::curve::random_serde_test::(); +} + +#[test] +fn test_hash_to_curve() { + crate::tests::curve::hash_to_curve_test::(); + crate::tests::curve::hash_to_curve_test::(); +} + +#[test] +fn test_endo_consistency() { + let g = Eris::generator(); + assert_eq!(g * Fp::ZETA, g.endo()); + + let g = G1::generator(); + assert_eq!(g * Fq::ZETA, g.endo()); + + let g = G2::generator(); + assert_eq!(g * Fq::ZETA, g.endo()); +} diff --git a/src/pluto_eris/engine.rs b/src/pluto_eris/engine.rs new file mode 100644 index 00000000..d35fc749 --- /dev/null +++ b/src/pluto_eris/engine.rs @@ -0,0 +1,945 @@ +#![allow(clippy::suspicious_arithmetic_impl)] +use crate::ff::{Field, PrimeField}; +use crate::group::cofactor::CofactorCurveAffine; +use crate::group::Group; +use crate::pairing::{Engine, MillerLoopResult, MultiMillerLoop, PairingCurveAffine}; +use crate::pluto_eris::curve::*; +use crate::pluto_eris::fields::fp::*; +use crate::pluto_eris::fields::fp12::*; +use crate::pluto_eris::fields::fp2::*; +use crate::pluto_eris::fields::fp6::FROBENIUS_COEFF_FP6_C1; +use crate::pluto_eris::fields::fq::*; +use core::borrow::Borrow; +use core::iter::Sum; +use core::ops::{Add, Mul, MulAssign, Neg, Sub}; +use rand_core::RngCore; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq}; + +/// Adaptation of Algorithm 1, https://eprint.iacr.org/2013/722.pdf +/// the parameter for the curve Pluto: u = -0x4000000000001000008780000000 +const NEG_PLUTO_U: u128 = 0x4000000000001000008780000000; + +const NEG_SIX_U_PLUS_2_NAF: [i8; 114] = [ + 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 0, -1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + -1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 1, +]; + +/// Value of (57/(u + 3))^((p - 1)/2) where u^2 + 5 = 0 in Fp2. 
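+/// It is applied to the y-coordinate of the Frobenius image of Q in `G2Prepared::from_affine`.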
+const XI_TO_P_MINUS_1_OVER_2: Fp2 = Fp2 { + c0: Fp::from_raw([ + 0x54cf5ad1c0926216, + 0x186c1f3ce4a46d4e, + 0x9c23800ce9c9452f, + 0x50e0d09ff6d6c08b, + 0x7cf421e4d46f6666, + 0x678664ba4b6d8343, + 0x21cc26d5de0f80f4, + ]), + + c1: Fp::from_raw([ + 0xc0505f4c260e91f4, + 0xe7bbd15f10723657, + 0xb4b3e0c35358097e, + 0x87c56f42a558750d, + 0x4b7211d23f34f0ae, + 0xf6839d29e2f0d250, + 0x16ebe8b2e12a1106, + ]), +}; + +impl PairingCurveAffine for G1Affine { + type Pair = G2Affine; + type PairingResult = Gt; + + fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult { + Pluto::pairing(self, other) + } +} + +impl PairingCurveAffine for G2Affine { + type Pair = G1Affine; + type PairingResult = Gt; + + fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult { + Pluto::pairing(other, self) + } +} + +#[derive(Copy, Clone, Debug, Default)] +pub struct Gt(pub(crate) Fp12); + +impl std::fmt::Display for Gt { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } +} + +impl ConstantTimeEq for Gt { + fn ct_eq(&self, other: &Self) -> Choice { + self.0.ct_eq(&other.0) + } +} + +impl ConditionallySelectable for Gt { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Gt(Fp12::conditional_select(&a.0, &b.0, choice)) + } +} + +impl Eq for Gt {} +impl PartialEq for Gt { + #[inline] + fn eq(&self, other: &Self) -> bool { + bool::from(self.ct_eq(other)) + } +} + +impl Gt { + /// Returns the group identity, which is $1$. + pub const fn identity() -> Gt { + Gt(Fp12::ONE) + } + + /// Doubles this group element. + pub fn double(&self) -> Gt { + Gt(self.0.square()) + } +} + +impl<'a> Neg for &'a Gt { + type Output = Gt; + + #[inline] + fn neg(self) -> Gt { + // The element is unitary, so we just conjugate. + let mut u = self.0; + u.conjugate(); + Gt(u) + } +} + +impl Neg for Gt { + type Output = Gt; + + #[inline] + fn neg(self) -> Gt { + -&self + } +} + +impl<'a, 'b> Add<&'b Gt> for &'a Gt { + type Output = Gt; + + #[inline] + fn add(self, rhs: &'b Gt) -> Gt { + Gt(self.0 * rhs.0) + } +} + +impl<'a, 'b> Sub<&'b Gt> for &'a Gt { + type Output = Gt; + + #[inline] + fn sub(self, rhs: &'b Gt) -> Gt { + self + (-rhs) + } +} + +impl<'a, 'b> Mul<&'b Fq> for &'a Gt { + type Output = Gt; + + fn mul(self, other: &'b Fq) -> Self::Output { + let mut acc = Gt::identity(); + + for bit in other + .to_repr() + .iter() + .rev() + .flat_map(|byte| (0..8).rev().map(move |i| Choice::from((byte >> i) & 1u8))) + .skip(1) + { + acc = acc.double(); + acc = Gt::conditional_select(&acc, &(acc + self), bit); + } + + acc + } +} + +use crate::{ + impl_add_binop_specify_output, impl_binops_additive, impl_binops_additive_specify_output, + impl_binops_multiplicative, impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, +}; +impl_binops_additive!(Gt, Gt); +impl_binops_multiplicative!(Gt, Fq); + +impl Sum for Gt +where + T: Borrow, +{ + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::identity(), |acc, item| acc + item.borrow()) + } +} + +impl Group for Gt { + type Scalar = Fq; + + fn random(_: impl RngCore) -> Self { + unimplemented!(); + } + + fn identity() -> Self { + Self::identity() + } + + fn generator() -> Self { + unimplemented!(); + } + + fn is_identity(&self) -> Choice { + self.ct_eq(&Self::identity()) + } + + #[must_use] + fn double(&self) -> Self { + self.double() + } +} + +/// Points of G2 in Jacobian coordinates. +/// These are points lie in the twisted curve E'(Fp2). 
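+/// `coeffs` stores the line-function coefficients produced by the doubling and addition steps;
+/// `multi_miller_loop` consumes them via the sparse multiplication `Fp12::mul_by_034`.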
+#[derive(Clone, Debug)] +pub struct G2Prepared { + pub(crate) coeffs: Vec<(Fp2, Fp2, Fp2)>, + pub(crate) infinity: bool, +} + +impl G2Prepared { + /// Returns true if `self` is the infinity point. + pub fn is_zero(&self) -> bool { + self.infinity + } + + /// Prepares a G2 point in affine coordinates. + pub fn from_affine(q: G2Affine) -> Self { + if bool::from(q.is_identity()) { + return G2Prepared { + coeffs: vec![], + infinity: true, + }; + } + + /// Adaptation of Algorithm 26, https://eprint.iacr.org/2010/354.pdf + fn doubling_step(r: &mut G2) -> (Fp2, Fp2, Fp2) { + let mut tmp0 = r.x; + tmp0.square_assign(); + + let mut tmp1 = r.y; + tmp1.square_assign(); + + let mut tmp2 = tmp1; + tmp2.square_assign(); + + let mut tmp3 = tmp1; + tmp3 += &r.x; + tmp3.square_assign(); + tmp3 -= &tmp0; + tmp3 -= &tmp2; + tmp3.double_assign(); + + let mut tmp4 = tmp0; + tmp4.double_assign(); + tmp4 += &tmp0; + + let mut tmp6 = r.x; + tmp6 += &tmp4; + + let mut tmp5 = tmp4; + tmp5.square_assign(); + + let mut zsquared = r.z; + zsquared.square_assign(); + + r.x = tmp5; + r.x -= &tmp3; + r.x -= &tmp3; + + r.z += &r.y; + r.z.square_assign(); + r.z -= &tmp1; + r.z -= &zsquared; + + r.y = tmp3; + r.y -= &r.x; + r.y.mul_assign(&tmp4); + + tmp2.double_assign(); + tmp2.double_assign(); + tmp2.double_assign(); + + r.y -= &tmp2; + + // up to here everything was by algorith, line 11 + // use R instead of new T + + // tmp3 is the first part of line 12 + tmp3 = tmp4; + tmp3.mul_assign(&zsquared); + tmp3.double_assign(); + tmp3 = tmp3.neg(); + + // tmp6 is from line 14 + tmp6.square_assign(); + tmp6 -= &tmp0; + tmp6 -= &tmp5; + + tmp1.double_assign(); + tmp1.double_assign(); + + tmp6 -= &tmp1; + + // tmp0 is the first part of line 16 + tmp0 = r.z; + tmp0.mul_assign(&zsquared); + tmp0.double_assign(); + + (tmp0, tmp3, tmp6) + } + + // Adaptation of Algorithm 27, https://eprint.iacr.org/2010/354.pdf + fn addition_step(r: &mut G2, q: &G2Affine) -> (Fp2, Fp2, Fp2) { + let mut zsquared = r.z; + zsquared.square_assign(); + + let mut ysquared = q.y; + ysquared.square_assign(); + + // t0 corresponds to line 1 + let mut t0 = zsquared; + t0.mul_assign(&q.x); + + // t1 corresponds to lines 2 and 3 + let mut t1 = q.y; + t1 += &r.z; + t1.square_assign(); + t1 -= &ysquared; + t1 -= &zsquared; + t1.mul_assign(&zsquared); + + // t2 corresponds to line 4 + let mut t2 = t0; + t2 -= &r.x; + + // t3 corresponds to line 5 + let mut t3 = t2; + t3.square_assign(); + + // t4 corresponds to line 6 + let mut t4 = t3; + t4.double_assign(); + t4.double_assign(); + + // t5 corresponds to line 7 + let mut t5 = t4; + t5.mul_assign(&t2); + + // t6 corresponds to line 8 + let mut t6 = t1; + t6 -= &r.y; + t6 -= &r.y; + + // t9 corresponds to line 9 + let mut t9 = t6; + t9.mul_assign(&q.x); + + // corresponds to line 10 + let mut t7 = t4; + t7.mul_assign(&r.x); + + // corresponds to line 11, but assigns to r.x instead of T.x + r.x = t6; + r.x.square_assign(); + r.x -= &t5; + r.x -= &t7; + r.x -= &t7; + + // corresponds to line 12, but assigns to r.z instead of T.z + r.z += &t2; + r.z.square_assign(); + r.z -= &zsquared; + r.z -= &t3; + + // corresponds to line 13 + let mut t10 = q.y; + t10 += &r.z; + + // corresponds to line 14 + let mut t8 = t7; + t8 -= &r.x; + t8.mul_assign(&t6); + + // corresponds to line 15 + t0 = r.y; + t0.mul_assign(&t5); + t0.double_assign(); + + // corresponds to line 12, but assigns to r.y instead of T.y + r.y = t8; + r.y -= &t0; + + // corresponds to line 17 + t10.square_assign(); + t10 -= &ysquared; + + let mut ztsquared = 
r.z; + ztsquared.square_assign(); + + t10 -= &ztsquared; + + // corresponds to line 18 + t9.double_assign(); + t9 -= &t10; + + // t10 = 2*Zt from Algo 27, line 19 + t10 = r.z; + t10.double_assign(); + + // t1 = first multiplicator of line 21 + t6 = t6.neg(); + + t1 = t6; + t1.double_assign(); + + // t9 corresponds to t9 from Algo 27 + (t10, t1, t9) + } + + let mut coeffs = vec![]; + let mut r: G2 = q.into(); + + let mut negq = q; + negq = -negq; + + coeffs.push(doubling_step(&mut r)); + + let last_position = NEG_SIX_U_PLUS_2_NAF.len() - 2; + match NEG_SIX_U_PLUS_2_NAF[last_position] { + 1 => { + coeffs.push(addition_step(&mut r, &q)); + } + -1 => { + coeffs.push(addition_step(&mut r, &negq)); + } + _ => (), + } + + for i in (0..last_position).rev() { + coeffs.push(doubling_step(&mut r)); + + match NEG_SIX_U_PLUS_2_NAF[i] { + 1 => { + coeffs.push(addition_step(&mut r, &q)); + } + -1 => { + coeffs.push(addition_step(&mut r, &negq)); + } + _ => continue, + } + } + + let mut neg_r = r; + neg_r = -neg_r; + + let mut q1 = q; + + q1.x.c1 = q1.x.c1.neg(); + q1.x.mul_assign(&FROBENIUS_COEFF_FP6_C1[1]); + + q1.y.c1 = q1.y.c1.neg(); + q1.y.mul_assign(&XI_TO_P_MINUS_1_OVER_2); + + coeffs.push(addition_step(&mut neg_r, &q1)); + + let mut minusq2 = q; + minusq2.x.mul_assign(&FROBENIUS_COEFF_FP6_C1[2]); + + coeffs.push(addition_step(&mut neg_r, &minusq2)); + + G2Prepared { + coeffs, + infinity: false, + } + } +} + +impl From for G2Prepared { + fn from(q: G2Affine) -> G2Prepared { + G2Prepared::from_affine(q) + } +} + +impl MillerLoopResult for Gt { + type Gt = Self; + fn final_exponentiation(&self) -> Gt { + fn exp_by_x(f: &mut Fp12) { + let x = NEG_PLUTO_U; + let mut res = Fp12::ONE; + for i in (0..111).rev() { + res.cyclotomic_square(); + if ((x >> i) & 1) == 1 { + res.mul_assign(f); + } + } + res.conjugate(); + *f = res; + } + + let r = self.0; + let mut f1 = self.0; + f1.conjugate(); + + Gt(r.invert() + .map(|mut f2| { + let mut r = f1; + r.mul_assign(&f2); + f2 = r; + r.frobenius_map(2); + r.mul_assign(&f2); + + let mut fp = r; + fp.frobenius_map(1); + + let mut fp2 = r; + fp2.frobenius_map(2); + let mut fp3 = fp2; + fp3.frobenius_map(1); + + let mut fu = r; + exp_by_x(&mut fu); + + let mut fu2 = fu; + exp_by_x(&mut fu2); + + let mut fu3 = fu2; + exp_by_x(&mut fu3); + + let mut y3 = fu; + y3.frobenius_map(1); + + let mut fu2p = fu2; + fu2p.frobenius_map(1); + + let mut fu3p = fu3; + fu3p.frobenius_map(1); + + let mut y2 = fu2; + y2.frobenius_map(2); + + let mut y0 = fp; + y0.mul_assign(&fp2); + y0.mul_assign(&fp3); + + let mut y1 = r; + y1.conjugate(); + + let mut y5 = fu2; + y5.conjugate(); + + y3.conjugate(); + + let mut y4 = fu; + y4.mul_assign(&fu2p); + y4.conjugate(); + + let mut y6 = fu3; + y6.mul_assign(&fu3p); + y6.conjugate(); + + y6.cyclotomic_square(); + y6.mul_assign(&y4); + y6.mul_assign(&y5); + + let mut t1 = y3; + t1.mul_assign(&y5); + t1.mul_assign(&y6); + + y6.mul_assign(&y2); + + t1.cyclotomic_square(); + t1.mul_assign(&y6); + t1.cyclotomic_square(); + + let mut t0 = t1; + t0.mul_assign(&y1); + + t1.mul_assign(&y0); + + t0.cyclotomic_square(); + t0.mul_assign(&t1); + + t0 + }) + .unwrap()) + } +} +impl MultiMillerLoop for Pluto { + /// The prepared form of `Self::G2Affine`. + type G2Prepared = G2Prepared; + + /// The type returned by `Engine::miller_loop`. 
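+    /// A Miller-loop output still requires `final_exponentiation` before it is a valid pairing value.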
+ type Result = Gt; + fn multi_miller_loop(terms: &[(&G1Affine, &G2Prepared)]) -> Self::Result { + let mut pairs = vec![]; + for &(p, q) in terms { + if !bool::from(p.is_identity()) && !q.is_zero() { + pairs.push((p, q.coeffs.iter())); + } + } + + // Final steps of the line function on prepared coefficients + fn ell(f: &mut Fp12, coeffs: &(Fp2, Fp2, Fp2), p: &G1Affine) { + let mut c0 = coeffs.0; + let mut c1 = coeffs.1; + + c0.c0.mul_assign(&p.y); + c0.c1.mul_assign(&p.y); + + c1.c0.mul_assign(&p.x); + c1.c1.mul_assign(&p.x); + + // Sparse multiplication in Fq12 + f.mul_by_034(&c0, &c1, &coeffs.2); + } + + let mut f = Fp12::ONE; + + for &mut (p, ref mut coeffs) in &mut pairs { + ell(&mut f, coeffs.next().unwrap(), p); + } + + // length - 2 + let len_min2 = NEG_SIX_U_PLUS_2_NAF.len() - 2; + + if NEG_SIX_U_PLUS_2_NAF[len_min2] != 0 { + for &mut (p, ref mut coeffs) in &mut pairs { + ell(&mut f, coeffs.next().unwrap(), p); + } + } + + for x in NEG_SIX_U_PLUS_2_NAF[..len_min2].iter().rev() { + f.square_assign(); + + for &mut (p, ref mut coeffs) in &mut pairs { + ell(&mut f, coeffs.next().unwrap(), p); + } + if *x != 0 { + for &mut (p, ref mut coeffs) in &mut pairs { + ell(&mut f, coeffs.next().unwrap(), p); + } + } + } + + f.conjugate(); + + for &mut (p, ref mut coeffs) in &mut pairs { + ell(&mut f, coeffs.next().unwrap(), p); + } + + for &mut (p, ref mut coeffs) in &mut pairs { + ell(&mut f, coeffs.next().unwrap(), p); + } + + for &mut (_p, ref mut coeffs) in &mut pairs { + assert_eq!(coeffs.next(), None); + } + + Gt(f) + } +} + +/// Pluto pairing-friendly curve. See: https://github.com/daira/pluto-eris +#[derive(Clone, Debug)] +pub struct Pluto; + +impl Engine for Pluto { + type Fr = Fq; + type G1 = G1; + type G1Affine = G1Affine; + type G2 = G2; + type G2Affine = G2Affine; + type Gt = Gt; + + fn pairing(p: &Self::G1Affine, q: &Self::G2Affine) -> Self::Gt { + let q = G2Prepared::from_affine(*q); + let terms: &[(&G1Affine, &G2Prepared)] = &[(p, &q)]; + let u = Self::multi_miller_loop(terms); + u.final_exponentiation() + } +} + +#[cfg(test)] +use rand::SeedableRng; +#[cfg(test)] +use rand_xorshift::XorShiftRng; + +#[test] +fn test_pairing() { + let g1 = G1::generator(); + let mut g2 = G2::generator(); + g2 = g2.double(); + let pair12 = Pluto::pairing(&G1Affine::from(g1), &G2Affine::from(g2)); + + let mut g1 = G1::generator(); + let g2 = G2::generator(); + g1 = g1.double(); + let pair21 = Pluto::pairing(&G1Affine::from(g1), &G2Affine::from(g2)); + + assert_eq!(pair12, pair21); + + let g1 = G1::generator(); + let mut g2 = G2::generator(); + g2 = g2.double().double(); + let pair12 = Pluto::pairing(&G1Affine::from(g1), &G2Affine::from(g2)); + + let mut g1 = G1::generator(); + let mut g2 = G2::generator(); + g1 = g1.double(); + g2 = g2.double(); + let pair21 = Pluto::pairing(&G1Affine::from(g1), &G2Affine::from(g2)); + + assert_eq!(pair12, pair21); + + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + for _ in 0..100 { + let a = Fq::random(&mut rng); + let b = Fq::random(&mut rng); + + let mut g1 = G1::generator(); + g1.mul_assign(a); + + let mut g2 = G2::generator(); + g1.mul_assign(b); + + let pair_ab = Pluto::pairing(&G1Affine::from(g1), &G2Affine::from(g2)); + + g1 = G1::generator(); + g1.mul_assign(b); + + g2 = G2::generator(); + g1.mul_assign(a); + + let pair_ba = Pluto::pairing(&G1Affine::from(g1), &G2Affine::from(g2)); + + assert_eq!(pair_ab, pair_ba); + } +} + +#[test] +fn 
tricking_miller_loop_result() { + assert_eq!( + Pluto::multi_miller_loop(&[(&G1Affine::identity(), &G2Affine::generator().into())]).0, + Fp12::one() + ); + assert_eq!( + Pluto::multi_miller_loop(&[(&G1Affine::generator(), &G2Affine::identity().into())]).0, + Fp12::one() + ); + assert_ne!( + Pluto::multi_miller_loop(&[ + (&G1Affine::generator(), &G2Affine::generator().into()), + (&-G1Affine::generator(), &G2Affine::generator().into()) + ]) + .0, + Fp12::one() + ); + assert_eq!( + Pluto::multi_miller_loop(&[ + (&G1Affine::generator(), &G2Affine::generator().into()), + (&-G1Affine::generator(), &G2Affine::generator().into()) + ]) + .final_exponentiation(), + Gt::identity() + ); +} + +#[test] +fn random_bilinearity_tests() { + let mut rng = XorShiftRng::from_seed([ + 0x55, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + for _ in 0..10 { + let mut a = G1::generator(); + let ka = Fq::random(&mut rng); + a.mul_assign(ka); + + let mut b = G2::generator(); + let kb = Fq::random(&mut rng); + b.mul_assign(kb); + + let c = Fq::random(&mut rng); + let d = Fq::random(&mut rng); + + let mut ac = a; + ac.mul_assign(c); + + let mut ad = a; + ad.mul_assign(d); + + let mut bc = b; + bc.mul_assign(c); + + let mut bd = b; + bd.mul_assign(d); + + let acbd = Pluto::pairing(&G1Affine::from(ac), &G2Affine::from(bd)); + let adbc = Pluto::pairing(&G1Affine::from(ad), &G2Affine::from(bc)); + + let mut cd = c; + cd.mul_assign(&d); + + cd *= Fq([1, 0, 0, 0, 0, 0, 0]); + + let abcd = Gt(Pluto::pairing(&G1Affine::from(a), &G2Affine::from(b)) + .0 + .pow_vartime(cd.0)); + + assert_eq!(acbd, adbc); + assert_eq!(acbd, abcd); + } +} + +#[test] +pub fn engine_tests() { + let mut rng = XorShiftRng::from_seed([ + 0x56, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + for _ in 0..10 { + let a = G1Affine::from(G1::random(&mut rng)); + let b = G2Affine::from(G2::random(&mut rng)); + + assert!(a.pairing_with(&b) == b.pairing_with(&a)); + assert!(a.pairing_with(&b) == Pluto::pairing(&a, &b)); + } + + for _ in 0..10 { + let z1 = G1Affine::identity(); + let z2 = G2Prepared::from(G2Affine::identity()); + + let a = G1Affine::from(G1::random(&mut rng)); + let b = G2Prepared::from(G2Affine::from(G2::random(&mut rng))); + let c = G1Affine::from(G1::random(&mut rng)); + let d = G2Prepared::from(G2Affine::from(G2::random(&mut rng))); + + assert_eq!( + Fp12::ONE, + Pluto::multi_miller_loop(&[(&z1, &b)]) + .final_exponentiation() + .0, + ); + + assert_eq!( + Fp12::ONE, + Pluto::multi_miller_loop(&[(&a, &z2)]) + .final_exponentiation() + .0, + ); + + assert_eq!( + Pluto::multi_miller_loop(&[(&z1, &b), (&c, &d)]).final_exponentiation(), + Pluto::multi_miller_loop(&[(&a, &z2), (&c, &d)]).final_exponentiation(), + ); + + assert_eq!( + Pluto::multi_miller_loop(&[(&a, &b), (&z1, &d)]).final_exponentiation(), + Pluto::multi_miller_loop(&[(&a, &b), (&c, &z2)]).final_exponentiation(), + ); + } +} + +#[test] +fn random_miller_loop_tests() { + let mut rng = XorShiftRng::from_seed([ + 0x58, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + // Exercise a double miller loop + for _ in 0..10 { + let a = G1Affine::from(G1::random(&mut rng)); + let b = G2Affine::from(G2::random(&mut rng)); + let c = G1Affine::from(G1::random(&mut rng)); + let d = G2Affine::from(G2::random(&mut rng)); + + let ab = Pluto::pairing(&a, &b); + let cd = Pluto::pairing(&c, &d); + + let mut abcd = ab; + abcd = Gt(abcd.0 * cd.0); 
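+        // Addition in `Gt` is multiplication in `Fp12`, so this accumulates e(a, b) + e(c, d).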
+ + let b = G2Prepared::from(b); + let d = G2Prepared::from(d); + + let abcd_with_double_loop = + Pluto::multi_miller_loop(&[(&a, &b), (&c, &d)]).final_exponentiation(); + + assert_eq!(abcd, abcd_with_double_loop); + } +} + +#[test] +pub fn multi_miller_final_exp_tests() { + let g1 = G1::generator(); + let g2 = G2::generator(); + + let mut rng = XorShiftRng::from_seed([ + 0x56, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + for _ in 0..10 { + let s = Fq::random(&mut rng); + + let mut s_g1 = g1; + s_g1.mul_assign(s); + + let mut s_g2 = g2; + s_g2.mul_assign(s); + + let s_g2_prepared = G2Prepared::from(G2Affine::from(s_g2)); + let g2_prepared = G2Prepared::from(G2Affine::from(g2)); + + let (term_1, term_2) = ( + (&G1Affine::from(g1), &s_g2_prepared), + (&-G1Affine::from(s_g1), &g2_prepared), + ); + + let terms = &[term_1, term_2]; + + assert!( + bool::from( + Pluto::multi_miller_loop(&terms[..]) + .final_exponentiation() + .is_identity(), + ), + "trivial pairing check failed" + ); + + let lhs = Pluto::pairing(&G1Affine::from(g1), &G2Affine::from(s_g2)); + let rhs = Pluto::pairing(&G1Affine::from(s_g1), &G2Affine::from(g2)); + + assert_eq!(lhs, rhs, "failed trivial check"); + } +} diff --git a/src/pluto_eris/fields/fp.rs b/src/pluto_eris/fields/fp.rs new file mode 100644 index 00000000..3651a19a --- /dev/null +++ b/src/pluto_eris/fields/fp.rs @@ -0,0 +1,629 @@ +use crate::arithmetic::{adc, mac, sbb}; +use crate::ff::{FromUniformBytes, PrimeField, WithSmallOrderMulGroup}; +use crate::{ + extend_field_legendre, field_arithmetic_7_limbs, field_bits_7_limbs, field_common_7_limbs, + impl_from_u64_7_limbs, +}; +use crate::{ + impl_add_binop_specify_output, impl_binops_additive, impl_binops_additive_specify_output, + impl_binops_multiplicative, impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, + impl_sum_prod, +}; +use core::convert::TryInto; +use core::fmt; +use core::ops::{Add, Mul, Neg, Sub}; +use rand::RngCore; +use std::slice::Iter; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +#[cfg(feature = "derive_serde")] +use serde::{Deserialize, Serialize}; + +/// This represents an element of $\mathbb{F}_p$ where +/// +/// `p = 0x24000000000024000130e0000d7f70e4a803ca76f439266f443f9a5cda8a6c7be4a7a5fe8fadffd6a2a7e8c30006b9459ffffcd300000001` +/// +/// is the base field of the Pluto curve. +/// The internal representation of this type is seven 64-bit unsigned +/// integers in little-endian order which account for the 446 bits required to be represented. +/// `Fp` values are always in Montgomery form; i.e., Fp(a) = aR mod p, with R = 2^448. +#[derive(Clone, Copy, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "derive_serde", derive(Serialize, Deserialize))] +pub struct Fp(pub(crate) [u64; 7]); + +/// Size of `Fp` element in bytes +const SIZE: usize = 56; + +/// Constant representing the modulus +/// p = 0x24000000000024000130e0000d7f70e4a803ca76f439266f443f9a5cda8a6c7be4a7a5fe8fadffd6a2a7e8c30006b9459ffffcd300000001 +const MODULUS: Fp = Fp([ + 0x9ffffcd300000001, + 0xa2a7e8c30006b945, + 0xe4a7a5fe8fadffd6, + 0x443f9a5cda8a6c7b, + 0xa803ca76f439266f, + 0x0130e0000d7f70e4, + 0x2400000000002400, +]); + +/// The modulus as u32 limbs. 
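+/// Stored least-significant limb first; only used on non-64-bit targets via `field_bits_7_limbs`.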
+#[cfg(not(target_pointer_width = "64"))] +const MODULUS_LIMBS_32: [u32; 14] = [ + 0x00000001, 0x9ffffcd3, 0x0006b945, 0xa2a7e8c3, 0x8fadffd6, 0xe4a7a5fe, 0xda8a6c7b, 0x443f9a5c, + 0xf439266f, 0xa803ca76, 0x0d7f70e4, 0x0130e000, 0x00002400, 0x24000000, +]; + +// pub const NEGATIVE_ONE: Fp = Fp([]); + +pub(crate) const MODULUS_STR: &str = "0x24000000000024000130e0000d7f70e4a803ca76f439266f443f9a5cda8a6c7be4a7a5fe8fadffd6a2a7e8c30006b9459ffffcd300000001"; + +/// INV = -r^{-1} mod 2^64 +/// `0x9ffffcd2ffffffff` +const INV: u64 = 0x9ffffcd2ffffffff; + +/// Let M be the power of `2^64` nearest to `Self::MODULUS_BITS`. Then `R = M % Self::MODULUS`. +/// `R = 2^448 mod p` +/// `0x3ffffffffff03fff7a9dfffa183e9bf67e576bf526ff2f52242c7760637089cbf6a760a123e01218d68a2aaffd0ef18a000163afffffff9` +const R: Fp = Fp([ + 0xa000163afffffff9, + 0x8d68a2aaffd0ef18, + 0xbf6a760a123e0121, + 0x2242c7760637089c, + 0x67e576bf526ff2f5, + 0xf7a9dfffa183e9bf, + 0x03ffffffffff03ff, +]); + +/// `R^2 = 2^896 mod p` +/// `0x1a4b16581f66e3cc8bcb0f20758aec8520b6db3d7481a84c734fd363b575c23e7a42067a8ccd154b4b20c07277ae01f1d9702c6d54dc0598` +const R2: Fp = Fp([ + 0xd9702c6d54dc0598, + 0x4b20c07277ae01f1, + 0x7a42067a8ccd154b, + 0x734fd363b575c23e, + 0x20b6db3d7481a84c, + 0x8bcb0f20758aec85, + 0x1a4b16581f66e3cc, +]); + +/// `R^3 = 2^1792 mod p` +/// `0x1f51e40a048ddc1789010189f4df0ae1f3bc57efac4b3280b25aa8b46a40b225e5446680e4c4ea0449937d6b40e58f05c67afa3fe916dd69` +const R3: Fp = Fp([ + 0xc67afa3fe916dd69, + 0x49937d6b40e58f05, + 0xe5446680e4c4ea04, + 0xb25aa8b46a40b225, + 0xf3bc57efac4b3280, + 0x89010189f4df0ae1, + 0x1f51e40a048ddc17, +]); + +/// `GENERATOR = 10 mod p` is a generator of the `p - 1` order multiplicative +/// subgroup, or in other words a primitive root of the field. +const GENERATOR: Fp = Fp::from_raw([0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]); + +/// Size of the 2-adic sub-group of the field. +const S: u32 = 32; + +/// GENERATOR^t where t * 2^s + 1 = p +/// with t odd. In other words, this +/// is a 2^s root of unity. +/// `0x2d39f8c5f9adb3f35fe3f4222db17451ddd9602a013af5276bdbe3903ec85fc889232f5c8bc6857060c75e6f399661d6c7b82d31d563091` +const ROOT_OF_UNITY: Fp = Fp::from_raw([ + 0x6c7b82d31d563091, + 0x060c75e6f399661d, + 0x889232f5c8bc6857, + 0x76bdbe3903ec85fc, + 0x1ddd9602a013af52, + 0x35fe3f4222db1745, + 0x02d39f8c5f9adb3f, +]); + +/// 1 / ROOT_OF_UNITY mod p +/// `0x17725d635b00cda4153eb10c7105919d012822bd86c08691803272fbc5c9f8378055eb56ae2d55f9272bf208aad57f666deaead2c693ff66` +const ROOT_OF_UNITY_INV: Fp = Fp::from_raw([ + 0x6deaead2c693ff66, + 0x272bf208aad57f66, + 0x8055eb56ae2d55f9, + 0x803272fbc5c9f837, + 0x012822bd86c08691, + 0x153eb10c7105919d, + 0x17725d635b00cda4, +]); + +/// 1 / 2 mod p +/// `0x12000000000012000098700006bfb8725401e53b7a1c9337a21fcd2e6d45363df253d2ff47d6ffeb5153f46180035ca2cffffe6980000001` +pub(crate) const TWO_INV: Fp = Fp::from_raw([ + 0xcffffe6980000001, + 0x5153f46180035ca2, + 0xf253d2ff47d6ffeb, + 0xa21fcd2e6d45363d, + 0x5401e53b7a1c9337, + 0x0098700006bfb872, + 0x1200000000001200, +]); +/// GENERATOR^{2^s} where t * 2^s + 1 = r with t odd. In other words, this is a t root of unity. 
+/// `0xeacefc6504d028d42ed23fc8766d5a5f195b456887e1e0021fb760c53233e9170c23749b459b95cc6cbb5faf3754a1e1916b2007775db04` +const DELTA: Fp = Fp::from_raw([ + 0x1916b2007775db04, + 0xc6cbb5faf3754a1e, + 0x70c23749b459b95c, + 0x21fb760c53233e91, + 0xf195b456887e1e00, + 0x42ed23fc8766d5a5, + 0x0eacefc6504d028d, +]); + +/// `ZETA^3 = 1 mod p` where `ZETA^2 != 1 mod p` +/// `0x480000000000360001c950000d7ee0e4a803c956d01c903d720dc8ad8b38dffaf50c100004c37ffffffe` +const ZETA: Fp = Fp::from_raw([ + 0x100004c37ffffffe, + 0xc8ad8b38dffaf50c, + 0xc956d01c903d720d, + 0x50000d7ee0e4a803, + 0x00000000360001c9, + 0x0000000000004800, + 0x0000000000000000, +]); + +/// NEG_ONE ; -1 mod p +pub(crate) const NEG_ONE: Fp = Fp::from_raw([ + 0x9ffffcd300000000, + 0xa2a7e8c30006b945, + 0xe4a7a5fe8fadffd6, + 0x443f9a5cda8a6c7b, + 0xa803ca76f439266f, + 0x0130e0000d7f70e4, + 0x2400000000002400, +]); + +impl_binops_additive!(Fp, Fp); +impl_binops_multiplicative!(Fp, Fp); +field_common_7_limbs!( + Fp, + FpRepr, + MODULUS, + INV, + MODULUS_STR, + TWO_INV, + ROOT_OF_UNITY_INV, + DELTA, + ZETA, + R, + R2, + R3 +); +impl_sum_prod!(Fp); +impl_from_u64_7_limbs!(Fp, R2); +field_arithmetic_7_limbs!(Fp, MODULUS, INV, sparse); + +#[cfg(target_pointer_width = "64")] +field_bits_7_limbs!(Fp, MODULUS); +#[cfg(not(target_pointer_width = "64"))] +field_bits_7_limbs!(Fp, MODULUS, MODULUS_LIMBS_32); + +extend_field_legendre!(Fp); + +impl Fp { + pub const fn size() -> usize { + SIZE + } +} + +impl ff::Field for Fp { + const ZERO: Self = Self::zero(); + const ONE: Self = Self::one(); + + fn random(mut rng: impl RngCore) -> Self { + Self::from_u512([ + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + ]) + } + + fn double(&self) -> Self { + self.double() + } + + #[inline(always)] + fn square(&self) -> Self { + self.square() + } + + /// Computes the multiplicative inverse of this element, + /// failing if the element is zero. + fn invert(&self) -> CtOption { + // self^(p - 2) + let tmp = self.pow([ + 0x9ffffcd2ffffffff, + 0xa2a7e8c30006b945, + 0xe4a7a5fe8fadffd6, + 0x443f9a5cda8a6c7b, + 0xa803ca76f439266f, + 0x0130e0000d7f70e4, + 0x2400000000002400, + ]); + + CtOption::new(tmp, !self.ct_eq(&Self::zero())) + } + + fn sqrt(&self) -> CtOption { + /// `(t - 1) // 2` where t * 2^s + 1 = p with t odd. + const T_MINUS1_OVER2: [u64; 7] = [ + 0x80035ca2cffffe69, + 0x47d6ffeb5153f461, + 0x6d45363df253d2ff, + 0x7a1c9337a21fcd2e, + 0x06bfb8725401e53b, + 0x0000120000987000, + 0x0000000012000000, + ]; + ff::helpers::sqrt_tonelli_shanks(self, T_MINUS1_OVER2) + } + + fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) { + ff::helpers::sqrt_ratio_generic(num, div) + } +} + +#[derive(Clone, Copy, Debug)] +/// Canonical little-endian representation of a `Fp` element. +pub struct FpRepr { + pub repr: [u8; SIZE], +} + +impl FpRepr { + /// Returns an iterator over the bytes of the canoncial representation of the element. 
+ pub fn iter(&self) -> Iter<'_, u8> { + self.repr.iter() + } +} + +impl Default for FpRepr { + fn default() -> Self { + FpRepr { repr: [0u8; SIZE] } + } +} + +impl AsRef<[u8]> for FpRepr { + fn as_ref(&self) -> &[u8] { + self.repr.as_ref() + } +} + +impl AsMut<[u8]> for FpRepr { + fn as_mut(&mut self) -> &mut [u8] { + self.repr.as_mut() + } +} +impl From<[u8; SIZE]> for FpRepr { + fn from(repr: [u8; SIZE]) -> Self { + Self { repr } + } +} + +impl ff::PrimeField for Fp { + type Repr = FpRepr; + + const NUM_BITS: u32 = 446; + const CAPACITY: u32 = 445; + const MODULUS: &'static str = MODULUS_STR; + const MULTIPLICATIVE_GENERATOR: Self = GENERATOR; + const ROOT_OF_UNITY: Self = ROOT_OF_UNITY; + const ROOT_OF_UNITY_INV: Self = ROOT_OF_UNITY_INV; + const TWO_INV: Self = TWO_INV; + const DELTA: Self = DELTA; + const S: u32 = S; + + fn from_repr(repr: Self::Repr) -> CtOption { + let mut tmp = Self([0, 0, 0, 0, 0, 0, 0]); + let repr = repr.repr; + + tmp.0[0] = u64::from_le_bytes(repr[0..8].try_into().unwrap()); + tmp.0[1] = u64::from_le_bytes(repr[8..16].try_into().unwrap()); + tmp.0[2] = u64::from_le_bytes(repr[16..24].try_into().unwrap()); + tmp.0[3] = u64::from_le_bytes(repr[24..32].try_into().unwrap()); + tmp.0[4] = u64::from_le_bytes(repr[32..40].try_into().unwrap()); + tmp.0[5] = u64::from_le_bytes(repr[40..48].try_into().unwrap()); + tmp.0[6] = u64::from_le_bytes(repr[48..56].try_into().unwrap()); + + // Try to subtract the modulus + let (_, borrow) = sbb(tmp.0[0], MODULUS.0[0], 0); + let (_, borrow) = sbb(tmp.0[1], MODULUS.0[1], borrow); + let (_, borrow) = sbb(tmp.0[2], MODULUS.0[2], borrow); + let (_, borrow) = sbb(tmp.0[3], MODULUS.0[3], borrow); + let (_, borrow) = sbb(tmp.0[4], MODULUS.0[4], borrow); + let (_, borrow) = sbb(tmp.0[5], MODULUS.0[5], borrow); + let (_, borrow) = sbb(tmp.0[6], MODULUS.0[6], borrow); + + // If the element is smaller than MODULUS then the + // subtraction will underflow, producing a borrow value + // of 0xffff...ffff. Otherwise, it'll be zero. + let is_some = (borrow as u8) & 1; + + // Convert to Montgomery form by computing + // (a.R^0 * R^2) / R = a.R + tmp *= &R2; + + CtOption::new(tmp, Choice::from(is_some)) + } + + fn to_repr(&self) -> Self::Repr { + // Turn into canonical form by computing + // (a.R) / R = a + let tmp = Self::montgomery_reduce(&[ + self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], self.0[6], 0, 0, 0, + 0, 0, 0, 0, + ]); + + let mut res = [0; SIZE]; + res[0..8].copy_from_slice(&tmp.0[0].to_le_bytes()); + res[8..16].copy_from_slice(&tmp.0[1].to_le_bytes()); + res[16..24].copy_from_slice(&tmp.0[2].to_le_bytes()); + res[24..32].copy_from_slice(&tmp.0[3].to_le_bytes()); + res[32..40].copy_from_slice(&tmp.0[4].to_le_bytes()); + res[40..48].copy_from_slice(&tmp.0[5].to_le_bytes()); + res[48..56].copy_from_slice(&tmp.0[6].to_le_bytes()); + res.into() + } + + fn is_odd(&self) -> Choice { + Choice::from(self.to_repr().repr[0] & 1) + } +} + +impl FromUniformBytes<64> for Fp { + /// Converts a 512-bit little endian integer into + /// an `Fp` by reducing by the modulus. 
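+    /// Since the modulus has 446 bits, reducing 512 uniform bits leaves only a negligible bias
+    /// (on the order of 2^-66).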
+ fn from_uniform_bytes(bytes: &[u8; 64]) -> Self { + Self::from_u512([ + u64::from_le_bytes(bytes[0..8].try_into().unwrap()), + u64::from_le_bytes(bytes[8..16].try_into().unwrap()), + u64::from_le_bytes(bytes[16..24].try_into().unwrap()), + u64::from_le_bytes(bytes[24..32].try_into().unwrap()), + u64::from_le_bytes(bytes[32..40].try_into().unwrap()), + u64::from_le_bytes(bytes[40..48].try_into().unwrap()), + u64::from_le_bytes(bytes[48..56].try_into().unwrap()), + u64::from_le_bytes(bytes[56..64].try_into().unwrap()), + ]) + } +} + +impl WithSmallOrderMulGroup<3> for Fp { + const ZETA: Self = ZETA; +} + +#[cfg(test)] +mod test { + use crate::serde::SerdeObject; + + use super::*; + use ark_std::{end_timer, start_timer}; + use ff::Field; + use rand::SeedableRng; + use rand_core::OsRng; + use rand_xorshift::XorShiftRng; + + #[test] + fn test_sqrt() { + let v = (Fp::TWO_INV).square().sqrt().unwrap(); + assert!(v == Fp::TWO_INV || (-v) == Fp::TWO_INV); + + for _ in 0..10000 { + let a = Fp::random(OsRng); + let mut b = a; + b = b.square(); + + let b = b.sqrt().unwrap(); + let mut negb = b; + negb = negb.neg(); + + assert!(a == b || a == negb); + } + } + + #[test] + fn test_field() { + crate::tests::field::random_field_tests::("Eris scalar".to_string()); + } + + #[test] + fn test_delta() { + assert_eq!(Fp::DELTA, GENERATOR.pow([1u64 << Fp::S])); + assert_eq!(Fp::DELTA, Fp::MULTIPLICATIVE_GENERATOR.pow([1u64 << Fp::S])); + } + + #[test] + fn test_zeta() { + assert_eq!(Fp::ZETA * Fp::ZETA * Fp::ZETA, Fp::ONE); + assert_ne!(Fp::ZETA * Fp::ZETA, Fp::ONE); + } + + #[test] + fn test_from_u512() { + const N_VECS: usize = 10; + let expected_results = [ + Fp::from_raw([ + 0x93638251ffeffed3, + 0x4d32f4d20020be11, + 0x9c39ee168df390f0, + 0xaeef355d313cce4b, + 0xc97c592ef6030675, + 0xd7bc83d286537318, + 0x01d4a87b24f91154, + ]), + Fp::from_raw([ + 0x63e0a8f1beefc612, + 0xbb28d56dae950a42, + 0x5264111f4a5ea3ad, + 0xbebe71c829f662f7, + 0xa760708568d6060c, + 0x617d8b0cda3f6328, + 0x03096ea964e009c0, + ]), + Fp::from_raw([ + 0xdeaedbda63b3e431, + 0x65892bc45ec174c8, + 0x83ad8d96c18556c7, + 0x3fce5f9d2c537fbe, + 0x001666753a4972d1, + 0x9f7f457a48d6d322, + 0x20b2fadc6bf4004d, + ]), + Fp::from_raw([ + 0x6eea9cbd68b174cf, + 0x63aa4abda18f73e6, + 0x0a6ccc999b1c7864, + 0x0f90b43928625cc2, + 0x55f541b0680af76b, + 0x2045de539849b035, + 0x1d5d7b5f6e8cc333, + ]), + Fp::from_raw([ + 0x673df0f69b71a763, + 0x215a1362cfe53e1e, + 0x7028d2b3766b0f40, + 0x996ac521f57a7f05, + 0x5006663a5c8cea53, + 0xd7ead2b7c71e460d, + 0x0f7c36b781cba9ed, + ]), + Fp::from_raw([ + 0x2eed10e8f00b189d, + 0xe6c79fb4600e94d4, + 0x2a9066b23daac6d4, + 0x476d275780b553fe, + 0xc3f2296317f71051, + 0xb1d2bb5373270c43, + 0x0e18a3597be61302, + ]), + Fp::from_raw([ + 0x7fbbc6b3e494ca68, + 0x2afcc7335152430b, + 0x93d5bd3acbccf3b3, + 0x61a76bb383622b8c, + 0x93efc4d40d7fac4d, + 0x0a791ad7698655a7, + 0x22b10d5c1090eec8, + ]), + Fp::from_raw([ + 0x596eec60211ad67b, + 0xf23f57b9f9db8c07, + 0x33e66f105ffc5e45, + 0xb10ef45226f3ae42, + 0xb98a559ccfc0ba32, + 0x819ba919d0b6e9b5, + 0x20f73876330a90e8, + ]), + Fp::from_raw([ + 0xbade57a48e2d9868, + 0xe61829ffe983fcfc, + 0xd0d080b774c31996, + 0xa1d712ef206b4a2f, + 0x7957f20173071cf6, + 0xf850f49359458652, + 0x17ba9f9aa08b9ee2, + ]), + Fp::from_raw([ + 0xd0239c8282ccc372, + 0xfa20a695ee8f6288, + 0x269f2ef315e029a5, + 0xcc915da35e10b4e6, + 0x8406f6977aadce0f, + 0xd7d5d8bc4497465a, + 0x08ce8bee1323d4f9, + ]), + ]; + + let mut seeded_rng = XorShiftRng::seed_from_u64(0u64); + let uniform_bytes = std::iter::from_fn(|| { + let 
mut bytes = [0u8; 64]; + seeded_rng.fill_bytes(&mut bytes); + Some(bytes) + }) + .take(N_VECS) + .collect::>(); + + for i in 0..N_VECS { + let p = Fp::from_uniform_bytes(&uniform_bytes[i]); + assert_eq!(expected_results[i], p); + } + } + + #[test] + #[cfg(feature = "bits")] + fn test_bits() { + crate::tests::field::random_bits_tests::("Fp".to_string()); + } + + #[test] + fn test_serialization() { + crate::tests::field::random_serialization_test::("Fp".to_string()); + #[cfg(feature = "derive_serde")] + crate::tests::field::random_serde_test::("Fp".to_string()); + } + + fn is_less_than(x: &[u64; 7], y: &[u64; 7]) -> bool { + match x[6].cmp(&y[6]) { + core::cmp::Ordering::Less => return true, + core::cmp::Ordering::Greater => return false, + _ => {} + } + match x[5].cmp(&y[5]) { + core::cmp::Ordering::Less => return true, + core::cmp::Ordering::Greater => return false, + _ => {} + } + match x[4].cmp(&y[4]) { + core::cmp::Ordering::Less => return true, + core::cmp::Ordering::Greater => return false, + _ => {} + } + match x[3].cmp(&y[3]) { + core::cmp::Ordering::Less => return true, + core::cmp::Ordering::Greater => return false, + _ => {} + } + match x[2].cmp(&y[2]) { + core::cmp::Ordering::Less => return true, + core::cmp::Ordering::Greater => return false, + _ => {} + } + match x[1].cmp(&y[1]) { + core::cmp::Ordering::Less => return true, + core::cmp::Ordering::Greater => return false, + _ => {} + } + x[0].lt(&y[0]) + } + + #[test] + fn test_serialization_check() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + let start = start_timer!(|| "serialize Fp"); + // failure check + for _ in 0..1000000 { + let rand_word = [(); 7].map(|_| rng.next_u64()); + let a = Fp(rand_word); + let rand_bytes = a.to_raw_bytes(); + match is_less_than(&rand_word, &MODULUS.0) { + false => { + assert!(Fp::from_raw_bytes(&rand_bytes).is_none()); + } + _ => { + assert_eq!(Fp::from_raw_bytes(&rand_bytes), Some(a)); + } + } + } + end_timer!(start); + } +} diff --git a/src/pluto_eris/fields/fp12.rs b/src/pluto_eris/fields/fp12.rs new file mode 100644 index 00000000..e5b6a3c8 --- /dev/null +++ b/src/pluto_eris/fields/fp12.rs @@ -0,0 +1,666 @@ +use super::fp::Fp; +use super::fp2::Fp2; +use super::fp6::Fp6; +use crate::ff::Field; +use core::ops::{Add, Mul, Neg, Sub}; +use rand::RngCore; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +/// -GAMMA is a quadratic non-residue in Fp6. 
Fp12 = Fp6[X]/(X^2 + GAMMA) +/// We introduce the variable w such that w^2 = -GAMMA +/// GAMMA = - v +#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)] +pub struct Fp12 { + c0: Fp6, + c1: Fp6, +} + +impl ConditionallySelectable for Fp12 { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Fp12 { + c0: Fp6::conditional_select(&a.c0, &b.c0, choice), + c1: Fp6::conditional_select(&a.c1, &b.c1, choice), + } + } +} + +impl ConstantTimeEq for Fp12 { + fn ct_eq(&self, other: &Self) -> Choice { + self.c0.ct_eq(&other.c0) & self.c1.ct_eq(&other.c1) + } +} + +impl Neg for Fp12 { + type Output = Fp12; + + #[inline] + fn neg(self) -> Fp12 { + -&self + } +} + +impl<'a> Neg for &'a Fp12 { + type Output = Fp12; + + #[inline] + fn neg(self) -> Fp12 { + self.neg() + } +} + +impl<'a, 'b> Sub<&'b Fp12> for &'a Fp12 { + type Output = Fp12; + + #[inline] + fn sub(self, rhs: &'b Fp12) -> Fp12 { + self.sub(rhs) + } +} + +impl<'a, 'b> Add<&'b Fp12> for &'a Fp12 { + type Output = Fp12; + + #[inline] + fn add(self, rhs: &'b Fp12) -> Fp12 { + self.add(rhs) + } +} + +impl<'a, 'b> Mul<&'b Fp12> for &'a Fp12 { + type Output = Fp12; + + #[inline] + fn mul(self, rhs: &'b Fp12) -> Fp12 { + self.mul(rhs) + } +} + +use crate::{ + impl_add_binop_specify_output, impl_binops_additive, impl_binops_additive_specify_output, + impl_binops_multiplicative, impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, + impl_sum_prod, +}; +impl_binops_additive!(Fp12, Fp12); +impl_binops_multiplicative!(Fp12, Fp12); +impl_sum_prod!(Fp12); + +impl Fp12 { + #[inline] + pub const fn zero() -> Self { + Fp12 { + c0: Fp6::ZERO, + c1: Fp6::ZERO, + } + } + + #[inline] + pub const fn one() -> Self { + Fp12 { + c0: Fp6::ONE, + c1: Fp6::ZERO, + } + } + + pub fn mul_assign(&mut self, other: &Self) { + let t0 = self.c0 * other.c0; + let mut t1 = self.c1 * other.c1; + let t2 = other.c0 + other.c1; + + self.c1 += &self.c0; + self.c1 *= &t2; + self.c1 -= &t0; + self.c1 -= &t1; + + t1.mul_by_nonresidue(); + self.c0 = t0 + t1; + } + + pub fn square_assign(&mut self) { + let mut ab = self.c0 * self.c1; + + let c0c1 = self.c0 + self.c1; + + let mut c0 = self.c1; + c0.mul_by_nonresidue(); + c0 += &self.c0; + c0 *= &c0c1; + c0 -= &ab; + self.c1 = ab; + self.c1 += &ab; + ab.mul_by_nonresidue(); + c0 -= &ab; + self.c0 = c0; + } + + pub fn double(&self) -> Self { + Self { + c0: self.c0.double(), + c1: self.c1.double(), + } + } + + pub fn double_assign(&mut self) { + self.c0 = self.c0.double(); + self.c1 = self.c1.double(); + } + + pub fn add(&self, other: &Self) -> Self { + Self { + c0: self.c0 + other.c0, + c1: self.c1 + other.c1, + } + } + + pub fn sub(&self, other: &Self) -> Self { + Self { + c0: self.c0 - other.c0, + c1: self.c1 - other.c1, + } + } + + pub fn mul(&self, other: &Self) -> Self { + let mut t = *other; + t.mul_assign(self); + t + } + + pub fn square(&self) -> Self { + let mut t = *self; + t.square_assign(); + t + } + + #[inline(always)] + pub fn neg(&self) -> Self { + Self { + c0: -self.c0, + c1: -self.c1, + } + } + + #[inline(always)] + pub fn conjugate(&mut self) { + self.c1 = -self.c1; + } + + pub fn frobenius_map(&mut self, power: usize) { + self.c0.frobenius_map(power); + self.c1.frobenius_map(power); + + self.c1.c0.mul_assign(&FROBENIUS_COEFF_FP12_C1[power % 12]); + self.c1.c1.mul_assign(&FROBENIUS_COEFF_FP12_C1[power % 12]); + self.c1.c2.mul_assign(&FROBENIUS_COEFF_FP12_C1[power % 12]); + } + + pub fn mul_by_014(&mut self, c0: &Fp2, c1: &Fp2, c4: &Fp2) { + let mut aa = self.c0; + aa.mul_by_01(c0, c1); + let 
mut bb = self.c1; + bb.mul_by_1(c4); + let o = c1 + c4; + self.c1 += &self.c0; + self.c1.mul_by_01(c0, &o); + self.c1 -= &aa; + self.c1 -= &bb; + self.c0 = bb; + self.c0.mul_by_nonresidue(); + self.c0 += &aa; + } + + pub fn mul_by_034(&mut self, c0: &Fp2, c3: &Fp2, c4: &Fp2) { + let t0 = Fp6 { + c0: self.c0.c0 * c0, + c1: self.c0.c1 * c0, + c2: self.c0.c2 * c0, + }; + let mut t1 = self.c1; + t1.mul_by_01(c3, c4); + let o = c0 + c3; + let mut t2 = self.c0 + self.c1; + t2.mul_by_01(&o, c4); + t2 -= t0; + self.c1 = t2 - t1; + t1.mul_by_nonresidue(); + self.c0 = t0 + t1; + } + + pub fn invert(&self) -> CtOption { + let mut c0s = self.c0; + c0s.square_assign(); + let mut c1s = self.c1; + c1s.square_assign(); + c1s.mul_by_nonresidue(); + c0s -= &c1s; + + c0s.invert().map(|t| { + let mut tmp = Fp12 { c0: t, c1: t }; + tmp.c0.mul_assign(&self.c0); + tmp.c1.mul_assign(&self.c1); + tmp.c1 = tmp.c1.neg(); + + tmp + }) + } + + pub fn cyclotomic_square(&mut self) { + fn fp4_square(c0: &mut Fp2, c1: &mut Fp2, a0: &Fp2, a1: &Fp2) { + let t0 = a0.square(); + let t1 = a1.square(); + let mut t2 = t1; + t2.mul_by_nonresidue(); + *c0 = t2 + t0; + t2 = a0 + a1; + t2.square_assign(); + t2 -= t0; + *c1 = t2 - t1; + } + + let mut t3 = Fp2::zero(); + let mut t4 = Fp2::zero(); + let mut t5 = Fp2::zero(); + let mut t6 = Fp2::zero(); + + fp4_square(&mut t3, &mut t4, &self.c0.c0, &self.c1.c1); + let mut t2 = t3 - self.c0.c0; + t2.double_assign(); + self.c0.c0 = t2 + t3; + + t2 = t4 + self.c1.c1; + t2.double_assign(); + self.c1.c1 = t2 + t4; + + fp4_square(&mut t3, &mut t4, &self.c1.c0, &self.c0.c2); + fp4_square(&mut t5, &mut t6, &self.c0.c1, &self.c1.c2); + + t2 = t3 - self.c0.c1; + t2.double_assign(); + self.c0.c1 = t2 + t3; + t2 = t4 + self.c1.c2; + t2.double_assign(); + self.c1.c2 = t2 + t4; + t3 = t6; + t3.mul_by_nonresidue(); + t2 = t3 + self.c1.c0; + t2.double_assign(); + self.c1.c0 = t2 + t3; + t2 = t5 - self.c0.c2; + t2.double_assign(); + self.c0.c2 = t2 + t5; + } +} + +impl Field for Fp12 { + const ZERO: Self = Self::zero(); + const ONE: Self = Self::one(); + + fn random(mut rng: impl RngCore) -> Self { + Fp12 { + c0: Fp6::random(&mut rng), + c1: Fp6::random(&mut rng), + } + } + + fn is_zero(&self) -> Choice { + self.c0.is_zero() & self.c1.is_zero() + } + + fn square(&self) -> Self { + self.square() + } + + fn double(&self) -> Self { + self.double() + } + + fn sqrt(&self) -> CtOption { + // The square root method is typically only required for finding y-coordinate + // given the x-coordinate of an EC point. Fields over which we have not + // defined a curve do not need this method. + unimplemented!() + } + + fn sqrt_ratio(_num: &Self, _div: &Self) -> (Choice, Self) { + // The square root method is typically only required for finding y-coordinate + // given the x-coordinate of an EC point. Fields over which we have not + // defined a curve do not need this method. 
+ unimplemented!() + } + + fn invert(&self) -> CtOption { + self.invert() + } +} + +/// Fp2(v)^((p^i-1)/6) for i=0,...,11 +pub const FROBENIUS_COEFF_FP12_C1: [Fp2; 12] = [ + // Fp2(v)**(((p^0) - 1) / 6) + Fp2::ONE, + // Fp2(v)**(((p^1) - 1) / 6) + Fp2 { + // 0x3c3ad3da8b99cb1df0709dc343113ccd9892dedd51f30695d89c647b90de8f41df055384b9e6cfd4e70648622c750f32ee965dfef2303d3 + c0: Fp::from_raw([ + 0x2ee965dfef2303d3, + 0x4e70648622c750f3, + 0x1df055384b9e6cfd, + 0x5d89c647b90de8f4, + 0xd9892dedd51f3069, + 0xdf0709dc343113cc, + 0x03c3ad3da8b99cb1, + ]), + // 0x149fd9ed2c7affe7aaa3b912182da22dccb29838628f04b6f333d052540294889f03876b2ddb143559f9373f4cf44e6afa0be24ad758a5ff + c1: Fp::from_raw([ + 0xfa0be24ad758a5ff, + 0x59f9373f4cf44e6a, + 0x9f03876b2ddb1435, + 0xf333d05254029488, + 0xccb29838628f04b6, + 0xaaa3b912182da22d, + 0x149fd9ed2c7affe7, + ]), + }, + // Fp2(v)**(((p^2) - 1) / 6) + Fp2 { + // 0x480000000000360001c950000d7ee0e4a803c956d01c903d720dc8ad8b38dffaf50c100004c37fffffff + c0: Fp::from_raw([ + 0x100004c37fffffff, + 0xc8ad8b38dffaf50c, + 0xc956d01c903d720d, + 0x50000d7ee0e4a803, + 0x00000000360001c9, + 0x0000000000004800, + 0x0000000000000000, + ]), + c1: Fp::ZERO, + }, + // Fp2(v)**(((p^3) - 1) / 6) + Fp2 { + // 0x1baee9e044d94d205764b80089c40010af5ca1e56a2a81e6a5d8739325984fc889d390efef216fe4f4af912a897f60a128a3be71be4995ca + c0: Fp::from_raw([ + 0x28a3be71be4995ca, + 0xf4af912a897f60a1, + 0x89d390efef216fe4, + 0xa5d8739325984fc8, + 0xaf5ca1e56a2a81e6, + 0x5764b80089c40010, + 0x1baee9e044d94d20, + ]), + // 0x20d4c11700e832829b26f1795339413be65e47a7716bc8bc07cd6b44b03ef1130b3c35a77291b29d6f45d28e4ef1ecb9678f4479a1151232 + c1: Fp::from_raw([ + 0x678f4479a1151232, + 0x6f45d28e4ef1ecb9, + 0x0b3c35a77291b29d, + 0x07cd6b44b03ef113, + 0xe65e47a7716bc8bc, + 0x9b26f1795339413b, + 0x20d4c11700e83282, + ]), + }, + // Fp2(v)**(((p^4) - 1) / 6) + Fp2 { + // 0x480000000000360001c950000d7ee0e4a803c956d01c903d720dc8ad8b38dffaf50c100004c37ffffffe + c0: Fp::from_raw([ + 0x100004c37ffffffe, + 0xc8ad8b38dffaf50c, + 0xc956d01c903d720d, + 0x50000d7ee0e4a803, + 0x00000000360001c9, + 0x0000000000004800, + 0x0000000000000000, + ]), + c1: Fp::ZERO, + }, + // Fp2(v)**(((p^5) - 1) / 6) + Fp2 { + // 0x17eb3ca29c1fb06e785dae245592ec43d5d373f7950b517d484ead4b6c8a66d46be33bb7a38302e7a63f2ca466b80fadf9ba5891cf2691f7 + c0: Fp::from_raw([ + 0xf9ba5891cf2691f7, + 0xa63f2ca466b80fad, + 0x6be33bb7a38302e7, + 0x484ead4b6c8a66d4, + 0xd5d373f7950b517d, + 0x785dae245592ec43, + 0x17eb3ca29c1fb06e, + ]), + // 0xc34e729d46d329af08338673b0b9f0e19abaf6f0edcc40514999af25c3c5c8a6c38ae3c44b69e68154c9b4f01fd9e4e6d83622ec9bc6c33 + c1: Fp::from_raw([ + 0x6d83622ec9bc6c33, + 0x154c9b4f01fd9e4e, + 0x6c38ae3c44b69e68, + 0x14999af25c3c5c8a, + 0x19abaf6f0edcc405, + 0xf08338673b0b9f0e, + 0x0c34e729d46d329a, + ]), + }, + // Fp2(v)**(((p^6) - 1) / 6) + Fp2 { + // 0x24000000000024000130e0000d7f70e4a803ca76f439266f443f9a5cda8a6c7be4a7a5fe8fadffd6a2a7e8c30006b9459ffffcd300000000 + c0: Fp::from_raw([ + 0x9ffffcd300000000, + 0xa2a7e8c30006b945, + 0xe4a7a5fe8fadffd6, + 0x443f9a5cda8a6c7b, + 0xa803ca76f439266f, + 0x0130e0000d7f70e4, + 0x2400000000002400, + ]), + c1: Fp::ZERO, + }, + // Fp2(v)**(((p^7) - 1) / 6) + Fp2 { + // 0x203c52c25746874e2229d623d94e5d17ce7a9c891f19f605e6b5d415217c8387c6b750c6440f92d95437843cdd3f6852711696f310dcfc2e + c0: Fp::from_raw([ + 0x711696f310dcfc2e, + 0x5437843cdd3f6852, + 0xc6b750c6440f92d9, + 0xe6b5d415217c8387, + 0xce7a9c891f19f605, + 0x2229d623d94e5d17, + 0x203c52c25746874e, + ]), + // 
0xf602612d3852418568d26edf551ceb6db51323e91aa21b8510bca0a8687d7f345a41e9361d2eba148aeb183b3126adaa5f41a8828a75a02 + c1: Fp::from_raw([ + 0xa5f41a8828a75a02, + 0x48aeb183b3126ada, + 0x45a41e9361d2eba1, + 0x510bca0a8687d7f3, + 0xdb51323e91aa21b8, + 0x568d26edf551ceb6, + 0x0f602612d3852418, + ]), + }, + // Fp2(v)**(((p^8) - 1) / 6) + Fp2 { + // 0x24000000000024000130e0000d7f28e4a803ca76be3924a5f43f8cddf9a5c4781b50d5e1ff708dc8d9fa5d8a200bc4398ffff80f80000002 + c0: Fp::from_raw([ + 0x8ffff80f80000002, + 0xd9fa5d8a200bc439, + 0x1b50d5e1ff708dc8, + 0xf43f8cddf9a5c478, + 0xa803ca76be3924a5, + 0x0130e0000d7f28e4, + 0x2400000000002400, + ]), + c1: Fp::ZERO, + }, + // Fp2(v)**(((p^9) - 1) / 6) + Fp2 { + // 0x851161fbb26d6dfa9cc27ff83bb70d3f8a728918a0ea4889e6726c9b4f21cb35ad4150ea08c8ff1adf85798768758a4775c3e6141b66a37 + c0: Fp::from_raw([ + 0x775c3e6141b66a37, + 0xadf85798768758a4, + 0x5ad4150ea08c8ff1, + 0x9e6726c9b4f21cb3, + 0xf8a728918a0ea488, + 0xa9cc27ff83bb70d3, + 0x0851161fbb26d6df, + ]), + // 0x32b3ee8ff17f17d6609ee86ba462fa8c1a582cf82cd5db33c722f182a4b7b68d96b70571d1c4d3933621634b114cc8c3870b8595eeaedcf + c1: Fp::from_raw([ + 0x3870b8595eeaedcf, + 0x33621634b114cc8c, + 0xd96b70571d1c4d39, + 0x3c722f182a4b7b68, + 0xc1a582cf82cd5db3, + 0x6609ee86ba462fa8, + 0x032b3ee8ff17f17d, + ]), + }, + // Fp2(v)**(((p^10) - 1) / 6) + Fp2 { + // 0x24000000000024000130e0000d7f28e4a803ca76be3924a5f43f8cddf9a5c4781b50d5e1ff708dc8d9fa5d8a200bc4398ffff80f80000003 + c0: Fp::from_raw([ + 0x8ffff80f80000003, + 0xd9fa5d8a200bc439, + 0x1b50d5e1ff708dc8, + 0xf43f8cddf9a5c478, + 0xa803ca76be3924a5, + 0x0130e0000d7f28e4, + 0x2400000000002400, + ]), + c1: Fp::ZERO, + }, + // Fp2(v)**(((p^11) - 1) / 6) + Fp2 { + // 0xc14c35d63e0739188d331dbb7ec84a0d230567f5f2dd4f1fbf0ed116e0005a778c46a46ec2afceefc68bc1e994ea997a645a44130d96e0a + c0: Fp::from_raw([ + 0xa645a44130d96e0a, + 0xfc68bc1e994ea997, + 0x78c46a46ec2afcee, + 0xfbf0ed116e0005a7, + 0xd230567f5f2dd4f1, + 0x88d331dbb7ec84a0, + 0x0c14c35d63e07391, + ]), + // 0x17cb18d62b92f16510ada798d273d1d68e581b07e55c626a2fa5ff6a7e4e0ff1786ef7c24af7616e8d5b4d73fe091af7327c9aa4364393ce + c1: Fp::from_raw([ + 0x327c9aa4364393ce, + 0x8d5b4d73fe091af7, + 0x786ef7c24af7616e, + 0x2fa5ff6a7e4e0ff1, + 0x8e581b07e55c626a, + 0x10ada798d273d1d6, + 0x17cb18d62b92f165, + ]), + }, +]; + +#[cfg(test)] +use rand::SeedableRng; +#[cfg(test)] +use rand_xorshift::XorShiftRng; + +#[test] +fn test_fp12_mul_by_014() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + for _ in 0..1000 { + let c0 = Fp2::random(&mut rng); + let c1 = Fp2::random(&mut rng); + let c5 = Fp2::random(&mut rng); + let mut a = Fp12::random(&mut rng); + let mut b = a; + + a.mul_by_014(&c0, &c1, &c5); + b.mul_assign(&Fp12 { + c0: Fp6 { + c0, + c1, + c2: Fp2::zero(), + }, + c1: Fp6 { + c0: Fp2::zero(), + c1: c5, + c2: Fp2::zero(), + }, + }); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fp12_mul_by_034() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + for _ in 0..1000 { + let c0 = Fp2::random(&mut rng); + let c3 = Fp2::random(&mut rng); + let c4 = Fp2::random(&mut rng); + let mut a = Fp12::random(&mut rng); + let mut b = a; + + a.mul_by_034(&c0, &c3, &c4); + b.mul_assign(&Fp12 { + c0: Fp6 { + c0, + c1: Fp2::zero(), + c2: Fp2::zero(), + }, + c1: Fp6 { + c0: c3, + c1: c4, + c2: Fp2::zero(), + }, + }); + + assert_eq!(a, b); + } +} + 
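+// Naming note: `mul_by_014` and `mul_by_034` multiply by a sparse Fp12 operand whose only
+// non-zero Fp2 coefficients, in the basis (c0.c0, c0.c1, c0.c2, c1.c0, c1.c1, c1.c2), sit at
+// positions (0, 1, 4) and (0, 3, 4) respectively; the two tests above check them against a
+// full `mul_assign` by the matching sparse operand.
+
+// A minimal illustrative sketch (test name and iteration count are arbitrary) of the unitarity
+// property that `Neg for Gt` relies on: for any non-zero f, g = conj(f) * f^{-1} satisfies
+// conj(g) = g^{-1}, so negating a pairing output reduces to a conjugation.
+#[test]
+fn test_fp12_unitary_conjugation_sketch() {
+    let mut rng = XorShiftRng::from_seed([
+        0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
+        0xe5,
+    ]);
+
+    for _ in 0..50 {
+        let f = Fp12::random(&mut rng);
+        if bool::from(f.is_zero()) {
+            continue;
+        }
+        // g = conj(f) / f is unitary.
+        let mut g = f;
+        g.conjugate();
+        g = g * f.invert().unwrap();
+
+        // Conjugation inverts a unitary element: conj(g) * g == 1.
+        let mut h = g;
+        h.conjugate();
+        assert_eq!(h * g, Fp12::one());
+    }
+}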
+#[test] +fn test_squaring() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + for _ in 0..1000 { + let mut a = Fp12::random(&mut rng); + let mut b = a; + b.mul_assign(&a); + a.square_assign(); + assert_eq!(a, b); + } +} + +#[test] +fn test_frobenius() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + for _ in 0..50 { + for i in 0..13 { + let mut a = Fp12::random(&mut rng); + let mut b = a; + + for _ in 0..i { + a = a.pow_vartime(&[ + 0x9ffffcd300000001, + 0xa2a7e8c30006b945, + 0xe4a7a5fe8fadffd6, + 0x443f9a5cda8a6c7b, + 0xa803ca76f439266f, + 0x0130e0000d7f70e4, + 0x2400000000002400, + ]); + } + b.frobenius_map(i); + + assert_eq!(a, b); + } + } +} + +#[test] +fn test_field() { + crate::tests::field::random_field_tests::("fp12".to_string()); +} diff --git a/src/pluto_eris/fields/fp2.rs b/src/pluto_eris/fields/fp2.rs new file mode 100644 index 00000000..14536fb6 --- /dev/null +++ b/src/pluto_eris/fields/fp2.rs @@ -0,0 +1,777 @@ +use super::fp::{Fp, MODULUS_STR}; +use crate::ff::{Field, FromUniformBytes, PrimeField, WithSmallOrderMulGroup}; +use crate::ff_ext::Legendre; +use core::convert::TryInto; +use core::ops::{Add, Mul, Neg, Sub}; +use rand::RngCore; +use std::cmp::Ordering; +use std::ops::MulAssign; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +#[cfg(feature = "derive_serde")] +use serde::{Deserialize, Serialize}; + +/// -ALPHA is a quadratic non-residue in Fp. Fp2 = Fp[X]/(X^2 + ALPHA) +/// We introduce the variable u such that u^2 = -ALPHA + +/// U_SQUARE = -5 +/// 0x24000000000024000130e0000d7f70e4a803ca76f439266f443f9a5cda8a6c7be4a7a5fe8fadffd6a2a7e8c30006b9459ffffcd2fffffffc +const U_SQUARE: Fp = Fp::from_raw([ + 0x9ffffcd2fffffffc, + 0xa2a7e8c30006b945, + 0xe4a7a5fe8fadffd6, + 0x443f9a5cda8a6c7b, + 0xa803ca76f439266f, + 0x0130e0000d7f70e4, + 0x2400000000002400, +]); + +const NEG_ONE: Fp2 = Fp2 { + c0: super::fp::NEG_ONE, + c1: Fp::ZERO, +}; + +/// An element of Fp2, represented by c0 + c1 * u. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[cfg_attr(feature = "derive_serde", derive(Serialize, Deserialize))] +pub struct Fp2 { + pub c0: Fp, + pub c1: Fp, +} + +/// `Fp2` elements are ordered lexicographically. 
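+/// `c1` is treated as the most significant coefficient: elements are compared
+/// by `c1` first and by `c0` only on ties, as implemented below.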
+impl Ord for Fp2 { + #[inline(always)] + fn cmp(&self, other: &Fp2) -> Ordering { + match self.c1.cmp(&other.c1) { + Ordering::Greater => Ordering::Greater, + Ordering::Less => Ordering::Less, + Ordering::Equal => self.c0.cmp(&other.c0), + } + } +} + +impl PartialOrd for Fp2 { + #[inline(always)] + fn partial_cmp(&self, other: &Fp2) -> Option { + Some(self.cmp(other)) + } +} + +impl ConditionallySelectable for Fp2 { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Fp2 { + c0: Fp::conditional_select(&a.c0, &b.c0, choice), + c1: Fp::conditional_select(&a.c1, &b.c1, choice), + } + } +} + +impl ConstantTimeEq for Fp2 { + fn ct_eq(&self, other: &Self) -> Choice { + self.c0.ct_eq(&other.c0) & self.c1.ct_eq(&other.c1) + } +} + +impl Default for Fp2 { + #[inline] + fn default() -> Self { + Self::ZERO + } +} + +impl From for [u8; 112] { + fn from(value: Fp2) -> [u8; 112] { + value.to_bytes() + } +} + +impl<'a> From<&'a Fp2> for [u8; 112] { + fn from(value: &'a Fp2) -> [u8; 112] { + value.to_bytes() + } +} + +impl Neg for Fp2 { + type Output = Fp2; + + #[inline] + fn neg(self) -> Fp2 { + -&self + } +} + +impl<'a> Neg for &'a Fp2 { + type Output = Fp2; + + #[inline] + fn neg(self) -> Fp2 { + self.neg() + } +} + +impl<'a, 'b> Sub<&'b Fp2> for &'a Fp2 { + type Output = Fp2; + + #[inline] + fn sub(self, rhs: &'b Fp2) -> Fp2 { + self.sub(rhs) + } +} + +impl<'a, 'b> Add<&'b Fp2> for &'a Fp2 { + type Output = Fp2; + + #[inline] + fn add(self, rhs: &'b Fp2) -> Fp2 { + self.add(rhs) + } +} + +impl<'a, 'b> Mul<&'b Fp2> for &'a Fp2 { + type Output = Fp2; + + #[inline] + fn mul(self, rhs: &'b Fp2) -> Fp2 { + self.mul(rhs) + } +} + +use crate::{ + impl_add_binop_specify_output, impl_binops_additive, impl_binops_additive_specify_output, + impl_binops_multiplicative, impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, + impl_sum_prod, +}; +impl_binops_additive!(Fp2, Fp2); +impl_binops_multiplicative!(Fp2, Fp2); +impl_sum_prod!(Fp2); + +/// Size in bytes of a `Fp2` element. +const SIZE: usize = 112; +/// Size in bytes of a each coefficient of `Fp2`. +const COEF_SIZE: usize = 56; + +impl Fp2 { + /// Returns the zero element. + #[inline] + pub const fn zero() -> Fp2 { + Fp2 { + c0: Fp::zero(), + c1: Fp::zero(), + } + } + + /// Returns the unit. + #[inline] + pub const fn one() -> Fp2 { + Fp2 { + c0: Fp::one(), + c1: Fp::zero(), + } + } + + /// Given its `Fp` coefficients c0, c1. Returns the element of `Fp2`: c0 + c1 * u. + pub const fn new(c0: Fp, c1: Fp) -> Self { + Fp2 { c0, c1 } + } + + /// Returns the size in bytes of a `Fp2` element. + pub const fn size() -> usize { + SIZE + } + + /// Attempts to convert a little-endian byte representation of + /// a scalar into a `Fp`, failing if the input is not canonical. + pub fn from_bytes(bytes: &[u8; SIZE]) -> CtOption { + let c0 = Fp::from_bytes(bytes[0..COEF_SIZE].try_into().unwrap()); + let c1 = Fp::from_bytes(bytes[COEF_SIZE..SIZE].try_into().unwrap()); + CtOption::new( + Fp2 { + c0: c0.unwrap(), + c1: c1.unwrap(), + }, + c0.is_some() & c1.is_some(), + ) + } + + /// Converts an element of `Fp` into a byte representation in + /// little-endian byte order. 
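+    /// The first `COEF_SIZE` (56) bytes encode `c0` and the remaining 56 bytes encode `c1`.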
+ pub fn to_bytes(self) -> [u8; SIZE] { + let mut res = [0u8; SIZE]; + let c0_bytes = self.c0.to_bytes(); + let c1_bytes = self.c1.to_bytes(); + res[0..COEF_SIZE].copy_from_slice(&c0_bytes[..]); + res[COEF_SIZE..SIZE].copy_from_slice(&c1_bytes[..]); + res + } + + // TODO: This is a naive method using 4 multiplications + pub fn mul_assign(&mut self, other: &Self) { + // r0 = s0 * s0 + U_SQUARE * s1 * o1 + // r1 = s0 * o1 - s1 * o0 + + let t0 = self.c0 * other.c0; + let t1 = self.c0 * other.c1; + let t2 = self.c1 * other.c0; + let t3 = self.c1 * other.c1; + + self.c0 = t0 + U_SQUARE * t3; + self.c1 = t1 + t2 + } + + // TODO: This is a naive method using 3 multiplications + pub fn square_assign(&mut self) { + // r0 = s0^2 + U_SQUARE * s1^2 + // r1 = 2* s0s1 + + let ab = self.c0 * self.c1; + let a2 = self.c0 * self.c0; + let b2 = self.c1 * self.c1; + + self.c1 = ab.double(); + self.c0 = a2 + U_SQUARE * b2; + } + + pub fn double(&self) -> Self { + Self { + c0: self.c0.double(), + c1: self.c1.double(), + } + } + + pub fn double_assign(&mut self) { + self.c0 = self.c0.double(); + self.c1 = self.c1.double(); + } + + pub fn add(&self, other: &Self) -> Self { + Self { + c0: self.c0.add(&other.c0), + c1: self.c1.add(&other.c1), + } + } + + pub fn sub(&self, other: &Self) -> Self { + Self { + c0: self.c0.sub(&other.c0), + c1: self.c1.sub(&other.c1), + } + } + + pub fn mul(&self, other: &Self) -> Self { + let mut t = *other; + t.mul_assign(self); + t + } + + pub fn square(&self) -> Self { + let mut t = *self; + t.square_assign(); + t + } + + pub fn neg(&self) -> Self { + Self { + c0: self.c0.neg(), + c1: self.c1.neg(), + } + } + + // conjucate by negating c1 + pub fn conjugate(&mut self) { + self.c1 = -self.c1; + } + + pub fn frobenius_map(&mut self, power: usize) { + //TODO Replace with constant time version if needed + if power % 2 != 0 { + self.conjugate() + } + } + + /// Multiply this element by cubic nonresidue: V_CUBE = 57/(u+3) + pub fn mul_by_nonresidue(&mut self) { + // (x + y * u) * 57/(u + 3) + self.mul_assign(&super::fp6::V_CUBE) + } + + pub fn invert(&self) -> CtOption { + let mut t1 = self.c1; + t1 = t1.square(); + t1 *= U_SQUARE; + let mut t0 = self.c0; + t0 = t0.square(); + //t0 = c0^2 - U_SQUARE c1^2 + t0 -= &t1; + t0.invert().map(|t| { + let mut tmp = Fp2 { + c0: self.c0, + c1: self.c1, + }; + tmp.c0 *= &t; + tmp.c1 *= &t; + tmp.c1 = -tmp.c1; + + tmp + }) + } + + /// Norm of Fp2 as extension field in u over Fp + fn norm(&self) -> Fp { + // norm = self * self.cojungate() + let t0 = self.c0.square(); + let t1 = self.c1.square() * U_SQUARE; + t1 - t0 + } +} + +impl Legendre for Fp2 { + fn legendre(&self) -> i64 { + self.norm().legendre() + } +} + +impl Field for Fp2 { + const ZERO: Self = Self::zero(); + const ONE: Self = Self::one(); + + fn random(mut rng: impl RngCore) -> Self { + Fp2 { + c0: Fp::random(&mut rng), + c1: Fp::random(&mut rng), + } + } + + fn is_zero(&self) -> Choice { + self.c0.is_zero() & self.c1.is_zero() + } + + fn square(&self) -> Self { + self.square() + } + + fn double(&self) -> Self { + self.double() + } + + fn sqrt(&self) -> CtOption { + // Algorithm 10, https://eprint.iacr.org/2012/685.pdf + + // Aux elements. Described in PRECOMPUTATION of Algorithm 10. 
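+        // `E` and `F` are fixed elements used only in the second branch of the
+        // algorithm below, where the candidate root derived from
+        // `b = self^((p - 1) / 4)` has to be adjusted.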
+ // As element of Fp2: E = 0 + U * + // 0x13e275a1fa6a13af7a82a3d83bc9e63a667c70cf991a36e603b21f15823a404a021848271d63f0875d232408689b4c6c67153f9701e19938 + const E: Fp2 = Fp2 { + c0: Fp::ZERO, + c1: Fp::from_raw([ + 0x67153f9701e19938, + 0x5d232408689b4c6c, + 0x021848271d63f087, + 0x03b21f15823a404a, + 0x667c70cf991a36e6, + 0x7a82a3d83bc9e63a, + 0x13e275a1fa6a13af, + ]), + }; + + // As element of Fp2: f = 5 + 0 * U + // 0x5 + const F: Fp2 = Fp2 { + c0: Fp::from_raw([0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + c1: Fp::ZERO, + }; + + // Algorithm (not constant time) + let b = self.pow_vartime(&[ + // (p-1)/4 = + // 0x900000000000900004c3800035fdc392a00f29dbd0e499bd10fe69736a29b1ef929e97fa3eb7ff5a8a9fa30c001ae5167ffff34c0000000 + 0x67ffff34c0000000, + 0xa8a9fa30c001ae51, + 0xf929e97fa3eb7ff5, + 0xd10fe69736a29b1e, + 0x2a00f29dbd0e499b, + 0x004c3800035fdc39, + 0x0900000000000900, + ]); + + let b_2 = b.square(); + let mut b_2_q = b_2; + b_2_q.frobenius_map(1); + + let a0 = b_2_q * b_2; + if a0 == NEG_ONE { + CtOption::new(a0, Choice::from(0)) + } else { + let mut x = b; + x.frobenius_map(1); + if x * b == Fp2::ONE { + let x0 = (b_2 * self).c0.sqrt().unwrap(); + x.c0.mul_assign(x0); + x.c1.mul_assign(x0); + CtOption::new(x, Choice::from(1)) + } else { + let x0 = (self * b_2 * F).sqrt().unwrap(); + x *= x0 * E; + CtOption::new(x, Choice::from(1)) + } + } + } + + fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) { + ff::helpers::sqrt_ratio_generic(num, div) + } + + fn invert(&self) -> CtOption { + self.invert() + } +} + +impl From for Fp2 { + fn from(bit: bool) -> Fp2 { + if bit { + Fp2::ONE + } else { + Fp2::ZERO + } + } +} + +impl From for Fp2 { + fn from(val: u64) -> Self { + Fp2 { + c0: Fp::from(val), + c1: Fp::zero(), + } + } +} + +// This trait is only implemented to satisfy the requirement of CurveExt +impl PrimeField for Fp2 { + type Repr = Fp2Bytes; + + const MODULUS: &'static str = MODULUS_STR; + const MULTIPLICATIVE_GENERATOR: Self = Fp2 { + c0: Fp::MULTIPLICATIVE_GENERATOR, + c1: Fp::ZERO, + }; + const NUM_BITS: u32 = 446; + const CAPACITY: u32 = 445; + const S: u32 = 0; + + // TODO: Check that we can just 0 this and forget. + const ROOT_OF_UNITY: Self = Fp2::zero(); + const ROOT_OF_UNITY_INV: Self = Fp2 { + c0: Fp::zero(), + c1: Fp::zero(), + }; + const DELTA: Self = Fp2 { + c0: Fp::zero(), + c1: Fp::zero(), + }; + const TWO_INV: Self = Fp2 { + c0: Fp::TWO_INV, + c1: Fp::zero(), + }; + + fn from_repr(repr: Self::Repr) -> CtOption { + let c0 = Fp::from_bytes(&repr.0[..COEF_SIZE].try_into().unwrap()); + let c1 = Fp::from_bytes(&repr.0[COEF_SIZE..].try_into().unwrap()); + // Disallow overflow representation + CtOption::new(Fp2::new(c0.unwrap(), c1.unwrap()), Choice::from(1)) + } + + fn to_repr(&self) -> Self::Repr { + Fp2Bytes(self.to_bytes()) + } + + fn is_odd(&self) -> Choice { + Choice::from(self.to_repr().as_ref()[0] & 1) + } +} + +impl FromUniformBytes<64> for Fp2 { + fn from_uniform_bytes(bytes: &[u8; 64]) -> Self { + Self::new(Fp::from_uniform_bytes(bytes), Fp::zero()) + } +} +#[derive(Clone, Copy, Debug)] +/// Canonical little-endian representation of a `Fp2` element. +/// First half of the bytes represent `c0`, the second half represent `c1`. 
+pub struct Fp2Bytes([u8; SIZE]); + +impl Default for Fp2Bytes { + fn default() -> Self { + Self([0u8; SIZE]) + } +} + +impl AsMut<[u8]> for Fp2Bytes { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0 + } +} + +impl AsRef<[u8]> for Fp2Bytes { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +impl crate::serde::SerdeObject for Fp2 { + fn from_raw_bytes_unchecked(bytes: &[u8]) -> Self { + debug_assert_eq!(bytes.len(), 112); + let [c0, c1] = [0, 56].map(|i| Fp::from_raw_bytes_unchecked(&bytes[i..i + 56])); + Self { c0, c1 } + } + fn from_raw_bytes(bytes: &[u8]) -> Option { + if bytes.len() != SIZE { + return None; + } + let [c0, c1] = [0, COEF_SIZE].map(|i| Fp::from_raw_bytes(&bytes[i..i + COEF_SIZE])); + c0.zip(c1).map(|(c0, c1)| Self { c0, c1 }) + } + fn to_raw_bytes(&self) -> Vec { + let mut res = Vec::with_capacity(SIZE); + for limb in self.c0.0.iter().chain(self.c1.0.iter()) { + res.extend_from_slice(&limb.to_le_bytes()); + } + res + } + fn read_raw_unchecked(reader: &mut R) -> Self { + let [c0, c1] = [(); 2].map(|_| Fp::read_raw_unchecked(reader)); + Self { c0, c1 } + } + fn read_raw(reader: &mut R) -> std::io::Result { + let c0 = Fp::read_raw(reader)?; + let c1 = Fp::read_raw(reader)?; + Ok(Self { c0, c1 }) + } + fn write_raw(&self, writer: &mut W) -> std::io::Result<()> { + self.c0.write_raw(writer)?; + self.c1.write_raw(writer) + } +} + +impl WithSmallOrderMulGroup<3> for Fp2 { + const ZETA: Self = Fp2 { + // 0x24000000000024000130e0000d7f28e4a803ca76be3924a5f43f8cddf9a5c4781b50d5e1ff708dc8d9fa5d8a200bc4398ffff80f80000002 + c0: Fp::from_raw([ + 0x8ffff80f80000002, + 0xd9fa5d8a200bc439, + 0x1b50d5e1ff708dc8, + 0xf43f8cddf9a5c478, + 0xa803ca76be3924a5, + 0x0130e0000d7f28e4, + 0x2400000000002400, + ]), + c1: Fp::zero(), + }; +} + +#[cfg(test)] +use rand::SeedableRng; +#[cfg(test)] +use rand_xorshift::XorShiftRng; + +#[test] +fn test_ser() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + let a0 = Fp2::random(&mut rng); + let a_bytes = a0.to_bytes(); + let a1 = Fp2::from_bytes(&a_bytes).unwrap(); + assert_eq!(a0, a1); +} + +#[test] +fn test_fp2_ordering() { + let mut a = Fp2 { + c0: Fp::zero(), + c1: Fp::zero(), + }; + + let mut b = a; + + assert!(a.cmp(&b) == Ordering::Equal); + b.c0 += &Fp::one(); + assert!(a.cmp(&b) == Ordering::Less); + a.c0 += &Fp::one(); + assert!(a.cmp(&b) == Ordering::Equal); + b.c1 += &Fp::one(); + assert!(a.cmp(&b) == Ordering::Less); + a.c0 += &Fp::one(); + assert!(a.cmp(&b) == Ordering::Less); + a.c1 += &Fp::one(); + assert!(a.cmp(&b) == Ordering::Greater); + b.c0 += &Fp::one(); + assert!(a.cmp(&b) == Ordering::Equal); +} + +#[test] +fn test_fp2_basics() { + assert_eq!( + Fp2 { + c0: Fp::zero(), + c1: Fp::zero(), + }, + Fp2::ZERO + ); + assert_eq!( + Fp2 { + c0: Fp::one(), + c1: Fp::zero(), + }, + Fp2::ONE + ); + assert_eq!(Fp2::ZERO.is_zero().unwrap_u8(), 1); + assert_eq!(Fp2::ONE.is_zero().unwrap_u8(), 0); + assert_eq!( + Fp2 { + c0: Fp::zero(), + c1: Fp::one(), + } + .is_zero() + .unwrap_u8(), + 0 + ); +} + +#[test] +fn test_fp2_squaring() { + // u + 1 + let mut a = Fp2 { + c0: Fp::one(), + c1: Fp::one(), + }; + // (u + 1) ^2 = 1 + u^2 + 2u = -4 + 2u + a.square_assign(); + let minus_4 = -Fp::from(4u64); + assert_eq!( + a, + Fp2 { + c0: minus_4, + c1: Fp::one() + Fp::one(), + } + ); + + // u + let mut a = Fp2 { + c0: Fp::zero(), + c1: Fp::one(), + }; + // u^2 + a.square_assign(); + assert_eq!( + a, + Fp2 { + c0: U_SQUARE, + c1: Fp::zero(), + } + ); +} + 
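+// A minimal illustrative check (not part of the upstream test suite): with
+// `u^2 = U_SQUARE = -5`, products of elements written as `c0 + c1*u` follow the
+// schoolbook rule implemented in `mul_assign`, e.g. (1 + u)(2 + u) = -3 + 3u.
+#[test]
+fn test_fp2_mul_example() {
+    let a = Fp2::new(Fp::one(), Fp::one());
+    let b = Fp2::new(Fp::from(2u64), Fp::one());
+    // (1 + u)(2 + u) = 2 + 3u + u^2 = (2 - 5) + 3u
+    let expected = Fp2::new(-Fp::from(3u64), Fp::from(3u64));
+    assert_eq!(a * b, expected);
+}
+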
+#[test] +fn test_fp2_mul_nonresidue() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + let nqr = super::fp6::V_CUBE; + for _ in 0..1000 { + let mut a = Fp2::random(&mut rng); + let mut b = a; + a.mul_by_nonresidue(); + b.mul_assign(&nqr); + + assert_eq!(a, b); + } +} + +#[test] +pub fn test_sqrt() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + const N_ITER: usize = 1000; + for _ in 0..N_ITER { + let a = Fp2::random(&mut rng); + if a.legendre() == -1 { + assert!(bool::from(a.sqrt().is_none())); + } + } + + for _ in 0..N_ITER { + let a = Fp2::random(&mut rng); + let mut b = a; + b.square_assign(); + assert_eq!(b.legendre(), 1); + + let b = b.sqrt().unwrap(); + let mut negb = b; + negb = negb.neg(); + + assert!(a == b || a == negb); + } + + let mut c = Fp2::ONE; + for _ in 0..N_ITER { + let mut b = c; + b.square_assign(); + assert_eq!(b.legendre(), 1); + + b = b.sqrt().unwrap(); + + if b != c { + b = b.neg(); + } + + assert_eq!(b, c); + + c += &Fp2::ONE; + } +} + +#[test] +fn test_frobenius() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + for _ in 0..50 { + for i in 0..8 { + let mut a = Fp2::random(&mut rng); + let mut b = a; + + for _ in 0..i { + a = a.pow_vartime(&[ + 0x9ffffcd300000001, + 0xa2a7e8c30006b945, + 0xe4a7a5fe8fadffd6, + 0x443f9a5cda8a6c7b, + 0xa803ca76f439266f, + 0x0130e0000d7f70e4, + 0x2400000000002400, + ]); + } + b.frobenius_map(i); + + assert_eq!(a, b); + } + } +} + +#[test] +fn test_field() { + crate::tests::field::random_field_tests::("fp2".to_string()); +} + +#[test] +fn test_serialization() { + crate::tests::field::random_serialization_test::("fp2".to_string()); + #[cfg(feature = "derive_serde")] + crate::tests::field::random_serde_test::("fp2".to_string()); +} diff --git a/src/pluto_eris/fields/fp6.rs b/src/pluto_eris/fields/fp6.rs new file mode 100644 index 00000000..9fcbb01b --- /dev/null +++ b/src/pluto_eris/fields/fp6.rs @@ -0,0 +1,786 @@ +use super::fp::Fp; +use super::fp2::Fp2; +use crate::ff::Field; +use core::ops::{Add, Mul, Neg, Sub}; +use rand::RngCore; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +/// -BETA is a cubic non-residue in Fp2. 
Fp6 = Fp2[X]/(X^3 + BETA) +/// We introduce the variable v such that v^3 = -BETA +/// BETA = - 57/(z+3) + +/// V_CUBE = 57/(u+3) +pub(crate) const V_CUBE: Fp2 = Fp2 { + // 0xcdb6db6db6dc3b6dbda9924971b3a9ace4a7f2a7bcb449573cd928ee056022c3f6072240ebe2483833bf7b35b701d98ddb6da4b5b6db6e8 + c0: Fp::from_raw([ + 0xddb6da4b5b6db6e8, + 0x833bf7b35b701d98, + 0x3f6072240ebe2483, + 0x73cd928ee056022c, + 0xce4a7f2a7bcb4495, + 0xdbda9924971b3a9a, + 0x0cdb6db6db6dc3b6, + ]), + // 0x7b6db6db6db756db71cc2492776bcc3489319197d79f5f3457b57ef5366ce1a8c6d1148d5a5491bb523fb0536dcde8eeb6db62d36db6db3 + c1: Fp::from_raw([ + 0xeb6db62d36db6db3, + 0xb523fb0536dcde8e, + 0x8c6d1148d5a5491b, + 0x457b57ef5366ce1a, + 0x489319197d79f5f3, + 0xb71cc2492776bcc3, + 0x07b6db6db6db756d, + ]), +}; + +#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)] +/// The `Fp6` element c0 + c1 * v + c2 * v3 +pub struct Fp6 { + pub c0: Fp2, + pub c1: Fp2, + pub c2: Fp2, +} + +impl ConditionallySelectable for Fp6 { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Fp6 { + c0: Fp2::conditional_select(&a.c0, &b.c0, choice), + c1: Fp2::conditional_select(&a.c1, &b.c1, choice), + c2: Fp2::conditional_select(&a.c2, &b.c2, choice), + } + } +} + +impl ConstantTimeEq for Fp6 { + fn ct_eq(&self, other: &Self) -> Choice { + self.c0.ct_eq(&other.c0) & self.c1.ct_eq(&other.c1) & self.c2.ct_eq(&other.c2) + } +} + +impl Neg for Fp6 { + type Output = Fp6; + + #[inline] + fn neg(self) -> Fp6 { + -&self + } +} + +impl<'a> Neg for &'a Fp6 { + type Output = Fp6; + + #[inline] + fn neg(self) -> Fp6 { + self.neg() + } +} + +impl<'a, 'b> Sub<&'b Fp6> for &'a Fp6 { + type Output = Fp6; + + #[inline] + fn sub(self, rhs: &'b Fp6) -> Fp6 { + self.sub(rhs) + } +} + +impl<'a, 'b> Add<&'b Fp6> for &'a Fp6 { + type Output = Fp6; + + #[inline] + fn add(self, rhs: &'b Fp6) -> Fp6 { + self.add(rhs) + } +} + +impl<'a, 'b> Mul<&'b Fp6> for &'a Fp6 { + type Output = Fp6; + + #[inline] + fn mul(self, rhs: &'b Fp6) -> Fp6 { + self.mul(rhs) + } +} + +use crate::{ + impl_add_binop_specify_output, impl_binops_additive, impl_binops_additive_specify_output, + impl_binops_multiplicative, impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, + impl_sum_prod, +}; +impl_binops_additive!(Fp6, Fp6); +impl_binops_multiplicative!(Fp6, Fp6); +impl_sum_prod!(Fp6); + +impl Fp6 { + #[inline] + pub const fn zero() -> Self { + Fp6 { + c0: Fp2::ZERO, + c1: Fp2::ZERO, + c2: Fp2::ZERO, + } + } + + #[inline] + pub const fn one() -> Self { + Fp6 { + c0: Fp2::ONE, + c1: Fp2::ZERO, + c2: Fp2::ZERO, + } + } + + pub fn mul_assign(&mut self, other: &Self) { + let mut a_a = self.c0; + let mut b_b = self.c1; + let mut c_c = self.c2; + a_a *= &other.c0; + b_b *= &other.c1; + c_c *= &other.c2; + + let mut t1 = other.c1; + t1 += &other.c2; + { + let mut tmp = self.c1; + tmp += &self.c2; + + t1 *= &tmp; + t1 -= &b_b; + t1 -= &c_c; + t1.mul_by_nonresidue(); + t1 += &a_a; + } + + let mut t3 = other.c0; + t3 += &other.c2; + { + let mut tmp = self.c0; + tmp += &self.c2; + + t3 *= &tmp; + t3 -= &a_a; + t3 += &b_b; + t3 -= &c_c; + } + + let mut t2 = other.c0; + t2 += &other.c1; + { + let mut tmp = self.c0; + tmp += &self.c1; + + t2 *= &tmp; + t2 -= &a_a; + t2 -= &b_b; + c_c.mul_by_nonresidue(); + t2 += &c_c; + } + + self.c0 = t1; + self.c1 = t2; + self.c2 = t3; + } + + pub fn square_assign(&mut self) { + // s0 = a^2 + let mut s0 = self.c0; + s0.square_assign(); + // s1 = 2ab + let mut ab = self.c0; + ab *= &self.c1; + let mut s1 = ab; + s1.double_assign(); + // s2 = (a - b + c)^2 + 
let mut s2 = self.c0; + s2 -= &self.c1; + s2 += &self.c2; + s2.square_assign(); + // bc + let mut bc = self.c1; + bc *= &self.c2; + // s3 = 2bc + let mut s3 = bc; + s3.double_assign(); + // s4 = c^2 + let mut s4 = self.c2; + s4.square_assign(); + + // new c0 = 2bc.mul_by_xi + a^2 + self.c0 = s3; + self.c0.mul_by_nonresidue(); + // self.c0.mul_by_xi(); + self.c0 += &s0; + + // new c1 = (c^2).mul_by_xi + 2ab + self.c1 = s4; + self.c1.mul_by_nonresidue(); + // self.c1.mul_by_xi(); + self.c1 += &s1; + + // new c2 = 2ab + (a - b + c)^2 + 2bc - a^2 - c^2 = b^2 + 2ac + self.c2 = s1; + self.c2 += &s2; + self.c2 += &s3; + self.c2 -= &s0; + self.c2 -= &s4; + } + + pub fn double(&self) -> Self { + Self { + c0: self.c0.double(), + c1: self.c1.double(), + c2: self.c2.double(), + } + } + + pub fn double_assign(&mut self) { + self.c0 = self.c0.double(); + self.c1 = self.c1.double(); + self.c2 = self.c2.double(); + } + + pub fn add(&self, other: &Self) -> Self { + Self { + c0: self.c0 + other.c0, + c1: self.c1 + other.c1, + c2: self.c2 + other.c2, + } + } + + pub fn sub(&self, other: &Self) -> Self { + Self { + c0: self.c0 - other.c0, + c1: self.c1 - other.c1, + c2: self.c2 - other.c2, + } + } + + pub fn mul(&self, other: &Self) -> Self { + let mut t = *other; + t.mul_assign(self); + t + } + + pub fn square(&self) -> Self { + let mut t = *self; + t.square_assign(); + t + } + + pub fn neg(&self) -> Self { + Self { + c0: -self.c0, + c1: -self.c1, + c2: -self.c2, + } + } + + pub fn frobenius_map(&mut self, power: usize) { + self.c0.frobenius_map(power); + self.c1.frobenius_map(power); + self.c2.frobenius_map(power); + + self.c1.mul_assign(&FROBENIUS_COEFF_FP6_C1[power % 6]); + self.c2.mul_assign(&FROBENIUS_COEFF_FP6_C2[power % 6]); + } + + /// Multiply by cubic nonresidue v. 
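+    /// Writing `self = c0 + c1*v + c2*v^2` with `v^3 = V_CUBE` (an `Fp2`
+    /// constant), multiplication by `v` rotates the coefficients to
+    /// `(c2 * V_CUBE, c0, c1)`, which is what the swaps below compute.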
+ pub fn mul_by_nonresidue(&mut self) { + use std::mem::swap; + swap(&mut self.c0, &mut self.c1); + swap(&mut self.c0, &mut self.c2); + // c0, c1, c2 -> c2, c0, c1 + self.c0.mul_by_nonresidue(); + } + + pub fn mul_by_1(&mut self, c1: &Fp2) { + let mut b_b = self.c1; + b_b *= c1; + + let mut t1 = *c1; + { + let mut tmp = self.c1; + tmp += &self.c2; + + t1 *= &tmp; + t1 -= &b_b; + t1.mul_by_nonresidue(); + } + + let mut t2 = *c1; + { + let mut tmp = self.c0; + tmp += &self.c1; + + t2 *= &tmp; + t2 -= &b_b; + } + + self.c0 = t1; + self.c1 = t2; + self.c2 = b_b; + } + + pub fn mul_by_01(&mut self, c0: &Fp2, c1: &Fp2) { + let mut a_a = self.c0; + let mut b_b = self.c1; + a_a *= c0; + b_b *= c1; + + let mut t1 = *c1; + { + let mut tmp = self.c1; + tmp += &self.c2; + + t1 *= &tmp; + t1 -= &b_b; + t1.mul_by_nonresidue(); + t1 += &a_a; + } + + let mut t3 = *c0; + { + let mut tmp = self.c0; + tmp += &self.c2; + + t3 *= &tmp; + t3 -= &a_a; + t3 += &b_b; + } + + let mut t2 = *c0; + t2 += c1; + { + let mut tmp = self.c0; + tmp += &self.c1; + + t2 *= &tmp; + t2 -= &a_a; + t2 -= &b_b; + } + + self.c0 = t1; + self.c1 = t2; + self.c2 = t3; + } + + fn invert(&self) -> CtOption { + let mut c0 = self.c2; + c0.mul_by_nonresidue(); + c0 *= &self.c1; + c0 = -c0; + { + let mut c0s = self.c0; + c0s.square_assign(); + c0 += &c0s; + } + let mut c1 = self.c2; + c1.square_assign(); + c1.mul_by_nonresidue(); + { + let mut c01 = self.c0; + c01 *= &self.c1; + c1 -= &c01; + } + let mut c2 = self.c1; + c2.square_assign(); + { + let mut c02 = self.c0; + c02 *= &self.c2; + c2 -= &c02; + } + + let mut tmp1 = self.c2; + tmp1 *= &c1; + let mut tmp2 = self.c1; + tmp2 *= &c2; + tmp1 += &tmp2; + tmp1.mul_by_nonresidue(); + tmp2 = self.c0; + tmp2 *= &c0; + tmp1 += &tmp2; + + tmp1.invert().map(|t| { + let mut tmp = Fp6 { + c0: t, + c1: t, + c2: t, + }; + tmp.c0 *= &c0; + tmp.c1 *= &c1; + tmp.c2 *= &c2; + + tmp + }) + } +} + +impl Field for Fp6 { + const ZERO: Self = Self::zero(); + const ONE: Self = Self::one(); + + fn random(mut rng: impl RngCore) -> Self { + Fp6 { + c0: Fp2::random(&mut rng), + c1: Fp2::random(&mut rng), + c2: Fp2::random(&mut rng), + } + } + + fn is_zero(&self) -> Choice { + self.c0.is_zero() & self.c1.is_zero() + } + + fn square(&self) -> Self { + self.square() + } + + fn double(&self) -> Self { + self.double() + } + + fn sqrt(&self) -> CtOption { + unimplemented!() + } + + fn sqrt_ratio(_num: &Self, _div: &Self) -> (Choice, Self) { + unimplemented!() + } + + fn invert(&self) -> CtOption { + self.invert() + } +} + +/// Fp2 coefficients for the efficient computation of Frobenius Endomorphism in Fp6. 
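+/// `FROBENIUS_COEFF_FP6_C1[i] = (v^3)^((p^i - 1) / 3) = v^(p^i - 1)`, the factor
+/// picked up by the `c1` coefficient under the `i`-th power of the Frobenius map.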
+pub(crate) const FROBENIUS_COEFF_FP6_C1: [Fp2; 6] = [ + // Fp2(v^3)**(((p^0) - 1) / 3) + Fp2::ONE, + // Fp2(v^3)**(((p^1) - 1) / 3) + Fp2 { + // 0x120de97f024c55bc3bc0d351f4c70da1e3886170077a50986f93678bc921dcd5041bc4bb14cc42dc52e787634eccc335a001825382850d03 + c0: Fp::from_raw([ + 0xa001825382850d03, + 0x52e787634eccc335, + 0x041bc4bb14cc42dc, + 0x6f93678bc921dcd5, + 0xe3886170077a5098, + 0x3bc0d351f4c70da1, + 0x120de97f024c55bc, + ]), + // 0x2096f3f804d973afd82becc2ef081b76132461908eadbe3da1a7f5502b7091965efa1ddf4658080413be1b7cd3c9ea0e2772fea378a9b322 + c1: Fp::from_raw([ + 0x2772fea378a9b322, + 0x13be1b7cd3c9ea0e, + 0x5efa1ddf46580804, + 0xa1a7f5502b709196, + 0x132461908eadbe3d, + 0xd82becc2ef081b76, + 0x2096f3f804d973af, + ]), + }, + // Fp2(v^3)**(((p^2) - 1) / 3) + Fp2 { + // 0x480000000000360001c950000d7ee0e4a803c956d01c903d720dc8ad8b38dffaf50c100004c37ffffffe + c0: Fp::from_raw([ + 0x100004c37ffffffe, + 0xc8ad8b38dffaf50c, + 0xc956d01c903d720d, + 0x50000d7ee0e4a803, + 0x00000000360001c9, + 0x0000000000004800, + 0x0000000000000000, + ]), + c1: Fp::ZERO, + }, + // Fp2(v^3)**(((p^3) - 1) / 3) + Fp2 { + // 0x1f9cd069c59f50a72511749de232911d833b798e78bd98c02913e38315a71c287cd52ae30d09b78a8b43b17b4c3ea938a04518fa783eb497 + c0: Fp::from_raw([ + 0xa04518fa783eb497, + 0x8b43b17b4c3ea938, + 0x7cd52ae30d09b78a, + 0x2913e38315a71c28, + 0x833b798e78bd98c0, + 0x2511749de232911d, + 0x1f9cd069c59f50a7, + ]), + // 0x23affd628747cbaec26943f93dc9eab63f4af36699fe6d74c0aa2122aa7cb689e8faacb3479a973a4a728fcb77b150ee77240d4066e42ac5 + c1: Fp::from_raw([ + 0x77240d4066e42ac5, + 0x4a728fcb77b150ee, + 0xe8faacb3479a973a, + 0xc0aa2122aa7cb689, + 0x3f4af36699fe6d74, + 0xc26943f93dc9eab6, + 0x23affd628747cbae, + ]), + }, + // Fp2(v^3)**(((p^4) - 1) / 3) + Fp2 { + // 0x24000000000024000130e0000d7f28e4a803ca76be3924a5f43f8cddf9a5c4781b50d5e1ff708dc8d9fa5d8a200bc4398ffff80f80000002 + c0: Fp::from_raw([ + 0x8ffff80f80000002, + 0xd9fa5d8a200bc439, + 0x1b50d5e1ff708dc8, + 0xf43f8cddf9a5c478, + 0xa803ca76be3924a5, + 0x0130e0000d7f28e4, + 0x2400000000002400, + ]), + c1: Fp::ZERO, + }, + // Fp2(v^3)**(((p^5) - 1) / 3) + Fp2 { + // 0x165546173814a19ca18f781044054309e943b9ef683a6385efd7e9aad64bdffa485e5c5efd860546672498a76502061cffb95e58053c3e68 + c0: Fp::from_raw([ + 0xffb95e58053c3e68, + 0x672498a76502061c, + 0x485e5c5efd860546, + 0xefd7e9aad64bdffa, + 0xe943b9ef683a6385, + 0xa18f781044054309, + 0x165546173814a19c, + ]), + // 0x3b90ea573df08a167cc8f43ee2cdb9cfd983ff6bfc6212c262d1e46df2790d7815a816a9169606ee71f263db492378ea168edc22072221b + c1: Fp::from_raw([ + 0xa168edc22072221b, + 0xe71f263db492378e, + 0x815a816a9169606e, + 0x262d1e46df2790d7, + 0xfd983ff6bfc6212c, + 0x67cc8f43ee2cdb9c, + 0x03b90ea573df08a1, + ]), + }, +]; + +/// Fp2 coefficients for the efficient computation of Frobenius Endomorphism in Fp6. 
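+/// `FROBENIUS_COEFF_FP6_C2[i] = (v^3)^((2*p^i - 2) / 3) = v^(2*(p^i - 1))`, the
+/// factor picked up by the `c2` coefficient under the `i`-th power of the Frobenius map.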
+pub(crate) const FROBENIUS_COEFF_FP6_C2: [Fp2; 6] = [ + // Fp2(v^3)**(((2p^0) - 2) / 3) + Fp2::ONE, + // Fp2(v^3)**(((2p^1) - 2) / 3) + Fp2 { + // 0x93733692ce3cdcfc34610bac6bd22c4dc590efb038c82998c9549048e7b424cc00e17ffb4a61950d0ec132a7b38f09db0a818e422737f7c + c0: Fp::from_raw([ + 0xb0a818e422737f7c, + 0xd0ec132a7b38f09d, + 0xc00e17ffb4a61950, + 0x8c9549048e7b424c, + 0xdc590efb038c8299, + 0xc34610bac6bd22c4, + 0x093733692ce3cdcf, + ]), + // 0x12cb19daadc92882ba3593aa6f3e6bf426f29bd46039e3036f61d0bd35f39ebecdac3209d9df546061c90b4940d9031c240ce398421dc7dc + c1: Fp::from_raw([ + 0x240ce398421dc7dc, + 0x61c90b4940d9031c, + 0xcdac3209d9df5460, + 0x6f61d0bd35f39ebe, + 0x26f29bd46039e303, + 0xba3593aa6f3e6bf4, + 0x12cb19daadc92882, + ]), + }, + // Fp2(v^3)**(((2p^2) - 2) / 3) + Fp2 { + // 0x24000000000024000130e0000d7f28e4a803ca76be3924a5f43f8cddf9a5c4781b50d5e1ff708dc8d9fa5d8a200bc4398ffff80f80000002 + c0: Fp::from_raw([ + 0x8ffff80f80000002, + 0xd9fa5d8a200bc439, + 0x1b50d5e1ff708dc8, + 0xf43f8cddf9a5c478, + 0xa803ca76be3924a5, + 0x0130e0000d7f28e4, + 0x2400000000002400, + ]), + c1: Fp::ZERO, + }, + // Fp2(v^3)**(((2p^3) - 2) / 3) + Fp2 { + // 0x85cc83a7eeba2ef5f7dd2f9f1405312b2ce0cbc85b8561e1657aaf1e85b82299aa5ace8b26b78d88f57e1c7a87f75556885980d6c8d2186 + c0: Fp::from_raw([ + 0x6885980d6c8d2186, + 0x8f57e1c7a87f7555, + 0x9aa5ace8b26b78d8, + 0x1657aaf1e85b8229, + 0xb2ce0cbc85b8561e, + 0x5f7dd2f9f1405312, + 0x085cc83a7eeba2ef, + ]), + // 0xda3357ee4e6a9836af75e8ec0dbd23e7abc03d404620899ee0ea8b684b9400d58d5ebe487e523680bbe8a0dd9ea1d312bca2a953ab51c9b + c1: Fp::from_raw([ + 0x2bca2a953ab51c9b, + 0x0bbe8a0dd9ea1d31, + 0x58d5ebe487e52368, + 0xee0ea8b684b9400d, + 0x7abc03d404620899, + 0x6af75e8ec0dbd23e, + 0x0da3357ee4e6a983, + ]), + }, + // Fp2(v^3)**(((2p^4) - 2) / 3) + Fp2 { + // 0x480000000000360001c950000d7ee0e4a803c956d01c903d720dc8ad8b38dffaf50c100004c37ffffffe + c0: Fp::from_raw([ + 0x100004c37ffffffe, + 0xc8ad8b38dffaf50c, + 0xc956d01c903d720d, + 0x50000d7ee0e4a803, + 0x00000000360001c9, + 0x0000000000004800, + 0x0000000000000000, + ]), + c1: Fp::ZERO, + }, + // Fp2(v^3)**(((2p^5) - 2) / 3) + Fp2 { + // 0x126c045c5430b340de6cfc4b5581fb0d18dcaebf6af44db7a152a66663b3a80589f3e116289c6dad4263f3d0dc4e535286d24be170ff5eff + c0: Fp::from_raw([ + 0x86d24be170ff5eff, + 0x4263f3d0dc4e5352, + 0x89f3e116289c6dad, + 0xa152a66663b3a805, + 0x18dcaebf6af44db7, + 0xde6cfc4b5581fb0d, + 0x126c045c5430b340, + ]), + // 0x391b0a66d5051f9dc03edc6dd6532b206552ace8f9d3ad1e6cf20e91fdd8dafbe2588102de9880e3520536be54398f85028eea5832d1b8a + c1: Fp::from_raw([ + 0x5028eea5832d1b8a, + 0x3520536be54398f8, + 0xbe2588102de9880e, + 0xe6cf20e91fdd8daf, + 0x06552ace8f9d3ad1, + 0xdc03edc6dd6532b2, + 0x0391b0a66d5051f9, + ]), + }, +]; + +#[cfg(test)] +use rand::SeedableRng; +#[cfg(test)] +use rand_xorshift::XorShiftRng; + +#[test] +fn test_fp6_mul_nonresidue() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + let nqr = Fp6 { + c0: Fp2::zero(), + c1: Fp2::one(), + c2: Fp2::zero(), + }; + + for _ in 0..1000 { + let mut a = Fp6::random(&mut rng); + let mut b = a; + a.mul_by_nonresidue(); + b.mul_assign(&nqr); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fp6_mul_by_1() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + for _ in 0..1000 { + let c1 = Fp2::random(&mut rng); + let mut a = Fp6::random(&mut rng); + let mut b = a; 
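+        // `mul_by_1` must agree with a full multiplication by the sparse
+        // element (0, c1, 0).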
+ + a.mul_by_1(&c1); + b.mul_assign(&Fp6 { + c0: Fp2::zero(), + c1, + c2: Fp2::zero(), + }); + + assert_eq!(a, b); + } +} + +#[test] +fn test_fp6_mul_by_01() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + for _ in 0..1000 { + let c0 = Fp2::random(&mut rng); + let c1 = Fp2::random(&mut rng); + let mut a = Fp6::random(&mut rng); + let mut b = a; + + a.mul_by_01(&c0, &c1); + b.mul_assign(&Fp6 { + c0, + c1, + c2: Fp2::zero(), + }); + + assert_eq!(a, b); + } +} + +#[test] +fn test_squaring() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + for _ in 0..1000 { + let mut a = Fp6::random(&mut rng); + let mut b = a; + b.mul_assign(&a); + a.square_assign(); + assert_eq!(a, b); + } +} + +#[test] +fn test_frobenius() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, + 0xe5, + ]); + + for _ in 0..50 { + for i in 0..8 { + let mut a = Fp6::random(&mut rng); + let mut b = a; + + for _ in 0..i { + a = a.pow_vartime(&[ + // p + 0x9ffffcd300000001, + 0xa2a7e8c30006b945, + 0xe4a7a5fe8fadffd6, + 0x443f9a5cda8a6c7b, + 0xa803ca76f439266f, + 0x0130e0000d7f70e4, + 0x2400000000002400, + ]); + } + b.frobenius_map(i); + + assert_eq!(a, b); + } + } +} + +#[test] +fn test_field() { + crate::tests::field::random_field_tests::("fp6".to_string()); +} diff --git a/src/pluto_eris/fields/fq.rs b/src/pluto_eris/fields/fq.rs new file mode 100644 index 00000000..7aecd8a7 --- /dev/null +++ b/src/pluto_eris/fields/fq.rs @@ -0,0 +1,620 @@ +use crate::arithmetic::{adc, mac, sbb}; +use crate::ff::{FromUniformBytes, PrimeField, WithSmallOrderMulGroup}; +use crate::{ + extend_field_legendre, field_arithmetic_7_limbs, field_bits_7_limbs, field_common_7_limbs, + impl_from_u64_7_limbs, +}; +use crate::{ + impl_add_binop_specify_output, impl_binops_additive, impl_binops_additive_specify_output, + impl_binops_multiplicative, impl_binops_multiplicative_mixed, impl_sub_binop_specify_output, + impl_sum_prod, +}; +use core::convert::TryInto; +use core::fmt; +use core::ops::{Add, Mul, Neg, Sub}; +use rand::RngCore; +use std::slice::Iter; +use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}; + +#[cfg(feature = "derive_serde")] +use serde::{Deserialize, Serialize}; + +/// This represents an element of $\mathbb{F}_q$ where +/// +/// `q = 0x24000000000024000130e0000d7f70e4a803ca76f439266f443f9a5c7a8a6c7be4a775fe8e177fd69ca7e85d60050af41ffffcd300000001` +/// +/// is the scalar field of the Pluto curve (and the base field of the Eris curve). +/// The internal representation of this type is seven 64-bit unsigned +/// integers in little-endian order which account for the 446 bits required to be represented. +/// `Fq` values are always in Montgomery form; i.e., Fq(a) = aR mod q, with R = 2^448. 
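+/// Conversion into Montgomery form goes through `from_raw`/`from_repr`, and
+/// `to_repr` converts back to the canonical little-endian byte representation.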
+#[derive(Clone, Copy, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "derive_serde", derive(Serialize, Deserialize))] +pub struct Fq(pub(crate) [u64; 7]); + +/// Size of `Fq` element in bytes +const SIZE: usize = 56; + +/// Constant representing the modulus +/// `p = 0x24000000000024000130e0000d7f70e4a803ca76f439266f443f9a5c7a8a6c7be4a775fe8e177fd69ca7e85d60050af41ffffcd300000001` +const MODULUS: Fq = Fq([ + 0x1ffffcd300000001, + 0x9ca7e85d60050af4, + 0xe4a775fe8e177fd6, + 0x443f9a5c7a8a6c7b, + 0xa803ca76f439266f, + 0x0130e0000d7f70e4, + 0x2400000000002400, +]); + +/// The modulus as u32 limbs. +#[cfg(not(target_pointer_width = "64"))] +const MODULUS_LIMBS_32: [u32; 14] = [ + 0x00000001, 0x1ffffcd3, 0x60050af4, 0x9ca7e85d, 0x8e177fd6, 0xe4a775fe, 0x7a8a6c7b, 0x443f9a5c, + 0xf439266f, 0xa803ca76, 0x0d7f70e4, 0x0130e000, 0x00002400, 0x24000000, +]; + +const MODULUS_STR: &str = "0x24000000000024000130e0000d7f70e4a803ca76f439266f443f9a5c7a8a6c7be4a775fe8e177fd69ca7e85d60050af41ffffcd300000001"; + +/// INV = -(q^{-1} mod 2^64) mod 2^64 +/// `0x1ffffcd2ffffffff` +const INV: u64 = 0x1ffffcd2ffffffff; + +/// Let M be the power of `2^64` nearest to `Self::MODULUS_BITS`. Then `R = M % Self::MODULUS`. +/// `R = 2^448 mod q` +/// `0x3ffffffffff03fff7a9dfffa183e9bf67e576bf526ff2f52242c778a637089cbf6bc60a1d5b8121b768a5725fdcb3532000163afffffff9` +const R: Fq = Fq([ + 0x2000163afffffff9, + 0xb768a5725fdcb353, + 0xbf6bc60a1d5b8121, + 0x2242c778a637089c, + 0x67e576bf526ff2f5, + 0xf7a9dfffa183e9bf, + 0x3ffffffffff03ff, +]); + +/// `R^2 = 2^896 mod q` +/// `0x50d7c998f46144ee436895a5a630ff544d51e923f64695651da4da1c97f716419bd905e6e4ff6c2bc64e865fe4552ad740808c831022522` +const R2: Fq = Fq([ + 0x740808c831022522, + 0xbc64e865fe4552ad, + 0x19bd905e6e4ff6c2, + 0x51da4da1c97f7164, + 0x44d51e923f646956, + 0xe436895a5a630ff5, + 0x050d7c998f46144e, +]); + +/// `R^3 = 2^1792 mod q` +/// `0x2f2c41fb476072baa10b8225e69f7de3b2c1031e6d01279e65191fab1f6ce25295c3c8bd6945406c89b51b218477a6f7252704d7495b38a` +const R3: Fq = Fq([ + 0x7252704d7495b38a, + 0xc89b51b218477a6f, + 0x295c3c8bd6945406, + 0xe65191fab1f6ce25, + 0x3b2c1031e6d01279, + 0xaa10b8225e69f7de, + 0x02f2c41fb476072b, +]); + +/// `GENERATOR = 7 mod q` is a generator of the `q - 1` order multiplicative +/// subgroup, or in other words a primitive root of the field. +const GENERATOR: Fq = Fq::from_raw([0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]); + +/// Size of the 2-adic sub-group of the field. +const S: u32 = 32; + +/// GENERATOR^t where t * 2^s + 1 = q +/// with t odd. In other words, this is a 2^s root of unity. 
+/// `0x0a5e6f78289fd24b1c64c90821c44cdce9ba1b3e90f2e88957f869667f6dfdbdbce6bb9ed38a8c2382fa11e3d3810fcc3c7bb406ec7bce04` + +const ROOT_OF_UNITY: Fq = Fq::from_raw([ + 0x3c7bb406ec7bce04, + 0x82fa11e3d3810fcc, + 0xbce6bb9ed38a8c23, + 0x57f869667f6dfdbd, + 0xe9ba1b3e90f2e889, + 0x1c64c90821c44cdc, + 0x0a5e6f78289fd24b, +]); + +/// 1 / ROOT_OF_UNITY mod q +/// `0x1a8c636e293fe9928f85aa6ec68f950ebb57e3f0502dd05667c990c1c2f57128c77768be1824fd3f60869f410287a1879ec16a35ca69b6fb` + +const ROOT_OF_UNITY_INV: Fq = Fq::from_raw([ + 0x9ec16a35ca69b6fb, + 0x60869f410287a187, + 0xc77768be1824fd3f, + 0x67c990c1c2f57128, + 0xbb57e3f0502dd056, + 0x8f85aa6ec68f950e, + 0x1a8c636e293fe992, +]); + +/// 1 / 2 mod q +/// `0x12000000000012000098700006bfb8725401e53b7a1c9337a21fcd2e3d45363df253baff470bbfeb4e53f42eb002857a0ffffe6980000001` +const TWO_INV: Fq = Fq::from_raw([ + 0x0ffffe6980000001, + 0x4e53f42eb002857a, + 0xf253baff470bbfeb, + 0xa21fcd2e3d45363d, + 0x5401e53b7a1c9337, + 0x0098700006bfb872, + 0x1200000000001200, +]); + +/// GENERATOR^{2^s} where t * 2^s + 1 = q with t odd. In other words, this is a t root of unity. +/// 0x657946fe07116ceca983fe28713a2b257ab7a7866c95121e727f3776c3e84cb0a14f6a7f83f8cdaeadb479c657bdf2de4589640faf72e67 +const DELTA: Fq = Fq::from_raw([ + 0xe4589640faf72e67, + 0xeadb479c657bdf2d, + 0x0a14f6a7f83f8cda, + 0xe727f3776c3e84cb, + 0x57ab7a7866c95121, + 0xca983fe28713a2b2, + 0x657946fe07116ce, +]); + +/// `ZETA^3 = 1 mod q` where `ZETA^2 != 1 mod q` +/// `0x9000000000006c000392a0001afee1c9500792ae3039253e641ba35817a29ffaf50be000032cfffffffe` + +const ZETA: Fq = Fq::from_raw([ + 0xe000032cfffffffe, + 0xa35817a29ffaf50b, + 0x92ae3039253e641b, + 0xa0001afee1c95007, + 0x000000006c000392, + 0x0000000000009000, + 0x0000000000000000, +]); + +impl_binops_additive!(Fq, Fq); +impl_binops_multiplicative!(Fq, Fq); +field_common_7_limbs!( + Fq, + FqRepr, + MODULUS, + INV, + MODULUS_STR, + TWO_INV, + ROOT_OF_UNITY_INV, + DELTA, + ZETA, + R, + R2, + R3 +); +impl_sum_prod!(Fq); +impl_from_u64_7_limbs!(Fq, R2); +field_arithmetic_7_limbs!(Fq, MODULUS, INV, sparse); + +#[cfg(target_pointer_width = "64")] +field_bits_7_limbs!(Fq, MODULUS); +#[cfg(not(target_pointer_width = "64"))] +field_bits_7_limbs!(Fq, MODULUS, MODULUS_LIMBS_32); + +extend_field_legendre!(Fq); + +impl Fq { + /// Return field element size in bytes. + pub const fn size() -> usize { + SIZE + } +} + +impl ff::Field for Fq { + const ZERO: Self = Self::zero(); + const ONE: Self = Self::one(); + + fn random(mut rng: impl RngCore) -> Self { + Self::from_u512([ + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + rng.next_u64(), + ]) + } + + fn double(&self) -> Self { + self.double() + } + + #[inline(always)] + fn square(&self) -> Self { + self.square() + } + + /// Computes the multiplicative inverse of this element, + /// failing if the element is zero. + fn invert(&self) -> CtOption { + // self^(q - 2) + let tmp = self.pow_vartime([ + 0x1ffffcd2ffffffff, + 0x9ca7e85d60050af4, + 0xe4a775fe8e177fd6, + 0x443f9a5c7a8a6c7b, + 0xa803ca76f439266f, + 0x0130e0000d7f70e4, + 0x2400000000002400, + ]); + + CtOption::new(tmp, !self.ct_eq(&Self::zero())) + } + + fn sqrt(&self) -> CtOption { + /// `(t - 1) // 2` where t * 2^s + 1 = q with t odd. 
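+        /// Used as the exponent for the Tonelli-Shanks square root helper below.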
+ const T_MINUS1_OVER2: [u64; 7] = [ + 0xb002857a0ffffe69, + 0x470bbfeb4e53f42e, + 0x3d45363df253baff, + 0x7a1c9337a21fcd2e, + 0x06bfb8725401e53b, + 0x0000120000987000, + 0x0000000012000000, + ]; + ff::helpers::sqrt_tonelli_shanks(self, T_MINUS1_OVER2) + } + + fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) { + ff::helpers::sqrt_ratio_generic(num, div) + } +} + +#[derive(Clone, Copy, Debug)] +/// Canonical little-endian representation of a `Fq` element. +pub struct FqRepr { + pub repr: [u8; SIZE], +} + +impl FqRepr { + /// Returns an iterator over the bytes of the canoncial representation of the element. + pub fn iter(&self) -> Iter<'_, u8> { + self.repr.iter() + } +} + +impl Default for FqRepr { + fn default() -> Self { + FqRepr { repr: [0u8; SIZE] } + } +} + +impl AsRef<[u8]> for FqRepr { + fn as_ref(&self) -> &[u8] { + self.repr.as_ref() + } +} + +impl AsMut<[u8]> for FqRepr { + fn as_mut(&mut self) -> &mut [u8] { + self.repr.as_mut() + } +} +impl From<[u8; SIZE]> for FqRepr { + fn from(repr: [u8; SIZE]) -> Self { + Self { repr } + } +} + +impl ff::PrimeField for Fq { + type Repr = FqRepr; + + const NUM_BITS: u32 = 446; + const CAPACITY: u32 = 445; + const MODULUS: &'static str = MODULUS_STR; + const MULTIPLICATIVE_GENERATOR: Self = GENERATOR; + const ROOT_OF_UNITY: Self = ROOT_OF_UNITY; + const ROOT_OF_UNITY_INV: Self = ROOT_OF_UNITY_INV; + const TWO_INV: Self = TWO_INV; + const DELTA: Self = DELTA; + const S: u32 = S; + + fn from_repr(repr: Self::Repr) -> CtOption { + let mut tmp = Self([0, 0, 0, 0, 0, 0, 0]); + let repr = repr.repr; + + tmp.0[0] = u64::from_le_bytes(repr[0..8].try_into().unwrap()); + tmp.0[1] = u64::from_le_bytes(repr[8..16].try_into().unwrap()); + tmp.0[2] = u64::from_le_bytes(repr[16..24].try_into().unwrap()); + tmp.0[3] = u64::from_le_bytes(repr[24..32].try_into().unwrap()); + tmp.0[4] = u64::from_le_bytes(repr[32..40].try_into().unwrap()); + tmp.0[5] = u64::from_le_bytes(repr[40..48].try_into().unwrap()); + tmp.0[6] = u64::from_le_bytes(repr[48..56].try_into().unwrap()); + + // Try to subtract the modulus + let (_, borrow) = sbb(tmp.0[0], MODULUS.0[0], 0); + let (_, borrow) = sbb(tmp.0[1], MODULUS.0[1], borrow); + let (_, borrow) = sbb(tmp.0[2], MODULUS.0[2], borrow); + let (_, borrow) = sbb(tmp.0[3], MODULUS.0[3], borrow); + let (_, borrow) = sbb(tmp.0[4], MODULUS.0[4], borrow); + let (_, borrow) = sbb(tmp.0[5], MODULUS.0[5], borrow); + let (_, borrow) = sbb(tmp.0[6], MODULUS.0[6], borrow); + + // If the element is smaller than MODULUS then the + // subtraction will underflow, producing a borrow value + // of 0xffff...ffff. Otherwise, it'll be zero. 
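+        // Only the low bit of the final borrow is needed to build the `Choice`.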
+ let is_some = (borrow as u8) & 1; + + // Convert to Montgomery form by computing + // (a.R^0 * R^2) / R = a.R + tmp *= &R2; + + CtOption::new(tmp, Choice::from(is_some)) + } + + fn to_repr(&self) -> Self::Repr { + // Turn into canonical form by computing + // (a.R) / R = a + let tmp = Self::montgomery_reduce(&[ + self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], self.0[6], 0, 0, 0, + 0, 0, 0, 0, + ]); + + let mut res = [0; SIZE]; + res[0..8].copy_from_slice(&tmp.0[0].to_le_bytes()); + res[8..16].copy_from_slice(&tmp.0[1].to_le_bytes()); + res[16..24].copy_from_slice(&tmp.0[2].to_le_bytes()); + res[24..32].copy_from_slice(&tmp.0[3].to_le_bytes()); + res[32..40].copy_from_slice(&tmp.0[4].to_le_bytes()); + res[40..48].copy_from_slice(&tmp.0[5].to_le_bytes()); + res[48..56].copy_from_slice(&tmp.0[6].to_le_bytes()); + res.into() + } + + fn is_odd(&self) -> Choice { + Choice::from(self.to_repr().repr[0] & 1) + } +} + +impl FromUniformBytes<64> for Fq { + /// Converts a 512-bit little endian integer into + /// an `Fq` by reducing by the modulus. + fn from_uniform_bytes(bytes: &[u8; 64]) -> Self { + Self::from_u512([ + u64::from_le_bytes(bytes[0..8].try_into().unwrap()), + u64::from_le_bytes(bytes[8..16].try_into().unwrap()), + u64::from_le_bytes(bytes[16..24].try_into().unwrap()), + u64::from_le_bytes(bytes[24..32].try_into().unwrap()), + u64::from_le_bytes(bytes[32..40].try_into().unwrap()), + u64::from_le_bytes(bytes[40..48].try_into().unwrap()), + u64::from_le_bytes(bytes[48..56].try_into().unwrap()), + u64::from_le_bytes(bytes[56..64].try_into().unwrap()), + ]) + } +} + +impl WithSmallOrderMulGroup<3> for Fq { + const ZETA: Self = ZETA; +} + +#[cfg(test)] +mod test { + use crate::serde::SerdeObject; + + use super::*; + use ark_std::{end_timer, start_timer}; + use ff::Field; + use rand::SeedableRng; + use rand_core::OsRng; + use rand_xorshift::XorShiftRng; + + #[test] + fn test_sqrt() { + let v = (Fq::TWO_INV).square().sqrt().unwrap(); + assert!(v == Fq::TWO_INV || (-v) == Fq::TWO_INV); + + for _ in 0..10000 { + let a = Fq::random(OsRng); + let mut b = a; + b = b.square(); + + let b = b.sqrt().unwrap(); + let mut negb = b; + negb = negb.neg(); + + assert!(a == b || a == negb); + } + } + + #[test] + fn test_field() { + crate::tests::field::random_field_tests::("Pluto scalar".to_string()); + } + + #[test] + fn test_zeta() { + assert_eq!(Fq::ZETA * Fq::ZETA * Fq::ZETA, Fq::ONE); + assert_ne!(Fq::ZETA * Fq::ZETA, Fq::ONE); + } + + #[test] + fn test_delta() { + assert_eq!(Fq::DELTA, GENERATOR.pow([1u64 << Fq::S])); + assert_eq!(Fq::DELTA, Fq::MULTIPLICATIVE_GENERATOR.pow([1u64 << Fq::S])); + } + + #[test] + fn test_from_u512() { + const N_VECS: usize = 10; + let expected_results = [ + Fq::from_raw([ + 0x93638251ffeffed3, + 0xb17ab6ae332352b4, + 0xbf2731af91057325, + 0x7b700ef5a22260d0, + 0xc97c59318d325250, + 0xd7bc83d286537318, + 0x01d4a87b24f91154, + ]), + Fq::from_raw([ + 0x63e0a8f1beefc612, + 0x080f69572a9ddaae, + 0xb9ff1cf0e1f7c067, + 0xd8d8bf5b522bc48b, + 0xa7607085c7065359, + 0x617d8b0cda3f6328, + 0x03096ea964e009c0, + ]), + Fq::from_raw([ + 0x5eaedbda63b3e431, + 0x90ebbfa6f11a9266, + 0x4528cf4d506c9f9b, + 0x8c6ac679e9ac3856, + 0x001666755d9c2c57, + 0x9f7f457a48d6d322, + 0x20b2fadc6bf4004d, + ]), + Fq::from_raw([ + 0xeeea9cbd68b174cf, + 0x84af9e4ce5a781a5, + 0x3578772b5b482647, + 0x6b202eb54b7df723, + 0x55f541b1436b7660, + 0x2045de539849b035, + 0x1d5d7b5f6e8cc333, + ]), + Fq::from_raw([ + 0xe73df0f69b71a763, + 0xbccfb84010979d9d, + 0x1ce3c87be8bf3247, + 0x695fde61877cb617, 
+ 0x5006663bd0944209, + 0xd7ead2b7c71e460d, + 0x0f7c36b781cba9ed, + ]), + Fq::from_raw([ + 0xaeed10e8f00b189d, + 0x5190807038915743, + 0x90b840c0a13b0307, + 0x20fa8cc52c3a9a28, + 0xc3f229646be29c1d, + 0xb1d2bb5373270c43, + 0x0e18a3597be61302, + ]), + Fq::from_raw([ + 0xffbbc6b3e494ca68, + 0x30d4a100158c1751, + 0x0328dae560dff403, + 0x1495c3ce50cce340, + 0x93efc4d4d6ea0079, + 0x0a791ad7698655a7, + 0x22b10d5c1090eec8, + ]), + Fq::from_raw([ + 0xd96eec60211ad67b, + 0x4d081a969b3d8488, + 0x57c9b5abbeec4cf0, + 0x13ced15637e4b0eb, + 0xb98a559f49b0071c, + 0x819ba919d0b6e9b5, + 0x20f73876330a90e8, + ]), + Fq::from_raw([ + 0xbade57a48e2d9868, + 0xc688e43e21f9d2fc, + 0x848a82da9e1d75dc, + 0xae5f4536b9d60aa7, + 0x7957f2028c96467b, + 0xf850f49359458652, + 0x17ba9f9aa08b9ee2, + ]), + Fq::from_raw([ + 0xd0239c8282ccc372, + 0x4a777ad0b66181ea, + 0x53737d5f19e61bfc, + 0x5340b579fe7c4c83, + 0x8406f69a0f89f90a, + 0xd7d5d8bc4497465a, + 0x08ce8bee1323d4f9, + ]), + ]; + + let mut seeded_rng = XorShiftRng::seed_from_u64(0u64); + let uniform_bytes = std::iter::from_fn(|| { + let mut bytes = [0u8; 64]; + seeded_rng.fill_bytes(&mut bytes); + Some(bytes) + }) + .take(N_VECS) + .collect::>(); + + for i in 0..N_VECS { + let q = Fq::from_uniform_bytes(&uniform_bytes[i]); + assert_eq!(expected_results[i], q); + } + } + + #[test] + #[cfg(feature = "bits")] + fn test_bits() { + crate::tests::field::random_bits_tests::("Fq".to_string()); + } + + #[test] + fn test_serialization() { + crate::tests::field::random_serialization_test::("Fq".to_string()); + #[cfg(feature = "derive_serde")] + crate::tests::field::random_serde_test::("Fq".to_string()); + } + + fn is_less_than(x: &[u64; 7], y: &[u64; 7]) -> bool { + match x[6].cmp(&y[6]) { + core::cmp::Ordering::Less => return true, + core::cmp::Ordering::Greater => return false, + _ => {} + } + match x[5].cmp(&y[5]) { + core::cmp::Ordering::Less => return true, + core::cmp::Ordering::Greater => return false, + _ => {} + } + match x[4].cmp(&y[4]) { + core::cmp::Ordering::Less => return true, + core::cmp::Ordering::Greater => return false, + _ => {} + } + match x[3].cmp(&y[3]) { + core::cmp::Ordering::Less => return true, + core::cmp::Ordering::Greater => return false, + _ => {} + } + match x[2].cmp(&y[2]) { + core::cmp::Ordering::Less => return true, + core::cmp::Ordering::Greater => return false, + _ => {} + } + match x[1].cmp(&y[1]) { + core::cmp::Ordering::Less => return true, + core::cmp::Ordering::Greater => return false, + _ => {} + } + x[0].lt(&y[0]) + } + + #[test] + fn test_serialization_check() { + let mut rng = XorShiftRng::from_seed([ + 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, + 0xbc, 0xe5, + ]); + let start = start_timer!(|| "serialize Fq"); + // failure check + for _ in 0..1000000 { + let rand_word = [(); 7].map(|_| rng.next_u64()); + let a = Fq(rand_word); + let rand_bytes = a.to_raw_bytes(); + match is_less_than(&rand_word, &MODULUS.0) { + false => { + assert!(Fq::from_raw_bytes(&rand_bytes).is_none()); + } + _ => { + assert_eq!(Fq::from_raw_bytes(&rand_bytes), Some(a)); + } + } + } + end_timer!(start); + } +} diff --git a/src/pluto_eris/fields/mod.rs b/src/pluto_eris/fields/mod.rs new file mode 100644 index 00000000..2ed86a6b --- /dev/null +++ b/src/pluto_eris/fields/mod.rs @@ -0,0 +1,856 @@ +pub mod fp; +pub mod fp12; +pub mod fp2; +pub mod fp6; +pub mod fq; + +#[macro_export] +macro_rules! 
impl_from_u64_7_limbs { + ($field:ident, $r2:ident) => { + impl From for $field { + fn from(val: u64) -> $field { + $field([val, 0, 0, 0, 0, 0, 0]) * $r2 + } + } + }; +} + +#[macro_export] +macro_rules! field_common_7_limbs { + ( + $field:ident, + $field_repr:ident, + $modulus:ident, + $inv:ident, + $modulus_str:ident, + $two_inv:ident, + $root_of_unity_inv:ident, + $delta:ident, + $zeta:ident, + $r:ident, + $r2:ident, + $r3:ident + ) => { + impl $field { + /// Returns zero, the additive identity. + #[inline] + pub const fn zero() -> $field { + $field([0, 0, 0, 0, 0, 0, 0]) + } + + /// Returns one, the multiplicative identity. + #[inline] + pub const fn one() -> $field { + $r + } + + // Returns the Jacobi symbol, where the numerator and denominator + // are the element and the characteristic of the field, respectively. + // The Jacobi symbol is applicable to odd moduli + // while the Legendre symbol is applicable to prime moduli. + // They are equivalent for prime moduli. + #[inline(always)] + pub fn jacobi(&self) -> i64 { + $crate::ff_ext::jacobi::jacobi::<8>(&self.0, &$modulus.0) + } + + fn from_u512(limbs: [u64; 8]) -> $field { + // We reduce an arbitrary 512-bit number by decomposing it into two 256-bit digits + // with the higher bits multiplied by 2^256. Thus, we perform two reductions + // + // 1. the lower bits are multiplied by R^2, as normal + // 2. the upper bits are multiplied by R^2 * 2^256 = R^3 + // + // and computing their sum in the field. It remains to see that arbitrary 256-bit + // numbers can be placed into Montgomery form safely using the reduction. The + // reduction works so long as the product is less than R=2^256 multiplied by + // the modulus. This holds because for any `c` smaller than the modulus, we have + // that (2^256 - 1)*c is an acceptable product for the reduction. Therefore, the + // reduction always works so long as `c` is in the field; in this case it is either the + // constant `R2` or `R3`. + let d0 = $field([ + limbs[0], limbs[1], limbs[2], limbs[3], limbs[4], limbs[5], limbs[6], + ]); + let d1 = $field([limbs[7], 0u64, 0u64, 0u64, 0u64, 0u64, 0u64]); + // Convert to Montgomery form + d0 * $r2 + d1 * $r3 + } + + /// Converts from an integer represented in little endian + /// into its (congruent) `$field` representation. 
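+            /// The result is brought into Montgomery form via a Montgomery
+            /// multiplication by `R^2`, either through the unrolled `mac`/`adc`
+            /// schedule (with the `asm` feature) or a plain field multiplication.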
+ pub const fn from_raw(val: [u64; 7]) -> Self { + #[cfg(feature = "asm")] + { + let (r0, carry) = mac(0, val[0], $r2.0[0], 0); + let (r1, carry) = mac(0, val[0], $r2.0[1], carry); + let (r2, carry) = mac(0, val[0], $r2.0[2], carry); + let (r3, carry) = mac(0, val[0], $r2.0[3], carry); + let (r4, carry) = mac(0, val[0], $r2.0[4], carry); + let (r5, carry) = mac(0, val[0], $r2.0[5], carry); + let (r6, r7) = mac(0, val[0], $r2.0[6], carry); + + let (r1, carry) = mac(r1, val[1], $r2.0[0], 0); + let (r2, carry) = mac(r2, val[1], $r2.0[1], carry); + let (r3, carry) = mac(r3, val[1], $r2.0[2], carry); + let (r4, carry) = mac(r4, val[1], $r2.0[3], carry); + let (r5, carry) = mac(r5, val[1], $r2.0[4], carry); + let (r6, carry) = mac(r6, val[1], $r2.0[5], carry); + let (r7, r8) = mac(r7, val[1], $r2.0[6], carry); + + let (r2, carry) = mac(r2, val[2], $r2.0[0], 0); + let (r3, carry) = mac(r3, val[2], $r2.0[1], carry); + let (r4, carry) = mac(r4, val[2], $r2.0[2], carry); + let (r5, carry) = mac(r5, val[2], $r2.0[3], carry); + let (r6, carry) = mac(r6, val[2], $r2.0[4], carry); + let (r7, carry) = mac(r7, val[2], $r2.0[5], carry); + let (r8, r9) = mac(r8, val[2], $r2.0[6], carry); + + let (r3, carry) = mac(r3, val[3], $r2.0[0], 0); + let (r4, carry) = mac(r4, val[3], $r2.0[1], carry); + let (r5, carry) = mac(r5, val[3], $r2.0[2], carry); + let (r6, carry) = mac(r6, val[3], $r2.0[3], carry); + let (r7, carry) = mac(r7, val[3], $r2.0[4], carry); + let (r8, carry) = mac(r8, val[3], $r2.0[5], carry); + let (r9, r10) = mac(r9, val[3], $r2.0[6], carry); + + let (r4, carry) = mac(r4, val[4], $r2.0[0], 0); + let (r5, carry) = mac(r5, val[4], $r2.0[1], carry); + let (r6, carry) = mac(r6, val[4], $r2.0[2], carry); + let (r7, carry) = mac(r7, val[4], $r2.0[3], carry); + let (r8, carry) = mac(r8, val[4], $r2.0[4], carry); + let (r9, carry) = mac(r9, val[4], $r2.0[5], carry); + let (r10, r11) = mac(r10, val[4], $r2.0[6], carry); + + let (r5, carry) = mac(r5, val[5], $r2.0[0], 0); + let (r6, carry) = mac(r6, val[5], $r2.0[1], carry); + let (r7, carry) = mac(r7, val[5], $r2.0[2], carry); + let (r8, carry) = mac(r8, val[5], $r2.0[3], carry); + let (r9, carry) = mac(r9, val[5], $r2.0[4], carry); + let (r10, carry) = mac(r10, val[5], $r2.0[5], carry); + let (r11, r12) = mac(r11, val[5], $r2.0[6], carry); + + let (r6, carry) = mac(r6, val[6], $r2.0[0], 0); + let (r7, carry) = mac(r7, val[6], $r2.0[1], carry); + let (r8, carry) = mac(r8, val[6], $r2.0[2], carry); + let (r9, carry) = mac(r9, val[6], $r2.0[3], carry); + let (r10, carry) = mac(r10, val[6], $r2.0[4], carry); + let (r11, carry) = mac(r11, val[6], $r2.0[5], carry); + let (r12, r13) = mac(r12, val[6], $r2.0[6], carry); + + // Montgomery reduction + let k = r0.wrapping_mul($inv); + let (_, carry) = mac(r0, k, $modulus.0[0], 0); + let (r1, carry) = mac(r1, k, $modulus.0[1], carry); + let (r2, carry) = mac(r2, k, $modulus.0[2], carry); + let (r3, carry) = mac(r3, k, $modulus.0[3], carry); + let (r4, carry) = mac(r4, k, $modulus.0[4], carry); + let (r5, carry) = mac(r5, k, $modulus.0[5], carry); + let (r6, carry) = mac(r6, k, $modulus.0[6], carry); + let (r7, carry2) = adc(r7, 0, carry); + + let k = r1.wrapping_mul($inv); + let (_, carry) = mac(r1, k, $modulus.0[0], 0); + let (r2, carry) = mac(r2, k, $modulus.0[1], carry); + let (r3, carry) = mac(r3, k, $modulus.0[2], carry); + let (r4, carry) = mac(r4, k, $modulus.0[3], carry); + let (r5, carry) = mac(r5, k, $modulus.0[4], carry); + let (r6, carry) = mac(r6, k, $modulus.0[5], carry); + let (r7, carry) = mac(r7, 
k, $modulus.0[6], carry); + let (r8, carry2) = adc(r8, carry2, carry); + + let k = r2.wrapping_mul($inv); + let (_, carry) = mac(r2, k, $modulus.0[0], 0); + let (r3, carry) = mac(r3, k, $modulus.0[1], carry); + let (r4, carry) = mac(r4, k, $modulus.0[2], carry); + let (r5, carry) = mac(r5, k, $modulus.0[3], carry); + let (r6, carry) = mac(r6, k, $modulus.0[4], carry); + let (r7, carry) = mac(r7, k, $modulus.0[5], carry); + let (r8, carry) = mac(r8, k, $modulus.0[6], carry); + let (r9, carry2) = adc(r9, carry2, carry); + + let k = r3.wrapping_mul($inv); + let (_, carry) = mac(r3, k, $modulus.0[0], 0); + let (r4, carry) = mac(r4, k, $modulus.0[1], carry); + let (r5, carry) = mac(r5, k, $modulus.0[2], carry); + let (r6, carry) = mac(r6, k, $modulus.0[3], carry); + let (r7, carry) = mac(r7, k, $modulus.0[4], carry); + let (r8, carry) = mac(r8, k, $modulus.0[5], carry); + let (r9, carry) = mac(r9, k, $modulus.0[6], carry); + let (r10, carry2) = adc(r10, carry2, carry); + + let k = r4.wrapping_mul($inv); + let (_, carry) = mac(r4, k, $modulus.0[0], 0); + let (r5, carry) = mac(r5, k, $modulus.0[1], carry); + let (r6, carry) = mac(r6, k, $modulus.0[2], carry); + let (r7, carry) = mac(r7, k, $modulus.0[3], carry); + let (r8, carry) = mac(r8, k, $modulus.0[4], carry); + let (r9, carry) = mac(r9, k, $modulus.0[5], carry); + let (r10, carry) = mac(r10, k, $modulus.0[6], carry); + let (r11, carry2) = adc(r11, carry2, carry); + + let k = r5.wrapping_mul($inv); + let (_, carry) = mac(r5, k, $modulus.0[0], 0); + let (r6, carry) = mac(r6, k, $modulus.0[1], carry); + let (r7, carry) = mac(r7, k, $modulus.0[2], carry); + let (r8, carry) = mac(r8, k, $modulus.0[3], carry); + let (r9, carry) = mac(r9, k, $modulus.0[4], carry); + let (r10, carry) = mac(r10, k, $modulus.0[5], carry); + let (r11, carry) = mac(r11, k, $modulus.0[6], carry); + let (r12, carry2) = adc(r12, carry2, carry); + + let k = r6.wrapping_mul($inv); + let (_, carry) = mac(r6, k, $modulus.0[0], 0); + let (r7, carry) = mac(r7, k, $modulus.0[1], carry); + let (r8, carry) = mac(r8, k, $modulus.0[2], carry); + let (r9, carry) = mac(r9, k, $modulus.0[3], carry); + let (r10, carry) = mac(r10, k, $modulus.0[4], carry); + let (r11, carry) = mac(r11, k, $modulus.0[5], carry); + let (r12, carry) = mac(r12, k, $modulus.0[6], carry); + let (r13, carry2) = adc(r13, carry2, carry); + + // Result may be within MODULUS of the correct value + let (d0, borrow) = sbb(r7, $modulus.0[0], 0); + let (d1, borrow) = sbb(r8, $modulus.0[1], borrow); + let (d2, borrow) = sbb(r9, $modulus.0[2], borrow); + let (d3, borrow) = sbb(r10, $modulus.0[3], borrow); + let (d4, borrow) = sbb(r11, $modulus.0[4], borrow); + let (d5, borrow) = sbb(r12, $modulus.0[5], borrow); + let (d6, borrow) = sbb(r13, $modulus.0[6], borrow); + let (_, borrow) = sbb(carry2, 0, borrow); + let (d0, carry) = adc(d0, $modulus.0[0] & borrow, 0); + let (d1, carry) = adc(d1, $modulus.0[1] & borrow, carry); + let (d2, carry) = adc(d2, $modulus.0[2] & borrow, carry); + let (d3, carry) = adc(d3, $modulus.0[3] & borrow, carry); + let (d4, carry) = adc(d4, $modulus.0[4] & borrow, carry); + let (d5, carry) = adc(d5, $modulus.0[5] & borrow, carry); + let (d6, _) = adc(d6, $modulus.0[6] & borrow, carry); + + $field([d0, d1, d2, d3, d4, d5, d6]) + } + #[cfg(not(feature = "asm"))] + { + (&$field(val)).mul(&$r2) + } + } + + /// Attempts to convert a little-endian byte representation of + /// a scalar into a `Fr`, failing if the input is not canonical. 
+ pub fn from_bytes(bytes: &[u8; 56]) -> CtOption<$field> { + ::from_repr($field_repr { repr: *bytes }) + } + + /// Converts an element of `Fr` into a byte representation in + /// little-endian byte order. + pub fn to_bytes(&self) -> [u8; 56] { + ::to_repr(self).repr + } + + /// Lexicographic comparison of Montgomery forms. + #[inline(always)] + const fn is_less_than(x: &[u64; 7], y: &[u64; 7]) -> bool { + let (_, borrow) = sbb(x[0], y[0], 0); + let (_, borrow) = sbb(x[1], y[1], borrow); + let (_, borrow) = sbb(x[2], y[2], borrow); + let (_, borrow) = sbb(x[3], y[3], borrow); + let (_, borrow) = sbb(x[4], y[4], borrow); + let (_, borrow) = sbb(x[5], y[5], borrow); + let (_, borrow) = sbb(x[6], y[6], borrow); + borrow >> 63 == 1 + } + } + + impl fmt::Debug for $field { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let tmp = self.to_repr(); + write!(f, "0x")?; + for &b in tmp.iter().rev() { + write!(f, "{:02x}", b)?; + } + Ok(()) + } + } + + impl Default for $field { + #[inline] + fn default() -> Self { + Self::zero() + } + } + + impl From for $field { + fn from(bit: bool) -> $field { + if bit { + $field::one() + } else { + $field::zero() + } + } + } + + impl ConstantTimeEq for $field { + fn ct_eq(&self, other: &Self) -> Choice { + self.0[0].ct_eq(&other.0[0]) + & self.0[1].ct_eq(&other.0[1]) + & self.0[2].ct_eq(&other.0[2]) + & self.0[3].ct_eq(&other.0[3]) + & self.0[4].ct_eq(&other.0[4]) + & self.0[5].ct_eq(&other.0[5]) + & self.0[6].ct_eq(&other.0[6]) + } + } + + impl core::cmp::Ord for $field { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + let left = self.to_repr(); + let right = other.to_repr(); + left.iter() + .zip(right.iter()) + .rev() + .find_map(|(left_byte, right_byte)| match left_byte.cmp(right_byte) { + core::cmp::Ordering::Equal => None, + res => Some(res), + }) + .unwrap_or(core::cmp::Ordering::Equal) + } + } + + impl core::cmp::PartialOrd for $field { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + impl ConditionallySelectable for $field { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + $field([ + u64::conditional_select(&a.0[0], &b.0[0], choice), + u64::conditional_select(&a.0[1], &b.0[1], choice), + u64::conditional_select(&a.0[2], &b.0[2], choice), + u64::conditional_select(&a.0[3], &b.0[3], choice), + u64::conditional_select(&a.0[4], &b.0[4], choice), + u64::conditional_select(&a.0[5], &b.0[5], choice), + u64::conditional_select(&a.0[6], &b.0[6], choice), + ]) + } + } + + impl<'a> Neg for &'a $field { + type Output = $field; + + #[inline] + fn neg(self) -> $field { + self.neg() + } + } + + impl Neg for $field { + type Output = $field; + + #[inline] + fn neg(self) -> $field { + -&self + } + } + + impl<'a, 'b> Sub<&'b $field> for &'a $field { + type Output = $field; + + #[inline] + fn sub(self, rhs: &'b $field) -> $field { + self.sub(rhs) + } + } + + impl<'a, 'b> Add<&'b $field> for &'a $field { + type Output = $field; + + #[inline] + fn add(self, rhs: &'b $field) -> $field { + self.add(rhs) + } + } + + impl<'a, 'b> Mul<&'b $field> for &'a $field { + type Output = $field; + + #[inline] + fn mul(self, rhs: &'b $field) -> $field { + self.mul(rhs) + } + } + + impl From<$field> for [u8; 56] { + fn from(value: $field) -> [u8; 56] { + value.to_repr().repr + } + } + + impl<'a> From<&'a $field> for [u8; 56] { + fn from(value: &'a $field) -> [u8; 56] { + value.to_repr().repr + } + } + + impl $crate::serde::SerdeObject for $field { + fn from_raw_bytes_unchecked(bytes: &[u8]) -> Self { + 
debug_assert_eq!(bytes.len(), 56); + let inner = [0, 8, 16, 24, 32, 40, 48] + .map(|i| u64::from_le_bytes(bytes[i..i + 8].try_into().unwrap())); + Self(inner) + } + fn from_raw_bytes(bytes: &[u8]) -> Option { + if bytes.len() != 56 { + return None; + } + let elt = Self::from_raw_bytes_unchecked(bytes); + Self::is_less_than(&elt.0, &$modulus.0).then(|| elt) + } + fn to_raw_bytes(&self) -> Vec { + let mut res = Vec::with_capacity(56); + for limb in self.0.iter() { + res.extend_from_slice(&limb.to_le_bytes()); + } + res + } + fn read_raw_unchecked(reader: &mut R) -> Self { + let inner = [(); 7].map(|_| { + let mut buf = [0; 8]; + reader.read_exact(&mut buf).unwrap(); + u64::from_le_bytes(buf) + }); + Self(inner) + } + fn read_raw(reader: &mut R) -> std::io::Result { + let mut inner = [0u64; 7]; + for limb in inner.iter_mut() { + let mut buf = [0; 8]; + reader.read_exact(&mut buf)?; + *limb = u64::from_le_bytes(buf); + } + let elt = Self(inner); + Self::is_less_than(&elt.0, &$modulus.0) + .then(|| elt) + .ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + "input number is not less than field modulus", + ) + }) + } + fn write_raw(&self, writer: &mut W) -> std::io::Result<()> { + for limb in self.0.iter() { + writer.write_all(&limb.to_le_bytes())?; + } + Ok(()) + } + } + }; +} + +#[macro_export] +macro_rules! field_arithmetic_7_limbs { + ($field:ident, $modulus:ident, $inv:ident, $field_type:ident) => { + $crate::field_specific_7_limbs!($field, $modulus, $inv, $field_type); + impl $field { + /// Doubles this field element. + #[inline] + pub const fn double(&self) -> $field { + self.add(self) + } + + /// Squares this element. + #[inline] + pub const fn square(&self) -> $field { + let (r1, carry) = mac(0, self.0[0], self.0[1], 0); + let (r2, carry) = mac(0, self.0[0], self.0[2], carry); + let (r3, carry) = mac(0, self.0[0], self.0[3], carry); + let (r4, carry) = mac(0, self.0[0], self.0[4], carry); + let (r5, carry) = mac(0, self.0[0], self.0[5], carry); + let (r6, r7) = mac(0, self.0[0], self.0[6], carry); + + let (r3, carry) = mac(r3, self.0[1], self.0[2], 0); + let (r4, carry) = mac(r4, self.0[1], self.0[3], carry); + let (r5, carry) = mac(r5, self.0[1], self.0[4], carry); + let (r6, carry) = mac(r6, self.0[1], self.0[5], carry); + let (r7, r8) = mac(r7, self.0[1], self.0[6], carry); + + let (r5, carry) = mac(r5, self.0[2], self.0[3], 0); + let (r6, carry) = mac(r6, self.0[2], self.0[4], carry); + let (r7, carry) = mac(r7, self.0[2], self.0[5], carry); + let (r8, r9) = mac(r8, self.0[2], self.0[6], carry); + + let (r7, carry) = mac(r7, self.0[3], self.0[4], 0); + let (r8, carry) = mac(r8, self.0[3], self.0[5], carry); + let (r9, r10) = mac(r9, self.0[3], self.0[6], carry); + + let (r9, carry) = mac(r9, self.0[4], self.0[5], 0); + let (r10, r11) = mac(r10, self.0[4], self.0[6], carry); + + let (r11, r12) = mac(r11, self.0[5], self.0[6], 0); + + let r13 = r12 >> 63; + let r12 = (r12 << 1) | (r11 >> 63); + let r11 = (r11 << 1) | (r10 >> 63); + let r10 = (r10 << 1) | (r9 >> 63); + let r9 = (r9 << 1) | (r8 >> 63); + let r8 = (r8 << 1) | (r7 >> 63); + let r7 = (r7 << 1) | (r6 >> 63); + let r6 = (r6 << 1) | (r5 >> 63); + let r5 = (r5 << 1) | (r4 >> 63); + let r4 = (r4 << 1) | (r3 >> 63); + let r3 = (r3 << 1) | (r2 >> 63); + let r2 = (r2 << 1) | (r1 >> 63); + let r1 = r1 << 1; + + let (r0, carry) = mac(0, self.0[0], self.0[0], 0); + let (r1, carry) = adc(0, r1, carry); + let (r2, carry) = mac(r2, self.0[1], self.0[1], carry); + let (r3, carry) = adc(0, r3, carry); + let (r4, 
carry) = mac(r4, self.0[2], self.0[2], carry); + let (r5, carry) = adc(0, r5, carry); + let (r6, carry) = mac(r6, self.0[3], self.0[3], carry); + let (r7, carry) = adc(0, r7, carry); + let (r8, carry) = mac(r8, self.0[4], self.0[4], carry); + let (r9, carry) = adc(0, r9, carry); + let (r10, carry) = mac(r10, self.0[5], self.0[5], carry); + let (r11, carry) = adc(0, r11, carry); + let (r12, carry) = mac(r12, self.0[6], self.0[6], carry); + let (r13, _) = adc(0, r13, carry); + + $field::montgomery_reduce(&[ + r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, + ]) + } + + /// Multiplies `rhs` by `self`, returning the result. + #[inline] + pub const fn mul(&self, rhs: &Self) -> $field { + // Schoolbook multiplication + let (r0, carry) = mac(0, self.0[0], rhs.0[0], 0); + let (r1, carry) = mac(0, self.0[0], rhs.0[1], carry); + let (r2, carry) = mac(0, self.0[0], rhs.0[2], carry); + let (r3, carry) = mac(0, self.0[0], rhs.0[3], carry); + let (r4, carry) = mac(0, self.0[0], rhs.0[4], carry); + let (r5, carry) = mac(0, self.0[0], rhs.0[5], carry); + let (r6, r7) = mac(0, self.0[0], rhs.0[6], carry); + + let (r1, carry) = mac(r1, self.0[1], rhs.0[0], 0); + let (r2, carry) = mac(r2, self.0[1], rhs.0[1], carry); + let (r3, carry) = mac(r3, self.0[1], rhs.0[2], carry); + let (r4, carry) = mac(r4, self.0[1], rhs.0[3], carry); + let (r5, carry) = mac(r5, self.0[1], rhs.0[4], carry); + let (r6, carry) = mac(r6, self.0[1], rhs.0[5], carry); + let (r7, r8) = mac(r7, self.0[1], rhs.0[6], carry); + + let (r2, carry) = mac(r2, self.0[2], rhs.0[0], 0); + let (r3, carry) = mac(r3, self.0[2], rhs.0[1], carry); + let (r4, carry) = mac(r4, self.0[2], rhs.0[2], carry); + let (r5, carry) = mac(r5, self.0[2], rhs.0[3], carry); + let (r6, carry) = mac(r6, self.0[2], rhs.0[4], carry); + let (r7, carry) = mac(r7, self.0[2], rhs.0[5], carry); + let (r8, r9) = mac(r8, self.0[2], rhs.0[6], carry); + + let (r3, carry) = mac(r3, self.0[3], rhs.0[0], 0); + let (r4, carry) = mac(r4, self.0[3], rhs.0[1], carry); + let (r5, carry) = mac(r5, self.0[3], rhs.0[2], carry); + let (r6, carry) = mac(r6, self.0[3], rhs.0[3], carry); + let (r7, carry) = mac(r7, self.0[3], rhs.0[4], carry); + let (r8, carry) = mac(r8, self.0[3], rhs.0[5], carry); + let (r9, r10) = mac(r9, self.0[3], rhs.0[6], carry); + + let (r4, carry) = mac(r4, self.0[4], rhs.0[0], 0); + let (r5, carry) = mac(r5, self.0[4], rhs.0[1], carry); + let (r6, carry) = mac(r6, self.0[4], rhs.0[2], carry); + let (r7, carry) = mac(r7, self.0[4], rhs.0[3], carry); + let (r8, carry) = mac(r8, self.0[4], rhs.0[4], carry); + let (r9, carry) = mac(r9, self.0[4], rhs.0[5], carry); + let (r10, r11) = mac(r10, self.0[4], rhs.0[6], carry); + + let (r5, carry) = mac(r5, self.0[5], rhs.0[0], 0); + let (r6, carry) = mac(r6, self.0[5], rhs.0[1], carry); + let (r7, carry) = mac(r7, self.0[5], rhs.0[2], carry); + let (r8, carry) = mac(r8, self.0[5], rhs.0[3], carry); + let (r9, carry) = mac(r9, self.0[5], rhs.0[4], carry); + let (r10, carry) = mac(r10, self.0[5], rhs.0[5], carry); + let (r11, r12) = mac(r11, self.0[5], rhs.0[6], carry); + + let (r6, carry) = mac(r6, self.0[6], rhs.0[0], 0); + let (r7, carry) = mac(r7, self.0[6], rhs.0[1], carry); + let (r8, carry) = mac(r8, self.0[6], rhs.0[2], carry); + let (r9, carry) = mac(r9, self.0[6], rhs.0[3], carry); + let (r10, carry) = mac(r10, self.0[6], rhs.0[4], carry); + let (r11, carry) = mac(r11, self.0[6], rhs.0[5], carry); + let (r12, r13) = mac(r12, self.0[6], rhs.0[6], carry); + + $field::montgomery_reduce(&[ + r0, r1, r2, r3, r4, 
r5, r6, r7, r8, r9, r10, r11, r12, r13, + ]) + } + + /// Subtracts `rhs` from `self`, returning the result. + #[inline] + pub const fn sub(&self, rhs: &Self) -> Self { + let (d0, borrow) = sbb(self.0[0], rhs.0[0], 0); + let (d1, borrow) = sbb(self.0[1], rhs.0[1], borrow); + let (d2, borrow) = sbb(self.0[2], rhs.0[2], borrow); + let (d3, borrow) = sbb(self.0[3], rhs.0[3], borrow); + let (d4, borrow) = sbb(self.0[4], rhs.0[4], borrow); + let (d5, borrow) = sbb(self.0[5], rhs.0[5], borrow); + let (d6, borrow) = sbb(self.0[6], rhs.0[6], borrow); + + // If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise + // borrow = 0x000...000. Thus, we use it as a mask to conditionally add the modulus. + let (d0, carry) = adc(d0, $modulus.0[0] & borrow, 0); + let (d1, carry) = adc(d1, $modulus.0[1] & borrow, carry); + let (d2, carry) = adc(d2, $modulus.0[2] & borrow, carry); + let (d3, carry) = adc(d3, $modulus.0[3] & borrow, carry); + let (d4, carry) = adc(d4, $modulus.0[4] & borrow, carry); + let (d5, carry) = adc(d5, $modulus.0[5] & borrow, carry); + let (d6, _) = adc(d6, $modulus.0[6] & borrow, carry); + + $field([d0, d1, d2, d3, d4, d5, d6]) + } + + /// Negates `self`. + #[inline] + pub const fn neg(&self) -> Self { + // Subtract `self` from `MODULUS` to negate. Ignore the final + // borrow because it cannot underflow; self is guaranteed to + // be in the field. + let (d0, borrow) = sbb($modulus.0[0], self.0[0], 0); + let (d1, borrow) = sbb($modulus.0[1], self.0[1], borrow); + let (d2, borrow) = sbb($modulus.0[2], self.0[2], borrow); + let (d3, borrow) = sbb($modulus.0[3], self.0[3], borrow); + let (d4, borrow) = sbb($modulus.0[4], self.0[4], borrow); + let (d5, borrow) = sbb($modulus.0[5], self.0[5], borrow); + let (d6, _) = sbb($modulus.0[6], self.0[6], borrow); + + // `tmp` could be `MODULUS` if `self` was zero. Create a mask that is + // zero if `self` was zero, and `u64::max_value()` if self was nonzero. + let mask = (((self.0[0] + | self.0[1] + | self.0[2] + | self.0[3] + | self.0[4] + | self.0[5] + | self.0[6]) + == 0) as u64) + .wrapping_sub(1); + + $field([ + d0 & mask, + d1 & mask, + d2 & mask, + d3 & mask, + d4 & mask, + d5 & mask, + d6 & mask, + ]) + } + } + }; +} + +#[macro_export] +macro_rules! field_specific_7_limbs { + ($field:ident, $modulus:ident, $inv:ident, sparse) => { + impl $field { + /// Adds `rhs` to `self`, returning the result. + #[inline] + pub const fn add(&self, rhs: &Self) -> Self { + let (d0, carry) = adc(self.0[0], rhs.0[0], 0); + let (d1, carry) = adc(self.0[1], rhs.0[1], carry); + let (d2, carry) = adc(self.0[2], rhs.0[2], carry); + let (d3, carry) = adc(self.0[3], rhs.0[3], carry); + let (d4, carry) = adc(self.0[4], rhs.0[4], carry); + let (d5, carry) = adc(self.0[5], rhs.0[5], carry); + let (d6, _) = adc(self.0[6], rhs.0[6], carry); + + // Attempt to subtract the modulus, to ensure the value + // is smaller than the modulus. + (&$field([d0, d1, d2, d3, d4, d5, d6])).sub(&$modulus) + } + + #[inline(always)] + pub(crate) const fn montgomery_reduce(r: &[u64; 14]) -> $field { + // The Montgomery reduction here is based on Algorithm 14.32 in + // Handbook of Applied Cryptography + // . 
+ + let k = r[0].wrapping_mul($inv); + let (_, carry) = mac(r[0], k, $modulus.0[0], 0); + let (r1, carry) = mac(r[1], k, $modulus.0[1], carry); + let (r2, carry) = mac(r[2], k, $modulus.0[2], carry); + let (r3, carry) = mac(r[3], k, $modulus.0[3], carry); + let (r4, carry) = mac(r[4], k, $modulus.0[4], carry); + let (r5, carry) = mac(r[5], k, $modulus.0[5], carry); + let (r6, carry) = mac(r[6], k, $modulus.0[6], carry); + let (r7, carry2) = adc(r[7], 0, carry); + + let k = r1.wrapping_mul($inv); + let (_, carry) = mac(r1, k, $modulus.0[0], 0); + let (r2, carry) = mac(r2, k, $modulus.0[1], carry); + let (r3, carry) = mac(r3, k, $modulus.0[2], carry); + let (r4, carry) = mac(r4, k, $modulus.0[3], carry); + let (r5, carry) = mac(r5, k, $modulus.0[4], carry); + let (r6, carry) = mac(r6, k, $modulus.0[5], carry); + let (r7, carry) = mac(r7, k, $modulus.0[6], carry); + let (r8, carry2) = adc(r[8], carry2, carry); + + let k = r2.wrapping_mul($inv); + let (_, carry) = mac(r2, k, $modulus.0[0], 0); + let (r3, carry) = mac(r3, k, $modulus.0[1], carry); + let (r4, carry) = mac(r4, k, $modulus.0[2], carry); + let (r5, carry) = mac(r5, k, $modulus.0[3], carry); + let (r6, carry) = mac(r6, k, $modulus.0[4], carry); + let (r7, carry) = mac(r7, k, $modulus.0[5], carry); + let (r8, carry) = mac(r8, k, $modulus.0[6], carry); + let (r9, carry2) = adc(r[9], carry2, carry); + + let k = r3.wrapping_mul($inv); + let (_, carry) = mac(r3, k, $modulus.0[0], 0); + let (r4, carry) = mac(r4, k, $modulus.0[1], carry); + let (r5, carry) = mac(r5, k, $modulus.0[2], carry); + let (r6, carry) = mac(r6, k, $modulus.0[3], carry); + let (r7, carry) = mac(r7, k, $modulus.0[4], carry); + let (r8, carry) = mac(r8, k, $modulus.0[5], carry); + let (r9, carry) = mac(r9, k, $modulus.0[6], carry); + let (r10, carry2) = adc(r[10], carry2, carry); + + let k = r4.wrapping_mul($inv); + let (_, carry) = mac(r4, k, $modulus.0[0], 0); + let (r5, carry) = mac(r5, k, $modulus.0[1], carry); + let (r6, carry) = mac(r6, k, $modulus.0[2], carry); + let (r7, carry) = mac(r7, k, $modulus.0[3], carry); + let (r8, carry) = mac(r8, k, $modulus.0[4], carry); + let (r9, carry) = mac(r9, k, $modulus.0[5], carry); + let (r10, carry) = mac(r10, k, $modulus.0[6], carry); + let (r11, carry2) = adc(r[11], carry2, carry); + + let k = r5.wrapping_mul($inv); + let (_, carry) = mac(r5, k, $modulus.0[0], 0); + let (r6, carry) = mac(r6, k, $modulus.0[1], carry); + let (r7, carry) = mac(r7, k, $modulus.0[2], carry); + let (r8, carry) = mac(r8, k, $modulus.0[3], carry); + let (r9, carry) = mac(r9, k, $modulus.0[4], carry); + let (r10, carry) = mac(r10, k, $modulus.0[5], carry); + let (r11, carry) = mac(r11, k, $modulus.0[6], carry); + let (r12, carry2) = adc(r[12], carry2, carry); + + let k = r6.wrapping_mul($inv); + let (_, carry) = mac(r6, k, $modulus.0[0], 0); + let (r7, carry) = mac(r7, k, $modulus.0[1], carry); + let (r8, carry) = mac(r8, k, $modulus.0[2], carry); + let (r9, carry) = mac(r9, k, $modulus.0[3], carry); + let (r10, carry) = mac(r10, k, $modulus.0[4], carry); + let (r11, carry) = mac(r11, k, $modulus.0[5], carry); + let (r12, carry) = mac(r12, k, $modulus.0[6], carry); + let (r13, _) = adc(r[13], carry2, carry); + // Result may be within MODULUS of the correct value + (&$field([r7, r8, r9, r10, r11, r12, r13])).sub(&$modulus) + } + } + }; + ($field:ident, $modulus:ident, $inv:ident, dense) => { + impl $field { + /// Adds `rhs` to `self`, returning the result. 
+ #[inline] + pub const fn add(&self, rhs: &Self) -> Self { + let (d0, carry) = adc(self.0[0], rhs.0[0], 0); + let (d1, carry) = adc(self.0[1], rhs.0[1], carry); + let (d2, carry) = adc(self.0[2], rhs.0[2], carry); + let (d3, carry) = adc(self.0[3], rhs.0[3], carry); + let (d4, carry) = adc(self.0[4], rhs.0[4], carry); + let (d5, carry) = adc(self.0[5], rhs.0[5], carry); + let (d6, carry) = adc(self.0[6], rhs.0[6], carry); + + // Attempt to subtract the modulus, to ensure the value + // is smaller than the modulus. + let (d0, borrow) = sbb(d0, $modulus.0[0], 0); + let (d1, borrow) = sbb(d1, $modulus.0[1], borrow); + let (d2, borrow) = sbb(d2, $modulus.0[2], borrow); + let (d3, borrow) = sbb(d3, $modulus.0[3], borrow); + let (d4, borrow) = sbb(d4, $modulus.0[4], borrow); + let (d5, borrow) = sbb(d5, $modulus.0[5], borrow); + let (_, borrow) = sbb(carry, 0, borrow); + + let (d0, carry) = adc(d0, $modulus.0[0] & borrow, 0); + let (d1, carry) = adc(d1, $modulus.0[1] & borrow, carry); + let (d2, carry) = adc(d2, $modulus.0[2] & borrow, carry); + let (d3, carry) = adc(d3, $modulus.0[3] & borrow, carry); + let (d4, carry) = adc(d4, $modulus.0[4] & borrow, carry); + let (d5, carry) = adc(d5, $modulus.0[5] & borrow, carry); + let (d6, _) = adc(d6, $modulus.0[6] & borrow, carry); + + $field([d0, d1, d2, d3, d4, d5, d6]) + } + } + }; +} + +#[macro_export] +macro_rules! field_bits_7_limbs { + // For #[cfg(target_pointer_width = "64")] + ($field:ident, $modulus:ident) => { + #[cfg(feature = "bits")] + #[cfg_attr(docsrs, doc(cfg(feature = "bits")))] + impl ::ff::PrimeFieldBits for $field { + type ReprBits = [u64; 7]; + + fn to_le_bits(&self) -> ::ff::FieldBits { + let bytes = self.to_repr().repr; + + let limbs = [ + u64::from_le_bytes(bytes[0..8].try_into().unwrap()), + u64::from_le_bytes(bytes[8..16].try_into().unwrap()), + u64::from_le_bytes(bytes[16..24].try_into().unwrap()), + u64::from_le_bytes(bytes[24..32].try_into().unwrap()), + u64::from_le_bytes(bytes[32..40].try_into().unwrap()), + u64::from_le_bytes(bytes[40..48].try_into().unwrap()), + u64::from_le_bytes(bytes[48..56].try_into().unwrap()), + ]; + + ::ff::FieldBits::new(limbs) + } + + fn char_le_bits() -> ::ff::FieldBits { + ::ff::FieldBits::new($modulus.0) + } + } + }; + // For #[cfg(not(target_pointer_width = "64"))] + ($field:ident, $modulus:ident, $modulus_limbs_32:ident) => { + #[cfg(feature = "bits")] + #[cfg_attr(docsrs, doc(cfg(feature = "bits")))] + impl ::ff::PrimeFieldBits for $field { + type ReprBits = [u32; 14]; + + fn to_le_bits(&self) -> ::ff::FieldBits { + let bytes = self.to_repr().repr; + + let limbs = [ + u32::from_le_bytes(bytes[0..4].try_into().unwrap()), + u32::from_le_bytes(bytes[4..8].try_into().unwrap()), + u32::from_le_bytes(bytes[8..12].try_into().unwrap()), + u32::from_le_bytes(bytes[12..16].try_into().unwrap()), + u32::from_le_bytes(bytes[16..20].try_into().unwrap()), + u32::from_le_bytes(bytes[20..24].try_into().unwrap()), + u32::from_le_bytes(bytes[24..28].try_into().unwrap()), + u32::from_le_bytes(bytes[28..32].try_into().unwrap()), + u32::from_le_bytes(bytes[32..36].try_into().unwrap()), + u32::from_le_bytes(bytes[36..40].try_into().unwrap()), + u32::from_le_bytes(bytes[40..44].try_into().unwrap()), + u32::from_le_bytes(bytes[44..48].try_into().unwrap()), + u32::from_le_bytes(bytes[48..52].try_into().unwrap()), + u32::from_le_bytes(bytes[52..56].try_into().unwrap()), + ]; + + ::ff::FieldBits::new(limbs) + } + + fn char_le_bits() -> ::ff::FieldBits { + ::ff::FieldBits::new($modulus_limbs_32) + } + } + }; +} 
diff --git a/src/pluto_eris/mod.rs b/src/pluto_eris/mod.rs new file mode 100644 index 00000000..fd5c0aac --- /dev/null +++ b/src/pluto_eris/mod.rs @@ -0,0 +1,15 @@ +//! # `Pluto\Eris half-pairing ccyle` +//! +//! Implementation of the Pluto / Eris half-pairing cycle of prime order elliptic curves. +//! +//! Supporting evidence: https://github.com/daira/pluto-eris +//! Field constant derivation: https://github.com/davidnevadoc/ec-constants/tree/main/pluto_eris +//! Pairing constants derivation: https://github.com/John-Gong-Math/pluto_eris/blob/main/pluto_pairing.ipynb +mod curve; +mod engine; +mod fields; + +pub use curve::*; +pub use engine::*; +pub use fields::fp::*; +pub use fields::fq::*;