From b2ec4a878d66ad660948c081c266c46a6b9ffc2a Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Thu, 7 Mar 2024 23:45:45 +0000 Subject: [PATCH 1/5] clippy: fix some nits introduced by #651 --- src/miniscript/astelem.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/miniscript/astelem.rs b/src/miniscript/astelem.rs index d6f47d9b9..d11781a5e 100644 --- a/src/miniscript/astelem.rs +++ b/src/miniscript/astelem.rs @@ -139,7 +139,7 @@ fn fmt_1( a: &D, is_debug: bool, ) -> fmt::Result { - f.write_str(&name)?; + f.write_str(name)?; conditional_fmt(f, a, is_debug)?; f.write_str(")") } @@ -150,7 +150,7 @@ fn fmt_2( b: &D, is_debug: bool, ) -> fmt::Result { - f.write_str(&name)?; + f.write_str(name)?; conditional_fmt(f, a, is_debug)?; f.write_str(",")?; conditional_fmt(f, b, is_debug)?; @@ -163,7 +163,7 @@ fn fmt_n( list: &[D], is_debug: bool, ) -> fmt::Result { - f.write_str(&name)?; + f.write_str(name)?; write!(f, "{}", first)?; for el in list { f.write_str(",")?; From 0666aef121424357b7f4e254f3ac950894ab8236 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Thu, 7 Mar 2024 22:44:50 +0000 Subject: [PATCH 2/5] error: remove some unused variants There is more to come, but do a bunch now because it's cathartic. --- src/lib.rs | 44 -------------------------------------------- 1 file changed, 44 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 514ccdfe8..6c644e6e4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -443,8 +443,6 @@ pub enum Error { Unexpected(String), /// Name of a fragment contained `:` multiple times MultiColon(String), - /// Name of a fragment contained `@` multiple times - MultiAt(String), /// Name of a fragment contained `@` but we were not parsing an OR AtOutsideOr(String), /// Encountered a wrapping character that we don't recognize @@ -453,16 +451,8 @@ pub enum Error { NonTopLevel(String), /// Parsed a miniscript but there were more script opcodes after it Trailing(String), - /// Failed to parse a push as a public key - BadPubkey(bitcoin::key::Error), - /// Could not satisfy a script (fragment) because of a missing hash preimage - MissingHash(sha256::Hash), /// Could not satisfy a script (fragment) because of a missing signature MissingSig(bitcoin::PublicKey), - /// Could not satisfy, relative locktime not met - RelativeLocktimeNotMet(u32), - /// Could not satisfy, absolute locktime not met - AbsoluteLocktimeNotMet(u32), /// General failure to satisfy CouldNotSatisfy, /// Typechecking failed @@ -482,8 +472,6 @@ pub enum Error { ContextError(miniscript::context::ScriptContextError), /// Recursion depth exceeded when parsing policy/miniscript from string MaxRecursiveDepthExceeded, - /// Script size too large - ScriptSizeTooLarge, /// Anything but c:pk(key) (P2PK), c:pk_h(key) (P2PKH), and thresh_m(k,...) /// up to n=3 is invalid by standardness (bare) NonStandardBareScript, @@ -495,12 +483,8 @@ pub enum Error { BareDescriptorAddr, /// PubKey invalid under current context PubKeyCtxError(miniscript::decode::KeyParseError, &'static str), - /// Attempted to call function that requires PreComputed taproot info - TaprootSpendInfoUnavialable, /// No script code for Tr descriptors TrNoScriptCode, - /// No explicit script for Tr descriptors - TrNoExplicitScript, /// At least two BIP389 key expressions in the descriptor contain tuples of /// derivation indexes of different lengths. 
MultipathDescLenMismatch, @@ -512,8 +496,6 @@ pub enum Error { // https://github.com/sipa/miniscript/pull/5 for discussion on this number const MAX_RECURSION_DEPTH: u32 = 402; -// https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki -const MAX_SCRIPT_SIZE: u32 = 10000; impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -531,23 +513,12 @@ impl fmt::Display for Error { Error::UnexpectedStart => f.write_str("unexpected start of script"), Error::Unexpected(ref s) => write!(f, "unexpected «{}»", s), Error::MultiColon(ref s) => write!(f, "«{}» has multiple instances of «:»", s), - Error::MultiAt(ref s) => write!(f, "«{}» has multiple instances of «@»", s), Error::AtOutsideOr(ref s) => write!(f, "«{}» contains «@» in non-or() context", s), Error::UnknownWrapper(ch) => write!(f, "unknown wrapper «{}:»", ch), Error::NonTopLevel(ref s) => write!(f, "non-T miniscript: {}", s), Error::Trailing(ref s) => write!(f, "trailing tokens: {}", s), - Error::MissingHash(ref h) => write!(f, "missing preimage of hash {}", h), Error::MissingSig(ref pk) => write!(f, "missing signature for key {:?}", pk), - Error::RelativeLocktimeNotMet(n) => { - write!(f, "required relative locktime CSV of {} blocks, not met", n) - } - Error::AbsoluteLocktimeNotMet(n) => write!( - f, - "required absolute locktime CLTV of {} blocks, not met", - n - ), Error::CouldNotSatisfy => f.write_str("could not satisfy"), - Error::BadPubkey(ref e) => fmt::Display::fmt(e, f), Error::TypeCheck(ref e) => write!(f, "typecheck: {}", e), Error::BadDescriptor(ref e) => write!(f, "Invalid descriptor: {}", e), Error::Secp(ref e) => fmt::Display::fmt(e, f), @@ -561,11 +532,6 @@ impl fmt::Display for Error { "Recursive depth over {} not permitted", MAX_RECURSION_DEPTH ), - Error::ScriptSizeTooLarge => write!( - f, - "Standardness rules imply bitcoin than {} bytes", - MAX_SCRIPT_SIZE - ), Error::NonStandardBareScript => write!( f, "Anything but c:pk(key) (P2PK), c:pk_h(key) (P2PKH), and thresh_m(k,...) 
\ @@ -579,9 +545,7 @@ impl fmt::Display for Error { write!(f, "Pubkey error: {} under {} scriptcontext", pk, ctx) } Error::MultiATooManyKeys(k) => write!(f, "MultiA too many keys {}", k), - Error::TaprootSpendInfoUnavialable => write!(f, "Taproot Spend Info not computed."), Error::TrNoScriptCode => write!(f, "No script code for Tr descriptors"), - Error::TrNoExplicitScript => write!(f, "No script code for Tr descriptors"), Error::MultipathDescLenMismatch => write!(f, "At least two BIP389 key expressions in the descriptor contain tuples of derivation indexes of different lengths"), Error::AbsoluteLockTime(ref e) => e.fmt(f), Error::RelativeLockTime(ref e) => e.fmt(f), @@ -605,30 +569,22 @@ impl error::Error for Error { | UnexpectedStart | Unexpected(_) | MultiColon(_) - | MultiAt(_) | AtOutsideOr(_) | UnknownWrapper(_) | NonTopLevel(_) | Trailing(_) - | MissingHash(_) | MissingSig(_) - | RelativeLocktimeNotMet(_) - | AbsoluteLocktimeNotMet(_) | CouldNotSatisfy | TypeCheck(_) | BadDescriptor(_) | MaxRecursiveDepthExceeded - | ScriptSizeTooLarge | NonStandardBareScript | ImpossibleSatisfaction | BareDescriptorAddr - | TaprootSpendInfoUnavialable | TrNoScriptCode - | TrNoExplicitScript | MultipathDescLenMismatch => None, Script(e) => Some(e), AddrError(e) => Some(e), - BadPubkey(e) => Some(e), Secp(e) => Some(e), #[cfg(feature = "compiler")] CompilerError(e) => Some(e), From 78db616daa4335f211493436d8aa27613cda4bf4 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Sun, 10 Mar 2024 22:12:48 +0000 Subject: [PATCH 3/5] consolidate some FIXMEs When you grep the codebase for FIXME it's nice if the output isn't overwhelmed by the same message repeated 20 times. --- src/miniscript/types/mod.rs | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/src/miniscript/types/mod.rs b/src/miniscript/types/mod.rs index 2ab554ca1..658331c99 100644 --- a/src/miniscript/types/mod.rs +++ b/src/miniscript/types/mod.rs @@ -220,7 +220,7 @@ impl Type { /// Constructor for the type of the `a:` fragment. pub const fn cast_alt(self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. + // FIXME need to do manual `?` because ? is not supported in constfns. (Also below.) Ok(Type { corr: match Correctness::cast_alt(self.corr) { Ok(x) => x, @@ -232,7 +232,6 @@ impl Type { /// Constructor for the type of the `s:` fragment. pub const fn cast_swap(self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::cast_swap(self.corr) { Ok(x) => x, @@ -255,7 +254,6 @@ impl Type { /// Constructor for the type of the `d:` fragment. pub const fn cast_dupif(self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::cast_dupif(self.corr) { Ok(x) => x, @@ -267,7 +265,6 @@ impl Type { /// Constructor for the type of the `v:` fragment. pub const fn cast_verify(self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::cast_verify(self.corr) { Ok(x) => x, @@ -279,7 +276,6 @@ impl Type { /// Constructor for the type of the `j:` fragment. pub const fn cast_nonzero(self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::cast_nonzero(self.corr) { Ok(x) => x, @@ -291,7 +287,6 @@ impl Type { /// Constructor for the type of the `n:` fragment. 
pub const fn cast_zeronotequal(self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::cast_zeronotequal(self.corr) { Ok(x) => x, @@ -303,7 +298,6 @@ impl Type { /// Constructor for the type of the `t:` fragment. pub const fn cast_true(self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::cast_true(self.corr) { Ok(x) => x, @@ -315,7 +309,6 @@ impl Type { /// Constructor for the type of the `u:` fragment. pub const fn cast_unlikely(self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::cast_or_i_false(self.corr) { Ok(x) => x, @@ -327,7 +320,6 @@ impl Type { /// Constructor for the type of the `l:` fragment. pub const fn cast_likely(self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::cast_or_i_false(self.corr) { Ok(x) => x, @@ -339,7 +331,6 @@ impl Type { /// Constructor for the type of the `and_b` fragment. pub const fn and_b(left: Self, right: Self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::and_b(left.corr, right.corr) { Ok(x) => x, @@ -351,7 +342,6 @@ impl Type { /// Constructor for the type of the `and_v` fragment. pub const fn and_v(left: Self, right: Self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::and_v(left.corr, right.corr) { Ok(x) => x, @@ -363,7 +353,6 @@ impl Type { /// Constructor for the type of the `or_b` fragment. pub const fn or_b(left: Self, right: Self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::or_b(left.corr, right.corr) { Ok(x) => x, @@ -375,7 +364,6 @@ impl Type { /// Constructor for the type of the `or_b` fragment. pub const fn or_d(left: Self, right: Self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::or_d(left.corr, right.corr) { Ok(x) => x, @@ -387,7 +375,6 @@ impl Type { /// Constructor for the type of the `or_c` fragment. pub const fn or_c(left: Self, right: Self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::or_c(left.corr, right.corr) { Ok(x) => x, @@ -410,7 +397,6 @@ impl Type { /// Constructor for the type of the `and_or` fragment. pub const fn and_or(a: Self, b: Self, c: Self) -> Result { - // FIXME need to do manual `?` because ? is not supported in constfns. Ok(Type { corr: match Correctness::and_or(a.corr, b.corr, c.corr) { Ok(x) => x, From bc3b8dc2ec549d3b8e98c54f3347bac4a18b87b8 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Thu, 7 Mar 2024 22:40:09 +0000 Subject: [PATCH 4/5] policy: rename Threshold variant to Thresh This matches the same variant in Terminal, matches the string serialization "thresh", and will avoid weird compiler errors related to collisions with the upcoming `Threshold` type. This will be an annoying API-breaking change, but fortunately(?) this PR is already breaking the API for thresholds in an annoying way. 
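For downstream code this is a pure spelling change at every construction and match site; as of this patch the variant still carries the same k and Vec of Arc'd sub-policies as before. A minimal sketch of the adjustment, mirroring the updated tests (the `two_of_three` helper is illustrative only, not part of this patch):

    use std::sync::Arc;

    use miniscript::bitcoin::PublicKey;
    use miniscript::policy::Concrete;

    // Previously spelled `Concrete::Threshold`; now `Concrete::Thresh`,
    // matching `Terminal::Thresh` and the "thresh(k,...)" string form.
    fn two_of_three(keys: &[PublicKey; 3]) -> Concrete<PublicKey> {
        Concrete::Thresh(
            2,
            keys.iter().map(|pk| Arc::new(Concrete::Key(*pk))).collect(),
        )
    }

The same rename applies to `policy::semantic::Policy`.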
--- src/descriptor/sortedmulti.rs | 2 +- src/descriptor/tr.rs | 4 +- src/policy/compiler.rs | 19 +++++---- src/policy/concrete.rs | 47 ++++++++++------------ src/policy/mod.rs | 32 +++++++-------- src/policy/semantic.rs | 75 ++++++++++++++++------------------- 6 files changed, 82 insertions(+), 97 deletions(-) diff --git a/src/descriptor/sortedmulti.rs b/src/descriptor/sortedmulti.rs index 3862ccf15..1a97790c0 100644 --- a/src/descriptor/sortedmulti.rs +++ b/src/descriptor/sortedmulti.rs @@ -198,7 +198,7 @@ impl SortedMultiVec { impl policy::Liftable for SortedMultiVec { fn lift(&self) -> Result, Error> { - let ret = policy::semantic::Policy::Threshold( + let ret = policy::semantic::Policy::Thresh( self.k, self.pks .iter() diff --git a/src/descriptor/tr.rs b/src/descriptor/tr.rs index 931ffaa1d..0bce1ecf0 100644 --- a/src/descriptor/tr.rs +++ b/src/descriptor/tr.rs @@ -616,7 +616,7 @@ impl Liftable for TapTree { fn lift(&self) -> Result, Error> { fn lift_helper(s: &TapTree) -> Result, Error> { match *s { - TapTree::Tree { ref left, ref right, height: _ } => Ok(Policy::Threshold( + TapTree::Tree { ref left, ref right, height: _ } => Ok(Policy::Thresh( 1, vec![Arc::new(lift_helper(left)?), Arc::new(lift_helper(right)?)], )), @@ -632,7 +632,7 @@ impl Liftable for TapTree { impl Liftable for Tr { fn lift(&self) -> Result, Error> { match &self.tree { - Some(root) => Ok(Policy::Threshold( + Some(root) => Ok(Policy::Thresh( 1, vec![ Arc::new(Policy::Key(self.internal_key.clone())), diff --git a/src/policy/compiler.rs b/src/policy/compiler.rs index d9ec92455..86ca8f114 100644 --- a/src/policy/compiler.rs +++ b/src/policy/compiler.rs @@ -1008,7 +1008,7 @@ where compile_binary!(&mut l_comp[3], &mut r_comp[2], [lw, rw], Terminal::OrI); compile_binary!(&mut r_comp[3], &mut l_comp[2], [rw, lw], Terminal::OrI); } - Concrete::Threshold(k, ref subs) => { + Concrete::Thresh(k, ref subs) => { let n = subs.len(); let k_over_n = k as f64 / n as f64; @@ -1389,7 +1389,7 @@ mod tests { let policy: BPolicy = Concrete::Or(vec![ ( 127, - Arc::new(Concrete::Threshold( + Arc::new(Concrete::Thresh( 3, key_pol[0..5].iter().map(|p| (p.clone()).into()).collect(), )), @@ -1398,7 +1398,7 @@ mod tests { 1, Arc::new(Concrete::And(vec![ Arc::new(Concrete::Older(RelLockTime::from_height(10000))), - Arc::new(Concrete::Threshold( + Arc::new(Concrete::Thresh( 2, key_pol[5..8].iter().map(|p| (p.clone()).into()).collect(), )), @@ -1519,7 +1519,7 @@ mod tests { .iter() .map(|pubkey| Arc::new(Concrete::Key(*pubkey))) .collect(); - let big_thresh = Concrete::Threshold(*k, pubkeys); + let big_thresh = Concrete::Thresh(*k, pubkeys); let big_thresh_ms: SegwitMiniScript = big_thresh.compile().unwrap(); if *k == 21 { // N * (PUSH + pubkey + CHECKSIGVERIFY) @@ -1555,8 +1555,8 @@ mod tests { .collect(); let thresh_res: Result = Concrete::Or(vec![ - (1, Arc::new(Concrete::Threshold(keys_a.len(), keys_a))), - (1, Arc::new(Concrete::Threshold(keys_b.len(), keys_b))), + (1, Arc::new(Concrete::Thresh(keys_a.len(), keys_a))), + (1, Arc::new(Concrete::Thresh(keys_b.len(), keys_b))), ]) .compile(); let script_size = thresh_res.clone().and_then(|m| Ok(m.script_size())); @@ -1573,8 +1573,7 @@ mod tests { .iter() .map(|pubkey| Arc::new(Concrete::Key(*pubkey))) .collect(); - let thresh_res: Result = - Concrete::Threshold(keys.len(), keys).compile(); + let thresh_res: Result = Concrete::Thresh(keys.len(), keys).compile(); let n_elements = thresh_res .clone() .and_then(|m| Ok(m.max_satisfaction_witness_elements())); @@ -1595,7 +1594,7 @@ mod tests { 
.map(|pubkey| Arc::new(Concrete::Key(*pubkey))) .collect(); let thresh_res: Result = - Concrete::Threshold(keys.len() - 1, keys).compile(); + Concrete::Thresh(keys.len() - 1, keys).compile(); let ops_count = thresh_res.clone().and_then(|m| Ok(m.ext.ops.op_count())); assert_eq!( thresh_res, @@ -1609,7 +1608,7 @@ mod tests { .iter() .map(|pubkey| Arc::new(Concrete::Key(*pubkey))) .collect(); - let thresh_res = Concrete::Threshold(keys.len() - 1, keys).compile::(); + let thresh_res = Concrete::Thresh(keys.len() - 1, keys).compile::(); let ops_count = thresh_res.clone().and_then(|m| Ok(m.ext.ops.op_count())); assert_eq!( thresh_res, diff --git a/src/policy/concrete.rs b/src/policy/concrete.rs index 75481ff1d..b70c37ba8 100644 --- a/src/policy/concrete.rs +++ b/src/policy/concrete.rs @@ -69,7 +69,7 @@ pub enum Policy { /// relative probabilities for each one. Or(Vec<(usize, Arc>)>), /// A set of descriptors, satisfactions must be provided for `k` of them. - Threshold(usize, Vec>>), + Thresh(usize, Vec>>), } /// Detailed error type for concrete policies. @@ -187,7 +187,7 @@ impl Policy { }) .collect::>() } - Policy::Threshold(k, ref subs) if *k == 1 => { + Policy::Thresh(k, ref subs) if *k == 1 => { let total_odds = subs.len(); subs.iter() .flat_map(|policy| policy.to_tapleaf_prob_vec(prob / total_odds as f64)) @@ -242,7 +242,7 @@ impl Policy { /// ### TapTree compilation /// /// The policy tree constructed by root-level disjunctions over [`Policy::Or`] and - /// [`Policy::Threshold`](1, ..) which is flattened into a vector (with respective + /// [`Policy::Thresh`](1, ..) which is flattened into a vector (with respective /// probabilities derived from odds) of policies. /// /// For example, the policy `thresh(1,or(pk(A),pk(B)),and(or(pk(C),pk(D)),pk(E)))` gives the @@ -294,7 +294,7 @@ impl Policy { /// ### TapTree compilation /// /// The policy tree constructed by root-level disjunctions over [`Policy::Or`] and - /// [`Policy::Threshold`](k, ..n..) which is flattened into a vector (with respective + /// [`Policy::Thresh`](k, ..n..) which is flattened into a vector (with respective /// probabilities derived from odds) of policies. For example, the policy /// `thresh(1,or(pk(A),pk(B)),and(or(pk(C),pk(D)),pk(E)))` gives the vector /// `[pk(A),pk(B),and(or(pk(C),pk(D)),pk(E)))]`. 
@@ -407,13 +407,13 @@ impl Policy { .map(|(odds, pol)| (prob * *odds as f64 / total_odds as f64, pol.clone())) .collect::>() } - Policy::Threshold(k, subs) if *k == 1 => { + Policy::Thresh(k, subs) if *k == 1 => { let total_odds = subs.len(); subs.iter() .map(|pol| (prob / total_odds as f64, pol.clone())) .collect::>() } - Policy::Threshold(k, subs) if *k != subs.len() => generate_combination(subs, prob, *k), + Policy::Thresh(k, subs) if *k != subs.len() => generate_combination(subs, prob, *k), pol => vec![(prob, Arc::new(pol.clone()))], } } @@ -562,7 +562,7 @@ impl Policy { .enumerate() .map(|(i, (prob, _))| (*prob, child_n(i))) .collect()), - Threshold(ref k, ref subs) => Threshold(*k, (0..subs.len()).map(child_n).collect()), + Thresh(ref k, ref subs) => Thresh(*k, (0..subs.len()).map(child_n).collect()), }; translated.push(Arc::new(new_policy)); } @@ -588,9 +588,7 @@ impl Policy { .enumerate() .map(|(i, (prob, _))| (*prob, child_n(i))) .collect())), - Threshold(k, ref subs) => { - Some(Threshold(*k, (0..subs.len()).map(child_n).collect())) - } + Thresh(k, ref subs) => Some(Thresh(*k, (0..subs.len()).map(child_n).collect())), _ => None, }; match new_policy { @@ -615,7 +613,7 @@ impl Policy { } /// Gets the number of [TapLeaf](`TapTree::Leaf`)s considering exhaustive root-level [`Policy::Or`] - /// and [`Policy::Threshold`] disjunctions for the `TapTree`. + /// and [`Policy::Thresh`] disjunctions for the `TapTree`. #[cfg(feature = "compiler")] fn num_tap_leaves(&self) -> usize { use Policy::*; @@ -626,7 +624,7 @@ impl Policy { let num = match data.node { Or(subs) => (0..subs.len()).map(num_for_child_n).sum(), - Threshold(k, subs) if *k == 1 => (0..subs.len()).map(num_for_child_n).sum(), + Thresh(k, subs) if *k == 1 => (0..subs.len()).map(num_for_child_n).sum(), _ => 1, }; nums.push(num); @@ -709,7 +707,7 @@ impl Policy { let iter = (0..subs.len()).map(info_for_child_n); TimelockInfo::combine_threshold(1, iter) } - Threshold(ref k, subs) => { + Thresh(ref k, subs) => { let iter = (0..subs.len()).map(info_for_child_n); TimelockInfo::combine_threshold(*k, iter) } @@ -745,7 +743,7 @@ impl Policy { return Err(PolicyError::NonBinaryArgOr); } } - Threshold(k, ref subs) => { + Thresh(k, ref subs) => { if k == 0 || k > subs.len() { return Err(PolicyError::IncorrectThresh); } @@ -789,7 +787,7 @@ impl Policy { }); (all_safe, atleast_one_safe && all_non_mall) } - Threshold(k, ref subs) => { + Thresh(k, ref subs) => { let (safe_count, non_mall_count) = (0..subs.len()).map(acc_for_child_n).fold( (0, 0), |(safe_count, non_mall_count), (safe, non_mall)| { @@ -841,7 +839,7 @@ impl fmt::Debug for Policy { } f.write_str(")") } - Policy::Threshold(k, ref subs) => { + Policy::Thresh(k, ref subs) => { write!(f, "thresh({}", k)?; for sub in subs { write!(f, ",{:?}", sub)?; @@ -884,7 +882,7 @@ impl fmt::Display for Policy { } f.write_str(")") } - Policy::Threshold(k, ref subs) => { + Policy::Thresh(k, ref subs) => { write!(f, "thresh({}", k)?; for sub in subs { write!(f, ",{}", sub)?; @@ -999,7 +997,7 @@ impl Policy { for arg in &top.args[1..] { subs.push(Policy::from_tree(arg)?); } - Ok(Policy::Threshold(thresh as usize, subs.into_iter().map(Arc::new).collect())) + Ok(Policy::Thresh(thresh as usize, subs.into_iter().map(Arc::new).collect())) } _ => Err(errstr(top.name)), } @@ -1041,7 +1039,7 @@ fn with_huffman_tree( Ok(node) } -/// Enumerates a [`Policy::Threshold(k, ..n..)`] into `n` different thresh's. +/// Enumerates a [`Policy::Thresh(k, ..n..)`] into `n` different thresh's. 
/// /// ## Strategy /// @@ -1063,7 +1061,7 @@ fn generate_combination( .enumerate() .filter_map(|(j, sub)| if j != i { Some(Arc::clone(sub)) } else { None }) .collect(); - ret.push((prob / policy_vec.len() as f64, Arc::new(Policy::Threshold(k, policies)))); + ret.push((prob / policy_vec.len() as f64, Arc::new(Policy::Thresh(k, policies)))); } ret } @@ -1077,7 +1075,7 @@ impl<'a, Pk: MiniscriptKey> TreeLike for &'a Policy { | Ripemd160(_) | Hash160(_) => Tree::Nullary, And(ref subs) => Tree::Nary(subs.iter().map(Arc::as_ref).collect()), Or(ref v) => Tree::Nary(v.iter().map(|(_, p)| p.as_ref()).collect()), - Threshold(_, ref subs) => Tree::Nary(subs.iter().map(Arc::as_ref).collect()), + Thresh(_, ref subs) => Tree::Nary(subs.iter().map(Arc::as_ref).collect()), } } } @@ -1091,7 +1089,7 @@ impl TreeLike for Arc> { | Ripemd160(_) | Hash160(_) => Tree::Nullary, And(ref subs) => Tree::Nary(subs.iter().map(Arc::clone).collect()), Or(ref v) => Tree::Nary(v.iter().map(|(_, p)| Arc::clone(p)).collect()), - Threshold(_, ref subs) => Tree::Nary(subs.iter().map(Arc::clone).collect()), + Thresh(_, ref subs) => Tree::Nary(subs.iter().map(Arc::clone).collect()), } } } @@ -1137,10 +1135,7 @@ mod compiler_tests { .map(|sub_pol| { ( 0.25, - Arc::new(Policy::Threshold( - 2, - sub_pol.into_iter().map(|p| Arc::new(p)).collect(), - )), + Arc::new(Policy::Thresh(2, sub_pol.into_iter().map(|p| Arc::new(p)).collect())), ) }) .collect::>(); diff --git a/src/policy/mod.rs b/src/policy/mod.rs index 99dade57e..5f1e26828 100644 --- a/src/policy/mod.rs +++ b/src/policy/mod.rs @@ -138,15 +138,12 @@ impl Liftable for Terminal { | Terminal::NonZero(ref sub) | Terminal::ZeroNotEqual(ref sub) => sub.node.lift()?, Terminal::AndV(ref left, ref right) | Terminal::AndB(ref left, ref right) => { - Semantic::Threshold( - 2, - vec![Arc::new(left.node.lift()?), Arc::new(right.node.lift()?)], - ) + Semantic::Thresh(2, vec![Arc::new(left.node.lift()?), Arc::new(right.node.lift()?)]) } - Terminal::AndOr(ref a, ref b, ref c) => Semantic::Threshold( + Terminal::AndOr(ref a, ref b, ref c) => Semantic::Thresh( 1, vec![ - Arc::new(Semantic::Threshold( + Arc::new(Semantic::Thresh( 2, vec![Arc::new(a.node.lift()?), Arc::new(b.node.lift()?)], )), @@ -156,17 +153,16 @@ impl Liftable for Terminal { Terminal::OrB(ref left, ref right) | Terminal::OrD(ref left, ref right) | Terminal::OrC(ref left, ref right) - | Terminal::OrI(ref left, ref right) => Semantic::Threshold( - 1, - vec![Arc::new(left.node.lift()?), Arc::new(right.node.lift()?)], - ), + | Terminal::OrI(ref left, ref right) => { + Semantic::Thresh(1, vec![Arc::new(left.node.lift()?), Arc::new(right.node.lift()?)]) + } Terminal::Thresh(k, ref subs) => { let semantic_subs: Result>, Error> = subs.iter().map(|s| s.node.lift()).collect(); let semantic_subs = semantic_subs?.into_iter().map(Arc::new).collect(); - Semantic::Threshold(k, semantic_subs) + Semantic::Thresh(k, semantic_subs) } - Terminal::Multi(k, ref keys) | Terminal::MultiA(k, ref keys) => Semantic::Threshold( + Terminal::Multi(k, ref keys) | Terminal::MultiA(k, ref keys) => Semantic::Thresh( k, keys.iter() .map(|k| Arc::new(Semantic::Key(k.clone()))) @@ -214,19 +210,19 @@ impl Liftable for Concrete { let semantic_subs: Result>, Error> = subs.iter().map(Liftable::lift).collect(); let semantic_subs = semantic_subs?.into_iter().map(Arc::new).collect(); - Semantic::Threshold(2, semantic_subs) + Semantic::Thresh(2, semantic_subs) } Concrete::Or(ref subs) => { let semantic_subs: Result>, Error> = subs.iter().map(|(_p, sub)| 
sub.lift()).collect(); let semantic_subs = semantic_subs?.into_iter().map(Arc::new).collect(); - Semantic::Threshold(1, semantic_subs) + Semantic::Thresh(1, semantic_subs) } - Concrete::Threshold(k, ref subs) => { + Concrete::Thresh(k, ref subs) => { let semantic_subs: Result>, Error> = subs.iter().map(Liftable::lift).collect(); let semantic_subs = semantic_subs?.into_iter().map(Arc::new).collect(); - Semantic::Threshold(k, semantic_subs) + Semantic::Thresh(k, semantic_subs) } } .normalized(); @@ -362,10 +358,10 @@ mod tests { .parse() .unwrap(); assert_eq!( - Semantic::Threshold( + Semantic::Thresh( 1, vec![ - Arc::new(Semantic::Threshold( + Arc::new(Semantic::Thresh( 2, vec![ Arc::new(Semantic::Key(key_a)), diff --git a/src/policy/semantic.rs b/src/policy/semantic.rs index 3895e0552..73002f05e 100644 --- a/src/policy/semantic.rs +++ b/src/policy/semantic.rs @@ -47,7 +47,7 @@ pub enum Policy { /// A HASH160 whose preimage must be provided to satisfy the descriptor. Hash160(Pk::Hash160), /// A set of descriptors, satisfactions must be provided for `k` of them. - Threshold(usize, Vec>>), + Thresh(usize, Vec>>), } impl ForEachKey for Policy { @@ -122,7 +122,7 @@ impl Policy { Hash160(ref h) => t.hash160(h).map(Hash160)?, Older(ref n) => Older(*n), After(ref n) => After(*n), - Threshold(ref k, ref subs) => Threshold(*k, (0..subs.len()).map(child_n).collect()), + Thresh(ref k, ref subs) => Thresh(*k, (0..subs.len()).map(child_n).collect()), }; translated.push(Arc::new(new_policy)); } @@ -174,7 +174,7 @@ impl Policy { let n_terminals_for_child_n = |n| n_terminals[data.child_indices[n]]; let num = match data.node { - Threshold(_k, subs) => (0..subs.len()).map(n_terminals_for_child_n).sum(), + Thresh(_k, subs) => (0..subs.len()).map(n_terminals_for_child_n).sum(), Trivial | Unsatisfiable => 0, _leaf => 1, }; @@ -190,7 +190,7 @@ impl Policy { fn first_constraint(&self) -> Policy { debug_assert!(self.clone().normalized() == self.clone()); match self { - &Policy::Threshold(_k, ref subs) => subs[0].first_constraint(), + &Policy::Thresh(_k, ref subs) => subs[0].first_constraint(), first => first.clone(), } } @@ -201,19 +201,19 @@ impl Policy { // normalized policy pub(crate) fn satisfy_constraint(self, witness: &Policy, available: bool) -> Policy { debug_assert!(self.clone().normalized() == self); - if let Policy::Threshold { .. } = *witness { - // We can't debug_assert on Policy::Threshold. + if let Policy::Thresh { .. } = *witness { + // We can't debug_assert on Policy::Thresh. 
panic!("should be unreachable") } let ret = match self { - Policy::Threshold(k, subs) => { + Policy::Thresh(k, subs) => { let mut ret_subs = vec![]; for sub in subs { ret_subs.push(sub.as_ref().clone().satisfy_constraint(witness, available)); } let ret_subs = ret_subs.into_iter().map(Arc::new).collect(); - Policy::Threshold(k, ret_subs) + Policy::Thresh(k, ret_subs) } ref leaf if leaf == witness => { if available { @@ -240,7 +240,7 @@ impl fmt::Debug for Policy { Policy::Hash256(ref h) => write!(f, "hash256({})", h), Policy::Ripemd160(ref h) => write!(f, "ripemd160({})", h), Policy::Hash160(ref h) => write!(f, "hash160({})", h), - Policy::Threshold(k, ref subs) => { + Policy::Thresh(k, ref subs) => { if k == subs.len() { write!(f, "and(")?; } else if k == 1 { @@ -273,7 +273,7 @@ impl fmt::Display for Policy { Policy::Hash256(ref h) => write!(f, "hash256({})", h), Policy::Ripemd160(ref h) => write!(f, "ripemd160({})", h), Policy::Hash160(ref h) => write!(f, "hash160({})", h), - Policy::Threshold(k, ref subs) => { + Policy::Thresh(k, ref subs) => { if k == subs.len() { write!(f, "and(")?; } else if k == 1 { @@ -342,7 +342,7 @@ impl expression::FromTree for Policy { for arg in &top.args { subs.push(Arc::new(Policy::from_tree(arg)?)); } - Ok(Policy::Threshold(nsubs, subs)) + Ok(Policy::Thresh(nsubs, subs)) } ("or", nsubs) => { if nsubs < 2 { @@ -352,7 +352,7 @@ impl expression::FromTree for Policy { for arg in &top.args { subs.push(Arc::new(Policy::from_tree(arg)?)); } - Ok(Policy::Threshold(1, subs)) + Ok(Policy::Thresh(1, subs)) } ("thresh", nsubs) => { if nsubs == 0 || nsubs == 1 { @@ -379,7 +379,7 @@ impl expression::FromTree for Policy { for arg in &top.args[1..] { subs.push(Arc::new(Policy::from_tree(arg)?)); } - Ok(Policy::Threshold(thresh as usize, subs)) + Ok(Policy::Thresh(thresh as usize, subs)) } _ => Err(errstr(top.name)), } @@ -391,7 +391,7 @@ impl Policy { /// `Unsatisfiable`s. Does not reorder any branches; use `.sort`. pub fn normalized(self) -> Policy { match self { - Policy::Threshold(k, subs) => { + Policy::Thresh(k, subs) => { let mut ret_subs = Vec::with_capacity(subs.len()); let subs: Vec<_> = subs @@ -416,15 +416,15 @@ impl Policy { for sub in subs { match sub.as_ref() { Policy::Trivial | Policy::Unsatisfiable => {} - Policy::Threshold(ref k, ref subs) => { + Policy::Thresh(ref k, ref subs) => { match (is_and, is_or) { (true, true) => { // means m = n = 1, thresh(1,X) type thing. - ret_subs.push(Arc::new(Policy::Threshold(*k, subs.to_vec()))); + ret_subs.push(Arc::new(Policy::Thresh(*k, subs.to_vec()))); } (true, false) if *k == subs.len() => ret_subs.extend(subs.to_vec()), // and case (false, true) if *k == 1 => ret_subs.extend(subs.to_vec()), // or case - _ => ret_subs.push(Arc::new(Policy::Threshold(*k, subs.to_vec()))), + _ => ret_subs.push(Arc::new(Policy::Thresh(*k, subs.to_vec()))), } } x => ret_subs.push(Arc::new(x.clone())), @@ -440,11 +440,11 @@ impl Policy { // Only one strong reference because we created the Arc when pushing to ret_subs. 
Arc::try_unwrap(policy).unwrap() } else if is_and { - Policy::Threshold(ret_subs.len(), ret_subs) + Policy::Thresh(ret_subs.len(), ret_subs) } else if is_or { - Policy::Threshold(1, ret_subs) + Policy::Thresh(1, ret_subs) } else { - Policy::Threshold(m, ret_subs) + Policy::Thresh(m, ret_subs) } } x => x, @@ -515,14 +515,12 @@ impl Policy { let new_policy = match data.node.as_ref() { Older(ref t) => { if relative::LockTime::from(*t).is_implied_by(age) { - Some(Policy::Older(*t)) + Some(Older(*t)) } else { - Some(Policy::Unsatisfiable) + Some(Unsatisfiable) } } - Threshold(k, ref subs) => { - Some(Threshold(*k, (0..subs.len()).map(child_n).collect())) - } + Thresh(k, ref subs) => Some(Thresh(*k, (0..subs.len()).map(child_n).collect())), _ => None, }; match new_policy { @@ -554,9 +552,7 @@ impl Policy { Some(Unsatisfiable) } } - Threshold(k, ref subs) => { - Some(Threshold(*k, (0..subs.len()).map(child_n).collect())) - } + Thresh(k, ref subs) => Some(Thresh(*k, (0..subs.len()).map(child_n).collect())), _ => None, }; match new_policy { @@ -597,7 +593,7 @@ impl Policy { Trivial | After(..) | Older(..) | Sha256(..) | Hash256(..) | Ripemd160(..) | Hash160(..) => Some(0), Key(..) => Some(1), - Threshold(k, ref subs) => { + Thresh(k, ref subs) => { let mut sublens = (0..subs.len()) .filter_map(minimum_n_keys_for_child_n) .collect::>(); @@ -631,10 +627,10 @@ impl Policy { let child_n = |n| Arc::clone(&sorted[data.child_indices[n]]); let new_policy = match data.node.as_ref() { - Threshold(k, ref subs) => { + Thresh(k, ref subs) => { let mut subs = (0..subs.len()).map(child_n).collect::>(); subs.sort(); - Some(Threshold(*k, subs)) + Some(Thresh(*k, subs)) } _ => None, }; @@ -657,7 +653,7 @@ impl<'a, Pk: MiniscriptKey> TreeLike for &'a Policy { match *self { Unsatisfiable | Trivial | Key(_) | After(_) | Older(_) | Sha256(_) | Hash256(_) | Ripemd160(_) | Hash160(_) => Tree::Nullary, - Threshold(_, ref subs) => Tree::Nary(subs.iter().map(Arc::as_ref).collect()), + Thresh(_, ref subs) => Tree::Nary(subs.iter().map(Arc::as_ref).collect()), } } } @@ -669,7 +665,7 @@ impl TreeLike for Arc> { match self.as_ref() { Unsatisfiable | Trivial | Key(_) | After(_) | Older(_) | Sha256(_) | Hash256(_) | Ripemd160(_) | Hash160(_) => Tree::Nullary, - Threshold(_, ref subs) => Tree::Nary(subs.iter().map(Arc::clone).collect()), + Thresh(_, ref subs) => Tree::Nary(subs.iter().map(Arc::clone).collect()), } } } @@ -744,7 +740,7 @@ mod tests { let policy = StringPolicy::from_str("or(pk(),older(1000))").unwrap(); assert_eq!( policy, - Policy::Threshold( + Policy::Thresh( 1, vec![ Policy::Key("".to_owned()).into(), @@ -775,7 +771,7 @@ mod tests { let policy = StringPolicy::from_str("or(pk(),UNSATISFIABLE)").unwrap(); assert_eq!( policy, - Policy::Threshold( + Policy::Thresh( 1, vec![ Policy::Key("".to_owned()).into(), @@ -791,7 +787,7 @@ mod tests { let policy = StringPolicy::from_str("and(pk(),UNSATISFIABLE)").unwrap(); assert_eq!( policy, - Policy::Threshold( + Policy::Thresh( 2, vec![ Policy::Key("".to_owned()).into(), @@ -812,7 +808,7 @@ mod tests { .unwrap(); assert_eq!( policy, - Policy::Threshold( + Policy::Thresh( 2, vec![ Policy::Older(RelLockTime::from_height(1000)).into(), @@ -836,7 +832,7 @@ mod tests { .unwrap(); assert_eq!( policy, - Policy::Threshold( + Policy::Thresh( 2, vec![ Policy::Older(RelLockTime::from_height(1000)).into(), @@ -949,8 +945,7 @@ mod tests { 
"or(and(older(4096),thresh(2,pk(A),pk(B),pk(C))),thresh(11,pk(F1),pk(F2),pk(F3),pk(F4),pk(F5),pk(F6),pk(F7),pk(F8),pk(F9),pk(F10),pk(F11),pk(F12),pk(F13),pk(F14)))").unwrap(); // Very bad idea to add master key,pk but let's have it have 50M blocks let master_key = StringPolicy::from_str("and(older(50000000),pk(master))").unwrap(); - let new_liquid_pol = - Policy::Threshold(1, vec![liquid_pol.clone().into(), master_key.into()]); + let new_liquid_pol = Policy::Thresh(1, vec![liquid_pol.clone().into(), master_key.into()]); assert!(liquid_pol.clone().entails(new_liquid_pol.clone()).unwrap()); assert!(!new_liquid_pol.entails(liquid_pol.clone()).unwrap()); From 200991dbed0411bcad36990b366feddc7580ae0b Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Tue, 12 Mar 2024 14:13:34 +0000 Subject: [PATCH 5/5] compiler: improve logic for deciding between thresholds and ands In our threshold logic we have some special cases to handle multi, multi_a, and conjunctions. However, we were not choosing between these special cases correctly. Our logic was roughly that we would try to use multi_a if all children were pk()s, OR try to use multi if all children were pk() and there weren't too many, OR try to use a conjunction if k == n. The correct logic is: if all children are keys, and there aren't too many, try to use multi or multi_a. ALSO, if k == n, try to use a conjunction. With this fix, the compiler correctly finds that conjunctions are more efficient than CHECKMULTISIG when k == n and n < 3. When n == 3 the two cases have equal cost, but it seems to prefer the conjunction. It also correctly finds that when k == n, it is always more efficient to use a conjunction than to use CHECKSIGADD. This change necessitates changing some tests. --- examples/taproot.rs | 12 ++++---- src/miniscript/decode.rs | 8 ++---- src/miniscript/limits.rs | 2 ++ src/policy/compiler.rs | 61 +++++++++++++++++++++++++--------------- src/policy/mod.rs | 19 +++++++------ 5 files changed, 59 insertions(+), 43 deletions(-) diff --git a/examples/taproot.rs b/examples/taproot.rs index cb01539ac..766d45a75 100644 --- a/examples/taproot.rs +++ b/examples/taproot.rs @@ -45,7 +45,7 @@ fn main() { let desc = pol.compile_tr(Some("UNSPENDABLE_KEY".to_string())).unwrap(); let expected_desc = - Descriptor::::from_str("tr(Ca,{and_v(v:pk(In),older(9)),multi_a(2,hA,S)})") + Descriptor::::from_str("tr(Ca,{and_v(v:pk(In),older(9)),and_v(v:pk(hA),pk(S))})") .unwrap(); assert_eq!(desc, expected_desc); @@ -73,7 +73,7 @@ fn main() { ); assert_eq!( iter.next().unwrap(), - (1u8, &Miniscript::::from_str("multi_a(2,hA,S)").unwrap()) + (1u8, &Miniscript::::from_str("and_v(v:pk(hA),pk(S))").unwrap()) ); assert_eq!(iter.next(), None); } @@ -97,19 +97,19 @@ fn main() { let real_desc = desc.translate_pk(&mut t).unwrap(); // Max satisfaction weight for compilation, corresponding to the script-path spend - // `multi_a(2,PUBKEY_1,PUBKEY_2) at tap tree depth 1, having: + // `and_v(PUBKEY_1,PUBKEY_2) at tap tree depth 1, having: // // max_witness_size = varint(control_block_size) + control_block size + // varint(script_size) + script_size + max_satisfaction_size - // = 1 + 65 + 1 + 70 + 132 = 269 + // = 1 + 65 + 1 + 68 + 132 = 269 let max_sat_wt = real_desc.max_weight_to_satisfy().unwrap(); - assert_eq!(max_sat_wt, 269); + assert_eq!(max_sat_wt, 267); // Compute the bitcoin address and check if it matches let network = Network::Bitcoin; let addr = real_desc.address(network).unwrap(); let expected_addr = bitcoin::Address::from_str( - 
"bc1pcc8ku64slu3wu04a6g376d2s8ck9y5alw5sus4zddvn8xgpdqw2swrghwx", + "bc1p4l2xzq7js40965s5w0fknd287kdlmt2dljte37zsc5a34u0h9c4q85snyd", ) .unwrap() .assume_checked(); diff --git a/src/miniscript/decode.rs b/src/miniscript/decode.rs index de2ab195f..6500eb8d5 100644 --- a/src/miniscript/decode.rs +++ b/src/miniscript/decode.rs @@ -11,11 +11,10 @@ use core::marker::PhantomData; use std::error; use bitcoin::hashes::{hash160, ripemd160, sha256, Hash}; -use bitcoin::Weight; use sync::Arc; use crate::miniscript::lex::{Token as Tk, TokenIter}; -use crate::miniscript::limits::MAX_PUBKEYS_PER_MULTISIG; +use crate::miniscript::limits::{MAX_PUBKEYS_IN_CHECKSIGADD, MAX_PUBKEYS_PER_MULTISIG}; use crate::miniscript::types::extra_props::ExtData; use crate::miniscript::types::Type; use crate::miniscript::ScriptContext; @@ -451,10 +450,9 @@ pub fn parse( }, // MultiA Tk::NumEqual, Tk::Num(k) => { - let max = Weight::MAX_BLOCK.to_wu() / 32; // Check size before allocating keys - if k as u64 > max { - return Err(Error::MultiATooManyKeys(max)) + if k as usize > MAX_PUBKEYS_IN_CHECKSIGADD { + return Err(Error::MultiATooManyKeys(MAX_PUBKEYS_IN_CHECKSIGADD as u64)) } let mut keys = Vec::with_capacity(k as usize); // atleast k capacity while tokens.peek() == Some(&Tk::CheckSigAdd) { diff --git a/src/miniscript/limits.rs b/src/miniscript/limits.rs index 1631d7bb3..abd111f96 100644 --- a/src/miniscript/limits.rs +++ b/src/miniscript/limits.rs @@ -33,3 +33,5 @@ pub const MAX_BLOCK_WEIGHT: usize = 4000000; /// Maximum pubkeys as arguments to CHECKMULTISIG // https://github.com/bitcoin/bitcoin/blob/6acda4b00b3fc1bfac02f5de590e1a5386cbc779/src/script/script.h#L30 pub const MAX_PUBKEYS_PER_MULTISIG: usize = 20; +/// Maximum pubkeys in a CHECKSIGADD construction. +pub const MAX_PUBKEYS_IN_CHECKSIGADD: usize = (bitcoin::Weight::MAX_BLOCK.to_wu() / 32) as usize; diff --git a/src/policy/compiler.rs b/src/policy/compiler.rs index 86ca8f114..dc60a3784 100644 --- a/src/policy/compiler.rs +++ b/src/policy/compiler.rs @@ -12,7 +12,7 @@ use std::error; use sync::Arc; use crate::miniscript::context::SigType; -use crate::miniscript::limits::MAX_PUBKEYS_PER_MULTISIG; +use crate::miniscript::limits::{MAX_PUBKEYS_IN_CHECKSIGADD, MAX_PUBKEYS_PER_MULTISIG}; use crate::miniscript::types::{self, ErrorKind, ExtData, Type}; use crate::miniscript::ScriptContext; use crate::policy::Concrete; @@ -1065,24 +1065,26 @@ where }) .collect(); - match Ctx::sig_type() { - SigType::Schnorr if key_vec.len() == subs.len() => { - insert_wrap!(AstElemExt::terminal(Terminal::MultiA(k, key_vec))) - } - SigType::Ecdsa - if key_vec.len() == subs.len() && subs.len() <= MAX_PUBKEYS_PER_MULTISIG => - { - insert_wrap!(AstElemExt::terminal(Terminal::Multi(k, key_vec))) + if key_vec.len() == subs.len() { + match Ctx::sig_type() { + SigType::Schnorr => { + if key_vec.len() <= MAX_PUBKEYS_IN_CHECKSIGADD { + insert_wrap!(AstElemExt::terminal(Terminal::MultiA(k, key_vec))) + } + } + SigType::Ecdsa => { + if key_vec.len() <= MAX_PUBKEYS_PER_MULTISIG { + insert_wrap!(AstElemExt::terminal(Terminal::Multi(k, key_vec))) + } + } } - _ if k == subs.len() => { - let mut it = subs.iter(); - let mut policy = it.next().expect("No sub policy in thresh() ?").clone(); - policy = - it.fold(policy, |acc, pol| Concrete::And(vec![acc, pol.clone()]).into()); + } + if k == subs.len() { + let mut it = subs.iter(); + let mut policy = it.next().expect("No sub policy in thresh() ?").clone(); + policy = it.fold(policy, |acc, pol| Concrete::And(vec![acc, pol.clone()]).into()); - ret = 
best_compilations(policy_cache, policy.as_ref(), sat_prob, dissat_prob)?; - } - _ => {} + ret = best_compilations(policy_cache, policy.as_ref(), sat_prob, dissat_prob)?; } // FIXME: Should we also optimize thresh(1, subs) ? @@ -1501,13 +1503,19 @@ mod tests { fn compile_thresh() { let (keys, _) = pubkeys_and_a_sig(21); - // Up until 20 keys, thresh should be compiled to a multi no matter the value of k + // For 3 < n <= 20, thresh should be compiled to a multi no matter the value of k for k in 1..4 { - let small_thresh: BPolicy = - policy_str!("thresh({},pk({}),pk({}),pk({}))", k, keys[0], keys[1], keys[2]); + let small_thresh: BPolicy = policy_str!( + "thresh({},pk({}),pk({}),pk({}),pk({}))", + k, + keys[0], + keys[1], + keys[2], + keys[3] + ); let small_thresh_ms: SegwitMiniScript = small_thresh.compile().unwrap(); let small_thresh_ms_expected: SegwitMiniScript = - ms_str!("multi({},{},{},{})", k, keys[0], keys[1], keys[2]); + ms_str!("multi({},{},{},{},{})", k, keys[0], keys[1], keys[2], keys[3]); assert_eq!(small_thresh_ms, small_thresh_ms_expected); } @@ -1640,8 +1648,15 @@ mod tests { let small_thresh: Concrete = policy_str!("{}", &format!("thresh({},pk(B),pk(C),pk(D))", k)); let small_thresh_ms: Miniscript = small_thresh.compile().unwrap(); - let small_thresh_ms_expected: Miniscript = ms_str!("multi_a({},B,C,D)", k); - assert_eq!(small_thresh_ms, small_thresh_ms_expected); + // When k == 3 it is more efficient to use and_v than multi_a + if k == 3 { + assert_eq!( + small_thresh_ms, + ms_str!("and_v(v:and_v(vc:pk_k(B),c:pk_k(C)),c:pk_k(D))") + ); + } else { + assert_eq!(small_thresh_ms, ms_str!("multi_a({},B,C,D)", k)); + } } } } diff --git a/src/policy/mod.rs b/src/policy/mod.rs index 5f1e26828..7faccbc14 100644 --- a/src/policy/mod.rs +++ b/src/policy/mod.rs @@ -505,12 +505,12 @@ mod tests { Tr::::from_str( "tr(UNSPEND ,{ { - {multi_a(7,B,C,D,E,F,G,H),multi_a(7,A,C,D,E,F,G,H)}, - {multi_a(7,A,B,D,E,F,G,H),multi_a(7,A,B,C,E,F,G,H)} + {and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:pk(B),pk(C)),pk(D)),pk(E)),pk(F)),pk(G)),pk(H)),and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:pk(A),pk(C)),pk(D)),pk(E)),pk(F)),pk(G)),pk(H))}, + {and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:pk(A),pk(B)),pk(D)),pk(E)),pk(F)),pk(G)),pk(H)),and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:pk(A),pk(B)),pk(C)),pk(E)),pk(F)),pk(G)),pk(H))} }, { - {multi_a(7,A,B,C,D,F,G,H),multi_a(7,A,B,C,D,E,G,H)} - ,{multi_a(7,A,B,C,D,E,F,H),multi_a(7,A,B,C,D,E,F,G)} + {and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:pk(A),pk(B)),pk(C)),pk(D)),pk(F)),pk(G)),pk(H)),and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:pk(A),pk(B)),pk(C)),pk(D)),pk(E)),pk(G)),pk(H))}, + {and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:pk(A),pk(B)),pk(C)),pk(D)),pk(E)),pk(F)),pk(H)),and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:and_v(v:pk(A),pk(B)),pk(C)),pk(D)),pk(E)),pk(F)),pk(G))} }})" .replace(&['\t', ' ', '\n'][..], "") .as_str(), @@ -526,18 +526,19 @@ mod tests { let desc = pol .compile_tr_private_experimental(Some(unspendable_key.clone())) .unwrap(); + println!("{}", desc); let expected_desc = Descriptor::Tr( Tr::::from_str( "tr(UNSPEND, {{ - {multi_a(3,A,D,E),multi_a(3,A,C,E)}, - {multi_a(3,A,C,D),multi_a(3,A,B,E)}\ + {and_v(v:and_v(v:pk(A),pk(D)),pk(E)),and_v(v:and_v(v:pk(A),pk(C)),pk(E))}, + {and_v(v:and_v(v:pk(A),pk(C)),pk(D)),and_v(v:and_v(v:pk(A),pk(B)),pk(E))} }, { - {multi_a(3,A,B,D),multi_a(3,A,B,C)}, + {and_v(v:and_v(v:pk(A),pk(B)),pk(D)),and_v(v:and_v(v:pk(A),pk(B)),pk(C))}, { - {multi_a(3,C,D,E),multi_a(3,B,D,E)}, - 
{multi_a(3,B,C,E),multi_a(3,B,C,D)} + {and_v(v:and_v(v:pk(C),pk(D)),pk(E)),and_v(v:and_v(v:pk(B),pk(D)),pk(E))}, + {and_v(v:and_v(v:pk(B),pk(C)),pk(E)),and_v(v:and_v(v:pk(B),pk(C)),pk(D))} }}})" .replace(&['\t', ' ', '\n'][..], "") .as_str(),