From 6f54ed921be62b93ecb4c44fadeff60cac1d85ca Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Wed, 12 Jul 2023 15:32:54 +1000 Subject: [PATCH 01/41] Pin serde_json As usual crates in the Rust ecosystem don't give a f**k about semver rules. Pin `serde_json` for 1.41 and 1.47 CI runs. Remove the pinning docs from the README and instead point devs to the CI script. (The docs are stale currently and are an unnecessary maintenance burden.) --- README.md | 9 +-------- contrib/test.sh | 2 ++ 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 9f46164bb..3dcc102a9 100644 --- a/README.md +++ b/README.md @@ -44,14 +44,7 @@ This library should always compile with any combination of features (minus `no-std`) on **Rust 1.41.1** or **Rust 1.47** with `no-std`. Some dependencies do not play nicely with our MSRV, if you are running the tests -you may need to pin as follows: - -``` -cargo update --package url --precise 2.2.2 -cargo update --package form_urlencoded --precise 1.0.1 -cargo update -p once_cell --precise 1.13.1 -cargo update -p bzip2 --precise 0.4.2 -``` +you may need to pin some dependencies. See `./contrib/test.sh` for current pinning. ## Contributing diff --git a/contrib/test.sh b/contrib/test.sh index eed23e09c..631cd6940 100755 --- a/contrib/test.sh +++ b/contrib/test.sh @@ -17,12 +17,14 @@ fi # Pin dependencies required to build with Rust 1.41.1 if cargo --version | grep "1\.41\.0"; then cargo update -p once_cell --precise 1.13.1 + cargo update -p serde_json --precise 1.0.99 cargo update -p serde --precise 1.0.156 fi # Pin dependencies required to build with Rust 1.47.0 if cargo --version | grep "1\.47\.0"; then cargo update -p once_cell --precise 1.13.1 + cargo update -p serde_json --precise 1.0.99 cargo update -p serde --precise 1.0.156 fi From a797fcc629c2458058e0d36a59e0a8afd2874f4a Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Mon, 3 Jul 2023 11:19:00 +1000 Subject: [PATCH 02/41] examples: htlc: Fix doc typos --- examples/htlc.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/htlc.rs b/examples/htlc.rs index 4e7bf3572..029af4dbc 100644 --- a/examples/htlc.rs +++ b/examples/htlc.rs @@ -51,7 +51,7 @@ fn main() { "or(and(pk(022222222222222222222222222222222222222222222222222222222222222222),sha256(1111111111111111111111111111111111111111111111111111111111111111)),and(pk(020202020202020202020202020202020202020202020202020202020202020202),older(4444)))" ); - // Get the scriptPpubkey for this Wsh descriptor. + // Get the scriptPubkey for this Wsh descriptor. assert_eq!( format!("{:x}", htlc_descriptor.script_pubkey()), "0020d853877af928a8d2a569c9c0ed14bd16f6a80ce9cccaf8a6150fd8f7f8867ae2" @@ -63,7 +63,7 @@ fn main() { "21022222222222222222222222222222222222222222222222222222222222222222ac6476a91451814f108670aced2d77c1805ddd6634bc9d473188ad025c11b26782012088a82011111111111111111111111111111111111111111111111111111111111111118768" ); - // Get the address for this Wsh descriptor.v + // Get the address for this Wsh descriptor. assert_eq!( format!("{}", htlc_descriptor.address(Network::Bitcoin)), "bc1qmpfcw7he9z5d9ftfe8qw699azmm2sr8fen903fs4plv007yx0t3qxfmqv5" From a836bcb85fb5f5de082fa401704a22536117fb02 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Mon, 3 Jul 2023 11:23:31 +1000 Subject: [PATCH 03/41] examples: Use SPDX licence identifiers As we do in the main source directory; use SPDX license identifiers in all of the examples files. 
The SPDX license is a balance between minimal noise and having license coverage for those who want/like/need it. Note, removes the "written by" section if it uses Andrews name because he has stated elsewhere that this is ok. Leaves Thomas' attribution in there. --- examples/htlc.rs | 15 ++------------- examples/parse.rs | 14 +------------- examples/psbt.rs | 2 ++ examples/psbt_sign_finalize.rs | 2 ++ examples/sign_multisig.rs | 14 +------------- examples/taproot.rs | 2 ++ examples/verify_tx.rs | 14 +------------- examples/xpub_descriptors.rs | 14 +------------- 8 files changed, 12 insertions(+), 65 deletions(-) diff --git a/examples/htlc.rs b/examples/htlc.rs index 029af4dbc..80dcc5e3c 100644 --- a/examples/htlc.rs +++ b/examples/htlc.rs @@ -1,16 +1,5 @@ -// Miniscript -// Written in 2019 by -// Thomas Eizinger -// -// To the extent possible under law, the author(s) have dedicated all -// copyright and related and neighboring rights to this software to -// the public domain worldwide. This software is distributed without -// any warranty. -// -// You should have received a copy of the CC0 Public Domain Dedication -// along with this software. -// If not, see . -// +// Written by Thomas Eizinger +// SPDX-License-Identifier: CC0-1.0 //! Example: Create an HTLC with miniscript using the policy compiler diff --git a/examples/parse.rs b/examples/parse.rs index 9bd00ff8c..04c2fd837 100644 --- a/examples/parse.rs +++ b/examples/parse.rs @@ -1,16 +1,4 @@ -// Miniscript -// Written in 2019 by -// Andrew Poelstra -// -// To the extent possible under law, the author(s) have dedicated all -// copyright and related and neighboring rights to this software to -// the public domain worldwide. This software is distributed without -// any warranty. -// -// You should have received a copy of the CC0 Public Domain Dedication -// along with this software. -// If not, see . -// +// SPDX-License-Identifier: CC0-1.0 //! Example: Parsing a descriptor from a string. diff --git a/examples/psbt.rs b/examples/psbt.rs index b0fcad06c..a8b7e22f4 100644 --- a/examples/psbt.rs +++ b/examples/psbt.rs @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: CC0-1.0 + use bitcoin::consensus::encode::deserialize; use bitcoin::hashes::hex::FromHex; use bitcoin::psbt::Psbt; diff --git a/examples/psbt_sign_finalize.rs b/examples/psbt_sign_finalize.rs index 44c397582..6cd7ad0f1 100644 --- a/examples/psbt_sign_finalize.rs +++ b/examples/psbt_sign_finalize.rs @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: CC0-1.0 + use std::collections::BTreeMap; use std::str::FromStr; diff --git a/examples/sign_multisig.rs b/examples/sign_multisig.rs index e4b1cac33..1e913f609 100644 --- a/examples/sign_multisig.rs +++ b/examples/sign_multisig.rs @@ -1,16 +1,4 @@ -// Miniscript -// Written in 2019 by -// Andrew Poelstra -// -// To the extent possible under law, the author(s) have dedicated all -// copyright and related and neighboring rights to this software to -// the public domain worldwide. This software is distributed without -// any warranty. -// -// You should have received a copy of the CC0 Public Domain Dedication -// along with this software. -// If not, see . -// +// SPDX-License-Identifier: CC0-1.0 //! Example: Signing a 2-of-3 multisignature. 
diff --git a/examples/taproot.rs b/examples/taproot.rs index 0d64ebeff..b9f6958f7 100644 --- a/examples/taproot.rs +++ b/examples/taproot.rs @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: CC0-1.0 + use std::collections::HashMap; use std::str::FromStr; diff --git a/examples/verify_tx.rs b/examples/verify_tx.rs index 9f2387a2f..98a21d43c 100644 --- a/examples/verify_tx.rs +++ b/examples/verify_tx.rs @@ -1,16 +1,4 @@ -// Miniscript -// Written in 2019 by -// Andrew Poelstra -// -// To the extent possible under law, the author(s) have dedicated all -// copyright and related and neighboring rights to this software to -// the public domain worldwide. This software is distributed without -// any warranty. -// -// You should have received a copy of the CC0 Public Domain Dedication -// along with this software. -// If not, see . -// +// SPDX-License-Identifier: CC0-1.0 //! Example: Verifying a signed transaction. diff --git a/examples/xpub_descriptors.rs b/examples/xpub_descriptors.rs index f640fad35..08f31ecee 100644 --- a/examples/xpub_descriptors.rs +++ b/examples/xpub_descriptors.rs @@ -1,16 +1,4 @@ -// Miniscript -// Written in 2019 by -// Andrew Poelstra -// -// To the extent possible under law, the author(s) have dedicated all -// copyright and related and neighboring rights to this software to -// the public domain worldwide. This software is distributed without -// any warranty. -// -// You should have received a copy of the CC0 Public Domain Dedication -// along with this software. -// If not, see . -// +// SPDX-License-Identifier: CC0-1.0 //! Example: Parsing a xpub and getting an address. From e83c09cae2acb3271b1cbe3173ac48fcc00dd9e8 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Mon, 3 Jul 2023 11:58:37 +1000 Subject: [PATCH 04/41] Move bip-174 test out of examples and into tests The `examples/psbt.rs` file isn't adding much value as an example, it is just implementing tests against test vectors from the bip. Better to put it in `tests` than in `examples`. --- Cargo.toml | 4 ---- contrib/test.sh | 1 - examples/psbt.rs => tests/bip-174.rs | 0 3 files changed, 5 deletions(-) rename examples/psbt.rs => tests/bip-174.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 8b3d57749..e71cba51d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,10 +52,6 @@ required-features = ["std"] name = "verify_tx" required-features = ["std"] -[[example]] -name = "psbt" -required-features = ["std"] - [[example]] name = "xpub_descriptors" required-features = ["std"] diff --git a/contrib/test.sh b/contrib/test.sh index 631cd6940..4e7233d82 100755 --- a/contrib/test.sh +++ b/contrib/test.sh @@ -58,7 +58,6 @@ then cargo run --example parse cargo run --example sign_multisig cargo run --example verify_tx > /dev/null - cargo run --example psbt cargo run --example xpub_descriptors cargo run --example taproot --features=compiler cargo run --example psbt_sign_finalize --features=base64 diff --git a/examples/psbt.rs b/tests/bip-174.rs similarity index 100% rename from examples/psbt.rs rename to tests/bip-174.rs From 93ef50282d4f2604056d9c68e42882248b82d883 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Mon, 3 Jul 2023 12:02:20 +1000 Subject: [PATCH 05/41] examples: Remove base64 dev dependency We already have `base64` available from the `bitcoin` dependency, use it instead of using a dev dependency. 
--- Cargo.toml | 1 - examples/psbt_sign_finalize.rs | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e71cba51d..5c35fd1c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,6 @@ actual-serde = { package = "serde", version = "1.0.103", optional = true } serde_test = "1.0.147" bitcoin = { version = "0.30.0", features = ["base64"] } secp256k1 = {version = "0.27.0", features = ["rand-std"]} -actual-base64 = { package = "base64", version = "0.13.0" } [[example]] name = "htlc" diff --git a/examples/psbt_sign_finalize.rs b/examples/psbt_sign_finalize.rs index 6cd7ad0f1..c9b836d6d 100644 --- a/examples/psbt_sign_finalize.rs +++ b/examples/psbt_sign_finalize.rs @@ -3,9 +3,8 @@ use std::collections::BTreeMap; use std::str::FromStr; -use actual_base64 as base64; use bitcoin::sighash::SighashCache; -use bitcoin::PrivateKey; +use bitcoin::{base64, PrivateKey}; use miniscript::bitcoin::consensus::encode::deserialize; use miniscript::bitcoin::hashes::hex::FromHex; use miniscript::bitcoin::psbt::PartiallySignedTransaction as Psbt; From b1ce6cdb062fe899119096a830fc35b34d76fd26 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Mon, 3 Jul 2023 12:14:15 +1000 Subject: [PATCH 06/41] examples: Clean up import statements Examples are meant to showcase how to use `miniscript`; software using `miniscript` as a dependency should not need to depend directly on `bitcoin` since we re-export it from the `miniscript` crate root. Our examples are more applicable if they do not assume a dependency on `bitcoin` exists. Use `use miniscript::bitcoin` to get at types/functions in `bitcoin`. --- examples/htlc.rs | 2 +- examples/psbt_sign_finalize.rs | 8 ++++---- examples/taproot.rs | 8 ++++---- examples/verify_tx.rs | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/examples/htlc.rs b/examples/htlc.rs index 80dcc5e3c..8f864d639 100644 --- a/examples/htlc.rs +++ b/examples/htlc.rs @@ -5,7 +5,7 @@ use std::str::FromStr; -use bitcoin::Network; +use miniscript::bitcoin::Network; use miniscript::descriptor::Wsh; use miniscript::policy::{Concrete, Liftable}; diff --git a/examples/psbt_sign_finalize.rs b/examples/psbt_sign_finalize.rs index c9b836d6d..ab05401b8 100644 --- a/examples/psbt_sign_finalize.rs +++ b/examples/psbt_sign_finalize.rs @@ -3,13 +3,13 @@ use std::collections::BTreeMap; use std::str::FromStr; -use bitcoin::sighash::SighashCache; -use bitcoin::{base64, PrivateKey}; use miniscript::bitcoin::consensus::encode::deserialize; use miniscript::bitcoin::hashes::hex::FromHex; -use miniscript::bitcoin::psbt::PartiallySignedTransaction as Psbt; +use miniscript::bitcoin::psbt::{self, Psbt}; +use miniscript::bitcoin::sighash::SighashCache; use miniscript::bitcoin::{ - self, psbt, secp256k1, Address, Network, OutPoint, Script, Sequence, Transaction, TxIn, TxOut, + self, base64, secp256k1, Address, Network, OutPoint, PrivateKey, Script, Sequence, Transaction, + TxIn, TxOut, }; use miniscript::psbt::{PsbtExt, PsbtInputExt}; use miniscript::Descriptor; diff --git a/examples/taproot.rs b/examples/taproot.rs index b9f6958f7..677546f83 100644 --- a/examples/taproot.rs +++ b/examples/taproot.rs @@ -3,10 +3,10 @@ use std::collections::HashMap; use std::str::FromStr; -use bitcoin::address::WitnessVersion; -use bitcoin::key::XOnlyPublicKey; -use bitcoin::secp256k1::{rand, KeyPair}; -use bitcoin::Network; +use miniscript::bitcoin::address::WitnessVersion; +use miniscript::bitcoin::key::{KeyPair, XOnlyPublicKey}; +use miniscript::bitcoin::secp256k1::rand; 
+use miniscript::bitcoin::Network; use miniscript::descriptor::DescriptorType; use miniscript::policy::Concrete; use miniscript::{translate_hash_fail, Descriptor, Miniscript, Tap, TranslatePk, Translator}; diff --git a/examples/verify_tx.rs b/examples/verify_tx.rs index 98a21d43c..bcac4fd3e 100644 --- a/examples/verify_tx.rs +++ b/examples/verify_tx.rs @@ -4,9 +4,9 @@ use std::str::FromStr; -use bitcoin::consensus::Decodable; -use bitcoin::secp256k1::{self, Secp256k1}; -use bitcoin::{absolute, sighash, Sequence}; +use miniscript::bitcoin::consensus::Decodable; +use miniscript::bitcoin::secp256k1::{self, Secp256k1}; +use miniscript::bitcoin::{absolute, sighash, Sequence}; use miniscript::interpreter::KeySigPair; fn main() { From d981a95799aaf76e3d23c3838056617e92fd62c3 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Mon, 3 Jul 2023 12:33:56 +1000 Subject: [PATCH 07/41] examples: taproot: Do minor docs cleanup --- examples/taproot.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/taproot.rs b/examples/taproot.rs index 677546f83..bb4974bd4 100644 --- a/examples/taproot.rs +++ b/examples/taproot.rs @@ -23,9 +23,8 @@ impl Translator for StrPkTranslator { self.pk_map.get(pk).copied().ok_or(()) } - // We don't need to implement these methods as we are not using them in the policy - // Fail if we encounter any hash fragments. - // See also translate_hash_clone! macro + // We don't need to implement these methods as we are not using them in the policy. + // Fail if we encounter any hash fragments. See also translate_hash_clone! macro. translate_hash_fail!(String, XOnlyPublicKey, ()); } @@ -54,7 +53,7 @@ fn main() { // Check whether the descriptors are safe. assert!(desc.sanity_check().is_ok()); - // Descriptor Type and Version should match respectively for Taproot + // Descriptor type and version should match respectively for taproot let desc_type = desc.desc_type(); assert_eq!(desc_type, DescriptorType::Tr); assert_eq!(desc_type.segwit_version().unwrap(), WitnessVersion::V1); @@ -101,11 +100,12 @@ fn main() { let real_desc = desc.translate_pk(&mut t).unwrap(); - // Max Satisfaction Weight for compilation, corresponding to the script-path spend - // `multi_a(2,PUBKEY_1,PUBKEY_2) at taptree depth 1, having - // Max Witness Size = varint(control_block_size) + control_block size + - // varint(script_size) + script_size + max_satisfaction_size - // = 1 + 65 + 1 + 70 + 132 = 269 + // Max satisfaction weight for compilation, corresponding to the script-path spend + // `multi_a(2,PUBKEY_1,PUBKEY_2) at taptree depth 1, having: + // + // max_witness_size = varint(control_block_size) + control_block size + + // varint(script_size) + script_size + max_satisfaction_size + // = 1 + 65 + 1 + 70 + 132 = 269 let max_sat_wt = real_desc.max_weight_to_satisfy().unwrap(); assert_eq!(max_sat_wt, 269); From 288a6f1efe257cd45876e580671f0cb6b430959d Mon Sep 17 00:00:00 2001 From: "Tobin C. 
Harding" Date: Mon, 3 Jul 2023 12:34:23 +1000 Subject: [PATCH 08/41] examples: taproot: Use turbofish syntax --- examples/taproot.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/taproot.rs b/examples/taproot.rs index bb4974bd4..3c8c6b66e 100644 --- a/examples/taproot.rs +++ b/examples/taproot.rs @@ -40,7 +40,7 @@ fn main() { .replace(&[' ', '\n', '\t'][..], ""); let _ms = Miniscript::::from_str("and_v(v:ripemd160(H),pk(A))").unwrap(); - let pol: Concrete = Concrete::from_str(&pol_str).unwrap(); + let pol = Concrete::::from_str(&pol_str).unwrap(); // In case we can't find an internal key for the given policy, we set the internal key to // a random pubkey as specified by BIP341 (which are *unspendable* by any party :p) let desc = pol.compile_tr(Some("UNSPENDABLE_KEY".to_string())).unwrap(); From 0e4fa343c4771fba1e194b9b31e45777c3c2390c Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Mon, 3 Jul 2023 13:06:35 +1000 Subject: [PATCH 09/41] Move semantic/abstract docs to the semantic module The comment on why we use semantic and abstract interchangeable is better placed in the `semantic` module docs. --- src/policy/mod.rs | 2 -- src/policy/semantic.rs | 3 +++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/policy/mod.rs b/src/policy/mod.rs index 83348333d..335da849b 100644 --- a/src/policy/mod.rs +++ b/src/policy/mod.rs @@ -20,8 +20,6 @@ pub mod concrete; pub mod semantic; pub use self::concrete::Policy as Concrete; -/// Semantic policies are "abstract" policies elsewhere; but we -/// avoid this word because it is a reserved keyword in Rust pub use self::semantic::Policy as Semantic; use crate::descriptor::Descriptor; use crate::miniscript::{Miniscript, ScriptContext}; diff --git a/src/policy/semantic.rs b/src/policy/semantic.rs index b188492bb..b2c41fd73 100644 --- a/src/policy/semantic.rs +++ b/src/policy/semantic.rs @@ -2,6 +2,9 @@ // SPDX-License-Identifier: CC0-1.0 //! Abstract Policies +//! +//! We use the term "semantic" for abstract policies because "abstract" is +//! a reserved keyword in Rust. use core::str::FromStr; use core::{fmt, str}; From 062d1b8ba3de2081211c1dd5c7761babcdaa0bfd Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Mon, 3 Jul 2023 14:26:58 +1000 Subject: [PATCH 10/41] Remove duplicate docs Currently the `translate_pk` functions on `semantic::Policy` and `concrete::Policy` have exactly the same docs in the `# Examples` section, down to the usage of "abstract" in both. Direct docs readers from one function to the other to see the example. --- src/policy/concrete.rs | 44 ++---------------------------------------- 1 file changed, 2 insertions(+), 42 deletions(-) diff --git a/src/policy/concrete.rs b/src/policy/concrete.rs index 8a080200e..4ab053490 100644 --- a/src/policy/concrete.rs +++ b/src/policy/concrete.rs @@ -689,49 +689,9 @@ impl Policy { } } - /// Convert a policy using one kind of public key to another - /// type of public key + /// Convert a policy using one kind of public key to another type of public key. 
/// - /// # Example - /// - /// ``` - /// use miniscript::{bitcoin::PublicKey, policy::concrete::Policy, Translator, hash256}; - /// use std::str::FromStr; - /// use miniscript::translate_hash_fail; - /// use std::collections::HashMap; - /// use miniscript::bitcoin::hashes::{sha256, hash160, ripemd160}; - /// let alice_key = "0270cf3c71f65a3d93d285d9149fddeeb638f87a2d4d8cf16c525f71c417439777"; - /// let bob_key = "02f43b15c50a436f5335dbea8a64dd3b4e63e34c3b50c42598acb5f4f336b5d2fb"; - /// let placeholder_policy = Policy::::from_str("and(pk(alice_key),pk(bob_key))").unwrap(); - /// - /// // Information to translator abstract String type keys to concrete bitcoin::PublicKey. - /// // In practice, wallets would map from String key names to BIP32 keys - /// struct StrPkTranslator { - /// pk_map: HashMap - /// } - /// - /// // If we also wanted to provide mapping of other associated types(sha256, older etc), - /// // we would use the general Translator Trait. - /// impl Translator for StrPkTranslator { - /// // Provides the translation public keys P -> Q - /// fn pk(&mut self, pk: &String) -> Result { - /// self.pk_map.get(pk).copied().ok_or(()) // Dummy Err - /// } - /// - /// // Fail for hash types - /// translate_hash_fail!(String, bitcoin::PublicKey, ()); - /// } - /// - /// let mut pk_map = HashMap::new(); - /// pk_map.insert(String::from("alice_key"), bitcoin::PublicKey::from_str(alice_key).unwrap()); - /// pk_map.insert(String::from("bob_key"), bitcoin::PublicKey::from_str(bob_key).unwrap()); - /// let mut t = StrPkTranslator { pk_map: pk_map }; - /// - /// let real_policy = placeholder_policy.translate_pk(&mut t).unwrap(); - /// - /// let expected_policy = Policy::from_str(&format!("and(pk({}),pk({}))", alice_key, bob_key)).unwrap(); - /// assert_eq!(real_policy, expected_policy); - /// ``` + /// For example usage please see [`crate::policy::semantic::Policy::translate_pk`]. pub fn translate_pk(&self, t: &mut T) -> Result, E> where T: Translator, From 1bd3070b0e3fca2e87a096fb5913a2db06ee7b17 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 13 Jul 2023 14:59:34 +1000 Subject: [PATCH 11/41] policy: Put some love into the rustdocs Put some love and care into the docs in the `policy` module: - Use standard tense on functions - Use full sentences - Fix grammar/spelling/typos - Tidy up rustdoc examples - Use standard section headers (eg, `# Returns`) - Fix links - Use backticks I doubt this is perfect because fixing (and reviewing) docs is a bit boring but I'm confident it is an improvement :) There was a couple of functions that I could not re-write the docs for at the time I did this work but I've misplaced them for now. --- src/policy/compiler.rs | 4 +- src/policy/concrete.rs | 133 +++++++++++++++++++++-------------------- src/policy/mod.rs | 48 ++++++++------- src/policy/semantic.rs | 129 ++++++++++++++++++++------------------- 4 files changed, 163 insertions(+), 151 deletions(-) diff --git a/src/policy/compiler.rs b/src/policy/compiler.rs index 9b29ee6ba..3d90a9f24 100644 --- a/src/policy/compiler.rs +++ b/src/policy/compiler.rs @@ -24,7 +24,7 @@ use crate::{policy, Miniscript, MiniscriptKey, Terminal}; type PolicyCache = BTreeMap<(Concrete, OrdF64, Option), BTreeMap>>; -///Ordered f64 for comparison +/// Ordered f64 for comparison. #[derive(Copy, Clone, PartialEq, PartialOrd, Debug)] pub(crate) struct OrdF64(pub f64); @@ -36,7 +36,7 @@ impl Ord for OrdF64 { } } -/// Detailed Error type for Compiler +/// Detailed error type for compiler. 
#[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum CompilerError { /// Compiler has non-safe input policy. diff --git a/src/policy/concrete.rs b/src/policy/concrete.rs index 4ab053490..9cb184738 100644 --- a/src/policy/concrete.rs +++ b/src/policy/concrete.rs @@ -35,35 +35,35 @@ use crate::{errstr, AbsLockTime, Error, ForEachKey, MiniscriptKey, Translator}; #[cfg(feature = "compiler")] const MAX_COMPILATION_LEAVES: usize = 1024; -/// Concrete policy which corresponds directly to a Miniscript structure, +/// Concrete policy which corresponds directly to a miniscript structure, /// and whose disjunctions are annotated with satisfaction probabilities -/// to assist the compiler +/// to assist the compiler. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum Policy { - /// Unsatisfiable + /// Unsatisfiable. Unsatisfiable, - /// Trivially satisfiable + /// Trivially satisfiable. Trivial, - /// A public key which must sign to satisfy the descriptor + /// A public key which must sign to satisfy the descriptor. Key(Pk), - /// An absolute locktime restriction + /// An absolute locktime restriction. After(AbsLockTime), - /// A relative locktime restriction + /// A relative locktime restriction. Older(Sequence), - /// A SHA256 whose preimage must be provided to satisfy the descriptor + /// A SHA256 whose preimage must be provided to satisfy the descriptor. Sha256(Pk::Sha256), - /// A SHA256d whose preimage must be provided to satisfy the descriptor + /// A SHA256d whose preimage must be provided to satisfy the descriptor. Hash256(Pk::Hash256), - /// A RIPEMD160 whose preimage must be provided to satisfy the descriptor + /// A RIPEMD160 whose preimage must be provided to satisfy the descriptor. Ripemd160(Pk::Ripemd160), - /// A HASH160 whose preimage must be provided to satisfy the descriptor + /// A HASH160 whose preimage must be provided to satisfy the descriptor. Hash160(Pk::Hash160), - /// A list of sub-policies, all of which must be satisfied + /// A list of sub-policies, all of which must be satisfied. And(Vec>), /// A list of sub-policies, one of which must be satisfied, along with - /// relative probabilities for each one + /// relative probabilities for each one. Or(Vec<(usize, Policy)>), - /// A set of descriptors, satisfactions must be provided for `k` of them + /// A set of descriptors, satisfactions must be provided for `k` of them. Threshold(usize, Vec>), } @@ -183,29 +183,28 @@ impl From> for PolicyArc { } } -/// Detailed Error type for Policies +/// Detailed error type for concrete policies. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum PolicyError { - /// `And` fragments only support two args + /// `And` fragments only support two args. NonBinaryArgAnd, - /// `Or` fragments only support two args + /// `Or` fragments only support two args. NonBinaryArgOr, - /// `Thresh` fragment can only have `1<=k<=n` + /// `Thresh` fragment can only have `1<=k<=n`. IncorrectThresh, - /// `older` or `after` fragment can only have `n = 0` + /// `older` or `after` fragment can only have `n = 0`. ZeroTime, - /// `after` fragment can only have ` n < 2^31` + /// `after` fragment can only have `n < 2^31`. TimeTooFar, - /// Semantic Policy Error: `And` `Or` fragments must take args: k > 1 + /// Semantic Policy Error: `And` `Or` fragments must take args: `k > 1`. InsufficientArgsforAnd, - /// Semantic Policy Error: `And` `Or` fragments must take args: k > 1 + /// Semantic policy error: `And` `Or` fragments must take args: `k > 1`. 
InsufficientArgsforOr, - /// Entailment max terminals exceeded + /// Entailment max terminals exceeded. EntailmentMaxTerminals, - /// lifting error: Cannot lift policies that have - /// a combination of height and timelocks. + /// Cannot lift policies that have a combination of height and timelocks. HeightTimelockCombination, - /// Duplicate Public Keys + /// Duplicate Public Keys. DuplicatePubKeys, } @@ -278,8 +277,8 @@ impl error::Error for PolicyError { } impl Policy { - /// Flatten the [`Policy`] tree structure into a Vector of tuple `(leaf script, leaf probability)` - /// with leaf probabilities corresponding to odds for sub-branch in the policy. + /// Flattens the [`Policy`] tree structure into a vector of tuples `(leaf script, leaf probability)` + /// with leaf probabilities corresponding to odds for each sub-branch in the policy. /// We calculate the probability of selecting the sub-branch at every level and calculate the /// leaf probabilities as the probability of traversing through required branches to reach the /// leaf node, i.e. multiplication of the respective probabilities. @@ -298,7 +297,7 @@ impl Policy { /// /// ## Constraints /// - /// Since this splitting might lead to exponential blow-up, we constraint the number of + /// Since this splitting might lead to exponential blow-up, we constrain the number of /// leaf-nodes to [`MAX_COMPILATION_LEAVES`]. #[cfg(feature = "compiler")] fn to_tapleaf_prob_vec(&self, prob: f64) -> Vec<(f64, Policy)> { @@ -323,7 +322,7 @@ impl Policy { } } - /// Extract the internal_key from policy tree. + /// Extracts the internal_key from this policy tree. #[cfg(feature = "compiler")] fn extract_key(self, unspendable_key: Option) -> Result<(Pk, Policy), Error> { let mut internal_key: Option = None; @@ -366,13 +365,14 @@ impl Policy { } } - /// Compile the [`Policy`] into a [`Descriptor::Tr`]. + /// Compiles the [`Policy`] into a [`Descriptor::Tr`]. /// /// ### TapTree compilation /// - /// The policy tree constructed by root-level disjunctions over [`Or`][`Policy::Or`] and - /// [`Thresh`][`Policy::Threshold`](1, ..) which is flattened into a vector (with respective + /// The policy tree constructed by root-level disjunctions over [`Policy::Or`] and + /// [`Policy::Threshold`](1, ..) which is flattened into a vector (with respective /// probabilities derived from odds) of policies. + /// /// For example, the policy `thresh(1,or(pk(A),pk(B)),and(or(pk(C),pk(D)),pk(E)))` gives the /// vector `[pk(A),pk(B),and(or(pk(C),pk(D)),pk(E)))]`. Each policy in the vector is compiled /// into the respective miniscripts. A Huffman Tree is created from this vector which optimizes @@ -424,7 +424,7 @@ impl Policy { /// ### TapTree compilation /// /// The policy tree constructed by root-level disjunctions over [`Policy::Or`] and - /// [`Policy::Threshold`] (k, ..n..) which is flattened into a vector (with respective + /// [`Policy::Threshold`](k, ..n..) which is flattened into a vector (with respective /// probabilities derived from odds) of policies. For example, the policy /// `thresh(1,or(pk(A),pk(B)),and(or(pk(C),pk(D)),pk(E)))` gives the vector /// `[pk(A),pk(B),and(or(pk(C),pk(D)),pk(E)))]`. @@ -437,8 +437,6 @@ impl Policy { /// enumeration or limits exceed. For a given [`Policy`], we maintain an [ordered /// set](`BTreeSet`) of `(prob, policy)` (ordered by probability) to maintain the list of /// enumerated sub-policies whose disjunction is isomorphic to initial policy (*invariant*). 
- /// - /// [`Policy`]: crate::policy::concrete::Policy #[cfg(feature = "compiler")] pub fn compile_tr_private_experimental( &self, @@ -480,16 +478,16 @@ impl Policy { } } - /// Compile the [`Policy`] into desc_ctx [`Descriptor`] + /// Compiles the [`Policy`] into `desc_ctx` [`Descriptor`] /// - /// In case of [Tr][`DescriptorCtx::Tr`], `internal_key` is used for the Taproot comilation when + /// In case of [`DescriptorCtx::Tr`], `internal_key` is used for the taproot compilation when /// no public key can be inferred from the given policy. /// /// # NOTE: /// - /// It is **not recommended** to use policy as a stable identifier for a miniscript. - /// You should use the policy compiler once, and then use the miniscript output as a stable identifier. - /// See the compiler document in doc/compiler.md for more details. + /// It is **not recommended** to use policy as a stable identifier for a miniscript. You should + /// use the policy compiler once, and then use the miniscript output as a stable identifier. See + /// the compiler document in [`doc/compiler.md`] for more details. #[cfg(feature = "compiler")] pub fn compile_to_descriptor( &self, @@ -511,13 +509,13 @@ impl Policy { } } - /// Compile the descriptor into an optimized `Miniscript` representation + /// Compiles the descriptor into an optimized `Miniscript` representation. /// /// # NOTE: /// - /// It is **not recommended** to use policy as a stable identifier for a miniscript. - /// You should use the policy compiler once, and then use the miniscript output as a stable identifier. - /// See the compiler document in doc/compiler.md for more details. + /// It is **not recommended** to use policy as a stable identifier for a miniscript. You should + /// use the policy compiler once, and then use the miniscript output as a stable identifier. See + /// the compiler document in doc/compiler.md for more details. #[cfg(feature = "compiler")] pub fn compile(&self) -> Result, CompilerError> { self.is_valid()?; @@ -531,10 +529,11 @@ impl Policy { #[cfg(feature = "compiler")] impl PolicyArc { - /// Given a [`Policy`], return a vector of policies whose disjunction is isomorphic to the initial one. - /// This function is supposed to incrementally expand i.e. represent the policy as disjunction over - /// sub-policies output by it. The probability calculations are similar as - /// [to_tapleaf_prob_vec][`Policy::to_tapleaf_prob_vec`] + /// Returns a vector of policies whose disjunction is isomorphic to the initial one. + /// + /// This function is supposed to incrementally expand i.e. represent the policy as + /// disjunction over sub-policies output by it. The probability calculations are similar + /// to [`Policy::to_tapleaf_prob_vec`]. #[cfg(feature = "compiler")] fn enumerate_pol(&self, prob: f64) -> Vec<(f64, Arc)> { match self { @@ -563,8 +562,6 @@ impl PolicyArc { /// enumeration or limits exceed. For a given [`Policy`], we maintain an [ordered /// set](`BTreeSet`) of `(prob, policy)` (ordered by probability) to maintain the list of /// enumerated sub-policies whose disjunction is isomorphic to initial policy (*invariant*). - /// - /// [`Policy`]: crate::policy::concrete::Policy #[cfg(feature = "compiler")] fn enumerate_policy_tree(self, prob: f64) -> Vec<(f64, Arc)> { let mut tapleaf_prob_vec = BTreeSet::<(Reverse, Arc)>::new(); @@ -689,7 +686,7 @@ impl Policy { } } - /// Convert a policy using one kind of public key to another type of public key. + /// Converts a policy using one kind of public key to another type of public key. 
/// /// For example usage please see [`crate::policy::semantic::Policy::translate_pk`]. pub fn translate_pk(&self, t: &mut T) -> Result, E> @@ -733,7 +730,7 @@ impl Policy { } } - /// Translate `Concrete::Key(key)` to `Concrete::Unsatisfiable` when extracting TapKey + /// Translates `Concrete::Key(key)` to `Concrete::Unsatisfiable` when extracting `TapKey`. pub fn translate_unsatisfiable_pk(self, key: &Pk) -> Policy { match self { Policy::Key(ref k) if k.clone() == *key => Policy::Unsatisfiable, @@ -757,7 +754,7 @@ impl Policy { } } - /// Get all keys in the policy + /// Gets all keys in the policy. pub fn keys(&self) -> Vec<&Pk> { match *self { Policy::Key(ref pk) => vec![pk], @@ -774,8 +771,8 @@ impl Policy { } } - /// Get the number of [TapLeaf][`TapTree::Leaf`] considering exhaustive root-level [OR][`Policy::Or`] - /// and [Thresh][`Policy::Threshold`] disjunctions for the TapTree. + /// Gets the number of [TapLeaf](`TapTree::Leaf`)s considering exhaustive root-level [`Policy::Or`] + /// and [`Policy::Threshold`] disjunctions for the `TapTree`. #[cfg(feature = "compiler")] fn num_tap_leaves(&self) -> usize { match self { @@ -787,7 +784,7 @@ impl Policy { } } - /// Check on the number of TapLeaves + /// Does checks on the number of `TapLeaf`s. #[cfg(feature = "compiler")] fn check_num_tapleaves(&self) -> Result<(), Error> { if self.num_tap_leaves() > MAX_COMPILATION_LEAVES { @@ -796,7 +793,7 @@ impl Policy { Ok(()) } - /// Check whether the policy contains duplicate public keys + /// Checks whether the policy contains duplicate public keys. pub fn check_duplicate_keys(&self) -> Result<(), PolicyError> { let pks = self.keys(); let pks_len = pks.len(); @@ -811,8 +808,11 @@ impl Policy { /// Checks whether the given concrete policy contains a combination of /// timelocks and heightlocks. + /// + /// # Returns + /// /// Returns an error if there is at least one satisfaction that contains - /// a combination of hieghtlock and timelock. + /// a combination of heightlock and timelock. pub fn check_timelocks(&self) -> Result<(), PolicyError> { let timelocks = self.check_timelocks_helper(); if timelocks.contains_combination { @@ -922,11 +922,14 @@ impl Policy { _ => Ok(()), } } - /// This returns whether any possible compilation of the policy could be - /// compiled as non-malleable and safe. Note that this returns a tuple - /// (safe, non-malleable) to avoid because the non-malleability depends on - /// safety and we would like to cache results. + + /// Checks if any possible compilation of the policy could be compiled + /// as non-malleable and safe. + /// + /// # Returns /// + /// Returns a tuple `(safe, non-malleable)` to avoid the fact that + /// non-malleability depends on safety and we would like to cache results. pub fn is_safe_nonmalleable(&self) -> (bool, bool) { match *self { Policy::Unsatisfiable | Policy::Trivial => (true, true), @@ -1192,7 +1195,7 @@ impl_from_tree!( } ); -/// Create a Huffman Tree from compiled [Miniscript] nodes +/// Creates a Huffman Tree from compiled [`Miniscript`] nodes. #[cfg(feature = "compiler")] fn with_huffman_tree( ms: Vec<(OrdF64, Miniscript)>, @@ -1223,7 +1226,7 @@ fn with_huffman_tree( Ok(node) } -/// Enumerate a [Thresh][`Policy::Threshold`](k, ..n..) into `n` different thresh. +/// Enumerates a [`Policy::Threshold(k, ..n..)`] into `n` different thresh's. 
/// /// ## Strategy /// diff --git a/src/policy/mod.rs b/src/policy/mod.rs index 335da849b..857ae0207 100644 --- a/src/policy/mod.rs +++ b/src/policy/mod.rs @@ -25,35 +25,38 @@ use crate::descriptor::Descriptor; use crate::miniscript::{Miniscript, ScriptContext}; use crate::{Error, MiniscriptKey, Terminal}; -/// Policy entailment algorithm maximum number of terminals allowed +/// Policy entailment algorithm maximum number of terminals allowed. const ENTAILMENT_MAX_TERMINALS: usize = 20; + /// Trait describing script representations which can be lifted into /// an abstract policy, by discarding information. +/// /// After Lifting all policies are converted into `KeyHash(Pk::HasH)` to /// maintain the following invariant(modulo resource limits): /// `Lift(Concrete) == Concrete -> Miniscript -> Script -> Miniscript -> Semantic` -/// Lifting from [Miniscript], [Descriptor] can fail -/// if the miniscript contains a timelock combination or if it contains a -/// branch that exceeds resource limits. -/// Lifting from Concrete policies can fail if it contains a timelock -/// combination. It is possible that concrete policy has some branches that -/// exceed resource limits for any compilation, but cannot detect such -/// policies while lifting. Note that our compiler would not succeed for any -/// such policies. +/// +/// Lifting from [`Miniscript`] or [`Descriptor`] can fail if the miniscript +/// contains a timelock combination or if it contains a branch that exceeds +/// resource limits. +/// +/// Lifting from concrete policies can fail if the policy contains a timelock +/// combination. It is possible that a concrete policy has some branches that +/// exceed resource limits for any compilation but cannot detect such policies +/// while lifting. Note that our compiler would not succeed for any such +/// policies. pub trait Liftable { - /// Convert the object into an abstract policy + /// Converts this object into an abstract policy. fn lift(&self) -> Result, Error>; } -/// Detailed Error type for Policies +/// Error occurring during lifting. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum LiftError { - /// Cannot lift policies that have - /// a combination of height and timelocks. + /// Cannot lift policies that have a combination of height and timelocks. HeightTimelockCombination, - /// Duplicate Public Keys + /// Duplicate public keys. BranchExceedResourceLimits, - /// Cannot lift raw descriptors + /// Cannot lift raw descriptors. RawDescriptorLift, } @@ -83,14 +86,13 @@ impl error::Error for LiftError { } impl Miniscript { - /// Lifting corresponds conversion of miniscript into Policy - /// [policy.semantic.Policy] for human readable or machine analysis. - /// However, naively lifting miniscripts can result in incorrect - /// interpretations that don't correspond underlying semantics when - /// we try to spend them on bitcoin network. - /// This can occur if the miniscript contains a - /// 1. Timelock combination - /// 2. Contains a spend that exceeds resource limits + /// Lifting corresponds to conversion of a miniscript into a [`Semantic`] + /// policy for human readable or machine analysis. However, naively lifting + /// miniscripts can result in incorrect interpretations that don't + /// correspond to the underlying semantics when we try to spend them on + /// bitcoin network. This can occur if the miniscript contains: + /// 1. A combination of timelocks + /// 2. 
A spend that exceeds resource limits pub fn lift_check(&self) -> Result<(), LiftError> { if !self.within_resource_limits() { Err(LiftError::BranchExceedResourceLimits) diff --git a/src/policy/semantic.rs b/src/policy/semantic.rs index b2c41fd73..38dbbfd70 100644 --- a/src/policy/semantic.rs +++ b/src/policy/semantic.rs @@ -3,8 +3,8 @@ //! Abstract Policies //! -//! We use the term "semantic" for abstract policies because "abstract" is -//! a reserved keyword in Rust. +//! We use the terms "semantic" and "abstract" interchangeably because +//! "abstract" is a reserved keyword in Rust. use core::str::FromStr; use core::{fmt, str}; @@ -16,33 +16,33 @@ use super::ENTAILMENT_MAX_TERMINALS; use crate::prelude::*; use crate::{errstr, expression, AbsLockTime, Error, ForEachKey, MiniscriptKey, Translator}; -/// Abstract policy which corresponds to the semantics of a Miniscript -/// and which allows complex forms of analysis, e.g. filtering and -/// normalization. +/// Abstract policy which corresponds to the semantics of a miniscript and +/// which allows complex forms of analysis, e.g. filtering and normalization. +/// /// Semantic policies store only hashes of keys to ensure that objects -/// representing the same policy are lifted to the same `Semantic`, +/// representing the same policy are lifted to the same abstract `Policy`, /// regardless of their choice of `pk` or `pk_h` nodes. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] pub enum Policy { - /// Unsatisfiable + /// Unsatisfiable. Unsatisfiable, - /// Trivially satisfiable + /// Trivially satisfiable. Trivial, - /// Signature and public key matching a given hash is required + /// Signature and public key matching a given hash is required. Key(Pk), - /// An absolute locktime restriction + /// An absolute locktime restriction. After(AbsLockTime), - /// A relative locktime restriction + /// A relative locktime restriction. Older(Sequence), - /// A SHA256 whose preimage must be provided to satisfy the descriptor + /// A SHA256 whose preimage must be provided to satisfy the descriptor. Sha256(Pk::Sha256), - /// A SHA256d whose preimage must be provided to satisfy the descriptor + /// A SHA256d whose preimage must be provided to satisfy the descriptor. Hash256(Pk::Hash256), - /// A RIPEMD160 whose preimage must be provided to satisfy the descriptor + /// A RIPEMD160 whose preimage must be provided to satisfy the descriptor. Ripemd160(Pk::Ripemd160), - /// A HASH160 whose preimage must be provided to satisfy the descriptor + /// A HASH160 whose preimage must be provided to satisfy the descriptor. Hash160(Pk::Hash160), - /// A set of descriptors, satisfactions must be provided for `k` of them + /// A set of descriptors, satisfactions must be provided for `k` of them. Threshold(usize, Vec>), } @@ -50,14 +50,16 @@ impl Policy where Pk: MiniscriptKey, { - /// Construct a `Policy::After` from `n`. Helper function equivalent to - /// `Policy::After(absolute::LockTime::from_consensus(n))`. + /// Constructs a `Policy::After` from `n`. + /// + /// Helper function equivalent to `Policy::After(absolute::LockTime::from_consensus(n))`. pub fn after(n: u32) -> Policy { Policy::After(AbsLockTime::from(absolute::LockTime::from_consensus(n))) } - /// Construct a `Policy::Older` from `n`. Helper function equivalent to - /// `Policy::Older(Sequence::from_consensus(n))`. + /// Construct a `Policy::Older` from `n`. + /// + /// Helper function equivalent to `Policy::Older(Sequence::from_consensus(n))`. 
pub fn older(n: u32) -> Policy { Policy::Older(Sequence::from_consensus(n)) } @@ -86,42 +88,41 @@ impl Policy { } } - /// Convert a policy using one kind of public key to another - /// type of public key + /// Converts a policy using one kind of public key to another type of public key. /// - /// # Example + /// # Examples /// /// ``` - /// use miniscript::{bitcoin::{hashes::hash160, PublicKey}, policy::semantic::Policy, Translator}; - /// use miniscript::translate_hash_fail; - /// use std::str::FromStr; /// use std::collections::HashMap; + /// use std::str::FromStr; + /// use miniscript::bitcoin::{hashes::hash160, PublicKey}; + /// use miniscript::{translate_hash_fail, policy::semantic::Policy, Translator}; /// let alice_pk = "02c79ef3ede6d14f72a00d0e49b4becfb152197b64c0707425c4f231df29500ee7"; /// let bob_pk = "03d008a849fbf474bd17e9d2c1a827077a468150e58221582ec3410ab309f5afe4"; /// let placeholder_policy = Policy::::from_str("and(pk(alice_pk),pk(bob_pk))").unwrap(); /// - /// // Information to translator abstract String type keys to concrete bitcoin::PublicKey. - /// // In practice, wallets would map from String key names to BIP32 keys + /// // Information to translate abstract string type keys to concrete `bitcoin::PublicKey`s. + /// // In practice, wallets would map from string key names to BIP32 keys. /// struct StrPkTranslator { /// pk_map: HashMap /// } /// - /// // If we also wanted to provide mapping of other associated types(sha256, older etc), - /// // we would use the general Translator Trait. + /// // If we also wanted to provide mapping of other associated types (sha256, older etc), + /// // we would use the general [`Translator`] trait. /// impl Translator for StrPkTranslator { /// fn pk(&mut self, pk: &String) -> Result { /// self.pk_map.get(pk).copied().ok_or(()) // Dummy Err /// } /// - /// // Handy macro for failing if we encounter any other fragment. - /// // also see translate_hash_clone! for cloning instead of failing + /// // Handy macro for failing if we encounter any other fragment. + /// // See also [`translate_hash_clone!`] for cloning instead of failing. /// translate_hash_fail!(String, bitcoin::PublicKey, ()); /// } /// /// let mut pk_map = HashMap::new(); /// pk_map.insert(String::from("alice_pk"), bitcoin::PublicKey::from_str(alice_pk).unwrap()); /// pk_map.insert(String::from("bob_pk"), bitcoin::PublicKey::from_str(bob_pk).unwrap()); - /// let mut t = StrPkTranslator { pk_map: pk_map }; + /// let mut t = StrPkTranslator { pk_map }; /// /// let real_policy = placeholder_policy.translate_pk(&mut t).unwrap(); /// @@ -159,11 +160,12 @@ impl Policy { } } - /// This function computes whether the current policy entails the second one. + /// Computes whether the current policy entails the second one. + /// /// A |- B means every satisfaction of A is also a satisfaction of B. - /// This implementation will run slow for larger policies but should be sufficient for - /// most practical policies. - + /// + /// This implementation will run slowly for larger policies but should be + /// sufficient for most practical policies. // This algorithm has a naive implementation. It is possible to optimize this // by memoizing and maintaining a hashmap. pub fn entails(self, other: Policy) -> Result { @@ -211,11 +213,10 @@ impl Policy { } } - // Helper function that takes in witness and its availability, - // changing it to true or false and returning the resultant normalized - // policy. - // Witness is currently encoded as policy. 
Only accepts leaf fragment and - // a normalized policy + // Helper function that takes in witness and its availability, changing it + // to true or false and returning the resultant normalized policy. Witness + // is currently encoded as policy. Only accepts leaf fragment and a + // normalized policy pub(crate) fn satisfy_constraint(self, witness: &Policy, available: bool) -> Policy { debug_assert!(self.clone().normalized() == self); if let Policy::Threshold { .. } = *witness { @@ -405,7 +406,7 @@ impl_from_tree!( ); impl Policy { - /// Flatten out trees of `And`s and `Or`s; eliminate `Trivial` and + /// Flattens out trees of `And`s and `Or`s; eliminate `Trivial` and /// `Unsatisfiable`s. Does not reorder any branches; use `.sort`. pub fn normalized(self) -> Policy { match self { @@ -460,10 +461,11 @@ impl Policy { } } - /// Helper function to detect a true/trivial policy - /// This function only checks whether the policy is Policy::Trivial - /// For checking if the normalized form is trivial, the caller - /// is expected to normalize the policy first. + /// Detects a true/trivial policy. + /// + /// Only checks whether the policy is `Policy::Trivial`, to check if the + /// normalized form is trivial, the caller is expected to normalize the + /// policy first. pub fn is_trivial(&self) -> bool { match *self { Policy::Trivial => true, @@ -471,10 +473,11 @@ impl Policy { } } - /// Helper function to detect a false/unsatisfiable policy - /// This function only checks whether the policy is Policy::Unsatisfiable - /// For checking if the normalized form is unsatisfiable, the caller - /// is expected to normalize the policy first. + /// Detects a false/unsatisfiable policy. + /// + /// Only checks whether the policy is `Policy::Unsatisfiable`, to check if + /// the normalized form is unsatisfiable, the caller is expected to + /// normalize the policy first. pub fn is_unsatisfiable(&self) -> bool { match *self { Policy::Unsatisfiable => true, @@ -501,8 +504,8 @@ impl Policy { } } - /// Returns a list of all relative timelocks, not including 0, - /// which appear in the policy + /// Returns a list of all relative timelocks, not including 0, which appear + /// in the policy. pub fn relative_timelocks(&self) -> Vec { let mut ret = self.real_relative_timelocks(); ret.sort_unstable(); @@ -529,8 +532,8 @@ impl Policy { } } - /// Returns a list of all absolute timelocks, not including 0, - /// which appear in the policy + /// Returns a list of all absolute timelocks, not including 0, which appear + /// in the policy. pub fn absolute_timelocks(&self) -> Vec { let mut ret = self.real_absolute_timelocks(); ret.sort_unstable(); @@ -538,7 +541,7 @@ impl Policy { ret } - /// Filter a policy by eliminating relative timelock constraints + /// Filters a policy by eliminating relative timelock constraints /// that are not satisfied at the given `age`. pub fn at_age(mut self, age: Sequence) -> Policy { self = match self { @@ -560,7 +563,7 @@ impl Policy { self.normalized() } - /// Filter a policy by eliminating absolute timelock constraints + /// Filters a policy by eliminating absolute timelock constraints /// that are not satisfied at the given `n` (`n OP_CHECKLOCKTIMEVERIFY`). pub fn at_lock_time(mut self, n: absolute::LockTime) -> Policy { use absolute::LockTime::*; @@ -587,7 +590,7 @@ impl Policy { self.normalized() } - /// Count the number of public keys and keyhashes referenced in a policy. + /// Counts the number of public keys and keyhashes referenced in a policy. 
/// Duplicate keys will be double-counted. pub fn n_keys(&self) -> usize { match *self { @@ -603,8 +606,11 @@ impl Policy { } } - /// Count the minimum number of public keys for which signatures - /// could be used to satisfy the policy. + /// Counts the minimum number of public keys for which signatures could be + /// used to satisfy the policy. + /// + /// # Returns + /// /// Returns `None` if the policy is not satisfiable. pub fn minimum_n_keys(&self) -> Option { match *self { @@ -633,7 +639,8 @@ impl Policy { } impl Policy { - /// "Sort" a policy to bring it into a canonical form to allow comparisons. + /// "Sorts" a policy to bring it into a canonical form to allow comparisons. + /// /// Does **not** allow policies to be compared for functional equivalence; /// in general this appears to require Gröbner basis techniques that are not /// implemented. From 3c0ff73d14e0a570225cec3a543bb0191e38acda Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Fri, 14 Jul 2023 21:27:00 +0000 Subject: [PATCH 12/41] iter: add module, copied in large from rust-simplicity Introduces the new `iter` module which provides iteration abilities over various tree-like structures. This is largely copied from rust-simplicity, but is both simpler (because there is no sharing) and more complicated (because we need to handle n-ary nodes, not just binary ones). This currently uses Arc<[T]> in a number of places; we should eventually replace these with some sort of ArrayVec-type structure. --- src/iter/mod.rs | 93 ++++++++++++++ src/iter/tree.rs | 324 +++++++++++++++++++++++++++++++++++++++++++++++ src/lib.rs | 1 + 3 files changed, 418 insertions(+) create mode 100644 src/iter/mod.rs create mode 100644 src/iter/tree.rs diff --git a/src/iter/mod.rs b/src/iter/mod.rs new file mode 100644 index 000000000..6a8ea60ad --- /dev/null +++ b/src/iter/mod.rs @@ -0,0 +1,93 @@ +// Written in 2023 by Andrew Poelstra +// SPDX-License-Identifier: CC0-1.0 + +//! Abstract Tree Iteration +//! +//! This module provides functionality to treat Miniscript objects abstractly +//! as trees, iterating over them in various orders. The iterators in this +//! module can be used to avoid explicitly recursive algorithms. +//! + +mod tree; + +pub use tree::{ + PostOrderIter, PostOrderIterItem, PreOrderIter, PreOrderIterItem, Tree, TreeLike, + VerbosePreOrderIter, +}; + +use crate::sync::Arc; +use crate::{Miniscript, MiniscriptKey, ScriptContext, Terminal}; + +impl<'a, Pk: MiniscriptKey, Ctx: ScriptContext> TreeLike for &'a Miniscript { + fn as_node(&self) -> Tree { + match self.node { + Terminal::PkK(..) + | Terminal::PkH(..) + | Terminal::RawPkH(..) + | Terminal::After(..) + | Terminal::Older(..) + | Terminal::Sha256(..) + | Terminal::Hash256(..) + | Terminal::Ripemd160(..) + | Terminal::Hash160(..) + | Terminal::True + | Terminal::False + | Terminal::Multi(..) + | Terminal::MultiA(..) 
=> Tree::Nullary,
+            Terminal::Alt(ref sub)
+            | Terminal::Swap(ref sub)
+            | Terminal::Check(ref sub)
+            | Terminal::DupIf(ref sub)
+            | Terminal::Verify(ref sub)
+            | Terminal::NonZero(ref sub)
+            | Terminal::ZeroNotEqual(ref sub) => Tree::Unary(sub),
+            Terminal::AndV(ref left, ref right)
+            | Terminal::AndB(ref left, ref right)
+            | Terminal::OrB(ref left, ref right)
+            | Terminal::OrD(ref left, ref right)
+            | Terminal::OrC(ref left, ref right)
+            | Terminal::OrI(ref left, ref right) => Tree::Binary(left, right),
+            Terminal::AndOr(ref a, ref b, ref c) => Tree::Nary(Arc::from([a.as_ref(), b, c])),
+            Terminal::Thresh(_, ref subs) => Tree::Nary(subs.iter().map(Arc::as_ref).collect()),
+        }
+    }
+}
+
+impl<Pk: MiniscriptKey, Ctx: ScriptContext> TreeLike for Arc<Miniscript<Pk, Ctx>> {
+    fn as_node(&self) -> Tree<Self> {
+        match self.node {
+            Terminal::PkK(..)
+            | Terminal::PkH(..)
+            | Terminal::RawPkH(..)
+            | Terminal::After(..)
+            | Terminal::Older(..)
+            | Terminal::Sha256(..)
+            | Terminal::Hash256(..)
+            | Terminal::Ripemd160(..)
+            | Terminal::Hash160(..)
+            | Terminal::True
+            | Terminal::False
+            | Terminal::Multi(..)
+            | Terminal::MultiA(..) => Tree::Nullary,
+            Terminal::Alt(ref sub)
+            | Terminal::Swap(ref sub)
+            | Terminal::Check(ref sub)
+            | Terminal::DupIf(ref sub)
+            | Terminal::Verify(ref sub)
+            | Terminal::NonZero(ref sub)
+            | Terminal::ZeroNotEqual(ref sub) => Tree::Unary(Arc::clone(sub)),
+            Terminal::AndV(ref left, ref right)
+            | Terminal::AndB(ref left, ref right)
+            | Terminal::OrB(ref left, ref right)
+            | Terminal::OrD(ref left, ref right)
+            | Terminal::OrC(ref left, ref right)
+            | Terminal::OrI(ref left, ref right) => {
+                Tree::Binary(Arc::clone(left), Arc::clone(right))
+            }
+            Terminal::AndOr(ref a, ref b, ref c) => {
+                Tree::Nary(Arc::from([Arc::clone(a), Arc::clone(b), Arc::clone(c)]))
+            }
+            Terminal::Thresh(_, ref subs) => Tree::Nary(subs.iter().map(Arc::clone).collect()),
+        }
+    }
+}
diff --git a/src/iter/tree.rs b/src/iter/tree.rs
new file mode 100644
index 000000000..4ff0e1ee9
--- /dev/null
+++ b/src/iter/tree.rs
@@ -0,0 +1,324 @@
+// Written in 2023 by Andrew Poelstra
+// SPDX-License-Identifier: CC0-1.0
+
+//! Abstract Trees
+//!
+//! This module provides the [`TreeLike`] trait which represents a node in a
+//! tree, and several iterators over trees whose nodes implement this trait.
+//!
+
+use crate::prelude::*;
+use crate::sync::Arc;
+
+/// Abstract node of a tree.
+///
+/// Tracks the arity (out-degree) of a node, which is the only thing that
+/// is needed for iteration purposes.
+pub enum Tree<T> {
+    /// Combinator with no children.
+    Nullary,
+    /// Combinator with one child.
+    Unary(T),
+    /// Combinator with two children.
+    Binary(T, T),
+    /// Combinator with more than two children.
+    Nary(Arc<[T]>),
+}
+
+/// A trait for any structure which has the shape of a Miniscript tree.
+///
+/// As a general rule, this should be implemented on references to nodes,
+/// rather than nodes themselves, because it provides algorithms that
+/// assume copying is cheap.
+///
+/// To implement this trait, you only need to implement the [`TreeLike::as_node`]
+/// method, which will usually be very mechanical. Everything else is provided.
+/// However, to avoid allocations, it may make sense to also implement
+/// [`TreeLike::n_children`] and [`TreeLike::nth_child`] because the default
+/// implementations will allocate vectors for n-ary nodes.
+pub trait TreeLike: Clone + Sized {
+    /// Interpret the node as an abstract node.
+    fn as_node(&self) -> Tree<Self>;
+
+    /// Accessor for the number of children this node has.
+ fn n_children(&self) -> usize { + match self.as_node() { + Tree::Nullary => 0, + Tree::Unary(..) => 1, + Tree::Binary(..) => 2, + Tree::Nary(children) => children.len(), + } + } + + /// Accessor for the nth child of the node, if a child with that index exists. + fn nth_child(&self, n: usize) -> Option { + match (n, self.as_node()) { + (_, Tree::Nullary) => None, + (0, Tree::Unary(sub)) => Some(sub), + (_, Tree::Unary(..)) => None, + (0, Tree::Binary(sub, _)) => Some(sub), + (1, Tree::Binary(_, sub)) => Some(sub), + (_, Tree::Binary(..)) => None, + (n, Tree::Nary(children)) => children.get(n).cloned(), + } + } + + /// Obtains an iterator of all the nodes rooted at the node, in pre-order. + fn pre_order_iter(self) -> PreOrderIter { + PreOrderIter { stack: vec![self] } + } + + /// Obtains a verbose iterator of all the nodes rooted at the DAG, in pre-order. + /// + /// See the documentation of [`VerbosePreOrderIter`] for more information about what + /// this does. Essentially, if you find yourself using [`Self::pre_order_iter`] and + /// then adding a stack to manually track which items and their children have been + /// yielded, you may be better off using this iterator instead. + fn verbose_pre_order_iter(self) -> VerbosePreOrderIter { + VerbosePreOrderIter { + stack: vec![PreOrderIterItem::initial(self, None)], + index: 0, + } + } + + /// Obtains an iterator of all the nodes rooted at the DAG, in post order. + /// + /// Each node is only yielded once, at the leftmost position that it + /// appears in the DAG. + fn post_order_iter(self) -> PostOrderIter { + PostOrderIter { + index: 0, + stack: vec![IterStackItem::unprocessed(self, None)], + } + } +} + +/// Element stored internally on the stack of a [`PostOrderIter`]. +/// +/// This is **not** the type that is yielded by the [`PostOrderIter`]; +/// in fact, this type is not even exported. +#[derive(Clone, Debug)] +struct IterStackItem { + /// The element on the stack + elem: T, + /// Whether we have dealt with this item (and pushed its children, + /// if any) yet. + processed: bool, + /// If the item has been processed, the index of its children. + child_indices: Vec, + /// Whether the element is a left- or right-child of its parent. + parent_stack_idx: Option, +} + +impl IterStackItem { + /// Constructor for a new stack item with a given element and relationship + /// to its parent. + fn unprocessed(elem: T, parent_stack_idx: Option) -> Self { + IterStackItem { + processed: false, + child_indices: Vec::with_capacity(elem.n_children()), + parent_stack_idx, + elem, + } + } +} + +/// Iterates over a DAG in _post order_. +/// +/// That means nodes are yielded in the order (left child, right child, parent). +#[derive(Clone, Debug)] +pub struct PostOrderIter { + /// The index of the next item to be yielded + index: usize, + /// A stack of elements to be yielded; each element is a node, then its left + /// and right children (if they exist and if they have been yielded already) + stack: Vec>, +} + +/// A set of data yielded by a `PostOrderIter`. +pub struct PostOrderIterItem { + /// The actual node data + pub node: T, + /// The index of this node (equivalent to if you'd called `.enumerate()` on + /// the iterator) + pub index: usize, + /// The indices of this node's children. 
+ pub child_indices: Vec, +} + +impl Iterator for PostOrderIter { + type Item = PostOrderIterItem; + + fn next(&mut self) -> Option { + let mut current = self.stack.pop()?; + + if !current.processed { + current.processed = true; + + // When we first encounter an item, it is completely unknown; it is + // nominally the next item to be yielded, but it might have children, + // and if so, they come first + let current_stack_idx = self.stack.len(); + let n_children = current.elem.n_children(); + self.stack.push(current); + for idx in (0..n_children).rev() { + self.stack.push(IterStackItem::unprocessed( + self.stack[current_stack_idx].elem.nth_child(idx).unwrap(), + Some(current_stack_idx), + )); + } + self.next() + } else { + // The second time we encounter an item, we have dealt with its children, + // updated the child indices for this item, and are now ready to yield it + // rather than putting it back in the stack. + // + // Before yielding though, we must the item's parent's child indices with + // this item's index. + if let Some(idx) = current.parent_stack_idx { + self.stack[idx].child_indices.push(self.index); + } + + self.index += 1; + Some(PostOrderIterItem { + node: current.elem, + index: self.index - 1, + child_indices: current.child_indices, + }) + } + } +} + +/// Iterates over a [`TreeLike`] in _pre order_. +/// +/// Unlike the post-order iterator, this one does not keep track of indices +/// (this would be impractical since when we yield a node we have not yet +/// yielded its children, so we cannot know their indices). If you do need +/// the indices for some reason, the best strategy may be to run the +/// post-order iterator, collect into a vector, then iterate through that +/// backward. +#[derive(Clone, Debug)] +pub struct PreOrderIter { + /// A stack of elements to be yielded. As items are yielded, their right + /// children are put onto the stack followed by their left, so that the + /// appropriate one will be yielded on the next iteration. + stack: Vec, +} + +impl Iterator for PreOrderIter { + type Item = T; + + fn next(&mut self) -> Option { + // This algorithm is _significantly_ simpler than the post-order one, + // mainly because we don't care about child indices. + let top = self.stack.pop()?; + match top.as_node() { + Tree::Nullary => {} + Tree::Unary(next) => self.stack.push(next), + Tree::Binary(left, right) => { + self.stack.push(right); + self.stack.push(left); + } + Tree::Nary(children) => { + self.stack.extend(children.into_iter().rev().cloned()); + } + } + Some(top) + } +} + +/// Iterates over a [`TreeLike`] in "verbose pre order", yielding extra state changes. +/// +/// This yields nodes followed by their children, followed by the node *again* +/// after each child. This means that each node will be yielded a total of +/// (n+1) times, where n is its number of children. +/// +/// The different times that a node is yielded can be distinguished by looking +/// at the [`PreOrderIterItem::n_children_yielded`] (which, in particular, +/// will be 0 on the first yield) and [`PreOrderIterItem::is_complete`] (which +/// will be true on the last yield) fields of the yielded item. +#[derive(Clone, Debug)] +pub struct VerbosePreOrderIter { + /// A stack of elements to be yielded. As items are yielded, their right + /// children are put onto the stack followed by their left, so that the + /// appropriate one will be yielded on the next iteration. + stack: Vec>, + /// The index of the next item to be yielded. 
+ /// + /// Note that unlike the [`PostOrderIter`], this value is not monotonic + /// and not equivalent to just using `enumerate` on the iterator, because + /// elements may be yielded multiple times. + index: usize, +} + +impl Iterator for VerbosePreOrderIter { + type Item = PreOrderIterItem; + + fn next(&mut self) -> Option { + // This algorithm is still simpler than the post-order one, because while + // we care about node indices, we don't care about their childrens' indices. + let mut top = self.stack.pop()?; + + // If this is the first time we're be yielding this element, set its index. + if top.n_children_yielded == 0 { + top.index = self.index; + self.index += 1; + } + // Push the next child. + let n_children = top.node.n_children(); + if top.n_children_yielded < n_children { + self.stack.push(top.clone().increment(n_children)); + let child = top.node.nth_child(top.n_children_yielded).unwrap(); + self.stack + .push(PreOrderIterItem::initial(child, Some(top.node.clone()))); + } + + // Then yield the element. + Some(top) + } +} + +/// A set of data yielded by a [`VerbosePreOrderIter`]. +#[derive(Clone, Debug)] +pub struct PreOrderIterItem { + /// The actual element being yielded. + pub node: T, + /// The parent of this node. `None` for the initial node, but will be + /// populated for all other nodes. + pub parent: Option, + /// The index when the element was first yielded. + pub index: usize, + /// How many of this item's children have been yielded. + /// + /// This can also be interpreted as a count of how many times this + /// item has been yielded before. + pub n_children_yielded: usize, + /// Whether this item is done (will not be yielded again). + pub is_complete: bool, +} + +impl PreOrderIterItem { + /// Creates a `PreOrderIterItem` which yields a given element for the first time. + /// + /// Marks the index as 0. The index must be manually set before yielding. + fn initial(node: T, parent: Option) -> Self { + PreOrderIterItem { + is_complete: node.n_children() == 0, + node, + parent, + index: 0, + n_children_yielded: 0, + } + } + + /// Creates a `PreOrderIterItem` which yields a given element again. + fn increment(self, n_children: usize) -> Self { + PreOrderIterItem { + node: self.node, + index: self.index, + parent: self.parent, + n_children_yielded: self.n_children_yielded + 1, + is_complete: self.n_children_yielded + 1 == n_children, + } + } +} diff --git a/src/lib.rs b/src/lib.rs index 69a551018..084f4ac9a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -122,6 +122,7 @@ pub use pub_macros::*; pub mod descriptor; pub mod expression; pub mod interpreter; +pub mod iter; pub mod miniscript; pub mod policy; pub mod psbt; From 2930938783f9953e1c1c9b3f14ab7131b92e1787 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Fri, 14 Jul 2023 22:09:40 +0000 Subject: [PATCH 13/41] miniscript: eliminate recursion in for_each_key This is a breaking change because we no longer implement `ForEachKey` on `Terminal`. But IMHO we should never have implemented that trait (or any trait, really..) directly on `Terminal`. The fact that we did was just an implementation detail of how we used to do a lot of iteration. 
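As a rough illustration of the new pattern: downstream code can now walk a script's keys with the pre-order iterator from the `iter` module instead of writing a recursive helper on `Terminal`. The `collect_keys` helper below and its import paths are only a sketch for illustration (assuming the usual crate-root re-exports), not code from this patch:

```
use miniscript::iter::TreeLike;
use miniscript::{Miniscript, MiniscriptKey, ScriptContext, Terminal};

// Collect every key referenced by the script, using the non-recursive
// pre-order iterator rather than recursing through `Terminal`.
fn collect_keys<Pk: MiniscriptKey, Ctx: ScriptContext>(ms: &Miniscript<Pk, Ctx>) -> Vec<Pk> {
    let mut keys = Vec::new();
    for sub in ms.pre_order_iter() {
        match sub.node {
            Terminal::PkK(ref pk) | Terminal::PkH(ref pk) => keys.push(pk.clone()),
            Terminal::Multi(_, ref pks) | Terminal::MultiA(_, ref pks) => {
                keys.extend(pks.iter().cloned())
            }
            _ => {}
        }
    }
    keys
}
```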
--- src/miniscript/astelem.rs | 48 ++------------------------------------- src/miniscript/mod.rs | 27 ++++++++++++++++++---- 2 files changed, 24 insertions(+), 51 deletions(-) diff --git a/src/miniscript/astelem.rs b/src/miniscript/astelem.rs index 62dddf633..7be6704b5 100644 --- a/src/miniscript/astelem.rs +++ b/src/miniscript/astelem.rs @@ -21,8 +21,8 @@ use crate::miniscript::ScriptContext; use crate::prelude::*; use crate::util::MsKeyBuilder; use crate::{ - errstr, expression, script_num_size, AbsLockTime, Error, ForEachKey, Miniscript, MiniscriptKey, - Terminal, ToPublicKey, TranslateErr, TranslatePk, Translator, + errstr, expression, script_num_size, AbsLockTime, Error, Miniscript, MiniscriptKey, Terminal, + ToPublicKey, TranslateErr, TranslatePk, Translator, }; impl Terminal { @@ -64,44 +64,6 @@ where } impl Terminal { - pub(super) fn real_for_each_key<'a, F: FnMut(&'a Pk) -> bool>(&'a self, pred: &mut F) -> bool { - match *self { - Terminal::PkK(ref p) => pred(p), - Terminal::PkH(ref p) => pred(p), - Terminal::RawPkH(..) - | Terminal::After(..) - | Terminal::Older(..) - | Terminal::Sha256(..) - | Terminal::Hash256(..) - | Terminal::Ripemd160(..) - | Terminal::Hash160(..) - | Terminal::True - | Terminal::False => true, - Terminal::Alt(ref sub) - | Terminal::Swap(ref sub) - | Terminal::Check(ref sub) - | Terminal::DupIf(ref sub) - | Terminal::Verify(ref sub) - | Terminal::NonZero(ref sub) - | Terminal::ZeroNotEqual(ref sub) => sub.real_for_each_key(pred), - Terminal::AndV(ref left, ref right) - | Terminal::AndB(ref left, ref right) - | Terminal::OrB(ref left, ref right) - | Terminal::OrD(ref left, ref right) - | Terminal::OrC(ref left, ref right) - | Terminal::OrI(ref left, ref right) => { - left.real_for_each_key(&mut *pred) && right.real_for_each_key(pred) - } - Terminal::AndOr(ref a, ref b, ref c) => { - a.real_for_each_key(&mut *pred) - && b.real_for_each_key(&mut *pred) - && c.real_for_each_key(pred) - } - Terminal::Thresh(_, ref subs) => subs.iter().all(|sub| sub.real_for_each_key(pred)), - Terminal::Multi(_, ref keys) | Terminal::MultiA(_, ref keys) => keys.iter().all(pred), - } - } - pub(super) fn real_translate_pk( &self, t: &mut T, @@ -250,12 +212,6 @@ impl Terminal { } } -impl ForEachKey for Terminal { - fn for_each_key<'a, F: FnMut(&'a Pk) -> bool>(&'a self, mut pred: F) -> bool { - self.real_for_each_key(&mut pred) - } -} - impl fmt::Debug for Terminal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("[")?; diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index c31199d6b..afe829a0e 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -22,6 +22,7 @@ use bitcoin::taproot::{LeafVersion, TapLeafHash}; use self::analyzable::ExtParams; pub use self::context::{BareCtx, Legacy, Segwitv0, Tap}; +use crate::iter::TreeLike; use crate::prelude::*; use crate::TranslateErr; @@ -296,7 +297,27 @@ impl Miniscript { impl ForEachKey for Miniscript { fn for_each_key<'a, F: FnMut(&'a Pk) -> bool>(&'a self, mut pred: F) -> bool { - self.real_for_each_key(&mut pred) + for ms in self.pre_order_iter() { + match ms.node { + Terminal::PkK(ref p) => { + if !pred(p) { + return false; + } + } + Terminal::PkH(ref p) => { + if !pred(p) { + return false; + } + } + Terminal::Multi(_, ref keys) | Terminal::MultiA(_, ref keys) => { + if !keys.iter().all(&mut pred) { + return false; + } + } + _ => {} + } + } + true } } @@ -319,10 +340,6 @@ where } impl Miniscript { - fn real_for_each_key<'a, F: FnMut(&'a Pk) -> bool>(&'a self, pred: &mut F) -> bool { - 
self.node.real_for_each_key(pred) - } - pub(super) fn real_translate_pk( &self, t: &mut T, From 8c32a19b4813b7030bc6851a30a1d38ce4241bc3 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Sat, 15 Jul 2023 14:14:30 +0000 Subject: [PATCH 14/41] miniscript: eliminate recursion from TranslatePk This is again a breaking change because we remove the trait impl from `Terminal`, but again I don't think the trait impl should've existed. This also exposed an "off-label" use of `real_translate_pk` to convert context types as well as pk types, which I apparently explicitly reviewed and ACKed when reviewing #426, but which in retrospect looks a little funny. Rename this function to translate_pk_ctx so it's clear that it's doing two jobs. --- src/interpreter/inner.rs | 4 +- src/miniscript/astelem.rs | 97 +-------------------------------------- src/miniscript/mod.rs | 56 ++++++++++++++++++++-- 3 files changed, 54 insertions(+), 103 deletions(-) diff --git a/src/interpreter/inner.rs b/src/interpreter/inner.rs index 538cacb7e..59de53324 100644 --- a/src/interpreter/inner.rs +++ b/src/interpreter/inner.rs @@ -380,7 +380,7 @@ impl ToNoChecks for Miniscript { translate_hash_clone!(bitcoin::PublicKey, BitcoinKey, ()); } - self.real_translate_pk(&mut TranslateFullPk) + self.translate_pk_ctx(&mut TranslateFullPk) .expect("Translation should succeed") } } @@ -397,7 +397,7 @@ impl ToNoChecks for Miniscript Terminal { @@ -46,102 +46,7 @@ impl Terminal { } } -impl TranslatePk for Terminal -where - Pk: MiniscriptKey, - Q: MiniscriptKey, - Ctx: ScriptContext, -{ - type Output = Terminal; - - /// Converts an AST element with one public key type to one of another public key type. - fn translate_pk(&self, translate: &mut T) -> Result> - where - T: Translator, - { - self.real_translate_pk(translate) - } -} - impl Terminal { - pub(super) fn real_translate_pk( - &self, - t: &mut T, - ) -> Result, TranslateErr> - where - Q: MiniscriptKey, - CtxQ: ScriptContext, - T: Translator, - { - let frag: Terminal = match *self { - Terminal::PkK(ref p) => Terminal::PkK(t.pk(p)?), - Terminal::PkH(ref p) => Terminal::PkH(t.pk(p)?), - Terminal::RawPkH(ref p) => Terminal::RawPkH(*p), - Terminal::After(n) => Terminal::After(n), - Terminal::Older(n) => Terminal::Older(n), - Terminal::Sha256(ref x) => Terminal::Sha256(t.sha256(x)?), - Terminal::Hash256(ref x) => Terminal::Hash256(t.hash256(x)?), - Terminal::Ripemd160(ref x) => Terminal::Ripemd160(t.ripemd160(x)?), - Terminal::Hash160(ref x) => Terminal::Hash160(t.hash160(x)?), - Terminal::True => Terminal::True, - Terminal::False => Terminal::False, - Terminal::Alt(ref sub) => Terminal::Alt(Arc::new(sub.real_translate_pk(t)?)), - Terminal::Swap(ref sub) => Terminal::Swap(Arc::new(sub.real_translate_pk(t)?)), - Terminal::Check(ref sub) => Terminal::Check(Arc::new(sub.real_translate_pk(t)?)), - Terminal::DupIf(ref sub) => Terminal::DupIf(Arc::new(sub.real_translate_pk(t)?)), - Terminal::Verify(ref sub) => Terminal::Verify(Arc::new(sub.real_translate_pk(t)?)), - Terminal::NonZero(ref sub) => Terminal::NonZero(Arc::new(sub.real_translate_pk(t)?)), - Terminal::ZeroNotEqual(ref sub) => { - Terminal::ZeroNotEqual(Arc::new(sub.real_translate_pk(t)?)) - } - Terminal::AndV(ref left, ref right) => Terminal::AndV( - Arc::new(left.real_translate_pk(t)?), - Arc::new(right.real_translate_pk(t)?), - ), - Terminal::AndB(ref left, ref right) => Terminal::AndB( - Arc::new(left.real_translate_pk(t)?), - Arc::new(right.real_translate_pk(t)?), - ), - Terminal::AndOr(ref a, ref b, ref c) => Terminal::AndOr( - 
Arc::new(a.real_translate_pk(t)?), - Arc::new(b.real_translate_pk(t)?), - Arc::new(c.real_translate_pk(t)?), - ), - Terminal::OrB(ref left, ref right) => Terminal::OrB( - Arc::new(left.real_translate_pk(t)?), - Arc::new(right.real_translate_pk(t)?), - ), - Terminal::OrD(ref left, ref right) => Terminal::OrD( - Arc::new(left.real_translate_pk(t)?), - Arc::new(right.real_translate_pk(t)?), - ), - Terminal::OrC(ref left, ref right) => Terminal::OrC( - Arc::new(left.real_translate_pk(t)?), - Arc::new(right.real_translate_pk(t)?), - ), - Terminal::OrI(ref left, ref right) => Terminal::OrI( - Arc::new(left.real_translate_pk(t)?), - Arc::new(right.real_translate_pk(t)?), - ), - Terminal::Thresh(k, ref subs) => { - let subs: Result>>, _> = subs - .iter() - .map(|s| s.real_translate_pk(t).map(Arc::new)) - .collect(); - Terminal::Thresh(k, subs?) - } - Terminal::Multi(k, ref keys) => { - let keys: Result, _> = keys.iter().map(|k| t.pk(k)).collect(); - Terminal::Multi(k, keys?) - } - Terminal::MultiA(k, ref keys) => { - let keys: Result, _> = keys.iter().map(|k| t.pk(k)).collect(); - Terminal::MultiA(k, keys?) - } - }; - Ok(frag) - } - /// Substitutes raw public keys hashes with the public keys as provided by map. pub fn substitute_raw_pkh(&self, pk_map: &BTreeMap) -> Terminal { match self { diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index afe829a0e..ade83542a 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -331,16 +331,16 @@ where /// Translates a struct from one generic to another where the translation /// for Pk is provided by [`Translator`] - fn translate_pk(&self, translate: &mut T) -> Result> + fn translate_pk(&self, t: &mut T) -> Result> where T: Translator, { - self.real_translate_pk(translate) + self.translate_pk_ctx(t) } } impl Miniscript { - pub(super) fn real_translate_pk( + pub(super) fn translate_pk_ctx( &self, t: &mut T, ) -> Result, TranslateErr> @@ -349,8 +349,54 @@ impl Miniscript { CtxQ: ScriptContext, T: Translator, { - let inner = self.node.real_translate_pk(t)?; - Miniscript::from_ast(inner).map_err(TranslateErr::OuterError) + let mut translated = vec![]; + for data in Arc::new(self.clone()).post_order_iter() { + // convenience method to reduce typing + let child_n = |n| Arc::clone(&translated[data.child_indices[n]]); + + let new_term = match data.node.node { + Terminal::PkK(ref p) => Terminal::PkK(t.pk(p)?), + Terminal::PkH(ref p) => Terminal::PkH(t.pk(p)?), + Terminal::RawPkH(ref p) => Terminal::RawPkH(*p), + Terminal::After(n) => Terminal::After(n), + Terminal::Older(n) => Terminal::Older(n), + Terminal::Sha256(ref x) => Terminal::Sha256(t.sha256(x)?), + Terminal::Hash256(ref x) => Terminal::Hash256(t.hash256(x)?), + Terminal::Ripemd160(ref x) => Terminal::Ripemd160(t.ripemd160(x)?), + Terminal::Hash160(ref x) => Terminal::Hash160(t.hash160(x)?), + Terminal::True => Terminal::True, + Terminal::False => Terminal::False, + Terminal::Alt(..) => Terminal::Alt(child_n(0)), + Terminal::Swap(..) => Terminal::Swap(child_n(0)), + Terminal::Check(..) => Terminal::Check(child_n(0)), + Terminal::DupIf(..) => Terminal::DupIf(child_n(0)), + Terminal::Verify(..) => Terminal::Verify(child_n(0)), + Terminal::NonZero(..) => Terminal::NonZero(child_n(0)), + Terminal::ZeroNotEqual(..) => Terminal::ZeroNotEqual(child_n(0)), + Terminal::AndV(..) => Terminal::AndV(child_n(0), child_n(1)), + Terminal::AndB(..) => Terminal::AndB(child_n(0), child_n(1)), + Terminal::AndOr(..) => Terminal::AndOr(child_n(0), child_n(1), child_n(2)), + Terminal::OrB(..) 
=> Terminal::OrB(child_n(0), child_n(1)), + Terminal::OrD(..) => Terminal::OrD(child_n(0), child_n(1)), + Terminal::OrC(..) => Terminal::OrC(child_n(0), child_n(1)), + Terminal::OrI(..) => Terminal::OrI(child_n(0), child_n(1)), + Terminal::Thresh(k, ref subs) => { + Terminal::Thresh(k, (0..subs.len()).map(child_n).collect()) + } + Terminal::Multi(k, ref keys) => { + let keys: Result, _> = keys.iter().map(|k| t.pk(k)).collect(); + Terminal::Multi(k, keys?) + } + Terminal::MultiA(k, ref keys) => { + let keys: Result, _> = keys.iter().map(|k| t.pk(k)).collect(); + Terminal::MultiA(k, keys?) + } + }; + let new_ms = Miniscript::from_ast(new_term).map_err(TranslateErr::OuterError)?; + translated.push(Arc::new(new_ms)); + } + + Ok(Arc::try_unwrap(translated.pop().unwrap()).unwrap()) } /// Substitutes raw public keys hashes with the public keys as provided by map. From d858ffc30643261189d805391e5a17a26cc53f80 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Sat, 15 Jul 2023 14:14:57 +0000 Subject: [PATCH 15/41] miniscript: remove recursion from script_size As usual, we remove a method of the same name from `Terminal`, which is a breaking change. --- src/miniscript/astelem.rs | 63 +-------------------------------------- src/miniscript/mod.rs | 52 ++++++++++++++++++++++++++++++-- 2 files changed, 51 insertions(+), 64 deletions(-) diff --git a/src/miniscript/astelem.rs b/src/miniscript/astelem.rs index 2b1a35595..7c228ae8f 100644 --- a/src/miniscript/astelem.rs +++ b/src/miniscript/astelem.rs @@ -21,8 +21,7 @@ use crate::miniscript::ScriptContext; use crate::prelude::*; use crate::util::MsKeyBuilder; use crate::{ - errstr, expression, script_num_size, AbsLockTime, Error, Miniscript, MiniscriptKey, Terminal, - ToPublicKey, + errstr, expression, AbsLockTime, Error, Miniscript, MiniscriptKey, Terminal, ToPublicKey, }; impl Terminal { @@ -665,64 +664,4 @@ impl Terminal { } } } - - /// Size, in bytes of the script-pubkey. If this Miniscript is used outside - /// of segwit (e.g. in a bare or P2SH descriptor), this quantity should be - /// multiplied by 4 to compute the weight. - /// - /// In general, it is not recommended to use this function directly, but - /// to instead call the corresponding function on a `Descriptor`, which - /// will handle the segwit/non-segwit technicalities for you. - pub fn script_size(&self) -> usize { - match *self { - Terminal::PkK(ref pk) => Ctx::pk_len(pk), - Terminal::PkH(..) | Terminal::RawPkH(..) => 24, - Terminal::After(n) => script_num_size(n.to_consensus_u32() as usize) + 1, - Terminal::Older(n) => script_num_size(n.to_consensus_u32() as usize) + 1, - Terminal::Sha256(..) => 33 + 6, - Terminal::Hash256(..) => 33 + 6, - Terminal::Ripemd160(..) => 21 + 6, - Terminal::Hash160(..) 
=> 21 + 6, - Terminal::True => 1, - Terminal::False => 1, - Terminal::Alt(ref sub) => sub.node.script_size() + 2, - Terminal::Swap(ref sub) => sub.node.script_size() + 1, - Terminal::Check(ref sub) => sub.node.script_size() + 1, - Terminal::DupIf(ref sub) => sub.node.script_size() + 3, - Terminal::Verify(ref sub) => { - sub.node.script_size() + usize::from(!sub.ext.has_free_verify) - } - Terminal::NonZero(ref sub) => sub.node.script_size() + 4, - Terminal::ZeroNotEqual(ref sub) => sub.node.script_size() + 1, - Terminal::AndV(ref l, ref r) => l.node.script_size() + r.node.script_size(), - Terminal::AndB(ref l, ref r) => l.node.script_size() + r.node.script_size() + 1, - Terminal::AndOr(ref a, ref b, ref c) => { - a.node.script_size() + b.node.script_size() + c.node.script_size() + 3 - } - Terminal::OrB(ref l, ref r) => l.node.script_size() + r.node.script_size() + 1, - Terminal::OrD(ref l, ref r) => l.node.script_size() + r.node.script_size() + 3, - Terminal::OrC(ref l, ref r) => l.node.script_size() + r.node.script_size() + 2, - Terminal::OrI(ref l, ref r) => l.node.script_size() + r.node.script_size() + 3, - Terminal::Thresh(k, ref subs) => { - assert!(!subs.is_empty(), "threshold must be nonempty"); - script_num_size(k) // k - + 1 // EQUAL - + subs.iter().map(|s| s.node.script_size()).sum::() - + subs.len() // ADD - - 1 // no ADD on first element - } - Terminal::Multi(k, ref pks) => { - script_num_size(k) - + 1 - + script_num_size(pks.len()) - + pks.iter().map(|pk| Ctx::pk_len(pk)).sum::() - } - Terminal::MultiA(k, ref pks) => { - script_num_size(k) - + 1 // NUMEQUAL - + pks.iter().map(|pk| Ctx::pk_len(pk)).sum::() // n keys - + pks.len() // n times CHECKSIGADD - } - } - } } diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index ade83542a..8868aa635 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -24,7 +24,7 @@ use self::analyzable::ExtParams; pub use self::context::{BareCtx, Legacy, Segwitv0, Tap}; use crate::iter::TreeLike; use crate::prelude::*; -use crate::TranslateErr; +use crate::{script_num_size, TranslateErr}; pub mod analyzable; pub mod astelem; @@ -259,7 +259,55 @@ where /// to instead call the corresponding function on a `Descriptor`, which /// will handle the segwit/non-segwit technicalities for you. pub fn script_size(&self) -> usize { - self.node.script_size() + let mut len = 0; + for ms in self.pre_order_iter() { + len += match ms.node { + Terminal::PkK(ref pk) => Ctx::pk_len(pk), + Terminal::PkH(..) | Terminal::RawPkH(..) => 24, + Terminal::After(n) => script_num_size(n.to_consensus_u32() as usize) + 1, + Terminal::Older(n) => script_num_size(n.to_consensus_u32() as usize) + 1, + Terminal::Sha256(..) => 33 + 6, + Terminal::Hash256(..) => 33 + 6, + Terminal::Ripemd160(..) => 21 + 6, + Terminal::Hash160(..) => 21 + 6, + Terminal::True => 1, + Terminal::False => 1, + Terminal::Alt(..) => 2, + Terminal::Swap(..) => 1, + Terminal::Check(..) => 1, + Terminal::DupIf(..) => 3, + Terminal::Verify(ref sub) => usize::from(!sub.ext.has_free_verify), + Terminal::NonZero(..) => 4, + Terminal::ZeroNotEqual(..) => 1, + Terminal::AndV(..) => 0, + Terminal::AndB(..) => 1, + Terminal::AndOr(..) => 3, + Terminal::OrB(..) => 1, + Terminal::OrD(..) => 3, + Terminal::OrC(..) => 2, + Terminal::OrI(..) 
=> 3, + Terminal::Thresh(k, ref subs) => { + assert!(!subs.is_empty(), "threshold must be nonempty"); + script_num_size(k) // k + + 1 // EQUAL + + subs.len() // ADD + - 1 // no ADD on first element + } + Terminal::Multi(k, ref pks) => { + script_num_size(k) + + 1 + + script_num_size(pks.len()) + + pks.iter().map(|pk| Ctx::pk_len(pk)).sum::() + } + Terminal::MultiA(k, ref pks) => { + script_num_size(k) + + 1 // NUMEQUAL + + pks.iter().map(|pk| Ctx::pk_len(pk)).sum::() // n keys + + pks.len() // n times CHECKSIGADD + } + } + } + len } } From 67e91b9115031c7f773b67314370685e0c5d2f03 Mon Sep 17 00:00:00 2001 From: Andrew Poelstra Date: Fri, 14 Jul 2023 23:27:35 +0000 Subject: [PATCH 16/41] miniscript: eliminate recursion in substitute_raw_pkh As always, dropping the same method from Terminal --- src/miniscript/astelem.rs | 71 --------------------------------------- src/miniscript/mod.rs | 17 +++++++++- 2 files changed, 16 insertions(+), 72 deletions(-) diff --git a/src/miniscript/astelem.rs b/src/miniscript/astelem.rs index 7c228ae8f..5c85b53d5 100644 --- a/src/miniscript/astelem.rs +++ b/src/miniscript/astelem.rs @@ -45,77 +45,6 @@ impl Terminal { } } -impl Terminal { - /// Substitutes raw public keys hashes with the public keys as provided by map. - pub fn substitute_raw_pkh(&self, pk_map: &BTreeMap) -> Terminal { - match self { - Terminal::RawPkH(ref p) => match pk_map.get(p) { - Some(pk) => Terminal::PkH(pk.clone()).into(), - None => Terminal::RawPkH(*p).into(), - }, - Terminal::PkK(..) - | Terminal::PkH(..) - | Terminal::Multi(..) - | Terminal::MultiA(..) - | Terminal::After(..) - | Terminal::Older(..) - | Terminal::Sha256(..) - | Terminal::Hash256(..) - | Terminal::Ripemd160(..) - | Terminal::Hash160(..) - | Terminal::True - | Terminal::False => self.clone().into(), - Terminal::Alt(ref sub) => Terminal::Alt(Arc::new(sub.substitute_raw_pkh(pk_map))), - Terminal::Swap(ref sub) => Terminal::Swap(Arc::new(sub.substitute_raw_pkh(pk_map))), - Terminal::Check(ref sub) => Terminal::Check(Arc::new(sub.substitute_raw_pkh(pk_map))), - Terminal::DupIf(ref sub) => Terminal::DupIf(Arc::new(sub.substitute_raw_pkh(pk_map))), - Terminal::Verify(ref sub) => Terminal::Verify(Arc::new(sub.substitute_raw_pkh(pk_map))), - Terminal::NonZero(ref sub) => { - Terminal::NonZero(Arc::new(sub.substitute_raw_pkh(pk_map))) - } - Terminal::ZeroNotEqual(ref sub) => { - Terminal::ZeroNotEqual(Arc::new(sub.substitute_raw_pkh(pk_map))) - } - Terminal::AndV(ref left, ref right) => Terminal::AndV( - Arc::new(left.substitute_raw_pkh(pk_map)), - Arc::new(right.substitute_raw_pkh(pk_map)), - ), - Terminal::AndB(ref left, ref right) => Terminal::AndB( - Arc::new(left.substitute_raw_pkh(pk_map)), - Arc::new(right.substitute_raw_pkh(pk_map)), - ), - Terminal::AndOr(ref a, ref b, ref c) => Terminal::AndOr( - Arc::new(a.substitute_raw_pkh(pk_map)), - Arc::new(b.substitute_raw_pkh(pk_map)), - Arc::new(c.substitute_raw_pkh(pk_map)), - ), - Terminal::OrB(ref left, ref right) => Terminal::OrB( - Arc::new(left.substitute_raw_pkh(pk_map)), - Arc::new(right.substitute_raw_pkh(pk_map)), - ), - Terminal::OrD(ref left, ref right) => Terminal::OrD( - Arc::new(left.substitute_raw_pkh(pk_map)), - Arc::new(right.substitute_raw_pkh(pk_map)), - ), - Terminal::OrC(ref left, ref right) => Terminal::OrC( - Arc::new(left.substitute_raw_pkh(pk_map)), - Arc::new(right.substitute_raw_pkh(pk_map)), - ), - Terminal::OrI(ref left, ref right) => Terminal::OrI( - Arc::new(left.substitute_raw_pkh(pk_map)), - Arc::new(right.substitute_raw_pkh(pk_map)), - ), - 
Terminal::Thresh(k, ref subs) => { - let subs: Vec>> = subs - .iter() - .map(|s| Arc::new(s.substitute_raw_pkh(pk_map))) - .collect(); - Terminal::Thresh(*k, subs) - } - } - } -} - impl fmt::Debug for Terminal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("[")?; diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index 8868aa635..db0b2baa6 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -449,7 +449,22 @@ impl Miniscript { /// Substitutes raw public keys hashes with the public keys as provided by map. pub fn substitute_raw_pkh(&self, pk_map: &BTreeMap) -> Miniscript { - Miniscript::from_ast(self.node.substitute_raw_pkh(pk_map)).expect("type check failed") + let mut translated = vec![]; + for data in Arc::new(self.clone()).post_order_iter() { + let new_term = if let Terminal::RawPkH(ref p) = data.node.node { + match pk_map.get(p) { + Some(pk) => Terminal::PkH(pk.clone()).into(), + None => Terminal::RawPkH(*p).into(), + } + } else { + data.node.node.clone() + }; + + let new_ms = Miniscript::from_ast(new_term).expect("typeck"); + translated.push(Arc::new(new_ms)); + } + + Arc::try_unwrap(translated.pop().unwrap()).unwrap() } } From c50bdf8681b0bf188bbc7f1524e7251ac8e07525 Mon Sep 17 00:00:00 2001 From: Steven Roose Date: Sun, 16 Jul 2023 19:50:42 +0100 Subject: [PATCH 17/41] psbt: Rewrite input replacement to avoid forgetting fields --- src/psbt/finalizer.rs | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/src/psbt/finalizer.rs b/src/psbt/finalizer.rs index 28902cc36..c42966f42 100644 --- a/src/psbt/finalizer.rs +++ b/src/psbt/finalizer.rs @@ -8,6 +8,8 @@ //! `https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki` //! +use core::mem; + use bitcoin::hashes::hash160; use bitcoin::key::XOnlyPublicKey; use bitcoin::secp256k1::{self, Secp256k1}; @@ -415,8 +417,10 @@ pub(super) fn finalize_input( // Now mutate the psbt input. Note that we cannot error after this point. // If the input is mutated, it means that the finalization succeeded. 
{ + let original = mem::replace(&mut psbt.inputs[index], Default::default()); let input = &mut psbt.inputs[index]; - //Fill in the satisfactions + input.non_witness_utxo = original.non_witness_utxo; + input.witness_utxo = original.witness_utxo; input.final_script_sig = if script_sig.is_empty() { None } else { @@ -427,25 +431,6 @@ pub(super) fn finalize_input( } else { Some(witness) }; - //reset everything - input.partial_sigs.clear(); // 0x02 - input.sighash_type = None; // 0x03 - input.redeem_script = None; // 0x04 - input.witness_script = None; // 0x05 - input.bip32_derivation.clear(); // 0x05 - // finalized witness 0x06 and 0x07 are not clear - // 0x09 Proof of reserves not yet supported - input.ripemd160_preimages.clear(); // 0x0a - input.sha256_preimages.clear(); // 0x0b - input.hash160_preimages.clear(); // 0x0c - input.hash256_preimages.clear(); // 0x0d - // psbt v2 fields till 0x012 not supported - input.tap_key_sig = None; // 0x013 - input.tap_script_sigs.clear(); // 0x014 - input.tap_scripts.clear(); // 0x015 - input.tap_key_origins.clear(); // 0x16 - input.tap_internal_key = None; // x017 - input.tap_merkle_root = None; // 0x018 } Ok(()) From b238366492a320ce58a01b5549cd2459abd9465f Mon Sep 17 00:00:00 2001 From: sanket1729 Date: Tue, 18 Jul 2023 00:03:43 -0700 Subject: [PATCH 18/41] Update MSRV to 1.48 --- .github/workflows/fuzz.yml | 2 +- .github/workflows/rust.yml | 3 +-- README.md | 5 ++--- bitcoind-tests/Cargo.toml | 2 +- clippy.toml | 2 +- contrib/test.sh | 16 ++++++---------- fuzz/Cargo.toml | 2 +- 7 files changed, 13 insertions(+), 19 deletions(-) diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index e3225591b..bd00b62f8 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -39,7 +39,7 @@ compile_descriptor, key: cache-${{ matrix.target }}-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }} - uses: actions-rs/toolchain@v1 with: - toolchain: 1.58 + toolchain: 1.64 override: true profile: minimal - name: fuzz diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index d035a0d6d..bdc2b76d0 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -54,8 +54,7 @@ jobs: - rust: stable - rust: beta - rust: nightly - - rust: 1.41.1 - - rust: 1.47 + - rust: 1.48 steps: - name: Checkout Crate uses: actions/checkout@v2 diff --git a/README.md b/README.md index 3dcc102a9..2c3d02d31 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ ![Build](https://github.com/rust-bitcoin/rust-miniscript/workflows/Continuous%20integration/badge.svg) -**Minimum Supported Rust Version:** 1.41.1 +**Minimum Supported Rust Version:** 1.48.0 # Miniscript @@ -40,8 +40,7 @@ The cargo feature `std` is enabled by default. At least one of the features `std Enabling the `no-std` feature does not disable `std`. To disable the `std` feature you must disable default features. The `no-std` feature only enables additional features required for this crate to be usable without `std`. Both can be enabled without conflict. ## Minimum Supported Rust Version (MSRV) -This library should always compile with any combination of features (minus -`no-std`) on **Rust 1.41.1** or **Rust 1.47** with `no-std`. +This library should always compile with any combination of features on **Rust 1.48.0**. Some dependencies do not play nicely with our MSRV, if you are running the tests you may need to pin some dependencies. See `./contrib/test.sh` for current pinning. 
diff --git a/bitcoind-tests/Cargo.toml b/bitcoind-tests/Cargo.toml index 57eb1c530..ebed07c62 100644 --- a/bitcoind-tests/Cargo.toml +++ b/bitcoind-tests/Cargo.toml @@ -9,7 +9,7 @@ publish = false [dependencies] miniscript = {path = "../"} -bitcoind = { version = "0.30.0" } +bitcoind = { version = "0.32.0" } actual-rand = { package = "rand", version = "0.8.4"} secp256k1 = {version = "0.27.0", features = ["rand-std"]} internals = { package = "bitcoin-private", version = "0.1.0", default_features = false } \ No newline at end of file diff --git a/clippy.toml b/clippy.toml index 799264ef1..11d46a73f 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1 +1 @@ -msrv = "1.41.1" +msrv = "1.48.0" diff --git a/contrib/test.sh b/contrib/test.sh index 4e7233d82..fd904e954 100755 --- a/contrib/test.sh +++ b/contrib/test.sh @@ -14,18 +14,14 @@ then cargo fmt -- --check fi -# Pin dependencies required to build with Rust 1.41.1 -if cargo --version | grep "1\.41\.0"; then +# Pin dependencies required to build with Rust 1.48.0 +if cargo --version | grep "1\.48\.0"; then cargo update -p once_cell --precise 1.13.1 + cargo update -p quote --precise 1.0.28 + cargo update -p proc-macro2 --precise 1.0.63 cargo update -p serde_json --precise 1.0.99 - cargo update -p serde --precise 1.0.156 -fi - -# Pin dependencies required to build with Rust 1.47.0 -if cargo --version | grep "1\.47\.0"; then - cargo update -p once_cell --precise 1.13.1 - cargo update -p serde_json --precise 1.0.99 - cargo update -p serde --precise 1.0.156 + cargo update -p serde --precise 1.0.152 + cargo update -p log --precise 0.4.18 fi # Test bitcoind integration tests if told to (this only works with the stable toolchain) diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index 542183526..5cf32fca4 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -12,7 +12,7 @@ cargo-fuzz = true honggfuzz = { version = "0.5.55", default-features = false } miniscript = { path = "..", features = [ "compiler" ] } -regex = "1.4" +regex = "1.0" [[bin]] name = "roundtrip_miniscript_str" From 68c7c49d9a5c53de346cd1c562bd0a1250c112a4 Mon Sep 17 00:00:00 2001 From: sanket1729 Date: Tue, 18 Jul 2023 00:20:14 -0700 Subject: [PATCH 19/41] clippy warnings --- src/descriptor/bare.rs | 2 +- src/descriptor/key.rs | 8 ++++---- src/descriptor/sh.rs | 4 ++-- src/descriptor/tr.rs | 5 ++--- src/interpreter/inner.rs | 2 +- src/iter/tree.rs | 2 +- src/miniscript/analyzable.rs | 5 +---- src/miniscript/astelem.rs | 2 +- src/miniscript/context.rs | 4 +--- src/miniscript/decode.rs | 17 ++++++++--------- src/miniscript/mod.rs | 4 ++-- src/policy/compiler.rs | 22 ++++++++++++++++------ src/policy/concrete.rs | 13 ++++--------- src/policy/semantic.rs | 10 ++-------- src/psbt/finalizer.rs | 4 ++-- src/psbt/mod.rs | 2 +- src/util.rs | 8 ++++---- 17 files changed, 53 insertions(+), 61 deletions(-) diff --git a/src/descriptor/bare.rs b/src/descriptor/bare.rs index 074a21b61..771dd8738 100644 --- a/src/descriptor/bare.rs +++ b/src/descriptor/bare.rs @@ -192,7 +192,7 @@ where where T: Translator, { - Ok(Bare::new(self.ms.translate_pk(t)?).map_err(TranslateErr::OuterError)?) 
+ Bare::new(self.ms.translate_pk(t)?).map_err(TranslateErr::OuterError) } } diff --git a/src/descriptor/key.rs b/src/descriptor/key.rs index 7bad264d7..83308e0dc 100644 --- a/src/descriptor/key.rs +++ b/src/descriptor/key.rs @@ -999,13 +999,13 @@ impl MiniscriptKey for DescriptorPublicKey { } fn is_x_only_key(&self) -> bool { - match self { + matches!( + self, DescriptorPublicKey::Single(SinglePub { key: SinglePubKey::XOnly(ref _key), .. - }) => true, - _ => false, - } + }) + ) } fn num_der_paths(&self) -> usize { diff --git a/src/descriptor/sh.rs b/src/descriptor/sh.rs index a421058a7..95a3997d6 100644 --- a/src/descriptor/sh.rs +++ b/src/descriptor/sh.rs @@ -348,13 +348,13 @@ impl Sh { let witness_script = wsh.inner_script().to_v0_p2wsh(); let push_bytes = <&PushBytes>::try_from(witness_script.as_bytes()) .expect("Witness script is not too large"); - script::Builder::new().push_slice(&push_bytes).into_script() + script::Builder::new().push_slice(push_bytes).into_script() } ShInner::Wpkh(ref wpkh) => { let redeem_script = wpkh.script_pubkey(); let push_bytes: &PushBytes = <&PushBytes>::try_from(redeem_script.as_bytes()).expect("Script not too large"); - script::Builder::new().push_slice(&push_bytes).into_script() + script::Builder::new().push_slice(push_bytes).into_script() } ShInner::SortedMulti(..) | ShInner::Ms(..) => ScriptBuf::new(), } diff --git a/src/descriptor/tr.rs b/src/descriptor/tr.rs index 4e0b6a475..32cd776e5 100644 --- a/src/descriptor/tr.rs +++ b/src/descriptor/tr.rs @@ -350,7 +350,7 @@ impl Tr { let builder = bitcoin::blockdata::script::Builder::new(); builder .push_opcode(opcodes::all::OP_PUSHNUM_1) - .push_slice(&output_key.serialize()) + .push_slice(output_key.serialize()) .into_script() } @@ -405,8 +405,7 @@ where type Item = (u8, &'a Miniscript); fn next(&mut self) -> Option { - while !self.stack.is_empty() { - let (depth, last) = self.stack.pop().expect("Size checked above"); + while let Some((depth, last)) = self.stack.pop() { match *last { TapTree::Tree(ref l, ref r) => { self.stack.push((depth + 1, r)); diff --git a/src/interpreter/inner.rs b/src/interpreter/inner.rs index 59de53324..266670eaf 100644 --- a/src/interpreter/inner.rs +++ b/src/interpreter/inner.rs @@ -43,7 +43,7 @@ fn script_from_stack_elem( ) -> Result, Error> { match *elem { stack::Element::Push(sl) => { - Miniscript::parse_with_ext(&bitcoin::Script::from_bytes(sl), &ExtParams::allow_all()) + Miniscript::parse_with_ext(bitcoin::Script::from_bytes(sl), &ExtParams::allow_all()) .map_err(Error::from) } stack::Element::Satisfied => { diff --git a/src/iter/tree.rs b/src/iter/tree.rs index 4ff0e1ee9..10dbf5339 100644 --- a/src/iter/tree.rs +++ b/src/iter/tree.rs @@ -220,7 +220,7 @@ impl Iterator for PreOrderIter { self.stack.push(left); } Tree::Nary(children) => { - self.stack.extend(children.into_iter().rev().cloned()); + self.stack.extend(children.iter().rev().cloned()); } } Some(top) diff --git a/src/miniscript/analyzable.rs b/src/miniscript/analyzable.rs index 8b1929a65..a1795fa62 100644 --- a/src/miniscript/analyzable.rs +++ b/src/miniscript/analyzable.rs @@ -221,10 +221,7 @@ impl Miniscript { /// Whether the given miniscript contains a raw pkh fragment pub fn contains_raw_pkh(&self) -> bool { - self.iter().any(|ms| match ms.node { - Terminal::RawPkH(_) => true, - _ => false, - }) + self.iter().any(|ms| matches!(ms.node, Terminal::RawPkH(_))) } /// Check whether the underlying Miniscript is safe under the current context diff --git a/src/miniscript/astelem.rs b/src/miniscript/astelem.rs index 
5c85b53d5..22fbcd537 100644 --- a/src/miniscript/astelem.rs +++ b/src/miniscript/astelem.rs @@ -465,7 +465,7 @@ impl Terminal { Terminal::RawPkH(ref hash) => builder .push_opcode(opcodes::all::OP_DUP) .push_opcode(opcodes::all::OP_HASH160) - .push_slice(&hash.to_byte_array()) + .push_slice(hash.to_byte_array()) .push_opcode(opcodes::all::OP_EQUALVERIFY), Terminal::After(t) => builder .push_int(absolute::LockTime::from(t).to_consensus_u32() as i64) diff --git a/src/miniscript/context.rs b/src/miniscript/context.rs index 3d820de96..f3563b83c 100644 --- a/src/miniscript/context.rs +++ b/src/miniscript/context.rs @@ -430,9 +430,7 @@ impl ScriptContext for Legacy { } Ok(()) } - Terminal::MultiA(..) => { - return Err(ScriptContextError::MultiANotAllowed); - } + Terminal::MultiA(..) => Err(ScriptContextError::MultiANotAllowed), _ => Ok(()), } } diff --git a/src/miniscript/decode.rs b/src/miniscript/decode.rs index dbf4adcfe..8c1b66364 100644 --- a/src/miniscript/decode.rs +++ b/src/miniscript/decode.rs @@ -651,13 +651,12 @@ pub fn parse( } fn is_and_v(tokens: &mut TokenIter) -> bool { - match tokens.peek() { - None - | Some(&Tk::If) - | Some(&Tk::NotIf) - | Some(&Tk::Else) - | Some(&Tk::ToAltStack) - | Some(&Tk::Swap) => false, - _ => true, - } + !matches!( + tokens.peek(), + None | Some(&Tk::If) + | Some(&Tk::NotIf) + | Some(&Tk::Else) + | Some(&Tk::ToAltStack) + | Some(&Tk::Swap) + ) } diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index db0b2baa6..320e80c4c 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -453,8 +453,8 @@ impl Miniscript { for data in Arc::new(self.clone()).post_order_iter() { let new_term = if let Terminal::RawPkH(ref p) = data.node.node { match pk_map.get(p) { - Some(pk) => Terminal::PkH(pk.clone()).into(), - None => Terminal::RawPkH(*p).into(), + Some(pk) => Terminal::PkH(pk.clone()), + None => Terminal::RawPkH(*p), } } else { data.node.node.clone() diff --git a/src/policy/compiler.rs b/src/policy/compiler.rs index 3d90a9f24..2a2be95fa 100644 --- a/src/policy/compiler.rs +++ b/src/policy/compiler.rs @@ -25,10 +25,17 @@ type PolicyCache = BTreeMap<(Concrete, OrdF64, Option), BTreeMap>>; /// Ordered f64 for comparison. -#[derive(Copy, Clone, PartialEq, PartialOrd, Debug)] +#[derive(Copy, Clone, PartialEq, Debug)] pub(crate) struct OrdF64(pub f64); impl Eq for OrdF64 {} +// We could derive PartialOrd, but we can't derive Ord, and clippy wants us +// to derive both or neither. Better to be explicit. +impl PartialOrd for OrdF64 { + fn partial_cmp(&self, other: &OrdF64) -> Option { + self.0.partial_cmp(&other.0) + } +} impl Ord for OrdF64 { fn cmp(&self, other: &OrdF64) -> cmp::Ordering { // will panic if given NaN @@ -525,6 +532,7 @@ impl AstElemExt { } /// Different types of casts possible for each node. +#[allow(clippy::type_complexity)] #[derive(Copy, Clone)] struct Cast { node: fn(Arc>) -> Terminal, @@ -675,7 +683,7 @@ fn insert_elem( // whose subtype is the current element and have worse cost. 
*map = mem::take(map) .into_iter() - .filter(|&(ref existing_key, ref existing_elem)| { + .filter(|(existing_key, existing_elem)| { let existing_elem_cost = existing_elem.cost_1d(sat_prob, dissat_prob); !(elem_key.is_subtype(*existing_key) && existing_elem_cost >= elem_cost) }) @@ -864,7 +872,7 @@ where let rw = subs[1].0 as f64 / total; //and-or - if let (&Concrete::And(ref x), _) = (&subs[0].1, &subs[1].1) { + if let (Concrete::And(x), _) = (&subs[0].1, &subs[1].1) { let mut a1 = best_compilations( policy_cache, &x[0], @@ -887,7 +895,7 @@ where compile_tern!(&mut a1, &mut b2, &mut c, [lw, rw]); compile_tern!(&mut b1, &mut a2, &mut c, [lw, rw]); }; - if let (_, &Concrete::And(ref x)) = (&subs[0].1, &subs[1].1) { + if let (_, Concrete::And(x)) = (&subs[0].1, &subs[1].1) { let mut a1 = best_compilations( policy_cache, &x[0], @@ -959,7 +967,7 @@ where let mut best_es = Vec::with_capacity(n); let mut best_ws = Vec::with_capacity(n); - let mut min_value = (0, f64::INFINITY as f64); + let mut min_value = (0, f64::INFINITY); for (i, ast) in subs.iter().enumerate() { let sp = sat_prob * k_over_n; //Expressions must be dissatisfiable @@ -1048,6 +1056,7 @@ where /// Helper function to compile different types of binary fragments. /// `sat_prob` and `dissat_prob` represent the sat and dissat probabilities of /// root or. `weights` represent the odds for taking each sub branch +#[allow(clippy::too_many_arguments)] fn compile_binary( policy_cache: &mut PolicyCache, policy: &Concrete, @@ -1082,6 +1091,7 @@ where /// Helper function to compile different order of and_or fragments. /// `sat_prob` and `dissat_prob` represent the sat and dissat probabilities of /// root and_or node. `weights` represent the odds for taking each sub branch +#[allow(clippy::too_many_arguments)] fn compile_tern( policy_cache: &mut PolicyCache, policy: &Concrete, @@ -1162,7 +1172,7 @@ where { best_compilations(policy_cache, policy, sat_prob, dissat_prob)? .into_iter() - .filter(|&(ref key, ref val)| { + .filter(|(key, val)| { key.ty.corr.base == basic_type && key.ty.corr.unit && val.ms.ty.mall.dissat == types::Dissat::Unique diff --git a/src/policy/concrete.rs b/src/policy/concrete.rs index 9cb184738..0be8da1df 100644 --- a/src/policy/concrete.rs +++ b/src/policy/concrete.rs @@ -305,17 +305,15 @@ impl Policy { Policy::Or(ref subs) => { let total_odds: usize = subs.iter().map(|(ref k, _)| k).sum(); subs.iter() - .map(|(k, ref policy)| { + .flat_map(|(k, ref policy)| { policy.to_tapleaf_prob_vec(prob * *k as f64 / total_odds as f64) }) - .flatten() .collect::>() } Policy::Threshold(k, ref subs) if *k == 1 => { let total_odds = subs.len(); subs.iter() - .map(|policy| policy.to_tapleaf_prob_vec(prob / total_odds as f64)) - .flatten() + .flat_map(|policy| policy.to_tapleaf_prob_vec(prob / total_odds as f64)) .collect::>() } x => vec![(prob, x.clone())], @@ -333,10 +331,7 @@ impl Policy { let key_prob_map: HashMap<_, _> = self .to_tapleaf_prob_vec(1.0) .into_iter() - .filter(|(_, ref pol)| match *pol { - Concrete::Key(..) 
=> true, - _ => false, - }) + .filter(|(_, ref pol)| matches!(*pol, Concrete::Key(..))) .map(|(prob, key)| (key, prob)) .collect(); @@ -359,7 +354,7 @@ impl Policy { } } match (internal_key, unspendable_key) { - (Some(ref key), _) => Ok((key.clone(), self.translate_unsatisfiable_pk(&key))), + (Some(ref key), _) => Ok((key.clone(), self.translate_unsatisfiable_pk(key))), (_, Some(key)) => Ok((key, self)), _ => Err(errstr("No viable internal key found.")), } diff --git a/src/policy/semantic.rs b/src/policy/semantic.rs index 38dbbfd70..95a155a27 100644 --- a/src/policy/semantic.rs +++ b/src/policy/semantic.rs @@ -467,10 +467,7 @@ impl Policy { /// normalized form is trivial, the caller is expected to normalize the /// policy first. pub fn is_trivial(&self) -> bool { - match *self { - Policy::Trivial => true, - _ => false, - } + matches!(*self, Policy::Trivial) } /// Detects a false/unsatisfiable policy. @@ -479,10 +476,7 @@ impl Policy { /// the normalized form is unsatisfiable, the caller is expected to /// normalize the policy first. pub fn is_unsatisfiable(&self) -> bool { - match *self { - Policy::Unsatisfiable => true, - _ => false, - } + matches!(*self, Policy::Unsatisfiable) } /// Helper function to do the recursion in `timelocks`. diff --git a/src/psbt/finalizer.rs b/src/psbt/finalizer.rs index c42966f42..93cc1887d 100644 --- a/src/psbt/finalizer.rs +++ b/src/psbt/finalizer.rs @@ -165,7 +165,7 @@ fn get_descriptor(psbt: &Psbt, index: usize) -> Result, In *script_pubkey == addr.script_pubkey() }); match partial_sig_contains_pk { - Some((pk, _sig)) => Descriptor::new_pkh(*pk).map_err(|e| InputError::from(e)), + Some((pk, _sig)) => Descriptor::new_pkh(*pk).map_err(InputError::from), None => Err(InputError::MissingPubkey), } } else if script_pubkey.is_v0_p2wpkh() { @@ -316,7 +316,7 @@ fn interpreter_inp_check>( let cltv = psbt.unsigned_tx.lock_time; let csv = psbt.unsigned_tx.input[index].sequence; let interpreter = - interpreter::Interpreter::from_txdata(&spk, &script_sig, witness, csv, cltv.into()) + interpreter::Interpreter::from_txdata(&spk, script_sig, witness, csv, cltv) .map_err(|e| Error::InputError(InputError::Interpreter(e), index))?; let iter = interpreter.iter(secp, &psbt.unsigned_tx, index, utxos); if let Some(error) = iter.filter_map(Result::err).next() { diff --git a/src/psbt/mod.rs b/src/psbt/mod.rs index 668669fd5..d3e37eceb 100644 --- a/src/psbt/mod.rs +++ b/src/psbt/mod.rs @@ -341,7 +341,7 @@ impl<'psbt, Pk: MiniscriptKey + ToPublicKey> Satisfier for PsbtInputSatisfie return false; } - let lock_time = absolute::LockTime::from(self.psbt.unsigned_tx.lock_time); + let lock_time = self.psbt.unsigned_tx.lock_time; >::check_after(&lock_time, n) } diff --git a/src/util.rs b/src/util.rs index baf49066e..0564108a3 100644 --- a/src/util.rs +++ b/src/util.rs @@ -26,7 +26,7 @@ pub(crate) fn witness_to_scriptsig(witness: &[Vec]) -> ScriptBuf { } else { let push = <&PushBytes>::try_from(wit.as_slice()) .expect("All pushes in miniscript are <73 bytes"); - b = b.push_slice(&push) + b = b.push_slice(push) } } b.into_script() @@ -55,7 +55,7 @@ impl MsKeyBuilder for script::Builder { { match Ctx::sig_type() { context::SigType::Ecdsa => self.push_key(&key.to_public_key()), - context::SigType::Schnorr => self.push_slice(&key.to_x_only_pubkey().serialize()), + context::SigType::Schnorr => self.push_slice(key.to_x_only_pubkey().serialize()), } } @@ -65,9 +65,9 @@ impl MsKeyBuilder for script::Builder { Ctx: ScriptContext, { match Ctx::sig_type() { - context::SigType::Ecdsa => 
self.push_slice(&key.to_public_key().pubkey_hash()), + context::SigType::Ecdsa => self.push_slice(key.to_public_key().pubkey_hash()), context::SigType::Schnorr => { - self.push_slice(&PubkeyHash::hash(&key.to_x_only_pubkey().serialize())) + self.push_slice(PubkeyHash::hash(&key.to_x_only_pubkey().serialize())) } } } From a3966059fa38e64800f86306fb9312e67ba7f1dc Mon Sep 17 00:00:00 2001 From: sanket1729 Date: Tue, 18 Jul 2023 18:15:42 -0700 Subject: [PATCH 20/41] Fix taproot internal key parsing Caught by fuzz tests. We directly assumed that the internal key is a valid string if it was between '(' and ')' and did not contain any ',' --- fuzz/fuzz_targets/roundtrip_descriptor.rs | 24 +++++++++++++-- src/descriptor/tr.rs | 36 +++++++++++++---------- 2 files changed, 42 insertions(+), 18 deletions(-) diff --git a/fuzz/fuzz_targets/roundtrip_descriptor.rs b/fuzz/fuzz_targets/roundtrip_descriptor.rs index d74569e88..b9363f130 100644 --- a/fuzz/fuzz_targets/roundtrip_descriptor.rs +++ b/fuzz/fuzz_targets/roundtrip_descriptor.rs @@ -23,9 +23,27 @@ fn main() { #[cfg(test)] mod tests { - use super::*; + fn extend_vec_from_hex(hex: &str, out: &mut Vec) { + let mut b = 0; + for (idx, c) in hex.as_bytes().iter().enumerate() { + b <<= 4; + match *c { + b'A'..=b'F' => b |= c - b'A' + 10, + b'a'..=b'f' => b |= c - b'a' + 10, + b'0'..=b'9' => b |= c - b'0', + _ => panic!("Bad hex"), + } + if (idx & 1) == 1 { + out.push(b); + b = 0; + } + } + } + #[test] - fn test() { - do_test(b"pkh()"); + fn duplicate_crash3() { + let mut a = Vec::new(); + extend_vec_from_hex("747228726970656d616e645f6e5b5c79647228726970656d616e645f6e5b5c7964646464646464646464646464646464646464646464646464646b5f6872702c29", &mut a); + super::do_test(&a); } } diff --git a/src/descriptor/tr.rs b/src/descriptor/tr.rs index 32cd776e5..18e6da1f1 100644 --- a/src/descriptor/tr.rs +++ b/src/descriptor/tr.rs @@ -527,8 +527,14 @@ fn parse_tr_tree(s: &str) -> Result { if s.len() > 3 && &s[..3] == "tr(" && s.as_bytes()[s.len() - 1] == b')' { let rest = &s[3..s.len() - 1]; if !rest.contains(',') { + let key = expression::Tree::from_str(rest)?; + if !key.args.is_empty() { + return Err(Error::Unexpected( + "invalid taproot internal key".to_string(), + )); + } let internal_key = expression::Tree { - name: rest, + name: key.name, args: vec![], }; return Ok(expression::Tree { @@ -540,8 +546,14 @@ fn parse_tr_tree(s: &str) -> Result { let (key, script) = split_once(rest, ',') .ok_or_else(|| Error::BadDescriptor("invalid taproot descriptor".to_string()))?; + let key = expression::Tree::from_str(key)?; + if !key.args.is_empty() { + return Err(Error::Unexpected( + "invalid taproot internal key".to_string(), + )); + } let internal_key = expression::Tree { - name: key, + name: key.name, args: vec![], }; if script.is_empty() { @@ -568,19 +580,13 @@ fn split_once(inp: &str, delim: char) -> Option<(&str, &str)> { if inp.is_empty() { None } else { - let mut found = inp.len(); - for (idx, ch) in inp.chars().enumerate() { - if ch == delim { - found = idx; - break; - } - } - // No comma or trailing comma found - if found >= inp.len() - 1 { - Some((inp, "")) - } else { - Some((&inp[..found], &inp[found + 1..])) - } + // find the first character that matches delim + let res = inp + .chars() + .position(|ch| ch == delim) + .map(|idx| (&inp[..idx], &inp[idx + 1..])) + .unwrap_or((inp, "")); + Some(res) } } From 60fde9e9519125d3b1ecccea302a0a6e6ce484fa Mon Sep 17 00:00:00 2001 From: sanket1729 Date: Tue, 18 Jul 2023 23:54:26 -0700 Subject: [PATCH 21/41] Check that we only 
accept INPUT_CHARSET in from_str This results in fuzzer crash when trying compute the checksum in `Display` implemenation of Descriptor --- src/descriptor/checksum.rs | 2 +- src/descriptor/tr.rs | 6 +----- src/expression.rs | 26 +++++++++++++++++++------- src/policy/concrete.rs | 6 +----- src/policy/semantic.rs | 6 +----- 5 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/descriptor/checksum.rs b/src/descriptor/checksum.rs index 4613db3fd..23ff72aa2 100644 --- a/src/descriptor/checksum.rs +++ b/src/descriptor/checksum.rs @@ -8,10 +8,10 @@ use core::fmt; use core::iter::FromIterator; +pub use crate::expression::INPUT_CHARSET; use crate::prelude::*; use crate::Error; -const INPUT_CHARSET: &str = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "; const CHECKSUM_CHARSET: &[u8] = b"qpzry9x8gf2tvdw0s3jn54khce6mua7l"; fn poly_mod(mut c: u64, val: u64) -> u64 { diff --git a/src/descriptor/tr.rs b/src/descriptor/tr.rs index 18e6da1f1..af8aeedab 100644 --- a/src/descriptor/tr.rs +++ b/src/descriptor/tr.rs @@ -518,11 +518,7 @@ impl fmt::Display for Tr { // Helper function to parse string into miniscript tree form fn parse_tr_tree(s: &str) -> Result { - for ch in s.bytes() { - if !ch.is_ascii() { - return Err(Error::Unprintable(ch)); - } - } + expression::check_valid_chars(s)?; if s.len() > 3 && &s[..3] == "tr(" && s.as_bytes()[s.len() - 1] == b')' { let rest = &s[3..s.len() - 1]; diff --git a/src/expression.rs b/src/expression.rs index d3363abc4..22a3b53a2 100644 --- a/src/expression.rs +++ b/src/expression.rs @@ -8,6 +8,9 @@ use core::str::FromStr; use crate::prelude::*; use crate::{errstr, Error, MAX_RECURSION_DEPTH}; +/// Allowed characters are descriptor strings. +pub const INPUT_CHARSET: &str = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "; + #[derive(Debug)] /// A token of the form `x(...)` or `x` pub struct Tree<'a> { @@ -166,13 +169,7 @@ impl<'a> Tree<'a> { /// Parses a tree from a string #[allow(clippy::should_implement_trait)] // Cannot use std::str::FromStr because of lifetimes. pub fn from_str(s: &'a str) -> Result, Error> { - // Filter out non-ASCII because we byte-index strings all over the - // place and Rust gets very upset when you splinch a string. - for ch in s.bytes() { - if !ch.is_ascii() { - return Err(Error::Unprintable(ch)); - } - } + check_valid_chars(s)?; let (top, rem) = Tree::from_slice(s)?; if rem.is_empty() { @@ -183,6 +180,21 @@ impl<'a> Tree<'a> { } } +/// Filter out non-ASCII because we byte-index strings all over the +/// place and Rust gets very upset when you splinch a string. +pub fn check_valid_chars(s: &str) -> Result<(), Error> { + for ch in s.bytes() { + if !ch.is_ascii() { + return Err(Error::Unprintable(ch)); + } + // TODO: Avoid linear search overhead by using OnceCell to cache this in a BTreeMap. 
+ INPUT_CHARSET + .find(char::from(ch)) + .ok_or_else(|| Error::Unprintable(ch))?; + } + Ok(()) +} + /// Parse a string as a u32, for timelocks or thresholds pub fn parse_num(s: &str) -> Result { if s.len() > 1 { diff --git a/src/policy/concrete.rs b/src/policy/concrete.rs index 0be8da1df..74c675397 100644 --- a/src/policy/concrete.rs +++ b/src/policy/concrete.rs @@ -1061,11 +1061,7 @@ impl_from_str!( Policy, type Err = Error;, fn from_str(s: &str) -> Result, Error> { - for ch in s.as_bytes() { - if *ch < 20 || *ch > 127 { - return Err(Error::Unprintable(*ch)); - } - } + expression::check_valid_chars(s)?; let tree = expression::Tree::from_str(s)?; let policy: Policy = FromTree::from_tree(&tree)?; diff --git a/src/policy/semantic.rs b/src/policy/semantic.rs index 95a155a27..10b08545d 100644 --- a/src/policy/semantic.rs +++ b/src/policy/semantic.rs @@ -315,11 +315,7 @@ impl_from_str!( Policy, type Err = Error;, fn from_str(s: &str) -> Result, Error> { - for ch in s.as_bytes() { - if *ch < 20 || *ch > 127 { - return Err(Error::Unprintable(*ch)); - } - } + expression::check_valid_chars(s)?; let tree = expression::Tree::from_str(s)?; expression::FromTree::from_tree(&tree) From 6b76997b14fd606b6ceba884012512c06200a4a4 Mon Sep 17 00:00:00 2001 From: sanket1729 Date: Wed, 19 Jul 2023 12:11:46 -0700 Subject: [PATCH 22/41] Use a static map to lookup which characters are allowed Also improves the efficiency of descriptor checksum code. --- src/descriptor/checksum.rs | 13 +++++++++---- src/expression.rs | 39 ++++++++++++++++++++++++++++++++++---- 2 files changed, 44 insertions(+), 8 deletions(-) diff --git a/src/descriptor/checksum.rs b/src/descriptor/checksum.rs index 23ff72aa2..10aaaacfc 100644 --- a/src/descriptor/checksum.rs +++ b/src/descriptor/checksum.rs @@ -8,7 +8,7 @@ use core::fmt; use core::iter::FromIterator; -pub use crate::expression::INPUT_CHARSET; +pub use crate::expression::VALID_CHARS; use crate::prelude::*; use crate::Error; @@ -101,9 +101,14 @@ impl Engine { /// state! It is safe to continue feeding it data but the result will not be meaningful. pub fn input(&mut self, s: &str) -> Result<(), Error> { for ch in s.chars() { - let pos = INPUT_CHARSET.find(ch).ok_or_else(|| { - Error::BadDescriptor(format!("Invalid character in checksum: '{}'", ch)) - })? as u64; + let pos = VALID_CHARS + .get(ch as usize) + .ok_or_else(|| { + Error::BadDescriptor(format!("Invalid character in checksum: '{}'", ch)) + })? + .ok_or_else(|| { + Error::BadDescriptor(format!("Invalid character in checksum: '{}'", ch)) + })? as u64; self.c = poly_mod(self.c, pos & 31); self.cls = self.cls * 3 + (pos >> 5); self.clscount += 1; diff --git a/src/expression.rs b/src/expression.rs index 22a3b53a2..580b8c5b9 100644 --- a/src/expression.rs +++ b/src/expression.rs @@ -11,6 +11,26 @@ use crate::{errstr, Error, MAX_RECURSION_DEPTH}; /// Allowed characters are descriptor strings. pub const INPUT_CHARSET: &str = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "; +/// Map of valid characters in descriptor strings. 
+#[rustfmt::skip] +pub const VALID_CHARS: [Option; 128] = [ + None, None, None, None, None, None, None, None, None, None, None, None, None, + None, None, None, None, None, None, None, None, None, None, None, None, None, + None, None, None, None, None, None, Some(94), Some(59), Some(92), Some(91), + Some(28), Some(29), Some(50), Some(15), Some(10), Some(11), Some(17), Some(51), + Some(14), Some(52), Some(53), Some(16), Some(0), Some(1), Some(2), Some(3), + Some(4), Some(5), Some(6), Some(7), Some(8), Some(9), Some(27), Some(54), + Some(55), Some(56), Some(57), Some(58), Some(26), Some(82), Some(83), + Some(84), Some(85), Some(86), Some(87), Some(88), Some(89), Some(32), Some(33), + Some(34), Some(35), Some(36), Some(37), Some(38), Some(39), Some(40), Some(41), + Some(42), Some(43), Some(44), Some(45), Some(46), Some(47), Some(48), Some(49), + Some(12), Some(93), Some(13), Some(60), Some(61), Some(90), Some(18), Some(19), + Some(20), Some(21), Some(22), Some(23), Some(24), Some(25), Some(64), Some(65), + Some(66), Some(67), Some(68), Some(69), Some(70), Some(71), Some(72), Some(73), + Some(74), Some(75), Some(76), Some(77), Some(78), Some(79), Some(80), Some(81), + Some(30), Some(62), Some(31), Some(63), None, +]; + #[derive(Debug)] /// A token of the form `x(...)` or `x` pub struct Tree<'a> { @@ -187,10 +207,12 @@ pub fn check_valid_chars(s: &str) -> Result<(), Error> { if !ch.is_ascii() { return Err(Error::Unprintable(ch)); } - // TODO: Avoid linear search overhead by using OnceCell to cache this in a BTreeMap. - INPUT_CHARSET - .find(char::from(ch)) - .ok_or_else(|| Error::Unprintable(ch))?; + // Index bounds: We know that ch is ASCII, so it is <= 127. + if VALID_CHARS[ch as usize].is_none() { + return Err(Error::Unexpected( + "Only characters in INPUT_CHARSET are allowed".to_string(), + )); + } } Ok(()) } @@ -265,4 +287,13 @@ mod tests { assert!(parse_num("+6").is_err()); assert!(parse_num("-6").is_err()); } + + #[test] + fn test_valid_char_map() { + let mut valid_chars = [None; 128]; + for (i, ch) in super::INPUT_CHARSET.chars().enumerate() { + valid_chars[ch as usize] = Some(i as u8); + } + assert_eq!(valid_chars, super::VALID_CHARS); + } } From 82b836e91465c41d9a1c48ed545dc71051a3a054 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Tue, 25 Jul 2023 14:52:29 +1000 Subject: [PATCH 23/41] rustfmt: Change fn_args_layout to fn_params_layout Running `cargo +nightly fmt` emits a bunch of warnings of form: Warning: the `fn_args_layout` option is deprecated. Use `fn_params_layout`. instead As suggested change the `fn_args_layout` option to `fn_params_layout`. --- rustfmt.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rustfmt.toml b/rustfmt.toml index 37abad596..b2a63aa46 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -44,7 +44,7 @@ enum_discrim_align_threshold = 0 match_arm_blocks = true match_arm_leading_pipes = "Never" force_multiline_blocks = false -fn_args_layout = "Tall" +fn_params_layout = "Tall" brace_style = "SameLineWhere" control_brace_style = "AlwaysSameLine" trailing_semicolon = true From 1a33e67503e84f549bb739650118c96ead9f0a0f Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Oct 2022 13:57:11 +1100 Subject: [PATCH 24/41] Remove unstable feature Currently it is not possible to run the test suite with `cargo test --all-features`, this is because we use a feature called `unstable` to enable the unstable `test` crate that is used for benchmarking. There is another way to conditionally enable the test crate and guard the benchmark code. 
We can use a custom configuration option `bench` and then set it using `RUSTFLAGS='--cfg=bench`. --- Cargo.toml | 1 - README.md | 5 +++++ contrib/test.sh | 18 ++++++++++++++++-- src/lib.rs | 6 ++++-- src/policy/compiler.rs | 2 +- src/policy/mod.rs | 2 +- 6 files changed, 27 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5c35fd1c8..b29043148 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,6 @@ no-std = ["hashbrown", "bitcoin/no-std"] compiler = [] trace = [] -unstable = [] serde = ["actual-serde", "bitcoin/serde"] rand = ["bitcoin/rand"] base64 = ["bitcoin/base64"] diff --git a/README.md b/README.md index 2c3d02d31..21fe72b8d 100644 --- a/README.md +++ b/README.md @@ -53,6 +53,11 @@ architectural mismatches. If you have any questions or ideas you want to discuss please join us in [##miniscript](https://web.libera.chat/?channels=##miniscript) on Libera. +## Benchmarks + +We use a custom Rust compiler configuration conditional to guard the bench mark code. To run the +bench marks use: `RUSTFLAGS='--cfg=bench' cargo +nightly bench`. + ## Release Notes diff --git a/contrib/test.sh b/contrib/test.sh index fd904e954..80bd8ef4c 100755 --- a/contrib/test.sh +++ b/contrib/test.sh @@ -7,6 +7,12 @@ FEATURES="compiler serde rand base64" cargo --version rustc --version +# Cache the toolchain we are using. +NIGHTLY=false +if cargo --version | grep nightly; then + NIGHTLY=true +fi + # Format if told to if [ "$DO_FMT" = true ] then @@ -77,10 +83,18 @@ then done fi -# Bench if told to (this only works with the nightly toolchain) +# Bench if told to, only works with non-stable toolchain (nightly, beta). if [ "$DO_BENCH" = true ] then - cargo bench --features="unstable compiler" + if [ "$NIGHTLY" = false ]; then + if [ -n "$RUSTUP_TOOLCHAIN" ]; then + echo "RUSTUP_TOOLCHAIN is set to a non-nightly toolchain but DO_BENCH requires a nightly toolchain" + else + echo "DO_BENCH requires a nightly toolchain" + fi + exit 1 + fi + RUSTFLAGS='--cfg=bench' cargo bench fi # Build the docs if told to (this only works with the nightly toolchain) diff --git a/src/lib.rs b/src/lib.rs index 084f4ac9a..7a51f1405 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -74,7 +74,8 @@ //! #![cfg_attr(all(not(feature = "std"), not(test)), no_std)] -#![cfg_attr(all(test, feature = "unstable"), feature(test))] +// Experimental features we need. +#![cfg_attr(bench, feature(test))] // Coding conventions #![deny(unsafe_code)] #![deny(non_upper_case_globals)] @@ -107,7 +108,8 @@ extern crate core; #[cfg(feature = "serde")] pub use actual_serde as serde; -#[cfg(all(test, feature = "unstable"))] + +#[cfg(bench)] extern crate test; #[macro_use] diff --git a/src/policy/compiler.rs b/src/policy/compiler.rs index 2a2be95fa..9bba05589 100644 --- a/src/policy/compiler.rs +++ b/src/policy/compiler.rs @@ -1598,7 +1598,7 @@ mod tests { } } -#[cfg(all(test, feature = "unstable"))] +#[cfg(bench)] mod benches { use std::str::FromStr; diff --git a/src/policy/mod.rs b/src/policy/mod.rs index 857ae0207..292feff78 100644 --- a/src/policy/mod.rs +++ b/src/policy/mod.rs @@ -541,7 +541,7 @@ mod tests { } } -#[cfg(all(test, feature = "compiler", feature = "unstable"))] +#[cfg(all(bench, feature = "compiler"))] mod benches { use core::str::FromStr; From 1081b468be4042eda6b3bf5c63cbe8e03e90d847 Mon Sep 17 00:00:00 2001 From: "Tobin C. 
Harding" Date: Thu, 27 Jul 2023 13:25:50 +1000 Subject: [PATCH 25/41] Pin serde_test to 1.0.152 This is just a dev dependency so I did not think that hard about it, just took a stab at using the same version that we in for `serde` and it works. --- contrib/test.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/test.sh b/contrib/test.sh index 80bd8ef4c..727c605b0 100755 --- a/contrib/test.sh +++ b/contrib/test.sh @@ -28,6 +28,7 @@ if cargo --version | grep "1\.48\.0"; then cargo update -p serde_json --precise 1.0.99 cargo update -p serde --precise 1.0.152 cargo update -p log --precise 0.4.18 + cargo update -p serde_test --precise 1.0.152 fi # Test bitcoind integration tests if told to (this only works with the stable toolchain) From 211f0cb09f5f943e435b6c6691b8660da288422a Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 14:47:08 +1000 Subject: [PATCH 26/41] Combine terminal variants We have various variants in this pattern that return the same thing, we can combine them to make the code more terse with no loss of clarity. --- src/miniscript/satisfy.rs | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/src/miniscript/satisfy.rs b/src/miniscript/satisfy.rs index 037bdccb1..0b526ca77 100644 --- a/src/miniscript/satisfy.rs +++ b/src/miniscript/satisfy.rs @@ -1332,15 +1332,11 @@ impl Satisfaction { stack: Witness::empty(), has_sig: false, }, - Terminal::True => Satisfaction { - stack: Witness::Impossible, - has_sig: false, - }, - Terminal::Older(_) => Satisfaction { - stack: Witness::Impossible, - has_sig: false, - }, - Terminal::After(_) => Satisfaction { + Terminal::True + | Terminal::Older(_) + | Terminal::After(_) + | Terminal::Verify(_) + | Terminal::OrC(..) => Satisfaction { stack: Witness::Impossible, has_sig: false, }, @@ -1361,10 +1357,6 @@ impl Satisfaction { stack: Witness::push_0(), has_sig: false, }, - Terminal::Verify(_) => Satisfaction { - stack: Witness::Impossible, - has_sig: false, - }, Terminal::AndV(ref v, ref other) => { let vsat = Self::satisfy_helper(&v.node, stfr, root_has_sig, leaf_hash, min_fn, thresh_fn); @@ -1406,10 +1398,6 @@ impl Satisfaction { has_sig: rnsat.has_sig || lnsat.has_sig, } } - Terminal::OrC(..) => Satisfaction { - stack: Witness::Impossible, - has_sig: false, - }, Terminal::OrI(ref l, ref r) => { let lnsat = Self::dissatisfy_helper( &l.node, From 87a16ef9a4d8f01be53720fef2ca63003d774e4c Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 13:41:49 +1000 Subject: [PATCH 27/41] Remove written in clause Recently we discussed in `rust-bitcoin` the pointlessness of the `\\ Written in ...` clause above the license terms. We came to the conclusion that: - It probably wasn't written then and who cares anyway because we patch files all the time - It almost certainly wasn't written by just this person because we have a bunch of contributors - Use of "the rust bitcoin devs" is just noise and no signal Remove the "written in" clause if it contains "the rust bitcoin devs" or Andrew's name (because he was part of the conversation in rust-bitcoin) Do no remove clauses that have Sanket's name or Maxim's. Its up to them if they want attribution so we leave it there for now. Leave Andrew's name in `lib.rs` as a little nod-of-the-head to him as the original author. I do not know the full relationship between Sanket and Andrew and who did what so please forgive me Sanket if this is wrong. 
--- src/descriptor/bare.rs | 1 - src/descriptor/mod.rs | 1 - src/descriptor/segwitv0.rs | 1 - src/descriptor/sh.rs | 1 - src/descriptor/sortedmulti.rs | 1 - src/expression.rs | 1 - src/iter/mod.rs | 1 - src/iter/tree.rs | 1 - src/miniscript/analyzable.rs | 1 - src/miniscript/astelem.rs | 1 - src/miniscript/decode.rs | 1 - src/miniscript/lex.rs | 1 - src/miniscript/mod.rs | 1 - src/miniscript/satisfy.rs | 1 - src/miniscript/types/correctness.rs | 1 - src/miniscript/types/malleability.rs | 1 - src/miniscript/types/mod.rs | 1 - src/policy/compiler.rs | 1 - src/policy/concrete.rs | 1 - src/policy/mod.rs | 1 - src/policy/semantic.rs | 1 - src/psbt/mod.rs | 1 - 22 files changed, 22 deletions(-) diff --git a/src/descriptor/bare.rs b/src/descriptor/bare.rs index 771dd8738..a8028fe60 100644 --- a/src/descriptor/bare.rs +++ b/src/descriptor/bare.rs @@ -1,4 +1,3 @@ -// Written in 2020 by the rust-miniscript developers // SPDX-License-Identifier: CC0-1.0 //! # Bare Output Descriptors diff --git a/src/descriptor/mod.rs b/src/descriptor/mod.rs index 99d89f381..7551e1ad4 100644 --- a/src/descriptor/mod.rs +++ b/src/descriptor/mod.rs @@ -1,4 +1,3 @@ -// Written in 2018 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! # Output Descriptors diff --git a/src/descriptor/segwitv0.rs b/src/descriptor/segwitv0.rs index 6b50d5559..4d7c4e7a3 100644 --- a/src/descriptor/segwitv0.rs +++ b/src/descriptor/segwitv0.rs @@ -1,4 +1,3 @@ -// Written in 2020 by the rust-miniscript developers // SPDX-License-Identifier: CC0-1.0 //! # Segwit Output Descriptors diff --git a/src/descriptor/sh.rs b/src/descriptor/sh.rs index 95a3997d6..1f18c641a 100644 --- a/src/descriptor/sh.rs +++ b/src/descriptor/sh.rs @@ -1,4 +1,3 @@ -// Written in 2020 by the rust-miniscript developers // SPDX-License-Identifier: CC0-1.0 //! # P2SH Descriptors diff --git a/src/descriptor/sortedmulti.rs b/src/descriptor/sortedmulti.rs index 96a2e43d0..7e1242e83 100644 --- a/src/descriptor/sortedmulti.rs +++ b/src/descriptor/sortedmulti.rs @@ -1,4 +1,3 @@ -// Written in 2020 by the rust-miniscript developers // SPDX-License-Identifier: CC0-1.0 //! # Sorted Multi diff --git a/src/expression.rs b/src/expression.rs index 580b8c5b9..bcfebdae2 100644 --- a/src/expression.rs +++ b/src/expression.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! # Function-like Expression Language diff --git a/src/iter/mod.rs b/src/iter/mod.rs index 6a8ea60ad..5389b438f 100644 --- a/src/iter/mod.rs +++ b/src/iter/mod.rs @@ -1,4 +1,3 @@ -// Written in 2023 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Abstract Tree Iteration diff --git a/src/iter/tree.rs b/src/iter/tree.rs index 10dbf5339..e4884e991 100644 --- a/src/iter/tree.rs +++ b/src/iter/tree.rs @@ -1,4 +1,3 @@ -// Written in 2023 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Abstract Trees diff --git a/src/miniscript/analyzable.rs b/src/miniscript/analyzable.rs index a1795fa62..4c30e7df9 100644 --- a/src/miniscript/analyzable.rs +++ b/src/miniscript/analyzable.rs @@ -1,4 +1,3 @@ -// Written in 2018 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Miniscript Analysis diff --git a/src/miniscript/astelem.rs b/src/miniscript/astelem.rs index 22fbcd537..c7d403679 100644 --- a/src/miniscript/astelem.rs +++ b/src/miniscript/astelem.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! 
AST Elements diff --git a/src/miniscript/decode.rs b/src/miniscript/decode.rs index 8c1b66364..d5fa907a6 100644 --- a/src/miniscript/decode.rs +++ b/src/miniscript/decode.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Script Decoder diff --git a/src/miniscript/lex.rs b/src/miniscript/lex.rs index bb06811f9..e00eb42e7 100644 --- a/src/miniscript/lex.rs +++ b/src/miniscript/lex.rs @@ -1,4 +1,3 @@ -// Written in 2018 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Lexer diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index 320e80c4c..eb1e6213d 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! # Abstract Syntax Tree diff --git a/src/miniscript/satisfy.rs b/src/miniscript/satisfy.rs index 0b526ca77..7ba993e49 100644 --- a/src/miniscript/satisfy.rs +++ b/src/miniscript/satisfy.rs @@ -1,4 +1,3 @@ -// Written in 2018 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! # Satisfaction and Dissatisfaction diff --git a/src/miniscript/types/correctness.rs b/src/miniscript/types/correctness.rs index 0569ad366..a4c3b2fa9 100644 --- a/src/miniscript/types/correctness.rs +++ b/src/miniscript/types/correctness.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Correctness/Soundness type properties diff --git a/src/miniscript/types/malleability.rs b/src/miniscript/types/malleability.rs index 4b12d7a85..d1b8e4dba 100644 --- a/src/miniscript/types/malleability.rs +++ b/src/miniscript/types/malleability.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Malleability-related Type properties diff --git a/src/miniscript/types/mod.rs b/src/miniscript/types/mod.rs index d5c4ab0f7..35a29605e 100644 --- a/src/miniscript/types/mod.rs +++ b/src/miniscript/types/mod.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Miniscript Types diff --git a/src/policy/compiler.rs b/src/policy/compiler.rs index 9bba05589..c3c13ec62 100644 --- a/src/policy/compiler.rs +++ b/src/policy/compiler.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! # Policy Compiler diff --git a/src/policy/concrete.rs b/src/policy/concrete.rs index 74c675397..0b9045ee2 100644 --- a/src/policy/concrete.rs +++ b/src/policy/concrete.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Concrete Policies diff --git a/src/policy/mod.rs b/src/policy/mod.rs index 292feff78..8e80dd489 100644 --- a/src/policy/mod.rs +++ b/src/policy/mod.rs @@ -1,4 +1,3 @@ -// Written in 2018 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Script Policies diff --git a/src/policy/semantic.rs b/src/policy/semantic.rs index 10b08545d..aa2de8680 100644 --- a/src/policy/semantic.rs +++ b/src/policy/semantic.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Abstract Policies diff --git a/src/psbt/mod.rs b/src/psbt/mod.rs index d3e37eceb..d9f0ed047 100644 --- a/src/psbt/mod.rs +++ b/src/psbt/mod.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! # Partially-Signed Bitcoin Transactions From c3c508eabbaa17adf3d9a180b298a9c7543316a8 Mon Sep 17 00:00:00 2001 From: "Tobin C. 
Harding" Date: Thu, 27 Jul 2023 15:01:36 +1000 Subject: [PATCH 28/41] Remove double whitespace --- src/expression.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/expression.rs b/src/expression.rs index 580b8c5b9..39d5b9f84 100644 --- a/src/expression.rs +++ b/src/expression.rs @@ -9,7 +9,7 @@ use crate::prelude::*; use crate::{errstr, Error, MAX_RECURSION_DEPTH}; /// Allowed characters are descriptor strings. -pub const INPUT_CHARSET: &str = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "; +pub const INPUT_CHARSET: &str = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "; /// Map of valid characters in descriptor strings. #[rustfmt::skip] From c6fa7ca1940f0eaed179e871417042dc256c2414 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 15:13:44 +1000 Subject: [PATCH 29/41] Import Terminal::* to make code more terse The use of `Terminal::Foo` repeatedly clutters the code with no added meaning, we can import all the variants and make the code more terse and easier to read. Refactor only, no logic changes. --- src/iter/mod.rs | 94 +++++++++++++++++++------------------------------ 1 file changed, 36 insertions(+), 58 deletions(-) diff --git a/src/iter/mod.rs b/src/iter/mod.rs index 6a8ea60ad..17d8e323c 100644 --- a/src/iter/mod.rs +++ b/src/iter/mod.rs @@ -20,74 +20,52 @@ use crate::{Miniscript, MiniscriptKey, ScriptContext, Terminal}; impl<'a, Pk: MiniscriptKey, Ctx: ScriptContext> TreeLike for &'a Miniscript { fn as_node(&self) -> Tree { + use Terminal::*; match self.node { - Terminal::PkK(..) - | Terminal::PkH(..) - | Terminal::RawPkH(..) - | Terminal::After(..) - | Terminal::Older(..) - | Terminal::Sha256(..) - | Terminal::Hash256(..) - | Terminal::Ripemd160(..) - | Terminal::Hash160(..) - | Terminal::True - | Terminal::False - | Terminal::Multi(..) - | Terminal::MultiA(..) => Tree::Nullary, - Terminal::Alt(ref sub) - | Terminal::Swap(ref sub) - | Terminal::Check(ref sub) - | Terminal::DupIf(ref sub) - | Terminal::Verify(ref sub) - | Terminal::NonZero(ref sub) - | Terminal::ZeroNotEqual(ref sub) => Tree::Unary(sub), - Terminal::AndV(ref left, ref right) - | Terminal::AndB(ref left, ref right) - | Terminal::OrB(ref left, ref right) - | Terminal::OrD(ref left, ref right) - | Terminal::OrC(ref left, ref right) - | Terminal::OrI(ref left, ref right) => Tree::Binary(left, right), - Terminal::AndOr(ref a, ref b, ref c) => Tree::Nary(Arc::from([a.as_ref(), b, c])), - Terminal::Thresh(_, ref subs) => Tree::Nary(subs.iter().map(Arc::as_ref).collect()), + PkK(..) | PkH(..) | RawPkH(..) | After(..) | Older(..) | Sha256(..) | Hash256(..) + | Ripemd160(..) | Hash160(..) | True | False | Multi(..) | MultiA(..) => Tree::Nullary, + Alt(ref sub) + | Swap(ref sub) + | Check(ref sub) + | DupIf(ref sub) + | Verify(ref sub) + | NonZero(ref sub) + | ZeroNotEqual(ref sub) => Tree::Unary(sub), + AndV(ref left, ref right) + | AndB(ref left, ref right) + | OrB(ref left, ref right) + | OrD(ref left, ref right) + | OrC(ref left, ref right) + | OrI(ref left, ref right) => Tree::Binary(left, right), + AndOr(ref a, ref b, ref c) => Tree::Nary(Arc::from([a.as_ref(), b, c])), + Thresh(_, ref subs) => Tree::Nary(subs.iter().map(Arc::as_ref).collect()), } } } impl TreeLike for Arc> { fn as_node(&self) -> Tree { + use Terminal::*; match self.node { - Terminal::PkK(..) - | Terminal::PkH(..) - | Terminal::RawPkH(..) - | Terminal::After(..) - | Terminal::Older(..) 
- | Terminal::Sha256(..) - | Terminal::Hash256(..) - | Terminal::Ripemd160(..) - | Terminal::Hash160(..) - | Terminal::True - | Terminal::False - | Terminal::Multi(..) - | Terminal::MultiA(..) => Tree::Nullary, - Terminal::Alt(ref sub) - | Terminal::Swap(ref sub) - | Terminal::Check(ref sub) - | Terminal::DupIf(ref sub) - | Terminal::Verify(ref sub) - | Terminal::NonZero(ref sub) - | Terminal::ZeroNotEqual(ref sub) => Tree::Unary(Arc::clone(sub)), - Terminal::AndV(ref left, ref right) - | Terminal::AndB(ref left, ref right) - | Terminal::OrB(ref left, ref right) - | Terminal::OrD(ref left, ref right) - | Terminal::OrC(ref left, ref right) - | Terminal::OrI(ref left, ref right) => { - Tree::Binary(Arc::clone(left), Arc::clone(right)) - } - Terminal::AndOr(ref a, ref b, ref c) => { + PkK(..) | PkH(..) | RawPkH(..) | After(..) | Older(..) | Sha256(..) | Hash256(..) + | Ripemd160(..) | Hash160(..) | True | False | Multi(..) | MultiA(..) => Tree::Nullary, + Alt(ref sub) + | Swap(ref sub) + | Check(ref sub) + | DupIf(ref sub) + | Verify(ref sub) + | NonZero(ref sub) + | ZeroNotEqual(ref sub) => Tree::Unary(Arc::clone(sub)), + AndV(ref left, ref right) + | AndB(ref left, ref right) + | OrB(ref left, ref right) + | OrD(ref left, ref right) + | OrC(ref left, ref right) + | OrI(ref left, ref right) => Tree::Binary(Arc::clone(left), Arc::clone(right)), + AndOr(ref a, ref b, ref c) => { Tree::Nary(Arc::from([Arc::clone(a), Arc::clone(b), Arc::clone(c)])) } - Terminal::Thresh(_, ref subs) => Tree::Nary(subs.iter().map(Arc::clone).collect()), + Thresh(_, ref subs) => Tree::Nary(subs.iter().map(Arc::clone).collect()), } } } From 40672382100492be6418d0ae5810fa1eeda79d7c Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 15:30:43 +1000 Subject: [PATCH 30/41] Remove unnecessary path from Terminal The `Terminal` type is unambiguous, no need to qualify it.
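For illustration, here is how the same `c:pk_k(pk)` construction reads from outside the crate once `Terminal` is used without the long path. This is a minimal sketch, not part of the patch; it assumes the crate-root re-exports of `Terminal`, `Miniscript` and `BareCtx` (see the re-export list in `lib.rs`) and simply mirrors the `Descriptor::new_pk` shape in the diff below.

```rust
// Sketch only: build `c:pk_k(pk)` by hand, the same shape `Descriptor::new_pk` uses.
use std::sync::Arc;

use miniscript::bitcoin::PublicKey;
use miniscript::{BareCtx, Miniscript, Terminal};

fn check_pk(pk: PublicKey) -> Miniscript<PublicKey, BareCtx> {
    let pk_k = Miniscript::from_ast(Terminal::PkK(pk)).expect("pk_k type-checks");
    Miniscript::from_ast(Terminal::Check(Arc::new(pk_k))).expect("c:pk_k type-checks")
}
```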
--- src/descriptor/mod.rs | 15 +++++++-------- src/descriptor/sortedmulti.rs | 8 ++++---- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/src/descriptor/mod.rs b/src/descriptor/mod.rs index 99d89f381..c5d9108cd 100644 --- a/src/descriptor/mod.rs +++ b/src/descriptor/mod.rs @@ -22,11 +22,12 @@ use bitcoin::{secp256k1, Address, Network, Script, ScriptBuf, TxIn, Witness}; use sync::Arc; use self::checksum::verify_checksum; +use crate::miniscript::decode::Terminal; use crate::miniscript::{Legacy, Miniscript, Segwitv0}; use crate::prelude::*; use crate::{ - expression, hash256, miniscript, BareCtx, Error, ForEachKey, MiniscriptKey, Satisfier, - ToPublicKey, TranslateErr, TranslatePk, Translator, + expression, hash256, BareCtx, Error, ForEachKey, MiniscriptKey, Satisfier, ToPublicKey, + TranslateErr, TranslatePk, Translator, }; mod bare; @@ -167,12 +168,10 @@ impl Descriptor { /// Create a new pk descriptor pub fn new_pk(pk: Pk) -> Self { // roundabout way to constuct `c:pk_k(pk)` - let ms: Miniscript = - Miniscript::from_ast(miniscript::decode::Terminal::Check(Arc::new( - Miniscript::from_ast(miniscript::decode::Terminal::PkK(pk)) - .expect("Type check cannot fail"), - ))) - .expect("Type check cannot fail"); + let ms: Miniscript = Miniscript::from_ast(Terminal::Check(Arc::new( + Miniscript::from_ast(Terminal::PkK(pk)).expect("Type check cannot fail"), + ))) + .expect("Type check cannot fail"); Descriptor::Bare(Bare::new(ms).expect("Context checks cannot fail for p2pk")) } diff --git a/src/descriptor/sortedmulti.rs b/src/descriptor/sortedmulti.rs index 96a2e43d0..b699f3029 100644 --- a/src/descriptor/sortedmulti.rs +++ b/src/descriptor/sortedmulti.rs @@ -17,8 +17,8 @@ use crate::miniscript::decode::Terminal; use crate::miniscript::limits::MAX_PUBKEYS_PER_MULTISIG; use crate::prelude::*; use crate::{ - errstr, expression, miniscript, policy, script_num_size, Error, ForEachKey, Miniscript, - MiniscriptKey, Satisfier, ToPublicKey, TranslateErr, Translator, + errstr, expression, policy, script_num_size, Error, ForEachKey, Miniscript, MiniscriptKey, + Satisfier, ToPublicKey, TranslateErr, Translator, }; /// Contents of a "sortedmulti" descriptor @@ -45,7 +45,7 @@ impl SortedMultiVec { // Check the limits before creating a new SortedMultiVec // For example, under p2sh context the scriptlen can only be // upto 520 bytes. - let term: miniscript::decode::Terminal = Terminal::Multi(k, pks.clone()); + let term: Terminal = Terminal::Multi(k, pks.clone()); let ms = Miniscript::from_ast(term)?; // This would check all the consensus rules for p2sh/p2wsh and @@ -226,9 +226,9 @@ impl fmt::Display for SortedMultiVec Date: Thu, 27 Jul 2023 15:40:12 +1000 Subject: [PATCH 31/41] Improve rustdocs on Miniscript type --- src/miniscript/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index 320e80c4c..495b15cdc 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -50,14 +50,14 @@ use crate::{expression, Error, ForEachKey, MiniscriptKey, ToPublicKey, Translate #[cfg(test)] mod ms_tests; -/// Top-level script AST type +/// The top-level miniscript abstract syntax tree (AST). #[derive(Clone)] pub struct Miniscript { - ///A node in the Abstract Syntax Tree( + /// A node in the AST. pub node: Terminal, - ///The correctness and malleability type information for the AST node + /// The correctness and malleability type information for the AST node. 
pub ty: types::Type, - ///Additional information helpful for extra analysis. + /// Additional information helpful for extra analysis. pub ext: types::extra_props::ExtData, /// Context PhantomData. Only accessible inside this crate phantom: PhantomData, From 1d1ac370d6a294080fb703199d66a3bfec971a4b Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 15:43:57 +1000 Subject: [PATCH 32/41] Move Miniscript Hash impl underneath others The `Hash` trait is related to PartialEq, Eq, PartialOrd, Ord - as such put it next to them. --- src/miniscript/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index 495b15cdc..bccb97cd5 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -95,18 +95,18 @@ impl PartialEq for Miniscript { /// by the ast. impl Eq for Miniscript {} -impl fmt::Debug for Miniscript { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self.node) - } -} - impl hash::Hash for Miniscript { fn hash(&self, state: &mut H) { self.node.hash(state); } } +impl fmt::Debug for Miniscript { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.node) + } +} + impl Miniscript { /// Add type information(Type and Extdata) to Miniscript based on /// `AstElem` fragment. Dependent on display and clone because of Error From fd09756d1a629dfff8c4736354d764ccd048ca27 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 15:47:16 +1000 Subject: [PATCH 33/41] Make Miniscript cmp/ord/hash docs uniform Improve and make the docs all the same. --- src/miniscript/mod.rs | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index bccb97cd5..1a034b28d 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -64,8 +64,8 @@ pub struct Miniscript { } /// `PartialOrd` of `Miniscript` must depend only on node and not the type information. -/// The type information and extra_properties can be deterministically determined -/// by the ast. +/// +/// The type information and extra properties are implied by the AST. impl PartialOrd for Miniscript { fn partial_cmp(&self, other: &Miniscript) -> Option { Some(self.node.cmp(&other.node)) @@ -73,8 +73,8 @@ impl PartialOrd for Miniscript { } /// `Ord` of `Miniscript` must depend only on node and not the type information. -/// The type information and extra_properties can be deterministically determined -/// by the ast. +/// +/// The type information and extra properties are implied by the AST. impl Ord for Miniscript { fn cmp(&self, other: &Miniscript) -> cmp::Ordering { self.node.cmp(&other.node) @@ -82,8 +82,8 @@ impl Ord for Miniscript { } /// `PartialEq` of `Miniscript` must depend only on node and not the type information. -/// The type information and extra_properties can be deterministically determined -/// by the ast. +/// +/// The type information and extra properties are implied by the AST. impl PartialEq for Miniscript { fn eq(&self, other: &Miniscript) -> bool { self.node.eq(&other.node) @@ -91,10 +91,13 @@ impl PartialEq for Miniscript { } /// `Eq` of `Miniscript` must depend only on node and not the type information. -/// The type information and extra_properties can be deterministically determined -/// by the ast. +/// +/// The type information and extra properties are implied by the AST. impl Eq for Miniscript {} +/// `Hash` of `Miniscript` must depend only on node and not the type information. 
+/// +/// The type information and extra properties are implied by the AST. impl hash::Hash for Miniscript { fn hash(&self, state: &mut H) { self.node.hash(state); From 6c5db91a779d1d4ab78011e2ee5379323fac7869 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 15:54:45 +1000 Subject: [PATCH 34/41] Move Miniscript impl block under type Move the `Miniscript` impl block (with the constructors in in) to be underneath the struct. Refactor only, no logic changes. --- src/miniscript/mod.rs | 68 +++++++++++++++++++++---------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index 1a034b28d..a1dce679e 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -63,6 +63,40 @@ pub struct Miniscript { phantom: PhantomData, } +impl Miniscript { + /// Add type information(Type and Extdata) to Miniscript based on + /// `AstElem` fragment. Dependent on display and clone because of Error + /// Display code of type_check. + pub fn from_ast(t: Terminal) -> Result, Error> { + let res = Miniscript { + ty: Type::type_check(&t, |_| None)?, + ext: ExtData::type_check(&t, |_| None)?, + node: t, + phantom: PhantomData, + }; + Ctx::check_global_consensus_validity(&res)?; + Ok(res) + } + + /// Create a new `Miniscript` from a `Terminal` node and a `Type` annotation + /// This does not check the typing rules. The user is responsible for ensuring + /// that the type provided is correct. + /// + /// You should almost always use `Miniscript::from_ast` instead of this function. + pub fn from_components_unchecked( + node: Terminal, + ty: types::Type, + ext: types::extra_props::ExtData, + ) -> Miniscript { + Miniscript { + node, + ty, + ext, + phantom: PhantomData, + } + } +} + /// `PartialOrd` of `Miniscript` must depend only on node and not the type information. /// /// The type information and extra properties are implied by the AST. @@ -110,40 +144,6 @@ impl fmt::Debug for Miniscript { } } -impl Miniscript { - /// Add type information(Type and Extdata) to Miniscript based on - /// `AstElem` fragment. Dependent on display and clone because of Error - /// Display code of type_check. - pub fn from_ast(t: Terminal) -> Result, Error> { - let res = Miniscript { - ty: Type::type_check(&t, |_| None)?, - ext: ExtData::type_check(&t, |_| None)?, - node: t, - phantom: PhantomData, - }; - Ctx::check_global_consensus_validity(&res)?; - Ok(res) - } - - /// Create a new `Miniscript` from a `Terminal` node and a `Type` annotation - /// This does not check the typing rules. The user is responsible for ensuring - /// that the type provided is correct. - /// - /// You should almost always use `Miniscript::from_ast` instead of this function. - pub fn from_components_unchecked( - node: Terminal, - ty: types::Type, - ext: types::extra_props::ExtData, - ) -> Miniscript { - Miniscript { - node, - ty, - ext, - phantom: PhantomData, - } - } -} - impl fmt::Display for Miniscript { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.node) From dbf9a62e89b5269ec988983b1339c78d4c87ce03 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 15:56:49 +1000 Subject: [PATCH 35/41] Combine Miniscript conversion and constructor blocks Combine the two impl blocks that contain constructors and conversion functions. Refactor only, no logic changes. 
--- src/miniscript/mod.rs | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index a1dce679e..d65dab672 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -95,6 +95,16 @@ impl Miniscript { phantom: PhantomData, } } + + /// Extracts the `AstElem` representing the root of the miniscript + pub fn into_inner(self) -> Terminal { + self.node + } + + /// Get a reference to the inner `AstElem` representing the root of miniscript + pub fn as_inner(&self) -> &Terminal { + &self.node + } } /// `PartialOrd` of `Miniscript` must depend only on node and not the type information. @@ -150,18 +160,6 @@ impl fmt::Display for Miniscript } } -impl Miniscript { - /// Extracts the `AstElem` representing the root of the miniscript - pub fn into_inner(self) -> Terminal { - self.node - } - - /// Get a reference to the inner `AstElem` representing the root of miniscript - pub fn as_inner(&self) -> &Terminal { - &self.node - } -} - impl Miniscript { /// Attempt to parse an insane(scripts don't clear sanity checks) /// script into a Miniscript representation. From e3c1b7c3a7968d7686f3bc0e1fdd4d6ce55cce2d Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 15:58:41 +1000 Subject: [PATCH 36/41] Move Miniscript encode/script_size into main block Move the `encode` and `script_size` functions to the main impl block. Refactor only, no logic changes. --- src/miniscript/mod.rs | 140 ++++++++++++++++++++---------------------- 1 file changed, 67 insertions(+), 73 deletions(-) diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index d65dab672..a42f1ee9e 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -105,6 +105,73 @@ impl Miniscript { pub fn as_inner(&self) -> &Terminal { &self.node } + + /// Encode as a Bitcoin script + pub fn encode(&self) -> script::ScriptBuf + where + Pk: ToPublicKey, + { + self.node.encode(script::Builder::new()).into_script() + } + + /// Size, in bytes of the script-pubkey. If this Miniscript is used outside + /// of segwit (e.g. in a bare or P2SH descriptor), this quantity should be + /// multiplied by 4 to compute the weight. + /// + /// In general, it is not recommended to use this function directly, but + /// to instead call the corresponding function on a `Descriptor`, which + /// will handle the segwit/non-segwit technicalities for you. + pub fn script_size(&self) -> usize { + let mut len = 0; + for ms in self.pre_order_iter() { + len += match ms.node { + Terminal::PkK(ref pk) => Ctx::pk_len(pk), + Terminal::PkH(..) | Terminal::RawPkH(..) => 24, + Terminal::After(n) => script_num_size(n.to_consensus_u32() as usize) + 1, + Terminal::Older(n) => script_num_size(n.to_consensus_u32() as usize) + 1, + Terminal::Sha256(..) => 33 + 6, + Terminal::Hash256(..) => 33 + 6, + Terminal::Ripemd160(..) => 21 + 6, + Terminal::Hash160(..) => 21 + 6, + Terminal::True => 1, + Terminal::False => 1, + Terminal::Alt(..) => 2, + Terminal::Swap(..) => 1, + Terminal::Check(..) => 1, + Terminal::DupIf(..) => 3, + Terminal::Verify(ref sub) => usize::from(!sub.ext.has_free_verify), + Terminal::NonZero(..) => 4, + Terminal::ZeroNotEqual(..) => 1, + Terminal::AndV(..) => 0, + Terminal::AndB(..) => 1, + Terminal::AndOr(..) => 3, + Terminal::OrB(..) => 1, + Terminal::OrD(..) => 3, + Terminal::OrC(..) => 2, + Terminal::OrI(..) 
=> 3, + Terminal::Thresh(k, ref subs) => { + assert!(!subs.is_empty(), "threshold must be nonempty"); + script_num_size(k) // k + + 1 // EQUAL + + subs.len() // ADD + - 1 // no ADD on first element + } + Terminal::Multi(k, ref pks) => { + script_num_size(k) + + 1 + + script_num_size(pks.len()) + + pks.iter().map(|pk| Ctx::pk_len(pk)).sum::() + } + Terminal::MultiA(k, ref pks) => { + script_num_size(k) + + 1 // NUMEQUAL + + pks.iter().map(|pk| Ctx::pk_len(pk)).sum::() // n keys + + pks.len() // n times CHECKSIGADD + } + } + } + len + } } /// `PartialOrd` of `Miniscript` must depend only on node and not the type information. @@ -239,79 +306,6 @@ impl Miniscript { } } -impl Miniscript -where - Pk: MiniscriptKey, - Ctx: ScriptContext, -{ - /// Encode as a Bitcoin script - pub fn encode(&self) -> script::ScriptBuf - where - Pk: ToPublicKey, - { - self.node.encode(script::Builder::new()).into_script() - } - - /// Size, in bytes of the script-pubkey. If this Miniscript is used outside - /// of segwit (e.g. in a bare or P2SH descriptor), this quantity should be - /// multiplied by 4 to compute the weight. - /// - /// In general, it is not recommended to use this function directly, but - /// to instead call the corresponding function on a `Descriptor`, which - /// will handle the segwit/non-segwit technicalities for you. - pub fn script_size(&self) -> usize { - let mut len = 0; - for ms in self.pre_order_iter() { - len += match ms.node { - Terminal::PkK(ref pk) => Ctx::pk_len(pk), - Terminal::PkH(..) | Terminal::RawPkH(..) => 24, - Terminal::After(n) => script_num_size(n.to_consensus_u32() as usize) + 1, - Terminal::Older(n) => script_num_size(n.to_consensus_u32() as usize) + 1, - Terminal::Sha256(..) => 33 + 6, - Terminal::Hash256(..) => 33 + 6, - Terminal::Ripemd160(..) => 21 + 6, - Terminal::Hash160(..) => 21 + 6, - Terminal::True => 1, - Terminal::False => 1, - Terminal::Alt(..) => 2, - Terminal::Swap(..) => 1, - Terminal::Check(..) => 1, - Terminal::DupIf(..) => 3, - Terminal::Verify(ref sub) => usize::from(!sub.ext.has_free_verify), - Terminal::NonZero(..) => 4, - Terminal::ZeroNotEqual(..) => 1, - Terminal::AndV(..) => 0, - Terminal::AndB(..) => 1, - Terminal::AndOr(..) => 3, - Terminal::OrB(..) => 1, - Terminal::OrD(..) => 3, - Terminal::OrC(..) => 2, - Terminal::OrI(..) => 3, - Terminal::Thresh(k, ref subs) => { - assert!(!subs.is_empty(), "threshold must be nonempty"); - script_num_size(k) // k - + 1 // EQUAL - + subs.len() // ADD - - 1 // no ADD on first element - } - Terminal::Multi(k, ref pks) => { - script_num_size(k) - + 1 - + script_num_size(pks.len()) - + pks.iter().map(|pk| Ctx::pk_len(pk)).sum::() - } - Terminal::MultiA(k, ref pks) => { - script_num_size(k) - + 1 // NUMEQUAL - + pks.iter().map(|pk| Ctx::pk_len(pk)).sum::() // n keys - + pks.len() // n times CHECKSIGADD - } - } - } - len - } -} - impl Miniscript { /// Maximum number of witness elements used to satisfy the Miniscript /// fragment, including the witness script itself. Used to estimate From 32342de7f4213ccffeb4c9279ae15197927f5087 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 16:00:09 +1000 Subject: [PATCH 37/41] Move Miniscript max_satisfaction* to main block Move the two `max_satisfaction*` functions to the main impl block. Refactor only, no logic changes. 
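The two helpers being moved here are the usual entry points for estimating satisfaction cost. Below is a hedged usage sketch, not part of the patch: it assumes the crate-root re-exports of `Miniscript` and `Segwitv0`, and the 33-byte key is only a placeholder.

```rust
// Sketch only: worst-case satisfaction cost of a segwit v0 fragment.
use std::str::FromStr;

use miniscript::bitcoin::PublicKey;
use miniscript::{Miniscript, Segwitv0};

fn main() {
    let ms = Miniscript::<PublicKey, Segwitv0>::from_str(
        "and_v(v:pk(022222222222222222222222222222222222222222222222222222222222222222),older(4444))",
    )
    .expect("valid, sane miniscript");

    // Upper bound on the number of witness stack elements, including the witness script.
    let elements = ms.max_satisfaction_witness_elements().expect("satisfiable");
    // Upper bound, in bytes, on a satisfying witness (each signature counted as 73 bytes).
    let size = ms.max_satisfaction_size().expect("satisfiable");
    println!("witness: at most {} elements, at most {} bytes", elements, size);
}
```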
--- src/miniscript/mod.rs | 62 +++++++++++++++++++++---------------------- 1 file changed, 30 insertions(+), 32 deletions(-) diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index a42f1ee9e..3eac6f851 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -172,6 +172,36 @@ impl Miniscript { } len } + + /// Maximum number of witness elements used to satisfy the Miniscript + /// fragment, including the witness script itself. Used to estimate + /// the weight of the `VarInt` that specifies this number in a serialized + /// transaction. + /// + /// This function may returns Error when the Miniscript is + /// impossible to satisfy + pub fn max_satisfaction_witness_elements(&self) -> Result { + self.ext + .stack_elem_count_sat + .map(|x| x + 1) + .ok_or(Error::ImpossibleSatisfaction) + } + + /// Maximum size, in bytes, of a satisfying witness. For Segwit outputs + /// `one_cost` should be set to 2, since the number `1` requires two + /// bytes to encode. For non-segwit outputs `one_cost` should be set to + /// 1, since `OP_1` is available in scriptSigs. + /// + /// In general, it is not recommended to use this function directly, but + /// to instead call the corresponding function on a `Descriptor`, which + /// will handle the segwit/non-segwit technicalities for you. + /// + /// All signatures are assumed to be 73 bytes in size, including the + /// length prefix (segwit) or push opcode (pre-segwit) and sighash + /// postfix. + pub fn max_satisfaction_size(&self) -> Result { + Ctx::max_satisfaction_size(self).ok_or(Error::ImpossibleSatisfaction) + } } /// `PartialOrd` of `Miniscript` must depend only on node and not the type information. @@ -306,38 +336,6 @@ impl Miniscript { } } -impl Miniscript { - /// Maximum number of witness elements used to satisfy the Miniscript - /// fragment, including the witness script itself. Used to estimate - /// the weight of the `VarInt` that specifies this number in a serialized - /// transaction. - /// - /// This function may returns Error when the Miniscript is - /// impossible to satisfy - pub fn max_satisfaction_witness_elements(&self) -> Result { - self.ext - .stack_elem_count_sat - .map(|x| x + 1) - .ok_or(Error::ImpossibleSatisfaction) - } - - /// Maximum size, in bytes, of a satisfying witness. For Segwit outputs - /// `one_cost` should be set to 2, since the number `1` requires two - /// bytes to encode. For non-segwit outputs `one_cost` should be set to - /// 1, since `OP_1` is available in scriptSigs. - /// - /// In general, it is not recommended to use this function directly, but - /// to instead call the corresponding function on a `Descriptor`, which - /// will handle the segwit/non-segwit technicalities for you. - /// - /// All signatures are assumed to be 73 bytes in size, including the - /// length prefix (segwit) or push opcode (pre-segwit) and sighash - /// postfix. - pub fn max_satisfaction_size(&self) -> Result { - Ctx::max_satisfaction_size(self).ok_or(Error::ImpossibleSatisfaction) - } -} - impl ForEachKey for Miniscript { fn for_each_key<'a, F: FnMut(&'a Pk) -> bool>(&'a self, mut pred: F) -> bool { for ms in self.pre_order_iter() { From 5fd22a6ac611c0810f2b2af3238db7fe0f1a389b Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 16:02:01 +1000 Subject: [PATCH 38/41] Move Miniscript satisfy* functions into main block Move the two `satisfy*` functions to the main impl block. Refactor only, no logic changes. 
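A hedged sketch of driving the non-malleable `satisfy` entry point being moved here (not part of the patch). It assumes the `Satisfier` implementation on a pubkey-to-ECDSA-signature `HashMap` and the `bitcoin`/`secp256k1` re-exports; the key and signature are generated locally over a placeholder digest, whereas a real caller would sign the actual sighash of the spending transaction.

```rust
// Sketch only: satisfy `pk(K)` from a pubkey -> signature map.
use std::collections::HashMap;
use std::str::FromStr;

use miniscript::bitcoin::secp256k1::{self, Message, Secp256k1};
use miniscript::bitcoin::sighash::EcdsaSighashType;
use miniscript::bitcoin::{ecdsa, PublicKey};
use miniscript::{Miniscript, Segwitv0};

fn main() {
    let secp = Secp256k1::new();
    let sk = secp256k1::SecretKey::from_slice(&[0xcd; 32]).expect("valid secret key");
    let pk = PublicKey::new(secp256k1::PublicKey::from_secret_key(&secp, &sk));

    let ms = Miniscript::<PublicKey, Segwitv0>::from_str(&format!("pk({})", pk))
        .expect("valid miniscript");

    // Placeholder digest standing in for the real sighash.
    let digest = Message::from_slice(&[0xab; 32]).expect("32 bytes");
    let sig = ecdsa::Signature { sig: secp.sign_ecdsa(&digest, &sk), hash_ty: EcdsaSighashType::All };

    let mut satisfier = HashMap::new();
    satisfier.insert(pk, sig);

    // `satisfy` rejects malleable solutions; `satisfy_malleable` would also accept them.
    let witness = ms.satisfy(&satisfier).expect("signature available");
    assert_eq!(witness.len(), 1); // a single signature push satisfies `pk`
}
```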
--- src/miniscript/mod.rs | 100 +++++++++++++++++++++--------------------- 1 file changed, 49 insertions(+), 51 deletions(-) diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index 3eac6f851..871273fb7 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -202,6 +202,55 @@ impl Miniscript { pub fn max_satisfaction_size(&self) -> Result { Ctx::max_satisfaction_size(self).ok_or(Error::ImpossibleSatisfaction) } + + /// Attempt to produce non-malleable satisfying witness for the + /// witness script represented by the parse tree + pub fn satisfy>(&self, satisfier: S) -> Result>, Error> + where + Pk: ToPublicKey, + { + // Only satisfactions for default versions (0xc0) are allowed. + let leaf_hash = TapLeafHash::from_script(&self.encode(), LeafVersion::TapScript); + match satisfy::Satisfaction::satisfy(&self.node, &satisfier, self.ty.mall.safe, &leaf_hash) + .stack + { + satisfy::Witness::Stack(stack) => { + Ctx::check_witness::(&stack)?; + Ok(stack) + } + satisfy::Witness::Unavailable | satisfy::Witness::Impossible => { + Err(Error::CouldNotSatisfy) + } + } + } + + /// Attempt to produce a malleable satisfying witness for the + /// witness script represented by the parse tree + pub fn satisfy_malleable>( + &self, + satisfier: S, + ) -> Result>, Error> + where + Pk: ToPublicKey, + { + let leaf_hash = TapLeafHash::from_script(&self.encode(), LeafVersion::TapScript); + match satisfy::Satisfaction::satisfy_mall( + &self.node, + &satisfier, + self.ty.mall.safe, + &leaf_hash, + ) + .stack + { + satisfy::Witness::Stack(stack) => { + Ctx::check_witness::(&stack)?; + Ok(stack) + } + satisfy::Witness::Unavailable | satisfy::Witness::Impossible => { + Err(Error::CouldNotSatisfy) + } + } + } } /// `PartialOrd` of `Miniscript` must depend only on node and not the type information. @@ -500,57 +549,6 @@ impl_block_str!( } ); -impl Miniscript { - /// Attempt to produce non-malleable satisfying witness for the - /// witness script represented by the parse tree - pub fn satisfy>(&self, satisfier: S) -> Result>, Error> - where - Pk: ToPublicKey, - { - // Only satisfactions for default versions (0xc0) are allowed. - let leaf_hash = TapLeafHash::from_script(&self.encode(), LeafVersion::TapScript); - match satisfy::Satisfaction::satisfy(&self.node, &satisfier, self.ty.mall.safe, &leaf_hash) - .stack - { - satisfy::Witness::Stack(stack) => { - Ctx::check_witness::(&stack)?; - Ok(stack) - } - satisfy::Witness::Unavailable | satisfy::Witness::Impossible => { - Err(Error::CouldNotSatisfy) - } - } - } - - /// Attempt to produce a malleable satisfying witness for the - /// witness script represented by the parse tree - pub fn satisfy_malleable>( - &self, - satisfier: S, - ) -> Result>, Error> - where - Pk: ToPublicKey, - { - let leaf_hash = TapLeafHash::from_script(&self.encode(), LeafVersion::TapScript); - match satisfy::Satisfaction::satisfy_mall( - &self.node, - &satisfier, - self.ty.mall.safe, - &leaf_hash, - ) - .stack - { - satisfy::Witness::Stack(stack) => { - Ctx::check_witness::(&stack)?; - Ok(stack) - } - satisfy::Witness::Unavailable | satisfy::Witness::Impossible => { - Err(Error::CouldNotSatisfy) - } - } - } -} - impl_from_tree!( ;Ctx; ScriptContext, Arc>, From 23897d11be018bba68eb640c397fa928abe17570 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 16:04:27 +1000 Subject: [PATCH 39/41] Put the two Miniscript impl blocks together There are now two impl blocks for `Miniscript` with different generics, put one directly under the other. 
To match the diff this should be said "move trait impls below the second Miniscript impl block". --- src/miniscript/mod.rs | 106 +++++++++++++++++++++--------------------- 1 file changed, 53 insertions(+), 53 deletions(-) diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index 871273fb7..d72337a40 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -253,59 +253,6 @@ impl Miniscript { } } -/// `PartialOrd` of `Miniscript` must depend only on node and not the type information. -/// -/// The type information and extra properties are implied by the AST. -impl PartialOrd for Miniscript { - fn partial_cmp(&self, other: &Miniscript) -> Option { - Some(self.node.cmp(&other.node)) - } -} - -/// `Ord` of `Miniscript` must depend only on node and not the type information. -/// -/// The type information and extra properties are implied by the AST. -impl Ord for Miniscript { - fn cmp(&self, other: &Miniscript) -> cmp::Ordering { - self.node.cmp(&other.node) - } -} - -/// `PartialEq` of `Miniscript` must depend only on node and not the type information. -/// -/// The type information and extra properties are implied by the AST. -impl PartialEq for Miniscript { - fn eq(&self, other: &Miniscript) -> bool { - self.node.eq(&other.node) - } -} - -/// `Eq` of `Miniscript` must depend only on node and not the type information. -/// -/// The type information and extra properties are implied by the AST. -impl Eq for Miniscript {} - -/// `Hash` of `Miniscript` must depend only on node and not the type information. -/// -/// The type information and extra properties are implied by the AST. -impl hash::Hash for Miniscript { - fn hash(&self, state: &mut H) { - self.node.hash(state); - } -} - -impl fmt::Debug for Miniscript { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self.node) - } -} - -impl fmt::Display for Miniscript { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.node) - } -} - impl Miniscript { /// Attempt to parse an insane(scripts don't clear sanity checks) /// script into a Miniscript representation. @@ -385,6 +332,59 @@ impl Miniscript { } } +/// `PartialOrd` of `Miniscript` must depend only on node and not the type information. +/// +/// The type information and extra properties are implied by the AST. +impl PartialOrd for Miniscript { + fn partial_cmp(&self, other: &Miniscript) -> Option { + Some(self.node.cmp(&other.node)) + } +} + +/// `Ord` of `Miniscript` must depend only on node and not the type information. +/// +/// The type information and extra properties are implied by the AST. +impl Ord for Miniscript { + fn cmp(&self, other: &Miniscript) -> cmp::Ordering { + self.node.cmp(&other.node) + } +} + +/// `PartialEq` of `Miniscript` must depend only on node and not the type information. +/// +/// The type information and extra properties are implied by the AST. +impl PartialEq for Miniscript { + fn eq(&self, other: &Miniscript) -> bool { + self.node.eq(&other.node) + } +} + +/// `Eq` of `Miniscript` must depend only on node and not the type information. +/// +/// The type information and extra properties are implied by the AST. +impl Eq for Miniscript {} + +/// `Hash` of `Miniscript` must depend only on node and not the type information. +/// +/// The type information and extra properties are implied by the AST. 
+impl hash::Hash for Miniscript { + fn hash(&self, state: &mut H) { + self.node.hash(state); + } +} + +impl fmt::Debug for Miniscript { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.node) + } +} + +impl fmt::Display for Miniscript { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.node) + } +} + impl ForEachKey for Miniscript { fn for_each_key<'a, F: FnMut(&'a Pk) -> bool>(&'a self, mut pred: F) -> bool { for ms in self.pre_order_iter() { From e1e95d38f53485d205eced9e5fe040e9d00cf1cc Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 16:26:45 +1000 Subject: [PATCH 40/41] Compress patter match in script_size Compress match arms using `|`, this makes the code more terse and easier to quickly see if terminal variants return the expected length (for the trivial ones at least). --- src/miniscript/mod.rs | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index d72337a40..bd2fef77a 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -122,33 +122,24 @@ impl Miniscript { /// to instead call the corresponding function on a `Descriptor`, which /// will handle the segwit/non-segwit technicalities for you. pub fn script_size(&self) -> usize { + use Terminal::*; + let mut len = 0; for ms in self.pre_order_iter() { len += match ms.node { + AndV(..) => 0, + True | False | Swap(..) | Check(..) | ZeroNotEqual(..) | AndB(..) | OrB(..) => 1, + Alt(..) | OrC(..) => 2, + DupIf(..) | AndOr(..) | OrD(..) | OrI(..) => 3, + NonZero(..) => 4, + PkH(..) | RawPkH(..) => 24, + Ripemd160(..) | Hash160(..) => 21 + 6, + Sha256(..) | Hash256(..) => 33 + 6, + Terminal::PkK(ref pk) => Ctx::pk_len(pk), - Terminal::PkH(..) | Terminal::RawPkH(..) => 24, Terminal::After(n) => script_num_size(n.to_consensus_u32() as usize) + 1, Terminal::Older(n) => script_num_size(n.to_consensus_u32() as usize) + 1, - Terminal::Sha256(..) => 33 + 6, - Terminal::Hash256(..) => 33 + 6, - Terminal::Ripemd160(..) => 21 + 6, - Terminal::Hash160(..) => 21 + 6, - Terminal::True => 1, - Terminal::False => 1, - Terminal::Alt(..) => 2, - Terminal::Swap(..) => 1, - Terminal::Check(..) => 1, - Terminal::DupIf(..) => 3, Terminal::Verify(ref sub) => usize::from(!sub.ext.has_free_verify), - Terminal::NonZero(..) => 4, - Terminal::ZeroNotEqual(..) => 1, - Terminal::AndV(..) => 0, - Terminal::AndB(..) => 1, - Terminal::AndOr(..) => 3, - Terminal::OrB(..) => 1, - Terminal::OrD(..) => 3, - Terminal::OrC(..) => 2, - Terminal::OrI(..) => 3, Terminal::Thresh(k, ref subs) => { assert!(!subs.is_empty(), "threshold must be nonempty"); script_num_size(k) // k From a60c642a54d685450aa9e117d854c624108ff7e5 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Thu, 27 Jul 2023 16:39:52 +1000 Subject: [PATCH 41/41] Refactor satisfy* functions Reduce duplicate code by factoring out a helper function. Refactor only, no logic changes. --- src/miniscript/mod.rs | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index bd2fef77a..98e51fd13 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -202,17 +202,9 @@ impl Miniscript { { // Only satisfactions for default versions (0xc0) are allowed. 
let leaf_hash = TapLeafHash::from_script(&self.encode(), LeafVersion::TapScript); - match satisfy::Satisfaction::satisfy(&self.node, &satisfier, self.ty.mall.safe, &leaf_hash) - .stack - { - satisfy::Witness::Stack(stack) => { - Ctx::check_witness::(&stack)?; - Ok(stack) - } - satisfy::Witness::Unavailable | satisfy::Witness::Impossible => { - Err(Error::CouldNotSatisfy) - } - } + let satisfaction = + satisfy::Satisfaction::satisfy(&self.node, &satisfier, self.ty.mall.safe, &leaf_hash); + self._satisfy(satisfaction) } /// Attempt to produce a malleable satisfying witness for the @@ -225,14 +217,20 @@ impl Miniscript { Pk: ToPublicKey, { let leaf_hash = TapLeafHash::from_script(&self.encode(), LeafVersion::TapScript); - match satisfy::Satisfaction::satisfy_mall( + let satisfaction = satisfy::Satisfaction::satisfy_mall( &self.node, &satisfier, self.ty.mall.safe, &leaf_hash, - ) - .stack - { + ); + self._satisfy(satisfaction) + } + + fn _satisfy(&self, satisfaction: satisfy::Satisfaction) -> Result>, Error> + where + Pk: ToPublicKey, + { + match satisfaction.stack { satisfy::Witness::Stack(stack) => { Ctx::check_witness::(&stack)?; Ok(stack)