diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index e3225591b..bd00b62f8 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -39,7 +39,7 @@ compile_descriptor, key: cache-${{ matrix.target }}-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }} - uses: actions-rs/toolchain@v1 with: - toolchain: 1.58 + toolchain: 1.64 override: true profile: minimal - name: fuzz diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index d035a0d6d..bdc2b76d0 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -54,8 +54,7 @@ jobs: - rust: stable - rust: beta - rust: nightly - - rust: 1.41.1 - - rust: 1.47 + - rust: 1.48 steps: - name: Checkout Crate uses: actions/checkout@v2 diff --git a/Cargo.toml b/Cargo.toml index 314b1930f..d47c679fa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,6 @@ no-std = ["hashbrown", "bitcoin/no-std"] compiler = [] trace = [] -unstable = [] serde = ["actual-serde", "bitcoin/serde"] rand = ["bitcoin/rand"] base64 = ["bitcoin/base64"] @@ -34,7 +33,6 @@ actual-serde = { package = "serde", version = "1.0.103", optional = true } serde_test = "1.0.147" bitcoin = { version = "0.30.0", features = ["base64"] } secp256k1 = {version = "0.27.0", features = ["rand-std"]} -actual-base64 = { package = "base64", version = "0.13.0" } [[example]] name = "htlc" @@ -52,10 +50,6 @@ required-features = ["std"] name = "verify_tx" required-features = ["std"] -[[example]] -name = "psbt" -required-features = ["std"] - [[example]] name = "xpub_descriptors" required-features = ["std"] diff --git a/README.md b/README.md index 9f46164bb..21fe72b8d 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ ![Build](https://github.com/rust-bitcoin/rust-miniscript/workflows/Continuous%20integration/badge.svg) -**Minimum Supported Rust Version:** 1.41.1 +**Minimum Supported Rust Version:** 1.48.0 # Miniscript @@ -40,18 +40,10 @@ The cargo feature `std` is enabled by default. 
At least one of the features `std Enabling the `no-std` feature does not disable `std`. To disable the `std` feature you must disable default features. The `no-std` feature only enables additional features required for this crate to be usable without `std`. Both can be enabled without conflict. ## Minimum Supported Rust Version (MSRV) -This library should always compile with any combination of features (minus -`no-std`) on **Rust 1.41.1** or **Rust 1.47** with `no-std`. +This library should always compile with any combination of features on **Rust 1.48.0**. Some dependencies do not play nicely with our MSRV, if you are running the tests -you may need to pin as follows: - -``` -cargo update --package url --precise 2.2.2 -cargo update --package form_urlencoded --precise 1.0.1 -cargo update -p once_cell --precise 1.13.1 -cargo update -p bzip2 --precise 0.4.2 -``` +you may need to pin some dependencies. See `./contrib/test.sh` for current pinning. ## Contributing @@ -61,6 +53,11 @@ architectural mismatches. If you have any questions or ideas you want to discuss please join us in [##miniscript](https://web.libera.chat/?channels=##miniscript) on Libera. +## Benchmarks + +We use a custom Rust compiler configuration conditional to guard the bench mark code. To run the +bench marks use: `RUSTFLAGS='--cfg=bench' cargo +nightly bench`. + ## Release Notes diff --git a/clippy.toml b/clippy.toml index 799264ef1..11d46a73f 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1 +1 @@ -msrv = "1.41.1" +msrv = "1.48.0" diff --git a/contrib/test.sh b/contrib/test.sh index eed23e09c..727c605b0 100755 --- a/contrib/test.sh +++ b/contrib/test.sh @@ -7,6 +7,12 @@ FEATURES="compiler serde rand base64" cargo --version rustc --version +# Cache the toolchain we are using. 
+NIGHTLY=false +if cargo --version | grep nightly; then + NIGHTLY=true +fi + # Format if told to if [ "$DO_FMT" = true ] then @@ -14,16 +20,15 @@ then cargo fmt -- --check fi -# Pin dependencies required to build with Rust 1.41.1 -if cargo --version | grep "1\.41\.0"; then - cargo update -p once_cell --precise 1.13.1 - cargo update -p serde --precise 1.0.156 -fi - -# Pin dependencies required to build with Rust 1.47.0 -if cargo --version | grep "1\.47\.0"; then +# Pin dependencies required to build with Rust 1.48.0 +if cargo --version | grep "1\.48\.0"; then cargo update -p once_cell --precise 1.13.1 - cargo update -p serde --precise 1.0.156 + cargo update -p quote --precise 1.0.28 + cargo update -p proc-macro2 --precise 1.0.63 + cargo update -p serde_json --precise 1.0.99 + cargo update -p serde --precise 1.0.152 + cargo update -p log --precise 0.4.18 + cargo update -p serde_test --precise 1.0.152 fi # Test bitcoind integration tests if told to (this only works with the stable toolchain) @@ -56,7 +61,6 @@ then cargo run --example parse cargo run --example sign_multisig cargo run --example verify_tx > /dev/null - cargo run --example psbt cargo run --example xpub_descriptors cargo run --example taproot --features=compiler cargo run --example psbt_sign_finalize --features=base64 @@ -80,10 +84,18 @@ then done fi -# Bench if told to (this only works with the nightly toolchain) +# Bench if told to, only works with non-stable toolchain (nightly, beta). 
if [ "$DO_BENCH" = true ] then - cargo bench --features="unstable compiler" + if [ "$NIGHTLY" = false ]; then + if [ -n "$RUSTUP_TOOLCHAIN" ]; then + echo "RUSTUP_TOOLCHAIN is set to a non-nightly toolchain but DO_BENCH requires a nightly toolchain" + else + echo "DO_BENCH requires a nightly toolchain" + fi + exit 1 + fi + RUSTFLAGS='--cfg=bench' cargo bench fi # Build the docs if told to (this only works with the nightly toolchain) diff --git a/examples/htlc.rs b/examples/htlc.rs index 4e7bf3572..8f864d639 100644 --- a/examples/htlc.rs +++ b/examples/htlc.rs @@ -1,22 +1,11 @@ -// Miniscript -// Written in 2019 by -// Thomas Eizinger -// -// To the extent possible under law, the author(s) have dedicated all -// copyright and related and neighboring rights to this software to -// the public domain worldwide. This software is distributed without -// any warranty. -// -// You should have received a copy of the CC0 Public Domain Dedication -// along with this software. -// If not, see . -// +// Written by Thomas Eizinger +// SPDX-License-Identifier: CC0-1.0 //! Example: Create an HTLC with miniscript using the policy compiler use std::str::FromStr; -use bitcoin::Network; +use miniscript::bitcoin::Network; use miniscript::descriptor::Wsh; use miniscript::policy::{Concrete, Liftable}; @@ -51,7 +40,7 @@ fn main() { "or(and(pk(022222222222222222222222222222222222222222222222222222222222222222),sha256(1111111111111111111111111111111111111111111111111111111111111111)),and(pk(020202020202020202020202020202020202020202020202020202020202020202),older(4444)))" ); - // Get the scriptPpubkey for this Wsh descriptor. + // Get the scriptPubkey for this Wsh descriptor. 
assert_eq!( format!("{:x}", htlc_descriptor.script_pubkey()), "0020d853877af928a8d2a569c9c0ed14bd16f6a80ce9cccaf8a6150fd8f7f8867ae2" @@ -63,7 +52,7 @@ fn main() { "21022222222222222222222222222222222222222222222222222222222222222222ac6476a91451814f108670aced2d77c1805ddd6634bc9d473188ad025c11b26782012088a82011111111111111111111111111111111111111111111111111111111111111118768" ); - // Get the address for this Wsh descriptor.v + // Get the address for this Wsh descriptor. assert_eq!( format!("{}", htlc_descriptor.address(Network::Bitcoin)), "bc1qmpfcw7he9z5d9ftfe8qw699azmm2sr8fen903fs4plv007yx0t3qxfmqv5" diff --git a/examples/parse.rs b/examples/parse.rs index 9bd00ff8c..04c2fd837 100644 --- a/examples/parse.rs +++ b/examples/parse.rs @@ -1,16 +1,4 @@ -// Miniscript -// Written in 2019 by -// Andrew Poelstra -// -// To the extent possible under law, the author(s) have dedicated all -// copyright and related and neighboring rights to this software to -// the public domain worldwide. This software is distributed without -// any warranty. -// -// You should have received a copy of the CC0 Public Domain Dedication -// along with this software. -// If not, see . -// +// SPDX-License-Identifier: CC0-1.0 //! Example: Parsing a descriptor from a string. 
diff --git a/examples/psbt_sign_finalize.rs b/examples/psbt_sign_finalize.rs index 8108b4b7e..e11e6c792 100644 --- a/examples/psbt_sign_finalize.rs +++ b/examples/psbt_sign_finalize.rs @@ -1,14 +1,15 @@ +// SPDX-License-Identifier: CC0-1.0 + use std::collections::BTreeMap; use std::str::FromStr; -use actual_base64 as base64; -use bitcoin::sighash::SighashCache; -use bitcoin::PrivateKey; use miniscript::bitcoin::consensus::encode::deserialize; use miniscript::bitcoin::hashes::hex::FromHex; -use miniscript::bitcoin::psbt::PartiallySignedTransaction as Psbt; +use miniscript::bitcoin::psbt::{self, Psbt}; +use miniscript::bitcoin::sighash::SighashCache; use miniscript::bitcoin::{ - self, psbt, secp256k1, Address, Network, OutPoint, Script, Sequence, Transaction, TxIn, TxOut, + self, base64, secp256k1, Address, Network, OutPoint, PrivateKey, Script, Sequence, Transaction, + TxIn, TxOut, }; use miniscript::plan::Assets; use miniscript::psbt::{PsbtExt, PsbtInputExt}; diff --git a/examples/sign_multisig.rs b/examples/sign_multisig.rs index e4b1cac33..1e913f609 100644 --- a/examples/sign_multisig.rs +++ b/examples/sign_multisig.rs @@ -1,16 +1,4 @@ -// Miniscript -// Written in 2019 by -// Andrew Poelstra -// -// To the extent possible under law, the author(s) have dedicated all -// copyright and related and neighboring rights to this software to -// the public domain worldwide. This software is distributed without -// any warranty. -// -// You should have received a copy of the CC0 Public Domain Dedication -// along with this software. -// If not, see . -// +// SPDX-License-Identifier: CC0-1.0 //! Example: Signing a 2-of-3 multisignature. 
diff --git a/examples/taproot.rs b/examples/taproot.rs index 0d64ebeff..3c8c6b66e 100644 --- a/examples/taproot.rs +++ b/examples/taproot.rs @@ -1,10 +1,12 @@ +// SPDX-License-Identifier: CC0-1.0 + use std::collections::HashMap; use std::str::FromStr; -use bitcoin::address::WitnessVersion; -use bitcoin::key::XOnlyPublicKey; -use bitcoin::secp256k1::{rand, KeyPair}; -use bitcoin::Network; +use miniscript::bitcoin::address::WitnessVersion; +use miniscript::bitcoin::key::{KeyPair, XOnlyPublicKey}; +use miniscript::bitcoin::secp256k1::rand; +use miniscript::bitcoin::Network; use miniscript::descriptor::DescriptorType; use miniscript::policy::Concrete; use miniscript::{translate_hash_fail, Descriptor, Miniscript, Tap, TranslatePk, Translator}; @@ -21,9 +23,8 @@ impl Translator for StrPkTranslator { self.pk_map.get(pk).copied().ok_or(()) } - // We don't need to implement these methods as we are not using them in the policy - // Fail if we encounter any hash fragments. - // See also translate_hash_clone! macro + // We don't need to implement these methods as we are not using them in the policy. + // Fail if we encounter any hash fragments. See also translate_hash_clone! macro. translate_hash_fail!(String, XOnlyPublicKey, ()); } @@ -39,7 +40,7 @@ fn main() { .replace(&[' ', '\n', '\t'][..], ""); let _ms = Miniscript::::from_str("and_v(v:ripemd160(H),pk(A))").unwrap(); - let pol: Concrete = Concrete::from_str(&pol_str).unwrap(); + let pol = Concrete::::from_str(&pol_str).unwrap(); // In case we can't find an internal key for the given policy, we set the internal key to // a random pubkey as specified by BIP341 (which are *unspendable* by any party :p) let desc = pol.compile_tr(Some("UNSPENDABLE_KEY".to_string())).unwrap(); @@ -52,7 +53,7 @@ fn main() { // Check whether the descriptors are safe. 
assert!(desc.sanity_check().is_ok()); - // Descriptor Type and Version should match respectively for Taproot + // Descriptor type and version should match respectively for taproot let desc_type = desc.desc_type(); assert_eq!(desc_type, DescriptorType::Tr); assert_eq!(desc_type.segwit_version().unwrap(), WitnessVersion::V1); @@ -99,11 +100,12 @@ fn main() { let real_desc = desc.translate_pk(&mut t).unwrap(); - // Max Satisfaction Weight for compilation, corresponding to the script-path spend - // `multi_a(2,PUBKEY_1,PUBKEY_2) at taptree depth 1, having - // Max Witness Size = varint(control_block_size) + control_block size + - // varint(script_size) + script_size + max_satisfaction_size - // = 1 + 65 + 1 + 70 + 132 = 269 + // Max satisfaction weight for compilation, corresponding to the script-path spend + // `multi_a(2,PUBKEY_1,PUBKEY_2) at taptree depth 1, having: + // + // max_witness_size = varint(control_block_size) + control_block size + + // varint(script_size) + script_size + max_satisfaction_size + // = 1 + 65 + 1 + 70 + 132 = 269 let max_sat_wt = real_desc.max_weight_to_satisfy().unwrap(); assert_eq!(max_sat_wt, 269); diff --git a/examples/verify_tx.rs b/examples/verify_tx.rs index 9f2387a2f..bcac4fd3e 100644 --- a/examples/verify_tx.rs +++ b/examples/verify_tx.rs @@ -1,24 +1,12 @@ -// Miniscript -// Written in 2019 by -// Andrew Poelstra -// -// To the extent possible under law, the author(s) have dedicated all -// copyright and related and neighboring rights to this software to -// the public domain worldwide. This software is distributed without -// any warranty. -// -// You should have received a copy of the CC0 Public Domain Dedication -// along with this software. -// If not, see . -// +// SPDX-License-Identifier: CC0-1.0 //! Example: Verifying a signed transaction. 
use std::str::FromStr; -use bitcoin::consensus::Decodable; -use bitcoin::secp256k1::{self, Secp256k1}; -use bitcoin::{absolute, sighash, Sequence}; +use miniscript::bitcoin::consensus::Decodable; +use miniscript::bitcoin::secp256k1::{self, Secp256k1}; +use miniscript::bitcoin::{absolute, sighash, Sequence}; use miniscript::interpreter::KeySigPair; fn main() { diff --git a/examples/xpub_descriptors.rs b/examples/xpub_descriptors.rs index f640fad35..08f31ecee 100644 --- a/examples/xpub_descriptors.rs +++ b/examples/xpub_descriptors.rs @@ -1,16 +1,4 @@ -// Miniscript -// Written in 2019 by -// Andrew Poelstra -// -// To the extent possible under law, the author(s) have dedicated all -// copyright and related and neighboring rights to this software to -// the public domain worldwide. This software is distributed without -// any warranty. -// -// You should have received a copy of the CC0 Public Domain Dedication -// along with this software. -// If not, see . -// +// SPDX-License-Identifier: CC0-1.0 //! Example: Parsing a xpub and getting an address. 
diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index 542183526..5cf32fca4 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -12,7 +12,7 @@ cargo-fuzz = true honggfuzz = { version = "0.5.55", default-features = false } miniscript = { path = "..", features = [ "compiler" ] } -regex = "1.4" +regex = "1.0" [[bin]] name = "roundtrip_miniscript_str" diff --git a/fuzz/fuzz_targets/roundtrip_descriptor.rs b/fuzz/fuzz_targets/roundtrip_descriptor.rs index d74569e88..b9363f130 100644 --- a/fuzz/fuzz_targets/roundtrip_descriptor.rs +++ b/fuzz/fuzz_targets/roundtrip_descriptor.rs @@ -23,9 +23,27 @@ fn main() { #[cfg(test)] mod tests { - use super::*; + fn extend_vec_from_hex(hex: &str, out: &mut Vec) { + let mut b = 0; + for (idx, c) in hex.as_bytes().iter().enumerate() { + b <<= 4; + match *c { + b'A'..=b'F' => b |= c - b'A' + 10, + b'a'..=b'f' => b |= c - b'a' + 10, + b'0'..=b'9' => b |= c - b'0', + _ => panic!("Bad hex"), + } + if (idx & 1) == 1 { + out.push(b); + b = 0; + } + } + } + #[test] - fn test() { - do_test(b"pkh()"); + fn duplicate_crash3() { + let mut a = Vec::new(); + extend_vec_from_hex("747228726970656d616e645f6e5b5c79647228726970656d616e645f6e5b5c7964646464646464646464646464646464646464646464646464646b5f6872702c29", &mut a); + super::do_test(&a); } } diff --git a/rustfmt.toml b/rustfmt.toml index 37abad596..b2a63aa46 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -44,7 +44,7 @@ enum_discrim_align_threshold = 0 match_arm_blocks = true match_arm_leading_pipes = "Never" force_multiline_blocks = false -fn_args_layout = "Tall" +fn_params_layout = "Tall" brace_style = "SameLineWhere" control_brace_style = "AlwaysSameLine" trailing_semicolon = true diff --git a/src/descriptor/bare.rs b/src/descriptor/bare.rs index 87cbc3bbe..eaed40785 100644 --- a/src/descriptor/bare.rs +++ b/src/descriptor/bare.rs @@ -1,4 +1,3 @@ -// Written in 2020 by the rust-miniscript developers // SPDX-License-Identifier: CC0-1.0 //! 
# Bare Output Descriptors @@ -219,7 +218,7 @@ where where T: Translator, { - Ok(Bare::new(self.ms.translate_pk(t)?).map_err(TranslateErr::OuterError)?) + Bare::new(self.ms.translate_pk(t)?).map_err(TranslateErr::OuterError) } } diff --git a/src/descriptor/checksum.rs b/src/descriptor/checksum.rs index 4613db3fd..10aaaacfc 100644 --- a/src/descriptor/checksum.rs +++ b/src/descriptor/checksum.rs @@ -8,10 +8,10 @@ use core::fmt; use core::iter::FromIterator; +pub use crate::expression::VALID_CHARS; use crate::prelude::*; use crate::Error; -const INPUT_CHARSET: &str = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "; const CHECKSUM_CHARSET: &[u8] = b"qpzry9x8gf2tvdw0s3jn54khce6mua7l"; fn poly_mod(mut c: u64, val: u64) -> u64 { @@ -101,9 +101,14 @@ impl Engine { /// state! It is safe to continue feeding it data but the result will not be meaningful. pub fn input(&mut self, s: &str) -> Result<(), Error> { for ch in s.chars() { - let pos = INPUT_CHARSET.find(ch).ok_or_else(|| { - Error::BadDescriptor(format!("Invalid character in checksum: '{}'", ch)) - })? as u64; + let pos = VALID_CHARS + .get(ch as usize) + .ok_or_else(|| { + Error::BadDescriptor(format!("Invalid character in checksum: '{}'", ch)) + })? + .ok_or_else(|| { + Error::BadDescriptor(format!("Invalid character in checksum: '{}'", ch)) + })? as u64; self.c = poly_mod(self.c, pos & 31); self.cls = self.cls * 3 + (pos >> 5); self.clscount += 1; diff --git a/src/descriptor/key.rs b/src/descriptor/key.rs index c0333ec13..21fc73143 100644 --- a/src/descriptor/key.rs +++ b/src/descriptor/key.rs @@ -1061,13 +1061,13 @@ impl MiniscriptKey for DescriptorPublicKey { } fn is_x_only_key(&self) -> bool { - match self { + matches!( + self, DescriptorPublicKey::Single(SinglePub { key: SinglePubKey::XOnly(ref _key), .. 
- }) => true, - _ => false, - } + }) + ) } fn num_der_paths(&self) -> usize { diff --git a/src/descriptor/mod.rs b/src/descriptor/mod.rs index 528fa760a..71700bab9 100644 --- a/src/descriptor/mod.rs +++ b/src/descriptor/mod.rs @@ -1,4 +1,3 @@ -// Written in 2018 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! # Output Descriptors @@ -24,10 +23,11 @@ use sync::Arc; use self::checksum::verify_checksum; use crate::miniscript::{satisfy, Legacy, Miniscript, Segwitv0}; use crate::plan::{AssetProvider, Plan}; +use crate::miniscript::decode::Terminal; use crate::prelude::*; use crate::{ - expression, hash256, miniscript, BareCtx, Error, ForEachKey, MiniscriptKey, Satisfier, - ToPublicKey, TranslateErr, TranslatePk, Translator, + expression, hash256, BareCtx, Error, ForEachKey, MiniscriptKey, Satisfier, ToPublicKey, + TranslateErr, TranslatePk, Translator, }; mod bare; @@ -168,12 +168,10 @@ impl Descriptor { /// Create a new pk descriptor pub fn new_pk(pk: Pk) -> Self { // roundabout way to constuct `c:pk_k(pk)` - let ms: Miniscript = - Miniscript::from_ast(miniscript::decode::Terminal::Check(Arc::new( - Miniscript::from_ast(miniscript::decode::Terminal::PkK(pk)) - .expect("Type check cannot fail"), - ))) - .expect("Type check cannot fail"); + let ms: Miniscript = Miniscript::from_ast(Terminal::Check(Arc::new( + Miniscript::from_ast(Terminal::PkK(pk)).expect("Type check cannot fail"), + ))) + .expect("Type check cannot fail"); Descriptor::Bare(Bare::new(ms).expect("Context checks cannot fail for p2pk")) } diff --git a/src/descriptor/segwitv0.rs b/src/descriptor/segwitv0.rs index 0f308e475..1d78b87b1 100644 --- a/src/descriptor/segwitv0.rs +++ b/src/descriptor/segwitv0.rs @@ -1,4 +1,3 @@ -// Written in 2020 by the rust-miniscript developers // SPDX-License-Identifier: CC0-1.0 //! 
# Segwit Output Descriptors diff --git a/src/descriptor/sh.rs b/src/descriptor/sh.rs index ee072ebf9..94b8eb1fc 100644 --- a/src/descriptor/sh.rs +++ b/src/descriptor/sh.rs @@ -1,4 +1,3 @@ -// Written in 2020 by the rust-miniscript developers // SPDX-License-Identifier: CC0-1.0 //! # P2SH Descriptors @@ -351,13 +350,13 @@ impl Sh { let witness_script = wsh.inner_script().to_v0_p2wsh(); let push_bytes = <&PushBytes>::try_from(witness_script.as_bytes()) .expect("Witness script is not too large"); - script::Builder::new().push_slice(&push_bytes).into_script() + script::Builder::new().push_slice(push_bytes).into_script() } ShInner::Wpkh(ref wpkh) => { let redeem_script = wpkh.script_pubkey(); let push_bytes: &PushBytes = <&PushBytes>::try_from(redeem_script.as_bytes()).expect("Script not too large"); - script::Builder::new().push_slice(&push_bytes).into_script() + script::Builder::new().push_slice(push_bytes).into_script() } ShInner::SortedMulti(..) | ShInner::Ms(..) => ScriptBuf::new(), } diff --git a/src/descriptor/sortedmulti.rs b/src/descriptor/sortedmulti.rs index 0907f660d..3887d8752 100644 --- a/src/descriptor/sortedmulti.rs +++ b/src/descriptor/sortedmulti.rs @@ -1,4 +1,3 @@ -// Written in 2020 by the rust-miniscript developers // SPDX-License-Identifier: CC0-1.0 //! # Sorted Multi @@ -19,8 +18,8 @@ use crate::miniscript::satisfy::{Placeholder, Satisfaction}; use crate::plan::AssetProvider; use crate::prelude::*; use crate::{ - errstr, expression, miniscript, policy, script_num_size, Error, ForEachKey, Miniscript, - MiniscriptKey, Satisfier, ToPublicKey, TranslateErr, Translator, + errstr, expression, policy, script_num_size, Error, ForEachKey, Miniscript, MiniscriptKey, + Satisfier, ToPublicKey, TranslateErr, Translator, }; /// Contents of a "sortedmulti" descriptor @@ -47,7 +46,7 @@ impl SortedMultiVec { // Check the limits before creating a new SortedMultiVec // For example, under p2sh context the scriptlen can only be // upto 520 bytes. 
- let term: miniscript::decode::Terminal = Terminal::Multi(k, pks.clone()); + let term: Terminal = Terminal::Multi(k, pks.clone()); let ms = Miniscript::from_ast(term)?; // This would check all the consensus rules for p2sh/p2wsh and @@ -238,9 +237,9 @@ impl fmt::Display for SortedMultiVec Tr { let builder = bitcoin::blockdata::script::Builder::new(); builder .push_opcode(opcodes::all::OP_PUSHNUM_1) - .push_slice(&output_key.serialize()) + .push_slice(output_key.serialize()) .into_script() } @@ -449,8 +449,7 @@ where type Item = (u8, &'a Miniscript); fn next(&mut self) -> Option { - while !self.stack.is_empty() { - let (depth, last) = self.stack.pop().expect("Size checked above"); + while let Some((depth, last)) = self.stack.pop() { match *last { TapTree::Tree(ref l, ref r) => { self.stack.push((depth + 1, r)); @@ -563,17 +562,19 @@ impl fmt::Display for Tr { // Helper function to parse string into miniscript tree form fn parse_tr_tree(s: &str) -> Result { - for ch in s.bytes() { - if !ch.is_ascii() { - return Err(Error::Unprintable(ch)); - } - } + expression::check_valid_chars(s)?; if s.len() > 3 && &s[..3] == "tr(" && s.as_bytes()[s.len() - 1] == b')' { let rest = &s[3..s.len() - 1]; if !rest.contains(',') { + let key = expression::Tree::from_str(rest)?; + if !key.args.is_empty() { + return Err(Error::Unexpected( + "invalid taproot internal key".to_string(), + )); + } let internal_key = expression::Tree { - name: rest, + name: key.name, args: vec![], }; return Ok(expression::Tree { @@ -585,8 +586,14 @@ fn parse_tr_tree(s: &str) -> Result { let (key, script) = split_once(rest, ',') .ok_or_else(|| Error::BadDescriptor("invalid taproot descriptor".to_string()))?; + let key = expression::Tree::from_str(key)?; + if !key.args.is_empty() { + return Err(Error::Unexpected( + "invalid taproot internal key".to_string(), + )); + } let internal_key = expression::Tree { - name: key, + name: key.name, args: vec![], }; if script.is_empty() { @@ -613,19 +620,13 @@ fn 
split_once(inp: &str, delim: char) -> Option<(&str, &str)> { if inp.is_empty() { None } else { - let mut found = inp.len(); - for (idx, ch) in inp.chars().enumerate() { - if ch == delim { - found = idx; - break; - } - } - // No comma or trailing comma found - if found >= inp.len() - 1 { - Some((inp, "")) - } else { - Some((&inp[..found], &inp[found + 1..])) - } + // find the first character that matches delim + let res = inp + .chars() + .position(|ch| ch == delim) + .map(|idx| (&inp[..idx], &inp[idx + 1..])) + .unwrap_or((inp, "")); + Some(res) } } diff --git a/src/expression.rs b/src/expression.rs index d3363abc4..fe7d14785 100644 --- a/src/expression.rs +++ b/src/expression.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! # Function-like Expression Language @@ -8,6 +7,29 @@ use core::str::FromStr; use crate::prelude::*; use crate::{errstr, Error, MAX_RECURSION_DEPTH}; +/// Allowed characters are descriptor strings. +pub const INPUT_CHARSET: &str = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "; + +/// Map of valid characters in descriptor strings. 
+#[rustfmt::skip] +pub const VALID_CHARS: [Option; 128] = [ + None, None, None, None, None, None, None, None, None, None, None, None, None, + None, None, None, None, None, None, None, None, None, None, None, None, None, + None, None, None, None, None, None, Some(94), Some(59), Some(92), Some(91), + Some(28), Some(29), Some(50), Some(15), Some(10), Some(11), Some(17), Some(51), + Some(14), Some(52), Some(53), Some(16), Some(0), Some(1), Some(2), Some(3), + Some(4), Some(5), Some(6), Some(7), Some(8), Some(9), Some(27), Some(54), + Some(55), Some(56), Some(57), Some(58), Some(26), Some(82), Some(83), + Some(84), Some(85), Some(86), Some(87), Some(88), Some(89), Some(32), Some(33), + Some(34), Some(35), Some(36), Some(37), Some(38), Some(39), Some(40), Some(41), + Some(42), Some(43), Some(44), Some(45), Some(46), Some(47), Some(48), Some(49), + Some(12), Some(93), Some(13), Some(60), Some(61), Some(90), Some(18), Some(19), + Some(20), Some(21), Some(22), Some(23), Some(24), Some(25), Some(64), Some(65), + Some(66), Some(67), Some(68), Some(69), Some(70), Some(71), Some(72), Some(73), + Some(74), Some(75), Some(76), Some(77), Some(78), Some(79), Some(80), Some(81), + Some(30), Some(62), Some(31), Some(63), None, +]; + #[derive(Debug)] /// A token of the form `x(...)` or `x` pub struct Tree<'a> { @@ -166,13 +188,7 @@ impl<'a> Tree<'a> { /// Parses a tree from a string #[allow(clippy::should_implement_trait)] // Cannot use std::str::FromStr because of lifetimes. pub fn from_str(s: &'a str) -> Result, Error> { - // Filter out non-ASCII because we byte-index strings all over the - // place and Rust gets very upset when you splinch a string. 
- for ch in s.bytes() { - if !ch.is_ascii() { - return Err(Error::Unprintable(ch)); - } - } + check_valid_chars(s)?; let (top, rem) = Tree::from_slice(s)?; if rem.is_empty() { @@ -183,6 +199,23 @@ impl<'a> Tree<'a> { } } +/// Filter out non-ASCII because we byte-index strings all over the +/// place and Rust gets very upset when you splinch a string. +pub fn check_valid_chars(s: &str) -> Result<(), Error> { + for ch in s.bytes() { + if !ch.is_ascii() { + return Err(Error::Unprintable(ch)); + } + // Index bounds: We know that ch is ASCII, so it is <= 127. + if VALID_CHARS[ch as usize].is_none() { + return Err(Error::Unexpected( + "Only characters in INPUT_CHARSET are allowed".to_string(), + )); + } + } + Ok(()) +} + /// Parse a string as a u32, for timelocks or thresholds pub fn parse_num(s: &str) -> Result { if s.len() > 1 { @@ -253,4 +286,13 @@ mod tests { assert!(parse_num("+6").is_err()); assert!(parse_num("-6").is_err()); } + + #[test] + fn test_valid_char_map() { + let mut valid_chars = [None; 128]; + for (i, ch) in super::INPUT_CHARSET.chars().enumerate() { + valid_chars[ch as usize] = Some(i as u8); + } + assert_eq!(valid_chars, super::VALID_CHARS); + } } diff --git a/src/interpreter/inner.rs b/src/interpreter/inner.rs index 538cacb7e..266670eaf 100644 --- a/src/interpreter/inner.rs +++ b/src/interpreter/inner.rs @@ -43,7 +43,7 @@ fn script_from_stack_elem( ) -> Result, Error> { match *elem { stack::Element::Push(sl) => { - Miniscript::parse_with_ext(&bitcoin::Script::from_bytes(sl), &ExtParams::allow_all()) + Miniscript::parse_with_ext(bitcoin::Script::from_bytes(sl), &ExtParams::allow_all()) .map_err(Error::from) } stack::Element::Satisfied => { @@ -380,7 +380,7 @@ impl ToNoChecks for Miniscript { translate_hash_clone!(bitcoin::PublicKey, BitcoinKey, ()); } - self.real_translate_pk(&mut TranslateFullPk) + self.translate_pk_ctx(&mut TranslateFullPk) .expect("Translation should succeed") } } @@ -397,7 +397,7 @@ impl ToNoChecks for Miniscript TreeLike for &'a 
Miniscript { + fn as_node(&self) -> Tree { + use Terminal::*; + match self.node { + PkK(..) | PkH(..) | RawPkH(..) | After(..) | Older(..) | Sha256(..) | Hash256(..) + | Ripemd160(..) | Hash160(..) | True | False | Multi(..) | MultiA(..) => Tree::Nullary, + Alt(ref sub) + | Swap(ref sub) + | Check(ref sub) + | DupIf(ref sub) + | Verify(ref sub) + | NonZero(ref sub) + | ZeroNotEqual(ref sub) => Tree::Unary(sub), + AndV(ref left, ref right) + | AndB(ref left, ref right) + | OrB(ref left, ref right) + | OrD(ref left, ref right) + | OrC(ref left, ref right) + | OrI(ref left, ref right) => Tree::Binary(left, right), + AndOr(ref a, ref b, ref c) => Tree::Nary(Arc::from([a.as_ref(), b, c])), + Thresh(_, ref subs) => Tree::Nary(subs.iter().map(Arc::as_ref).collect()), + } + } +} + +impl TreeLike for Arc> { + fn as_node(&self) -> Tree { + use Terminal::*; + match self.node { + PkK(..) | PkH(..) | RawPkH(..) | After(..) | Older(..) | Sha256(..) | Hash256(..) + | Ripemd160(..) | Hash160(..) | True | False | Multi(..) | MultiA(..) => Tree::Nullary, + Alt(ref sub) + | Swap(ref sub) + | Check(ref sub) + | DupIf(ref sub) + | Verify(ref sub) + | NonZero(ref sub) + | ZeroNotEqual(ref sub) => Tree::Unary(Arc::clone(sub)), + AndV(ref left, ref right) + | AndB(ref left, ref right) + | OrB(ref left, ref right) + | OrD(ref left, ref right) + | OrC(ref left, ref right) + | OrI(ref left, ref right) => Tree::Binary(Arc::clone(left), Arc::clone(right)), + AndOr(ref a, ref b, ref c) => { + Tree::Nary(Arc::from([Arc::clone(a), Arc::clone(b), Arc::clone(c)])) + } + Thresh(_, ref subs) => Tree::Nary(subs.iter().map(Arc::clone).collect()), + } + } +} diff --git a/src/iter/tree.rs b/src/iter/tree.rs new file mode 100644 index 000000000..e4884e991 --- /dev/null +++ b/src/iter/tree.rs @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: CC0-1.0 + +//! Abstract Trees +//! +//! This module provides the [`TreeLike`] trait which represents a node in a +//! 
tree, and several iterators over trees whose nodes implement this trait. +//! + +use crate::prelude::*; +use crate::sync::Arc; + +/// Abstract node of a tree. +/// +/// Tracks the arity (out-degree) of a node, which is the only thing that +/// is needed for iteration purposes. +pub enum Tree { + /// Combinator with no children. + Nullary, + /// Combinator with one child. + Unary(T), + /// Combinator with two children. + Binary(T, T), + /// Combinator with more than two children. + Nary(Arc<[T]>), +} + +/// A trait for any structure which has the shape of a Miniscript tree. +/// +/// As a general rule, this should be implemented on references to nodes, +/// rather than nodes themselves, because it provides algorithms that +/// assume copying is cheap. +/// +/// To implement this trait, you only need to implement the [`TreeLike::as_node`] +/// method, which will usually be very mechanical. Everything else is provided. +/// However, to avoid allocations, it may make sense to also implement +/// [`TreeLike::n_children`] and [`TreeLike::nth_child`] because the default +/// implementations will allocate vectors for n-ary nodes. +pub trait TreeLike: Clone + Sized { + /// Interpret the node as an abstract node. + fn as_node(&self) -> Tree; + + /// Accessor for the number of children this node has. + fn n_children(&self) -> usize { + match self.as_node() { + Tree::Nullary => 0, + Tree::Unary(..) => 1, + Tree::Binary(..) => 2, + Tree::Nary(children) => children.len(), + } + } + + /// Accessor for the nth child of the node, if a child with that index exists. 
+ fn nth_child(&self, n: usize) -> Option { + match (n, self.as_node()) { + (_, Tree::Nullary) => None, + (0, Tree::Unary(sub)) => Some(sub), + (_, Tree::Unary(..)) => None, + (0, Tree::Binary(sub, _)) => Some(sub), + (1, Tree::Binary(_, sub)) => Some(sub), + (_, Tree::Binary(..)) => None, + (n, Tree::Nary(children)) => children.get(n).cloned(), + } + } + + /// Obtains an iterator of all the nodes rooted at the node, in pre-order. + fn pre_order_iter(self) -> PreOrderIter { + PreOrderIter { stack: vec![self] } + } + + /// Obtains a verbose iterator of all the nodes rooted at the DAG, in pre-order. + /// + /// See the documentation of [`VerbosePreOrderIter`] for more information about what + /// this does. Essentially, if you find yourself using [`Self::pre_order_iter`] and + /// then adding a stack to manually track which items and their children have been + /// yielded, you may be better off using this iterator instead. + fn verbose_pre_order_iter(self) -> VerbosePreOrderIter { + VerbosePreOrderIter { + stack: vec![PreOrderIterItem::initial(self, None)], + index: 0, + } + } + + /// Obtains an iterator of all the nodes rooted at the DAG, in post order. + /// + /// Each node is only yielded once, at the leftmost position that it + /// appears in the DAG. + fn post_order_iter(self) -> PostOrderIter { + PostOrderIter { + index: 0, + stack: vec![IterStackItem::unprocessed(self, None)], + } + } +} + +/// Element stored internally on the stack of a [`PostOrderIter`]. +/// +/// This is **not** the type that is yielded by the [`PostOrderIter`]; +/// in fact, this type is not even exported. +#[derive(Clone, Debug)] +struct IterStackItem { + /// The element on the stack + elem: T, + /// Whether we have dealt with this item (and pushed its children, + /// if any) yet. + processed: bool, + /// If the item has been processed, the index of its children. + child_indices: Vec, + /// Whether the element is a left- or right-child of its parent. 
+ parent_stack_idx: Option, +} + +impl IterStackItem { + /// Constructor for a new stack item with a given element and relationship + /// to its parent. + fn unprocessed(elem: T, parent_stack_idx: Option) -> Self { + IterStackItem { + processed: false, + child_indices: Vec::with_capacity(elem.n_children()), + parent_stack_idx, + elem, + } + } +} + +/// Iterates over a DAG in _post order_. +/// +/// That means nodes are yielded in the order (left child, right child, parent). +#[derive(Clone, Debug)] +pub struct PostOrderIter { + /// The index of the next item to be yielded + index: usize, + /// A stack of elements to be yielded; each element is a node, then its left + /// and right children (if they exist and if they have been yielded already) + stack: Vec>, +} + +/// A set of data yielded by a `PostOrderIter`. +pub struct PostOrderIterItem { + /// The actual node data + pub node: T, + /// The index of this node (equivalent to if you'd called `.enumerate()` on + /// the iterator) + pub index: usize, + /// The indices of this node's children. + pub child_indices: Vec, +} + +impl Iterator for PostOrderIter { + type Item = PostOrderIterItem; + + fn next(&mut self) -> Option { + let mut current = self.stack.pop()?; + + if !current.processed { + current.processed = true; + + // When we first encounter an item, it is completely unknown; it is + // nominally the next item to be yielded, but it might have children, + // and if so, they come first + let current_stack_idx = self.stack.len(); + let n_children = current.elem.n_children(); + self.stack.push(current); + for idx in (0..n_children).rev() { + self.stack.push(IterStackItem::unprocessed( + self.stack[current_stack_idx].elem.nth_child(idx).unwrap(), + Some(current_stack_idx), + )); + } + self.next() + } else { + // The second time we encounter an item, we have dealt with its children, + // updated the child indices for this item, and are now ready to yield it + // rather than putting it back in the stack. 
+ // + // Before yielding though, we must the item's parent's child indices with + // this item's index. + if let Some(idx) = current.parent_stack_idx { + self.stack[idx].child_indices.push(self.index); + } + + self.index += 1; + Some(PostOrderIterItem { + node: current.elem, + index: self.index - 1, + child_indices: current.child_indices, + }) + } + } +} + +/// Iterates over a [`TreeLike`] in _pre order_. +/// +/// Unlike the post-order iterator, this one does not keep track of indices +/// (this would be impractical since when we yield a node we have not yet +/// yielded its children, so we cannot know their indices). If you do need +/// the indices for some reason, the best strategy may be to run the +/// post-order iterator, collect into a vector, then iterate through that +/// backward. +#[derive(Clone, Debug)] +pub struct PreOrderIter { + /// A stack of elements to be yielded. As items are yielded, their right + /// children are put onto the stack followed by their left, so that the + /// appropriate one will be yielded on the next iteration. + stack: Vec, +} + +impl Iterator for PreOrderIter { + type Item = T; + + fn next(&mut self) -> Option { + // This algorithm is _significantly_ simpler than the post-order one, + // mainly because we don't care about child indices. + let top = self.stack.pop()?; + match top.as_node() { + Tree::Nullary => {} + Tree::Unary(next) => self.stack.push(next), + Tree::Binary(left, right) => { + self.stack.push(right); + self.stack.push(left); + } + Tree::Nary(children) => { + self.stack.extend(children.iter().rev().cloned()); + } + } + Some(top) + } +} + +/// Iterates over a [`TreeLike`] in "verbose pre order", yielding extra state changes. +/// +/// This yields nodes followed by their children, followed by the node *again* +/// after each child. This means that each node will be yielded a total of +/// (n+1) times, where n is its number of children. 
+/// +/// The different times that a node is yielded can be distinguished by looking +/// at the [`PreOrderIterItem::n_children_yielded`] (which, in particular, +/// will be 0 on the first yield) and [`PreOrderIterItem::is_complete`] (which +/// will be true on the last yield) fields of the yielded item. +#[derive(Clone, Debug)] +pub struct VerbosePreOrderIter { + /// A stack of elements to be yielded. As items are yielded, their right + /// children are put onto the stack followed by their left, so that the + /// appropriate one will be yielded on the next iteration. + stack: Vec>, + /// The index of the next item to be yielded. + /// + /// Note that unlike the [`PostOrderIter`], this value is not monotonic + /// and not equivalent to just using `enumerate` on the iterator, because + /// elements may be yielded multiple times. + index: usize, +} + +impl Iterator for VerbosePreOrderIter { + type Item = PreOrderIterItem; + + fn next(&mut self) -> Option { + // This algorithm is still simpler than the post-order one, because while + // we care about node indices, we don't care about their childrens' indices. + let mut top = self.stack.pop()?; + + // If this is the first time we're be yielding this element, set its index. + if top.n_children_yielded == 0 { + top.index = self.index; + self.index += 1; + } + // Push the next child. + let n_children = top.node.n_children(); + if top.n_children_yielded < n_children { + self.stack.push(top.clone().increment(n_children)); + let child = top.node.nth_child(top.n_children_yielded).unwrap(); + self.stack + .push(PreOrderIterItem::initial(child, Some(top.node.clone()))); + } + + // Then yield the element. + Some(top) + } +} + +/// A set of data yielded by a [`VerbosePreOrderIter`]. +#[derive(Clone, Debug)] +pub struct PreOrderIterItem { + /// The actual element being yielded. + pub node: T, + /// The parent of this node. `None` for the initial node, but will be + /// populated for all other nodes. 
+ pub parent: Option, + /// The index when the element was first yielded. + pub index: usize, + /// How many of this item's children have been yielded. + /// + /// This can also be interpreted as a count of how many times this + /// item has been yielded before. + pub n_children_yielded: usize, + /// Whether this item is done (will not be yielded again). + pub is_complete: bool, +} + +impl PreOrderIterItem { + /// Creates a `PreOrderIterItem` which yields a given element for the first time. + /// + /// Marks the index as 0. The index must be manually set before yielding. + fn initial(node: T, parent: Option) -> Self { + PreOrderIterItem { + is_complete: node.n_children() == 0, + node, + parent, + index: 0, + n_children_yielded: 0, + } + } + + /// Creates a `PreOrderIterItem` which yields a given element again. + fn increment(self, n_children: usize) -> Self { + PreOrderIterItem { + node: self.node, + index: self.index, + parent: self.parent, + n_children_yielded: self.n_children_yielded + 1, + is_complete: self.n_children_yielded + 1 == n_children, + } + } +} diff --git a/src/lib.rs b/src/lib.rs index 1c1641b2b..3ab09e6d6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -74,7 +74,8 @@ //! #![cfg_attr(all(not(feature = "std"), not(test)), no_std)] -#![cfg_attr(all(test, feature = "unstable"), feature(test))] +// Experimental features we need. 
+#![cfg_attr(bench, feature(test))] // Coding conventions #![deny(unsafe_code)] #![deny(non_upper_case_globals)] @@ -107,7 +108,8 @@ extern crate core; #[cfg(feature = "serde")] pub use actual_serde as serde; -#[cfg(all(test, feature = "unstable"))] + +#[cfg(bench)] extern crate test; #[macro_use] @@ -122,6 +124,7 @@ pub use pub_macros::*; pub mod descriptor; pub mod expression; pub mod interpreter; +pub mod iter; pub mod miniscript; pub mod plan; pub mod policy; diff --git a/src/miniscript/analyzable.rs b/src/miniscript/analyzable.rs index 8b1929a65..4c30e7df9 100644 --- a/src/miniscript/analyzable.rs +++ b/src/miniscript/analyzable.rs @@ -1,4 +1,3 @@ -// Written in 2018 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Miniscript Analysis @@ -221,10 +220,7 @@ impl Miniscript { /// Whether the given miniscript contains a raw pkh fragment pub fn contains_raw_pkh(&self) -> bool { - self.iter().any(|ms| match ms.node { - Terminal::RawPkH(_) => true, - _ => false, - }) + self.iter().any(|ms| matches!(ms.node, Terminal::RawPkH(_))) } /// Check whether the underlying Miniscript is safe under the current context diff --git a/src/miniscript/astelem.rs b/src/miniscript/astelem.rs index 62dddf633..c7d403679 100644 --- a/src/miniscript/astelem.rs +++ b/src/miniscript/astelem.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! 
AST Elements @@ -21,8 +20,7 @@ use crate::miniscript::ScriptContext; use crate::prelude::*; use crate::util::MsKeyBuilder; use crate::{ - errstr, expression, script_num_size, AbsLockTime, Error, ForEachKey, Miniscript, MiniscriptKey, - Terminal, ToPublicKey, TranslateErr, TranslatePk, Translator, + errstr, expression, AbsLockTime, Error, Miniscript, MiniscriptKey, Terminal, ToPublicKey, }; impl Terminal { @@ -46,216 +44,6 @@ impl Terminal { } } -impl TranslatePk for Terminal -where - Pk: MiniscriptKey, - Q: MiniscriptKey, - Ctx: ScriptContext, -{ - type Output = Terminal; - - /// Converts an AST element with one public key type to one of another public key type. - fn translate_pk(&self, translate: &mut T) -> Result> - where - T: Translator, - { - self.real_translate_pk(translate) - } -} - -impl Terminal { - pub(super) fn real_for_each_key<'a, F: FnMut(&'a Pk) -> bool>(&'a self, pred: &mut F) -> bool { - match *self { - Terminal::PkK(ref p) => pred(p), - Terminal::PkH(ref p) => pred(p), - Terminal::RawPkH(..) - | Terminal::After(..) - | Terminal::Older(..) - | Terminal::Sha256(..) - | Terminal::Hash256(..) - | Terminal::Ripemd160(..) - | Terminal::Hash160(..) 
- | Terminal::True - | Terminal::False => true, - Terminal::Alt(ref sub) - | Terminal::Swap(ref sub) - | Terminal::Check(ref sub) - | Terminal::DupIf(ref sub) - | Terminal::Verify(ref sub) - | Terminal::NonZero(ref sub) - | Terminal::ZeroNotEqual(ref sub) => sub.real_for_each_key(pred), - Terminal::AndV(ref left, ref right) - | Terminal::AndB(ref left, ref right) - | Terminal::OrB(ref left, ref right) - | Terminal::OrD(ref left, ref right) - | Terminal::OrC(ref left, ref right) - | Terminal::OrI(ref left, ref right) => { - left.real_for_each_key(&mut *pred) && right.real_for_each_key(pred) - } - Terminal::AndOr(ref a, ref b, ref c) => { - a.real_for_each_key(&mut *pred) - && b.real_for_each_key(&mut *pred) - && c.real_for_each_key(pred) - } - Terminal::Thresh(_, ref subs) => subs.iter().all(|sub| sub.real_for_each_key(pred)), - Terminal::Multi(_, ref keys) | Terminal::MultiA(_, ref keys) => keys.iter().all(pred), - } - } - - pub(super) fn real_translate_pk( - &self, - t: &mut T, - ) -> Result, TranslateErr> - where - Q: MiniscriptKey, - CtxQ: ScriptContext, - T: Translator, - { - let frag: Terminal = match *self { - Terminal::PkK(ref p) => Terminal::PkK(t.pk(p)?), - Terminal::PkH(ref p) => Terminal::PkH(t.pk(p)?), - Terminal::RawPkH(ref p) => Terminal::RawPkH(*p), - Terminal::After(n) => Terminal::After(n), - Terminal::Older(n) => Terminal::Older(n), - Terminal::Sha256(ref x) => Terminal::Sha256(t.sha256(x)?), - Terminal::Hash256(ref x) => Terminal::Hash256(t.hash256(x)?), - Terminal::Ripemd160(ref x) => Terminal::Ripemd160(t.ripemd160(x)?), - Terminal::Hash160(ref x) => Terminal::Hash160(t.hash160(x)?), - Terminal::True => Terminal::True, - Terminal::False => Terminal::False, - Terminal::Alt(ref sub) => Terminal::Alt(Arc::new(sub.real_translate_pk(t)?)), - Terminal::Swap(ref sub) => Terminal::Swap(Arc::new(sub.real_translate_pk(t)?)), - Terminal::Check(ref sub) => Terminal::Check(Arc::new(sub.real_translate_pk(t)?)), - Terminal::DupIf(ref sub) => 
Terminal::DupIf(Arc::new(sub.real_translate_pk(t)?)), - Terminal::Verify(ref sub) => Terminal::Verify(Arc::new(sub.real_translate_pk(t)?)), - Terminal::NonZero(ref sub) => Terminal::NonZero(Arc::new(sub.real_translate_pk(t)?)), - Terminal::ZeroNotEqual(ref sub) => { - Terminal::ZeroNotEqual(Arc::new(sub.real_translate_pk(t)?)) - } - Terminal::AndV(ref left, ref right) => Terminal::AndV( - Arc::new(left.real_translate_pk(t)?), - Arc::new(right.real_translate_pk(t)?), - ), - Terminal::AndB(ref left, ref right) => Terminal::AndB( - Arc::new(left.real_translate_pk(t)?), - Arc::new(right.real_translate_pk(t)?), - ), - Terminal::AndOr(ref a, ref b, ref c) => Terminal::AndOr( - Arc::new(a.real_translate_pk(t)?), - Arc::new(b.real_translate_pk(t)?), - Arc::new(c.real_translate_pk(t)?), - ), - Terminal::OrB(ref left, ref right) => Terminal::OrB( - Arc::new(left.real_translate_pk(t)?), - Arc::new(right.real_translate_pk(t)?), - ), - Terminal::OrD(ref left, ref right) => Terminal::OrD( - Arc::new(left.real_translate_pk(t)?), - Arc::new(right.real_translate_pk(t)?), - ), - Terminal::OrC(ref left, ref right) => Terminal::OrC( - Arc::new(left.real_translate_pk(t)?), - Arc::new(right.real_translate_pk(t)?), - ), - Terminal::OrI(ref left, ref right) => Terminal::OrI( - Arc::new(left.real_translate_pk(t)?), - Arc::new(right.real_translate_pk(t)?), - ), - Terminal::Thresh(k, ref subs) => { - let subs: Result>>, _> = subs - .iter() - .map(|s| s.real_translate_pk(t).map(Arc::new)) - .collect(); - Terminal::Thresh(k, subs?) - } - Terminal::Multi(k, ref keys) => { - let keys: Result, _> = keys.iter().map(|k| t.pk(k)).collect(); - Terminal::Multi(k, keys?) - } - Terminal::MultiA(k, ref keys) => { - let keys: Result, _> = keys.iter().map(|k| t.pk(k)).collect(); - Terminal::MultiA(k, keys?) - } - }; - Ok(frag) - } - - /// Substitutes raw public keys hashes with the public keys as provided by map. 
- pub fn substitute_raw_pkh(&self, pk_map: &BTreeMap) -> Terminal { - match self { - Terminal::RawPkH(ref p) => match pk_map.get(p) { - Some(pk) => Terminal::PkH(pk.clone()).into(), - None => Terminal::RawPkH(*p).into(), - }, - Terminal::PkK(..) - | Terminal::PkH(..) - | Terminal::Multi(..) - | Terminal::MultiA(..) - | Terminal::After(..) - | Terminal::Older(..) - | Terminal::Sha256(..) - | Terminal::Hash256(..) - | Terminal::Ripemd160(..) - | Terminal::Hash160(..) - | Terminal::True - | Terminal::False => self.clone().into(), - Terminal::Alt(ref sub) => Terminal::Alt(Arc::new(sub.substitute_raw_pkh(pk_map))), - Terminal::Swap(ref sub) => Terminal::Swap(Arc::new(sub.substitute_raw_pkh(pk_map))), - Terminal::Check(ref sub) => Terminal::Check(Arc::new(sub.substitute_raw_pkh(pk_map))), - Terminal::DupIf(ref sub) => Terminal::DupIf(Arc::new(sub.substitute_raw_pkh(pk_map))), - Terminal::Verify(ref sub) => Terminal::Verify(Arc::new(sub.substitute_raw_pkh(pk_map))), - Terminal::NonZero(ref sub) => { - Terminal::NonZero(Arc::new(sub.substitute_raw_pkh(pk_map))) - } - Terminal::ZeroNotEqual(ref sub) => { - Terminal::ZeroNotEqual(Arc::new(sub.substitute_raw_pkh(pk_map))) - } - Terminal::AndV(ref left, ref right) => Terminal::AndV( - Arc::new(left.substitute_raw_pkh(pk_map)), - Arc::new(right.substitute_raw_pkh(pk_map)), - ), - Terminal::AndB(ref left, ref right) => Terminal::AndB( - Arc::new(left.substitute_raw_pkh(pk_map)), - Arc::new(right.substitute_raw_pkh(pk_map)), - ), - Terminal::AndOr(ref a, ref b, ref c) => Terminal::AndOr( - Arc::new(a.substitute_raw_pkh(pk_map)), - Arc::new(b.substitute_raw_pkh(pk_map)), - Arc::new(c.substitute_raw_pkh(pk_map)), - ), - Terminal::OrB(ref left, ref right) => Terminal::OrB( - Arc::new(left.substitute_raw_pkh(pk_map)), - Arc::new(right.substitute_raw_pkh(pk_map)), - ), - Terminal::OrD(ref left, ref right) => Terminal::OrD( - Arc::new(left.substitute_raw_pkh(pk_map)), - Arc::new(right.substitute_raw_pkh(pk_map)), - ), - 
Terminal::OrC(ref left, ref right) => Terminal::OrC( - Arc::new(left.substitute_raw_pkh(pk_map)), - Arc::new(right.substitute_raw_pkh(pk_map)), - ), - Terminal::OrI(ref left, ref right) => Terminal::OrI( - Arc::new(left.substitute_raw_pkh(pk_map)), - Arc::new(right.substitute_raw_pkh(pk_map)), - ), - Terminal::Thresh(k, ref subs) => { - let subs: Vec>> = subs - .iter() - .map(|s| Arc::new(s.substitute_raw_pkh(pk_map))) - .collect(); - Terminal::Thresh(*k, subs) - } - } - } -} - -impl ForEachKey for Terminal { - fn for_each_key<'a, F: FnMut(&'a Pk) -> bool>(&'a self, mut pred: F) -> bool { - self.real_for_each_key(&mut pred) - } -} - impl fmt::Debug for Terminal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("[")?; @@ -676,7 +464,7 @@ impl Terminal { Terminal::RawPkH(ref hash) => builder .push_opcode(opcodes::all::OP_DUP) .push_opcode(opcodes::all::OP_HASH160) - .push_slice(&hash.to_byte_array()) + .push_slice(hash.to_byte_array()) .push_opcode(opcodes::all::OP_EQUALVERIFY), Terminal::After(t) => builder .push_int(absolute::LockTime::from(t).to_consensus_u32() as i64) @@ -804,64 +592,4 @@ impl Terminal { } } } - - /// Size, in bytes of the script-pubkey. If this Miniscript is used outside - /// of segwit (e.g. in a bare or P2SH descriptor), this quantity should be - /// multiplied by 4 to compute the weight. - /// - /// In general, it is not recommended to use this function directly, but - /// to instead call the corresponding function on a `Descriptor`, which - /// will handle the segwit/non-segwit technicalities for you. - pub fn script_size(&self) -> usize { - match *self { - Terminal::PkK(ref pk) => Ctx::pk_len(pk), - Terminal::PkH(..) | Terminal::RawPkH(..) => 24, - Terminal::After(n) => script_num_size(n.to_consensus_u32() as usize) + 1, - Terminal::Older(n) => script_num_size(n.to_consensus_u32() as usize) + 1, - Terminal::Sha256(..) => 33 + 6, - Terminal::Hash256(..) => 33 + 6, - Terminal::Ripemd160(..) 
=> 21 + 6, - Terminal::Hash160(..) => 21 + 6, - Terminal::True => 1, - Terminal::False => 1, - Terminal::Alt(ref sub) => sub.node.script_size() + 2, - Terminal::Swap(ref sub) => sub.node.script_size() + 1, - Terminal::Check(ref sub) => sub.node.script_size() + 1, - Terminal::DupIf(ref sub) => sub.node.script_size() + 3, - Terminal::Verify(ref sub) => { - sub.node.script_size() + usize::from(!sub.ext.has_free_verify) - } - Terminal::NonZero(ref sub) => sub.node.script_size() + 4, - Terminal::ZeroNotEqual(ref sub) => sub.node.script_size() + 1, - Terminal::AndV(ref l, ref r) => l.node.script_size() + r.node.script_size(), - Terminal::AndB(ref l, ref r) => l.node.script_size() + r.node.script_size() + 1, - Terminal::AndOr(ref a, ref b, ref c) => { - a.node.script_size() + b.node.script_size() + c.node.script_size() + 3 - } - Terminal::OrB(ref l, ref r) => l.node.script_size() + r.node.script_size() + 1, - Terminal::OrD(ref l, ref r) => l.node.script_size() + r.node.script_size() + 3, - Terminal::OrC(ref l, ref r) => l.node.script_size() + r.node.script_size() + 2, - Terminal::OrI(ref l, ref r) => l.node.script_size() + r.node.script_size() + 3, - Terminal::Thresh(k, ref subs) => { - assert!(!subs.is_empty(), "threshold must be nonempty"); - script_num_size(k) // k - + 1 // EQUAL - + subs.iter().map(|s| s.node.script_size()).sum::() - + subs.len() // ADD - - 1 // no ADD on first element - } - Terminal::Multi(k, ref pks) => { - script_num_size(k) - + 1 - + script_num_size(pks.len()) - + pks.iter().map(|pk| Ctx::pk_len(pk)).sum::() - } - Terminal::MultiA(k, ref pks) => { - script_num_size(k) - + 1 // NUMEQUAL - + pks.iter().map(|pk| Ctx::pk_len(pk)).sum::() // n keys - + pks.len() // n times CHECKSIGADD - } - } - } } diff --git a/src/miniscript/context.rs b/src/miniscript/context.rs index 3d820de96..f3563b83c 100644 --- a/src/miniscript/context.rs +++ b/src/miniscript/context.rs @@ -430,9 +430,7 @@ impl ScriptContext for Legacy { } Ok(()) } - Terminal::MultiA(..) 
=> { - return Err(ScriptContextError::MultiANotAllowed); - } + Terminal::MultiA(..) => Err(ScriptContextError::MultiANotAllowed), _ => Ok(()), } } diff --git a/src/miniscript/decode.rs b/src/miniscript/decode.rs index dbf4adcfe..d5fa907a6 100644 --- a/src/miniscript/decode.rs +++ b/src/miniscript/decode.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Script Decoder @@ -651,13 +650,12 @@ pub fn parse( } fn is_and_v(tokens: &mut TokenIter) -> bool { - match tokens.peek() { - None - | Some(&Tk::If) - | Some(&Tk::NotIf) - | Some(&Tk::Else) - | Some(&Tk::ToAltStack) - | Some(&Tk::Swap) => false, - _ => true, - } + !matches!( + tokens.peek(), + None | Some(&Tk::If) + | Some(&Tk::NotIf) + | Some(&Tk::Else) + | Some(&Tk::ToAltStack) + | Some(&Tk::Swap) + ) } diff --git a/src/miniscript/lex.rs b/src/miniscript/lex.rs index bb06811f9..e00eb42e7 100644 --- a/src/miniscript/lex.rs +++ b/src/miniscript/lex.rs @@ -1,4 +1,3 @@ -// Written in 2018 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Lexer diff --git a/src/miniscript/mod.rs b/src/miniscript/mod.rs index c0f60d048..3a4226939 100644 --- a/src/miniscript/mod.rs +++ b/src/miniscript/mod.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! # Abstract Syntax Tree @@ -22,8 +21,9 @@ use bitcoin::taproot::{LeafVersion, TapLeafHash}; use self::analyzable::ExtParams; pub use self::context::{BareCtx, Legacy, Segwitv0, Tap}; +use crate::iter::TreeLike; use crate::prelude::*; -use crate::TranslateErr; +use crate::{script_num_size, TranslateErr}; pub mod analyzable; pub mod astelem; @@ -51,63 +51,19 @@ use crate::{ #[cfg(test)] mod ms_tests; -/// Top-level script AST type +/// The top-level miniscript abstract syntax tree (AST). #[derive(Clone)] pub struct Miniscript { - ///A node in the Abstract Syntax Tree( + /// A node in the AST. 
pub node: Terminal, - ///The correctness and malleability type information for the AST node + /// The correctness and malleability type information for the AST node. pub ty: types::Type, - ///Additional information helpful for extra analysis. + /// Additional information helpful for extra analysis. pub ext: types::extra_props::ExtData, /// Context PhantomData. Only accessible inside this crate phantom: PhantomData, } -/// `PartialOrd` of `Miniscript` must depend only on node and not the type information. -/// The type information and extra_properties can be deterministically determined -/// by the ast. -impl PartialOrd for Miniscript { - fn partial_cmp(&self, other: &Miniscript) -> Option { - Some(self.node.cmp(&other.node)) - } -} - -/// `Ord` of `Miniscript` must depend only on node and not the type information. -/// The type information and extra_properties can be deterministically determined -/// by the ast. -impl Ord for Miniscript { - fn cmp(&self, other: &Miniscript) -> cmp::Ordering { - self.node.cmp(&other.node) - } -} - -/// `PartialEq` of `Miniscript` must depend only on node and not the type information. -/// The type information and extra_properties can be deterministically determined -/// by the ast. -impl PartialEq for Miniscript { - fn eq(&self, other: &Miniscript) -> bool { - self.node.eq(&other.node) - } -} - -/// `Eq` of `Miniscript` must depend only on node and not the type information. -/// The type information and extra_properties can be deterministically determined -/// by the ast. -impl Eq for Miniscript {} - -impl fmt::Debug for Miniscript { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self.node) - } -} - -impl hash::Hash for Miniscript { - fn hash(&self, state: &mut H) { - self.node.hash(state); - } -} - impl Miniscript { /// Add type information(Type and Extdata) to Miniscript based on /// `AstElem` fragment. 
Dependent on display and clone because of Error @@ -140,15 +96,7 @@ impl Miniscript { phantom: PhantomData, } } -} -impl fmt::Display for Miniscript { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.node) - } -} - -impl Miniscript { /// Extracts the `AstElem` representing the root of the miniscript pub fn into_inner(self) -> Terminal { self.node @@ -158,6 +106,141 @@ impl Miniscript { pub fn as_inner(&self) -> &Terminal { &self.node } + + /// Encode as a Bitcoin script + pub fn encode(&self) -> script::ScriptBuf + where + Pk: ToPublicKey, + { + self.node.encode(script::Builder::new()).into_script() + } + + /// Size, in bytes of the script-pubkey. If this Miniscript is used outside + /// of segwit (e.g. in a bare or P2SH descriptor), this quantity should be + /// multiplied by 4 to compute the weight. + /// + /// In general, it is not recommended to use this function directly, but + /// to instead call the corresponding function on a `Descriptor`, which + /// will handle the segwit/non-segwit technicalities for you. + pub fn script_size(&self) -> usize { + use Terminal::*; + + let mut len = 0; + for ms in self.pre_order_iter() { + len += match ms.node { + AndV(..) => 0, + True | False | Swap(..) | Check(..) | ZeroNotEqual(..) | AndB(..) | OrB(..) => 1, + Alt(..) | OrC(..) => 2, + DupIf(..) | AndOr(..) | OrD(..) | OrI(..) => 3, + NonZero(..) => 4, + PkH(..) | RawPkH(..) => 24, + Ripemd160(..) | Hash160(..) => 21 + 6, + Sha256(..) | Hash256(..) 
=> 33 + 6, + + Terminal::PkK(ref pk) => Ctx::pk_len(pk), + Terminal::After(n) => script_num_size(n.to_consensus_u32() as usize) + 1, + Terminal::Older(n) => script_num_size(n.to_consensus_u32() as usize) + 1, + Terminal::Verify(ref sub) => usize::from(!sub.ext.has_free_verify), + Terminal::Thresh(k, ref subs) => { + assert!(!subs.is_empty(), "threshold must be nonempty"); + script_num_size(k) // k + + 1 // EQUAL + + subs.len() // ADD + - 1 // no ADD on first element + } + Terminal::Multi(k, ref pks) => { + script_num_size(k) + + 1 + + script_num_size(pks.len()) + + pks.iter().map(|pk| Ctx::pk_len(pk)).sum::() + } + Terminal::MultiA(k, ref pks) => { + script_num_size(k) + + 1 // NUMEQUAL + + pks.iter().map(|pk| Ctx::pk_len(pk)).sum::() // n keys + + pks.len() // n times CHECKSIGADD + } + } + } + len + } + + /// Maximum number of witness elements used to satisfy the Miniscript + /// fragment, including the witness script itself. Used to estimate + /// the weight of the `VarInt` that specifies this number in a serialized + /// transaction. + /// + /// This function may returns Error when the Miniscript is + /// impossible to satisfy + pub fn max_satisfaction_witness_elements(&self) -> Result { + self.ext + .stack_elem_count_sat + .map(|x| x + 1) + .ok_or(Error::ImpossibleSatisfaction) + } + + /// Maximum size, in bytes, of a satisfying witness. For Segwit outputs + /// `one_cost` should be set to 2, since the number `1` requires two + /// bytes to encode. For non-segwit outputs `one_cost` should be set to + /// 1, since `OP_1` is available in scriptSigs. + /// + /// In general, it is not recommended to use this function directly, but + /// to instead call the corresponding function on a `Descriptor`, which + /// will handle the segwit/non-segwit technicalities for you. + /// + /// All signatures are assumed to be 73 bytes in size, including the + /// length prefix (segwit) or push opcode (pre-segwit) and sighash + /// postfix. 
+ pub fn max_satisfaction_size(&self) -> Result { + Ctx::max_satisfaction_size(self).ok_or(Error::ImpossibleSatisfaction) + } + + /// Attempt to produce non-malleable satisfying witness for the + /// witness script represented by the parse tree + pub fn satisfy>(&self, satisfier: S) -> Result>, Error> + where + Pk: ToPublicKey, + { + // Only satisfactions for default versions (0xc0) are allowed. + let leaf_hash = TapLeafHash::from_script(&self.encode(), LeafVersion::TapScript); + let satisfaction = + satisfy::Satisfaction::satisfy(&self.node, &satisfier, self.ty.mall.safe, &leaf_hash); + self._satisfy(satisfaction) + } + + /// Attempt to produce a malleable satisfying witness for the + /// witness script represented by the parse tree + pub fn satisfy_malleable>( + &self, + satisfier: S, + ) -> Result>, Error> + where + Pk: ToPublicKey, + { + let leaf_hash = TapLeafHash::from_script(&self.encode(), LeafVersion::TapScript); + let satisfaction = satisfy::Satisfaction::satisfy_mall( + &self.node, + &satisfier, + self.ty.mall.safe, + &leaf_hash, + ); + self._satisfy(satisfaction) + } + + fn _satisfy(&self, satisfaction: satisfy::Satisfaction) -> Result>, Error> + where + Pk: ToPublicKey, + { + match satisfaction.stack { + satisfy::Witness::Stack(stack) => { + Ctx::check_witness::(&stack)?; + Ok(stack) + } + satisfy::Witness::Unavailable | satisfy::Witness::Impossible => { + Err(Error::CouldNotSatisfy) + } + } + } } impl Miniscript { @@ -239,66 +322,82 @@ impl Miniscript { } } -impl Miniscript -where - Pk: MiniscriptKey, - Ctx: ScriptContext, -{ - /// Encode as a Bitcoin script - pub fn encode(&self) -> script::ScriptBuf - where - Pk: ToPublicKey, - { - self.node.encode(script::Builder::new()).into_script() +/// `PartialOrd` of `Miniscript` must depend only on node and not the type information. +/// +/// The type information and extra properties are implied by the AST. 
+impl PartialOrd for Miniscript { + fn partial_cmp(&self, other: &Miniscript) -> Option { + Some(self.node.cmp(&other.node)) } +} - /// Size, in bytes of the script-pubkey. If this Miniscript is used outside - /// of segwit (e.g. in a bare or P2SH descriptor), this quantity should be - /// multiplied by 4 to compute the weight. - /// - /// In general, it is not recommended to use this function directly, but - /// to instead call the corresponding function on a `Descriptor`, which - /// will handle the segwit/non-segwit technicalities for you. - pub fn script_size(&self) -> usize { - self.node.script_size() +/// `Ord` of `Miniscript` must depend only on node and not the type information. +/// +/// The type information and extra properties are implied by the AST. +impl Ord for Miniscript { + fn cmp(&self, other: &Miniscript) -> cmp::Ordering { + self.node.cmp(&other.node) } } -impl Miniscript { - /// Maximum number of witness elements used to satisfy the Miniscript - /// fragment, including the witness script itself. Used to estimate - /// the weight of the `VarInt` that specifies this number in a serialized - /// transaction. - /// - /// This function may returns Error when the Miniscript is - /// impossible to satisfy - pub fn max_satisfaction_witness_elements(&self) -> Result { - self.ext - .stack_elem_count_sat - .map(|x| x + 1) - .ok_or(Error::ImpossibleSatisfaction) +/// `PartialEq` of `Miniscript` must depend only on node and not the type information. +/// +/// The type information and extra properties are implied by the AST. +impl PartialEq for Miniscript { + fn eq(&self, other: &Miniscript) -> bool { + self.node.eq(&other.node) } +} - /// Maximum size, in bytes, of a satisfying witness. For Segwit outputs - /// `one_cost` should be set to 2, since the number `1` requires two - /// bytes to encode. For non-segwit outputs `one_cost` should be set to - /// 1, since `OP_1` is available in scriptSigs. 
- /// - /// In general, it is not recommended to use this function directly, but - /// to instead call the corresponding function on a `Descriptor`, which - /// will handle the segwit/non-segwit technicalities for you. - /// - /// All signatures are assumed to be 73 bytes in size, including the - /// length prefix (segwit) or push opcode (pre-segwit) and sighash - /// postfix. - pub fn max_satisfaction_size(&self) -> Result { - Ctx::max_satisfaction_size(self).ok_or(Error::ImpossibleSatisfaction) +/// `Eq` of `Miniscript` must depend only on node and not the type information. +/// +/// The type information and extra properties are implied by the AST. +impl Eq for Miniscript {} + +/// `Hash` of `Miniscript` must depend only on node and not the type information. +/// +/// The type information and extra properties are implied by the AST. +impl hash::Hash for Miniscript { + fn hash(&self, state: &mut H) { + self.node.hash(state); + } +} + +impl fmt::Debug for Miniscript { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.node) + } +} + +impl fmt::Display for Miniscript { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.node) } } impl ForEachKey for Miniscript { fn for_each_key<'a, F: FnMut(&'a Pk) -> bool>(&'a self, mut pred: F) -> bool { - self.real_for_each_key(&mut pred) + for ms in self.pre_order_iter() { + match ms.node { + Terminal::PkK(ref p) => { + if !pred(p) { + return false; + } + } + Terminal::PkH(ref p) => { + if !pred(p) { + return false; + } + } + Terminal::Multi(_, ref keys) | Terminal::MultiA(_, ref keys) => { + if !keys.iter().all(&mut pred) { + return false; + } + } + _ => {} + } + } + true } } @@ -312,20 +411,16 @@ where /// Translates a struct from one generic to another where the translation /// for Pk is provided by [`Translator`] - fn translate_pk(&self, translate: &mut T) -> Result> + fn translate_pk(&self, t: &mut T) -> Result> where T: Translator, { - 
self.real_translate_pk(translate) + self.translate_pk_ctx(t) } } impl Miniscript { - fn real_for_each_key<'a, F: FnMut(&'a Pk) -> bool>(&'a self, pred: &mut F) -> bool { - self.node.real_for_each_key(pred) - } - - pub(super) fn real_translate_pk( + pub(super) fn translate_pk_ctx( &self, t: &mut T, ) -> Result, TranslateErr> @@ -334,13 +429,74 @@ impl Miniscript { CtxQ: ScriptContext, T: Translator, { - let inner = self.node.real_translate_pk(t)?; - Miniscript::from_ast(inner).map_err(TranslateErr::OuterError) + let mut translated = vec![]; + for data in Arc::new(self.clone()).post_order_iter() { + // convenience method to reduce typing + let child_n = |n| Arc::clone(&translated[data.child_indices[n]]); + + let new_term = match data.node.node { + Terminal::PkK(ref p) => Terminal::PkK(t.pk(p)?), + Terminal::PkH(ref p) => Terminal::PkH(t.pk(p)?), + Terminal::RawPkH(ref p) => Terminal::RawPkH(*p), + Terminal::After(n) => Terminal::After(n), + Terminal::Older(n) => Terminal::Older(n), + Terminal::Sha256(ref x) => Terminal::Sha256(t.sha256(x)?), + Terminal::Hash256(ref x) => Terminal::Hash256(t.hash256(x)?), + Terminal::Ripemd160(ref x) => Terminal::Ripemd160(t.ripemd160(x)?), + Terminal::Hash160(ref x) => Terminal::Hash160(t.hash160(x)?), + Terminal::True => Terminal::True, + Terminal::False => Terminal::False, + Terminal::Alt(..) => Terminal::Alt(child_n(0)), + Terminal::Swap(..) => Terminal::Swap(child_n(0)), + Terminal::Check(..) => Terminal::Check(child_n(0)), + Terminal::DupIf(..) => Terminal::DupIf(child_n(0)), + Terminal::Verify(..) => Terminal::Verify(child_n(0)), + Terminal::NonZero(..) => Terminal::NonZero(child_n(0)), + Terminal::ZeroNotEqual(..) => Terminal::ZeroNotEqual(child_n(0)), + Terminal::AndV(..) => Terminal::AndV(child_n(0), child_n(1)), + Terminal::AndB(..) => Terminal::AndB(child_n(0), child_n(1)), + Terminal::AndOr(..) => Terminal::AndOr(child_n(0), child_n(1), child_n(2)), + Terminal::OrB(..) 
=> Terminal::OrB(child_n(0), child_n(1)), + Terminal::OrD(..) => Terminal::OrD(child_n(0), child_n(1)), + Terminal::OrC(..) => Terminal::OrC(child_n(0), child_n(1)), + Terminal::OrI(..) => Terminal::OrI(child_n(0), child_n(1)), + Terminal::Thresh(k, ref subs) => { + Terminal::Thresh(k, (0..subs.len()).map(child_n).collect()) + } + Terminal::Multi(k, ref keys) => { + let keys: Result, _> = keys.iter().map(|k| t.pk(k)).collect(); + Terminal::Multi(k, keys?) + } + Terminal::MultiA(k, ref keys) => { + let keys: Result, _> = keys.iter().map(|k| t.pk(k)).collect(); + Terminal::MultiA(k, keys?) + } + }; + let new_ms = Miniscript::from_ast(new_term).map_err(TranslateErr::OuterError)?; + translated.push(Arc::new(new_ms)); + } + + Ok(Arc::try_unwrap(translated.pop().unwrap()).unwrap()) } /// Substitutes raw public keys hashes with the public keys as provided by map. pub fn substitute_raw_pkh(&self, pk_map: &BTreeMap) -> Miniscript { - Miniscript::from_ast(self.node.substitute_raw_pkh(pk_map)).expect("type check failed") + let mut translated = vec![]; + for data in Arc::new(self.clone()).post_order_iter() { + let new_term = if let Terminal::RawPkH(ref p) = data.node.node { + match pk_map.get(p) { + Some(pk) => Terminal::PkH(pk.clone()), + None => Terminal::RawPkH(*p), + } + } else { + data.node.node.clone() + }; + + let new_ms = Miniscript::from_ast(new_term).expect("typeck"); + translated.push(Arc::new(new_ms)); + } + + Arc::try_unwrap(translated.pop().unwrap()).unwrap() } } diff --git a/src/miniscript/satisfy.rs b/src/miniscript/satisfy.rs index 88fcbc92d..61b518265 100644 --- a/src/miniscript/satisfy.rs +++ b/src/miniscript/satisfy.rs @@ -1,4 +1,3 @@ -// Written in 2018 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! 
# Satisfaction and Dissatisfaction diff --git a/src/miniscript/types/correctness.rs b/src/miniscript/types/correctness.rs index 0569ad366..a4c3b2fa9 100644 --- a/src/miniscript/types/correctness.rs +++ b/src/miniscript/types/correctness.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Correctness/Soundness type properties diff --git a/src/miniscript/types/malleability.rs b/src/miniscript/types/malleability.rs index 4b12d7a85..d1b8e4dba 100644 --- a/src/miniscript/types/malleability.rs +++ b/src/miniscript/types/malleability.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Malleability-related Type properties diff --git a/src/miniscript/types/mod.rs b/src/miniscript/types/mod.rs index d5c4ab0f7..35a29605e 100644 --- a/src/miniscript/types/mod.rs +++ b/src/miniscript/types/mod.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Miniscript Types diff --git a/src/policy/compiler.rs b/src/policy/compiler.rs index 9b29ee6ba..c3c13ec62 100644 --- a/src/policy/compiler.rs +++ b/src/policy/compiler.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! # Policy Compiler @@ -24,11 +23,18 @@ use crate::{policy, Miniscript, MiniscriptKey, Terminal}; type PolicyCache = BTreeMap<(Concrete, OrdF64, Option), BTreeMap>>; -///Ordered f64 for comparison -#[derive(Copy, Clone, PartialEq, PartialOrd, Debug)] +/// Ordered f64 for comparison. +#[derive(Copy, Clone, PartialEq, Debug)] pub(crate) struct OrdF64(pub f64); impl Eq for OrdF64 {} +// We could derive PartialOrd, but we can't derive Ord, and clippy wants us +// to derive both or neither. Better to be explicit. 
+impl PartialOrd for OrdF64 { + fn partial_cmp(&self, other: &OrdF64) -> Option { + self.0.partial_cmp(&other.0) + } +} impl Ord for OrdF64 { fn cmp(&self, other: &OrdF64) -> cmp::Ordering { // will panic if given NaN @@ -36,7 +42,7 @@ impl Ord for OrdF64 { } } -/// Detailed Error type for Compiler +/// Detailed error type for compiler. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum CompilerError { /// Compiler has non-safe input policy. @@ -525,6 +531,7 @@ impl AstElemExt { } /// Different types of casts possible for each node. +#[allow(clippy::type_complexity)] #[derive(Copy, Clone)] struct Cast { node: fn(Arc>) -> Terminal, @@ -675,7 +682,7 @@ fn insert_elem( // whose subtype is the current element and have worse cost. *map = mem::take(map) .into_iter() - .filter(|&(ref existing_key, ref existing_elem)| { + .filter(|(existing_key, existing_elem)| { let existing_elem_cost = existing_elem.cost_1d(sat_prob, dissat_prob); !(elem_key.is_subtype(*existing_key) && existing_elem_cost >= elem_cost) }) @@ -864,7 +871,7 @@ where let rw = subs[1].0 as f64 / total; //and-or - if let (&Concrete::And(ref x), _) = (&subs[0].1, &subs[1].1) { + if let (Concrete::And(x), _) = (&subs[0].1, &subs[1].1) { let mut a1 = best_compilations( policy_cache, &x[0], @@ -887,7 +894,7 @@ where compile_tern!(&mut a1, &mut b2, &mut c, [lw, rw]); compile_tern!(&mut b1, &mut a2, &mut c, [lw, rw]); }; - if let (_, &Concrete::And(ref x)) = (&subs[0].1, &subs[1].1) { + if let (_, Concrete::And(x)) = (&subs[0].1, &subs[1].1) { let mut a1 = best_compilations( policy_cache, &x[0], @@ -959,7 +966,7 @@ where let mut best_es = Vec::with_capacity(n); let mut best_ws = Vec::with_capacity(n); - let mut min_value = (0, f64::INFINITY as f64); + let mut min_value = (0, f64::INFINITY); for (i, ast) in subs.iter().enumerate() { let sp = sat_prob * k_over_n; //Expressions must be dissatisfiable @@ -1048,6 +1055,7 @@ where /// Helper function to compile different types of binary fragments. 
/// `sat_prob` and `dissat_prob` represent the sat and dissat probabilities of /// root or. `weights` represent the odds for taking each sub branch +#[allow(clippy::too_many_arguments)] fn compile_binary( policy_cache: &mut PolicyCache, policy: &Concrete, @@ -1082,6 +1090,7 @@ where /// Helper function to compile different order of and_or fragments. /// `sat_prob` and `dissat_prob` represent the sat and dissat probabilities of /// root and_or node. `weights` represent the odds for taking each sub branch +#[allow(clippy::too_many_arguments)] fn compile_tern( policy_cache: &mut PolicyCache, policy: &Concrete, @@ -1162,7 +1171,7 @@ where { best_compilations(policy_cache, policy, sat_prob, dissat_prob)? .into_iter() - .filter(|&(ref key, ref val)| { + .filter(|(key, val)| { key.ty.corr.base == basic_type && key.ty.corr.unit && val.ms.ty.mall.dissat == types::Dissat::Unique @@ -1588,7 +1597,7 @@ mod tests { } } -#[cfg(all(test, feature = "unstable"))] +#[cfg(bench)] mod benches { use std::str::FromStr; diff --git a/src/policy/concrete.rs b/src/policy/concrete.rs index 8a080200e..0b9045ee2 100644 --- a/src/policy/concrete.rs +++ b/src/policy/concrete.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Concrete Policies @@ -35,35 +34,35 @@ use crate::{errstr, AbsLockTime, Error, ForEachKey, MiniscriptKey, Translator}; #[cfg(feature = "compiler")] const MAX_COMPILATION_LEAVES: usize = 1024; -/// Concrete policy which corresponds directly to a Miniscript structure, +/// Concrete policy which corresponds directly to a miniscript structure, /// and whose disjunctions are annotated with satisfaction probabilities -/// to assist the compiler +/// to assist the compiler. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum Policy { - /// Unsatisfiable + /// Unsatisfiable. Unsatisfiable, - /// Trivially satisfiable + /// Trivially satisfiable. 
Trivial, - /// A public key which must sign to satisfy the descriptor + /// A public key which must sign to satisfy the descriptor. Key(Pk), - /// An absolute locktime restriction + /// An absolute locktime restriction. After(AbsLockTime), - /// A relative locktime restriction + /// A relative locktime restriction. Older(Sequence), - /// A SHA256 whose preimage must be provided to satisfy the descriptor + /// A SHA256 whose preimage must be provided to satisfy the descriptor. Sha256(Pk::Sha256), - /// A SHA256d whose preimage must be provided to satisfy the descriptor + /// A SHA256d whose preimage must be provided to satisfy the descriptor. Hash256(Pk::Hash256), - /// A RIPEMD160 whose preimage must be provided to satisfy the descriptor + /// A RIPEMD160 whose preimage must be provided to satisfy the descriptor. Ripemd160(Pk::Ripemd160), - /// A HASH160 whose preimage must be provided to satisfy the descriptor + /// A HASH160 whose preimage must be provided to satisfy the descriptor. Hash160(Pk::Hash160), - /// A list of sub-policies, all of which must be satisfied + /// A list of sub-policies, all of which must be satisfied. And(Vec>), /// A list of sub-policies, one of which must be satisfied, along with - /// relative probabilities for each one + /// relative probabilities for each one. Or(Vec<(usize, Policy)>), - /// A set of descriptors, satisfactions must be provided for `k` of them + /// A set of descriptors, satisfactions must be provided for `k` of them. Threshold(usize, Vec>), } @@ -183,29 +182,28 @@ impl From> for PolicyArc { } } -/// Detailed Error type for Policies +/// Detailed error type for concrete policies. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum PolicyError { - /// `And` fragments only support two args + /// `And` fragments only support two args. NonBinaryArgAnd, - /// `Or` fragments only support two args + /// `Or` fragments only support two args. 
NonBinaryArgOr, - /// `Thresh` fragment can only have `1<=k<=n` + /// `Thresh` fragment can only have `1<=k<=n`. IncorrectThresh, - /// `older` or `after` fragment can only have `n = 0` + /// `older` or `after` fragment can only have `n = 0`. ZeroTime, - /// `after` fragment can only have ` n < 2^31` + /// `after` fragment can only have `n < 2^31`. TimeTooFar, - /// Semantic Policy Error: `And` `Or` fragments must take args: k > 1 + /// Semantic Policy Error: `And` `Or` fragments must take args: `k > 1`. InsufficientArgsforAnd, - /// Semantic Policy Error: `And` `Or` fragments must take args: k > 1 + /// Semantic policy error: `And` `Or` fragments must take args: `k > 1`. InsufficientArgsforOr, - /// Entailment max terminals exceeded + /// Entailment max terminals exceeded. EntailmentMaxTerminals, - /// lifting error: Cannot lift policies that have - /// a combination of height and timelocks. + /// Cannot lift policies that have a combination of height and timelocks. HeightTimelockCombination, - /// Duplicate Public Keys + /// Duplicate Public Keys. DuplicatePubKeys, } @@ -278,8 +276,8 @@ impl error::Error for PolicyError { } impl Policy { - /// Flatten the [`Policy`] tree structure into a Vector of tuple `(leaf script, leaf probability)` - /// with leaf probabilities corresponding to odds for sub-branch in the policy. + /// Flattens the [`Policy`] tree structure into a vector of tuples `(leaf script, leaf probability)` + /// with leaf probabilities corresponding to odds for each sub-branch in the policy. /// We calculate the probability of selecting the sub-branch at every level and calculate the /// leaf probabilities as the probability of traversing through required branches to reach the /// leaf node, i.e. multiplication of the respective probabilities. 
@@ -298,7 +296,7 @@ impl Policy { /// /// ## Constraints /// - /// Since this splitting might lead to exponential blow-up, we constraint the number of + /// Since this splitting might lead to exponential blow-up, we constrain the number of /// leaf-nodes to [`MAX_COMPILATION_LEAVES`]. #[cfg(feature = "compiler")] fn to_tapleaf_prob_vec(&self, prob: f64) -> Vec<(f64, Policy)> { @@ -306,24 +304,22 @@ impl Policy { Policy::Or(ref subs) => { let total_odds: usize = subs.iter().map(|(ref k, _)| k).sum(); subs.iter() - .map(|(k, ref policy)| { + .flat_map(|(k, ref policy)| { policy.to_tapleaf_prob_vec(prob * *k as f64 / total_odds as f64) }) - .flatten() .collect::>() } Policy::Threshold(k, ref subs) if *k == 1 => { let total_odds = subs.len(); subs.iter() - .map(|policy| policy.to_tapleaf_prob_vec(prob / total_odds as f64)) - .flatten() + .flat_map(|policy| policy.to_tapleaf_prob_vec(prob / total_odds as f64)) .collect::>() } x => vec![(prob, x.clone())], } } - /// Extract the internal_key from policy tree. + /// Extracts the internal_key from this policy tree. #[cfg(feature = "compiler")] fn extract_key(self, unspendable_key: Option) -> Result<(Pk, Policy), Error> { let mut internal_key: Option = None; @@ -334,10 +330,7 @@ impl Policy { let key_prob_map: HashMap<_, _> = self .to_tapleaf_prob_vec(1.0) .into_iter() - .filter(|(_, ref pol)| match *pol { - Concrete::Key(..) => true, - _ => false, - }) + .filter(|(_, ref pol)| matches!(*pol, Concrete::Key(..))) .map(|(prob, key)| (key, prob)) .collect(); @@ -360,19 +353,20 @@ impl Policy { } } match (internal_key, unspendable_key) { - (Some(ref key), _) => Ok((key.clone(), self.translate_unsatisfiable_pk(&key))), + (Some(ref key), _) => Ok((key.clone(), self.translate_unsatisfiable_pk(key))), (_, Some(key)) => Ok((key, self)), _ => Err(errstr("No viable internal key found.")), } } - /// Compile the [`Policy`] into a [`Descriptor::Tr`]. + /// Compiles the [`Policy`] into a [`Descriptor::Tr`]. 
/// /// ### TapTree compilation /// - /// The policy tree constructed by root-level disjunctions over [`Or`][`Policy::Or`] and - /// [`Thresh`][`Policy::Threshold`](1, ..) which is flattened into a vector (with respective + /// The policy tree constructed by root-level disjunctions over [`Policy::Or`] and + /// [`Policy::Threshold`](1, ..) which is flattened into a vector (with respective /// probabilities derived from odds) of policies. + /// /// For example, the policy `thresh(1,or(pk(A),pk(B)),and(or(pk(C),pk(D)),pk(E)))` gives the /// vector `[pk(A),pk(B),and(or(pk(C),pk(D)),pk(E)))]`. Each policy in the vector is compiled /// into the respective miniscripts. A Huffman Tree is created from this vector which optimizes @@ -424,7 +418,7 @@ impl Policy { /// ### TapTree compilation /// /// The policy tree constructed by root-level disjunctions over [`Policy::Or`] and - /// [`Policy::Threshold`] (k, ..n..) which is flattened into a vector (with respective + /// [`Policy::Threshold`](k, ..n..) which is flattened into a vector (with respective /// probabilities derived from odds) of policies. For example, the policy /// `thresh(1,or(pk(A),pk(B)),and(or(pk(C),pk(D)),pk(E)))` gives the vector /// `[pk(A),pk(B),and(or(pk(C),pk(D)),pk(E)))]`. @@ -437,8 +431,6 @@ impl Policy { /// enumeration or limits exceed. For a given [`Policy`], we maintain an [ordered /// set](`BTreeSet`) of `(prob, policy)` (ordered by probability) to maintain the list of /// enumerated sub-policies whose disjunction is isomorphic to initial policy (*invariant*). 
-    ///
-    /// [`Policy`]: crate::policy::concrete::Policy
     #[cfg(feature = "compiler")]
     pub fn compile_tr_private_experimental(
         &self,
@@ -480,16 +472,16 @@ impl Policy {
         }
     }

-    /// Compile the [`Policy`] into desc_ctx [`Descriptor`]
+    /// Compiles the [`Policy`] into `desc_ctx` [`Descriptor`]
     ///
-    /// In case of [Tr][`DescriptorCtx::Tr`], `internal_key` is used for the Taproot comilation when
+    /// In case of [`DescriptorCtx::Tr`], `internal_key` is used for the taproot compilation when
     /// no public key can be inferred from the given policy.
     ///
     /// # NOTE:
     ///
-    /// It is **not recommended** to use policy as a stable identifier for a miniscript.
-    /// You should use the policy compiler once, and then use the miniscript output as a stable identifier.
-    /// See the compiler document in doc/compiler.md for more details.
+    /// It is **not recommended** to use policy as a stable identifier for a miniscript. You should
+    /// use the policy compiler once, and then use the miniscript output as a stable identifier. See
+    /// the compiler document in `doc/compiler.md` for more details.
     #[cfg(feature = "compiler")]
     pub fn compile_to_descriptor(
         &self,
@@ -511,13 +503,13 @@ impl Policy {
         }
     }

-    /// Compile the descriptor into an optimized `Miniscript` representation
+    /// Compiles the descriptor into an optimized `Miniscript` representation.
     ///
     /// # NOTE:
     ///
-    /// It is **not recommended** to use policy as a stable identifier for a miniscript.
-    /// You should use the policy compiler once, and then use the miniscript output as a stable identifier.
-    /// See the compiler document in doc/compiler.md for more details.
+    /// It is **not recommended** to use policy as a stable identifier for a miniscript. You should
+    /// use the policy compiler once, and then use the miniscript output as a stable identifier. See
+    /// the compiler document in doc/compiler.md for more details. 
#[cfg(feature = "compiler")] pub fn compile(&self) -> Result, CompilerError> { self.is_valid()?; @@ -531,10 +523,11 @@ impl Policy { #[cfg(feature = "compiler")] impl PolicyArc { - /// Given a [`Policy`], return a vector of policies whose disjunction is isomorphic to the initial one. - /// This function is supposed to incrementally expand i.e. represent the policy as disjunction over - /// sub-policies output by it. The probability calculations are similar as - /// [to_tapleaf_prob_vec][`Policy::to_tapleaf_prob_vec`] + /// Returns a vector of policies whose disjunction is isomorphic to the initial one. + /// + /// This function is supposed to incrementally expand i.e. represent the policy as + /// disjunction over sub-policies output by it. The probability calculations are similar + /// to [`Policy::to_tapleaf_prob_vec`]. #[cfg(feature = "compiler")] fn enumerate_pol(&self, prob: f64) -> Vec<(f64, Arc)> { match self { @@ -563,8 +556,6 @@ impl PolicyArc { /// enumeration or limits exceed. For a given [`Policy`], we maintain an [ordered /// set](`BTreeSet`) of `(prob, policy)` (ordered by probability) to maintain the list of /// enumerated sub-policies whose disjunction is isomorphic to initial policy (*invariant*). 
- /// - /// [`Policy`]: crate::policy::concrete::Policy #[cfg(feature = "compiler")] fn enumerate_policy_tree(self, prob: f64) -> Vec<(f64, Arc)> { let mut tapleaf_prob_vec = BTreeSet::<(Reverse, Arc)>::new(); @@ -689,49 +680,9 @@ impl Policy { } } - /// Convert a policy using one kind of public key to another - /// type of public key - /// - /// # Example - /// - /// ``` - /// use miniscript::{bitcoin::PublicKey, policy::concrete::Policy, Translator, hash256}; - /// use std::str::FromStr; - /// use miniscript::translate_hash_fail; - /// use std::collections::HashMap; - /// use miniscript::bitcoin::hashes::{sha256, hash160, ripemd160}; - /// let alice_key = "0270cf3c71f65a3d93d285d9149fddeeb638f87a2d4d8cf16c525f71c417439777"; - /// let bob_key = "02f43b15c50a436f5335dbea8a64dd3b4e63e34c3b50c42598acb5f4f336b5d2fb"; - /// let placeholder_policy = Policy::::from_str("and(pk(alice_key),pk(bob_key))").unwrap(); - /// - /// // Information to translator abstract String type keys to concrete bitcoin::PublicKey. - /// // In practice, wallets would map from String key names to BIP32 keys - /// struct StrPkTranslator { - /// pk_map: HashMap - /// } - /// - /// // If we also wanted to provide mapping of other associated types(sha256, older etc), - /// // we would use the general Translator Trait. - /// impl Translator for StrPkTranslator { - /// // Provides the translation public keys P -> Q - /// fn pk(&mut self, pk: &String) -> Result { - /// self.pk_map.get(pk).copied().ok_or(()) // Dummy Err - /// } - /// - /// // Fail for hash types - /// translate_hash_fail!(String, bitcoin::PublicKey, ()); - /// } - /// - /// let mut pk_map = HashMap::new(); - /// pk_map.insert(String::from("alice_key"), bitcoin::PublicKey::from_str(alice_key).unwrap()); - /// pk_map.insert(String::from("bob_key"), bitcoin::PublicKey::from_str(bob_key).unwrap()); - /// let mut t = StrPkTranslator { pk_map: pk_map }; + /// Converts a policy using one kind of public key to another type of public key. 
/// - /// let real_policy = placeholder_policy.translate_pk(&mut t).unwrap(); - /// - /// let expected_policy = Policy::from_str(&format!("and(pk({}),pk({}))", alice_key, bob_key)).unwrap(); - /// assert_eq!(real_policy, expected_policy); - /// ``` + /// For example usage please see [`crate::policy::semantic::Policy::translate_pk`]. pub fn translate_pk(&self, t: &mut T) -> Result, E> where T: Translator, @@ -773,7 +724,7 @@ impl Policy { } } - /// Translate `Concrete::Key(key)` to `Concrete::Unsatisfiable` when extracting TapKey + /// Translates `Concrete::Key(key)` to `Concrete::Unsatisfiable` when extracting `TapKey`. pub fn translate_unsatisfiable_pk(self, key: &Pk) -> Policy { match self { Policy::Key(ref k) if k.clone() == *key => Policy::Unsatisfiable, @@ -797,7 +748,7 @@ impl Policy { } } - /// Get all keys in the policy + /// Gets all keys in the policy. pub fn keys(&self) -> Vec<&Pk> { match *self { Policy::Key(ref pk) => vec![pk], @@ -814,8 +765,8 @@ impl Policy { } } - /// Get the number of [TapLeaf][`TapTree::Leaf`] considering exhaustive root-level [OR][`Policy::Or`] - /// and [Thresh][`Policy::Threshold`] disjunctions for the TapTree. + /// Gets the number of [TapLeaf](`TapTree::Leaf`)s considering exhaustive root-level [`Policy::Or`] + /// and [`Policy::Threshold`] disjunctions for the `TapTree`. #[cfg(feature = "compiler")] fn num_tap_leaves(&self) -> usize { match self { @@ -827,7 +778,7 @@ impl Policy { } } - /// Check on the number of TapLeaves + /// Does checks on the number of `TapLeaf`s. #[cfg(feature = "compiler")] fn check_num_tapleaves(&self) -> Result<(), Error> { if self.num_tap_leaves() > MAX_COMPILATION_LEAVES { @@ -836,7 +787,7 @@ impl Policy { Ok(()) } - /// Check whether the policy contains duplicate public keys + /// Checks whether the policy contains duplicate public keys. 
pub fn check_duplicate_keys(&self) -> Result<(), PolicyError> { let pks = self.keys(); let pks_len = pks.len(); @@ -851,8 +802,11 @@ impl Policy { /// Checks whether the given concrete policy contains a combination of /// timelocks and heightlocks. + /// + /// # Returns + /// /// Returns an error if there is at least one satisfaction that contains - /// a combination of hieghtlock and timelock. + /// a combination of heightlock and timelock. pub fn check_timelocks(&self) -> Result<(), PolicyError> { let timelocks = self.check_timelocks_helper(); if timelocks.contains_combination { @@ -962,11 +916,14 @@ impl Policy { _ => Ok(()), } } - /// This returns whether any possible compilation of the policy could be - /// compiled as non-malleable and safe. Note that this returns a tuple - /// (safe, non-malleable) to avoid because the non-malleability depends on - /// safety and we would like to cache results. + + /// Checks if any possible compilation of the policy could be compiled + /// as non-malleable and safe. /// + /// # Returns + /// + /// Returns a tuple `(safe, non-malleable)` to avoid the fact that + /// non-malleability depends on safety and we would like to cache results. pub fn is_safe_nonmalleable(&self) -> (bool, bool) { match *self { Policy::Unsatisfiable | Policy::Trivial => (true, true), @@ -1103,11 +1060,7 @@ impl_from_str!( Policy, type Err = Error;, fn from_str(s: &str) -> Result, Error> { - for ch in s.as_bytes() { - if *ch < 20 || *ch > 127 { - return Err(Error::Unprintable(*ch)); - } - } + expression::check_valid_chars(s)?; let tree = expression::Tree::from_str(s)?; let policy: Policy = FromTree::from_tree(&tree)?; @@ -1232,7 +1185,7 @@ impl_from_tree!( } ); -/// Create a Huffman Tree from compiled [Miniscript] nodes +/// Creates a Huffman Tree from compiled [`Miniscript`] nodes. 
#[cfg(feature = "compiler")] fn with_huffman_tree( ms: Vec<(OrdF64, Miniscript)>, @@ -1263,7 +1216,7 @@ fn with_huffman_tree( Ok(node) } -/// Enumerate a [Thresh][`Policy::Threshold`](k, ..n..) into `n` different thresh. +/// Enumerates a [`Policy::Threshold(k, ..n..)`] into `n` different thresh's. /// /// ## Strategy /// diff --git a/src/policy/mod.rs b/src/policy/mod.rs index 83348333d..8e80dd489 100644 --- a/src/policy/mod.rs +++ b/src/policy/mod.rs @@ -1,4 +1,3 @@ -// Written in 2018 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Script Policies @@ -20,42 +19,43 @@ pub mod concrete; pub mod semantic; pub use self::concrete::Policy as Concrete; -/// Semantic policies are "abstract" policies elsewhere; but we -/// avoid this word because it is a reserved keyword in Rust pub use self::semantic::Policy as Semantic; use crate::descriptor::Descriptor; use crate::miniscript::{Miniscript, ScriptContext}; use crate::{Error, MiniscriptKey, Terminal}; -/// Policy entailment algorithm maximum number of terminals allowed +/// Policy entailment algorithm maximum number of terminals allowed. const ENTAILMENT_MAX_TERMINALS: usize = 20; + /// Trait describing script representations which can be lifted into /// an abstract policy, by discarding information. +/// /// After Lifting all policies are converted into `KeyHash(Pk::HasH)` to /// maintain the following invariant(modulo resource limits): /// `Lift(Concrete) == Concrete -> Miniscript -> Script -> Miniscript -> Semantic` -/// Lifting from [Miniscript], [Descriptor] can fail -/// if the miniscript contains a timelock combination or if it contains a -/// branch that exceeds resource limits. -/// Lifting from Concrete policies can fail if it contains a timelock -/// combination. It is possible that concrete policy has some branches that -/// exceed resource limits for any compilation, but cannot detect such -/// policies while lifting. Note that our compiler would not succeed for any -/// such policies. 
+///
+/// Lifting from [`Miniscript`] or [`Descriptor`] can fail if the miniscript
+/// contains a timelock combination or if it contains a branch that exceeds
+/// resource limits.
+///
+/// Lifting from concrete policies can fail if the policy contains a timelock
+/// combination. It is possible that a concrete policy has some branches that
+/// exceed resource limits for any compilation but we cannot detect such
+/// policies while lifting. Note that our compiler would not succeed for any
+/// such policies.
 pub trait Liftable {
-    /// Convert the object into an abstract policy
+    /// Converts this object into an abstract policy.
     fn lift(&self) -> Result, Error>;
 }

-/// Detailed Error type for Policies
+/// Error occurring during lifting.
 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
 pub enum LiftError {
-    /// Cannot lift policies that have
-    /// a combination of height and timelocks.
+    /// Cannot lift policies that have a combination of height and timelocks.
     HeightTimelockCombination,
-    /// Duplicate Public Keys
+    /// Branch exceeds resource limits.
     BranchExceedResourceLimits,
-    /// Cannot lift raw descriptors
+    /// Cannot lift raw descriptors.
     RawDescriptorLift,
 }

@@ -85,14 +85,13 @@ impl error::Error for LiftError {
 }

 impl Miniscript {
-    /// Lifting corresponds conversion of miniscript into Policy
-    /// [policy.semantic.Policy] for human readable or machine analysis.
-    /// However, naively lifting miniscripts can result in incorrect
-    /// interpretations that don't correspond underlying semantics when
-    /// we try to spend them on bitcoin network.
-    /// This can occur if the miniscript contains a
-    /// 1. Timelock combination
-    /// 2. Contains a spend that exceeds resource limits
+    /// Lifting corresponds to conversion of a miniscript into a [`Semantic`]
+    /// policy for human readable or machine analysis. 
However, naively lifting + /// miniscripts can result in incorrect interpretations that don't + /// correspond to the underlying semantics when we try to spend them on + /// bitcoin network. This can occur if the miniscript contains: + /// 1. A combination of timelocks + /// 2. A spend that exceeds resource limits pub fn lift_check(&self) -> Result<(), LiftError> { if !self.within_resource_limits() { Err(LiftError::BranchExceedResourceLimits) @@ -541,7 +540,7 @@ mod tests { } } -#[cfg(all(test, feature = "compiler", feature = "unstable"))] +#[cfg(all(bench, feature = "compiler"))] mod benches { use core::str::FromStr; diff --git a/src/policy/semantic.rs b/src/policy/semantic.rs index b188492bb..aa2de8680 100644 --- a/src/policy/semantic.rs +++ b/src/policy/semantic.rs @@ -1,7 +1,9 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! Abstract Policies +//! +//! We use the terms "semantic" and "abstract" interchangeably because +//! "abstract" is a reserved keyword in Rust. use core::str::FromStr; use core::{fmt, str}; @@ -13,33 +15,33 @@ use super::ENTAILMENT_MAX_TERMINALS; use crate::prelude::*; use crate::{errstr, expression, AbsLockTime, Error, ForEachKey, MiniscriptKey, Translator}; -/// Abstract policy which corresponds to the semantics of a Miniscript -/// and which allows complex forms of analysis, e.g. filtering and -/// normalization. +/// Abstract policy which corresponds to the semantics of a miniscript and +/// which allows complex forms of analysis, e.g. filtering and normalization. +/// /// Semantic policies store only hashes of keys to ensure that objects -/// representing the same policy are lifted to the same `Semantic`, +/// representing the same policy are lifted to the same abstract `Policy`, /// regardless of their choice of `pk` or `pk_h` nodes. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] pub enum Policy { - /// Unsatisfiable + /// Unsatisfiable. 
Unsatisfiable, - /// Trivially satisfiable + /// Trivially satisfiable. Trivial, - /// Signature and public key matching a given hash is required + /// Signature and public key matching a given hash is required. Key(Pk), - /// An absolute locktime restriction + /// An absolute locktime restriction. After(AbsLockTime), - /// A relative locktime restriction + /// A relative locktime restriction. Older(Sequence), - /// A SHA256 whose preimage must be provided to satisfy the descriptor + /// A SHA256 whose preimage must be provided to satisfy the descriptor. Sha256(Pk::Sha256), - /// A SHA256d whose preimage must be provided to satisfy the descriptor + /// A SHA256d whose preimage must be provided to satisfy the descriptor. Hash256(Pk::Hash256), - /// A RIPEMD160 whose preimage must be provided to satisfy the descriptor + /// A RIPEMD160 whose preimage must be provided to satisfy the descriptor. Ripemd160(Pk::Ripemd160), - /// A HASH160 whose preimage must be provided to satisfy the descriptor + /// A HASH160 whose preimage must be provided to satisfy the descriptor. Hash160(Pk::Hash160), - /// A set of descriptors, satisfactions must be provided for `k` of them + /// A set of descriptors, satisfactions must be provided for `k` of them. Threshold(usize, Vec>), } @@ -47,14 +49,16 @@ impl Policy where Pk: MiniscriptKey, { - /// Construct a `Policy::After` from `n`. Helper function equivalent to - /// `Policy::After(absolute::LockTime::from_consensus(n))`. + /// Constructs a `Policy::After` from `n`. + /// + /// Helper function equivalent to `Policy::After(absolute::LockTime::from_consensus(n))`. pub fn after(n: u32) -> Policy { Policy::After(AbsLockTime::from(absolute::LockTime::from_consensus(n))) } - /// Construct a `Policy::Older` from `n`. Helper function equivalent to - /// `Policy::Older(Sequence::from_consensus(n))`. + /// Construct a `Policy::Older` from `n`. + /// + /// Helper function equivalent to `Policy::Older(Sequence::from_consensus(n))`. 
pub fn older(n: u32) -> Policy { Policy::Older(Sequence::from_consensus(n)) } @@ -83,42 +87,41 @@ impl Policy { } } - /// Convert a policy using one kind of public key to another - /// type of public key + /// Converts a policy using one kind of public key to another type of public key. /// - /// # Example + /// # Examples /// /// ``` - /// use miniscript::{bitcoin::{hashes::hash160, PublicKey}, policy::semantic::Policy, Translator}; - /// use miniscript::translate_hash_fail; - /// use std::str::FromStr; /// use std::collections::HashMap; + /// use std::str::FromStr; + /// use miniscript::bitcoin::{hashes::hash160, PublicKey}; + /// use miniscript::{translate_hash_fail, policy::semantic::Policy, Translator}; /// let alice_pk = "02c79ef3ede6d14f72a00d0e49b4becfb152197b64c0707425c4f231df29500ee7"; /// let bob_pk = "03d008a849fbf474bd17e9d2c1a827077a468150e58221582ec3410ab309f5afe4"; /// let placeholder_policy = Policy::::from_str("and(pk(alice_pk),pk(bob_pk))").unwrap(); /// - /// // Information to translator abstract String type keys to concrete bitcoin::PublicKey. - /// // In practice, wallets would map from String key names to BIP32 keys + /// // Information to translate abstract string type keys to concrete `bitcoin::PublicKey`s. + /// // In practice, wallets would map from string key names to BIP32 keys. /// struct StrPkTranslator { /// pk_map: HashMap /// } /// - /// // If we also wanted to provide mapping of other associated types(sha256, older etc), - /// // we would use the general Translator Trait. + /// // If we also wanted to provide mapping of other associated types (sha256, older etc), + /// // we would use the general [`Translator`] trait. /// impl Translator for StrPkTranslator { /// fn pk(&mut self, pk: &String) -> Result { /// self.pk_map.get(pk).copied().ok_or(()) // Dummy Err /// } /// - /// // Handy macro for failing if we encounter any other fragment. - /// // also see translate_hash_clone! 
for cloning instead of failing + /// // Handy macro for failing if we encounter any other fragment. + /// // See also [`translate_hash_clone!`] for cloning instead of failing. /// translate_hash_fail!(String, bitcoin::PublicKey, ()); /// } /// /// let mut pk_map = HashMap::new(); /// pk_map.insert(String::from("alice_pk"), bitcoin::PublicKey::from_str(alice_pk).unwrap()); /// pk_map.insert(String::from("bob_pk"), bitcoin::PublicKey::from_str(bob_pk).unwrap()); - /// let mut t = StrPkTranslator { pk_map: pk_map }; + /// let mut t = StrPkTranslator { pk_map }; /// /// let real_policy = placeholder_policy.translate_pk(&mut t).unwrap(); /// @@ -156,11 +159,12 @@ impl Policy { } } - /// This function computes whether the current policy entails the second one. + /// Computes whether the current policy entails the second one. + /// /// A |- B means every satisfaction of A is also a satisfaction of B. - /// This implementation will run slow for larger policies but should be sufficient for - /// most practical policies. - + /// + /// This implementation will run slowly for larger policies but should be + /// sufficient for most practical policies. // This algorithm has a naive implementation. It is possible to optimize this // by memoizing and maintaining a hashmap. pub fn entails(self, other: Policy) -> Result { @@ -208,11 +212,10 @@ impl Policy { } } - // Helper function that takes in witness and its availability, - // changing it to true or false and returning the resultant normalized - // policy. - // Witness is currently encoded as policy. Only accepts leaf fragment and - // a normalized policy + // Helper function that takes in witness and its availability, changing it + // to true or false and returning the resultant normalized policy. Witness + // is currently encoded as policy. 
Only accepts leaf fragment and a + // normalized policy pub(crate) fn satisfy_constraint(self, witness: &Policy, available: bool) -> Policy { debug_assert!(self.clone().normalized() == self); if let Policy::Threshold { .. } = *witness { @@ -311,11 +314,7 @@ impl_from_str!( Policy, type Err = Error;, fn from_str(s: &str) -> Result, Error> { - for ch in s.as_bytes() { - if *ch < 20 || *ch > 127 { - return Err(Error::Unprintable(*ch)); - } - } + expression::check_valid_chars(s)?; let tree = expression::Tree::from_str(s)?; expression::FromTree::from_tree(&tree) @@ -402,7 +401,7 @@ impl_from_tree!( ); impl Policy { - /// Flatten out trees of `And`s and `Or`s; eliminate `Trivial` and + /// Flattens out trees of `And`s and `Or`s; eliminate `Trivial` and /// `Unsatisfiable`s. Does not reorder any branches; use `.sort`. pub fn normalized(self) -> Policy { match self { @@ -457,26 +456,22 @@ impl Policy { } } - /// Helper function to detect a true/trivial policy - /// This function only checks whether the policy is Policy::Trivial - /// For checking if the normalized form is trivial, the caller - /// is expected to normalize the policy first. + /// Detects a true/trivial policy. + /// + /// Only checks whether the policy is `Policy::Trivial`, to check if the + /// normalized form is trivial, the caller is expected to normalize the + /// policy first. pub fn is_trivial(&self) -> bool { - match *self { - Policy::Trivial => true, - _ => false, - } + matches!(*self, Policy::Trivial) } - /// Helper function to detect a false/unsatisfiable policy - /// This function only checks whether the policy is Policy::Unsatisfiable - /// For checking if the normalized form is unsatisfiable, the caller - /// is expected to normalize the policy first. + /// Detects a false/unsatisfiable policy. + /// + /// Only checks whether the policy is `Policy::Unsatisfiable`, to check if + /// the normalized form is unsatisfiable, the caller is expected to + /// normalize the policy first. 
pub fn is_unsatisfiable(&self) -> bool { - match *self { - Policy::Unsatisfiable => true, - _ => false, - } + matches!(*self, Policy::Unsatisfiable) } /// Helper function to do the recursion in `timelocks`. @@ -498,8 +493,8 @@ impl Policy { } } - /// Returns a list of all relative timelocks, not including 0, - /// which appear in the policy + /// Returns a list of all relative timelocks, not including 0, which appear + /// in the policy. pub fn relative_timelocks(&self) -> Vec { let mut ret = self.real_relative_timelocks(); ret.sort_unstable(); @@ -526,8 +521,8 @@ impl Policy { } } - /// Returns a list of all absolute timelocks, not including 0, - /// which appear in the policy + /// Returns a list of all absolute timelocks, not including 0, which appear + /// in the policy. pub fn absolute_timelocks(&self) -> Vec { let mut ret = self.real_absolute_timelocks(); ret.sort_unstable(); @@ -535,7 +530,7 @@ impl Policy { ret } - /// Filter a policy by eliminating relative timelock constraints + /// Filters a policy by eliminating relative timelock constraints /// that are not satisfied at the given `age`. pub fn at_age(mut self, age: Sequence) -> Policy { self = match self { @@ -557,7 +552,7 @@ impl Policy { self.normalized() } - /// Filter a policy by eliminating absolute timelock constraints + /// Filters a policy by eliminating absolute timelock constraints /// that are not satisfied at the given `n` (`n OP_CHECKLOCKTIMEVERIFY`). pub fn at_lock_time(mut self, n: absolute::LockTime) -> Policy { use absolute::LockTime::*; @@ -584,7 +579,7 @@ impl Policy { self.normalized() } - /// Count the number of public keys and keyhashes referenced in a policy. + /// Counts the number of public keys and keyhashes referenced in a policy. /// Duplicate keys will be double-counted. pub fn n_keys(&self) -> usize { match *self { @@ -600,8 +595,11 @@ impl Policy { } } - /// Count the minimum number of public keys for which signatures - /// could be used to satisfy the policy. 
+ /// Counts the minimum number of public keys for which signatures could be + /// used to satisfy the policy. + /// + /// # Returns + /// /// Returns `None` if the policy is not satisfiable. pub fn minimum_n_keys(&self) -> Option { match *self { @@ -630,7 +628,8 @@ impl Policy { } impl Policy { - /// "Sort" a policy to bring it into a canonical form to allow comparisons. + /// "Sorts" a policy to bring it into a canonical form to allow comparisons. + /// /// Does **not** allow policies to be compared for functional equivalence; /// in general this appears to require Gröbner basis techniques that are not /// implemented. diff --git a/src/psbt/finalizer.rs b/src/psbt/finalizer.rs index eea792191..e03cede0e 100644 --- a/src/psbt/finalizer.rs +++ b/src/psbt/finalizer.rs @@ -8,6 +8,8 @@ //! `https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki` //! +use core::mem; + use bitcoin::hashes::hash160; use bitcoin::key::XOnlyPublicKey; use bitcoin::secp256k1::{self, Secp256k1}; @@ -177,7 +179,7 @@ fn get_descriptor(psbt: &Psbt, index: usize) -> Result, In *script_pubkey == addr.script_pubkey() }); match partial_sig_contains_pk { - Some((pk, _sig)) => Descriptor::new_pkh(*pk).map_err(|e| InputError::from(e)), + Some((pk, _sig)) => Descriptor::new_pkh(*pk).map_err(InputError::from), None => Err(InputError::MissingPubkey), } } else if script_pubkey.is_v0_p2wpkh() { @@ -328,7 +330,7 @@ fn interpreter_inp_check>( let cltv = psbt.unsigned_tx.lock_time; let csv = psbt.unsigned_tx.input[index].sequence; let interpreter = - interpreter::Interpreter::from_txdata(&spk, &script_sig, witness, csv, cltv.into()) + interpreter::Interpreter::from_txdata(&spk, script_sig, witness, csv, cltv) .map_err(|e| Error::InputError(InputError::Interpreter(e), index))?; let iter = interpreter.iter(secp, &psbt.unsigned_tx, index, utxos); if let Some(error) = iter.filter_map(Result::err).next() { @@ -429,8 +431,10 @@ pub(super) fn finalize_input( // Now mutate the psbt input. 
Note that we cannot error after this point. // If the input is mutated, it means that the finalization succeeded. { + let original = mem::replace(&mut psbt.inputs[index], Default::default()); let input = &mut psbt.inputs[index]; - //Fill in the satisfactions + input.non_witness_utxo = original.non_witness_utxo; + input.witness_utxo = original.witness_utxo; input.final_script_sig = if script_sig.is_empty() { None } else { @@ -441,25 +445,6 @@ pub(super) fn finalize_input( } else { Some(witness) }; - //reset everything - input.partial_sigs.clear(); // 0x02 - input.sighash_type = None; // 0x03 - input.redeem_script = None; // 0x04 - input.witness_script = None; // 0x05 - input.bip32_derivation.clear(); // 0x05 - // finalized witness 0x06 and 0x07 are not clear - // 0x09 Proof of reserves not yet supported - input.ripemd160_preimages.clear(); // 0x0a - input.sha256_preimages.clear(); // 0x0b - input.hash160_preimages.clear(); // 0x0c - input.hash256_preimages.clear(); // 0x0d - // psbt v2 fields till 0x012 not supported - input.tap_key_sig = None; // 0x013 - input.tap_script_sigs.clear(); // 0x014 - input.tap_scripts.clear(); // 0x015 - input.tap_key_origins.clear(); // 0x16 - input.tap_internal_key = None; // x017 - input.tap_merkle_root = None; // 0x018 } Ok(()) diff --git a/src/psbt/mod.rs b/src/psbt/mod.rs index cd1292c93..0e7e0b9f1 100644 --- a/src/psbt/mod.rs +++ b/src/psbt/mod.rs @@ -1,4 +1,3 @@ -// Written in 2019 by Andrew Poelstra // SPDX-License-Identifier: CC0-1.0 //! 
# Partially-Signed Bitcoin Transactions @@ -305,7 +304,7 @@ impl<'psbt, Pk: MiniscriptKey + ToPublicKey> Satisfier for PsbtInputSatisfie return false; } - let lock_time = absolute::LockTime::from(self.psbt.unsigned_tx.lock_time); + let lock_time = self.psbt.unsigned_tx.lock_time; >::check_after(&lock_time, n) } diff --git a/src/util.rs b/src/util.rs index 09d7e7b95..c5e40685f 100644 --- a/src/util.rs +++ b/src/util.rs @@ -56,7 +56,7 @@ pub(crate) fn witness_to_scriptsig(witness: &[Vec]) -> ScriptBuf { } else { let push = <&PushBytes>::try_from(wit.as_slice()) .expect("All pushes in miniscript are <73 bytes"); - b = b.push_slice(&push) + b = b.push_slice(push) } } b.into_script() @@ -85,7 +85,7 @@ impl MsKeyBuilder for script::Builder { { match Ctx::sig_type() { context::SigType::Ecdsa => self.push_key(&key.to_public_key()), - context::SigType::Schnorr => self.push_slice(&key.to_x_only_pubkey().serialize()), + context::SigType::Schnorr => self.push_slice(key.to_x_only_pubkey().serialize()), } } @@ -95,9 +95,9 @@ impl MsKeyBuilder for script::Builder { Ctx: ScriptContext, { match Ctx::sig_type() { - context::SigType::Ecdsa => self.push_slice(&key.to_public_key().pubkey_hash()), + context::SigType::Ecdsa => self.push_slice(key.to_public_key().pubkey_hash()), context::SigType::Schnorr => { - self.push_slice(&PubkeyHash::hash(&key.to_x_only_pubkey().serialize())) + self.push_slice(PubkeyHash::hash(&key.to_x_only_pubkey().serialize())) } } } diff --git a/examples/psbt.rs b/tests/bip-174.rs similarity index 99% rename from examples/psbt.rs rename to tests/bip-174.rs index b0fcad06c..a8b7e22f4 100644 --- a/examples/psbt.rs +++ b/tests/bip-174.rs @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: CC0-1.0 + use bitcoin::consensus::encode::deserialize; use bitcoin::hashes::hex::FromHex; use bitcoin::psbt::Psbt;