From 182bc214568e44399609d32a738ccfed2d998ad2 Mon Sep 17 00:00:00 2001 From: elnosh Date: Fri, 21 Jun 2024 15:02:03 -0500 Subject: [PATCH 001/127] Fix inbound capacity comment --- src/types.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/types.rs b/src/types.rs index 89a14e163..0c2faeb78 100644 --- a/src/types.rs +++ b/src/types.rs @@ -200,11 +200,11 @@ pub struct ChannelDetails { /// balance is not available for inclusion in new outbound HTLCs). This further does not include /// any pending outgoing HTLCs which are awaiting some other resolution to be sent. pub outbound_capacity_msat: u64, - /// The available outbound capacity for sending HTLCs to the remote peer. + /// The available inbound capacity for receiving HTLCs from the remote peer. /// /// The amount does not include any pending HTLCs which are not yet resolved /// (and, thus, whose balance is not available for inclusion in new inbound HTLCs). This further - /// does not include any pending outgoing HTLCs which are awaiting some other resolution to be + /// does not include any pending incoming HTLCs which are awaiting some other resolution to be /// sent. pub inbound_capacity_msat: u64, /// The number of required confirmations on the funding transactions before the funding is From f2074f1dc8363ae04578b76e904cbbdbba6c18a7 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 24 Jun 2024 13:23:41 -0400 Subject: [PATCH 002/127] Allow start from outer runtime We add a `Node::start_with_runtime` method that allows to reuse a pre-existing runtime, e.g., to avoid stacking runtime contexts when running in a tokio async environment. --- src/event.rs | 4 ++-- src/lib.rs | 21 +++++++++++++++------ src/payment/bolt11.rs | 4 ++-- src/payment/bolt12.rs | 4 ++-- src/payment/onchain.rs | 4 ++-- src/payment/spontaneous.rs | 4 ++-- 6 files changed, 25 insertions(+), 16 deletions(-) diff --git a/src/event.rs b/src/event.rs index 838df4230..e319ab5e4 100644 --- a/src/event.rs +++ b/src/event.rs @@ -354,7 +354,7 @@ where network_graph: Arc, payment_store: Arc>, peer_store: Arc>, - runtime: Arc>>, + runtime: Arc>>>, logger: L, config: Arc, } @@ -369,7 +369,7 @@ where channel_manager: Arc, connection_manager: Arc>, output_sweeper: Arc, network_graph: Arc, payment_store: Arc>, peer_store: Arc>, - runtime: Arc>>, logger: L, config: Arc, + runtime: Arc>>>, logger: L, config: Arc, ) -> Self { Self { event_queue, diff --git a/src/lib.rs b/src/lib.rs index de2a0badf..1c137d355 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -171,7 +171,7 @@ uniffi::include_scaffolding!("ldk_node"); /// /// Needs to be initialized and instantiated through [`Builder::build`]. pub struct Node { - runtime: Arc>>, + runtime: Arc>>>, stop_sender: tokio::sync::watch::Sender<()>, event_handling_stopped_sender: tokio::sync::watch::Sender<()>, config: Arc, @@ -211,6 +211,20 @@ impl Node { /// After this returns, the [`Node`] instance can be controlled via the provided API methods in /// a thread-safe manner. pub fn start(&self) -> Result<(), Error> { + let runtime = + Arc::new(tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap()); + self.start_with_runtime(runtime) + } + + /// Starts the necessary background tasks (such as handling events coming from user input, + /// LDK/BDK, and the peer-to-peer network) on the the given `runtime`. + /// + /// This allows to have LDK Node reuse an outer pre-existing runtime, e.g., to avoid stacking Tokio + /// runtime contexts. 
+ /// + /// After this returns, the [`Node`] instance can be controlled via the provided API methods in + /// a thread-safe manner. + pub fn start_with_runtime(&self, runtime: Arc) -> Result<(), Error> { // Acquire a run lock and hold it until we're setup. let mut runtime_lock = self.runtime.write().unwrap(); if runtime_lock.is_some() { @@ -225,8 +239,6 @@ impl Node { self.config.network ); - let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); - // Block to ensure we update our fee rate cache once on startup let fee_estimator = Arc::clone(&self.fee_estimator); let sync_logger = Arc::clone(&self.logger); @@ -862,9 +874,6 @@ impl Node { ); } - // Shutdown our runtime. By now ~no or only very few tasks should be left. - runtime.shutdown_timeout(Duration::from_secs(10)); - log_info!(self.logger, "Shutdown complete."); Ok(()) } diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index e8d030bc0..3641d6870 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -33,7 +33,7 @@ use std::time::SystemTime; /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md /// [`Node::bolt11_payment`]: crate::Node::bolt11_payment pub struct Bolt11Payment { - runtime: Arc>>, + runtime: Arc>>>, channel_manager: Arc, connection_manager: Arc>>, keys_manager: Arc, @@ -46,7 +46,7 @@ pub struct Bolt11Payment { impl Bolt11Payment { pub(crate) fn new( - runtime: Arc>>, + runtime: Arc>>>, channel_manager: Arc, connection_manager: Arc>>, keys_manager: Arc, diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 5fd1208cc..40f1fc369 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -28,7 +28,7 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md /// [`Node::bolt12_payment`]: crate::Node::bolt12_payment pub struct Bolt12Payment { - runtime: Arc>>, + runtime: Arc>>>, channel_manager: Arc, payment_store: Arc>>, logger: Arc, @@ -36,7 +36,7 @@ pub struct Bolt12Payment { impl Bolt12Payment { pub(crate) fn new( - runtime: Arc>>, + runtime: Arc>>>, channel_manager: Arc, payment_store: Arc>>, logger: Arc, ) -> Self { diff --git a/src/payment/onchain.rs b/src/payment/onchain.rs index 8a879ae8c..5c1365de3 100644 --- a/src/payment/onchain.rs +++ b/src/payment/onchain.rs @@ -15,7 +15,7 @@ use std::sync::{Arc, RwLock}; /// /// [`Node::onchain_payment`]: crate::Node::onchain_payment pub struct OnchainPayment { - runtime: Arc>>, + runtime: Arc>>>, wallet: Arc, channel_manager: Arc, config: Arc, @@ -24,7 +24,7 @@ pub struct OnchainPayment { impl OnchainPayment { pub(crate) fn new( - runtime: Arc>>, wallet: Arc, + runtime: Arc>>>, wallet: Arc, channel_manager: Arc, config: Arc, logger: Arc, ) -> Self { Self { runtime, wallet, channel_manager, config, logger } diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 482df42d9..13047eab9 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -23,7 +23,7 @@ use std::sync::{Arc, RwLock}; /// /// [`Node::spontaneous_payment`]: crate::Node::spontaneous_payment pub struct SpontaneousPayment { - runtime: Arc>>, + runtime: Arc>>>, channel_manager: Arc, keys_manager: Arc, payment_store: Arc>>, @@ -33,7 +33,7 @@ pub struct SpontaneousPayment { impl SpontaneousPayment { pub(crate) fn new( - runtime: Arc>>, + runtime: Arc>>>, channel_manager: Arc, keys_manager: Arc, payment_store: Arc>>, config: Arc, logger: Arc, From a7466c6f55f39bdb878b77e3af1a8825e354b092 Mon Sep 17 
00:00:00 2001 From: Elias Rohrer Date: Mon, 8 Jul 2024 10:08:27 +0200 Subject: [PATCH 003/127] Revert "Pin `url` to v2.5.0 in CI to fix MSRV breakage" This reverts commit 0285b55c84f292da9bc4f3a60e748509fc54bafe. --- .github/workflows/rust.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 4837063b7..5ce1306ee 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -49,7 +49,6 @@ jobs: cargo update -p proptest --precise "1.2.0" --verbose # proptest 1.3.0 requires rustc 1.64.0 cargo update -p regex --precise "1.9.6" --verbose # regex 1.10.0 requires rustc 1.65.0 cargo update -p home --precise "0.5.5" --verbose # home v0.5.9 requires rustc 1.70 or newer - cargo update -p url --precise "2.5.0" --verbose # url v2.5.1 requires rustc 1.67 or newer - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == 'stable'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" From a60900f8ba40e26795862037095e5424c743a235 Mon Sep 17 00:00:00 2001 From: jbesraa Date: Wed, 10 Jul 2024 11:48:07 +0300 Subject: [PATCH 004/127] Ignore all `target` folders including bindings --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index de30b070c..3d24bbceb 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # Generated by Cargo # will have compiled files and executables /target/ +/bindings/uniffi-bindgen/target/ # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html @@ -24,3 +25,5 @@ swift.swiftdoc /bindings/swift/LDKNodeFFI.xcframework /bindings/kotlin/ldk-node-android/lib/src/main/jniLibs /bindings/kotlin/ldk-node-android/lib/src/main/kotlin/org/lightningdevkit/ldknode/ldk_node.kt +/bindings/kotlin/ldk-node-jvm/lib/src/main/kotlin/org/lightningdevkit/ldknode/ldk_node.kt +/bindings/kotlin/ldk-node-jvm/lib/src/main/resources/ From abfcc2ee11eec69ad87f6d44e1ada5b2c18cb627 Mon Sep 17 00:00:00 2001 From: jbesraa Date: Wed, 10 Jul 2024 11:53:19 +0300 Subject: [PATCH 005/127] Pin `cc` to `1.0.105` for MSRV --- .github/workflows/rust.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 5ce1306ee..9a92825f3 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -49,6 +49,7 @@ jobs: cargo update -p proptest --precise "1.2.0" --verbose # proptest 1.3.0 requires rustc 1.64.0 cargo update -p regex --precise "1.9.6" --verbose # regex 1.10.0 requires rustc 1.65.0 cargo update -p home --precise "0.5.5" --verbose # home v0.5.9 requires rustc 1.70 or newer + cargo update -p cc --precise "1.0.105" --verbose # cc 1.0.106 requires rustc 1.67 or newer - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == 'stable'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" From 479328985e8988e519c268243ca73a5e67669760 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 15 Jul 2024 09:01:25 +0200 Subject: [PATCH 006/127] Revert "Pin `cc` to `1.0.105` for MSRV" This reverts commit abfcc2ee11eec69ad87f6d44e1ada5b2c18cb627. 
--- .github/workflows/rust.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 9a92825f3..5ce1306ee 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -49,7 +49,6 @@ jobs: cargo update -p proptest --precise "1.2.0" --verbose # proptest 1.3.0 requires rustc 1.64.0 cargo update -p regex --precise "1.9.6" --verbose # regex 1.10.0 requires rustc 1.65.0 cargo update -p home --precise "0.5.5" --verbose # home v0.5.9 requires rustc 1.70 or newer - cargo update -p cc --precise "1.0.105" --verbose # cc 1.0.106 requires rustc 1.67 or newer - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == 'stable'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" From 77a0bbede4f063ee50ca2aa36fb019e79d3f8707 Mon Sep 17 00:00:00 2001 From: Ian Slane <105389948+slanesuke@users.noreply.github.com> Date: Thu, 18 Jul 2024 08:42:12 -0600 Subject: [PATCH 007/127] Add BIP21 Unified QR Code Support (#302) * Add `UnifiedQrPayment` module for BIP21 URIs Firstly, I thought I staged and made commits for `unified_qr.rs` so sorry if this is out of order! But in `unified_qr.rs` I - I introduced the `UnifiedQrPayment` struct to handle creating and paying BIP21 URIs - `receive` generates a URI with an on-chain address and BOLT11 invoice and returns the URI as a string - `send` will parse a given URI string and attempt to send the BOLT12 offer, BOLT11 invoice, then if those fail the fallback on-chain address will be paid to. - Then I included tests for URI generation and URI parsing - Also has logging and error handling for payment operations * Add `unified_qr_payment` payment handler to `Node` - implement unified_qr_payment method to create the Unified QR payment handler - Includes conditional UniFFI features and updates docs with BIP21 and BOLT11 links * Add support for `unified_qr` in payment mod - Included unified_qr in payment module - Added `PaymentResult` and `UnifiedQrPayment` from unified_qr for public use * Add bip21 crate to handle BIP21 URIs * Add `UnifiedQrPayment` and `PaymentResult` to `ldk_node.udl` - Introduced `UnifiedQrPayment` method to `Node` interface - Add `UnifiedQrPayment` interface with `receieve and `send` methods - Add `PaymentResult` interface (enum) with `Onchain`, `Bolt11` and `Bolt12` fields These changes add support for our UniFFI bindings and enable the use of `unified_qr_payment` payment handler in Swift, and Kotlin. * Update `Error` enum with URI related errors - Add `UriParameterFailed` and `InvalidUri` fields to the `Error` enum - Added related error messages in the Display impl for the new fields * Add `PaymentResult` import for UniFFI bindings - Added `PaymentResult` so the .udl could access the enum - Added comment to explain the need to import any re-exported items to enure they're accessible in UniFFI. (becasue rustc says to add them in `lib.rs` * Add Unified QR `send`/`receive` integration tests - Added `unified_qr_send_receive` test to verify the `UnifedQrPayment` functionality - Added logic to handle paying a `BOLT12` offer, `BOLT11` invoice, and if those fail `On-chain` tx from a URI. - Validated each payments successful event - Ensured the off-chain and on-chain balacnes reflected the payment attempts * Update PR with optimizations and nit fixups The changes include: - Fixed a handful of nits for better readability in docs and simple grammar errors and made various name changes that affected the committed files. 
- Added a helper function in unified_qr.rs called capitalize_qr_params to format the lightning param in the receive method - Removed the optional message in the receive method and made it a required &str - Adjusted UDL formatting to use tabs instead of spaces These changes were made to improve code quality and maintainability based on the review feedback * Refactor URI parsing and add Bolt12 offer in receive Changes include: - Modified serialize_params to serialize both invoices and offers - Refactored deserialize_temp by removing the code that was parsing based on the lightning invoice/offer prefix. I instead used for loop to iterate over each lightning parameter, attempting to parse the string as an offer first, and then as an invoice. May need to log an error if neither succeeds - Added support for Bolt12 offers in the receive method - Updated capitalize_params function to handle multiple lightning parameters - Added a generate_bip21_uri test to show what the uri looks like in integration_tests_rust - Adjusted integration tests. Still needs work Still trying to figure out a bug related to Bolt12 offers being "paid" when it should fall back to an on-chain tx * Update BOLT12 offer to use `lno` key In this commit: - In serialize_params, BOLT12 offers were changed to be serialized with the `lno` key rather than the `lightning` key - During deserializing, I had to make the same update. Used a match to check whether it was a `lightning` or `lno` key and then parsed accordingly. - Next, a small name change: capitalize_qr_params to format_uri. Previously I changed the value after "&lightning" to all caps, but the "&lno=" value wasn't being changed. So, added a helper method inside format_uri to capitalize the values given the key! - Updated corresponding tests with `lno` update Small nits: - Updated QrPaymentResult with more thorough docs - Added a parsing test with an offer * Refactor for clarity and improve error handling This commit fixes a handful of minor comments/nits that include: - Updates to set the `bip21` crates default-features to false, to minimize dependencies. - Enable the `std` feature since we use/benefit from it. - In `receive` return `InvoiceCreationFailed` or `OfferCreationFailed` when creating an invoice or offer. Rather than silently logging the error. - Also in `receive` we first check if an amount is specified, and if not, return an error and abort. - Pass in `Config` to `UnifiedQrPayment` struct to use the users config network. - In `send` instead of checking each network for the `NetworkChecked` URI, we pass in the `Config::Network`. - Simplifed param parsing in `deserialize_temp` by directly finding the key and parsing the corresponding value. - General documentation fixes. - In parsing tests, moved longer invoice/offer strings into. variables that start with expected_ for clarity. * Fix docs for clarity Cleaned up the docs so they are easier to understand for the user. Also changed the message param in receive to description. 
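For illustration, a minimal sketch of how the new handler is meant to be used, loosely following the integration test added in this patch; the two `Node` instances, their funding, and the channel between them are assumed and elided:

```rust
use ldk_node::payment::QrPaymentResult;
use ldk_node::{Node, NodeError};

// Sketch only: `payee` and `payer` are assumed to be two already-started
// `Node`s with an open channel and on-chain funds between them.
fn unified_qr_round_trip(payee: &Node, payer: &Node) -> Result<(), NodeError> {
	// Payee side: build a BIP21 URI bundling an on-chain address, a BOLT11
	// invoice, and a BOLT12 offer for 50_000 sats, expiring in one hour.
	let uri = payee.unified_qr_payment().receive(50_000, "coffee", 3_600)?;

	// Payer side: parse and pay the URI. The handler tries the BOLT12 offer
	// first, then the BOLT11 invoice, and finally falls back to an on-chain send.
	match payer.unified_qr_payment().send(&uri)? {
		QrPaymentResult::Bolt12 { payment_id } => println!("Paid offer: {:?}", payment_id),
		QrPaymentResult::Bolt11 { payment_id } => println!("Paid invoice: {:?}", payment_id),
		QrPaymentResult::Onchain { txid } => println!("Paid on-chain: {}", txid),
	}
	Ok(())
}
```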
--- Cargo.toml | 1 + bindings/ldk_node.udl | 17 ++ src/error.rs | 6 + src/lib.rs | 39 ++- src/payment/mod.rs | 2 + src/payment/unified_qr.rs | 421 ++++++++++++++++++++++++++++++++ src/uniffi_types.rs | 6 + tests/integration_tests_rust.rs | 158 +++++++++++- 8 files changed, 645 insertions(+), 5 deletions(-) create mode 100644 src/payment/unified_qr.rs diff --git a/Cargo.toml b/Cargo.toml index d4a87b2a2..206f5f2dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,6 +61,7 @@ reqwest = { version = "0.11", default-features = false, features = ["json", "rus rusqlite = { version = "0.28.0", features = ["bundled"] } bitcoin = "0.30.2" bip39 = "2.0.0" +bip21 = { version = "0.3.1", features = ["std"], default-features = false } rand = "0.8.5" chrono = { version = "0.4", default-features = false, features = ["clock"] } diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 2723db573..aedf9f6ab 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -63,6 +63,7 @@ interface Node { Bolt12Payment bolt12_payment(); SpontaneousPayment spontaneous_payment(); OnchainPayment onchain_payment(); + UnifiedQrPayment unified_qr_payment(); [Throws=NodeError] void connect(PublicKey node_id, SocketAddress address, boolean persist); [Throws=NodeError] @@ -148,6 +149,13 @@ interface OnchainPayment { Txid send_all_to_address([ByRef]Address address); }; +interface UnifiedQrPayment { + [Throws=NodeError] + string receive(u64 amount_sats, [ByRef]string message, u32 expiry_sec); + [Throws=NodeError] + QrPaymentResult send([ByRef]string uri_str); +}; + [Error] enum NodeError { "AlreadyRunning", @@ -175,6 +183,7 @@ enum NodeError { "GossipUpdateFailed", "GossipUpdateTimeout", "LiquidityRequestFailed", + "UriParameterParsingFailed", "InvalidAddress", "InvalidSocketAddress", "InvalidPublicKey", @@ -191,6 +200,7 @@ enum NodeError { "InvalidRefund", "InvalidChannelId", "InvalidNetwork", + "InvalidUri", "DuplicatePayment", "UnsupportedCurrency", "InsufficientFunds", @@ -276,6 +286,13 @@ interface PaymentKind { Spontaneous(PaymentHash hash, PaymentPreimage? preimage); }; +[Enum] +interface QrPaymentResult { + Onchain(Txid txid); + Bolt11(PaymentId payment_id); + Bolt12(PaymentId payment_id); +}; + enum PaymentDirection { "Inbound", "Outbound", diff --git a/src/error.rs b/src/error.rs index a8671d9a7..7506b013b 100644 --- a/src/error.rs +++ b/src/error.rs @@ -53,6 +53,8 @@ pub enum Error { GossipUpdateTimeout, /// A liquidity request operation failed. LiquidityRequestFailed, + /// Parsing a URI parameter has failed. + UriParameterParsingFailed, /// The given address is invalid. InvalidAddress, /// The given network address is invalid. @@ -85,6 +87,8 @@ pub enum Error { InvalidChannelId, /// The given network is invalid. InvalidNetwork, + /// The given URI is invalid. + InvalidUri, /// A payment with the given hash has already been initiated. DuplicatePayment, /// The provided offer was denonminated in an unsupported currency. 
@@ -131,6 +135,7 @@ impl fmt::Display for Error { Self::GossipUpdateFailed => write!(f, "Failed to update gossip data."), Self::GossipUpdateTimeout => write!(f, "Updating gossip data timed out."), Self::LiquidityRequestFailed => write!(f, "Failed to request inbound liquidity."), + Self::UriParameterParsingFailed => write!(f, "Failed to parse a URI parameter."), Self::InvalidAddress => write!(f, "The given address is invalid."), Self::InvalidSocketAddress => write!(f, "The given network address is invalid."), Self::InvalidPublicKey => write!(f, "The given public key is invalid."), @@ -147,6 +152,7 @@ impl fmt::Display for Error { Self::InvalidRefund => write!(f, "The given refund is invalid."), Self::InvalidChannelId => write!(f, "The given channel ID is invalid."), Self::InvalidNetwork => write!(f, "The given network is invalid."), + Self::InvalidUri => write!(f, "The given URI is invalid."), Self::DuplicatePayment => { write!(f, "A payment with the given hash has already been initiated.") }, diff --git a/src/lib.rs b/src/lib.rs index 1c137d355..206fe52d8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -133,7 +133,10 @@ use gossip::GossipSource; use graph::NetworkGraph; use liquidity::LiquiditySource; use payment::store::PaymentStore; -use payment::{Bolt11Payment, Bolt12Payment, OnchainPayment, PaymentDetails, SpontaneousPayment}; +use payment::{ + Bolt11Payment, Bolt12Payment, OnchainPayment, PaymentDetails, SpontaneousPayment, + UnifiedQrPayment, +}; use peer_store::{PeerInfo, PeerStore}; use types::{ Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, FeeEstimator, @@ -1072,6 +1075,40 @@ impl Node { )) } + /// Returns a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], + /// and [BOLT 12] payment options. + /// + /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md + /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md + /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki + #[cfg(not(feature = "uniffi"))] + pub fn unified_qr_payment(&self) -> UnifiedQrPayment { + UnifiedQrPayment::new( + self.onchain_payment().into(), + self.bolt11_payment().into(), + self.bolt12_payment().into(), + Arc::clone(&self.config), + Arc::clone(&self.logger), + ) + } + + /// Returns a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], + /// and [BOLT 12] payment options. + /// + /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md + /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md + /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki + #[cfg(feature = "uniffi")] + pub fn unified_qr_payment(&self) -> Arc { + Arc::new(UnifiedQrPayment::new( + self.onchain_payment(), + self.bolt11_payment(), + self.bolt12_payment(), + Arc::clone(&self.config), + Arc::clone(&self.logger), + )) + } + /// Retrieve a list of known channels. 
pub fn list_channels(&self) -> Vec { self.channel_manager.list_channels().into_iter().map(|c| c.into()).collect() diff --git a/src/payment/mod.rs b/src/payment/mod.rs index 1862bf2df..ac4fc5663 100644 --- a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -5,9 +5,11 @@ mod bolt12; mod onchain; mod spontaneous; pub(crate) mod store; +mod unified_qr; pub use bolt11::Bolt11Payment; pub use bolt12::Bolt12Payment; pub use onchain::OnchainPayment; pub use spontaneous::SpontaneousPayment; pub use store::{LSPFeeLimits, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; +pub use unified_qr::{QrPaymentResult, UnifiedQrPayment}; diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs new file mode 100644 index 000000000..a4551eb8a --- /dev/null +++ b/src/payment/unified_qr.rs @@ -0,0 +1,421 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Holds a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], and [BOLT 12] payment +//! options. +//! +//! [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki +//! [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md +//! [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md +use crate::error::Error; +use crate::logger::{log_error, FilesystemLogger, Logger}; +use crate::payment::{Bolt11Payment, Bolt12Payment, OnchainPayment}; +use crate::Config; + +use lightning::ln::channelmanager::PaymentId; +use lightning::offers::offer::Offer; +use lightning_invoice::Bolt11Invoice; + +use bip21::de::ParamKind; +use bip21::{DeserializationError, DeserializeParams, Param, SerializeParams}; +use bitcoin::address::{NetworkChecked, NetworkUnchecked}; +use bitcoin::{Amount, Txid}; + +use std::sync::Arc; +use std::vec::IntoIter; + +type Uri<'a> = bip21::Uri<'a, NetworkChecked, Extras>; + +#[derive(Debug, Clone)] +struct Extras { + bolt11_invoice: Option, + bolt12_offer: Option, +} + +/// A payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], and [BOLT 12] payment +/// option. +/// +/// Should be retrieved by calling [`Node::unified_qr_payment`] +/// +/// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki +/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md +/// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md +/// [`Node::unified_qr_payment`]: crate::Node::unified_qr_payment +pub struct UnifiedQrPayment { + onchain_payment: Arc, + bolt11_invoice: Arc, + bolt12_payment: Arc, + config: Arc, + logger: Arc, +} + +impl UnifiedQrPayment { + pub(crate) fn new( + onchain_payment: Arc, bolt11_invoice: Arc, + bolt12_payment: Arc, config: Arc, logger: Arc, + ) -> Self { + Self { onchain_payment, bolt11_invoice, bolt12_payment, config, logger } + } + + /// Generates a URI with an on-chain address, [BOLT 11] invoice and [BOLT 12] offer. + /// + /// The URI allows users to send the payment request allowing the wallet to decide + /// which payment method to use. This enables a fallback mechanism: older wallets + /// can always pay using the provided on-chain address, while newer wallets will + /// typically opt to use the provided BOLT11 invoice or BOLT12 offer. 
+ /// + /// # Parameters + /// - `amount_sats`: The amount to be received, specified in satoshis. + /// - `description`: A description or note associated with the payment. + /// This message is visible to the payer and can provide context or details about the payment. + /// - `expiry_sec`: The expiration time for the payment, specified in seconds. + /// + /// Returns a payable URI that can be used to request and receive a payment of the amount + /// given. In case of an error, the function returns `Error::WalletOperationFailed`for on-chain + /// address issues, `Error::InvoiceCreationFailed` for BOLT11 invoice issues, or + /// `Error::OfferCreationFailed` for BOLT12 offer issues. + /// + /// The generated URI can then be given to a QR code library. + /// + /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md + /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md + pub fn receive( + &self, amount_sats: u64, description: &str, expiry_sec: u32, + ) -> Result { + let onchain_address = self.onchain_payment.new_address()?; + + let amount_msats = amount_sats * 1_000; + + let bolt12_offer = match self.bolt12_payment.receive(amount_msats, description) { + Ok(offer) => Some(offer), + Err(e) => { + log_error!(self.logger, "Failed to create offer: {}", e); + return Err(Error::OfferCreationFailed); + }, + }; + + let bolt11_invoice = + match self.bolt11_invoice.receive(amount_msats, description, expiry_sec) { + Ok(invoice) => Some(invoice), + Err(e) => { + log_error!(self.logger, "Failed to create invoice {}", e); + return Err(Error::InvoiceCreationFailed); + }, + }; + + let extras = Extras { bolt11_invoice, bolt12_offer }; + + let mut uri = Uri::with_extras(onchain_address, extras); + uri.amount = Some(Amount::from_sat(amount_sats)); + uri.message = Some(description.into()); + + Ok(format_uri(uri)) + } + + /// Sends a payment given a [BIP 21] URI. + /// + /// This method parses the provided URI string and attempts to send the payment. If the URI + /// has an offer and or invoice, it will try to pay the offer first followed by the invoice. + /// If they both fail, the on-chain payment will be paid. + /// + /// Returns a `QrPaymentResult` indicating the outcome of the payment. If an error + /// occurs, an `Error` is returned detailing the issue encountered. + /// + /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki + pub fn send(&self, uri_str: &str) -> Result { + let uri: bip21::Uri = + uri_str.parse().map_err(|_| Error::InvalidUri)?; + + let uri_network_checked = + uri.clone().require_network(self.config.network).map_err(|_| Error::InvalidNetwork)?; + + if let Some(offer) = uri_network_checked.extras.bolt12_offer { + match self.bolt12_payment.send(&offer, None) { + Ok(payment_id) => return Ok(QrPaymentResult::Bolt12 { payment_id }), + Err(e) => log_error!(self.logger, "Failed to send BOLT12 offer: {:?}. This is part of a unified QR code payment. Falling back to the BOLT11 invoice.", e), + } + } + + if let Some(invoice) = uri_network_checked.extras.bolt11_invoice { + match self.bolt11_invoice.send(&invoice) { + Ok(payment_id) => return Ok(QrPaymentResult::Bolt11 { payment_id }), + Err(e) => log_error!(self.logger, "Failed to send BOLT11 invoice: {:?}. This is part of a unified QR code payment. Falling back to the on-chain transaction.", e), + } + } + + let amount = match uri_network_checked.amount { + Some(amount) => amount, + None => { + log_error!(self.logger, "No amount specified in the URI. 
Aborting the payment."); + return Err(Error::InvalidAmount); + }, + }; + + let txid = + self.onchain_payment.send_to_address(&uri_network_checked.address, amount.to_sat())?; + + Ok(QrPaymentResult::Onchain { txid }) + } +} + +/// Represents the result of a payment made using a [BIP 21] QR code. +/// +/// After a successful on-chain transaction, the transaction ID ([`Txid`]) is returned. +/// For BOLT11 and BOLT12 payments, the corresponding [`PaymentId`] is returned. +/// +/// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki +/// [`PaymentId`]: lightning::ln::channelmanager::PaymentId +/// [`Txid`]: bitcoin::hash_types::Txid +pub enum QrPaymentResult { + /// An on-chain payment. + Onchain { + /// The transaction ID (txid) of the on-chain payment. + txid: Txid, + }, + /// A [BOLT 11] payment. + /// + /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md + Bolt11 { + /// The payment ID for the BOLT11 invoice. + payment_id: PaymentId, + }, + /// A [BOLT 12] offer payment, i.e., a payment for an [`Offer`]. + /// + /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md + /// [`Offer`]: crate::lightning::offers::offer::Offer + Bolt12 { + /// The payment ID for the BOLT12 offer. + payment_id: PaymentId, + }, +} + +fn format_uri(uri: bip21::Uri) -> String { + let mut uri = format!("{:#}", uri); + + fn value_to_uppercase(uri: &mut String, key: &str) { + let mut start = 0; + while let Some(index) = uri[start..].find(key) { + let start_index = start + index; + let end_index = uri[start_index..].find('&').map_or(uri.len(), |i| start_index + i); + let lightning_value = &uri[start_index + key.len()..end_index]; + let uppercase_lighting_value = lightning_value.to_uppercase(); + uri.replace_range(start_index + key.len()..end_index, &uppercase_lighting_value); + start = end_index + } + } + value_to_uppercase(&mut uri, "lightning="); + value_to_uppercase(&mut uri, "lno="); + uri +} + +impl<'a> SerializeParams for &'a Extras { + type Key = &'static str; + type Value = String; + type Iterator = IntoIter<(Self::Key, Self::Value)>; + + fn serialize_params(self) -> Self::Iterator { + let mut params = Vec::new(); + + if let Some(bolt11_invoice) = &self.bolt11_invoice { + params.push(("lightning", bolt11_invoice.to_string())); + } + if let Some(bolt12_offer) = &self.bolt12_offer { + params.push(("lno", bolt12_offer.to_string())); + } + + params.into_iter() + } +} + +impl<'a> DeserializeParams<'a> for Extras { + type DeserializationState = DeserializationState; +} + +#[derive(Default)] +struct DeserializationState { + bolt11_invoice: Option, + bolt12_offer: Option, +} + +impl<'a> bip21::de::DeserializationState<'a> for DeserializationState { + type Value = Extras; + + fn is_param_known(&self, key: &str) -> bool { + key == "lightning" || key == "lno" + } + + fn deserialize_temp( + &mut self, key: &str, value: Param<'_>, + ) -> Result::Error> { + match key { + "lightning" => { + let bolt11_value = + String::try_from(value).map_err(|_| Error::UriParameterParsingFailed)?; + if let Ok(invoice) = bolt11_value.parse::() { + self.bolt11_invoice = Some(invoice); + Ok(bip21::de::ParamKind::Known) + } else { + Ok(bip21::de::ParamKind::Unknown) + } + }, + "lno" => { + let bolt12_value = + String::try_from(value).map_err(|_| Error::UriParameterParsingFailed)?; + if let Ok(offer) = bolt12_value.parse::() { + self.bolt12_offer = Some(offer); + Ok(bip21::de::ParamKind::Known) + } else { + Ok(bip21::de::ParamKind::Unknown) + } + }, + _ => 
Ok(bip21::de::ParamKind::Unknown), + } + } + + fn finalize(self) -> Result::Error> { + Ok(Extras { bolt11_invoice: self.bolt11_invoice, bolt12_offer: self.bolt12_offer }) + } +} + +impl DeserializationError for Extras { + type Error = Error; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::payment::unified_qr::Extras; + use bitcoin::{Address, Network}; + use std::str::FromStr; + + #[test] + fn parse_uri() { + let uri_test1 = "BITCOIN:TB1QRSCD05XNY6QZ63TF9GJELGVK6D3UDJFEKK62VU?amount=1&message=Test%20message&lightning=LNTB1000M1PNXWM7MDQ523JHXAPQD4JHXUMPVAJSNP4QWP9QD2JFP8DUZ46JQG5LTKVDH04YG52G6UF2YAXP8H7YZPZM3DM5PP5KUP7YT429UP9Z4ACPA60R7WETSTL66549MG05P0JN0C4L2NCC40SSP5R0LH86DJCL0NK8HZHNZHX92VVUAAVNE48Z5RVKVY5DKTRQ0DMP7S9QYYSGQCQPCXQRRAQYR59FGN2VVC5R6DS0AZMETH493ZU56H0WSVMGYCW9LEPZ032PGQNZMQ6XKVEH90Z02C0NH3J5QGDAWCS2YC2ZNP22J0ZD0PPF78N4QQQEXTYS2"; + let expected_bolt11_invoice_1 = "LNTB1000M1PNXWM7MDQ523JHXAPQD4JHXUMPVAJSNP4QWP9QD2JFP8DUZ46JQG5LTKVDH04YG52G6UF2YAXP8H7YZPZM3DM5PP5KUP7YT429UP9Z4ACPA60R7WETSTL66549MG05P0JN0C4L2NCC40SSP5R0LH86DJCL0NK8HZHNZHX92VVUAAVNE48Z5RVKVY5DKTRQ0DMP7S9QYYSGQCQPCXQRRAQYR59FGN2VVC5R6DS0AZMETH493ZU56H0WSVMGYCW9LEPZ032PGQNZMQ6XKVEH90Z02C0NH3J5QGDAWCS2YC2ZNP22J0ZD0PPF78N4QQQEXTYS2"; + let parsed_uri = uri_test1 + .parse::>() + .expect("Failed Parsing") + .require_network(Network::Testnet) + .expect("Invalid Network"); + + assert_eq!( + parsed_uri.address, + bitcoin::Address::from_str("TB1QRSCD05XNY6QZ63TF9GJELGVK6D3UDJFEKK62VU") + .unwrap() + .require_network(Network::Testnet) + .unwrap() + ); + + assert_eq!(Amount::from_sat(100_000_000), Amount::from(parsed_uri.amount.unwrap())); + + if let Some(invoice) = parsed_uri.extras.bolt11_invoice { + assert_eq!(invoice, Bolt11Invoice::from_str(expected_bolt11_invoice_1).unwrap()); + } else { + panic!("No Lightning invoice found"); + } + + let uri_with_offer = "BITCOIN:BCRT1QM0NW9S05QDPGC6F52FPKA9U6Q6VWTT5WVS30R2?amount=0.001&message=asdf&lightning=LNBCRT1M1PNGMY98DQ8V9EKGESNP4QDH5SL00QK4842UZMZVJVX2NLUZT4E6P2ZC2DLAGCU565TP42AUDYPP5XD0PRS5CRDLZVU8DNQQU08W9F4YP0XRXW06ZSHCLCHZU9X28HSSSSP5ES30JG9J4VK2CRW80YXTLRJU2M097TXMFTHR00VC5V0LGKVMURRQ9QYYSGQCQPCXQRRAQRZJQ0Q0K9CDYFSVZAJ5V3PDWYWDMHLEYCVD7TG0SVMY4AM4P6GQZJZ5XQQQQYQQX2QQQUQQQQLGQQQQQQQQFQWDQZX24PSHN68A9D4X4HD89F3XVC7DGGRDTFCA5WH4KZ546GSRTJVACA34QQ3DZ9W4JHLJD3XZRW44RA0RET6RDSRJCEZQC6AXANX6QPHZKHJK&lno=LNO1QGSQVGNWGCG35Z6EE2H3YCZRADDM72XRFUA9UVE2RLRM9DEU7XYFZRCYZPGTGRDWMGU44QPYUXLHLLMLWN4QSPQ97HSSQZSYV9EKGESSWCPK7JRAAUZ6574TSTVFJFSE20LSFWH8G9GTPFHL4RRJN23VX4TH35SRWKCNQ6S8R9ZW9HU5RXMPXVYCJVK2KY3NTEA8VXZTMWJF4NAJCCAQZQ7YZ7KDDZ600LAW2S2E7Q6XDYLPSMLMV4YAY0QXX5NC8QH05JRNUYQPQCAHK8Y5KQ8H9X624LS6A9GWFTGKYYPUZVUKKM93DWETTL8A7NE84L7SNHCSGR006EACQRQP8YWY6WPS0TS"; + let expected_bolt11_invoice_2 = "LNBCRT1M1PNGMY98DQ8V9EKGESNP4QDH5SL00QK4842UZMZVJVX2NLUZT4E6P2ZC2DLAGCU565TP42AUDYPP5XD0PRS5CRDLZVU8DNQQU08W9F4YP0XRXW06ZSHCLCHZU9X28HSSSSP5ES30JG9J4VK2CRW80YXTLRJU2M097TXMFTHR00VC5V0LGKVMURRQ9QYYSGQCQPCXQRRAQRZJQ0Q0K9CDYFSVZAJ5V3PDWYWDMHLEYCVD7TG0SVMY4AM4P6GQZJZ5XQQQQYQQX2QQQUQQQQLGQQQQQQQQFQWDQZX24PSHN68A9D4X4HD89F3XVC7DGGRDTFCA5WH4KZ546GSRTJVACA34QQ3DZ9W4JHLJD3XZRW44RA0RET6RDSRJCEZQC6AXANX6QPHZKHJK"; + let expected_bolt12_offer_2 = 
"LNO1QGSQVGNWGCG35Z6EE2H3YCZRADDM72XRFUA9UVE2RLRM9DEU7XYFZRCYZPGTGRDWMGU44QPYUXLHLLMLWN4QSPQ97HSSQZSYV9EKGESSWCPK7JRAAUZ6574TSTVFJFSE20LSFWH8G9GTPFHL4RRJN23VX4TH35SRWKCNQ6S8R9ZW9HU5RXMPXVYCJVK2KY3NTEA8VXZTMWJF4NAJCCAQZQ7YZ7KDDZ600LAW2S2E7Q6XDYLPSMLMV4YAY0QXX5NC8QH05JRNUYQPQCAHK8Y5KQ8H9X624LS6A9GWFTGKYYPUZVUKKM93DWETTL8A7NE84L7SNHCSGR006EACQRQP8YWY6WPS0TS"; + let parsed_uri_with_offer = uri_with_offer + .parse::>() + .expect("Failed Parsing") + .require_network(Network::Regtest) + .expect("Invalid Network"); + + assert_eq!(Amount::from_sat(100_000), Amount::from(parsed_uri_with_offer.amount.unwrap())); + + assert_eq!( + parsed_uri_with_offer.address, + bitcoin::Address::from_str("BCRT1QM0NW9S05QDPGC6F52FPKA9U6Q6VWTT5WVS30R2") + .unwrap() + .require_network(Network::Regtest) + .unwrap() + ); + + if let Some(invoice) = parsed_uri_with_offer.extras.bolt11_invoice { + assert_eq!(invoice, Bolt11Invoice::from_str(expected_bolt11_invoice_2).unwrap()); + } else { + panic!("No invoice found.") + } + + if let Some(offer) = parsed_uri_with_offer.extras.bolt12_offer { + assert_eq!(offer, Offer::from_str(expected_bolt12_offer_2).unwrap()); + } else { + panic!("No offer found."); + } + + let zeus_test = "bitcoin:TB1QQ32G6LM2XKT0U2UGASH5DC4CFT3JTPEW65PZZ5?lightning=LNTB500U1PN89HH6PP5MA7K6DRM5SYVD05NTXMGSRNM728J7EHM8KV6VC96YNLKN7G7VDYQDQQCQZRCXQR8Q7SP5HU30L0EEXKYYPQSQYEZELZWUPT62HLJ0KV2662CALGPAML50QPXQ9QXPQYSGQDKTVFXEC8H2DG2GY3C95ETAJ0QKX50XAUCU304PPFV2SQVGFHZ6RMZWJV8MC3M0LXF3GW852C5VSK0DELK0JHLYUTYZDF7QKNAMT4PQQQN24WM&amount=0.0005"; + let expected_bolt11_invoice_3 = "LNTB500U1PN89HH6PP5MA7K6DRM5SYVD05NTXMGSRNM728J7EHM8KV6VC96YNLKN7G7VDYQDQQCQZRCXQR8Q7SP5HU30L0EEXKYYPQSQYEZELZWUPT62HLJ0KV2662CALGPAML50QPXQ9QXPQYSGQDKTVFXEC8H2DG2GY3C95ETAJ0QKX50XAUCU304PPFV2SQVGFHZ6RMZWJV8MC3M0LXF3GW852C5VSK0DELK0JHLYUTYZDF7QKNAMT4PQQQN24WM"; + let uri_test2 = zeus_test + .parse::>() + .expect("Failed Parsing") + .require_network(Network::Testnet) + .expect("Invalid Network"); + + assert_eq!( + uri_test2.address, + bitcoin::Address::from_str("TB1QQ32G6LM2XKT0U2UGASH5DC4CFT3JTPEW65PZZ5") + .unwrap() + .require_network(Network::Testnet) + .unwrap() + ); + + if let Some(invoice) = uri_test2.extras.bolt11_invoice { + assert_eq!(invoice, Bolt11Invoice::from_str(expected_bolt11_invoice_3).unwrap()); + } else { + panic!("No invoice found."); + } + assert_eq!(Amount::from(uri_test2.amount.unwrap()), Amount::from_sat(50000)); + + let muun_test = "bitcoin:bc1q6fmtam67h8wxfwtpumhazhtwyrh3uf039n058zke9xt5hr4ljzwsdcm2pj?amount=0.01&lightning=lnbc10m1pn8g2j4pp575tg4wt8jwgu2lvtk3aj6hy7mc6tnupw07wwkxcvyhtt3wlzw0zsdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdv6dzdeg0ww2eyqqqqryqqqqthqqpysp5fkd3k2rzvwdt2av068p58evf6eg50q0eftfhrpugaxkuyje4d25q9qrsgqqkfmnn67s5g6hadrcvf5h0l7p92rtlkwrfqdvc7uuf6lew0czxksvqhyux3zjrl3tlakwhtvezwl24zshnfumukwh0yntqsng9z6glcquvw7kc"; + let expected_bolt11_invoice_4 = "lnbc10m1pn8g2j4pp575tg4wt8jwgu2lvtk3aj6hy7mc6tnupw07wwkxcvyhtt3wlzw0zsdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdv6dzdeg0ww2eyqqqqryqqqqthqqpysp5fkd3k2rzvwdt2av068p58evf6eg50q0eftfhrpugaxkuyje4d25q9qrsgqqkfmnn67s5g6hadrcvf5h0l7p92rtlkwrfqdvc7uuf6lew0czxksvqhyux3zjrl3tlakwhtvezwl24zshnfumukwh0yntqsng9z6glcquvw7kc"; + let uri_test3 = muun_test + .parse::>() + .expect("Failed Parsing") + .require_network(Network::Bitcoin) + .expect("Invalid Network"); + assert_eq!( + uri_test3.address, + bitcoin::Address::from_str( + "bc1q6fmtam67h8wxfwtpumhazhtwyrh3uf039n058zke9xt5hr4ljzwsdcm2pj" + ) + .unwrap() + 
.require_network(Network::Bitcoin) + .unwrap() + ); + + if let Some(invoice) = uri_test3.extras.bolt11_invoice { + assert_eq!(invoice, Bolt11Invoice::from_str(expected_bolt11_invoice_4).unwrap()); + } else { + panic!("No invoice found"); + } + assert_eq!(Amount::from(uri_test3.amount.unwrap()), Amount::from_sat(1_000_000)); + + let muun_test_no_amount = "bitcoin:bc1qwe94y974pjl9kg5afg8tmsc0nz4hct04u78hdhukxvnnphgu48hs9lx3k5?lightning=lnbc1pn8g249pp5f6ytj32ty90jhvw69enf30hwfgdhyymjewywcmfjevflg6s4z86qdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdfjjzh2j9f7ye5qqqqryqqqqthqqpysp5mm832athgcal3m7h35sc29j63lmgzvwc5smfjh2es65elc2ns7dq9qrsgqu2xcje2gsnjp0wn97aknyd3h58an7sjj6nhcrm40846jxphv47958c6th76whmec8ttr2wmg6sxwchvxmsc00kqrzqcga6lvsf9jtqgqy5yexa"; + let expected_bolt11_invoice_5 = "lnbc1pn8g249pp5f6ytj32ty90jhvw69enf30hwfgdhyymjewywcmfjevflg6s4z86qdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdfjjzh2j9f7ye5qqqqryqqqqthqqpysp5mm832athgcal3m7h35sc29j63lmgzvwc5smfjh2es65elc2ns7dq9qrsgqu2xcje2gsnjp0wn97aknyd3h58an7sjj6nhcrm40846jxphv47958c6th76whmec8ttr2wmg6sxwchvxmsc00kqrzqcga6lvsf9jtqgqy5yexa"; + let uri_test4 = muun_test_no_amount + .parse::>() + .expect("Failed Parsing") + .require_network(Network::Bitcoin) + .expect("Invalid Network"); + assert_eq!( + uri_test4.address, + Address::from_str("bc1qwe94y974pjl9kg5afg8tmsc0nz4hct04u78hdhukxvnnphgu48hs9lx3k5") + .unwrap() + .require_network(Network::Bitcoin) + .unwrap() + ); + if let Some(invoice) = uri_test4.extras.bolt11_invoice { + assert_eq!(invoice, Bolt11Invoice::from_str(expected_bolt11_invoice_5).unwrap()); + } else { + panic!("No invoice found"); + } + } +} diff --git a/src/uniffi_types.rs b/src/uniffi_types.rs index 9dd7e5699..7c2142091 100644 --- a/src/uniffi_types.rs +++ b/src/uniffi_types.rs @@ -1,5 +1,11 @@ +// Importing these items ensures they are accessible in the uniffi bindings +// without introducing unused import warnings in lib.rs. +// +// Make sure to add any re-exported items that need to be used in uniffi below. 
+ pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; pub use crate::payment::store::{LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus}; +pub use crate::payment::QrPaymentResult; pub use lightning::events::{ClosureReason, PaymentFailureReason}; pub use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 37ddeb9a7..5a918762a 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -1,13 +1,13 @@ mod common; use common::{ - do_channel_full_cycle, expect_event, expect_payment_received_event, + do_channel_full_cycle, expect_channel_ready_event, expect_event, expect_payment_received_event, expect_payment_successful_event, generate_blocks_and_wait, open_channel, premine_and_distribute_funds, random_config, setup_bitcoind_and_electrsd, setup_builder, setup_node, setup_two_nodes, wait_for_tx, TestSyncStore, }; -use ldk_node::payment::PaymentKind; +use ldk_node::payment::{PaymentKind, QrPaymentResult}; use ldk_node::{Builder, Event, NodeError}; use lightning::ln::channelmanager::PaymentId; @@ -17,8 +17,6 @@ use bitcoin::{Amount, Network}; use std::sync::Arc; -use crate::common::expect_channel_ready_event; - #[test] fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); @@ -552,3 +550,155 @@ fn simple_bolt12_send_receive() { } assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(overpaid_amount)); } + +#[test] +fn generate_bip21_uri() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + + let address_a = node_a.onchain_payment().new_address().unwrap(); + let premined_sats = 5_000_000; + + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_a], + Amount::from_sat(premined_sats), + ); + + node_a.sync_wallets().unwrap(); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + let expected_amount_sats = 100_000; + let expiry_sec = 4_000; + + let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); + + match uqr_payment.clone() { + Ok(ref uri) => { + println!("Generated URI: {}", uri); + assert!(uri.contains("BITCOIN:")); + assert!(uri.contains("lightning=")); + assert!(uri.contains("lno=")); + }, + Err(e) => panic!("Failed to generate URI: {:?}", e), + } +} + +#[test] +fn unified_qr_send_receive() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + + let address_a = node_a.onchain_payment().new_address().unwrap(); + let premined_sats = 5_000_000; + + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_a], + Amount::from_sat(premined_sats), + ); + + node_a.sync_wallets().unwrap(); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + // Sleep until we broadcast a node announcement. 
+ while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + + // Sleep one more sec to make sure the node announcement propagates. + std::thread::sleep(std::time::Duration::from_secs(1)); + + let expected_amount_sats = 100_000; + let expiry_sec = 4_000; + + let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); + let uri_str = uqr_payment.clone().unwrap(); + let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str) { + Ok(QrPaymentResult::Bolt12 { payment_id }) => { + println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); + payment_id + }, + Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { + panic!("Expected Bolt12 payment but got Bolt11"); + }, + Ok(QrPaymentResult::Onchain { txid: _ }) => { + panic!("Expected Bolt12 payment but get On-chain transaction"); + }, + Err(e) => { + panic!("Expected Bolt12 payment but got error: {:?}", e); + }, + }; + + expect_payment_successful_event!(node_a, Some(offer_payment_id), None); + + // Removed one character from the offer to fall back on to invoice. + // Still needs work + let uri_str_with_invalid_offer = &uri_str[..uri_str.len() - 1]; + let invoice_payment_id: PaymentId = + match node_a.unified_qr_payment().send(uri_str_with_invalid_offer) { + Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { + panic!("Expected Bolt11 payment but got Bolt12"); + }, + Ok(QrPaymentResult::Bolt11 { payment_id }) => { + println!("\nBolt11 payment sent successfully with PaymentID: {:?}", payment_id); + payment_id + }, + Ok(QrPaymentResult::Onchain { txid: _ }) => { + panic!("Expected Bolt11 payment but got on-chain transaction"); + }, + Err(e) => { + panic!("Expected Bolt11 payment but got error: {:?}", e); + }, + }; + expect_payment_successful_event!(node_a, Some(invoice_payment_id), None); + + let expect_onchain_amount_sats = 800_000; + let onchain_uqr_payment = + node_b.unified_qr_payment().receive(expect_onchain_amount_sats, "asdf", 4_000).unwrap(); + + // Removed a character from the offer, so it would move on to the other parameters. + let txid = match node_a + .unified_qr_payment() + .send(&onchain_uqr_payment.as_str()[..onchain_uqr_payment.len() - 1]) + { + Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { + panic!("Expected on-chain payment but got Bolt12") + }, + Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { + panic!("Expected on-chain payment but got Bolt11"); + }, + Ok(QrPaymentResult::Onchain { txid }) => { + println!("\nOn-chain transaction successful with Txid: {}", txid); + txid + }, + Err(e) => { + panic!("Expected on-chain payment but got error: {:?}", e); + }, + }; + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + wait_for_tx(&electrsd.client, txid); + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + assert_eq!(node_b.list_balances().total_onchain_balance_sats, 800_000); + assert_eq!(node_b.list_balances().total_lightning_balance_sats, 200_000); +} From caa4d2272517193fd01035129c50089be4537472 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 30 Jul 2024 09:22:24 -0500 Subject: [PATCH 008/127] Request current fee rates only once .. rather than once per target. 
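Before this change, the Esplora `get_fee_estimates()` round trip happened inside the per-target loop; the diff below hoists it out so the response is fetched once and each `ConfirmationTarget` is derived from that single result. A toy sketch of the resulting shape (the helper, targets, and numbers here are illustrative stand-ins, not LDK Node's actual API):

```rust
use std::collections::HashMap;

// Hypothetical stand-in for the single Esplora `get_fee_estimates()` call;
// maps confirmation targets (in blocks) to fee rates in sat/vB.
fn fetch_estimates() -> HashMap<u16, f64> {
	HashMap::from([(1, 50.0), (6, 20.0), (12, 10.0), (1008, 1.5)])
}

fn main() {
	// One network request...
	let estimates = fetch_estimates();

	// ...reused for every confirmation target instead of re-fetching per target.
	for (target, num_blocks) in
		[("OnChainSweep", 6u16), ("ChannelCloseMinimum", 1008), ("OutputSpendingFee", 12)]
	{
		let sat_per_vb = estimates.get(&num_blocks).copied().unwrap_or(1.0);
		println!("{target}: {} sat/kWU", (sat_per_vb * 250.0) as u32);
	}
}
```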
--- src/fee_estimator.rs | 61 ++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 36 deletions(-) diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs index f1fa7e43b..329cc6e42 100644 --- a/src/fee_estimator.rs +++ b/src/fee_estimator.rs @@ -37,6 +37,29 @@ where } pub(crate) async fn update_fee_estimates(&self) -> Result<(), Error> { + let estimates = tokio::time::timeout( + Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + self.esplora_client.get_fee_estimates(), + ) + .await + .map_err(|e| { + log_error!(self.logger, "Updating fee rate estimates timed out: {}", e); + Error::FeerateEstimationUpdateTimeout + })? + .map_err(|e| { + log_error!(self.logger, "Failed to retrieve fee rate estimates: {}", e); + Error::FeerateEstimationUpdateFailed + })?; + + if estimates.is_empty() && self.config.network == Network::Bitcoin { + // Ensure we fail if we didn't receive any estimates. + log_error!( + self.logger, + "Failed to retrieve fee rate estimates: empty fee estimates are dissallowed on Mainnet.", + ); + return Err(Error::FeerateEstimationUpdateFailed); + } + let confirmation_targets = vec![ ConfirmationTarget::OnChainSweep, ConfirmationTarget::MinAllowedAnchorChannelRemoteFee, @@ -57,42 +80,8 @@ where ConfirmationTarget::OutputSpendingFee => 12, }; - let estimates = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), - self.esplora_client.get_fee_estimates(), - ) - .await - .map_err(|e| { - log_error!( - self.logger, - "Updating fee rate estimates for {:?} timed out: {}", - target, - e - ); - Error::FeerateEstimationUpdateTimeout - })? - .map_err(|e| { - log_error!( - self.logger, - "Failed to retrieve fee rate estimates for {:?}: {}", - target, - e - ); - Error::FeerateEstimationUpdateFailed - })?; - - if estimates.is_empty() && self.config.network == Network::Bitcoin { - // Ensure we fail if we didn't receive any estimates. - log_error!( - self.logger, - "Failed to retrieve fee rate estimates for {:?}: empty fee estimates are dissallowed on Mainnet.", - target, - ); - return Err(Error::FeerateEstimationUpdateFailed); - } - - let converted_estimates = esplora_client::convert_fee_rate(num_blocks, estimates) - .map_err(|e| { + let converted_estimates = + esplora_client::convert_fee_rate(num_blocks, estimates.clone()).map_err(|e| { log_error!( self.logger, "Failed to convert fee rate estimates for {:?}: {}", From 8a59dc09831b8f441da85a3801294adef7a133b0 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 30 Jul 2024 09:26:06 -0500 Subject: [PATCH 009/127] Fix `send_to_address` variable naming in bindings --- bindings/ldk_node.udl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index aedf9f6ab..514876426 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -144,7 +144,7 @@ interface OnchainPayment { [Throws=NodeError] Address new_address(); [Throws=NodeError] - Txid send_to_address([ByRef]Address address, u64 amount_msat); + Txid send_to_address([ByRef]Address address, u64 amount_sats); [Throws=NodeError] Txid send_all_to_address([ByRef]Address address); }; From 5733b58ead765b474937353466088bd469cc7cbe Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 30 Jul 2024 11:19:24 -0500 Subject: [PATCH 010/127] Account for `check-cfg` Recently, Rust 1.80 introduced automatic checking of `cfg` flags (see https://blog.rust-lang.org/2024/05/06/check-cfg.html). 
Here, we add all custom `cfg`s to the expected list --- Cargo.toml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 206f5f2dd..63e0cbda4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,3 +102,13 @@ panic = "abort" [profile.dev] panic = "abort" + +[lints.rust.unexpected_cfgs] +level = "forbid" +# When adding a new cfg attribute, ensure that it is added to this list. +check-cfg = [ + "cfg(vss)", + "cfg(vss_test)", + "cfg(ldk_bench)", + "cfg(tokio_unstable)", +] From f382ea05889fe3601c77fee246d94d634901a0ba Mon Sep 17 00:00:00 2001 From: Ian Slane Date: Thu, 1 Aug 2024 20:02:46 -0600 Subject: [PATCH 011/127] Add `cfg(cln_test)` to `Cargo.toml` --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index 63e0cbda4..9d350e18c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -111,4 +111,5 @@ check-cfg = [ "cfg(vss_test)", "cfg(ldk_bench)", "cfg(tokio_unstable)", + "cfg(cln_test)", ] From ed841e0ddb9dd772c04c71ebc0ef377b808eb596 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 7 Aug 2024 07:55:31 +0200 Subject: [PATCH 012/127] Pin `tokio` to v1.38.1 to fix MSRV build --- .github/workflows/rust.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 5ce1306ee..4b7350f7f 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -49,6 +49,7 @@ jobs: cargo update -p proptest --precise "1.2.0" --verbose # proptest 1.3.0 requires rustc 1.64.0 cargo update -p regex --precise "1.9.6" --verbose # regex 1.10.0 requires rustc 1.65.0 cargo update -p home --precise "0.5.5" --verbose # home v0.5.9 requires rustc 1.70 or newer + cargo update -p tokio --precise "1.38.1" --verbose # tokio v1.39.0 requires rustc 1.70 or newer - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == 'stable'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" From 40170d98ebe621ec4d18d06072977962e192adcb Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 12 Aug 2024 13:05:42 +0200 Subject: [PATCH 013/127] Enable caching for `bitcoind`/`electrs` in CI --- .github/workflows/rust.yml | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 4b7350f7f..6260c6c20 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -53,14 +53,27 @@ jobs: - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == 'stable'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" + - name: Enable caching for bitcoind + id: cache-bitcoind + uses: actions/cache@v4 + with: + path: $HOME/bin/bitcoind + key: bitcoind + - name: Enable caching for electrs + id: cache-electrs + uses: actions/cache@v4 + with: + path: $HOME/bin/electrs + key: electrs - name: Download bitcoind/electrs and set environment variables - if: "matrix.platform != 'windows-latest'" + if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | source ./scripts/download_bitcoind_electrs.sh - cp "$BITCOIND_EXE" "$HOME"/bitcoind - cp "$ELECTRS_EXE" "$HOME"/electrs - echo "BITCOIND_EXE=$HOME/bitcoind" >> "$GITHUB_ENV" - echo "ELECTRS_EXE=$HOME/electrs" >> "$GITHUB_ENV" + mkdir -p "$HOME"/bin + mv "$BITCOIND_EXE" "$HOME"/bin/bitcoind + mv "$ELECTRS_EXE" "$HOME"/bin/electrs + echo "BITCOIND_EXE=$HOME/bin/bitcoind" >> "$GITHUB_ENV" + echo "ELECTRS_EXE=$HOME/bin/electrs" >> "$GITHUB_ENV" - name: Build on Rust ${{ 
matrix.toolchain }} run: cargo build --verbose --color always - name: Build with UniFFI support on Rust ${{ matrix.toolchain }} From 30ce72d4661e140b7e0407d37d4bb7aa21ec2ab5 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 12 Aug 2024 13:22:28 +0200 Subject: [PATCH 014/127] Make cache `key`s OS-specific .. just to be sure. --- .github/workflows/rust.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 6260c6c20..38242d4cf 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -58,13 +58,13 @@ jobs: uses: actions/cache@v4 with: path: $HOME/bin/bitcoind - key: bitcoind + key: bitcoind-${{ runner.os }} - name: Enable caching for electrs id: cache-electrs uses: actions/cache@v4 with: path: $HOME/bin/electrs - key: electrs + key: electrs-${{ runner.os }} - name: Download bitcoind/electrs and set environment variables if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | From 6b35703d16a045995f8c1f7fccff26263944cd8a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 12 Aug 2024 15:00:23 +0200 Subject: [PATCH 015/127] Try fix caching ... see if dropping $HOME works. --- .github/workflows/rust.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 38242d4cf..848126403 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -57,23 +57,23 @@ jobs: id: cache-bitcoind uses: actions/cache@v4 with: - path: $HOME/bin/bitcoind + path: bin/bitcoind key: bitcoind-${{ runner.os }} - name: Enable caching for electrs id: cache-electrs uses: actions/cache@v4 with: - path: $HOME/bin/electrs + path: bin/electrs key: electrs-${{ runner.os }} - name: Download bitcoind/electrs and set environment variables if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | source ./scripts/download_bitcoind_electrs.sh - mkdir -p "$HOME"/bin - mv "$BITCOIND_EXE" "$HOME"/bin/bitcoind - mv "$ELECTRS_EXE" "$HOME"/bin/electrs - echo "BITCOIND_EXE=$HOME/bin/bitcoind" >> "$GITHUB_ENV" - echo "ELECTRS_EXE=$HOME/bin/electrs" >> "$GITHUB_ENV" + mkdir -p bin + mv "$BITCOIND_EXE" bin/bitcoind + mv "$ELECTRS_EXE" bin/electrs + echo "BITCOIND_EXE=bin/bitcoind" >> "$GITHUB_ENV" + echo "ELECTRS_EXE=bin/electrs" >> "$GITHUB_ENV" - name: Build on Rust ${{ matrix.toolchain }} run: cargo build --verbose --color always - name: Build with UniFFI support on Rust ${{ matrix.toolchain }} From 41ae206c47fd23ac64f4406cc45b914e530ad40f Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 13 Aug 2024 09:45:19 +0200 Subject: [PATCH 016/127] Try fixing caching once more .. since it stopped working now? 
--- .github/workflows/rust.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 848126403..59f022d1d 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -57,23 +57,23 @@ jobs: id: cache-bitcoind uses: actions/cache@v4 with: - path: bin/bitcoind - key: bitcoind-${{ runner.os }} + path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} + key: bitcoind-${{ runner.os }}-${{ runner.arch }} - name: Enable caching for electrs id: cache-electrs uses: actions/cache@v4 with: - path: bin/electrs - key: electrs-${{ runner.os }} + path: bin/electrs-${{ runner.os }}-${{ runner.arch }} + key: electrs-${{ runner.os }}-${{ runner.arch }} - name: Download bitcoind/electrs and set environment variables if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | source ./scripts/download_bitcoind_electrs.sh - mkdir -p bin - mv "$BITCOIND_EXE" bin/bitcoind - mv "$ELECTRS_EXE" bin/electrs - echo "BITCOIND_EXE=bin/bitcoind" >> "$GITHUB_ENV" - echo "ELECTRS_EXE=bin/electrs" >> "$GITHUB_ENV" + mkdir bin + mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} + mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} + echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" + echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" - name: Build on Rust ${{ matrix.toolchain }} run: cargo build --verbose --color always - name: Build with UniFFI support on Rust ${{ matrix.toolchain }} From 0cfb829c77f75fb83f1a8a03a2a3b62bbe506be5 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 13 Aug 2024 15:04:15 +0200 Subject: [PATCH 017/127] Fix caching third attempt .. we previously only set the environment variables when we downloaded the binaries. Here, we set them in a separate step to have them being usable when we're hitting the cache --- .github/workflows/rust.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 59f022d1d..cce80d00d 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -65,13 +65,15 @@ jobs: with: path: bin/electrs-${{ runner.os }}-${{ runner.arch }} key: electrs-${{ runner.os }}-${{ runner.arch }} - - name: Download bitcoind/electrs and set environment variables + - name: Download bitcoind/electrs if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | source ./scripts/download_bitcoind_electrs.sh mkdir bin mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} + - name: Set bitcoind/electrs environment variables + run: | echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" - name: Build on Rust ${{ matrix.toolchain }} From 683bfb3b17b469f157f64f56c0ee93a43d98d1b8 Mon Sep 17 00:00:00 2001 From: Ian Slane Date: Mon, 15 Jul 2024 15:40:28 -0600 Subject: [PATCH 018/127] Add `payer_note` in `PaymentKind::Bolt12` Add support for including `payer_note` in `Bolt12Offer` and `PaymentKind::Bolt12` and updated the relevant code to handle where the new `payer_note` field was required. 
--- bindings/ldk_node.udl | 13 ++-- src/error.rs | 3 + src/event.rs | 4 ++ src/payment/bolt12.rs | 101 +++++++++++++++++++++++--------- src/payment/store.rs | 25 ++++++++ src/payment/unified_qr.rs | 4 +- tests/integration_tests_rust.rs | 71 ++++++++++++++++++---- 7 files changed, 171 insertions(+), 50 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 514876426..ec183e78f 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -120,17 +120,17 @@ interface Bolt11Payment { interface Bolt12Payment { [Throws=NodeError] - PaymentId send([ByRef]Offer offer, string? payer_note); + PaymentId send([ByRef]Offer offer, u64? quantity, string? payer_note); [Throws=NodeError] - PaymentId send_using_amount([ByRef]Offer offer, string? payer_note, u64 amount_msat); + PaymentId send_using_amount([ByRef]Offer offer, u64 amount_msat, u64? quantity, string? payer_note); [Throws=NodeError] - Offer receive(u64 amount_msat, [ByRef]string description); + Offer receive(u64 amount_msat, [ByRef]string description, u64? quantity); [Throws=NodeError] Offer receive_variable_amount([ByRef]string description); [Throws=NodeError] Bolt12Invoice request_refund_payment([ByRef]Refund refund); [Throws=NodeError] - Refund initiate_refund(u64 amount_msat, u32 expiry_secs); + Refund initiate_refund(u64 amount_msat, u32 expiry_secs, u64? quantity, string? payer_note); }; interface SpontaneousPayment { @@ -201,6 +201,7 @@ enum NodeError { "InvalidChannelId", "InvalidNetwork", "InvalidUri", + "InvalidQuantity", "DuplicatePayment", "UnsupportedCurrency", "InsufficientFunds", @@ -281,8 +282,8 @@ interface PaymentKind { Onchain(); Bolt11(PaymentHash hash, PaymentPreimage? preimage, PaymentSecret? secret); Bolt11Jit(PaymentHash hash, PaymentPreimage? preimage, PaymentSecret? secret, LSPFeeLimits lsp_fee_limits); - Bolt12Offer(PaymentHash? hash, PaymentPreimage? preimage, PaymentSecret? secret, OfferId offer_id); - Bolt12Refund(PaymentHash? hash, PaymentPreimage? preimage, PaymentSecret? secret); + Bolt12Offer(PaymentHash? hash, PaymentPreimage? preimage, PaymentSecret? secret, OfferId offer_id, UntrustedString? payer_note, u64? quantity); + Bolt12Refund(PaymentHash? hash, PaymentPreimage? preimage, PaymentSecret? secret, UntrustedString? payer_note, u64? quantity); Spontaneous(PaymentHash hash, PaymentPreimage? preimage); }; diff --git a/src/error.rs b/src/error.rs index 7506b013b..deaf6db31 100644 --- a/src/error.rs +++ b/src/error.rs @@ -89,6 +89,8 @@ pub enum Error { InvalidNetwork, /// The given URI is invalid. InvalidUri, + /// The given quantity is invalid. + InvalidQuantity, /// A payment with the given hash has already been initiated. DuplicatePayment, /// The provided offer was denonminated in an unsupported currency. @@ -153,6 +155,7 @@ impl fmt::Display for Error { Self::InvalidChannelId => write!(f, "The given channel ID is invalid."), Self::InvalidNetwork => write!(f, "The given network is invalid."), Self::InvalidUri => write!(f, "The given URI is invalid."), + Self::InvalidQuantity => write!(f, "The given quantity is invalid."), Self::DuplicatePayment => { write!(f, "A payment with the given hash has already been initiated.") }, diff --git a/src/event.rs b/src/event.rs index e319ab5e4..c4c5034ff 100644 --- a/src/event.rs +++ b/src/event.rs @@ -597,12 +597,16 @@ where payment_context, .. 
} => { + let payer_note = payment_context.invoice_request.payer_note_truncated; let offer_id = payment_context.offer_id; + let quantity = payment_context.invoice_request.quantity; let kind = PaymentKind::Bolt12Offer { hash: Some(payment_hash), preimage: payment_preimage, secret: Some(payment_secret), offer_id, + payer_note, + quantity, }; let payment = PaymentDetails::new( diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 40f1fc369..577dc92ae 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -12,12 +12,14 @@ use crate::types::ChannelManager; use lightning::ln::channelmanager::{PaymentId, Retry}; use lightning::offers::invoice::Bolt12Invoice; -use lightning::offers::offer::{Amount, Offer}; +use lightning::offers::offer::{Amount, Offer, Quantity}; use lightning::offers::parse::Bolt12SemanticError; use lightning::offers::refund::Refund; +use lightning::util::string::UntrustedString; use rand::RngCore; +use std::num::NonZeroU64; use std::sync::{Arc, RwLock}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; @@ -47,13 +49,15 @@ impl Bolt12Payment { /// /// If `payer_note` is `Some` it will be seen by the recipient and reflected back in the invoice /// response. - pub fn send(&self, offer: &Offer, payer_note: Option) -> Result { + /// + /// If `quantity` is `Some` it represents the number of items requested. + pub fn send( + &self, offer: &Offer, quantity: Option, payer_note: Option, + ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); } - - let quantity = None; let mut random_bytes = [0u8; 32]; rand::thread_rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); @@ -76,7 +80,7 @@ impl Bolt12Payment { &offer, quantity, None, - payer_note, + payer_note.clone(), payment_id, retry_strategy, max_total_routing_fee_msat, @@ -95,6 +99,8 @@ impl Bolt12Payment { preimage: None, secret: None, offer_id: offer.id(), + payer_note: payer_note.map(UntrustedString), + quantity, }; let payment = PaymentDetails::new( payment_id, @@ -117,6 +123,8 @@ impl Bolt12Payment { preimage: None, secret: None, offer_id: offer.id(), + payer_note: payer_note.map(UntrustedString), + quantity, }; let payment = PaymentDetails::new( payment_id, @@ -143,14 +151,13 @@ impl Bolt12Payment { /// If `payer_note` is `Some` it will be seen by the recipient and reflected back in the invoice /// response. 
pub fn send_using_amount( - &self, offer: &Offer, payer_note: Option, amount_msat: u64, + &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); } - let quantity = None; let mut random_bytes = [0u8; 32]; rand::thread_rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); @@ -177,7 +184,7 @@ impl Bolt12Payment { &offer, quantity, Some(amount_msat), - payer_note, + payer_note.clone(), payment_id, retry_strategy, max_total_routing_fee_msat, @@ -196,6 +203,8 @@ impl Bolt12Payment { preimage: None, secret: None, offer_id: offer.id(), + payer_note: payer_note.map(UntrustedString), + quantity, }; let payment = PaymentDetails::new( payment_id, @@ -218,6 +227,8 @@ impl Bolt12Payment { preimage: None, secret: None, offer_id: offer.id(), + payer_note: payer_note.map(UntrustedString), + quantity, }; let payment = PaymentDetails::new( payment_id, @@ -236,21 +247,32 @@ impl Bolt12Payment { /// Returns a payable offer that can be used to request and receive a payment of the amount /// given. - pub fn receive(&self, amount_msat: u64, description: &str) -> Result { + pub fn receive( + &self, amount_msat: u64, description: &str, quantity: Option, + ) -> Result { let offer_builder = self.channel_manager.create_offer_builder().map_err(|e| { log_error!(self.logger, "Failed to create offer builder: {:?}", e); Error::OfferCreationFailed })?; - let offer = offer_builder - .amount_msats(amount_msat) - .description(description.to_string()) - .build() - .map_err(|e| { - log_error!(self.logger, "Failed to create offer: {:?}", e); - Error::OfferCreationFailed - })?; - Ok(offer) + let mut offer = + offer_builder.amount_msats(amount_msat).description(description.to_string()); + + if let Some(qty) = quantity { + if qty == 0 { + log_error!(self.logger, "Failed to create offer: quantity can't be zero."); + return Err(Error::InvalidQuantity); + } else { + offer = offer.supported_quantity(Quantity::Bounded(NonZeroU64::new(qty).unwrap())) + }; + }; + + let finalized_offer = offer.build().map_err(|e| { + log_error!(self.logger, "Failed to create offer: {:?}", e); + Error::OfferCreationFailed + })?; + + Ok(finalized_offer) } /// Returns a payable offer that can be used to request and receive a payment for which the @@ -281,8 +303,13 @@ impl Bolt12Payment { let payment_hash = invoice.payment_hash(); let payment_id = PaymentId(payment_hash.0); - let kind = - PaymentKind::Bolt12Refund { hash: Some(payment_hash), preimage: None, secret: None }; + let kind = PaymentKind::Bolt12Refund { + hash: Some(payment_hash), + preimage: None, + secret: None, + payer_note: refund.payer_note().map(|note| UntrustedString(note.0.to_string())), + quantity: refund.quantity(), + }; let payment = PaymentDetails::new( payment_id, @@ -298,7 +325,10 @@ impl Bolt12Payment { } /// Returns a [`Refund`] object that can be used to offer a refund payment of the amount given. 
- pub fn initiate_refund(&self, amount_msat: u64, expiry_secs: u32) -> Result { + pub fn initiate_refund( + &self, amount_msat: u64, expiry_secs: u32, quantity: Option, + payer_note: Option, + ) -> Result { let mut random_bytes = [0u8; 32]; rand::thread_rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); @@ -309,7 +339,7 @@ impl Bolt12Payment { let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); let max_total_routing_fee_msat = None; - let refund = self + let mut refund_builder = self .channel_manager .create_refund_builder( amount_msat, @@ -321,17 +351,30 @@ impl Bolt12Payment { .map_err(|e| { log_error!(self.logger, "Failed to create refund builder: {:?}", e); Error::RefundCreationFailed - })? - .build() - .map_err(|e| { - log_error!(self.logger, "Failed to create refund: {:?}", e); - Error::RefundCreationFailed })?; - log_info!(self.logger, "Offering refund of {}msat", amount_msat); + if let Some(qty) = quantity { + refund_builder = refund_builder.quantity(qty); + } + + if let Some(note) = payer_note.clone() { + refund_builder = refund_builder.payer_note(note); + } + + let refund = refund_builder.build().map_err(|e| { + log_error!(self.logger, "Failed to create refund: {:?}", e); + Error::RefundCreationFailed + })?; - let kind = PaymentKind::Bolt12Refund { hash: None, preimage: None, secret: None }; + log_info!(self.logger, "Offering refund of {}msat", amount_msat); + let kind = PaymentKind::Bolt12Refund { + hash: None, + preimage: None, + secret: None, + payer_note: payer_note.map(|note| UntrustedString(note)), + quantity, + }; let payment = PaymentDetails::new( payment_id, kind, diff --git a/src/payment/store.rs b/src/payment/store.rs index eb3ac091f..3c35043ce 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -11,6 +11,7 @@ use lightning::ln::msgs::DecodeError; use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::offers::offer::OfferId; use lightning::util::ser::{Readable, Writeable}; +use lightning::util::string::UntrustedString; use lightning::{ _init_and_read_len_prefixed_tlv_fields, impl_writeable_tlv_based, impl_writeable_tlv_based_enum, write_tlv_fields, @@ -212,6 +213,18 @@ pub enum PaymentKind { secret: Option, /// The ID of the offer this payment is for. offer_id: OfferId, + /// The payer note for the payment. + /// + /// Truncated to [`PAYER_NOTE_LIMIT`] characters. + /// + /// This will always be `None` for payments serialized with version `v0.3.0`. + /// + /// [`PAYER_NOTE_LIMIT`]: lightning::offers::invoice_request::PAYER_NOTE_LIMIT + payer_note: Option, + /// The quantity of an item requested in the offer. + /// + /// This will always be `None` for payments serialized with version `v0.3.0`. + quantity: Option, }, /// A [BOLT 12] 'refund' payment, i.e., a payment for a [`Refund`]. /// @@ -224,6 +237,14 @@ pub enum PaymentKind { preimage: Option, /// The secret used by the payment. secret: Option, + /// The payer note for the refund payment. + /// + /// This will always be `None` for payments serialized with version `v0.3.0`. + payer_note: Option, + /// The quantity of an item that the refund is for. + /// + /// This will always be `None` for payments serialized with version `v0.3.0`. + quantity: Option, }, /// A spontaneous ("keysend") payment. 
Spontaneous { @@ -249,7 +270,9 @@ impl_writeable_tlv_based_enum!(PaymentKind, }, (6, Bolt12Offer) => { (0, hash, option), + (1, payer_note, option), (2, preimage, option), + (3, quantity, option), (4, secret, option), (6, offer_id, required), }, @@ -259,7 +282,9 @@ impl_writeable_tlv_based_enum!(PaymentKind, }, (10, Bolt12Refund) => { (0, hash, option), + (1, payer_note, option), (2, preimage, option), + (3, quantity, option), (4, secret, option), }; ); diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs index a4551eb8a..b40be5521 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified_qr.rs @@ -92,7 +92,7 @@ impl UnifiedQrPayment { let amount_msats = amount_sats * 1_000; - let bolt12_offer = match self.bolt12_payment.receive(amount_msats, description) { + let bolt12_offer = match self.bolt12_payment.receive(amount_msats, description, None) { Ok(offer) => Some(offer), Err(e) => { log_error!(self.logger, "Failed to create offer: {}", e); @@ -136,7 +136,7 @@ impl UnifiedQrPayment { uri.clone().require_network(self.config.network).map_err(|_| Error::InvalidNetwork)?; if let Some(offer) = uri_network_checked.extras.bolt12_offer { - match self.bolt12_payment.send(&offer, None) { + match self.bolt12_payment.send(&offer, None, None) { Ok(payment_id) => return Ok(QrPaymentResult::Bolt12 { payment_id }), Err(e) => log_error!(self.logger, "Failed to send BOLT12 offer: {:?}. This is part of a unified QR code payment. Falling back to the BOLT11 invoice.", e), } diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 5a918762a..ec2b3d917 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -424,17 +424,31 @@ fn simple_bolt12_send_receive() { std::thread::sleep(std::time::Duration::from_secs(1)); let expected_amount_msat = 100_000_000; - let offer = node_b.bolt12_payment().receive(expected_amount_msat, "asdf").unwrap(); - let payment_id = node_a.bolt12_payment().send(&offer, None).unwrap(); + let offer = node_b.bolt12_payment().receive(expected_amount_msat, "asdf", Some(1)).unwrap(); + let expected_quantity = Some(1); + let expected_payer_note = Some("Test".to_string()); + let payment_id = node_a + .bolt12_payment() + .send(&offer, expected_quantity, expected_payer_note.clone()) + .unwrap(); expect_payment_successful_event!(node_a, Some(payment_id), None); let node_a_payments = node_a.list_payments(); assert_eq!(node_a_payments.len(), 1); match node_a_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { hash, preimage, secret: _, offer_id } => { + PaymentKind::Bolt12Offer { + hash, + preimage, + secret: _, + offer_id, + quantity: ref qty, + payer_note: ref note, + } => { assert!(hash.is_some()); assert!(preimage.is_some()); assert_eq!(offer_id, offer.id()); + assert_eq!(&expected_quantity, qty); + assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 //API currently doesn't allow to do that. }, @@ -448,7 +462,7 @@ fn simple_bolt12_send_receive() { let node_b_payments = node_b.list_payments(); assert_eq!(node_b_payments.len(), 1); match node_b_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id } => { + PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id, .. 
} => { assert!(hash.is_some()); assert!(preimage.is_some()); assert!(secret.is_some()); @@ -464,22 +478,40 @@ fn simple_bolt12_send_receive() { let offer_amount_msat = 100_000_000; let less_than_offer_amount = offer_amount_msat - 10_000; let expected_amount_msat = offer_amount_msat + 10_000; - let offer = node_b.bolt12_payment().receive(offer_amount_msat, "asdf").unwrap(); + let offer = node_b.bolt12_payment().receive(offer_amount_msat, "asdf", Some(1)).unwrap(); + let expected_quantity = Some(1); + let expected_payer_note = Some("Test".to_string()); assert!(node_a .bolt12_payment() - .send_using_amount(&offer, None, less_than_offer_amount) + .send_using_amount(&offer, less_than_offer_amount, None, None) .is_err()); - let payment_id = - node_a.bolt12_payment().send_using_amount(&offer, None, expected_amount_msat).unwrap(); + let payment_id = node_a + .bolt12_payment() + .send_using_amount( + &offer, + expected_amount_msat, + expected_quantity, + expected_payer_note.clone(), + ) + .unwrap(); expect_payment_successful_event!(node_a, Some(payment_id), None); let node_a_payments = node_a.list_payments_with_filter(|p| p.id == payment_id); assert_eq!(node_a_payments.len(), 1); let payment_hash = match node_a_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { hash, preimage, secret: _, offer_id } => { + PaymentKind::Bolt12Offer { + hash, + preimage, + secret: _, + offer_id, + quantity: ref qty, + payer_note: ref note, + } => { assert!(hash.is_some()); assert!(preimage.is_some()); assert_eq!(offer_id, offer.id()); + assert_eq!(&expected_quantity, qty); + assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 //API currently doesn't allow to do that. hash.unwrap() @@ -495,7 +527,7 @@ fn simple_bolt12_send_receive() { let node_b_payments = node_b.list_payments_with_filter(|p| p.id == node_b_payment_id); assert_eq!(node_b_payments.len(), 1); match node_b_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id } => { + PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id, .. } => { assert!(hash.is_some()); assert!(preimage.is_some()); assert!(secret.is_some()); @@ -509,7 +541,12 @@ fn simple_bolt12_send_receive() { // Now node_b refunds the amount node_a just overpaid. 
let overpaid_amount = expected_amount_msat - offer_amount_msat; - let refund = node_b.bolt12_payment().initiate_refund(overpaid_amount, 3600).unwrap(); + let expected_quantity = Some(1); + let expected_payer_note = Some("Test".to_string()); + let refund = node_b + .bolt12_payment() + .initiate_refund(overpaid_amount, 3600, expected_quantity, expected_payer_note.clone()) + .unwrap(); let invoice = node_a.bolt12_payment().request_refund_payment(&refund).unwrap(); expect_payment_received_event!(node_a, overpaid_amount); @@ -523,9 +560,17 @@ fn simple_bolt12_send_receive() { let node_b_payments = node_b.list_payments_with_filter(|p| p.id == node_b_payment_id); assert_eq!(node_b_payments.len(), 1); match node_b_payments.first().unwrap().kind { - PaymentKind::Bolt12Refund { hash, preimage, secret: _ } => { + PaymentKind::Bolt12Refund { + hash, + preimage, + secret: _, + quantity: ref qty, + payer_note: ref note, + } => { assert!(hash.is_some()); assert!(preimage.is_some()); + assert_eq!(&expected_quantity, qty); + assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0) //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 //API currently doesn't allow to do that. }, @@ -539,7 +584,7 @@ fn simple_bolt12_send_receive() { let node_a_payments = node_a.list_payments_with_filter(|p| p.id == node_a_payment_id); assert_eq!(node_a_payments.len(), 1); match node_a_payments.first().unwrap().kind { - PaymentKind::Bolt12Refund { hash, preimage, secret } => { + PaymentKind::Bolt12Refund { hash, preimage, secret, .. } => { assert!(hash.is_some()); assert!(preimage.is_some()); assert!(secret.is_some()); From c5b8d6e4d853c83c9c1f8dbbd14d8e3e599588da Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 22 Aug 2024 09:40:00 +0200 Subject: [PATCH 019/127] Return a non-`Arc`ed `Bolt12Payment` for non-`uniffi` .. somehow this was previously overlooked. --- src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 206fe52d8..eb1f38d2e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1003,13 +1003,13 @@ impl Node { /// /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md #[cfg(not(feature = "uniffi"))] - pub fn bolt12_payment(&self) -> Arc { - Arc::new(Bolt12Payment::new( + pub fn bolt12_payment(&self) -> Bolt12Payment { + Bolt12Payment::new( Arc::clone(&self.runtime), Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), Arc::clone(&self.logger), - )) + ) } /// Returns a payment handler allowing to create and pay [BOLT 12] offers and refunds. From 5fe90d19ab021639442b93a5ab6c55a7ef25adb8 Mon Sep 17 00:00:00 2001 From: Ian Slane Date: Thu, 22 Aug 2024 10:42:51 -0600 Subject: [PATCH 020/127] Introduce `SendingParameters` struct `SendingParameters` allows users to override opinionated values while routing a payment such as `max_total_routing_fees`, `max_path_count` `max_total_cltv_delta`, and `max_channel_saturation_power_of_half` Updated docs for `max_channel_saturation_power_of_half` for clarity. --- bindings/ldk_node.udl | 7 +++++++ src/payment/mod.rs | 49 +++++++++++++++++++++++++++++++++++++++++++ src/uniffi_types.rs | 2 +- 3 files changed, 57 insertions(+), 1 deletion(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index ec183e78f..420480fde 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -319,6 +319,13 @@ dictionary PaymentDetails { u64 latest_update_timestamp; }; +dictionary SendingParameters { + u64? max_total_routing_fee_msat; + u32? 
max_total_cltv_expiry_delta; + u8? max_path_count; + u8? max_channel_saturation_power_of_half; +}; + [NonExhaustive] enum Network { "Bitcoin", diff --git a/src/payment/mod.rs b/src/payment/mod.rs index ac4fc5663..d1b12de99 100644 --- a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -13,3 +13,52 @@ pub use onchain::OnchainPayment; pub use spontaneous::SpontaneousPayment; pub use store::{LSPFeeLimits, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; pub use unified_qr::{QrPaymentResult, UnifiedQrPayment}; + +/// Represents information used to route a payment. +#[derive(Clone, Debug, PartialEq)] +pub struct SendingParameters { + /// The maximum total fees, in millisatoshi, that may accrue during route finding. + /// + /// This limit also applies to the total fees that may arise while retrying failed payment + /// paths. + /// + /// Note that values below a few sats may result in some paths being spuriously ignored. + pub max_total_routing_fee_msat: Option, + + /// The maximum total CLTV delta we accept for the route. + /// + /// Defaults to [`DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA`]. + /// + /// [`DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA`]: lightning::routing::router::DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA + pub max_total_cltv_expiry_delta: Option, + + /// The maximum number of paths that may be used by (MPP) payments. + /// + /// Defaults to [`DEFAULT_MAX_PATH_COUNT`]. + /// + /// [`DEFAULT_MAX_PATH_COUNT`]: lightning::routing::router::DEFAULT_MAX_PATH_COUNT + pub max_path_count: Option, + + /// Selects the maximum share of a channel's total capacity which will be sent over a channel, + /// as a power of 1/2. + /// + /// A higher value prefers to send the payment using more MPP parts whereas + /// a lower value prefers to send larger MPP parts, potentially saturating channels and + /// increasing failure probability for those paths. + /// + /// Note that this restriction will be relaxed during pathfinding after paths which meet this + /// restriction have been found. While paths which meet this criteria will be searched for, it + /// is ultimately up to the scorer to select them over other paths. + /// + /// Examples: + /// + /// | Value | Max Proportion of Channel Capacity Used | + /// |-------|-----------------------------------------| + /// | 0 | Up to 100% of the channel’s capacity | + /// | 1 | Up to 50% of the channel’s capacity | + /// | 2 | Up to 25% of the channel’s capacity | + /// | 3 | Up to 12.5% of the channel’s capacity | + /// + /// Default value: 2 + pub max_channel_saturation_power_of_half: Option, +} diff --git a/src/uniffi_types.rs b/src/uniffi_types.rs index 7c2142091..22546e03c 100644 --- a/src/uniffi_types.rs +++ b/src/uniffi_types.rs @@ -5,7 +5,7 @@ pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; pub use crate::payment::store::{LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus}; -pub use crate::payment::QrPaymentResult; +pub use crate::payment::{QrPaymentResult, SendingParameters}; pub use lightning::events::{ClosureReason, PaymentFailureReason}; pub use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; From bf4ddff3c2f73a261dab69acf7598f65879dfd4b Mon Sep 17 00:00:00 2001 From: Ian Slane Date: Mon, 12 Aug 2024 10:26:52 -0600 Subject: [PATCH 021/127] Add default `SendingParameters` to node config Introduced `sending_parameters_config` to `Config` for node-wide routing and pathfinding configuration. 
Also, added default values for `SendingParameters` to ensure reasonable defaults when no custom settings are provided by the user. --- bindings/ldk_node.udl | 1 + src/config.rs | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 420480fde..b8d4161eb 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -16,6 +16,7 @@ dictionary Config { u64 probing_liquidity_limit_multiplier; LogLevel log_level; AnchorChannelsConfig? anchor_channels_config; + SendingParameters? sending_parameters_config; }; dictionary AnchorChannelsConfig { diff --git a/src/config.rs b/src/config.rs index d0e72080f..d3e9fb2af 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,5 +1,7 @@ use std::time::Duration; +use crate::payment::SendingParameters; + use lightning::ln::msgs::SocketAddress; use lightning::util::config::UserConfig; use lightning::util::logger::Level as LogLevel; @@ -86,6 +88,7 @@ pub(crate) const WALLET_KEYS_SEED_LEN: usize = 64; /// | `probing_liquidity_limit_multiplier` | 3 | /// | `log_level` | Debug | /// | `anchor_channels_config` | Some(..) | +/// | `sending_parameters_config` | None | /// /// See [`AnchorChannelsConfig`] for more information on its respective default values. /// @@ -147,6 +150,12 @@ pub struct Config { /// closure. We *will* however still try to get the Anchor spending transactions confirmed /// on-chain with the funds available. pub anchor_channels_config: Option, + + /// Configuration options for payment routing and pathfinding. + /// + /// Setting the `SendingParameters` provides flexibility to customize how payments are routed, + /// including setting limits on routing fees, CLTV expiry, and channel utilization. + pub sending_parameters_config: Option, } impl Default for Config { @@ -164,6 +173,7 @@ impl Default for Config { probing_liquidity_limit_multiplier: DEFAULT_PROBING_LIQUIDITY_LIMIT_MULTIPLIER, log_level: DEFAULT_LOG_LEVEL, anchor_channels_config: Some(AnchorChannelsConfig::default()), + sending_parameters_config: None, } } } From d6e6ff7fde4902d70f779c3756fb520c27e87bcd Mon Sep 17 00:00:00 2001 From: Ian Slane Date: Thu, 1 Aug 2024 12:58:26 -0600 Subject: [PATCH 022/127] Add `SendingParameters` to bolt11 `send` Updated `Bolt11Payment` `send` method to accept `SendingParameters`, as a parameter. If the user provided sending params the default values are overridden. 
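A minimal sketch of the resulting call site (assuming a started `node` and an already-parsed `invoice`; both names are placeholders). Per the new doc comment, fields that are left unset fall back to the node's configured defaults:

use ldk_node::payment::SendingParameters;

let params = SendingParameters {
    // Cap the total routing fees for this payment at 5_000 msat.
    max_total_routing_fee_msat: Some(5_000),
    // Bound the total CLTV expiry delta of the route.
    max_total_cltv_expiry_delta: Some(1_008),
    // Leave the remaining knobs at their defaults.
    max_path_count: None,
    max_channel_saturation_power_of_half: None,
};

let payment_id = node.bolt11_payment().send(&invoice, Some(params)).unwrap();

Passing `None` for the whole struct keeps the previous behaviour. Subsequent patches in this series add the same parameter to `send_using_amount` and to the spontaneous-payment `send`, and a later patch reworks `max_total_routing_fee_msat` to allow an explicit `None` override.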
--- .../lightningdevkit/ldknode/LibraryTest.kt | 2 +- bindings/ldk_node.udl | 2 +- src/lib.rs | 2 +- src/payment/bolt11.rs | 46 ++++++++++++++++++- src/payment/unified_qr.rs | 2 +- tests/common/mod.rs | 12 ++--- tests/integration_tests_cln.rs | 2 +- tests/integration_tests_rust.rs | 11 ++++- 8 files changed, 64 insertions(+), 15 deletions(-) diff --git a/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt b/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt index 6f863e637..b629793cd 100644 --- a/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt +++ b/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt @@ -224,7 +224,7 @@ class LibraryTest { val invoice = node2.bolt11Payment().receive(2500000u, "asdf", 9217u) - node1.bolt11Payment().send(invoice) + node1.bolt11Payment().send(invoice, null) val paymentSuccessfulEvent = node1.waitNextEvent() println("Got event: $paymentSuccessfulEvent") diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index b8d4161eb..a53c3fd54 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -94,7 +94,7 @@ interface Node { interface Bolt11Payment { [Throws=NodeError] - PaymentId send([ByRef]Bolt11Invoice invoice); + PaymentId send([ByRef]Bolt11Invoice invoice, SendingParameters? sending_parameters); [Throws=NodeError] PaymentId send_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat); [Throws=NodeError] diff --git a/src/lib.rs b/src/lib.rs index eb1f38d2e..a1d88ef4c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -56,7 +56,7 @@ //! node.event_handled(); //! //! let invoice = Bolt11Invoice::from_str("INVOICE_STR").unwrap(); -//! node.bolt11_payment().send(&invoice).unwrap(); +//! node.bolt11_payment().send(&invoice, None).unwrap(); //! //! node.stop().unwrap(); //! } diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index 3641d6870..dae77fb6d 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -11,6 +11,7 @@ use crate::payment::store::{ LSPFeeLimits, PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus, PaymentStore, }; +use crate::payment::SendingParameters; use crate::peer_store::{PeerInfo, PeerStore}; use crate::types::{ChannelManager, KeysManager}; @@ -69,13 +70,20 @@ impl Bolt11Payment { } /// Send a payment given an invoice. - pub fn send(&self, invoice: &Bolt11Invoice) -> Result { + /// + /// If [`SendingParameters`] are provided they will override the node's default routing parameters + /// on a per-field basis. Each field in `SendingParameters` that is set replaces the corresponding + /// default value. Fields that are not set fall back to the node's configured defaults. If no + /// `SendingParameters` are provided, the method fully relies on these defaults. + pub fn send( + &self, invoice: &Bolt11Invoice, sending_parameters: Option, + ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); } - let (payment_hash, recipient_onion, route_params) = payment::payment_parameters_from_invoice(&invoice).map_err(|_| { + let (payment_hash, recipient_onion, mut route_params) = payment::payment_parameters_from_invoice(&invoice).map_err(|_| { log_error!(self.logger, "Failed to send payment due to the given invoice being \"zero-amount\". 
Please use send_using_amount instead."); Error::InvalidInvoice })?; @@ -90,6 +98,40 @@ impl Bolt11Payment { } } + if let Some(user_set_params) = sending_parameters { + if let Some(mut default_params) = + self.config.sending_parameters_config.as_ref().cloned() + { + default_params.max_total_routing_fee_msat = user_set_params + .max_total_routing_fee_msat + .or(default_params.max_total_routing_fee_msat); + default_params.max_total_cltv_expiry_delta = user_set_params + .max_total_cltv_expiry_delta + .or(default_params.max_total_cltv_expiry_delta); + default_params.max_path_count = + user_set_params.max_path_count.or(default_params.max_path_count); + default_params.max_channel_saturation_power_of_half = user_set_params + .max_channel_saturation_power_of_half + .or(default_params.max_channel_saturation_power_of_half); + + route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; + route_params.payment_params.max_total_cltv_expiry_delta = + default_params.max_total_cltv_expiry_delta.unwrap_or_default(); + route_params.payment_params.max_path_count = + default_params.max_path_count.unwrap_or_default(); + route_params.payment_params.max_channel_saturation_power_of_half = + default_params.max_channel_saturation_power_of_half.unwrap_or_default(); + } + } else if let Some(default_params) = &self.config.sending_parameters_config { + route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; + route_params.payment_params.max_total_cltv_expiry_delta = + default_params.max_total_cltv_expiry_delta.unwrap_or_default(); + route_params.payment_params.max_path_count = + default_params.max_path_count.unwrap_or_default(); + route_params.payment_params.max_channel_saturation_power_of_half = + default_params.max_channel_saturation_power_of_half.unwrap_or_default(); + } + let payment_secret = Some(*invoice.payment_secret()); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs index b40be5521..b93610115 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified_qr.rs @@ -143,7 +143,7 @@ impl UnifiedQrPayment { } if let Some(invoice) = uri_network_checked.extras.bolt11_invoice { - match self.bolt11_invoice.send(&invoice) { + match self.bolt11_invoice.send(&invoice, None) { Ok(payment_id) => return Ok(QrPaymentResult::Bolt11 { payment_id }), Err(e) => log_error!(self.logger, "Failed to send BOLT11 invoice: {:?}. This is part of a unified QR code payment. Falling back to the on-chain transaction.", e), } diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 5959bd58e..58a21619b 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -493,8 +493,8 @@ pub(crate) fn do_channel_full_cycle( let invoice = node_b.bolt11_payment().receive(invoice_amount_1_msat, &"asdf", 9217).unwrap(); println!("\nA send"); - let payment_id = node_a.bolt11_payment().send(&invoice).unwrap(); - assert_eq!(node_a.bolt11_payment().send(&invoice), Err(NodeError::DuplicatePayment)); + let payment_id = node_a.bolt11_payment().send(&invoice, None).unwrap(); + assert_eq!(node_a.bolt11_payment().send(&invoice, None), Err(NodeError::DuplicatePayment)); assert_eq!(node_a.list_payments().first().unwrap().id, payment_id); @@ -526,7 +526,7 @@ pub(crate) fn do_channel_full_cycle( assert!(matches!(node_b.payment(&payment_id).unwrap().kind, PaymentKind::Bolt11 { .. })); // Assert we fail duplicate outbound payments and check the status hasn't changed. 
- assert_eq!(Err(NodeError::DuplicatePayment), node_a.bolt11_payment().send(&invoice)); + assert_eq!(Err(NodeError::DuplicatePayment), node_a.bolt11_payment().send(&invoice, None)); assert_eq!(node_a.payment(&payment_id).unwrap().status, PaymentStatus::Succeeded); assert_eq!(node_a.payment(&payment_id).unwrap().direction, PaymentDirection::Outbound); assert_eq!(node_a.payment(&payment_id).unwrap().amount_msat, Some(invoice_amount_1_msat)); @@ -579,7 +579,7 @@ pub(crate) fn do_channel_full_cycle( let determined_amount_msat = 2345_678; assert_eq!( Err(NodeError::InvalidInvoice), - node_a.bolt11_payment().send(&variable_amount_invoice) + node_a.bolt11_payment().send(&variable_amount_invoice, None) ); println!("\nA send_using_amount"); let payment_id = node_a @@ -616,7 +616,7 @@ pub(crate) fn do_channel_full_cycle( .bolt11_payment() .receive_for_hash(invoice_amount_3_msat, &"asdf", 9217, manual_payment_hash) .unwrap(); - let manual_payment_id = node_a.bolt11_payment().send(&manual_invoice).unwrap(); + let manual_payment_id = node_a.bolt11_payment().send(&manual_invoice, None).unwrap(); let claimable_amount_msat = expect_payment_claimable_event!( node_b, @@ -654,7 +654,7 @@ pub(crate) fn do_channel_full_cycle( .bolt11_payment() .receive_for_hash(invoice_amount_3_msat, &"asdf", 9217, manual_fail_payment_hash) .unwrap(); - let manual_fail_payment_id = node_a.bolt11_payment().send(&manual_fail_invoice).unwrap(); + let manual_fail_payment_id = node_a.bolt11_payment().send(&manual_fail_invoice, None).unwrap(); expect_payment_claimable_event!( node_b, diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index 7aea13620..95d8f1136 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -98,7 +98,7 @@ fn test_cln() { cln_client.invoice(Some(10_000_000), &rand_label, &rand_label, None, None, None).unwrap(); let parsed_invoice = Bolt11Invoice::from_str(&cln_invoice.bolt11).unwrap(); - node.bolt11_payment().send(&parsed_invoice).unwrap(); + node.bolt11_payment().send(&parsed_invoice, None).unwrap(); common::expect_event!(node, PaymentSuccessful); let cln_listed_invoices = cln_client.listinvoices(Some(&rand_label), None, None, None).unwrap().invoices; diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index ec2b3d917..67de1c9da 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -7,7 +7,7 @@ use common::{ setup_node, setup_two_nodes, wait_for_tx, TestSyncStore, }; -use ldk_node::payment::{PaymentKind, QrPaymentResult}; +use ldk_node::payment::{PaymentKind, QrPaymentResult, SendingParameters}; use ldk_node::{Builder, Event, NodeError}; use lightning::ln::channelmanager::PaymentId; @@ -156,8 +156,15 @@ fn multi_hop_sending() { // Sleep a bit for gossip to propagate. 
std::thread::sleep(std::time::Duration::from_secs(1)); + let sending_params = SendingParameters { + max_total_routing_fee_msat: Some(75_000), + max_total_cltv_expiry_delta: Some(1000), + max_path_count: Some(10), + max_channel_saturation_power_of_half: Some(2), + }; + let invoice = nodes[4].bolt11_payment().receive(2_500_000, &"asdf", 9217).unwrap(); - nodes[0].bolt11_payment().send(&invoice).unwrap(); + nodes[0].bolt11_payment().send(&invoice, Some(sending_params)).unwrap(); let payment_id = expect_payment_received_event!(&nodes[4], 2_500_000); let fee_paid_msat = Some(2000); From 2b85ba9e33de6f134bfad1d2f566835a205b04f1 Mon Sep 17 00:00:00 2001 From: Ian Slane Date: Thu, 1 Aug 2024 13:32:33 -0600 Subject: [PATCH 023/127] Add `SendingParameters` to bolt11 `send_using_amount` Added the optional `SendingParameters` to `send_using_amount` in `Bolt11Payment`. If the user provides sending params the values will be overridden otherwise they'll use the default values. --- bindings/ldk_node.udl | 2 +- src/payment/bolt11.rs | 44 +++++++++++++++++++++++++++++++++++++++++-- tests/common/mod.rs | 6 +++--- 3 files changed, 46 insertions(+), 6 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index a53c3fd54..1e968921d 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -96,7 +96,7 @@ interface Bolt11Payment { [Throws=NodeError] PaymentId send([ByRef]Bolt11Invoice invoice, SendingParameters? sending_parameters); [Throws=NodeError] - PaymentId send_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat); + PaymentId send_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat, SendingParameters? sending_parameters); [Throws=NodeError] void send_probes([ByRef]Bolt11Invoice invoice); [Throws=NodeError] diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index dae77fb6d..e15273232 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -190,14 +190,20 @@ impl Bolt11Payment { } } - /// Send a payment given an invoice and an amount in millisatoshi. + /// Send a payment given an invoice and an amount in millisatoshis. /// /// This will fail if the amount given is less than the value required by the given invoice. /// /// This can be used to pay a so-called "zero-amount" invoice, i.e., an invoice that leaves the /// amount paid to be determined by the user. + /// + /// If [`SendingParameters`] are provided they will override the node's default routing parameters + /// on a per-field basis. Each field in `SendingParameters` that is set replaces the corresponding + /// default value. Fields that are not set fall back to the node's configured defaults. If no + /// `SendingParameters` are provided, the method fully relies on these defaults. 
pub fn send_using_amount( &self, invoice: &Bolt11Invoice, amount_msat: u64, + sending_parameters: Option, ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { @@ -238,9 +244,43 @@ impl Bolt11Payment { .with_bolt11_features(features.clone()) .map_err(|_| Error::InvalidInvoice)?; } - let route_params = + let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amount_msat); + if let Some(user_set_params) = sending_parameters { + if let Some(mut default_params) = + self.config.sending_parameters_config.as_ref().cloned() + { + default_params.max_total_routing_fee_msat = user_set_params + .max_total_routing_fee_msat + .or(default_params.max_total_routing_fee_msat); + default_params.max_total_cltv_expiry_delta = user_set_params + .max_total_cltv_expiry_delta + .or(default_params.max_total_cltv_expiry_delta); + default_params.max_path_count = + user_set_params.max_path_count.or(default_params.max_path_count); + default_params.max_channel_saturation_power_of_half = user_set_params + .max_channel_saturation_power_of_half + .or(default_params.max_channel_saturation_power_of_half); + + route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; + route_params.payment_params.max_total_cltv_expiry_delta = + default_params.max_total_cltv_expiry_delta.unwrap_or_default(); + route_params.payment_params.max_path_count = + default_params.max_path_count.unwrap_or_default(); + route_params.payment_params.max_channel_saturation_power_of_half = + default_params.max_channel_saturation_power_of_half.unwrap_or_default(); + } + } else if let Some(default_params) = &self.config.sending_parameters_config { + route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; + route_params.payment_params.max_total_cltv_expiry_delta = + default_params.max_total_cltv_expiry_delta.unwrap_or_default(); + route_params.payment_params.max_path_count = + default_params.max_path_count.unwrap_or_default(); + route_params.payment_params.max_channel_saturation_power_of_half = + default_params.max_channel_saturation_power_of_half.unwrap_or_default(); + } + let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); let recipient_fields = RecipientOnionFields::secret_only(*payment_secret); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 58a21619b..b026a233a 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -541,7 +541,7 @@ pub(crate) fn do_channel_full_cycle( let underpaid_amount = invoice_amount_2_msat - 1; assert_eq!( Err(NodeError::InvalidAmount), - node_a.bolt11_payment().send_using_amount(&invoice, underpaid_amount) + node_a.bolt11_payment().send_using_amount(&invoice, underpaid_amount, None) ); println!("\nB overpaid receive"); @@ -550,7 +550,7 @@ pub(crate) fn do_channel_full_cycle( println!("\nA overpaid send"); let payment_id = - node_a.bolt11_payment().send_using_amount(&invoice, overpaid_amount_msat).unwrap(); + node_a.bolt11_payment().send_using_amount(&invoice, overpaid_amount_msat, None).unwrap(); expect_event!(node_a, PaymentSuccessful); let received_amount = match node_b.wait_next_event() { ref e @ Event::PaymentReceived { amount_msat, .. 
} => { @@ -584,7 +584,7 @@ pub(crate) fn do_channel_full_cycle( println!("\nA send_using_amount"); let payment_id = node_a .bolt11_payment() - .send_using_amount(&variable_amount_invoice, determined_amount_msat) + .send_using_amount(&variable_amount_invoice, determined_amount_msat, None) .unwrap(); expect_event!(node_a, PaymentSuccessful); From 48223362249e7ce68dea2b2293c7985d78c17848 Mon Sep 17 00:00:00 2001 From: Ian Slane Date: Thu, 1 Aug 2024 14:54:51 -0600 Subject: [PATCH 024/127] Add `SendingParameters` to spontaneous `send` Added optional SendingParameters to the send method in SpontaneousPayment. If the user provides sending params the values will be overridden otherwise they remain the same. --- bindings/ldk_node.udl | 2 +- src/payment/spontaneous.rs | 49 +++++++++++++++++++++++++++++++++++--- tests/common/mod.rs | 2 +- 3 files changed, 48 insertions(+), 5 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 1e968921d..6e248e3ff 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -136,7 +136,7 @@ interface Bolt12Payment { interface SpontaneousPayment { [Throws=NodeError] - PaymentId send(u64 amount_msat, PublicKey node_id); + PaymentId send(u64 amount_msat, PublicKey node_id, SendingParameters? sending_parameters); [Throws=NodeError] void send_probes(u64 amount_msat, PublicKey node_id); }; diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 13047eab9..a259b685c 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -6,6 +6,7 @@ use crate::logger::{log_error, log_info, FilesystemLogger, Logger}; use crate::payment::store::{ PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, PaymentStore, }; +use crate::payment::SendingParameters; use crate::types::{ChannelManager, KeysManager}; use lightning::ln::channelmanager::{PaymentId, RecipientOnionFields, Retry, RetryableSendFailure}; @@ -41,8 +42,15 @@ impl SpontaneousPayment { Self { runtime, channel_manager, keys_manager, payment_store, config, logger } } - /// Send a spontaneous, aka. "keysend", payment - pub fn send(&self, amount_msat: u64, node_id: PublicKey) -> Result { + /// Send a spontaneous aka. "keysend", payment. + /// + /// If [`SendingParameters`] are provided they will override the node's default routing parameters + /// on a per-field basis. Each field in `SendingParameters` that is set replaces the corresponding + /// default value. Fields that are not set fall back to the node's configured defaults. If no + /// `SendingParameters` are provided, the method fully relies on these defaults. 
+ pub fn send( + &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, + ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); @@ -61,10 +69,45 @@ impl SpontaneousPayment { } } - let route_params = RouteParameters::from_payment_params_and_value( + let mut route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::from_node_id(node_id, self.config.default_cltv_expiry_delta), amount_msat, ); + + if let Some(user_set_params) = sending_parameters { + if let Some(mut default_params) = + self.config.sending_parameters_config.as_ref().cloned() + { + default_params.max_total_routing_fee_msat = user_set_params + .max_total_routing_fee_msat + .or(default_params.max_total_routing_fee_msat); + default_params.max_total_cltv_expiry_delta = user_set_params + .max_total_cltv_expiry_delta + .or(default_params.max_total_cltv_expiry_delta); + default_params.max_path_count = + user_set_params.max_path_count.or(default_params.max_path_count); + default_params.max_channel_saturation_power_of_half = user_set_params + .max_channel_saturation_power_of_half + .or(default_params.max_channel_saturation_power_of_half); + + route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; + route_params.payment_params.max_total_cltv_expiry_delta = + default_params.max_total_cltv_expiry_delta.unwrap_or_default(); + route_params.payment_params.max_path_count = + default_params.max_path_count.unwrap_or_default(); + route_params.payment_params.max_channel_saturation_power_of_half = + default_params.max_channel_saturation_power_of_half.unwrap_or_default(); + } + } else if let Some(default_params) = &self.config.sending_parameters_config { + route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; + route_params.payment_params.max_total_cltv_expiry_delta = + default_params.max_total_cltv_expiry_delta.unwrap_or_default(); + route_params.payment_params.max_path_count = + default_params.max_path_count.unwrap_or_default(); + route_params.payment_params.max_channel_saturation_power_of_half = + default_params.max_channel_saturation_power_of_half.unwrap_or_default(); + } + let recipient_fields = RecipientOnionFields::spontaneous_empty(); match self.channel_manager.send_spontaneous_payment_with_retry( diff --git a/tests/common/mod.rs b/tests/common/mod.rs index b026a233a..6c4dbc1d1 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -695,7 +695,7 @@ pub(crate) fn do_channel_full_cycle( println!("\nA send_spontaneous_payment"); let keysend_amount_msat = 2500_000; let keysend_payment_id = - node_a.spontaneous_payment().send(keysend_amount_msat, node_b.node_id()).unwrap(); + node_a.spontaneous_payment().send(keysend_amount_msat, node_b.node_id(), None).unwrap(); expect_event!(node_a, PaymentSuccessful); let received_keysend_amount = match node_b.wait_next_event() { ref e @ Event::PaymentReceived { amount_msat, .. 
} => { From 76d06985704a4c81862b231cc1a08e71fe7595fd Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 27 Aug 2024 10:12:10 +0200 Subject: [PATCH 025/127] Fix python `send` --- bindings/python/src/ldk_node/test_ldk_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/python/src/ldk_node/test_ldk_node.py b/bindings/python/src/ldk_node/test_ldk_node.py index a593078c1..92c4bf2d1 100644 --- a/bindings/python/src/ldk_node/test_ldk_node.py +++ b/bindings/python/src/ldk_node/test_ldk_node.py @@ -186,7 +186,7 @@ def test_channel_full_cycle(self): node_2.event_handled() invoice = node_2.bolt11_payment().receive(2500000, "asdf", 9217) - node_1.bolt11_payment().send(invoice) + node_1.bolt11_payment().send(invoice, None) payment_successful_event_1 = node_1.wait_next_event() assert isinstance(payment_successful_event_1, Event.PAYMENT_SUCCESSFUL) From 4235a1940b22af0e37fa0ea67e6b88e1585438db Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 27 Aug 2024 10:13:14 +0200 Subject: [PATCH 026/127] Reorder `config` imports --- src/config.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/config.rs b/src/config.rs index d3e9fb2af..065fc18ce 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,5 +1,3 @@ -use std::time::Duration; - use crate::payment::SendingParameters; use lightning::ln::msgs::SocketAddress; @@ -9,6 +7,8 @@ use lightning::util::logger::Level as LogLevel; use bitcoin::secp256k1::PublicKey; use bitcoin::Network; +use std::time::Duration; + // Config defaults const DEFAULT_STORAGE_DIR_PATH: &str = "/tmp/ldk_node/"; const DEFAULT_NETWORK: Network = Network::Bitcoin; From 3ca42ffff80a7f0a4c256c4726c24b7c57de51f2 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 27 Aug 2024 10:26:02 +0200 Subject: [PATCH 027/127] Rename `sending_paramters_config` field to `sending_parameters` --- bindings/ldk_node.udl | 2 +- src/config.rs | 13 ++++++++----- src/payment/bolt11.rs | 24 ++++++++---------------- src/payment/mod.rs | 5 +---- src/payment/spontaneous.rs | 12 ++++-------- 5 files changed, 22 insertions(+), 34 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 6e248e3ff..d3122fa41 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -16,7 +16,7 @@ dictionary Config { u64 probing_liquidity_limit_multiplier; LogLevel log_level; AnchorChannelsConfig? anchor_channels_config; - SendingParameters? sending_parameters_config; + SendingParameters? sending_parameters; }; dictionary AnchorChannelsConfig { diff --git a/src/config.rs b/src/config.rs index 065fc18ce..c8f0b73a7 100644 --- a/src/config.rs +++ b/src/config.rs @@ -88,9 +88,10 @@ pub(crate) const WALLET_KEYS_SEED_LEN: usize = 64; /// | `probing_liquidity_limit_multiplier` | 3 | /// | `log_level` | Debug | /// | `anchor_channels_config` | Some(..) | -/// | `sending_parameters_config` | None | +/// | `sending_parameters` | None | /// -/// See [`AnchorChannelsConfig`] for more information on its respective default values. +/// See [`AnchorChannelsConfig`] and [`SendingParameters`] for more information regarding their +/// respective default values. /// /// [`Node`]: crate::Node pub struct Config { @@ -150,12 +151,14 @@ pub struct Config { /// closure. We *will* however still try to get the Anchor spending transactions confirmed /// on-chain with the funds available. pub anchor_channels_config: Option, - /// Configuration options for payment routing and pathfinding. 
/// /// Setting the `SendingParameters` provides flexibility to customize how payments are routed, /// including setting limits on routing fees, CLTV expiry, and channel utilization. - pub sending_parameters_config: Option, + /// + /// **Note:** If unset, default parameters will be used, and you will be able to override the + /// parameters on a per-payment basis in the corresponding method calls. + pub sending_parameters: Option, } impl Default for Config { @@ -173,7 +176,7 @@ impl Default for Config { probing_liquidity_limit_multiplier: DEFAULT_PROBING_LIQUIDITY_LIMIT_MULTIPLIER, log_level: DEFAULT_LOG_LEVEL, anchor_channels_config: Some(AnchorChannelsConfig::default()), - sending_parameters_config: None, + sending_parameters: None, } } } diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index e15273232..fc10608fb 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -71,10 +71,8 @@ impl Bolt11Payment { /// Send a payment given an invoice. /// - /// If [`SendingParameters`] are provided they will override the node's default routing parameters - /// on a per-field basis. Each field in `SendingParameters` that is set replaces the corresponding - /// default value. Fields that are not set fall back to the node's configured defaults. If no - /// `SendingParameters` are provided, the method fully relies on these defaults. + /// If `sending_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. pub fn send( &self, invoice: &Bolt11Invoice, sending_parameters: Option, ) -> Result { @@ -99,9 +97,7 @@ impl Bolt11Payment { } if let Some(user_set_params) = sending_parameters { - if let Some(mut default_params) = - self.config.sending_parameters_config.as_ref().cloned() - { + if let Some(mut default_params) = self.config.sending_parameters.as_ref().cloned() { default_params.max_total_routing_fee_msat = user_set_params .max_total_routing_fee_msat .or(default_params.max_total_routing_fee_msat); @@ -122,7 +118,7 @@ impl Bolt11Payment { route_params.payment_params.max_channel_saturation_power_of_half = default_params.max_channel_saturation_power_of_half.unwrap_or_default(); } - } else if let Some(default_params) = &self.config.sending_parameters_config { + } else if let Some(default_params) = &self.config.sending_parameters { route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; route_params.payment_params.max_total_cltv_expiry_delta = default_params.max_total_cltv_expiry_delta.unwrap_or_default(); @@ -197,10 +193,8 @@ impl Bolt11Payment { /// This can be used to pay a so-called "zero-amount" invoice, i.e., an invoice that leaves the /// amount paid to be determined by the user. /// - /// If [`SendingParameters`] are provided they will override the node's default routing parameters - /// on a per-field basis. Each field in `SendingParameters` that is set replaces the corresponding - /// default value. Fields that are not set fall back to the node's configured defaults. If no - /// `SendingParameters` are provided, the method fully relies on these defaults. + /// If `sending_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. 
pub fn send_using_amount( &self, invoice: &Bolt11Invoice, amount_msat: u64, sending_parameters: Option, @@ -248,9 +242,7 @@ impl Bolt11Payment { RouteParameters::from_payment_params_and_value(payment_params, amount_msat); if let Some(user_set_params) = sending_parameters { - if let Some(mut default_params) = - self.config.sending_parameters_config.as_ref().cloned() - { + if let Some(mut default_params) = self.config.sending_parameters.as_ref().cloned() { default_params.max_total_routing_fee_msat = user_set_params .max_total_routing_fee_msat .or(default_params.max_total_routing_fee_msat); @@ -271,7 +263,7 @@ impl Bolt11Payment { route_params.payment_params.max_channel_saturation_power_of_half = default_params.max_channel_saturation_power_of_half.unwrap_or_default(); } - } else if let Some(default_params) = &self.config.sending_parameters_config { + } else if let Some(default_params) = &self.config.sending_parameters { route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; route_params.payment_params.max_total_cltv_expiry_delta = default_params.max_total_cltv_expiry_delta.unwrap_or_default(); diff --git a/src/payment/mod.rs b/src/payment/mod.rs index d1b12de99..2f0b767b7 100644 --- a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -14,7 +14,7 @@ pub use spontaneous::SpontaneousPayment; pub use store::{LSPFeeLimits, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; pub use unified_qr::{QrPaymentResult, UnifiedQrPayment}; -/// Represents information used to route a payment. +/// Represents information used to send a payment. #[derive(Clone, Debug, PartialEq)] pub struct SendingParameters { /// The maximum total fees, in millisatoshi, that may accrue during route finding. @@ -24,21 +24,18 @@ pub struct SendingParameters { /// /// Note that values below a few sats may result in some paths being spuriously ignored. pub max_total_routing_fee_msat: Option, - /// The maximum total CLTV delta we accept for the route. /// /// Defaults to [`DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA`]. /// /// [`DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA`]: lightning::routing::router::DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA pub max_total_cltv_expiry_delta: Option, - /// The maximum number of paths that may be used by (MPP) payments. /// /// Defaults to [`DEFAULT_MAX_PATH_COUNT`]. /// /// [`DEFAULT_MAX_PATH_COUNT`]: lightning::routing::router::DEFAULT_MAX_PATH_COUNT pub max_path_count: Option, - /// Selects the maximum share of a channel's total capacity which will be sent over a channel, /// as a power of 1/2. /// diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index a259b685c..9c4d4df8a 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -44,10 +44,8 @@ impl SpontaneousPayment { /// Send a spontaneous aka. "keysend", payment. /// - /// If [`SendingParameters`] are provided they will override the node's default routing parameters - /// on a per-field basis. Each field in `SendingParameters` that is set replaces the corresponding - /// default value. Fields that are not set fall back to the node's configured defaults. If no - /// `SendingParameters` are provided, the method fully relies on these defaults. + /// If `sending_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. 
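// Editorial aside, not part of this patch: an illustrative keysend call matching the
// signature below, assuming a running `node: Node` and a reachable peer `node_id: PublicKey`.
// Passing `None` relies on the node-wide `Config::sending_parameters` (or LDK's defaults if
// unset):
//
//     let payment_id = node.spontaneous_payment().send(25_000, node_id, None)?;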
pub fn send( &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, ) -> Result { @@ -75,9 +73,7 @@ impl SpontaneousPayment { ); if let Some(user_set_params) = sending_parameters { - if let Some(mut default_params) = - self.config.sending_parameters_config.as_ref().cloned() - { + if let Some(mut default_params) = self.config.sending_parameters.as_ref().cloned() { default_params.max_total_routing_fee_msat = user_set_params .max_total_routing_fee_msat .or(default_params.max_total_routing_fee_msat); @@ -98,7 +94,7 @@ impl SpontaneousPayment { route_params.payment_params.max_channel_saturation_power_of_half = default_params.max_channel_saturation_power_of_half.unwrap_or_default(); } - } else if let Some(default_params) = &self.config.sending_parameters_config { + } else if let Some(default_params) = &self.config.sending_parameters { route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; route_params.payment_params.max_total_cltv_expiry_delta = default_params.max_total_cltv_expiry_delta.unwrap_or_default(); From 8a432b10bb90dd6601a0a0c05636a28e3096611c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 27 Aug 2024 11:44:30 +0200 Subject: [PATCH 028/127] Refactor `max_total_routing_fee_msat` to allow setting `None` Previously, the `SendingParamters` field was simply an `Option`, which however means we could just override it to be `Some`. Here, we have it be `Option>` which allows the `None` override. As UniFFI doesn't support `Option>`, we work around this via a dedicated `enum` that is only exposed under the `uniffi` feature. --- bindings/ldk_node.udl | 28 ++++++---- src/payment/bolt11.rs | 90 ++++++++++----------------------- src/payment/mod.rs | 40 ++++++++++++++- src/payment/spontaneous.rs | 45 +++++------------ src/uniffi_types.rs | 2 +- tests/integration_tests_rust.rs | 2 +- 6 files changed, 100 insertions(+), 107 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index d3122fa41..d5d7638ee 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -151,10 +151,10 @@ interface OnchainPayment { }; interface UnifiedQrPayment { - [Throws=NodeError] - string receive(u64 amount_sats, [ByRef]string message, u32 expiry_sec); - [Throws=NodeError] - QrPaymentResult send([ByRef]string uri_str); + [Throws=NodeError] + string receive(u64 amount_sats, [ByRef]string message, u32 expiry_sec); + [Throws=NodeError] + QrPaymentResult send([ByRef]string uri_str); }; [Error] @@ -290,9 +290,9 @@ interface PaymentKind { [Enum] interface QrPaymentResult { - Onchain(Txid txid); - Bolt11(PaymentId payment_id); - Bolt12(PaymentId payment_id); + Onchain(Txid txid); + Bolt11(PaymentId payment_id); + Bolt12(PaymentId payment_id); }; enum PaymentDirection { @@ -321,10 +321,16 @@ dictionary PaymentDetails { }; dictionary SendingParameters { - u64? max_total_routing_fee_msat; - u32? max_total_cltv_expiry_delta; - u8? max_path_count; - u8? max_channel_saturation_power_of_half; + MaxTotalRoutingFeeLimit? max_total_routing_fee_msat; + u32? max_total_cltv_expiry_delta; + u8? max_path_count; + u8? 
max_channel_saturation_power_of_half; +}; + +[Enum] +interface MaxTotalRoutingFeeLimit { + None (); + Some ( u64 amount_msat ); }; [NonExhaustive] diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index fc10608fb..07823a17b 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -96,37 +96,20 @@ impl Bolt11Payment { } } - if let Some(user_set_params) = sending_parameters { - if let Some(mut default_params) = self.config.sending_parameters.as_ref().cloned() { - default_params.max_total_routing_fee_msat = user_set_params - .max_total_routing_fee_msat - .or(default_params.max_total_routing_fee_msat); - default_params.max_total_cltv_expiry_delta = user_set_params - .max_total_cltv_expiry_delta - .or(default_params.max_total_cltv_expiry_delta); - default_params.max_path_count = - user_set_params.max_path_count.or(default_params.max_path_count); - default_params.max_channel_saturation_power_of_half = user_set_params - .max_channel_saturation_power_of_half - .or(default_params.max_channel_saturation_power_of_half); - - route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; - route_params.payment_params.max_total_cltv_expiry_delta = - default_params.max_total_cltv_expiry_delta.unwrap_or_default(); - route_params.payment_params.max_path_count = - default_params.max_path_count.unwrap_or_default(); - route_params.payment_params.max_channel_saturation_power_of_half = - default_params.max_channel_saturation_power_of_half.unwrap_or_default(); - } - } else if let Some(default_params) = &self.config.sending_parameters { - route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; - route_params.payment_params.max_total_cltv_expiry_delta = - default_params.max_total_cltv_expiry_delta.unwrap_or_default(); - route_params.payment_params.max_path_count = - default_params.max_path_count.unwrap_or_default(); - route_params.payment_params.max_channel_saturation_power_of_half = - default_params.max_channel_saturation_power_of_half.unwrap_or_default(); - } + let override_params = + sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); + if let Some(override_params) = override_params { + override_params + .max_total_routing_fee_msat + .map(|f| route_params.max_total_routing_fee_msat = f.into()); + override_params + .max_total_cltv_expiry_delta + .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); + override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); + override_params + .max_channel_saturation_power_of_half + .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); + }; let payment_secret = Some(*invoice.payment_secret()); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); @@ -241,37 +224,20 @@ impl Bolt11Payment { let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amount_msat); - if let Some(user_set_params) = sending_parameters { - if let Some(mut default_params) = self.config.sending_parameters.as_ref().cloned() { - default_params.max_total_routing_fee_msat = user_set_params - .max_total_routing_fee_msat - .or(default_params.max_total_routing_fee_msat); - default_params.max_total_cltv_expiry_delta = user_set_params - .max_total_cltv_expiry_delta - .or(default_params.max_total_cltv_expiry_delta); - default_params.max_path_count = - user_set_params.max_path_count.or(default_params.max_path_count); - default_params.max_channel_saturation_power_of_half = user_set_params - 
.max_channel_saturation_power_of_half - .or(default_params.max_channel_saturation_power_of_half); - - route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; - route_params.payment_params.max_total_cltv_expiry_delta = - default_params.max_total_cltv_expiry_delta.unwrap_or_default(); - route_params.payment_params.max_path_count = - default_params.max_path_count.unwrap_or_default(); - route_params.payment_params.max_channel_saturation_power_of_half = - default_params.max_channel_saturation_power_of_half.unwrap_or_default(); - } - } else if let Some(default_params) = &self.config.sending_parameters { - route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; - route_params.payment_params.max_total_cltv_expiry_delta = - default_params.max_total_cltv_expiry_delta.unwrap_or_default(); - route_params.payment_params.max_path_count = - default_params.max_path_count.unwrap_or_default(); - route_params.payment_params.max_channel_saturation_power_of_half = - default_params.max_channel_saturation_power_of_half.unwrap_or_default(); - } + let override_params = + sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); + if let Some(override_params) = override_params { + override_params + .max_total_routing_fee_msat + .map(|f| route_params.max_total_routing_fee_msat = f.into()); + override_params + .max_total_cltv_expiry_delta + .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); + override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); + override_params + .max_channel_saturation_power_of_half + .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); + }; let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); let recipient_fields = RecipientOnionFields::secret_only(*payment_secret); diff --git a/src/payment/mod.rs b/src/payment/mod.rs index 2f0b767b7..f118f3fc8 100644 --- a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -23,7 +23,16 @@ pub struct SendingParameters { /// paths. /// /// Note that values below a few sats may result in some paths being spuriously ignored. - pub max_total_routing_fee_msat: Option, + #[cfg(not(feature = "uniffi"))] + pub max_total_routing_fee_msat: Option>, + /// The maximum total fees, in millisatoshi, that may accrue during route finding. + /// + /// This limit also applies to the total fees that may arise while retrying failed payment + /// paths. + /// + /// Note that values below a few sats may result in some paths being spuriously ignored. + #[cfg(feature = "uniffi")] + pub max_total_routing_fee_msat: Option, /// The maximum total CLTV delta we accept for the route. /// /// Defaults to [`DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA`]. @@ -59,3 +68,32 @@ pub struct SendingParameters { /// Default value: 2 pub max_channel_saturation_power_of_half: Option, } + +/// Represents the possible states of [`SendingParameters::max_total_routing_fee_msat`]. +// +// Required only in bindings as UniFFI can't expose `Option>`. 
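// Editorial aside, not part of this patch: an illustrative sketch of the conversions
// provided below for the bindings-only helper enum. Under the `uniffi` feature, callers
// express "no fee limit" explicitly instead of nesting `Option`s:
//
//     // Explicitly override any node-wide default with "no fee limit":
//     let no_limit: Option<u64> = MaxTotalRoutingFeeLimit::None.into();
//     assert_eq!(no_limit, None);
//     // Or cap the total routing fee at 75_000 msat:
//     let capped: Option<u64> = MaxTotalRoutingFeeLimit::Some { amount_msat: 75_000 }.into();
//     assert_eq!(capped, Some(75_000));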
+#[cfg(feature = "uniffi")] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum MaxTotalRoutingFeeLimit { + None, + Some { amount_msat: u64 }, +} + +#[cfg(feature = "uniffi")] +impl From for Option { + fn from(value: MaxTotalRoutingFeeLimit) -> Self { + match value { + MaxTotalRoutingFeeLimit::Some { amount_msat } => Some(amount_msat), + MaxTotalRoutingFeeLimit::None => None, + } + } +} + +#[cfg(feature = "uniffi")] +impl From> for MaxTotalRoutingFeeLimit { + fn from(value: Option) -> Self { + value.map_or(MaxTotalRoutingFeeLimit::None, |amount_msat| MaxTotalRoutingFeeLimit::Some { + amount_msat, + }) + } +} diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 9c4d4df8a..5beb443e5 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -72,37 +72,20 @@ impl SpontaneousPayment { amount_msat, ); - if let Some(user_set_params) = sending_parameters { - if let Some(mut default_params) = self.config.sending_parameters.as_ref().cloned() { - default_params.max_total_routing_fee_msat = user_set_params - .max_total_routing_fee_msat - .or(default_params.max_total_routing_fee_msat); - default_params.max_total_cltv_expiry_delta = user_set_params - .max_total_cltv_expiry_delta - .or(default_params.max_total_cltv_expiry_delta); - default_params.max_path_count = - user_set_params.max_path_count.or(default_params.max_path_count); - default_params.max_channel_saturation_power_of_half = user_set_params - .max_channel_saturation_power_of_half - .or(default_params.max_channel_saturation_power_of_half); - - route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; - route_params.payment_params.max_total_cltv_expiry_delta = - default_params.max_total_cltv_expiry_delta.unwrap_or_default(); - route_params.payment_params.max_path_count = - default_params.max_path_count.unwrap_or_default(); - route_params.payment_params.max_channel_saturation_power_of_half = - default_params.max_channel_saturation_power_of_half.unwrap_or_default(); - } - } else if let Some(default_params) = &self.config.sending_parameters { - route_params.max_total_routing_fee_msat = default_params.max_total_routing_fee_msat; - route_params.payment_params.max_total_cltv_expiry_delta = - default_params.max_total_cltv_expiry_delta.unwrap_or_default(); - route_params.payment_params.max_path_count = - default_params.max_path_count.unwrap_or_default(); - route_params.payment_params.max_channel_saturation_power_of_half = - default_params.max_channel_saturation_power_of_half.unwrap_or_default(); - } + let override_params = + sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); + if let Some(override_params) = override_params { + override_params + .max_total_routing_fee_msat + .map(|f| route_params.max_total_routing_fee_msat = f.into()); + override_params + .max_total_cltv_expiry_delta + .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); + override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); + override_params + .max_channel_saturation_power_of_half + .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); + }; let recipient_fields = RecipientOnionFields::spontaneous_empty(); diff --git a/src/uniffi_types.rs b/src/uniffi_types.rs index 22546e03c..566ef8d72 100644 --- a/src/uniffi_types.rs +++ b/src/uniffi_types.rs @@ -5,7 +5,7 @@ pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; pub use crate::payment::store::{LSPFeeLimits, PaymentDirection, 
PaymentKind, PaymentStatus}; -pub use crate::payment::{QrPaymentResult, SendingParameters}; +pub use crate::payment::{MaxTotalRoutingFeeLimit, QrPaymentResult, SendingParameters}; pub use lightning::events::{ClosureReason, PaymentFailureReason}; pub use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 67de1c9da..b3788f9d4 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -157,7 +157,7 @@ fn multi_hop_sending() { std::thread::sleep(std::time::Duration::from_secs(1)); let sending_params = SendingParameters { - max_total_routing_fee_msat: Some(75_000), + max_total_routing_fee_msat: Some(Some(75_000).into()), max_total_cltv_expiry_delta: Some(1000), max_path_count: Some(10), max_channel_saturation_power_of_half: Some(2), From b282d4293410c3b216be124ae541bb1478b31ccf Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 27 Aug 2024 11:58:30 +0200 Subject: [PATCH 029/127] Drop `default_cltv_expiry_delta` .. as it was used for spontaneous payments only and hence a bit misleading. We drop it for now and see if any users would complain. If so, it would probably be sufficient for it to be an optional parameter on the spontaneous payments methods. --- bindings/ldk_node.udl | 1 - src/config.rs | 4 ---- src/payment/spontaneous.rs | 8 +++++--- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index d5d7638ee..0607e71c3 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -8,7 +8,6 @@ dictionary Config { string? log_dir_path; Network network; sequence? listening_addresses; - u32 default_cltv_expiry_delta; u64 onchain_wallet_sync_interval_secs; u64 wallet_sync_interval_secs; u64 fee_rate_cache_update_interval_secs; diff --git a/src/config.rs b/src/config.rs index c8f0b73a7..f0c2c856b 100644 --- a/src/config.rs +++ b/src/config.rs @@ -12,7 +12,6 @@ use std::time::Duration; // Config defaults const DEFAULT_STORAGE_DIR_PATH: &str = "/tmp/ldk_node/"; const DEFAULT_NETWORK: Network = Network::Bitcoin; -const DEFAULT_CLTV_EXPIRY_DELTA: u32 = 144; const DEFAULT_BDK_WALLET_SYNC_INTERVAL_SECS: u64 = 80; const DEFAULT_LDK_WALLET_SYNC_INTERVAL_SECS: u64 = 30; const DEFAULT_FEE_RATE_CACHE_UPDATE_INTERVAL_SECS: u64 = 60 * 10; @@ -105,8 +104,6 @@ pub struct Config { pub network: Network, /// The addresses on which the node will listen for incoming connections. pub listening_addresses: Option>, - /// The default CLTV expiry delta to be used for payments. - pub default_cltv_expiry_delta: u32, /// The time in-between background sync attempts of the onchain wallet, in seconds. /// /// **Note:** A minimum of 10 seconds is always enforced. @@ -168,7 +165,6 @@ impl Default for Config { log_dir_path: None, network: DEFAULT_NETWORK, listening_addresses: None, - default_cltv_expiry_delta: DEFAULT_CLTV_EXPIRY_DELTA, onchain_wallet_sync_interval_secs: DEFAULT_BDK_WALLET_SYNC_INTERVAL_SECS, wallet_sync_interval_secs: DEFAULT_LDK_WALLET_SYNC_INTERVAL_SECS, fee_rate_cache_update_interval_secs: DEFAULT_FEE_RATE_CACHE_UPDATE_INTERVAL_SECS, diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 5beb443e5..b7b8dcc03 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -18,6 +18,9 @@ use bitcoin::secp256k1::PublicKey; use std::sync::{Arc, RwLock}; +// The default `final_cltv_expiry_delta` we apply when not set. 
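// (Editorial note: 144 blocks is roughly one day at an average of ten minutes per block,
// matching the `DEFAULT_CLTV_EXPIRY_DELTA` constant dropped from `Config` in this patch.)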
+const LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA: u32 = 144; + /// A payment handler allowing to send spontaneous ("keysend") payments. /// /// Should be retrieved by calling [`Node::spontaneous_payment`]. @@ -68,7 +71,7 @@ impl SpontaneousPayment { } let mut route_params = RouteParameters::from_payment_params_and_value( - PaymentParameters::from_node_id(node_id, self.config.default_cltv_expiry_delta), + PaymentParameters::from_node_id(node_id, LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA), amount_msat, ); @@ -153,13 +156,12 @@ impl SpontaneousPayment { } let liquidity_limit_multiplier = Some(self.config.probing_liquidity_limit_multiplier); - let cltv_expiry_delta = self.config.default_cltv_expiry_delta; self.channel_manager .send_spontaneous_preflight_probes( node_id, amount_msat, - cltv_expiry_delta, + LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA, liquidity_limit_multiplier, ) .map_err(|e| { From 09d68ee38b2ecdc5b8bc960cb35204c8e4a9b614 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Sun, 25 Aug 2024 12:01:44 +0300 Subject: [PATCH 030/127] Refactor `ChannelConfig` / `MaxDustHTLCExposure` Previously, we chose to expose `ChannelConfig` as a Uniffi `interface`, providing accessor methods. Unfortunately this forced us to `Arc` it everywhere in the API, and also didn't allow to retrieve the currently set dust exposure limits. Here, we refactor our version of `ChannelConfig` to be a normal `struct` (Uniffi `dictionary`), and only expose the `MaxDustHTLCExposure` as an enum-`interface`. --- bindings/ldk_node.udl | 27 +++--- src/lib.rs | 10 +-- src/types.rs | 185 +++++++++++++++++++++++------------------- 3 files changed, 118 insertions(+), 104 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 6e248e3ff..c30578d4b 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -404,20 +404,19 @@ dictionary BalanceDetails { sequence pending_balances_from_channel_closures; }; -interface ChannelConfig { - constructor(); - u32 forwarding_fee_proportional_millionths(); - void set_forwarding_fee_proportional_millionths(u32 value); - u32 forwarding_fee_base_msat(); - void set_forwarding_fee_base_msat(u32 fee_msat); - u16 cltv_expiry_delta(); - void set_cltv_expiry_delta(u16 value); - u64 force_close_avoidance_max_fee_satoshis(); - void set_force_close_avoidance_max_fee_satoshis(u64 value_sat); - boolean accept_underpaying_htlcs(); - void set_accept_underpaying_htlcs(boolean value); - void set_max_dust_htlc_exposure_from_fixed_limit(u64 limit_msat); - void set_max_dust_htlc_exposure_from_fee_rate_multiplier(u64 multiplier); +dictionary ChannelConfig { + u32 forwarding_fee_proportional_millionths; + u32 forwarding_fee_base_msat; + u16 cltv_expiry_delta; + MaxDustHTLCExposure max_dust_htlc_exposure; + u64 force_close_avoidance_max_fee_satoshis; + boolean accept_underpaying_htlcs; +}; + +[Enum] +interface MaxDustHTLCExposure { + FixedLimit ( u64 limit_msat ); + FeeRateMultiplier ( u64 multiplier ); }; enum LogLevel { diff --git a/src/lib.rs b/src/lib.rs index a1d88ef4c..f777b07d1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -109,7 +109,7 @@ pub use error::Error as NodeError; use error::Error; pub use event::Event; -pub use types::ChannelConfig; +pub use types::{ChannelConfig, MaxDustHTLCExposure}; pub use io::utils::generate_entropy_mnemonic; @@ -1187,7 +1187,7 @@ impl Node { /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. 
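// Editorial aside, not part of this patch: with `ChannelConfig` now a plain struct, callers
// no longer wrap it in an `Arc`. A minimal sketch against the updated signature below,
// assuming a running `node: Node`, placeholder peer details (`node_id`, `address`), and
// purely illustrative values:
//
//     let channel_config = ChannelConfig {
//         forwarding_fee_proportional_millionths: 1_000,
//         forwarding_fee_base_msat: 1_000,
//         cltv_expiry_delta: 72,
//         max_dust_htlc_exposure: MaxDustHTLCExposure::FeeRateMultiplier { multiplier: 5_000 },
//         force_close_avoidance_max_fee_satoshis: 1_000,
//         accept_underpaying_htlcs: false,
//     };
//     let user_channel_id =
//         node.connect_open_channel(node_id, address, 100_000, None, Some(channel_config), true)?;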
pub fn connect_open_channel( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, - push_to_counterparty_msat: Option, channel_config: Option>, + push_to_counterparty_msat: Option, channel_config: Option, announce_channel: bool, ) -> Result { let rt_lock = self.runtime.read().unwrap(); @@ -1251,7 +1251,7 @@ impl Node { let mut user_config = default_user_config(&self.config); user_config.channel_handshake_config.announced_channel = announce_channel; - user_config.channel_config = (*(channel_config.unwrap_or_default())).clone().into(); + user_config.channel_config = (channel_config.unwrap_or_default()).clone().into(); // We set the max inflight to 100% for private channels. // FIXME: LDK will default to this behavior soon, too, at which point we should drop this // manual override. @@ -1494,7 +1494,7 @@ impl Node { /// Update the config for a previously opened channel. pub fn update_channel_config( &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, - channel_config: Arc, + channel_config: ChannelConfig, ) -> Result<(), Error> { let open_channels = self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); @@ -1505,7 +1505,7 @@ impl Node { .update_channel_config( &counterparty_node_id, &[channel_details.channel_id], - &(*channel_config).clone().into(), + &(channel_config).clone().into(), ) .map_err(|_| Error::ChannelConfigUpdateFailed) } else { diff --git a/src/types.rs b/src/types.rs index 0c2faeb78..abc015ce4 100644 --- a/src/types.rs +++ b/src/types.rs @@ -22,7 +22,7 @@ use lightning_transaction_sync::EsploraSyncClient; use bitcoin::secp256k1::PublicKey; use bitcoin::OutPoint; -use std::sync::{Arc, Mutex, RwLock}; +use std::sync::{Arc, Mutex}; pub(crate) type DynStore = dyn KVStore + Sync + Send; @@ -279,7 +279,7 @@ pub struct ChannelDetails { /// The largest value HTLC (in msat) we currently will accept, for this channel. pub inbound_htlc_maximum_msat: Option, /// Set of configurable parameters that affect channel operation. - pub config: Arc, + pub config: ChannelConfig, } impl From for ChannelDetails { @@ -330,7 +330,7 @@ impl From for ChannelDetails { inbound_htlc_minimum_msat: value.inbound_htlc_minimum_msat.unwrap_or(0), inbound_htlc_maximum_msat: value.inbound_htlc_maximum_msat, // unwrap safety: `config` is only `None` for LDK objects serialized prior to 0.0.109. - config: value.config.map(|c| Arc::new(c.into())).unwrap(), + config: value.config.map(|c| c.into()).unwrap(), } } } @@ -350,98 +350,70 @@ pub struct PeerDetails { pub is_connected: bool, } -/// Options which apply on a per-channel basis. -/// -/// See documentation of [`LdkChannelConfig`] for details. -#[derive(Debug)] +/// Options which apply on a per-channel basis and may change at runtime or based on negotiation +/// with our counterparty. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct ChannelConfig { - inner: RwLock, -} - -impl Clone for ChannelConfig { - fn clone(&self) -> Self { - self.inner.read().unwrap().clone().into() - } -} - -impl ChannelConfig { - /// Constructs a new `ChannelConfig`. - pub fn new() -> Self { - Self::default() - } - - /// Returns the set `forwarding_fee_proportional_millionths`. - pub fn forwarding_fee_proportional_millionths(&self) -> u32 { - self.inner.read().unwrap().forwarding_fee_proportional_millionths - } - - /// Sets the `forwarding_fee_proportional_millionths`. 
- pub fn set_forwarding_fee_proportional_millionths(&self, value: u32) { - self.inner.write().unwrap().forwarding_fee_proportional_millionths = value; - } - - /// Returns the set `forwarding_fee_base_msat`. - pub fn forwarding_fee_base_msat(&self) -> u32 { - self.inner.read().unwrap().forwarding_fee_base_msat - } - - /// Sets the `forwarding_fee_base_msat`. - pub fn set_forwarding_fee_base_msat(&self, fee_msat: u32) { - self.inner.write().unwrap().forwarding_fee_base_msat = fee_msat; - } - - /// Returns the set `cltv_expiry_delta`. - pub fn cltv_expiry_delta(&self) -> u16 { - self.inner.read().unwrap().cltv_expiry_delta - } - - /// Sets the `cltv_expiry_delta`. - pub fn set_cltv_expiry_delta(&self, value: u16) { - self.inner.write().unwrap().cltv_expiry_delta = value; - } - - /// Returns the set `force_close_avoidance_max_fee_satoshis`. - pub fn force_close_avoidance_max_fee_satoshis(&self) -> u64 { - self.inner.read().unwrap().force_close_avoidance_max_fee_satoshis - } - - /// Sets the `force_close_avoidance_max_fee_satoshis`. - pub fn set_force_close_avoidance_max_fee_satoshis(&self, value_sat: u64) { - self.inner.write().unwrap().force_close_avoidance_max_fee_satoshis = value_sat; - } - - /// Returns the set `accept_underpaying_htlcs`. - pub fn accept_underpaying_htlcs(&self) -> bool { - self.inner.read().unwrap().accept_underpaying_htlcs - } - - /// Sets the `accept_underpaying_htlcs`. - pub fn set_accept_underpaying_htlcs(&self, value: bool) { - self.inner.write().unwrap().accept_underpaying_htlcs = value; - } - - /// Sets the `max_dust_htlc_exposure` from a fixed limit. - pub fn set_max_dust_htlc_exposure_from_fixed_limit(&self, limit_msat: u64) { - self.inner.write().unwrap().max_dust_htlc_exposure = - LdkMaxDustHTLCExposure::FixedLimitMsat(limit_msat); - } - - /// Sets the `max_dust_htlc_exposure` from a fee rate multiplier. - pub fn set_max_dust_htlc_exposure_from_fee_rate_multiplier(&self, multiplier: u64) { - self.inner.write().unwrap().max_dust_htlc_exposure = - LdkMaxDustHTLCExposure::FeeRateMultiplier(multiplier); - } + /// Amount (in millionths of a satoshi) charged per satoshi for payments forwarded outbound + /// over the channel. + /// This may be allowed to change at runtime in a later update, however doing so must result in + /// update messages sent to notify all nodes of our updated relay fee. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub forwarding_fee_proportional_millionths: u32, + /// Amount (in milli-satoshi) charged for payments forwarded outbound over the channel, in + /// excess of [`ChannelConfig::forwarding_fee_proportional_millionths`]. + /// This may be allowed to change at runtime in a later update, however doing so must result in + /// update messages sent to notify all nodes of our updated relay fee. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub forwarding_fee_base_msat: u32, + /// The difference in the CLTV value between incoming HTLCs and an outbound HTLC forwarded over + /// the channel this config applies to. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub cltv_expiry_delta: u16, + /// Limit our total exposure to potential loss to on-chain fees on close, including in-flight + /// HTLCs which are burned to fees as they are too small to claim on-chain and fees on + /// commitment transaction(s) broadcasted by our counterparty in excess of our own fee estimate. + /// + /// Please refer to [`LdkChannelConfig`] for further details. 
+ pub max_dust_htlc_exposure: MaxDustHTLCExposure, + /// The additional fee we're willing to pay to avoid waiting for the counterparty's + /// `to_self_delay` to reclaim funds. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub force_close_avoidance_max_fee_satoshis: u64, + /// If set, allows this channel's counterparty to skim an additional fee off this node's inbound + /// HTLCs. Useful for liquidity providers to offload on-chain channel costs to end users. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub accept_underpaying_htlcs: bool, } impl From for ChannelConfig { fn from(value: LdkChannelConfig) -> Self { - Self { inner: RwLock::new(value) } + Self { + forwarding_fee_proportional_millionths: value.forwarding_fee_proportional_millionths, + forwarding_fee_base_msat: value.forwarding_fee_base_msat, + cltv_expiry_delta: value.cltv_expiry_delta, + max_dust_htlc_exposure: value.max_dust_htlc_exposure.into(), + force_close_avoidance_max_fee_satoshis: value.force_close_avoidance_max_fee_satoshis, + accept_underpaying_htlcs: value.accept_underpaying_htlcs, + } } } impl From for LdkChannelConfig { fn from(value: ChannelConfig) -> Self { - *value.inner.read().unwrap() + Self { + forwarding_fee_proportional_millionths: value.forwarding_fee_proportional_millionths, + forwarding_fee_base_msat: value.forwarding_fee_base_msat, + cltv_expiry_delta: value.cltv_expiry_delta, + max_dust_htlc_exposure: value.max_dust_htlc_exposure.into(), + force_close_avoidance_max_fee_satoshis: value.force_close_avoidance_max_fee_satoshis, + accept_underpaying_htlcs: value.accept_underpaying_htlcs, + } } } @@ -450,3 +422,46 @@ impl Default for ChannelConfig { LdkChannelConfig::default().into() } } + +/// Options for how to set the max dust exposure allowed on a channel. +/// +/// See [`LdkChannelConfig::max_dust_htlc_exposure`] for details. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum MaxDustHTLCExposure { + /// This sets a fixed limit on the total dust exposure in millisatoshis. + /// + /// Please refer to [`LdkMaxDustHTLCExposure`] for further details. + FixedLimit { + /// The fixed limit, in millisatoshis. + limit_msat: u64, + }, + /// This sets a multiplier on the feerate to determine the maximum allowed dust exposure. + /// + /// Please refer to [`LdkMaxDustHTLCExposure`] for further details. + FeeRateMultiplier { + /// The applied fee rate multiplier. 
+ multiplier: u64, + }, +} + +impl From for MaxDustHTLCExposure { + fn from(value: LdkMaxDustHTLCExposure) -> Self { + match value { + LdkMaxDustHTLCExposure::FixedLimitMsat(limit_msat) => Self::FixedLimit { limit_msat }, + LdkMaxDustHTLCExposure::FeeRateMultiplier(multiplier) => { + Self::FeeRateMultiplier { multiplier } + }, + } + } +} + +impl From for LdkMaxDustHTLCExposure { + fn from(value: MaxDustHTLCExposure) -> Self { + match value { + MaxDustHTLCExposure::FixedLimit { limit_msat } => Self::FixedLimitMsat(limit_msat), + MaxDustHTLCExposure::FeeRateMultiplier { multiplier } => { + Self::FeeRateMultiplier(multiplier) + }, + } + } +} From 7202c83223f97937326d896719f835866ff22510 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 30 Aug 2024 08:34:04 +0200 Subject: [PATCH 031/127] Make `Wallet` and `WalletKeysManager` `pub(crate)` --- src/wallet.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/wallet.rs b/src/wallet.rs index 0da3f6db8..7a77a4ece 100644 --- a/src/wallet.rs +++ b/src/wallet.rs @@ -43,7 +43,7 @@ enum WalletSyncStatus { InProgress { subscribers: tokio::sync::broadcast::Sender> }, } -pub struct Wallet +pub(crate) struct Wallet where D: BatchDatabase, B::Target: BroadcasterInterface, @@ -485,7 +485,7 @@ where /// Similar to [`KeysManager`], but overrides the destination and shutdown scripts so they are /// directly spendable by the BDK wallet. -pub struct WalletKeysManager +pub(crate) struct WalletKeysManager where D: BatchDatabase, B::Target: BroadcasterInterface, From 42a695ea8247ebf1d7e4e9275c826c078755feca Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 30 Aug 2024 08:33:52 +0200 Subject: [PATCH 032/127] Refactor `FeeEstimator` to introduce local target variants .. previously we used LDK's `FeeEstimator` and `ConfirmationTarget` and ~misused some of the latter's variants for our non-Lightning operations. Here, we introduce our own `FeeEstimator` and `ConfirmationTarget` allowing to add specific variants for `ChannelFunding` and `OnchainPayment`s, for example. --- src/event.rs | 4 +- src/fee_estimator.rs | 111 +++++++++++++++++++++++++++++++------------ src/wallet.rs | 16 +++---- 3 files changed, 88 insertions(+), 43 deletions(-) diff --git a/src/event.rs b/src/event.rs index c4c5034ff..d76f0b05e 100644 --- a/src/event.rs +++ b/src/event.rs @@ -6,6 +6,7 @@ use crate::{ }; use crate::connection::ConnectionManager; +use crate::fee_estimator::ConfirmationTarget; use crate::payment::store::{ PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus, @@ -18,7 +19,6 @@ use crate::io::{ }; use crate::logger::{log_debug, log_error, log_info, Logger}; -use lightning::chain::chaininterface::ConfirmationTarget; use lightning::events::bump_transaction::BumpTransactionEvent; use lightning::events::{ClosureReason, PaymentPurpose}; use lightning::events::{Event as LdkEvent, PaymentFailureReason}; @@ -398,7 +398,7 @@ where } => { // Construct the raw transaction with the output that is paid the amount of the // channel. - let confirmation_target = ConfirmationTarget::NonAnchorChannelFee; + let confirmation_target = ConfirmationTarget::ChannelFunding; // We set nLockTime to the current height to discourage fee sniping. 
let cur_height = self.channel_manager.current_best_block().height; diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs index 329cc6e42..b023ae964 100644 --- a/src/fee_estimator.rs +++ b/src/fee_estimator.rs @@ -2,9 +2,9 @@ use crate::config::FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS; use crate::logger::{log_error, log_trace, Logger}; use crate::{Config, Error}; -use lightning::chain::chaininterface::{ - ConfirmationTarget, FeeEstimator, FEERATE_FLOOR_SATS_PER_KW, -}; +use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; +use lightning::chain::chaininterface::FeeEstimator as LdkFeeEstimator; +use lightning::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW; use bdk::FeeRate; use esplora_client::AsyncClient as EsploraClient; @@ -17,6 +17,26 @@ use std::ops::Deref; use std::sync::{Arc, RwLock}; use std::time::Duration; +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] +pub(crate) enum ConfirmationTarget { + /// The default target for onchain payments. + OnchainPayment, + /// The target used for funding transactions. + ChannelFunding, + /// Targets used by LDK. + Lightning(LdkConfirmationTarget), +} + +pub(crate) trait FeeEstimator { + fn estimate_fee_rate(&self, confirmation_target: ConfirmationTarget) -> FeeRate; +} + +impl From for ConfirmationTarget { + fn from(value: LdkConfirmationTarget) -> Self { + Self::Lightning(value) + } +} + pub(crate) struct OnchainFeeEstimator where L::Target: Logger, @@ -61,23 +81,30 @@ where } let confirmation_targets = vec![ - ConfirmationTarget::OnChainSweep, - ConfirmationTarget::MinAllowedAnchorChannelRemoteFee, - ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee, - ConfirmationTarget::AnchorChannelFee, - ConfirmationTarget::NonAnchorChannelFee, - ConfirmationTarget::ChannelCloseMinimum, - ConfirmationTarget::OutputSpendingFee, + ConfirmationTarget::OnchainPayment, + ConfirmationTarget::ChannelFunding, + LdkConfirmationTarget::OnChainSweep.into(), + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee.into(), + LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee.into(), + LdkConfirmationTarget::AnchorChannelFee.into(), + LdkConfirmationTarget::NonAnchorChannelFee.into(), + LdkConfirmationTarget::ChannelCloseMinimum.into(), + LdkConfirmationTarget::OutputSpendingFee.into(), ]; + for target in confirmation_targets { let num_blocks = match target { - ConfirmationTarget::OnChainSweep => 6, - ConfirmationTarget::MinAllowedAnchorChannelRemoteFee => 1008, - ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => 144, - ConfirmationTarget::AnchorChannelFee => 1008, - ConfirmationTarget::NonAnchorChannelFee => 12, - ConfirmationTarget::ChannelCloseMinimum => 144, - ConfirmationTarget::OutputSpendingFee => 12, + ConfirmationTarget::OnchainPayment => 6, + ConfirmationTarget::ChannelFunding => 12, + ConfirmationTarget::Lightning(ldk_target) => match ldk_target { + LdkConfirmationTarget::OnChainSweep => 6, + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee => 1008, + LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => 144, + LdkConfirmationTarget::AnchorChannelFee => 1008, + LdkConfirmationTarget::NonAnchorChannelFee => 12, + LdkConfirmationTarget::ChannelCloseMinimum => 144, + LdkConfirmationTarget::OutputSpendingFee => 12, + }, }; let converted_estimates = @@ -96,7 +123,9 @@ where // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that // require some post-estimation adjustments to the fee rates, which we do here. 
let adjusted_fee_rate = match target { - ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => { + ConfirmationTarget::Lightning( + LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee, + ) => { let slightly_less_than_background = fee_rate.fee_wu(Weight::from_wu(1000)) - 250; FeeRate::from_sat_per_kwu(slightly_less_than_background as f32) @@ -115,33 +144,53 @@ where } Ok(()) } +} - pub(crate) fn estimate_fee_rate(&self, confirmation_target: ConfirmationTarget) -> FeeRate { +impl FeeEstimator for OnchainFeeEstimator +where + L::Target: Logger, +{ + fn estimate_fee_rate(&self, confirmation_target: ConfirmationTarget) -> FeeRate { let locked_fee_rate_cache = self.fee_rate_cache.read().unwrap(); let fallback_sats_kwu = match confirmation_target { - ConfirmationTarget::OnChainSweep => 5000, - ConfirmationTarget::MinAllowedAnchorChannelRemoteFee => FEERATE_FLOOR_SATS_PER_KW, - ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => FEERATE_FLOOR_SATS_PER_KW, - ConfirmationTarget::AnchorChannelFee => 500, - ConfirmationTarget::NonAnchorChannelFee => 1000, - ConfirmationTarget::ChannelCloseMinimum => 500, - ConfirmationTarget::OutputSpendingFee => 1000, + ConfirmationTarget::OnchainPayment => 5000, + ConfirmationTarget::ChannelFunding => 1000, + ConfirmationTarget::Lightning(ldk_target) => match ldk_target { + LdkConfirmationTarget::OnChainSweep => 5000, + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee => { + FEERATE_FLOOR_SATS_PER_KW + }, + LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => { + FEERATE_FLOOR_SATS_PER_KW + }, + LdkConfirmationTarget::AnchorChannelFee => 500, + LdkConfirmationTarget::NonAnchorChannelFee => 1000, + LdkConfirmationTarget::ChannelCloseMinimum => 500, + LdkConfirmationTarget::OutputSpendingFee => 1000, + }, }; // We'll fall back on this, if we really don't have any other information. let fallback_rate = FeeRate::from_sat_per_kwu(fallback_sats_kwu as f32); - *locked_fee_rate_cache.get(&confirmation_target).unwrap_or(&fallback_rate) + let estimate = *locked_fee_rate_cache.get(&confirmation_target).unwrap_or(&fallback_rate); + + // Currently we assume every transaction needs to at least be relayable, which is why we + // enforce a lower bound of `FEERATE_FLOOR_SATS_PER_KW`. 
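// Editorial aside, not part of this patch: a small worked example of the relay floor
// enforced below, assuming a cached estimate of 200 sat/kWU and LDK's
// `FEERATE_FLOOR_SATS_PER_KW` of 253: `estimate.fee_wu(Weight::from_wu(1000))` yields
// 200 sats, `200.max(253)` is 253, so the returned rate is bumped to 253 sat/kWU
// (roughly 1 sat/vB).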
+ let weight_units = Weight::from_wu(1000); + FeeRate::from_wu( + estimate.fee_wu(weight_units).max(FEERATE_FLOOR_SATS_PER_KW as u64), + weight_units, + ) } } -impl FeeEstimator for OnchainFeeEstimator +impl LdkFeeEstimator for OnchainFeeEstimator where L::Target: Logger, { - fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 { - (self.estimate_fee_rate(confirmation_target).fee_wu(Weight::from_wu(1000)) as u32) - .max(FEERATE_FLOOR_SATS_PER_KW) + fn get_est_sat_per_1000_weight(&self, confirmation_target: LdkConfirmationTarget) -> u32 { + self.estimate_fee_rate(confirmation_target.into()).fee_wu(Weight::from_wu(1000)) as u32 } } diff --git a/src/wallet.rs b/src/wallet.rs index 7a77a4ece..996ec57da 100644 --- a/src/wallet.rs +++ b/src/wallet.rs @@ -1,9 +1,10 @@ use crate::logger::{log_error, log_info, log_trace, Logger}; use crate::config::BDK_WALLET_SYNC_TIMEOUT_SECS; +use crate::fee_estimator::{ConfirmationTarget, FeeEstimator}; use crate::Error; -use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; +use lightning::chain::chaininterface::BroadcasterInterface; use lightning::events::bump_transaction::{Utxo, WalletSource}; use lightning::ln::msgs::{DecodeError, UnsignedGossipMessage}; @@ -18,8 +19,7 @@ use lightning::util::message_signing; use bdk::blockchain::EsploraBlockchain; use bdk::database::BatchDatabase; use bdk::wallet::AddressIndex; -use bdk::{Balance, FeeRate}; -use bdk::{SignOptions, SyncOptions}; +use bdk::{Balance, SignOptions, SyncOptions}; use bitcoin::address::{Payload, WitnessVersion}; use bitcoin::bech32::u5; @@ -153,9 +153,7 @@ where &self, output_script: ScriptBuf, value_sats: u64, confirmation_target: ConfirmationTarget, locktime: LockTime, ) -> Result { - let fee_rate = FeeRate::from_sat_per_kwu( - self.fee_estimator.get_est_sat_per_1000_weight(confirmation_target) as f32, - ); + let fee_rate = self.fee_estimator.estimate_fee_rate(confirmation_target); let locked_wallet = self.inner.lock().unwrap(); let mut tx_builder = locked_wallet.build_tx(); @@ -240,10 +238,8 @@ where pub(crate) fn send_to_address( &self, address: &bitcoin::Address, amount_msat_or_drain: Option, ) -> Result { - let confirmation_target = ConfirmationTarget::OutputSpendingFee; - let fee_rate = FeeRate::from_sat_per_kwu( - self.fee_estimator.get_est_sat_per_1000_weight(confirmation_target) as f32, - ); + let confirmation_target = ConfirmationTarget::OnchainPayment; + let fee_rate = self.fee_estimator.estimate_fee_rate(confirmation_target); let tx = { let locked_wallet = self.inner.lock().unwrap(); From e805403b108aed31a0cafa4238ad32ade6d002b7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Sep 2024 22:30:17 +0000 Subject: [PATCH 033/127] Bump actions/download-artifact from 3 to 4.1.7 in /.github/workflows Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 3 to 4.1.7. - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/v3...v4.1.7) --- updated-dependencies: - dependency-name: actions/download-artifact dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- .github/workflows/publish-jvm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish-jvm.yml b/.github/workflows/publish-jvm.yml index e9b0a3594..0ae40e0a1 100644 --- a/.github/workflows/publish-jvm.yml +++ b/.github/workflows/publish-jvm.yml @@ -67,7 +67,7 @@ jobs: ./scripts/uniffi_bindgen_generate_kotlin.sh - name: Download macOS native libraries from previous job - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4.1.7 id: download with: # download the artifact created in the prior job (named "artifact") From 118301949650409de73b1902ae62c0fc803f9392 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 5 Sep 2024 10:04:38 +0200 Subject: [PATCH 034/127] Pin `tokio-util` in CI to fix MSRV --- .github/workflows/rust.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index cce80d00d..7cd08ad79 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -50,6 +50,7 @@ jobs: cargo update -p regex --precise "1.9.6" --verbose # regex 1.10.0 requires rustc 1.65.0 cargo update -p home --precise "0.5.5" --verbose # home v0.5.9 requires rustc 1.70 or newer cargo update -p tokio --precise "1.38.1" --verbose # tokio v1.39.0 requires rustc 1.70 or newer + cargo update -p tokio-util --precise "0.7.11" --verbose # tokio-util v0.7.12 requires rustc 1.70 or newer - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == 'stable'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" From 7eca7fbc66c7430ac488ca80b69db2e1dea40f34 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 5 Sep 2024 09:50:12 +0200 Subject: [PATCH 035/127] Add copyright notices where they were missing Previously, only a few files included coypright headers. Here, we fix the omission and add the headers to all files where they were absent. --- bindings/uniffi-bindgen/src/main.rs | 7 +++++++ build.rs | 7 +++++++ src/balance.rs | 7 +++++++ src/builder.rs | 7 +++++++ src/config.rs | 7 +++++++ src/connection.rs | 7 +++++++ src/error.rs | 7 +++++++ src/event.rs | 7 +++++++ src/fee_estimator.rs | 7 +++++++ src/gossip.rs | 7 +++++++ src/graph.rs | 7 +++++++ src/hex_utils.rs | 7 +++++++ src/io/mod.rs | 7 +++++++ src/io/sqlite_store/migrations.rs | 7 +++++++ src/io/sqlite_store/mod.rs | 7 +++++++ src/io/test_utils.rs | 7 +++++++ src/io/utils.rs | 7 +++++++ src/io/vss_store.rs | 7 +++++++ src/lib.rs | 12 +++++------- src/liquidity.rs | 7 +++++++ src/logger.rs | 7 +++++++ src/message_handler.rs | 7 +++++++ src/payment/bolt11.rs | 7 +++++++ src/payment/bolt12.rs | 7 +++++++ src/payment/mod.rs | 7 +++++++ src/payment/onchain.rs | 7 +++++++ src/payment/spontaneous.rs | 7 +++++++ src/payment/store.rs | 7 +++++++ src/payment/unified_qr.rs | 12 +++++------- src/peer_store.rs | 7 +++++++ src/sweep.rs | 7 +++++++ src/tx_broadcaster.rs | 7 +++++++ src/types.rs | 7 +++++++ src/uniffi_types.rs | 7 +++++++ src/wallet.rs | 7 +++++++ tests/common/mod.rs | 7 +++++++ tests/integration_tests_cln.rs | 7 +++++++ tests/integration_tests_rust.rs | 7 +++++++ tests/integration_tests_vss.rs | 7 +++++++ 39 files changed, 269 insertions(+), 14 deletions(-) diff --git a/bindings/uniffi-bindgen/src/main.rs b/bindings/uniffi-bindgen/src/main.rs index 2aea96784..a71a3e914 100644 --- a/bindings/uniffi-bindgen/src/main.rs +++ b/bindings/uniffi-bindgen/src/main.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. 
+// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + fn main() { uniffi::uniffi_bindgen_main() } diff --git a/build.rs b/build.rs index 087855111..f011148e7 100644 --- a/build.rs +++ b/build.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + fn main() { #[cfg(feature = "uniffi")] uniffi::generate_scaffolding("bindings/ldk_node.udl").unwrap(); diff --git a/src/balance.rs b/src/balance.rs index f1c95dcbe..1f061cded 100644 --- a/src/balance.rs +++ b/src/balance.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::sweep::value_satoshis_from_descriptor; use lightning::chain::channelmonitor::Balance as LdkBalance; diff --git a/src/builder.rs b/src/builder.rs index a2a93aa79..fc9f839b0 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::config::{ default_user_config, Config, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, DEFAULT_ESPLORA_SERVER_URL, WALLET_KEYS_SEED_LEN, diff --git a/src/config.rs b/src/config.rs index f0c2c856b..fac25b562 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::payment::SendingParameters; use lightning::ln::msgs::SocketAddress; diff --git a/src/connection.rs b/src/connection.rs index 9d956d6be..5f665f77e 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::logger::{log_error, log_info, Logger}; use crate::types::PeerManager; use crate::Error; diff --git a/src/error.rs b/src/error.rs index deaf6db31..660c2036e 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ use std::fmt; #[derive(Copy, Clone, Debug, PartialEq, Eq)] diff --git a/src/event.rs b/src/event.rs index d76f0b05e..1f4b2e117 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::types::{DynStore, Sweeper, Wallet}; use crate::{ diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs index b023ae964..857106aa3 100644 --- a/src/fee_estimator.rs +++ b/src/fee_estimator.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::config::FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS; use crate::logger::{log_error, log_trace, Logger}; use crate::{Config, Error}; diff --git a/src/gossip.rs b/src/gossip.rs index 1241b0cdc..450b5b5ee 100644 --- a/src/gossip.rs +++ b/src/gossip.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::config::RGS_SYNC_TIMEOUT_SECS; use crate::logger::{log_trace, FilesystemLogger, Logger}; use crate::types::{GossipSync, Graph, P2PGossipSync, RapidGossipSync}; diff --git a/src/graph.rs b/src/graph.rs index 79a21853d..520be99db 100644 --- a/src/graph.rs +++ b/src/graph.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Objects for querying the network graph. use crate::types::Graph; diff --git a/src/hex_utils.rs b/src/hex_utils.rs index 1b50c5647..d56c6fd99 100644 --- a/src/hex_utils.rs +++ b/src/hex_utils.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use std::fmt::Write; #[cfg(feature = "uniffi")] diff --git a/src/io/mod.rs b/src/io/mod.rs index d545f6b93..c65ab1d3b 100644 --- a/src/io/mod.rs +++ b/src/io/mod.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Objects and traits for data persistence. pub mod sqlite_store; diff --git a/src/io/sqlite_store/migrations.rs b/src/io/sqlite_store/migrations.rs index 6d108185a..0486b8a4f 100644 --- a/src/io/sqlite_store/migrations.rs +++ b/src/io/sqlite_store/migrations.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. 
You may not use this file except in +// accordance with one or both of these licenses. + use rusqlite::Connection; use lightning::io; diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index 607105509..c1eac84b4 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Objects related to [`SqliteStore`] live here. use crate::io::utils::check_namespace_key_validity; diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index cf3da452d..c4610b4f5 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use lightning::ln::functional_test_utils::{ connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs, send_payment, diff --git a/src/io/utils.rs b/src/io/utils.rs index 77cc56f55..29484273c 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use super::*; use crate::config::WALLET_KEYS_SEED_LEN; diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 426af1fbb..ec8f04b64 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use io::Error; use std::io; use std::io::ErrorKind; diff --git a/src/lib.rs b/src/lib.rs index f777b07d1..0148cf8d4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,11 +1,9 @@ -// This file is Copyright its original authors, visible in version contror -// history. +// This file is Copyright its original authors, visible in version control history. // -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. #![crate_name = "ldk_node"] diff --git a/src/liquidity.rs b/src/liquidity.rs index 00e9f5717..1dfb5453a 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ use crate::logger::{log_debug, log_error, log_info, Logger}; use crate::types::{ChannelManager, KeysManager, LiquidityManager, PeerManager}; use crate::{Config, Error}; diff --git a/src/logger.rs b/src/logger.rs index 5f8627e07..2be20a165 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + pub(crate) use lightning::util::logger::Logger; pub(crate) use lightning::{log_bytes, log_debug, log_error, log_info, log_trace}; diff --git a/src/message_handler.rs b/src/message_handler.rs index 89d67d846..18dfa8637 100644 --- a/src/message_handler.rs +++ b/src/message_handler.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::liquidity::LiquiditySource; use lightning::ln::features::{InitFeatures, NodeFeatures}; diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index 07823a17b..b7f72355b 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Holds a payment handler allowing to create and pay [BOLT 11] invoices. //! //! [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 577dc92ae..9ec7bde34 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Holds a payment handler allowing to create and pay [BOLT 12] offers and refunds. //! //! [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md diff --git a/src/payment/mod.rs b/src/payment/mod.rs index f118f3fc8..5c99cfcf8 100644 --- a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Objects for different types of payments. mod bolt11; diff --git a/src/payment/onchain.rs b/src/payment/onchain.rs index 5c1365de3..a3cc0d2f2 100644 --- a/src/payment/onchain.rs +++ b/src/payment/onchain.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Holds a payment handler allowing to send and receive on-chain payments. 
use crate::config::Config; diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index b7b8dcc03..3be244bb5 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Holds a payment handler allowing to send spontaneous ("keysend") payments. use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; diff --git a/src/payment/store.rs b/src/payment/store.rs index 3c35043ce..0cea18002 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::hex_utils; use crate::io::{ PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs index b93610115..66488e232 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified_qr.rs @@ -1,11 +1,9 @@ -// This file is Copyright its original authors, visible in version control -// history. +// This file is Copyright its original authors, visible in version control history. // -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. //! Holds a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], and [BOLT 12] payment //! options. diff --git a/src/peer_store.rs b/src/peer_store.rs index 21bd50872..d4d6bbb97 100644 --- a/src/peer_store.rs +++ b/src/peer_store.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::io::{ PEER_INFO_PERSISTENCE_KEY, PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, diff --git a/src/sweep.rs b/src/sweep.rs index 1c772d4e9..5c1d62a20 100644 --- a/src/sweep.rs +++ b/src/sweep.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! The output sweeper used to live here before we upstreamed it to `rust-lightning` and migrated //! to the upstreamed version with LDK Node v0.3.0 (May 2024). We should drop this module entirely //! once sufficient time has passed for us to be confident any users completed the migration. 
diff --git a/src/tx_broadcaster.rs b/src/tx_broadcaster.rs index 4492bcfc6..88415ba46 100644 --- a/src/tx_broadcaster.rs +++ b/src/tx_broadcaster.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::config::TX_BROADCAST_TIMEOUT_SECS; use crate::logger::{log_bytes, log_error, log_trace, Logger}; diff --git a/src/types.rs b/src/types.rs index abc015ce4..591b73b4d 100644 --- a/src/types.rs +++ b/src/types.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::logger::FilesystemLogger; use crate::message_handler::NodeCustomMessageHandler; diff --git a/src/uniffi_types.rs b/src/uniffi_types.rs index 566ef8d72..17e5713d9 100644 --- a/src/uniffi_types.rs +++ b/src/uniffi_types.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + // Importing these items ensures they are accessible in the uniffi bindings // without introducing unused import warnings in lib.rs. // diff --git a/src/wallet.rs b/src/wallet.rs index 996ec57da..6da08715c 100644 --- a/src/wallet.rs +++ b/src/wallet.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::logger::{log_error, log_info, log_trace, Logger}; use crate::config::BDK_WALLET_SYNC_TIMEOUT_SECS; diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 6c4dbc1d1..09c17dcf2 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + #![cfg(any(test, cln_test, vss_test))] #![allow(dead_code)] diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index 95d8f1136..bcb84833f 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + #![cfg(cln_test)] mod common; diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index b3788f9d4..6b5b405dd 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. 
You may not use this file except in +// accordance with one or both of these licenses. + mod common; use common::{ diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index 2a57ccffc..c572fbcd8 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + #![cfg(vss_test)] mod common; From 6be51c6218e7111be4822cab840df11f2518ede4 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Fri, 6 Sep 2024 22:01:10 -0700 Subject: [PATCH 036/127] Vss-client upgrade to 0.3.x --- Cargo.toml | 2 +- src/io/vss_store.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9d350e18c..d1fa50fd2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,7 +71,7 @@ libc = "0.2" uniffi = { version = "0.26.0", features = ["build"], optional = true } [target.'cfg(vss)'.dependencies] -vss-client = "0.2" +vss-client = "0.3" prost = { version = "0.11.6", default-features = false} [target.'cfg(windows)'.dependencies] diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index ec8f04b64..ba09b5988 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -61,7 +61,7 @@ impl VssStore { ) }) as _); - let client = VssClient::new(&base_url, retry_policy); + let client = VssClient::new(base_url, retry_policy); Self { client, store_id, runtime, storable_builder } } From 92612dce04b897969ae3942df8d763d7eb60b4db Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Fri, 6 Sep 2024 22:03:40 -0700 Subject: [PATCH 037/127] Fix vss war name for vss-server binary in CI. --- .github/workflows/vss-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml index df739abce..44b7f445d 100644 --- a/.github/workflows/vss-integration.yml +++ b/.github/workflows/vss-integration.yml @@ -68,7 +68,7 @@ jobs: ./gradlew --version ./gradlew build - docker cp app/build/libs/app-1.0.war tomcat:/usr/local/tomcat/webapps/vss.war + docker cp app/build/libs/vss-1.0.war tomcat:/usr/local/tomcat/webapps/vss.war cd ../ - name: Run VSS Integration tests against vss-instance. run: | From 7c7b98b9560e40ddb2f8a78ff40ddffa09e93890 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Thu, 12 Sep 2024 10:53:47 -0700 Subject: [PATCH 038/127] Fix lightning-liquidity to 0.1.0-alpha.4 until upgrade PR. 
--- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index d1fa50fd2..39f3b947d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,7 @@ lightning-persister = { version = "0.0.123" } lightning-background-processor = { version = "0.0.123", features = ["futures"] } lightning-rapid-gossip-sync = { version = "0.0.123" } lightning-transaction-sync = { version = "0.0.123", features = ["esplora-async-https", "time"] } -lightning-liquidity = { version = "0.1.0-alpha.4", features = ["std"] } +lightning-liquidity = { version = "=0.1.0-alpha.4", features = ["std"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std"] } #lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main" } From 12dfc1a250d6ed19d98cefdbac57624bea2c9d7f Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Mon, 22 Jul 2024 21:20:36 +0100 Subject: [PATCH 039/127] feat: sanitize and set node alias What this commit does: Implements a method `set_node_alias` on NodeBuilder to allow callers customize/set the value of the node alias. This method sanitizes the user-provided alias by ensuring the following: + Node alias is UTF-8-encoded String + Node alias is non-empty + Node alias cannot exceed 32 bytes + Node alias is only valid up to the first null byte. Every character after the null byte is discraded Additionally, a test case is provided to cover sanitizing empty node alias, as well as an alias with emojis (copied and modified from rust-lightning) and a sandwiched null byte. --- src/builder.rs | 83 +++++++++++++++++++++++++++++++++++++++++++++++++- src/config.rs | 6 ++++ 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/src/builder.rs b/src/builder.rs index fc9f839b0..f6eeb7cd3 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -109,7 +109,7 @@ impl Default for LiquiditySourceConfig { /// An error encountered during building a [`Node`]. /// /// [`Node`]: crate::Node -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub enum BuildError { /// The given seed bytes are invalid, e.g., have invalid length. InvalidSeedBytes, @@ -139,6 +139,8 @@ pub enum BuildError { WalletSetupFailed, /// We failed to setup the logger. LoggerSetupFailed, + /// The provided alias is invalid + InvalidNodeAlias(String), } impl fmt::Display for BuildError { @@ -159,6 +161,9 @@ impl fmt::Display for BuildError { Self::KVStoreSetupFailed => write!(f, "Failed to setup KVStore."), Self::WalletSetupFailed => write!(f, "Failed to setup onchain wallet."), Self::LoggerSetupFailed => write!(f, "Failed to setup the logger."), + Self::InvalidNodeAlias(ref reason) => { + write!(f, "Given node alias is invalid: {}", reason) + }, } } } @@ -309,6 +314,17 @@ impl NodeBuilder { self } + /// Sets the alias the [`Node`] will use in its announcement. The provided + /// alias must be a valid UTF-8 string. + pub fn set_node_alias>( + &mut self, node_alias: T, + ) -> Result<&mut Self, BuildError> { + let node_alias = sanitize_alias(node_alias).map_err(|e| e)?; + + self.config.node_alias = Some(node_alias); + Ok(self) + } + /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. pub fn build(&self) -> Result { @@ -1050,3 +1066,68 @@ fn seed_bytes_from_config( }, } } + +/// Sanitize the user-provided node alias to ensure that it is a valid protocol-specified UTF-8 string. 
+fn sanitize_alias>(node_alias: T) -> Result { + // Alias is convertible into UTF-8 encoded string + let node_alias: String = node_alias.into(); + let alias = node_alias.trim(); + + // Alias is non-empty + if alias.is_empty() { + return Err(BuildError::InvalidNodeAlias("Node alias cannot be empty.".to_string())); + } + + // Alias valid up to first null byte + let first_null = alias.as_bytes().iter().position(|b| *b == 0).unwrap_or(alias.len()); + let actual_alias = alias.split_at(first_null).0; + + // Alias must be 32-bytes long or less + if actual_alias.as_bytes().len() > 32 { + return Err(BuildError::InvalidNodeAlias("Node alias cannot exceed 32 bytes.".to_string())); + } + + Ok(actual_alias.to_string()) +} + +#[cfg(test)] +mod tests { + use crate::{BuildError, Node}; + + use super::NodeBuilder; + + fn create_node_with_alias>(alias: T) -> Result { + NodeBuilder::new().set_node_alias(&alias.into())?.build() + } + + #[test] + fn empty_node_alias() { + // Empty node alias + let alias = ""; + let node = create_node_with_alias(alias); + assert_eq!( + node.err().unwrap(), + BuildError::InvalidNodeAlias("Node alias cannot be empty.".to_string()) + ); + } + + #[test] + fn node_alias_with_sandwiched_null() { + // Alias with emojis + let expected_alias = "I\u{1F496}LDK-Node!"; + let user_provided_alias = "I\u{1F496}LDK-Node!\0\u{26A1}"; + let node = create_node_with_alias(user_provided_alias).unwrap(); + + assert_eq!(expected_alias, node.config().node_alias.unwrap()); + } + + #[test] + fn node_alias_longer_than_32_bytes() { + let alias = "This is a string longer than thirty-two bytes!"; // 46 bytes + let node = create_node_with_alias(alias); + assert_eq!( + node.err().unwrap(), + BuildError::InvalidNodeAlias("Node alias cannot exceed 32 bytes.".to_string()) + ); + } +} diff --git a/src/config.rs b/src/config.rs index fac25b562..69ff49a38 100644 --- a/src/config.rs +++ b/src/config.rs @@ -163,6 +163,11 @@ pub struct Config { /// **Note:** If unset, default parameters will be used, and you will be able to override the /// parameters on a per-payment basis in the corresponding method calls. pub sending_parameters: Option, + /// The node alias to be used in announcements. + /// + /// **Note**: This is required if, alongside a valid public socket address, node announcements + /// are to be broadcast. + pub node_alias: Option, } impl Default for Config { @@ -180,6 +185,7 @@ impl Default for Config { log_level: DEFAULT_LOG_LEVEL, anchor_channels_config: Some(AnchorChannelsConfig::default()), sending_parameters: None, + node_alias: None, } } } From e54bfe05c543405d094651f3054d6f81be7a4726 Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Thu, 25 Jul 2024 18:30:30 +0100 Subject: [PATCH 040/127] feat: broadcast node announcement with set alias What this commit does: Broadcasts node announcement with the user-provided alias, if set, else, uses the default [0u8;32]. Additionally, adds a random node alias generating function for use in the generation of random configuration. 
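For illustration, the alias handling added here boils down to roughly the following (a condensed sketch; the actual code lives in the `src/lib.rs` diff below, and the helper name is illustrative):

```rust
// Sketch: pad a UTF-8 alias into the fixed 32-byte field expected by
// `broadcast_node_announcement`; an unset alias falls back to all zeros.
// Assumes the alias was already validated to be at most 32 bytes.
fn alias_bytes(node_alias: Option<&str>) -> [u8; 32] {
    let mut buf = [0u8; 32];
    if let Some(alias) = node_alias {
        buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes());
    }
    buf
}
```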
--- src/lib.rs | 12 +++++++++++- tests/common/mod.rs | 13 +++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 0148cf8d4..f432fb7dc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -601,6 +601,7 @@ impl Node { let bcast_logger = Arc::clone(&self.logger); let bcast_ann_timestamp = Arc::clone(&self.latest_node_announcement_broadcast_timestamp); let mut stop_bcast = self.stop_sender.subscribe(); + let node_alias = self.config().node_alias; runtime.spawn(async move { // We check every 30 secs whether our last broadcast is NODE_ANN_BCAST_INTERVAL away. #[cfg(not(test))] @@ -650,7 +651,16 @@ impl Node { continue; } - bcast_pm.broadcast_node_announcement([0; 3], [0; 32], addresses); + // Extract alias if set, else select the default + let alias = if let Some(ref alias) = node_alias { + let mut buf = [0_u8; 32]; + buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + buf + } else { + [0; 32] + }; + + bcast_pm.broadcast_node_announcement([0; 3], alias, addresses); let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 09c17dcf2..9b5d01ae3 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -200,6 +200,15 @@ pub(crate) fn random_listening_addresses() -> Vec { listening_addresses } +pub(crate) fn random_node_alias() -> Option { + let mut rng = thread_rng(); + let ranged_val = rng.gen_range(0..10); + match ranged_val { + 0 => None, + val => Some(format!("ldk-node-{}", val)), + } +} + pub(crate) fn random_config(anchor_channels: bool) -> Config { let mut config = Config::default(); @@ -220,6 +229,10 @@ pub(crate) fn random_config(anchor_channels: bool) -> Config { println!("Setting random LDK listening addresses: {:?}", rand_listening_addresses); config.listening_addresses = Some(rand_listening_addresses); + let alias = random_node_alias(); + println!("Setting random LDK node alias: {:?}", alias); + config.node_alias = alias; + config.log_level = LogLevel::Gossip; config From 5bd4a88b9c6c9135a7faab801e639298e17a3ac4 Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Tue, 6 Aug 2024 13:10:44 +0100 Subject: [PATCH 041/127] fix: correct node announcement, simplify setting alias, clean alias sanitization - Skips broadcasting node announcement in the event that either the node alias or the listening addresses are not set. - Aligns the InvalidNodeAlias error variant with the others to make it work with language bindings. - Simplifies the method to set the node alias. - Cleans up the alias sanitizing function to ensure that protocol- compliant aliases (in this case, empty strings) are not flagged. Additionally, removes the check for sandwiched null byte. - Finally, adds the relevant update to struct and interface to reflect changes in Rust types. --- bindings/ldk_node.udl | 4 ++++ src/builder.rs | 52 ++++++++++++++++--------------------------- src/lib.rs | 20 +++++++---------- 3 files changed, 31 insertions(+), 45 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 1c9497264..55725c6a2 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -16,6 +16,7 @@ dictionary Config { LogLevel log_level; AnchorChannelsConfig? anchor_channels_config; SendingParameters? sending_parameters; + string? 
node_alias; }; dictionary AnchorChannelsConfig { @@ -40,6 +41,8 @@ interface Builder { [Throws=BuildError] void set_listening_addresses(sequence listening_addresses); [Throws=BuildError] + void set_node_alias(string node_alias); + [Throws=BuildError] Node build(); [Throws=BuildError] Node build_with_fs_store(); @@ -238,6 +241,7 @@ enum BuildError { "KVStoreSetupFailed", "WalletSetupFailed", "LoggerSetupFailed", + "InvalidNodeAlias" }; [Enum] diff --git a/src/builder.rs b/src/builder.rs index f6eeb7cd3..7895900c9 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -121,6 +121,8 @@ pub enum BuildError { InvalidChannelMonitor, /// The given listening addresses are invalid, e.g. too many were passed. InvalidListeningAddresses, + /// The provided alias is invalid + InvalidNodeAlias, /// We failed to read data from the [`KVStore`]. /// /// [`KVStore`]: lightning::util::persist::KVStore @@ -139,8 +141,6 @@ pub enum BuildError { WalletSetupFailed, /// We failed to setup the logger. LoggerSetupFailed, - /// The provided alias is invalid - InvalidNodeAlias(String), } impl fmt::Display for BuildError { @@ -161,9 +161,7 @@ impl fmt::Display for BuildError { Self::KVStoreSetupFailed => write!(f, "Failed to setup KVStore."), Self::WalletSetupFailed => write!(f, "Failed to setup onchain wallet."), Self::LoggerSetupFailed => write!(f, "Failed to setup the logger."), - Self::InvalidNodeAlias(ref reason) => { - write!(f, "Given node alias is invalid: {}", reason) - }, + Self::InvalidNodeAlias => write!(f, "Given node alias is invalid."), } } } @@ -316,9 +314,7 @@ impl NodeBuilder { /// Sets the alias the [`Node`] will use in its announcement. The provided /// alias must be a valid UTF-8 string. - pub fn set_node_alias>( - &mut self, node_alias: T, - ) -> Result<&mut Self, BuildError> { + pub fn set_node_alias(&mut self, node_alias: String) -> Result<&mut Self, BuildError> { let node_alias = sanitize_alias(node_alias).map_err(|e| e)?; self.config.node_alias = Some(node_alias); @@ -522,6 +518,11 @@ impl ArcedNodeBuilder { self.inner.write().unwrap().set_log_level(level); } + /// Sets the node alias. + pub fn set_node_alias(&self, node_alias: String) -> Result<(), BuildError> { + self.inner.write().unwrap().set_node_alias(node_alias).map(|_| ()) + } + /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. 
pub fn build(&self) -> Result, BuildError> { @@ -1073,21 +1074,12 @@ fn sanitize_alias>(node_alias: T) -> Result let node_alias: String = node_alias.into(); let alias = node_alias.trim(); - // Alias is non-empty - if alias.is_empty() { - return Err(BuildError::InvalidNodeAlias("Node alias cannot be empty.".to_string())); - } - - // Alias valid up to first null byte - let first_null = alias.as_bytes().iter().position(|b| *b == 0).unwrap_or(alias.len()); - let actual_alias = alias.split_at(first_null).0; - // Alias must be 32-bytes long or less - if actual_alias.as_bytes().len() > 32 { - return Err(BuildError::InvalidNodeAlias("Node alias cannot exceed 32 bytes.".to_string())); + if alias.as_bytes().len() > 32 { + return Err(BuildError::InvalidNodeAlias); } - Ok(actual_alias.to_string()) + Ok(alias.to_string()) } #[cfg(test)] @@ -1096,19 +1088,16 @@ mod tests { use super::NodeBuilder; - fn create_node_with_alias>(alias: T) -> Result { - NodeBuilder::new().set_node_alias(&alias.into())?.build() + fn create_node_with_alias(alias: String) -> Result { + NodeBuilder::new().set_node_alias(alias)?.build() } #[test] fn empty_node_alias() { // Empty node alias let alias = ""; - let node = create_node_with_alias(alias); - assert_eq!( - node.err().unwrap(), - BuildError::InvalidNodeAlias("Node alias cannot be empty.".to_string()) - ); + let node = create_node_with_alias(alias.to_string()); + assert_eq!(node.err().unwrap(), BuildError::InvalidNodeAlias); } #[test] @@ -1116,7 +1105,7 @@ mod tests { // Alias with emojis let expected_alias = "I\u{1F496}LDK-Node!"; let user_provided_alias = "I\u{1F496}LDK-Node!\0\u{26A1}"; - let node = create_node_with_alias(user_provided_alias).unwrap(); + let node = create_node_with_alias(user_provided_alias.to_string()).unwrap(); assert_eq!(expected_alias, node.config().node_alias.unwrap()); } @@ -1124,10 +1113,7 @@ mod tests { #[test] fn node_alias_longer_than_32_bytes() { let alias = "This is a string longer than thirty-two bytes!"; // 46 bytes - let node = create_node_with_alias(alias); - assert_eq!( - node.err().unwrap(), - BuildError::InvalidNodeAlias("Node alias cannot exceed 32 bytes.".to_string()) - ); + let node = create_node_with_alias(alias.to_string()); + assert_eq!(node.err().unwrap(), BuildError::InvalidNodeAlias); } } diff --git a/src/lib.rs b/src/lib.rs index f432fb7dc..5d306dc5d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -645,22 +645,18 @@ impl Node { } let addresses = bcast_config.listening_addresses.clone().unwrap_or(Vec::new()); - - if addresses.is_empty() { - // Skip if we are not listening on any addresses. - continue; - } - - // Extract alias if set, else select the default - let alias = if let Some(ref alias) = node_alias { + let alias = node_alias.clone().map(|alias| { let mut buf = [0_u8; 32]; buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); buf - } else { - [0; 32] - }; + }); + + if addresses.is_empty() || alias.is_none() { + // Skip if we are not listening on any addresses or if the node alias is not set. 
+ continue; + } - bcast_pm.broadcast_node_announcement([0; 3], alias, addresses); + bcast_pm.broadcast_node_announcement([0; 3], alias.unwrap(), addresses); let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); From 8e73296d7331b85934388acede59c3a6291d335b Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Tue, 6 Aug 2024 15:41:29 +0100 Subject: [PATCH 042/127] docs: clarify node announcement broadcast logic --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 4078ce67b..feabca0f8 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,9 @@ LDK Node currently comes with a decidedly opinionated set of design choices: - Gossip data may be sourced via Lightning's peer-to-peer network or the [Rapid Gossip Sync](https://docs.rs/lightning-rapid-gossip-sync/*/lightning_rapid_gossip_sync/) protocol. - Entropy for the Lightning and on-chain wallets may be sourced from raw bytes or a [BIP39](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki) mnemonic. In addition, LDK Node offers the means to generate and persist the entropy bytes to disk. +**Note**: +Regarding node announcements, we have decided not to broadcast these announcements if either the node's listening addresses or its node alias are not set. + ## Language Support LDK Node itself is written in [Rust][rust] and may therefore be natively added as a library dependency to any `std` Rust program. However, beyond its Rust API it also offers language bindings for [Swift][swift], [Kotlin][kotlin], and [Python][python] based on the [UniFFI](https://github.com/mozilla/uniffi-rs/). Moreover, [Flutter bindings][flutter_bindings] are also available. From 7ad7029415022fc29da245454a3dc8b3aed1f92c Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Thu, 8 Aug 2024 14:14:25 +0100 Subject: [PATCH 043/127] refactor: update UserConfig if node alias/listening addresses are unconfigured --- src/config.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/config.rs b/src/config.rs index 69ff49a38..54d7158fd 100644 --- a/src/config.rs +++ b/src/config.rs @@ -283,5 +283,11 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig { user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = config.anchor_channels_config.is_some(); + if config.listening_addresses.is_none() || config.node_alias.is_none() { + user_config.accept_forwards_to_priv_channels = false; + user_config.channel_handshake_config.announced_channel = false; + user_config.channel_handshake_limits.force_announced_channel_preference = true; + } + user_config } From 81a0f4d7fb4e04dd7f1b9fd461d8d075b8691263 Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Thu, 29 Aug 2024 07:57:25 +0100 Subject: [PATCH 044/127] refactor: update node alias sanitization What this commit does: + Updates the sanitization function for node alias to return NodeAlias + Updates the node alias type in the configuration to NodeAlias and implements a conversion to/from String for bindings + With this update, regardless of where the alias is set, i.e. in the set_node_alias or directly, sanitization occurs. 
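Condensed, the sanitization-and-conversion path described above looks roughly like this (a sketch with error handling simplified; the real `sanitize_alias` in the diff below returns a `BuildError` instead of `None`):

```rust
// Sketch: trim the user-provided string, reject anything over 32 bytes,
// and right-pad the rest with zeros into lightning's `NodeAlias` type.
use lightning::routing::gossip::NodeAlias;

fn to_node_alias(alias_str: &str) -> Option<NodeAlias> {
    let alias = alias_str.trim();
    if alias.as_bytes().len() > 32 {
        return None;
    }
    let mut bytes = [0u8; 32];
    bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes());
    Some(NodeAlias(bytes))
}
```

The same logic backs the UniFFI custom-type conversion, so bindings users keep passing plain strings while Rust users get the checked `NodeAlias` type.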
--- bindings/ldk_node.udl | 7 +++++- src/builder.rs | 54 +++++++++++++++++++++++++------------------ src/config.rs | 7 +++--- src/uniffi_types.rs | 15 +++++++++++- 4 files changed, 55 insertions(+), 28 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 55725c6a2..adcfe9ea4 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -16,7 +16,7 @@ dictionary Config { LogLevel log_level; AnchorChannelsConfig? anchor_channels_config; SendingParameters? sending_parameters; - string? node_alias; + NodeAlias? node_alias; }; dictionary AnchorChannelsConfig { @@ -205,6 +205,7 @@ enum NodeError { "InvalidNetwork", "InvalidUri", "InvalidQuantity", + "InvalidNodeAlias", "DuplicatePayment", "UnsupportedCurrency", "InsufficientFunds", @@ -235,6 +236,7 @@ enum BuildError { "InvalidSystemTime", "InvalidChannelMonitor", "InvalidListeningAddresses", + "InvalidNodeAlias", "ReadFailed", "WriteFailed", "StoragePathAccessFailed", @@ -533,3 +535,6 @@ typedef string Mnemonic; [Custom] typedef string UntrustedString; + +[Custom] +typedef string NodeAlias; diff --git a/src/builder.rs b/src/builder.rs index 7895900c9..9481d5343 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -32,6 +32,7 @@ use lightning::chain::{chainmonitor, BestBlock, Watch}; use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; +use lightning::routing::gossip::NodeAlias; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters, @@ -121,7 +122,7 @@ pub enum BuildError { InvalidChannelMonitor, /// The given listening addresses are invalid, e.g. too many were passed. InvalidListeningAddresses, - /// The provided alias is invalid + /// The provided alias is invalid. InvalidNodeAlias, /// We failed to read data from the [`KVStore`]. /// @@ -315,7 +316,7 @@ impl NodeBuilder { /// Sets the alias the [`Node`] will use in its announcement. The provided /// alias must be a valid UTF-8 string. pub fn set_node_alias(&mut self, node_alias: String) -> Result<&mut Self, BuildError> { - let node_alias = sanitize_alias(node_alias).map_err(|e| e)?; + let node_alias = sanitize_alias(&node_alias)?; self.config.node_alias = Some(node_alias); Ok(self) @@ -1069,51 +1070,58 @@ fn seed_bytes_from_config( } /// Sanitize the user-provided node alias to ensure that it is a valid protocol-specified UTF-8 string. -fn sanitize_alias>(node_alias: T) -> Result { - // Alias is convertible into UTF-8 encoded string - let node_alias: String = node_alias.into(); - let alias = node_alias.trim(); +pub fn sanitize_alias(alias_str: &str) -> Result { + let alias = alias_str.trim(); - // Alias must be 32-bytes long or less + // Alias must be 32-bytes long or less. 
if alias.as_bytes().len() > 32 { return Err(BuildError::InvalidNodeAlias); } - Ok(alias.to_string()) + let mut bytes = [0u8; 32]; + bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + Ok(NodeAlias(bytes)) } #[cfg(test)] mod tests { - use crate::{BuildError, Node}; + use lightning::routing::gossip::NodeAlias; - use super::NodeBuilder; - - fn create_node_with_alias(alias: String) -> Result { - NodeBuilder::new().set_node_alias(alias)?.build() - } + use crate::{builder::sanitize_alias, BuildError}; #[test] - fn empty_node_alias() { + fn sanitize_empty_node_alias() { // Empty node alias let alias = ""; - let node = create_node_with_alias(alias.to_string()); - assert_eq!(node.err().unwrap(), BuildError::InvalidNodeAlias); + let mut buf = [0u8; 32]; + buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + + let expected_node_alias = NodeAlias([0; 32]); + let node_alias = sanitize_alias(alias).unwrap(); + assert_eq!(node_alias, expected_node_alias); } #[test] - fn node_alias_with_sandwiched_null() { + fn sanitize_alias_with_sandwiched_null() { // Alias with emojis - let expected_alias = "I\u{1F496}LDK-Node!"; + let alias = "I\u{1F496}LDK-Node!"; + let mut buf = [0u8; 32]; + buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + let expected_alias = NodeAlias(buf); + let user_provided_alias = "I\u{1F496}LDK-Node!\0\u{26A1}"; - let node = create_node_with_alias(user_provided_alias.to_string()).unwrap(); + let node_alias = sanitize_alias(user_provided_alias).unwrap(); + + let node_alias_display = format!("{}", node_alias); - assert_eq!(expected_alias, node.config().node_alias.unwrap()); + assert_eq!(alias, &node_alias_display); + assert_ne!(expected_alias, node_alias); } #[test] - fn node_alias_longer_than_32_bytes() { + fn sanitize_alias_gt_32_bytes() { let alias = "This is a string longer than thirty-two bytes!"; // 46 bytes - let node = create_node_with_alias(alias.to_string()); + let node = sanitize_alias(alias); assert_eq!(node.err().unwrap(), BuildError::InvalidNodeAlias); } } diff --git a/src/config.rs b/src/config.rs index 54d7158fd..275a5d5f1 100644 --- a/src/config.rs +++ b/src/config.rs @@ -8,6 +8,7 @@ use crate::payment::SendingParameters; use lightning::ln::msgs::SocketAddress; +use lightning::routing::gossip::NodeAlias; use lightning::util::config::UserConfig; use lightning::util::logger::Level as LogLevel; @@ -165,9 +166,9 @@ pub struct Config { pub sending_parameters: Option, /// The node alias to be used in announcements. /// - /// **Note**: This is required if, alongside a valid public socket address, node announcements - /// are to be broadcast. - pub node_alias: Option, + /// **Note**: Node announcements will only be broadcast if the node_alias and the + /// listening_addresses are set. 
+ pub node_alias: Option, } impl Default for Config { diff --git a/src/uniffi_types.rs b/src/uniffi_types.rs index 17e5713d9..2a6ac8da3 100644 --- a/src/uniffi_types.rs +++ b/src/uniffi_types.rs @@ -19,7 +19,7 @@ pub use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; pub use lightning::offers::invoice::Bolt12Invoice; pub use lightning::offers::offer::{Offer, OfferId}; pub use lightning::offers::refund::Refund; -pub use lightning::routing::gossip::{NodeId, RoutingFees}; +pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees}; pub use lightning::util::string::UntrustedString; pub use lightning_invoice::Bolt11Invoice; @@ -30,6 +30,7 @@ pub use bip39::Mnemonic; use crate::UniffiCustomTypeConverter; +use crate::builder::sanitize_alias; use crate::error::Error; use crate::hex_utils; use crate::{SocketAddress, UserChannelId}; @@ -324,3 +325,15 @@ impl UniffiCustomTypeConverter for UntrustedString { obj.to_string() } } + +impl UniffiCustomTypeConverter for NodeAlias { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + Ok(sanitize_alias(&val).map_err(|_| Error::InvalidNodeAlias)?) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} From 32e70964a364b7cdb7f1878711878dcc1c823acd Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Thu, 29 Aug 2024 08:12:47 +0100 Subject: [PATCH 045/127] refactor: decompose connecting w/ peer & opening a channel What this commit does: + Decomposes connect_open_channel into two different functions: open_channel and open_announced_channel. This allows opening announced channels based on configured node alias and listening addresses values. + This enforces channel announcement only on the condition that both configuration values are set. + Additionally, a new error variant `OpenAnnouncedChannelFailed` is introduced to capture failure. Note: I thought I added the `InvalidNodeAlias` variant in the previous commit --- bindings/ldk_node.udl | 5 +++- src/config.rs | 55 +++++++++++++++++++++++++++++++++++- src/error.rs | 8 ++++++ src/lib.rs | 66 ++++++++++++++++++++++++++++++++++--------- 4 files changed, 118 insertions(+), 16 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index adcfe9ea4..db0d609ae 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -72,7 +72,9 @@ interface Node { [Throws=NodeError] void disconnect(PublicKey node_id); [Throws=NodeError] - UserChannelId connect_open_channel(PublicKey node_id, SocketAddress address, u64 channel_amount_sats, u64? push_to_counterparty_msat, ChannelConfig? channel_config, boolean announce_channel); + UserChannelId open_channel(PublicKey node_id, SocketAddress address, u64 channel_amount_sats, u64? push_to_counterparty_msat, ChannelConfig? channel_config); + [Throws=NodeError] + UserChannelId open_announced_channel(PublicKey node_id, SocketAddress address, u64 channel_amount_sats, u64? push_to_counterparty_msat, ChannelConfig? channel_config); [Throws=NodeError] void close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id); [Throws=NodeError] @@ -211,6 +213,7 @@ enum NodeError { "InsufficientFunds", "LiquiditySourceUnavailable", "LiquidityFeeTooHigh", + "OpenAnnouncedChannelFailed" }; dictionary NodeStatus { diff --git a/src/config.rs b/src/config.rs index 275a5d5f1..81246c51b 100644 --- a/src/config.rs +++ b/src/config.rs @@ -111,6 +111,9 @@ pub struct Config { /// The used Bitcoin network. 
pub network: Network, /// The addresses on which the node will listen for incoming connections. + /// + /// **Note**: Node announcements will only be broadcast if the node_alias and the + /// listening_addresses are set. pub listening_addresses: Option>, /// The time in-between background sync attempts of the onchain wallet, in seconds. /// @@ -272,6 +275,17 @@ pub fn default_config() -> Config { Config::default() } +/// Checks if a node is can announce a channel based on the configured values of both the node's +/// alias and its listening addresses. If either of them is unset, the node cannot announce the +/// channel. +pub fn can_announce_channel(config: &Config) -> bool { + let are_addresses_set = + config.listening_addresses.clone().is_some_and(|addr_vec| !addr_vec.is_empty()); + let is_alias_set = config.node_alias.is_some(); + + is_alias_set && are_addresses_set +} + pub(crate) fn default_user_config(config: &Config) -> UserConfig { // Initialize the default config values. // @@ -284,7 +298,7 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig { user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = config.anchor_channels_config.is_some(); - if config.listening_addresses.is_none() || config.node_alias.is_none() { + if !can_announce_channel(config) { user_config.accept_forwards_to_priv_channels = false; user_config.channel_handshake_config.announced_channel = false; user_config.channel_handshake_limits.force_announced_channel_preference = true; @@ -292,3 +306,42 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig { user_config } + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use lightning::{ln::msgs::SocketAddress, routing::gossip::NodeAlias}; + + use crate::config::can_announce_channel; + + use super::Config; + + #[test] + fn node_can_announce_channel() { + // Default configuration with node alias and listening addresses unset + let mut node_config = Config::default(); + assert_eq!(can_announce_channel(&node_config), false); + + // Set node alias with listening addresses unset + let alias_frm_str = |alias: &str| { + let mut bytes = [0u8; 32]; + bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + NodeAlias(bytes) + }; + node_config.node_alias = Some(alias_frm_str("LDK_Node")); + assert_eq!(can_announce_channel(&node_config), false); + + // Set node alias with an empty list of listening addresses + node_config.listening_addresses = Some(vec![]); + assert_eq!(can_announce_channel(&node_config), false); + + // Set node alias with a non-empty list of listening addresses + let socket_address = + SocketAddress::from_str("localhost:8000").expect("Socket address conversion failed."); + if let Some(ref mut addresses) = node_config.listening_addresses { + addresses.push(socket_address); + } + assert_eq!(can_announce_channel(&node_config), true); + } +} diff --git a/src/error.rs b/src/error.rs index 660c2036e..5e7dbeacd 100644 --- a/src/error.rs +++ b/src/error.rs @@ -98,6 +98,8 @@ pub enum Error { InvalidUri, /// The given quantity is invalid. InvalidQuantity, + /// The given node alias is invalid. + InvalidNodeAlias, /// A payment with the given hash has already been initiated. DuplicatePayment, /// The provided offer was denonminated in an unsupported currency. @@ -108,6 +110,10 @@ pub enum Error { LiquiditySourceUnavailable, /// The given operation failed due to the LSP's required opening fee being too high. LiquidityFeeTooHigh, + /// Returned when trying to open an announced channel with a peer. 
This + /// error occurs when a [`crate::Node`'s] alias or listening addresses + /// are unconfigured. + OpenAnnouncedChannelFailed, } impl fmt::Display for Error { @@ -163,6 +169,7 @@ impl fmt::Display for Error { Self::InvalidNetwork => write!(f, "The given network is invalid."), Self::InvalidUri => write!(f, "The given URI is invalid."), Self::InvalidQuantity => write!(f, "The given quantity is invalid."), + Self::InvalidNodeAlias => write!(f, "The given node alias is invalid."), Self::DuplicatePayment => { write!(f, "A payment with the given hash has already been initiated.") }, @@ -178,6 +185,7 @@ impl fmt::Display for Error { Self::LiquidityFeeTooHigh => { write!(f, "The given operation failed due to the LSP's required opening fee being too high.") }, + Self::OpenAnnouncedChannelFailed => write!(f, "Failed to open an announced channel."), } } } diff --git a/src/lib.rs b/src/lib.rs index 5d306dc5d..ce5956cbc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -47,7 +47,7 @@ //! //! let node_id = PublicKey::from_str("NODE_ID").unwrap(); //! let node_addr = SocketAddress::from_str("IP_ADDR:PORT").unwrap(); -//! node.connect_open_channel(node_id, node_addr, 10000, None, None, false).unwrap(); +//! node.open_channel(node_id, node_addr, 10000, None, None).unwrap(); //! //! let event = node.wait_next_event(); //! println!("EVENT: {:?}", event); @@ -63,7 +63,8 @@ //! [`build`]: Builder::build //! [`start`]: Node::start //! [`stop`]: Node::stop -//! [`connect_open_channel`]: Node::connect_open_channel +//! [`open_channel`]: Node::open_channel +//! [`open_announced_channel`]: Node::open_announced_channel //! [`send`]: Bolt11Payment::send //! #![cfg_attr(not(feature = "uniffi"), deny(missing_docs))] @@ -114,6 +115,7 @@ pub use io::utils::generate_entropy_mnemonic; #[cfg(feature = "uniffi")] use uniffi_types::*; +pub use builder::sanitize_alias; #[cfg(feature = "uniffi")] pub use builder::ArcedNodeBuilder as Builder; pub use builder::BuildError; @@ -121,8 +123,9 @@ pub use builder::BuildError; pub use builder::NodeBuilder as Builder; use config::{ - default_user_config, LDK_WALLET_SYNC_TIMEOUT_SECS, NODE_ANN_BCAST_INTERVAL, - PEER_RECONNECTION_INTERVAL, RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, RGS_SYNC_INTERVAL, + can_announce_channel, default_user_config, LDK_WALLET_SYNC_TIMEOUT_SECS, + NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, RGS_SYNC_INTERVAL, WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; use connection::ConnectionManager; @@ -601,7 +604,8 @@ impl Node { let bcast_logger = Arc::clone(&self.logger); let bcast_ann_timestamp = Arc::clone(&self.latest_node_announcement_broadcast_timestamp); let mut stop_bcast = self.stop_sender.subscribe(); - let node_alias = self.config().node_alias; + let node_alias = self.config.node_alias.clone(); + let can_announce_channel = can_announce_channel(&self.config); runtime.spawn(async move { // We check every 30 secs whether our last broadcast is NODE_ANN_BCAST_INTERVAL away. #[cfg(not(test))] @@ -646,17 +650,19 @@ impl Node { let addresses = bcast_config.listening_addresses.clone().unwrap_or(Vec::new()); let alias = node_alias.clone().map(|alias| { - let mut buf = [0_u8; 32]; - buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); - buf + alias.0 }); - if addresses.is_empty() || alias.is_none() { + if !can_announce_channel { // Skip if we are not listening on any addresses or if the node alias is not set. 
continue; } - bcast_pm.broadcast_node_announcement([0; 3], alias.unwrap(), addresses); + if let Some(node_alias) = alias { + bcast_pm.broadcast_node_announcement([0; 3], node_alias, addresses); + } else { + continue + } let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); @@ -1189,10 +1195,9 @@ impl Node { /// opening the channel. /// /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. - pub fn connect_open_channel( + fn connect_open_channel( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, push_to_counterparty_msat: Option, channel_config: Option, - announce_channel: bool, ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { @@ -1254,12 +1259,13 @@ impl Node { } let mut user_config = default_user_config(&self.config); - user_config.channel_handshake_config.announced_channel = announce_channel; + let can_announce_channel = can_announce_channel(&self.config); + user_config.channel_handshake_config.announced_channel = can_announce_channel; user_config.channel_config = (channel_config.unwrap_or_default()).clone().into(); // We set the max inflight to 100% for private channels. // FIXME: LDK will default to this behavior soon, too, at which point we should drop this // manual override. - if !announce_channel { + if !can_announce_channel { user_config .channel_handshake_config .max_inbound_htlc_value_in_flight_percent_of_channel = 100; @@ -1292,6 +1298,38 @@ impl Node { } } + /// Opens a channel with a peer. + pub fn open_channel( + &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, + push_to_counterparty_msat: Option, channel_config: Option>, + ) -> Result { + self.connect_open_channel( + node_id, + address, + channel_amount_sats, + push_to_counterparty_msat, + channel_config, + ) + } + + /// Opens an announced channel with a peer. + pub fn open_announced_channel( + &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, + push_to_counterparty_msat: Option, channel_config: Option>, + ) -> Result { + if !can_announce_channel(&self.config) { + return Err(Error::OpenAnnouncedChannelFailed); + } + + self.open_channel( + node_id, + address, + channel_amount_sats, + push_to_counterparty_msat, + channel_config, + ) + } + /// Manually sync the LDK and BDK wallets with the current chain state and update the fee rate /// cache. /// From 059a30e98f2bfbd8614ef7cb0fe417073e499351 Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Thu, 29 Aug 2024 08:13:56 +0100 Subject: [PATCH 046/127] docs: cleanup documentation --- README.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index feabca0f8..7bcfb6f78 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ A ready-to-go Lightning node library built using [LDK][ldk] and [BDK][bdk]. LDK Node is a self-custodial Lightning node in library form. Its central goal is to provide a small, simple, and straightforward interface that enables users to easily set up and run a Lightning node with an integrated on-chain wallet. While minimalism is at its core, LDK Node aims to be sufficiently modular and configurable to be useful for a variety of use cases. ## Getting Started -The primary abstraction of the library is the [`Node`][api_docs_node], which can be retrieved by setting up and configuring a [`Builder`][api_docs_builder] to your liking and calling one of the `build` methods. 
`Node` can then be controlled via commands such as `start`, `stop`, `connect_open_channel`, `send`, etc. +The primary abstraction of the library is the [`Node`][api_docs_node], which can be retrieved by setting up and configuring a [`Builder`][api_docs_builder] to your liking and calling one of the `build` methods. `Node` can then be controlled via commands such as `start`, `stop`, `open_channel`, `open_announced_channel`, `send`, etc. ```rust use ldk_node::Builder; @@ -37,7 +37,7 @@ fn main() { let node_id = PublicKey::from_str("NODE_ID").unwrap(); let node_addr = SocketAddress::from_str("IP_ADDR:PORT").unwrap(); - node.connect_open_channel(node_id, node_addr, 10000, None, None, false).unwrap(); + node.open_channel(node_id, node_addr, 10000, None, None).unwrap(); let event = node.wait_next_event(); println!("EVENT: {:?}", event); @@ -60,9 +60,6 @@ LDK Node currently comes with a decidedly opinionated set of design choices: - Gossip data may be sourced via Lightning's peer-to-peer network or the [Rapid Gossip Sync](https://docs.rs/lightning-rapid-gossip-sync/*/lightning_rapid_gossip_sync/) protocol. - Entropy for the Lightning and on-chain wallets may be sourced from raw bytes or a [BIP39](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki) mnemonic. In addition, LDK Node offers the means to generate and persist the entropy bytes to disk. -**Note**: -Regarding node announcements, we have decided not to broadcast these announcements if either the node's listening addresses or its node alias are not set. - ## Language Support LDK Node itself is written in [Rust][rust] and may therefore be natively added as a library dependency to any `std` Rust program. However, beyond its Rust API it also offers language bindings for [Swift][swift], [Kotlin][kotlin], and [Python][python] based on the [UniFFI](https://github.com/mozilla/uniffi-rs/). Moreover, [Flutter bindings][flutter_bindings] are also available. From 09fca09e53d844c459a86ea47312a063491c917e Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Thu, 29 Aug 2024 08:19:07 +0100 Subject: [PATCH 047/127] test: update tests due to `connect_open_channel` decomposition What this commit does: + Replaces calls to `connect_open_channel` with `open_channel` and `open_announced_channel` where appropriate. Status: Work In Progress (WIP) Observation: + The integration tests are now flaky and need further investigation to ascertain the reason(s) why and then to fix. 
--- tests/common/mod.rs | 17 ++++++++--------- tests/integration_tests_cln.rs | 11 ++--------- tests/integration_tests_rust.rs | 19 +++++++++---------- 3 files changed, 19 insertions(+), 28 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 9b5d01ae3..b616f7eb5 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -11,11 +11,13 @@ use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ - Builder, Config, Event, LightningBalance, LogLevel, Node, NodeError, PendingSweepBalance, + sanitize_alias, Builder, Config, Event, LightningBalance, LogLevel, Node, NodeError, + PendingSweepBalance, }; use lightning::ln::msgs::SocketAddress; use lightning::ln::{PaymentHash, PaymentPreimage}; +use lightning::routing::gossip::NodeAlias; use lightning::util::persist::KVStore; use lightning::util::test_utils::TestStore; use lightning_persister::fs_store::FilesystemStore; @@ -200,12 +202,12 @@ pub(crate) fn random_listening_addresses() -> Vec { listening_addresses } -pub(crate) fn random_node_alias() -> Option { +pub(crate) fn random_node_alias() -> Option { let mut rng = thread_rng(); let ranged_val = rng.gen_range(0..10); match ranged_val { 0 => None, - val => Some(format!("ldk-node-{}", val)), + val => Some(sanitize_alias(&format!("ldk-node-{}", val)).unwrap()), } } @@ -398,17 +400,15 @@ pub(crate) fn premine_and_distribute_funds( } pub fn open_channel( - node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, announce: bool, - electrsd: &ElectrsD, + node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, electrsd: &ElectrsD, ) { node_a - .connect_open_channel( + .open_announced_channel( node_b.node_id(), node_b.listening_addresses().unwrap().first().unwrap().clone(), funding_amount_sat, None, None, - announce, ) .unwrap(); assert!(node_a.list_peers().iter().find(|c| { c.node_id == node_b.node_id() }).is_some()); @@ -447,13 +447,12 @@ pub(crate) fn do_channel_full_cycle( let funding_amount_sat = 2_080_000; let push_msat = (funding_amount_sat / 2) * 1000; // balance the channel node_a - .connect_open_channel( + .open_channel( node_b.node_id(), node_b.listening_addresses().unwrap().first().unwrap().clone(), funding_amount_sat, Some(push_msat), None, - true, ) .unwrap(); diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index bcb84833f..13b5c44c6 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -82,15 +82,8 @@ fn test_cln() { // Open the channel let funding_amount_sat = 1_000_000; - node.connect_open_channel( - cln_node_id, - cln_address, - funding_amount_sat, - Some(500_000_000), - None, - false, - ) - .unwrap(); + node.open_channel(cln_node_id, cln_address, funding_amount_sat, Some(500_000_000), None) + .unwrap(); let funding_txo = common::expect_channel_pending_event!(node, cln_node_id); common::wait_for_tx(&electrs_client, funding_txo.txid); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 6b5b405dd..89786f826 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -83,13 +83,12 @@ fn channel_open_fails_when_funds_insufficient() { println!("\nA -- connect_open_channel -> B"); assert_eq!( Err(NodeError::InsufficientFunds), - node_a.connect_open_channel( + node_a.open_channel( node_b.node_id(), node_b.listening_addresses().unwrap().first().unwrap().clone(), 120000, None, None, - true ) ); } @@ -132,16 +131,16 @@ fn multi_hop_sending() { // \ / // (1M:0)- N3 
-(1M:0) - open_channel(&nodes[0], &nodes[1], 100_000, true, &electrsd); - open_channel(&nodes[1], &nodes[2], 1_000_000, true, &electrsd); + open_channel(&nodes[0], &nodes[1], 100_000, &electrsd); + open_channel(&nodes[1], &nodes[2], 1_000_000, &electrsd); // We need to sync wallets in-between back-to-back channel opens from the same node so BDK // wallet picks up on the broadcast funding tx and doesn't double-spend itself. // // TODO: Remove once fixed in BDK. nodes[1].sync_wallets().unwrap(); - open_channel(&nodes[1], &nodes[3], 1_000_000, true, &electrsd); - open_channel(&nodes[2], &nodes[4], 1_000_000, true, &electrsd); - open_channel(&nodes[3], &nodes[4], 1_000_000, true, &electrsd); + open_channel(&nodes[1], &nodes[3], 1_000_000, &electrsd); + open_channel(&nodes[2], &nodes[4], 1_000_000, &electrsd); + open_channel(&nodes[3], &nodes[4], 1_000_000, &electrsd); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); @@ -419,7 +418,7 @@ fn simple_bolt12_send_receive() { ); node_a.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); + open_channel(&node_a, &node_b, 4_000_000, &electrsd); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); @@ -626,7 +625,7 @@ fn generate_bip21_uri() { ); node_a.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); + open_channel(&node_a, &node_b, 4_000_000, &electrsd); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); node_a.sync_wallets().unwrap(); @@ -667,7 +666,7 @@ fn unified_qr_send_receive() { ); node_a.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); + open_channel(&node_a, &node_b, 4_000_000, &electrsd); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); node_a.sync_wallets().unwrap(); From 116003435503c422ba24ab108853db4972829be5 Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Thu, 29 Aug 2024 08:41:20 +0100 Subject: [PATCH 048/127] fix: change channel config type in open_(announced)_channel What this commit does: + Removes the wrapping Arc from the channel config. This is a missed update after rebasing. --- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index ce5956cbc..12494d00d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1301,7 +1301,7 @@ impl Node { /// Opens a channel with a peer. pub fn open_channel( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, - push_to_counterparty_msat: Option, channel_config: Option>, + push_to_counterparty_msat: Option, channel_config: Option, ) -> Result { self.connect_open_channel( node_id, @@ -1315,7 +1315,7 @@ impl Node { /// Opens an announced channel with a peer. pub fn open_announced_channel( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, - push_to_counterparty_msat: Option, channel_config: Option>, + push_to_counterparty_msat: Option, channel_config: Option, ) -> Result { if !can_announce_channel(&self.config) { return Err(Error::OpenAnnouncedChannelFailed); From 6c3deafa23f41f69a1d518220487ead0554e8427 Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Thu, 29 Aug 2024 09:17:58 +0100 Subject: [PATCH 049/127] fix: remove broken intra doc link --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 12494d00d..5405dedec 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,7 +20,7 @@ //! //! 
The primary abstraction of the library is the [`Node`], which can be retrieved by setting up //! and configuring a [`Builder`] to your liking and calling [`build`]. `Node` can then be -//! controlled via commands such as [`start`], [`stop`], [`connect_open_channel`], +//! controlled via commands such as [`start`], [`stop`], [`open_channel`], [`open_announced_channel`], //! [`send`], etc.: //! //! ```no_run From 6aff282be46876800a6b06466a2ef637f4c1a5e3 Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Thu, 29 Aug 2024 09:40:58 +0100 Subject: [PATCH 050/127] fix: remove unstable feature `is_some_and` --- src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.rs b/src/config.rs index 81246c51b..5a76343c9 100644 --- a/src/config.rs +++ b/src/config.rs @@ -280,7 +280,7 @@ pub fn default_config() -> Config { /// channel. pub fn can_announce_channel(config: &Config) -> bool { let are_addresses_set = - config.listening_addresses.clone().is_some_and(|addr_vec| !addr_vec.is_empty()); + config.listening_addresses.clone().map_or(false, |addr_vec| !addr_vec.is_empty()); let is_alias_set = config.node_alias.is_some(); is_alias_set && are_addresses_set From 0af5df624d6b6cb0d6ed0d17d59b5468e559daea Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Mon, 23 Sep 2024 21:26:31 +0100 Subject: [PATCH 051/127] refactor: improve channel announcement logic and fix binding tests This commit addresses changes necessary to: - fix failing tests for generated bindings - remove the unnecessary error variant previously introduced to capture failures associated with opening announced channels, and re-use existing variants, i.e. `InvalidNodeAlias` and `InvalidSocketAddress`, that better capture why opening an announced channel failed. - correct visibility specifiers for objects, and - clean up nitpicks Specific modifications across several files include: - updating the UDL file, as well as the Python and Kotlin tests that call `open_channel` and/or `open_announced_channel` - repositioning/rearranging methods and struct fields - introducing enums (`ChannelAnnouncementStatus` & `ChannelAnnouncementBlocker`) to capture and codify channel announcement eligibility, providing reasons for unannounceable channels - modifying `can_announce_channel` to utilize the aforementioned enums, as opposed to simply returning a boolean value.
- cleaning up and renaming `connect_open_channel` to `open_channel_inner`, and maintaining a boolean flag for channel announcement - updating documentation, unit, and integration tests that factor all these changes --- .../lightningdevkit/ldknode/LibraryTest.kt | 2 +- bindings/ldk_node.udl | 5 +- bindings/python/src/ldk_node/test_ldk_node.py | 2 +- src/builder.rs | 33 +++-- src/config.rs | 110 +++++++++++---- src/error.rs | 5 - src/lib.rs | 130 +++++++++++------- tests/common/mod.rs | 12 +- tests/integration_tests_rust.rs | 2 +- 9 files changed, 194 insertions(+), 107 deletions(-) diff --git a/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt b/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt index b629793cd..e2bcd4c89 100644 --- a/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt +++ b/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt @@ -175,7 +175,7 @@ class LibraryTest { assertEquals(100000uL, totalBalance1) assertEquals(100000uL, totalBalance2) - node1.connectOpenChannel(nodeId2, listenAddress2, 50000u, null, null, true) + node1.openChannel(nodeId2, listenAddress2, 50000u, null, null) val channelPendingEvent1 = node1.waitNextEvent() println("Got event: $channelPendingEvent1") diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index db0d609ae..6663604a2 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -8,6 +8,7 @@ dictionary Config { string? log_dir_path; Network network; sequence? listening_addresses; + NodeAlias? node_alias; u64 onchain_wallet_sync_interval_secs; u64 wallet_sync_interval_secs; u64 fee_rate_cache_update_interval_secs; @@ -16,7 +17,6 @@ dictionary Config { LogLevel log_level; AnchorChannelsConfig? anchor_channels_config; SendingParameters? sending_parameters; - NodeAlias? node_alias; }; dictionary AnchorChannelsConfig { @@ -62,6 +62,7 @@ interface Node { void event_handled(); PublicKey node_id(); sequence? listening_addresses(); + NodeAlias? node_alias(); Bolt11Payment bolt11_payment(); Bolt12Payment bolt12_payment(); SpontaneousPayment spontaneous_payment(); @@ -213,7 +214,6 @@ enum NodeError { "InsufficientFunds", "LiquiditySourceUnavailable", "LiquidityFeeTooHigh", - "OpenAnnouncedChannelFailed" }; dictionary NodeStatus { @@ -246,7 +246,6 @@ enum BuildError { "KVStoreSetupFailed", "WalletSetupFailed", "LoggerSetupFailed", - "InvalidNodeAlias" }; [Enum] diff --git a/bindings/python/src/ldk_node/test_ldk_node.py b/bindings/python/src/ldk_node/test_ldk_node.py index 92c4bf2d1..4f2931440 100644 --- a/bindings/python/src/ldk_node/test_ldk_node.py +++ b/bindings/python/src/ldk_node/test_ldk_node.py @@ -155,7 +155,7 @@ def test_channel_full_cycle(self): print("TOTAL 2:", total_balance_2) self.assertEqual(total_balance_2, 100000) - node_1.connect_open_channel(node_id_2, listening_addresses_2[0], 50000, None, None, True) + node_1.open_channel(node_id_2, listening_addresses_2[0], 50000, None, None) channel_pending_event_1 = node_1.wait_next_event() assert isinstance(channel_pending_event_1, Event.CHANNEL_PENDING) diff --git a/src/builder.rs b/src/builder.rs index 9481d5343..d2ceb5f22 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -307,14 +307,9 @@ impl NodeBuilder { Ok(self) } - /// Sets the level at which [`Node`] will log messages. 
- pub fn set_log_level(&mut self, level: LogLevel) -> &mut Self { - self.config.log_level = level; - self - } - - /// Sets the alias the [`Node`] will use in its announcement. The provided - /// alias must be a valid UTF-8 string. + /// Sets the alias the [`Node`] will use in its announcement. + /// + /// The provided alias must be a valid UTF-8 string. pub fn set_node_alias(&mut self, node_alias: String) -> Result<&mut Self, BuildError> { let node_alias = sanitize_alias(&node_alias)?; @@ -322,6 +317,12 @@ impl NodeBuilder { Ok(self) } + /// Sets the level at which [`Node`] will log messages. + pub fn set_log_level(&mut self, level: LogLevel) -> &mut Self { + self.config.log_level = level; + self + } + /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. pub fn build(&self) -> Result { @@ -514,16 +515,16 @@ impl ArcedNodeBuilder { self.inner.write().unwrap().set_listening_addresses(listening_addresses).map(|_| ()) } - /// Sets the level at which [`Node`] will log messages. - pub fn set_log_level(&self, level: LogLevel) { - self.inner.write().unwrap().set_log_level(level); - } - /// Sets the node alias. pub fn set_node_alias(&self, node_alias: String) -> Result<(), BuildError> { self.inner.write().unwrap().set_node_alias(node_alias).map(|_| ()) } + /// Sets the level at which [`Node`] will log messages. + pub fn set_log_level(&self, level: LogLevel) { + self.inner.write().unwrap().set_log_level(level); + } + /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. pub fn build(&self) -> Result, BuildError> { @@ -1070,7 +1071,7 @@ fn seed_bytes_from_config( } /// Sanitize the user-provided node alias to ensure that it is a valid protocol-specified UTF-8 string. -pub fn sanitize_alias(alias_str: &str) -> Result { +pub(crate) fn sanitize_alias(alias_str: &str) -> Result { let alias = alias_str.trim(); // Alias must be 32-bytes long or less. @@ -1085,9 +1086,7 @@ pub fn sanitize_alias(alias_str: &str) -> Result { #[cfg(test)] mod tests { - use lightning::routing::gossip::NodeAlias; - - use crate::{builder::sanitize_alias, BuildError}; + use super::{sanitize_alias, BuildError, NodeAlias}; #[test] fn sanitize_empty_node_alias() { diff --git a/src/config.rs b/src/config.rs index 5a76343c9..574789ac6 100644 --- a/src/config.rs +++ b/src/config.rs @@ -87,6 +87,7 @@ pub(crate) const WALLET_KEYS_SEED_LEN: usize = 64; /// | `log_dir_path` | None | /// | `network` | Bitcoin | /// | `listening_addresses` | None | +/// | `node_alias` | None | /// | `default_cltv_expiry_delta` | 144 | /// | `onchain_wallet_sync_interval_secs` | 80 | /// | `wallet_sync_interval_secs` | 30 | @@ -112,9 +113,14 @@ pub struct Config { pub network: Network, /// The addresses on which the node will listen for incoming connections. /// - /// **Note**: Node announcements will only be broadcast if the node_alias and the - /// listening_addresses are set. + /// **Note**: Node announcements will only be broadcast if the `node_alias` and the + /// `listening_addresses` are set. pub listening_addresses: Option>, + /// The node alias to be used in announcements. + /// + /// **Note**: Node announcements will only be broadcast if the `node_alias` and the + /// `listening_addresses` are set. + pub node_alias: Option, /// The time in-between background sync attempts of the onchain wallet, in seconds. /// /// **Note:** A minimum of 10 seconds is always enforced. 
@@ -167,11 +173,6 @@ pub struct Config { /// **Note:** If unset, default parameters will be used, and you will be able to override the /// parameters on a per-payment basis in the corresponding method calls. pub sending_parameters: Option, - /// The node alias to be used in announcements. - /// - /// **Note**: Node announcements will only be broadcast if the node_alias and the - /// listening_addresses are set. - pub node_alias: Option, } impl Default for Config { @@ -275,33 +276,68 @@ pub fn default_config() -> Config { Config::default() } +/// Specifies reasons why a channel cannot be announced. +#[derive(Debug, PartialEq)] +pub(crate) enum ChannelAnnouncementBlocker { + /// The node alias is not set. + MissingNodeAlias, + /// The listening addresses are not set. + MissingListeningAddresses, + /// The listening addresses are set, but the vector is empty. + EmptyListeningAddresses, +} + +/// Enumeration defining the announcement status of a channel. +#[derive(Debug, PartialEq)] +pub(crate) enum ChannelAnnouncementStatus { + /// The channel is announceable. + Announceable, + /// The channel is not announceable. + Unannounceable(ChannelAnnouncementBlocker), +} + /// Checks if a node can announce a channel based on the configured values of both the node's -/// alias and its listening addresses. If either of them is unset, the node cannot announce the -/// channel. -pub fn can_announce_channel(config: &Config) -> bool { - let are_addresses_set = - config.listening_addresses.clone().map_or(false, |addr_vec| !addr_vec.is_empty()); - let is_alias_set = config.node_alias.is_some(); - - is_alias_set && are_addresses_set +/// alias and its listening addresses. +/// +/// If either of them is unset, the node cannot announce the channel. This ability to announce or +/// not announce a channel is codified with `ChannelAnnouncementStatus`. +pub(crate) fn can_announce_channel(config: &Config) -> ChannelAnnouncementStatus { + if config.node_alias.is_none() { + return ChannelAnnouncementStatus::Unannounceable( + ChannelAnnouncementBlocker::MissingNodeAlias, + ); + } + + match &config.listening_addresses { + None => ChannelAnnouncementStatus::Unannounceable( + ChannelAnnouncementBlocker::MissingListeningAddresses, + ), + Some(addresses) if addresses.is_empty() => ChannelAnnouncementStatus::Unannounceable( + ChannelAnnouncementBlocker::EmptyListeningAddresses, + ), + Some(_) => ChannelAnnouncementStatus::Announceable, + } } pub(crate) fn default_user_config(config: &Config) -> UserConfig { // Initialize the default config values. // - // Note that methods such as Node::connect_open_channel might override some of the values set - // here, e.g. the ChannelHandshakeConfig, meaning these default values will mostly be relevant - // for inbound channels. + // Note that methods such as Node::open_channel and Node::open_announced_channel might override + // some of the values set here, e.g. the ChannelHandshakeConfig, meaning these default values + // will mostly be relevant for inbound channels.
let mut user_config = UserConfig::default(); user_config.channel_handshake_limits.force_announced_channel_preference = false; user_config.manually_accept_inbound_channels = true; user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = config.anchor_channels_config.is_some(); - if !can_announce_channel(config) { - user_config.accept_forwards_to_priv_channels = false; - user_config.channel_handshake_config.announced_channel = false; - user_config.channel_handshake_limits.force_announced_channel_preference = true; + match can_announce_channel(config) { + ChannelAnnouncementStatus::Announceable => (), + ChannelAnnouncementStatus::Unannounceable(_) => { + user_config.accept_forwards_to_priv_channels = false; + user_config.channel_handshake_config.announced_channel = false; + user_config.channel_handshake_limits.force_announced_channel_preference = true; + }, } user_config @@ -311,17 +347,23 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig { mod tests { use std::str::FromStr; - use lightning::{ln::msgs::SocketAddress, routing::gossip::NodeAlias}; - - use crate::config::can_announce_channel; + use crate::config::ChannelAnnouncementStatus; + use super::can_announce_channel; use super::Config; + use super::NodeAlias; + use super::SocketAddress; #[test] fn node_can_announce_channel() { // Default configuration with node alias and listening addresses unset let mut node_config = Config::default(); - assert_eq!(can_announce_channel(&node_config), false); + assert_eq!( + can_announce_channel(&node_config), + ChannelAnnouncementStatus::Unannounceable( + crate::config::ChannelAnnouncementBlocker::MissingNodeAlias + ) + ); // Set node alias with listening addresses unset let alias_frm_str = |alias: &str| { @@ -330,11 +372,21 @@ mod tests { NodeAlias(bytes) }; node_config.node_alias = Some(alias_frm_str("LDK_Node")); - assert_eq!(can_announce_channel(&node_config), false); + assert_eq!( + can_announce_channel(&node_config), + ChannelAnnouncementStatus::Unannounceable( + crate::config::ChannelAnnouncementBlocker::MissingListeningAddresses + ) + ); // Set node alias with an empty list of listening addresses node_config.listening_addresses = Some(vec![]); - assert_eq!(can_announce_channel(&node_config), false); + assert_eq!( + can_announce_channel(&node_config), + ChannelAnnouncementStatus::Unannounceable( + crate::config::ChannelAnnouncementBlocker::EmptyListeningAddresses + ) + ); // Set node alias with a non-empty list of listening addresses let socket_address = @@ -342,6 +394,6 @@ mod tests { if let Some(ref mut addresses) = node_config.listening_addresses { addresses.push(socket_address); } - assert_eq!(can_announce_channel(&node_config), true); + assert_eq!(can_announce_channel(&node_config), ChannelAnnouncementStatus::Announceable); } } diff --git a/src/error.rs b/src/error.rs index 5e7dbeacd..807e1ca54 100644 --- a/src/error.rs +++ b/src/error.rs @@ -110,10 +110,6 @@ pub enum Error { LiquiditySourceUnavailable, /// The given operation failed due to the LSP's required opening fee being too high. LiquidityFeeTooHigh, - /// Returned when trying to open an announced channel with a peer. This - /// error occurs when a [`crate::Node`'s] alias or listening addresses - /// are unconfigured. 
- OpenAnnouncedChannelFailed, } impl fmt::Display for Error { @@ -185,7 +181,6 @@ impl fmt::Display for Error { Self::LiquidityFeeTooHigh => { write!(f, "The given operation failed due to the LSP's required opening fee being too high.") }, - Self::OpenAnnouncedChannelFailed => write!(f, "Failed to open an announced channel."), } } } diff --git a/src/lib.rs b/src/lib.rs index 5405dedec..ad377f959 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -100,6 +100,7 @@ mod wallet; pub use bip39; pub use bitcoin; pub use lightning; +use lightning::routing::gossip::NodeAlias; pub use lightning_invoice; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; @@ -115,7 +116,6 @@ pub use io::utils::generate_entropy_mnemonic; #[cfg(feature = "uniffi")] use uniffi_types::*; -pub use builder::sanitize_alias; #[cfg(feature = "uniffi")] pub use builder::ArcedNodeBuilder as Builder; pub use builder::BuildError; @@ -123,8 +123,8 @@ pub use builder::BuildError; pub use builder::NodeBuilder as Builder; use config::{ - can_announce_channel, default_user_config, LDK_WALLET_SYNC_TIMEOUT_SECS, - NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, + can_announce_channel, default_user_config, ChannelAnnouncementStatus, + LDK_WALLET_SYNC_TIMEOUT_SECS, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, RGS_SYNC_INTERVAL, WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; @@ -648,20 +648,19 @@ impl Node { continue; } - let addresses = bcast_config.listening_addresses.clone().unwrap_or(Vec::new()); - let alias = node_alias.clone().map(|alias| { - alias.0 - }); - - if !can_announce_channel { - // Skip if we are not listening on any addresses or if the node alias is not set. - continue; - } - - if let Some(node_alias) = alias { - bcast_pm.broadcast_node_announcement([0; 3], node_alias, addresses); - } else { - continue + match can_announce_channel { + ChannelAnnouncementStatus::Unannounceable(_) => { + // Skip if we are not listening on any addresses or if the node alias is not set. + continue; + } + ChannelAnnouncementStatus::Announceable => { + let addresses = bcast_config.listening_addresses.clone().unwrap_or(Vec::new()); + if let Some(node_alias) = node_alias.as_ref() { + bcast_pm.broadcast_node_announcement([0; 3], node_alias.0, addresses); + } else { + continue + } + } } let unix_time_secs_opt = @@ -973,6 +972,11 @@ impl Node { self.config.listening_addresses.clone() } + /// Returns our node alias. + pub fn node_alias(&self) -> Option { + self.config.node_alias + } + /// Returns a payment handler allowing to create and pay [BOLT 11] invoices. /// /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md @@ -1182,22 +1186,14 @@ impl Node { Ok(()) } - /// Connect to a node and open a new channel. Disconnects and re-connects are handled automatically - /// - /// Disconnects and reconnects are handled automatically. - /// - /// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the - /// channel counterparty on channel open. This can be useful to start out with the balance not - /// entirely shifted to one side, therefore allowing to receive payments from the getgo. - /// - /// If Anchor channels are enabled, this will ensure the configured - /// [`AnchorChannelsConfig::per_channel_reserve_sats`] is available and will be retained before - /// opening the channel. + /// Connect to a node and open a new channel. /// - /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. 
- fn connect_open_channel( + /// See [`Node::open_channel`] or [`Node::open_announced_channel`] for more information about + /// parameters. + fn open_channel_inner( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, push_to_counterparty_msat: Option, channel_config: Option, + announce_channel: bool, ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { @@ -1259,13 +1255,12 @@ impl Node { } let mut user_config = default_user_config(&self.config); - let can_announce_channel = can_announce_channel(&self.config); - user_config.channel_handshake_config.announced_channel = can_announce_channel; + user_config.channel_handshake_config.announced_channel = announce_channel; user_config.channel_config = (channel_config.unwrap_or_default()).clone().into(); // We set the max inflight to 100% for private channels. // FIXME: LDK will default to this behavior soon, too, at which point we should drop this // manual override. - if !can_announce_channel { + if !announce_channel { user_config .channel_handshake_config .max_inbound_htlc_value_in_flight_percent_of_channel = 100; @@ -1298,36 +1293,79 @@ impl Node { } } - /// Opens a channel with a peer. + /// Connect to a node and open a new channel. + /// + /// Disconnects and reconnects are handled automatically. + /// + /// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the + /// channel counterparty on channel open. This can be useful to start out with the balance not + /// entirely shifted to one side, therefore allowing to receive payments from the getgo. + /// + /// If Anchor channels are enabled, this will ensure the configured + /// [`AnchorChannelsConfig::per_channel_reserve_sats`] is available and will be retained before + /// opening the channel. + /// + /// Calls `Node::open_channel_inner` with `announce_channel` set to `false`. + /// + /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. pub fn open_channel( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, push_to_counterparty_msat: Option, channel_config: Option, ) -> Result { - self.connect_open_channel( + self.open_channel_inner( node_id, address, channel_amount_sats, push_to_counterparty_msat, channel_config, + false, ) } - /// Opens an announced channel with a peer. + /// Connect to a node and open a new announced channel. + /// + /// Disconnects and reconnects are handled automatically. + /// + /// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the + /// channel counterparty on channel open. This can be useful to start out with the balance not + /// entirely shifted to one side, therefore allowing to receive payments from the getgo. + /// + /// If Anchor channels are enabled, this will ensure the configured + /// [`AnchorChannelsConfig::per_channel_reserve_sats`] is available and will be retained before + /// opening the channel. + /// + /// Note that, regardless of the value of `announce_channel` passed, this function + /// checks that a node is configured to announce the channel to be openned and returns + /// an error if the configuration is wrong. Otherwise, calls `Node::open_channel_inner` + /// with `announced_channel` equals to `true`. + /// See `config::can_announce_channel` for more details. + /// + /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. 
pub fn open_announced_channel( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, push_to_counterparty_msat: Option, channel_config: Option, ) -> Result { - if !can_announce_channel(&self.config) { - return Err(Error::OpenAnnouncedChannelFailed); + match can_announce_channel(&self.config) { + config::ChannelAnnouncementStatus::Announceable => self.open_channel_inner( + node_id, + address, + channel_amount_sats, + push_to_counterparty_msat, + channel_config, + true, + ), + config::ChannelAnnouncementStatus::Unannounceable(reason) => match reason { + config::ChannelAnnouncementBlocker::MissingNodeAlias => { + return Err(Error::InvalidNodeAlias) + }, + config::ChannelAnnouncementBlocker::MissingListeningAddresses => { + return Err(Error::InvalidSocketAddress) + }, + config::ChannelAnnouncementBlocker::EmptyListeningAddresses => { + return Err(Error::InvalidSocketAddress) + }, + }, } - - self.open_channel( - node_id, - address, - channel_amount_sats, - push_to_counterparty_msat, - channel_config, - ) } /// Manually sync the LDK and BDK wallets with the current chain state and update the fee rate diff --git a/tests/common/mod.rs b/tests/common/mod.rs index b616f7eb5..c0059b8f4 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -11,8 +11,7 @@ use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ - sanitize_alias, Builder, Config, Event, LightningBalance, LogLevel, Node, NodeError, - PendingSweepBalance, + Builder, Config, Event, LightningBalance, LogLevel, Node, NodeError, PendingSweepBalance, }; use lightning::ln::msgs::SocketAddress; @@ -207,7 +206,12 @@ pub(crate) fn random_node_alias() -> Option { let ranged_val = rng.gen_range(0..10); match ranged_val { 0 => None, - val => Some(sanitize_alias(&format!("ldk-node-{}", val)).unwrap()), + val => { + let alias = format!("ldk-node-{}", val); + let mut bytes = [0u8; 32]; + bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + Some(NodeAlias(bytes)) + }, } } @@ -443,7 +447,7 @@ pub(crate) fn do_channel_full_cycle( assert_eq!(node_a.next_event(), None); assert_eq!(node_b.next_event(), None); - println!("\nA -- connect_open_channel -> B"); + println!("\nA -- open_channel -> B"); let funding_amount_sat = 2_080_000; let push_msat = (funding_amount_sat / 2) * 1000; // balance the channel node_a diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 89786f826..68d1effbb 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -80,7 +80,7 @@ fn channel_open_fails_when_funds_insufficient() { assert_eq!(node_a.list_balances().spendable_onchain_balance_sats, premine_amount_sat); assert_eq!(node_b.list_balances().spendable_onchain_balance_sats, premine_amount_sat); - println!("\nA -- connect_open_channel -> B"); + println!("\nA -- open_channel -> B"); assert_eq!( Err(NodeError::InsufficientFunds), node_a.open_channel( From 4fd1cb8ed98c5875627e31d9c7cbf3c5e0fe8d6b Mon Sep 17 00:00:00 2001 From: Enigbe Ochekliye Date: Thu, 26 Sep 2024 21:26:18 +0100 Subject: [PATCH 052/127] fix(test): Implement conditional channel opening based on aliases and addresses This commit addresses flaky test issues related to conditional channel opening between nodes, considering node aliases and listening addresses. 
Changes in test modules: - Add/modify helper functions to randomize channel announcement flags - Generate random node aliases based on announcement flags: * Set custom alias if announce_channel is true * Use default alias otherwise - Update channel opening logic to account for node and channel announcements --- src/lib.rs | 1 + tests/common/mod.rs | 101 ++++-- tests/integration_tests_cln.rs | 16 +- tests/integration_tests_rust.rs | 611 +++++++++++++++++--------------- tests/integration_tests_vss.rs | 6 +- 5 files changed, 419 insertions(+), 316 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index ad377f959..48750f74e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -654,6 +654,7 @@ impl Node { continue; } ChannelAnnouncementStatus::Announceable => { + // Broadcast node announcement. let addresses = bcast_config.listening_addresses.clone().unwrap_or(Vec::new()); if let Some(node_alias) = node_alias.as_ref() { bcast_pm.broadcast_node_announcement([0; 3], node_alias.0, addresses); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index c0059b8f4..4dcbfd999 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -204,24 +204,36 @@ pub(crate) fn random_listening_addresses() -> Vec { pub(crate) fn random_node_alias() -> Option { let mut rng = thread_rng(); let ranged_val = rng.gen_range(0..10); + + let alias = format!("ldk-node-{}", ranged_val); + let mut bytes = [0u8; 32]; + bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + + Some(NodeAlias(bytes)) +} + +pub(crate) fn random_announce_channel() -> bool { + let mut rng = thread_rng(); + let ranged_val = rng.gen_range(0..=1); match ranged_val { - 0 => None, - val => { - let alias = format!("ldk-node-{}", val); - let mut bytes = [0u8; 32]; - bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); - Some(NodeAlias(bytes)) - }, + 0 => false, + _ => true, } } -pub(crate) fn random_config(anchor_channels: bool) -> Config { +pub(crate) fn random_config(anchor_channels: bool, announce_channel: bool) -> Config { let mut config = Config::default(); if !anchor_channels { config.anchor_channels_config = None; } + if announce_channel { + let alias = random_node_alias(); + println!("Setting random LDK node alias: {:?}", alias); + config.node_alias = alias; + } + config.network = Network::Regtest; config.onchain_wallet_sync_interval_secs = 100000; config.wallet_sync_interval_secs = 100000; @@ -235,10 +247,6 @@ pub(crate) fn random_config(anchor_channels: bool) -> Config { println!("Setting random LDK listening addresses: {:?}", rand_listening_addresses); config.listening_addresses = Some(rand_listening_addresses); - let alias = random_node_alias(); - println!("Setting random LDK node alias: {:?}", alias); - config.node_alias = alias; - config.log_level = LogLevel::Gossip; config @@ -261,14 +269,15 @@ macro_rules! 
setup_builder { pub(crate) use setup_builder; pub(crate) fn setup_two_nodes( - electrsd: &ElectrsD, allow_0conf: bool, anchor_channels: bool, anchors_trusted_no_reserve: bool, + electrsd: &ElectrsD, allow_0conf: bool, anchor_channels: bool, + anchors_trusted_no_reserve: bool, announce_channel: bool, ) -> (TestNode, TestNode) { println!("== Node A =="); - let config_a = random_config(anchor_channels); + let config_a = random_config(anchor_channels, announce_channel); let node_a = setup_node(electrsd, config_a); println!("\n== Node B =="); - let mut config_b = random_config(anchor_channels); + let mut config_b = random_config(anchor_channels, announce_channel); if allow_0conf { config_b.trusted_peers_0conf.push(node_a.node_id()); } @@ -406,15 +415,28 @@ pub(crate) fn premine_and_distribute_funds( pub fn open_channel( node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, electrsd: &ElectrsD, ) { - node_a - .open_announced_channel( - node_b.node_id(), - node_b.listening_addresses().unwrap().first().unwrap().clone(), - funding_amount_sat, - None, - None, - ) - .unwrap(); + if node_a.config().node_alias.is_some() { + node_a + .open_announced_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + funding_amount_sat, + None, + None, + ) + .unwrap(); + } else { + node_a + .open_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + funding_amount_sat, + None, + None, + ) + .unwrap(); + } + assert!(node_a.list_peers().iter().find(|c| { c.node_id == node_b.node_id() }).is_some()); let funding_txo_a = expect_channel_pending_event!(node_a, node_b.node_id()); @@ -450,15 +472,28 @@ pub(crate) fn do_channel_full_cycle( println!("\nA -- open_channel -> B"); let funding_amount_sat = 2_080_000; let push_msat = (funding_amount_sat / 2) * 1000; // balance the channel - node_a - .open_channel( - node_b.node_id(), - node_b.listening_addresses().unwrap().first().unwrap().clone(), - funding_amount_sat, - Some(push_msat), - None, - ) - .unwrap(); + + if node_a.config().node_alias.is_some() { + node_a + .open_announced_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + funding_amount_sat, + Some(push_msat), + None, + ) + .unwrap(); + } else { + node_a + .open_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + funding_amount_sat, + Some(push_msat), + None, + ) + .unwrap(); + } assert_eq!(node_a.list_peers().first().unwrap().node_id, node_b.node_id()); assert!(node_a.list_peers().first().unwrap().is_persisted); diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index 13b5c44c6..11065bfe6 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -9,6 +9,7 @@ mod common; +use common::random_announce_channel; use ldk_node::bitcoin::secp256k1::PublicKey; use ldk_node::bitcoin::Amount; use ldk_node::lightning::ln::msgs::SocketAddress; @@ -43,7 +44,7 @@ fn test_cln() { common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 1); // Setup LDK Node - let config = common::random_config(true); + let config = common::random_config(true, random_announce_channel()); let mut builder = Builder::from_config(config); builder.set_esplora_server("http://127.0.0.1:3002".to_string()); @@ -82,8 +83,19 @@ fn test_cln() { // Open the channel let funding_amount_sat = 1_000_000; - node.open_channel(cln_node_id, cln_address, funding_amount_sat, Some(500_000_000), None) + if 
node.config().node_alias.is_none() { + node.open_channel(cln_node_id, cln_address, funding_amount_sat, Some(500_000_000), None) + .unwrap(); + } else { + node.open_announced_channel( + cln_node_id, + cln_address, + funding_amount_sat, + Some(500_000_000), + None, + ) .unwrap(); + } let funding_txo = common::expect_channel_pending_event!(node, cln_node_id); common::wait_for_tx(&electrs_client, funding_txo.txid); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 68d1effbb..9c244e90a 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -10,8 +10,9 @@ mod common; use common::{ do_channel_full_cycle, expect_channel_ready_event, expect_event, expect_payment_received_event, expect_payment_successful_event, generate_blocks_and_wait, open_channel, - premine_and_distribute_funds, random_config, setup_bitcoind_and_electrsd, setup_builder, - setup_node, setup_two_nodes, wait_for_tx, TestSyncStore, + premine_and_distribute_funds, random_announce_channel, random_config, + setup_bitcoind_and_electrsd, setup_builder, setup_node, setup_two_nodes, wait_for_tx, + TestSyncStore, }; use ldk_node::payment::{PaymentKind, QrPaymentResult, SendingParameters}; @@ -27,42 +28,46 @@ use std::sync::Arc; #[test] fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let (node_a, node_b) = + setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); } #[test] fn channel_full_cycle_force_close() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let (node_a, node_b) = + setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true); } #[test] fn channel_full_cycle_force_close_trusted_no_reserve() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, true); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, true, random_announce_channel()); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true); } #[test] fn channel_full_cycle_0conf() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, true, true, false); + let (node_a, node_b) = setup_two_nodes(&electrsd, true, true, false, random_announce_channel()); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, true, true, false) } #[test] fn channel_full_cycle_legacy_staticremotekey() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, false, false); + let (node_a, node_b) = + setup_two_nodes(&electrsd, false, false, false, random_announce_channel()); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, false, false); } #[test] fn channel_open_fails_when_funds_insufficient() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let (node_a, node_b) = + setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); let addr_a = node_a.onchain_payment().new_address().unwrap(); let addr_b = 
node_b.onchain_payment().new_address().unwrap(); @@ -83,13 +88,23 @@ fn channel_open_fails_when_funds_insufficient() { println!("\nA -- open_channel -> B"); assert_eq!( Err(NodeError::InsufficientFunds), - node_a.open_channel( - node_b.node_id(), - node_b.listening_addresses().unwrap().first().unwrap().clone(), - 120000, - None, - None, - ) + if node_a.config().node_alias.is_some() { + node_a.open_announced_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + 120000, + None, + None, + ) + } else { + node_a.open_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + 120000, + None, + None, + ) + } ); } @@ -100,8 +115,9 @@ fn multi_hop_sending() { // Setup and fund 5 nodes let mut nodes = Vec::new(); + let announce_channel = random_announce_channel(); for _ in 0..5 { - let config = random_config(true); + let config = random_config(true, announce_channel); setup_builder!(builder, config); builder.set_esplora_server(esplora_url.clone()); let node = builder.build().unwrap(); @@ -170,16 +186,22 @@ fn multi_hop_sending() { }; let invoice = nodes[4].bolt11_payment().receive(2_500_000, &"asdf", 9217).unwrap(); - nodes[0].bolt11_payment().send(&invoice, Some(sending_params)).unwrap(); + let send_res = nodes[0].bolt11_payment().send(&invoice, Some(sending_params)); - let payment_id = expect_payment_received_event!(&nodes[4], 2_500_000); - let fee_paid_msat = Some(2000); - expect_payment_successful_event!(nodes[0], payment_id, Some(fee_paid_msat)); + // N0 cannot find a route to N4 if node and channel is unannounced. + if nodes[0].config().node_alias.is_none() { + assert_eq!(send_res, Err(NodeError::PaymentSendingFailed)) + } else { + let payment_id = expect_payment_received_event!(&nodes[4], 2_500_000); + assert_eq!(send_res.unwrap(), payment_id.unwrap()); + let fee_paid_msat = Some(2000); + expect_payment_successful_event!(nodes[0], payment_id, Some(fee_paid_msat)); + } } #[test] fn connect_to_public_testnet_esplora() { - let mut config = random_config(true); + let mut config = random_config(true, random_announce_channel()); config.network = Network::Testnet; setup_builder!(builder, config); builder.set_esplora_server("https://blockstream.info/testnet/api".to_string()); @@ -191,7 +213,7 @@ fn connect_to_public_testnet_esplora() { #[test] fn start_stop_reinit() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let config = random_config(true); + let config = random_config(true, random_announce_channel()); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); @@ -259,7 +281,8 @@ fn start_stop_reinit() { #[test] fn onchain_spend_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let (node_a, node_b) = + setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); let addr_a = node_a.onchain_payment().new_address().unwrap(); let addr_b = node_b.onchain_payment().new_address().unwrap(); @@ -307,7 +330,7 @@ fn onchain_spend_receive() { #[test] fn sign_verify_msg() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let config = random_config(true); + let config = random_config(true, random_announce_channel()); let node = setup_node(&electrsd, config); // Tests arbitrary message signing and later verification @@ -325,7 +348,8 @@ fn connection_restart_behavior() { fn do_connection_restart_behavior(persist: bool) { let (_bitcoind, electrsd) = 
setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, false, false); + let (node_a, node_b) = + setup_two_nodes(&electrsd, false, false, false, random_announce_channel()); let node_id_a = node_a.node_id(); let node_id_b = node_b.node_id(); @@ -376,7 +400,8 @@ fn do_connection_restart_behavior(persist: bool) { #[test] fn concurrent_connections_succeed() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let (node_a, node_b) = + setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); let node_a = Arc::new(node_a); let node_b = Arc::new(node_b); @@ -406,7 +431,8 @@ fn concurrent_connections_succeed() { #[test] fn simple_bolt12_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let (node_a, node_b) = + setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); let address_a = node_a.onchain_payment().new_address().unwrap(); let premine_amount_sat = 5_000_000; @@ -428,191 +454,203 @@ fn simple_bolt12_send_receive() { expect_channel_ready_event!(node_a, node_b.node_id()); expect_channel_ready_event!(node_b, node_a.node_id()); - // Sleep until we broadcasted a node announcement. - while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { - std::thread::sleep(std::time::Duration::from_millis(10)); + // For announced nodes, we make a trivial check that node_alias is set knowing that + // `random_listening_addresses()` always sets the listening_addresses. This check + // is important to prevent the test from looping endlessly. + if node_a.config().node_alias.is_some() && node_b.config().node_alias.is_some() { + // Sleep until we broadcasted a node announcement. + while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + + // Sleep one more sec to make sure the node announcement propagates. + std::thread::sleep(std::time::Duration::from_secs(1)); } - // Sleep one more sec to make sure the node announcement propagates. - std::thread::sleep(std::time::Duration::from_secs(1)); - let expected_amount_msat = 100_000_000; - let offer = node_b.bolt12_payment().receive(expected_amount_msat, "asdf", Some(1)).unwrap(); - let expected_quantity = Some(1); - let expected_payer_note = Some("Test".to_string()); - let payment_id = node_a - .bolt12_payment() - .send(&offer, expected_quantity, expected_payer_note.clone()) - .unwrap(); - - expect_payment_successful_event!(node_a, Some(payment_id), None); - let node_a_payments = node_a.list_payments(); - assert_eq!(node_a_payments.len(), 1); - match node_a_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { - hash, - preimage, - secret: _, - offer_id, - quantity: ref qty, - payer_note: ref note, - } => { - assert!(hash.is_some()); - assert!(preimage.is_some()); - assert_eq!(offer_id, offer.id()); - assert_eq!(&expected_quantity, qty); - assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); - //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 - //API currently doesn't allow to do that. 
- }, - _ => { - panic!("Unexpected payment kind"); - }, - } - assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); - - expect_payment_received_event!(node_b, expected_amount_msat); - let node_b_payments = node_b.list_payments(); - assert_eq!(node_b_payments.len(), 1); - match node_b_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id, .. } => { - assert!(hash.is_some()); - assert!(preimage.is_some()); - assert!(secret.is_some()); - assert_eq!(offer_id, offer.id()); - }, - _ => { - panic!("Unexpected payment kind"); - }, - } - assert_eq!(node_b_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); - - // Test send_using_amount - let offer_amount_msat = 100_000_000; - let less_than_offer_amount = offer_amount_msat - 10_000; - let expected_amount_msat = offer_amount_msat + 10_000; - let offer = node_b.bolt12_payment().receive(offer_amount_msat, "asdf", Some(1)).unwrap(); - let expected_quantity = Some(1); - let expected_payer_note = Some("Test".to_string()); - assert!(node_a - .bolt12_payment() - .send_using_amount(&offer, less_than_offer_amount, None, None) - .is_err()); - let payment_id = node_a - .bolt12_payment() - .send_using_amount( - &offer, - expected_amount_msat, - expected_quantity, - expected_payer_note.clone(), - ) - .unwrap(); - - expect_payment_successful_event!(node_a, Some(payment_id), None); - let node_a_payments = node_a.list_payments_with_filter(|p| p.id == payment_id); - assert_eq!(node_a_payments.len(), 1); - let payment_hash = match node_a_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { - hash, - preimage, - secret: _, - offer_id, - quantity: ref qty, - payer_note: ref note, - } => { - assert!(hash.is_some()); - assert!(preimage.is_some()); - assert_eq!(offer_id, offer.id()); - assert_eq!(&expected_quantity, qty); - assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); - //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 - //API currently doesn't allow to do that. - hash.unwrap() - }, - _ => { - panic!("Unexpected payment kind"); - }, - }; - assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); - - expect_payment_received_event!(node_b, expected_amount_msat); - let node_b_payment_id = PaymentId(payment_hash.0); - let node_b_payments = node_b.list_payments_with_filter(|p| p.id == node_b_payment_id); - assert_eq!(node_b_payments.len(), 1); - match node_b_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id, .. } => { - assert!(hash.is_some()); - assert!(preimage.is_some()); - assert!(secret.is_some()); - assert_eq!(offer_id, offer.id()); - }, - _ => { - panic!("Unexpected payment kind"); - }, - } - assert_eq!(node_b_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); - - // Now node_b refunds the amount node_a just overpaid. 
- let overpaid_amount = expected_amount_msat - offer_amount_msat; - let expected_quantity = Some(1); - let expected_payer_note = Some("Test".to_string()); - let refund = node_b - .bolt12_payment() - .initiate_refund(overpaid_amount, 3600, expected_quantity, expected_payer_note.clone()) - .unwrap(); - let invoice = node_a.bolt12_payment().request_refund_payment(&refund).unwrap(); - expect_payment_received_event!(node_a, overpaid_amount); - - let node_b_payment_id = node_b - .list_payments_with_filter(|p| p.amount_msat == Some(overpaid_amount)) - .first() - .unwrap() - .id; - expect_payment_successful_event!(node_b, Some(node_b_payment_id), None); - - let node_b_payments = node_b.list_payments_with_filter(|p| p.id == node_b_payment_id); - assert_eq!(node_b_payments.len(), 1); - match node_b_payments.first().unwrap().kind { - PaymentKind::Bolt12Refund { - hash, - preimage, - secret: _, - quantity: ref qty, - payer_note: ref note, - } => { - assert!(hash.is_some()); - assert!(preimage.is_some()); - assert_eq!(&expected_quantity, qty); - assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0) - //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 - //API currently doesn't allow to do that. - }, - _ => { - panic!("Unexpected payment kind"); - }, - } - assert_eq!(node_b_payments.first().unwrap().amount_msat, Some(overpaid_amount)); - - let node_a_payment_id = PaymentId(invoice.payment_hash().0); - let node_a_payments = node_a.list_payments_with_filter(|p| p.id == node_a_payment_id); - assert_eq!(node_a_payments.len(), 1); - match node_a_payments.first().unwrap().kind { - PaymentKind::Bolt12Refund { hash, preimage, secret, .. } => { - assert!(hash.is_some()); - assert!(preimage.is_some()); - assert!(secret.is_some()); - }, - _ => { - panic!("Unexpected payment kind"); - }, + let offer_res = node_b.bolt12_payment().receive(expected_amount_msat, "asdf", Some(1)); + if node_a.config().node_alias.is_none() && node_b.config().node_alias.is_none() { + // Node must be announced if alternative one-hop `BlindedPath` is to be used. + assert_eq!(offer_res, Err(NodeError::OfferCreationFailed)) + } else { + let offer = offer_res.unwrap(); + let expected_quantity = Some(1); + let expected_payer_note = Some("Test".to_string()); + let payment_id = node_a + .bolt12_payment() + .send(&offer, expected_quantity, expected_payer_note.clone()) + .unwrap(); + + expect_payment_successful_event!(node_a, Some(payment_id), None); + let node_a_payments = node_a.list_payments(); + assert_eq!(node_a_payments.len(), 1); + match node_a_payments.first().unwrap().kind { + PaymentKind::Bolt12Offer { + hash, + preimage, + secret: _, + offer_id, + quantity: ref qty, + payer_note: ref note, + } => { + assert!(hash.is_some()); + assert!(preimage.is_some()); + assert_eq!(offer_id, offer.id()); + assert_eq!(&expected_quantity, qty); + assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); + //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 + //API currently doesn't allow to do that. + }, + _ => { + panic!("Unexpected payment kind"); + }, + } + assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); + + expect_payment_received_event!(node_b, expected_amount_msat); + let node_b_payments = node_b.list_payments(); + assert_eq!(node_b_payments.len(), 1); + match node_b_payments.first().unwrap().kind { + PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id, .. 
} => { + assert!(hash.is_some()); + assert!(preimage.is_some()); + assert!(secret.is_some()); + assert_eq!(offer_id, offer.id()); + }, + _ => { + panic!("Unexpected payment kind"); + }, + } + assert_eq!(node_b_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); + + // Test send_using_amount + let offer_amount_msat = 100_000_000; + let less_than_offer_amount = offer_amount_msat - 10_000; + let expected_amount_msat = offer_amount_msat + 10_000; + let offer = node_b.bolt12_payment().receive(offer_amount_msat, "asdf", Some(1)).unwrap(); + let expected_quantity = Some(1); + let expected_payer_note = Some("Test".to_string()); + assert!(node_a + .bolt12_payment() + .send_using_amount(&offer, less_than_offer_amount, None, None) + .is_err()); + let payment_id = node_a + .bolt12_payment() + .send_using_amount( + &offer, + expected_amount_msat, + expected_quantity, + expected_payer_note.clone(), + ) + .unwrap(); + + expect_payment_successful_event!(node_a, Some(payment_id), None); + let node_a_payments = node_a.list_payments_with_filter(|p| p.id == payment_id); + assert_eq!(node_a_payments.len(), 1); + let payment_hash = match node_a_payments.first().unwrap().kind { + PaymentKind::Bolt12Offer { + hash, + preimage, + secret: _, + offer_id, + quantity: ref qty, + payer_note: ref note, + } => { + assert!(hash.is_some()); + assert!(preimage.is_some()); + assert_eq!(offer_id, offer.id()); + assert_eq!(&expected_quantity, qty); + assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); + //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 + //API currently doesn't allow to do that. + hash.unwrap() + }, + _ => { + panic!("Unexpected payment kind"); + }, + }; + assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); + + expect_payment_received_event!(node_b, expected_amount_msat); + let node_b_payment_id = PaymentId(payment_hash.0); + let node_b_payments = node_b.list_payments_with_filter(|p| p.id == node_b_payment_id); + assert_eq!(node_b_payments.len(), 1); + match node_b_payments.first().unwrap().kind { + PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id, .. } => { + assert!(hash.is_some()); + assert!(preimage.is_some()); + assert!(secret.is_some()); + assert_eq!(offer_id, offer.id()); + }, + _ => { + panic!("Unexpected payment kind"); + }, + } + assert_eq!(node_b_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); + + // Now node_b refunds the amount node_a just overpaid. 
+ let overpaid_amount = expected_amount_msat - offer_amount_msat; + let expected_quantity = Some(1); + let expected_payer_note = Some("Test".to_string()); + let refund = node_b + .bolt12_payment() + .initiate_refund(overpaid_amount, 3600, expected_quantity, expected_payer_note.clone()) + .unwrap(); + let invoice = node_a.bolt12_payment().request_refund_payment(&refund).unwrap(); + expect_payment_received_event!(node_a, overpaid_amount); + + let node_b_payment_id = node_b + .list_payments_with_filter(|p| p.amount_msat == Some(overpaid_amount)) + .first() + .unwrap() + .id; + expect_payment_successful_event!(node_b, Some(node_b_payment_id), None); + + let node_b_payments = node_b.list_payments_with_filter(|p| p.id == node_b_payment_id); + assert_eq!(node_b_payments.len(), 1); + match node_b_payments.first().unwrap().kind { + PaymentKind::Bolt12Refund { + hash, + preimage, + secret: _, + quantity: ref qty, + payer_note: ref note, + } => { + assert!(hash.is_some()); + assert!(preimage.is_some()); + assert_eq!(&expected_quantity, qty); + assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0) + //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 + //API currently doesn't allow to do that. + }, + _ => { + panic!("Unexpected payment kind"); + }, + } + assert_eq!(node_b_payments.first().unwrap().amount_msat, Some(overpaid_amount)); + + let node_a_payment_id = PaymentId(invoice.payment_hash().0); + let node_a_payments = node_a.list_payments_with_filter(|p| p.id == node_a_payment_id); + assert_eq!(node_a_payments.len(), 1); + match node_a_payments.first().unwrap().kind { + PaymentKind::Bolt12Refund { hash, preimage, secret, .. } => { + assert!(hash.is_some()); + assert!(preimage.is_some()); + assert!(secret.is_some()); + }, + _ => { + panic!("Unexpected payment kind"); + }, + } + assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(overpaid_amount)); } - assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(overpaid_amount)); } #[test] fn generate_bip21_uri() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let (node_a, node_b) = + setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); let address_a = node_a.onchain_payment().new_address().unwrap(); let premined_sats = 5_000_000; @@ -639,21 +677,26 @@ fn generate_bip21_uri() { let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); - match uqr_payment.clone() { - Ok(ref uri) => { - println!("Generated URI: {}", uri); - assert!(uri.contains("BITCOIN:")); - assert!(uri.contains("lightning=")); - assert!(uri.contains("lno=")); - }, - Err(e) => panic!("Failed to generate URI: {:?}", e), + if node_a.config().node_alias.is_none() && node_b.config().node_alias.is_none() { + assert_eq!(uqr_payment, Err(NodeError::OfferCreationFailed)); + } else { + match uqr_payment.clone() { + Ok(ref uri) => { + println!("Generated URI: {}", uri); + assert!(uri.contains("BITCOIN:")); + assert!(uri.contains("lightning=")); + assert!(uri.contains("lno=")); + }, + Err(e) => panic!("Failed to generate URI: {:?}", e), + } } } #[test] fn unified_qr_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let (node_a, node_b) = + setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); let address_a = node_a.onchain_payment().new_address().unwrap(); let 
premined_sats = 5_000_000; @@ -675,88 +718,98 @@ fn unified_qr_send_receive() { expect_channel_ready_event!(node_a, node_b.node_id()); expect_channel_ready_event!(node_b, node_a.node_id()); - // Sleep until we broadcast a node announcement. - while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { - std::thread::sleep(std::time::Duration::from_millis(10)); + // For announced nodes, we make a trivial check that node_alias is set knowing that + // `random_listening_addresses()` always sets the listening_addresses. This check + // is important to prevent the test from looping endlessly. + if node_a.config().node_alias.is_some() && node_b.config().node_alias.is_some() { + // Sleep until we broadcasted a node announcement. + while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + + // Sleep one more sec to make sure the node announcement propagates. + std::thread::sleep(std::time::Duration::from_secs(1)); } - // Sleep one more sec to make sure the node announcement propagates. - std::thread::sleep(std::time::Duration::from_secs(1)); - let expected_amount_sats = 100_000; let expiry_sec = 4_000; let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); - let uri_str = uqr_payment.clone().unwrap(); - let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str) { - Ok(QrPaymentResult::Bolt12 { payment_id }) => { - println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); - payment_id - }, - Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { - panic!("Expected Bolt12 payment but got Bolt11"); - }, - Ok(QrPaymentResult::Onchain { txid: _ }) => { - panic!("Expected Bolt12 payment but get On-chain transaction"); - }, - Err(e) => { - panic!("Expected Bolt12 payment but got error: {:?}", e); - }, - }; - - expect_payment_successful_event!(node_a, Some(offer_payment_id), None); + if node_a.config().node_alias.is_none() && node_b.config().node_alias.is_none() { + // Node must be announced if alternative one-hop `BlindedPath` is to be used used. + assert_eq!(uqr_payment, Err(NodeError::OfferCreationFailed)) + } else { + let uri_str = uqr_payment.clone().unwrap(); + let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str) { + Ok(QrPaymentResult::Bolt12 { payment_id }) => { + println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); + payment_id + }, + Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { + panic!("Expected Bolt12 payment but got Bolt11"); + }, + Ok(QrPaymentResult::Onchain { txid: _ }) => { + panic!("Expected Bolt12 payment but get On-chain transaction"); + }, + Err(e) => { + panic!("Expected Bolt12 payment but got error: {:?}", e); + }, + }; - // Removed one character from the offer to fall back on to invoice. - // Still needs work - let uri_str_with_invalid_offer = &uri_str[..uri_str.len() - 1]; - let invoice_payment_id: PaymentId = - match node_a.unified_qr_payment().send(uri_str_with_invalid_offer) { + expect_payment_successful_event!(node_a, Some(offer_payment_id), None); + + // Removed one character from the offer to fall back on to invoice. 
+ // Still needs work + let uri_str_with_invalid_offer = &uri_str[..uri_str.len() - 1]; + let invoice_payment_id: PaymentId = + match node_a.unified_qr_payment().send(uri_str_with_invalid_offer) { + Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { + panic!("Expected Bolt11 payment but got Bolt12"); + }, + Ok(QrPaymentResult::Bolt11 { payment_id }) => { + println!("\nBolt11 payment sent successfully with PaymentID: {:?}", payment_id); + payment_id + }, + Ok(QrPaymentResult::Onchain { txid: _ }) => { + panic!("Expected Bolt11 payment but got on-chain transaction"); + }, + Err(e) => { + panic!("Expected Bolt11 payment but got error: {:?}", e); + }, + }; + expect_payment_successful_event!(node_a, Some(invoice_payment_id), None); + + let expect_onchain_amount_sats = 800_000; + let onchain_uqr_payment = + node_b.unified_qr_payment().receive(expect_onchain_amount_sats, "asdf", 4_000).unwrap(); + + // Removed a character from the offer, so it would move on to the other parameters. + let txid = match node_a + .unified_qr_payment() + .send(&onchain_uqr_payment.as_str()[..onchain_uqr_payment.len() - 1]) + { Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { - panic!("Expected Bolt11 payment but got Bolt12"); + panic!("Expected on-chain payment but got Bolt12") }, - Ok(QrPaymentResult::Bolt11 { payment_id }) => { - println!("\nBolt11 payment sent successfully with PaymentID: {:?}", payment_id); - payment_id + Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { + panic!("Expected on-chain payment but got Bolt11"); }, - Ok(QrPaymentResult::Onchain { txid: _ }) => { - panic!("Expected Bolt11 payment but got on-chain transaction"); + Ok(QrPaymentResult::Onchain { txid }) => { + println!("\nOn-chain transaction successful with Txid: {}", txid); + txid }, Err(e) => { - panic!("Expected Bolt11 payment but got error: {:?}", e); + panic!("Expected on-chain payment but got error: {:?}", e); }, }; - expect_payment_successful_event!(node_a, Some(invoice_payment_id), None); - - let expect_onchain_amount_sats = 800_000; - let onchain_uqr_payment = - node_b.unified_qr_payment().receive(expect_onchain_amount_sats, "asdf", 4_000).unwrap(); - - // Removed a character from the offer, so it would move on to the other parameters. 
- let txid = match node_a - .unified_qr_payment() - .send(&onchain_uqr_payment.as_str()[..onchain_uqr_payment.len() - 1]) - { - Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { - panic!("Expected on-chain payment but got Bolt12") - }, - Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { - panic!("Expected on-chain payment but got Bolt11"); - }, - Ok(QrPaymentResult::Onchain { txid }) => { - println!("\nOn-chain transaction successful with Txid: {}", txid); - txid - }, - Err(e) => { - panic!("Expected on-chain payment but got error: {:?}", e); - }, - }; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); - wait_for_tx(&electrsd.client, txid); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + wait_for_tx(&electrsd.client, txid); - node_a.sync_wallets().unwrap(); - node_b.sync_wallets().unwrap(); + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); - assert_eq!(node_b.list_balances().total_onchain_balance_sats, 800_000); - assert_eq!(node_b.list_balances().total_lightning_balance_sats, 200_000); + assert_eq!(node_b.list_balances().total_onchain_balance_sats, 800_000); + assert_eq!(node_b.list_balances().total_lightning_balance_sats, 200_000); + } } diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index c572fbcd8..b7fc8ba42 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -9,6 +9,7 @@ mod common; +use common::random_announce_channel; use ldk_node::Builder; #[test] @@ -16,7 +17,8 @@ fn channel_full_cycle_with_vss_store() { let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd(); println!("== Node A =="); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let config_a = common::random_config(true); + let announce_channel = random_announce_channel(); + let config_a = common::random_config(true, announce_channel); let mut builder_a = Builder::from_config(config_a); builder_a.set_esplora_server(esplora_url.clone()); let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); @@ -25,7 +27,7 @@ fn channel_full_cycle_with_vss_store() { node_a.start().unwrap(); println!("\n== Node B =="); - let config_b = common::random_config(true); + let config_b = common::random_config(true, announce_channel); let mut builder_b = Builder::from_config(config_b); builder_b.set_esplora_server(esplora_url); let node_b = builder_b.build_with_vss_store(vss_base_url, "node_2_store".to_string()).unwrap(); From 11f9a89ef4e97f5da69cbf995618882fc93618b7 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 7 Oct 2024 11:20:59 +0200 Subject: [PATCH 053/127] Revert "fix(test): Implement conditional channel opening based on aliases and addresses" This reverts commit 4fd1cb8ed98c5875627e31d9c7cbf3c5e0fe8d6b as unit tests need to be kept deterministic, i.e., opening announced channels is a deliberate choice on a per test case basis. --- src/lib.rs | 1 - tests/common/mod.rs | 101 ++---- tests/integration_tests_cln.rs | 16 +- tests/integration_tests_rust.rs | 611 +++++++++++++++----------------- tests/integration_tests_vss.rs | 6 +- 5 files changed, 316 insertions(+), 419 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 48750f74e..ad377f959 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -654,7 +654,6 @@ impl Node { continue; } ChannelAnnouncementStatus::Announceable => { - // Broadcast node announcement. 
let addresses = bcast_config.listening_addresses.clone().unwrap_or(Vec::new()); if let Some(node_alias) = node_alias.as_ref() { bcast_pm.broadcast_node_announcement([0; 3], node_alias.0, addresses); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 4dcbfd999..c0059b8f4 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -204,36 +204,24 @@ pub(crate) fn random_listening_addresses() -> Vec { pub(crate) fn random_node_alias() -> Option { let mut rng = thread_rng(); let ranged_val = rng.gen_range(0..10); - - let alias = format!("ldk-node-{}", ranged_val); - let mut bytes = [0u8; 32]; - bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); - - Some(NodeAlias(bytes)) -} - -pub(crate) fn random_announce_channel() -> bool { - let mut rng = thread_rng(); - let ranged_val = rng.gen_range(0..=1); match ranged_val { - 0 => false, - _ => true, + 0 => None, + val => { + let alias = format!("ldk-node-{}", val); + let mut bytes = [0u8; 32]; + bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + Some(NodeAlias(bytes)) + }, } } -pub(crate) fn random_config(anchor_channels: bool, announce_channel: bool) -> Config { +pub(crate) fn random_config(anchor_channels: bool) -> Config { let mut config = Config::default(); if !anchor_channels { config.anchor_channels_config = None; } - if announce_channel { - let alias = random_node_alias(); - println!("Setting random LDK node alias: {:?}", alias); - config.node_alias = alias; - } - config.network = Network::Regtest; config.onchain_wallet_sync_interval_secs = 100000; config.wallet_sync_interval_secs = 100000; @@ -247,6 +235,10 @@ pub(crate) fn random_config(anchor_channels: bool, announce_channel: bool) -> Co println!("Setting random LDK listening addresses: {:?}", rand_listening_addresses); config.listening_addresses = Some(rand_listening_addresses); + let alias = random_node_alias(); + println!("Setting random LDK node alias: {:?}", alias); + config.node_alias = alias; + config.log_level = LogLevel::Gossip; config @@ -269,15 +261,14 @@ macro_rules! 
setup_builder { pub(crate) use setup_builder; pub(crate) fn setup_two_nodes( - electrsd: &ElectrsD, allow_0conf: bool, anchor_channels: bool, - anchors_trusted_no_reserve: bool, announce_channel: bool, + electrsd: &ElectrsD, allow_0conf: bool, anchor_channels: bool, anchors_trusted_no_reserve: bool, ) -> (TestNode, TestNode) { println!("== Node A =="); - let config_a = random_config(anchor_channels, announce_channel); + let config_a = random_config(anchor_channels); let node_a = setup_node(electrsd, config_a); println!("\n== Node B =="); - let mut config_b = random_config(anchor_channels, announce_channel); + let mut config_b = random_config(anchor_channels); if allow_0conf { config_b.trusted_peers_0conf.push(node_a.node_id()); } @@ -415,28 +406,15 @@ pub(crate) fn premine_and_distribute_funds( pub fn open_channel( node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, electrsd: &ElectrsD, ) { - if node_a.config().node_alias.is_some() { - node_a - .open_announced_channel( - node_b.node_id(), - node_b.listening_addresses().unwrap().first().unwrap().clone(), - funding_amount_sat, - None, - None, - ) - .unwrap(); - } else { - node_a - .open_channel( - node_b.node_id(), - node_b.listening_addresses().unwrap().first().unwrap().clone(), - funding_amount_sat, - None, - None, - ) - .unwrap(); - } - + node_a + .open_announced_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + funding_amount_sat, + None, + None, + ) + .unwrap(); assert!(node_a.list_peers().iter().find(|c| { c.node_id == node_b.node_id() }).is_some()); let funding_txo_a = expect_channel_pending_event!(node_a, node_b.node_id()); @@ -472,28 +450,15 @@ pub(crate) fn do_channel_full_cycle( println!("\nA -- open_channel -> B"); let funding_amount_sat = 2_080_000; let push_msat = (funding_amount_sat / 2) * 1000; // balance the channel - - if node_a.config().node_alias.is_some() { - node_a - .open_announced_channel( - node_b.node_id(), - node_b.listening_addresses().unwrap().first().unwrap().clone(), - funding_amount_sat, - Some(push_msat), - None, - ) - .unwrap(); - } else { - node_a - .open_channel( - node_b.node_id(), - node_b.listening_addresses().unwrap().first().unwrap().clone(), - funding_amount_sat, - Some(push_msat), - None, - ) - .unwrap(); - } + node_a + .open_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + funding_amount_sat, + Some(push_msat), + None, + ) + .unwrap(); assert_eq!(node_a.list_peers().first().unwrap().node_id, node_b.node_id()); assert!(node_a.list_peers().first().unwrap().is_persisted); diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index 11065bfe6..13b5c44c6 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -9,7 +9,6 @@ mod common; -use common::random_announce_channel; use ldk_node::bitcoin::secp256k1::PublicKey; use ldk_node::bitcoin::Amount; use ldk_node::lightning::ln::msgs::SocketAddress; @@ -44,7 +43,7 @@ fn test_cln() { common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 1); // Setup LDK Node - let config = common::random_config(true, random_announce_channel()); + let config = common::random_config(true); let mut builder = Builder::from_config(config); builder.set_esplora_server("http://127.0.0.1:3002".to_string()); @@ -83,19 +82,8 @@ fn test_cln() { // Open the channel let funding_amount_sat = 1_000_000; - if node.config().node_alias.is_none() { - node.open_channel(cln_node_id, cln_address, funding_amount_sat, 
Some(500_000_000), None) - .unwrap(); - } else { - node.open_announced_channel( - cln_node_id, - cln_address, - funding_amount_sat, - Some(500_000_000), - None, - ) + node.open_channel(cln_node_id, cln_address, funding_amount_sat, Some(500_000_000), None) .unwrap(); - } let funding_txo = common::expect_channel_pending_event!(node, cln_node_id); common::wait_for_tx(&electrs_client, funding_txo.txid); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 9c244e90a..68d1effbb 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -10,9 +10,8 @@ mod common; use common::{ do_channel_full_cycle, expect_channel_ready_event, expect_event, expect_payment_received_event, expect_payment_successful_event, generate_blocks_and_wait, open_channel, - premine_and_distribute_funds, random_announce_channel, random_config, - setup_bitcoind_and_electrsd, setup_builder, setup_node, setup_two_nodes, wait_for_tx, - TestSyncStore, + premine_and_distribute_funds, random_config, setup_bitcoind_and_electrsd, setup_builder, + setup_node, setup_two_nodes, wait_for_tx, TestSyncStore, }; use ldk_node::payment::{PaymentKind, QrPaymentResult, SendingParameters}; @@ -28,46 +27,42 @@ use std::sync::Arc; #[test] fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = - setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); } #[test] fn channel_full_cycle_force_close() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = - setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true); } #[test] fn channel_full_cycle_force_close_trusted_no_reserve() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, true, random_announce_channel()); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, true); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true); } #[test] fn channel_full_cycle_0conf() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, true, true, false, random_announce_channel()); + let (node_a, node_b) = setup_two_nodes(&electrsd, true, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, true, true, false) } #[test] fn channel_full_cycle_legacy_staticremotekey() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = - setup_two_nodes(&electrsd, false, false, false, random_announce_channel()); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, false, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, false, false); } #[test] fn channel_open_fails_when_funds_insufficient() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = - setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); let addr_a = node_a.onchain_payment().new_address().unwrap(); let addr_b = node_b.onchain_payment().new_address().unwrap(); @@ 
-88,23 +83,13 @@ fn channel_open_fails_when_funds_insufficient() { println!("\nA -- open_channel -> B"); assert_eq!( Err(NodeError::InsufficientFunds), - if node_a.config().node_alias.is_some() { - node_a.open_announced_channel( - node_b.node_id(), - node_b.listening_addresses().unwrap().first().unwrap().clone(), - 120000, - None, - None, - ) - } else { - node_a.open_channel( - node_b.node_id(), - node_b.listening_addresses().unwrap().first().unwrap().clone(), - 120000, - None, - None, - ) - } + node_a.open_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + 120000, + None, + None, + ) ); } @@ -115,9 +100,8 @@ fn multi_hop_sending() { // Setup and fund 5 nodes let mut nodes = Vec::new(); - let announce_channel = random_announce_channel(); for _ in 0..5 { - let config = random_config(true, announce_channel); + let config = random_config(true); setup_builder!(builder, config); builder.set_esplora_server(esplora_url.clone()); let node = builder.build().unwrap(); @@ -186,22 +170,16 @@ fn multi_hop_sending() { }; let invoice = nodes[4].bolt11_payment().receive(2_500_000, &"asdf", 9217).unwrap(); - let send_res = nodes[0].bolt11_payment().send(&invoice, Some(sending_params)); + nodes[0].bolt11_payment().send(&invoice, Some(sending_params)).unwrap(); - // N0 cannot find a route to N4 if node and channel is unannounced. - if nodes[0].config().node_alias.is_none() { - assert_eq!(send_res, Err(NodeError::PaymentSendingFailed)) - } else { - let payment_id = expect_payment_received_event!(&nodes[4], 2_500_000); - assert_eq!(send_res.unwrap(), payment_id.unwrap()); - let fee_paid_msat = Some(2000); - expect_payment_successful_event!(nodes[0], payment_id, Some(fee_paid_msat)); - } + let payment_id = expect_payment_received_event!(&nodes[4], 2_500_000); + let fee_paid_msat = Some(2000); + expect_payment_successful_event!(nodes[0], payment_id, Some(fee_paid_msat)); } #[test] fn connect_to_public_testnet_esplora() { - let mut config = random_config(true, random_announce_channel()); + let mut config = random_config(true); config.network = Network::Testnet; setup_builder!(builder, config); builder.set_esplora_server("https://blockstream.info/testnet/api".to_string()); @@ -213,7 +191,7 @@ fn connect_to_public_testnet_esplora() { #[test] fn start_stop_reinit() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let config = random_config(true, random_announce_channel()); + let config = random_config(true); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); @@ -281,8 +259,7 @@ fn start_stop_reinit() { #[test] fn onchain_spend_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = - setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); let addr_a = node_a.onchain_payment().new_address().unwrap(); let addr_b = node_b.onchain_payment().new_address().unwrap(); @@ -330,7 +307,7 @@ fn onchain_spend_receive() { #[test] fn sign_verify_msg() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let config = random_config(true, random_announce_channel()); + let config = random_config(true); let node = setup_node(&electrsd, config); // Tests arbitrary message signing and later verification @@ -348,8 +325,7 @@ fn connection_restart_behavior() { fn do_connection_restart_behavior(persist: bool) { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = - 
setup_two_nodes(&electrsd, false, false, false, random_announce_channel()); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, false, false); let node_id_a = node_a.node_id(); let node_id_b = node_b.node_id(); @@ -400,8 +376,7 @@ fn do_connection_restart_behavior(persist: bool) { #[test] fn concurrent_connections_succeed() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = - setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); let node_a = Arc::new(node_a); let node_b = Arc::new(node_b); @@ -431,8 +406,7 @@ fn concurrent_connections_succeed() { #[test] fn simple_bolt12_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = - setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); let address_a = node_a.onchain_payment().new_address().unwrap(); let premine_amount_sat = 5_000_000; @@ -454,203 +428,191 @@ fn simple_bolt12_send_receive() { expect_channel_ready_event!(node_a, node_b.node_id()); expect_channel_ready_event!(node_b, node_a.node_id()); - // For announced nodes, we make a trivial check that node_alias is set knowing that - // `random_listening_addresses()` always sets the listening_addresses. This check - // is important to prevent the test from looping endlessly. - if node_a.config().node_alias.is_some() && node_b.config().node_alias.is_some() { - // Sleep until we broadcasted a node announcement. - while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - - // Sleep one more sec to make sure the node announcement propagates. - std::thread::sleep(std::time::Duration::from_secs(1)); + // Sleep until we broadcasted a node announcement. + while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { + std::thread::sleep(std::time::Duration::from_millis(10)); } + // Sleep one more sec to make sure the node announcement propagates. + std::thread::sleep(std::time::Duration::from_secs(1)); + let expected_amount_msat = 100_000_000; - let offer_res = node_b.bolt12_payment().receive(expected_amount_msat, "asdf", Some(1)); - if node_a.config().node_alias.is_none() && node_b.config().node_alias.is_none() { - // Node must be announced if alternative one-hop `BlindedPath` is to be used. - assert_eq!(offer_res, Err(NodeError::OfferCreationFailed)) - } else { - let offer = offer_res.unwrap(); - let expected_quantity = Some(1); - let expected_payer_note = Some("Test".to_string()); - let payment_id = node_a - .bolt12_payment() - .send(&offer, expected_quantity, expected_payer_note.clone()) - .unwrap(); - - expect_payment_successful_event!(node_a, Some(payment_id), None); - let node_a_payments = node_a.list_payments(); - assert_eq!(node_a_payments.len(), 1); - match node_a_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { - hash, - preimage, - secret: _, - offer_id, - quantity: ref qty, - payer_note: ref note, - } => { - assert!(hash.is_some()); - assert!(preimage.is_some()); - assert_eq!(offer_id, offer.id()); - assert_eq!(&expected_quantity, qty); - assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); - //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 - //API currently doesn't allow to do that. 
- }, - _ => { - panic!("Unexpected payment kind"); - }, - } - assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); - - expect_payment_received_event!(node_b, expected_amount_msat); - let node_b_payments = node_b.list_payments(); - assert_eq!(node_b_payments.len(), 1); - match node_b_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id, .. } => { - assert!(hash.is_some()); - assert!(preimage.is_some()); - assert!(secret.is_some()); - assert_eq!(offer_id, offer.id()); - }, - _ => { - panic!("Unexpected payment kind"); - }, - } - assert_eq!(node_b_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); - - // Test send_using_amount - let offer_amount_msat = 100_000_000; - let less_than_offer_amount = offer_amount_msat - 10_000; - let expected_amount_msat = offer_amount_msat + 10_000; - let offer = node_b.bolt12_payment().receive(offer_amount_msat, "asdf", Some(1)).unwrap(); - let expected_quantity = Some(1); - let expected_payer_note = Some("Test".to_string()); - assert!(node_a - .bolt12_payment() - .send_using_amount(&offer, less_than_offer_amount, None, None) - .is_err()); - let payment_id = node_a - .bolt12_payment() - .send_using_amount( - &offer, - expected_amount_msat, - expected_quantity, - expected_payer_note.clone(), - ) - .unwrap(); - - expect_payment_successful_event!(node_a, Some(payment_id), None); - let node_a_payments = node_a.list_payments_with_filter(|p| p.id == payment_id); - assert_eq!(node_a_payments.len(), 1); - let payment_hash = match node_a_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { - hash, - preimage, - secret: _, - offer_id, - quantity: ref qty, - payer_note: ref note, - } => { - assert!(hash.is_some()); - assert!(preimage.is_some()); - assert_eq!(offer_id, offer.id()); - assert_eq!(&expected_quantity, qty); - assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); - //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 - //API currently doesn't allow to do that. - hash.unwrap() - }, - _ => { - panic!("Unexpected payment kind"); - }, - }; - assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); - - expect_payment_received_event!(node_b, expected_amount_msat); - let node_b_payment_id = PaymentId(payment_hash.0); - let node_b_payments = node_b.list_payments_with_filter(|p| p.id == node_b_payment_id); - assert_eq!(node_b_payments.len(), 1); - match node_b_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id, .. } => { - assert!(hash.is_some()); - assert!(preimage.is_some()); - assert!(secret.is_some()); - assert_eq!(offer_id, offer.id()); - }, - _ => { - panic!("Unexpected payment kind"); - }, - } - assert_eq!(node_b_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); - - // Now node_b refunds the amount node_a just overpaid. 
- let overpaid_amount = expected_amount_msat - offer_amount_msat; - let expected_quantity = Some(1); - let expected_payer_note = Some("Test".to_string()); - let refund = node_b - .bolt12_payment() - .initiate_refund(overpaid_amount, 3600, expected_quantity, expected_payer_note.clone()) - .unwrap(); - let invoice = node_a.bolt12_payment().request_refund_payment(&refund).unwrap(); - expect_payment_received_event!(node_a, overpaid_amount); - - let node_b_payment_id = node_b - .list_payments_with_filter(|p| p.amount_msat == Some(overpaid_amount)) - .first() - .unwrap() - .id; - expect_payment_successful_event!(node_b, Some(node_b_payment_id), None); - - let node_b_payments = node_b.list_payments_with_filter(|p| p.id == node_b_payment_id); - assert_eq!(node_b_payments.len(), 1); - match node_b_payments.first().unwrap().kind { - PaymentKind::Bolt12Refund { - hash, - preimage, - secret: _, - quantity: ref qty, - payer_note: ref note, - } => { - assert!(hash.is_some()); - assert!(preimage.is_some()); - assert_eq!(&expected_quantity, qty); - assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0) - //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 - //API currently doesn't allow to do that. - }, - _ => { - panic!("Unexpected payment kind"); - }, - } - assert_eq!(node_b_payments.first().unwrap().amount_msat, Some(overpaid_amount)); - - let node_a_payment_id = PaymentId(invoice.payment_hash().0); - let node_a_payments = node_a.list_payments_with_filter(|p| p.id == node_a_payment_id); - assert_eq!(node_a_payments.len(), 1); - match node_a_payments.first().unwrap().kind { - PaymentKind::Bolt12Refund { hash, preimage, secret, .. } => { - assert!(hash.is_some()); - assert!(preimage.is_some()); - assert!(secret.is_some()); - }, - _ => { - panic!("Unexpected payment kind"); - }, - } - assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(overpaid_amount)); + let offer = node_b.bolt12_payment().receive(expected_amount_msat, "asdf", Some(1)).unwrap(); + let expected_quantity = Some(1); + let expected_payer_note = Some("Test".to_string()); + let payment_id = node_a + .bolt12_payment() + .send(&offer, expected_quantity, expected_payer_note.clone()) + .unwrap(); + + expect_payment_successful_event!(node_a, Some(payment_id), None); + let node_a_payments = node_a.list_payments(); + assert_eq!(node_a_payments.len(), 1); + match node_a_payments.first().unwrap().kind { + PaymentKind::Bolt12Offer { + hash, + preimage, + secret: _, + offer_id, + quantity: ref qty, + payer_note: ref note, + } => { + assert!(hash.is_some()); + assert!(preimage.is_some()); + assert_eq!(offer_id, offer.id()); + assert_eq!(&expected_quantity, qty); + assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); + //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 + //API currently doesn't allow to do that. + }, + _ => { + panic!("Unexpected payment kind"); + }, + } + assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); + + expect_payment_received_event!(node_b, expected_amount_msat); + let node_b_payments = node_b.list_payments(); + assert_eq!(node_b_payments.len(), 1); + match node_b_payments.first().unwrap().kind { + PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id, .. 
} => { + assert!(hash.is_some()); + assert!(preimage.is_some()); + assert!(secret.is_some()); + assert_eq!(offer_id, offer.id()); + }, + _ => { + panic!("Unexpected payment kind"); + }, + } + assert_eq!(node_b_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); + + // Test send_using_amount + let offer_amount_msat = 100_000_000; + let less_than_offer_amount = offer_amount_msat - 10_000; + let expected_amount_msat = offer_amount_msat + 10_000; + let offer = node_b.bolt12_payment().receive(offer_amount_msat, "asdf", Some(1)).unwrap(); + let expected_quantity = Some(1); + let expected_payer_note = Some("Test".to_string()); + assert!(node_a + .bolt12_payment() + .send_using_amount(&offer, less_than_offer_amount, None, None) + .is_err()); + let payment_id = node_a + .bolt12_payment() + .send_using_amount( + &offer, + expected_amount_msat, + expected_quantity, + expected_payer_note.clone(), + ) + .unwrap(); + + expect_payment_successful_event!(node_a, Some(payment_id), None); + let node_a_payments = node_a.list_payments_with_filter(|p| p.id == payment_id); + assert_eq!(node_a_payments.len(), 1); + let payment_hash = match node_a_payments.first().unwrap().kind { + PaymentKind::Bolt12Offer { + hash, + preimage, + secret: _, + offer_id, + quantity: ref qty, + payer_note: ref note, + } => { + assert!(hash.is_some()); + assert!(preimage.is_some()); + assert_eq!(offer_id, offer.id()); + assert_eq!(&expected_quantity, qty); + assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); + //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 + //API currently doesn't allow to do that. + hash.unwrap() + }, + _ => { + panic!("Unexpected payment kind"); + }, + }; + assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); + + expect_payment_received_event!(node_b, expected_amount_msat); + let node_b_payment_id = PaymentId(payment_hash.0); + let node_b_payments = node_b.list_payments_with_filter(|p| p.id == node_b_payment_id); + assert_eq!(node_b_payments.len(), 1); + match node_b_payments.first().unwrap().kind { + PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id, .. } => { + assert!(hash.is_some()); + assert!(preimage.is_some()); + assert!(secret.is_some()); + assert_eq!(offer_id, offer.id()); + }, + _ => { + panic!("Unexpected payment kind"); + }, + } + assert_eq!(node_b_payments.first().unwrap().amount_msat, Some(expected_amount_msat)); + + // Now node_b refunds the amount node_a just overpaid. 
+ let overpaid_amount = expected_amount_msat - offer_amount_msat; + let expected_quantity = Some(1); + let expected_payer_note = Some("Test".to_string()); + let refund = node_b + .bolt12_payment() + .initiate_refund(overpaid_amount, 3600, expected_quantity, expected_payer_note.clone()) + .unwrap(); + let invoice = node_a.bolt12_payment().request_refund_payment(&refund).unwrap(); + expect_payment_received_event!(node_a, overpaid_amount); + + let node_b_payment_id = node_b + .list_payments_with_filter(|p| p.amount_msat == Some(overpaid_amount)) + .first() + .unwrap() + .id; + expect_payment_successful_event!(node_b, Some(node_b_payment_id), None); + + let node_b_payments = node_b.list_payments_with_filter(|p| p.id == node_b_payment_id); + assert_eq!(node_b_payments.len(), 1); + match node_b_payments.first().unwrap().kind { + PaymentKind::Bolt12Refund { + hash, + preimage, + secret: _, + quantity: ref qty, + payer_note: ref note, + } => { + assert!(hash.is_some()); + assert!(preimage.is_some()); + assert_eq!(&expected_quantity, qty); + assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0) + //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 + //API currently doesn't allow to do that. + }, + _ => { + panic!("Unexpected payment kind"); + }, + } + assert_eq!(node_b_payments.first().unwrap().amount_msat, Some(overpaid_amount)); + + let node_a_payment_id = PaymentId(invoice.payment_hash().0); + let node_a_payments = node_a.list_payments_with_filter(|p| p.id == node_a_payment_id); + assert_eq!(node_a_payments.len(), 1); + match node_a_payments.first().unwrap().kind { + PaymentKind::Bolt12Refund { hash, preimage, secret, .. } => { + assert!(hash.is_some()); + assert!(preimage.is_some()); + assert!(secret.is_some()); + }, + _ => { + panic!("Unexpected payment kind"); + }, } + assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(overpaid_amount)); } #[test] fn generate_bip21_uri() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = - setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); let address_a = node_a.onchain_payment().new_address().unwrap(); let premined_sats = 5_000_000; @@ -677,26 +639,21 @@ fn generate_bip21_uri() { let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); - if node_a.config().node_alias.is_none() && node_b.config().node_alias.is_none() { - assert_eq!(uqr_payment, Err(NodeError::OfferCreationFailed)); - } else { - match uqr_payment.clone() { - Ok(ref uri) => { - println!("Generated URI: {}", uri); - assert!(uri.contains("BITCOIN:")); - assert!(uri.contains("lightning=")); - assert!(uri.contains("lno=")); - }, - Err(e) => panic!("Failed to generate URI: {:?}", e), - } + match uqr_payment.clone() { + Ok(ref uri) => { + println!("Generated URI: {}", uri); + assert!(uri.contains("BITCOIN:")); + assert!(uri.contains("lightning=")); + assert!(uri.contains("lno=")); + }, + Err(e) => panic!("Failed to generate URI: {:?}", e), } } #[test] fn unified_qr_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = - setup_two_nodes(&electrsd, false, true, false, random_announce_channel()); + let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); let address_a = node_a.onchain_payment().new_address().unwrap(); let premined_sats = 5_000_000; @@ -718,98 +675,88 @@ fn unified_qr_send_receive() { 
expect_channel_ready_event!(node_a, node_b.node_id()); expect_channel_ready_event!(node_b, node_a.node_id()); - // For announced nodes, we make a trivial check that node_alias is set knowing that - // `random_listening_addresses()` always sets the listening_addresses. This check - // is important to prevent the test from looping endlessly. - if node_a.config().node_alias.is_some() && node_b.config().node_alias.is_some() { - // Sleep until we broadcasted a node announcement. - while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - - // Sleep one more sec to make sure the node announcement propagates. - std::thread::sleep(std::time::Duration::from_secs(1)); + // Sleep until we broadcast a node announcement. + while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { + std::thread::sleep(std::time::Duration::from_millis(10)); } + // Sleep one more sec to make sure the node announcement propagates. + std::thread::sleep(std::time::Duration::from_secs(1)); + let expected_amount_sats = 100_000; let expiry_sec = 4_000; let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); - if node_a.config().node_alias.is_none() && node_b.config().node_alias.is_none() { - // Node must be announced if alternative one-hop `BlindedPath` is to be used used. - assert_eq!(uqr_payment, Err(NodeError::OfferCreationFailed)) - } else { - let uri_str = uqr_payment.clone().unwrap(); - let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str) { - Ok(QrPaymentResult::Bolt12 { payment_id }) => { - println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); - payment_id - }, - Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { - panic!("Expected Bolt12 payment but got Bolt11"); - }, - Ok(QrPaymentResult::Onchain { txid: _ }) => { - panic!("Expected Bolt12 payment but get On-chain transaction"); - }, - Err(e) => { - panic!("Expected Bolt12 payment but got error: {:?}", e); - }, - }; + let uri_str = uqr_payment.clone().unwrap(); + let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str) { + Ok(QrPaymentResult::Bolt12 { payment_id }) => { + println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); + payment_id + }, + Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { + panic!("Expected Bolt12 payment but got Bolt11"); + }, + Ok(QrPaymentResult::Onchain { txid: _ }) => { + panic!("Expected Bolt12 payment but get On-chain transaction"); + }, + Err(e) => { + panic!("Expected Bolt12 payment but got error: {:?}", e); + }, + }; - expect_payment_successful_event!(node_a, Some(offer_payment_id), None); - - // Removed one character from the offer to fall back on to invoice. 
- // Still needs work - let uri_str_with_invalid_offer = &uri_str[..uri_str.len() - 1]; - let invoice_payment_id: PaymentId = - match node_a.unified_qr_payment().send(uri_str_with_invalid_offer) { - Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { - panic!("Expected Bolt11 payment but got Bolt12"); - }, - Ok(QrPaymentResult::Bolt11 { payment_id }) => { - println!("\nBolt11 payment sent successfully with PaymentID: {:?}", payment_id); - payment_id - }, - Ok(QrPaymentResult::Onchain { txid: _ }) => { - panic!("Expected Bolt11 payment but got on-chain transaction"); - }, - Err(e) => { - panic!("Expected Bolt11 payment but got error: {:?}", e); - }, - }; - expect_payment_successful_event!(node_a, Some(invoice_payment_id), None); - - let expect_onchain_amount_sats = 800_000; - let onchain_uqr_payment = - node_b.unified_qr_payment().receive(expect_onchain_amount_sats, "asdf", 4_000).unwrap(); - - // Removed a character from the offer, so it would move on to the other parameters. - let txid = match node_a - .unified_qr_payment() - .send(&onchain_uqr_payment.as_str()[..onchain_uqr_payment.len() - 1]) - { + expect_payment_successful_event!(node_a, Some(offer_payment_id), None); + + // Removed one character from the offer to fall back on to invoice. + // Still needs work + let uri_str_with_invalid_offer = &uri_str[..uri_str.len() - 1]; + let invoice_payment_id: PaymentId = + match node_a.unified_qr_payment().send(uri_str_with_invalid_offer) { Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { - panic!("Expected on-chain payment but got Bolt12") + panic!("Expected Bolt11 payment but got Bolt12"); }, - Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { - panic!("Expected on-chain payment but got Bolt11"); + Ok(QrPaymentResult::Bolt11 { payment_id }) => { + println!("\nBolt11 payment sent successfully with PaymentID: {:?}", payment_id); + payment_id }, - Ok(QrPaymentResult::Onchain { txid }) => { - println!("\nOn-chain transaction successful with Txid: {}", txid); - txid + Ok(QrPaymentResult::Onchain { txid: _ }) => { + panic!("Expected Bolt11 payment but got on-chain transaction"); }, Err(e) => { - panic!("Expected on-chain payment but got error: {:?}", e); + panic!("Expected Bolt11 payment but got error: {:?}", e); }, }; + expect_payment_successful_event!(node_a, Some(invoice_payment_id), None); + + let expect_onchain_amount_sats = 800_000; + let onchain_uqr_payment = + node_b.unified_qr_payment().receive(expect_onchain_amount_sats, "asdf", 4_000).unwrap(); + + // Removed a character from the offer, so it would move on to the other parameters. 
+ let txid = match node_a + .unified_qr_payment() + .send(&onchain_uqr_payment.as_str()[..onchain_uqr_payment.len() - 1]) + { + Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { + panic!("Expected on-chain payment but got Bolt12") + }, + Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { + panic!("Expected on-chain payment but got Bolt11"); + }, + Ok(QrPaymentResult::Onchain { txid }) => { + println!("\nOn-chain transaction successful with Txid: {}", txid); + txid + }, + Err(e) => { + panic!("Expected on-chain payment but got error: {:?}", e); + }, + }; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); - wait_for_tx(&electrsd.client, txid); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + wait_for_tx(&electrsd.client, txid); - node_a.sync_wallets().unwrap(); - node_b.sync_wallets().unwrap(); + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); - assert_eq!(node_b.list_balances().total_onchain_balance_sats, 800_000); - assert_eq!(node_b.list_balances().total_lightning_balance_sats, 200_000); - } + assert_eq!(node_b.list_balances().total_onchain_balance_sats, 800_000); + assert_eq!(node_b.list_balances().total_lightning_balance_sats, 200_000); } diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index b7fc8ba42..c572fbcd8 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -9,7 +9,6 @@ mod common; -use common::random_announce_channel; use ldk_node::Builder; #[test] @@ -17,8 +16,7 @@ fn channel_full_cycle_with_vss_store() { let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd(); println!("== Node A =="); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let announce_channel = random_announce_channel(); - let config_a = common::random_config(true, announce_channel); + let config_a = common::random_config(true); let mut builder_a = Builder::from_config(config_a); builder_a.set_esplora_server(esplora_url.clone()); let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); @@ -27,7 +25,7 @@ fn channel_full_cycle_with_vss_store() { node_a.start().unwrap(); println!("\n== Node B =="); - let config_b = common::random_config(true, announce_channel); + let config_b = common::random_config(true); let mut builder_b = Builder::from_config(config_b); builder_b.set_esplora_server(esplora_url); let node_b = builder_b.build_with_vss_store(vss_base_url, "node_2_store".to_string()).unwrap(); From b8e3d7a4bb1bbc797fbb6f907be0f9e948f1d770 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 7 Oct 2024 13:40:05 +0200 Subject: [PATCH 054/127] Clarify `Builder`/`Config` docs We improve the docs a bit, highlight the requirements for node aliases, and that we'll only ever allow announcing channels if they are properly set. --- src/builder.rs | 10 +++++++--- src/config.rs | 8 +++++--- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index d2ceb5f22..9faf97714 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -307,9 +307,10 @@ impl NodeBuilder { Ok(self) } - /// Sets the alias the [`Node`] will use in its announcement. + /// Sets the node alias that will be used when broadcasting announcements to the gossip + /// network. /// - /// The provided alias must be a valid UTF-8 string. + /// The provided alias must be a valid UTF-8 string and no longer than 32 bytes in total. 
pub fn set_node_alias(&mut self, node_alias: String) -> Result<&mut Self, BuildError> { let node_alias = sanitize_alias(&node_alias)?; @@ -515,7 +516,10 @@ impl ArcedNodeBuilder { self.inner.write().unwrap().set_listening_addresses(listening_addresses).map(|_| ()) } - /// Sets the node alias. + /// Sets the node alias that will be used when broadcasting announcements to the gossip + /// network. + /// + /// The provided alias must be a valid UTF-8 string and no longer than 32 bytes in total. pub fn set_node_alias(&self, node_alias: String) -> Result<(), BuildError> { self.inner.write().unwrap().set_node_alias(node_alias).map(|_| ()) } diff --git a/src/config.rs b/src/config.rs index 574789ac6..bf85eec9e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -113,12 +113,14 @@ pub struct Config { pub network: Network, /// The addresses on which the node will listen for incoming connections. /// - /// **Note**: Node announcements will only be broadcast if the `node_alias` and the + /// **Note**: We will only allow opening and accepting public channels if the `node_alias` and the /// `listening_addresses` are set. pub listening_addresses: Option>, - /// The node alias to be used in announcements. + /// The node alias that will be used when broadcasting announcements to the gossip network. /// - /// **Note**: Node announcements will only be broadcast if the `node_alias` and the + /// The provided alias must be a valid UTF-8 string and no longer than 32 bytes in total. + /// + /// **Note**: We will only allow opening and accepting public channels if the `node_alias` and the /// `listening_addresses` are set. pub node_alias: Option, /// The time in-between background sync attempts of the onchain wallet, in seconds. From 7c3232c483e55a08422eab0add5e271c6db22f8f Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 7 Oct 2024 13:42:19 +0200 Subject: [PATCH 055/127] Fix alignment in config docs --- src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config.rs b/src/config.rs index bf85eec9e..e03f6775b 100644 --- a/src/config.rs +++ b/src/config.rs @@ -87,7 +87,7 @@ pub(crate) const WALLET_KEYS_SEED_LEN: usize = 64; /// | `log_dir_path` | None | /// | `network` | Bitcoin | /// | `listening_addresses` | None | -/// | `node_alias` | None | +/// | `node_alias` | None | /// | `default_cltv_expiry_delta` | 144 | /// | `onchain_wallet_sync_interval_secs` | 80 | /// | `wallet_sync_interval_secs` | 30 | From 81c36d4c651c86649ed5e04dc1518f8fac792ec4 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 7 Oct 2024 13:43:02 +0200 Subject: [PATCH 056/127] Move `NodeAlias` import to other non-`pub` imports --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index ad377f959..b3618f718 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -100,7 +100,6 @@ mod wallet; pub use bip39; pub use bitcoin; pub use lightning; -use lightning::routing::gossip::NodeAlias; pub use lightning_invoice; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; @@ -151,6 +150,7 @@ use lightning::chain::{BestBlock, Confirm}; use lightning::events::bump_transaction::Wallet as LdkWallet; use lightning::ln::channelmanager::{ChannelShutdownState, PaymentId}; use lightning::ln::msgs::SocketAddress; +use lightning::routing::gossip::NodeAlias; pub use lightning::util::logger::Level as LogLevel; From d630365ae09a744acf056c974845b0a7d4925276 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 7 Oct 2024 13:53:28 +0200 Subject: [PATCH 057/127] Update 
docs on `open{_announced}_channel` --- src/lib.rs | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index b3618f718..4fe6ad280 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1186,10 +1186,6 @@ impl Node { Ok(()) } - /// Connect to a node and open a new channel. - /// - /// See [`Node::open_channel`] or [`Node::open_announced_channel`] for more information about - /// parameters. fn open_channel_inner( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, push_to_counterparty_msat: Option, channel_config: Option, @@ -1293,7 +1289,9 @@ impl Node { } } - /// Connect to a node and open a new channel. + /// Connect to a node and open a new unannounced channel. + /// + /// To open an announced channel, see [`Node::open_announced_channel`]. /// /// Disconnects and reconnects are handled automatically. /// @@ -1305,8 +1303,6 @@ impl Node { /// [`AnchorChannelsConfig::per_channel_reserve_sats`] is available and will be retained before /// opening the channel. /// - /// Calls `Node::open_channel_inner` with `announce_channel` set to `false`. - /// /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. pub fn open_channel( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, @@ -1324,6 +1320,12 @@ impl Node { /// Connect to a node and open a new announced channel. /// + /// This will return an error if the node has not been sufficiently configured to operate as a + /// forwarding node that can properly announce its existence to the publip network graph, i.e., + /// [`Config::listening_addresses`] and [`Config::node_alias`] are unset. + /// + /// To open an unannounced channel, see [`Node::open_channel`]. + /// /// Disconnects and reconnects are handled automatically. /// /// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the @@ -1334,12 +1336,6 @@ impl Node { /// [`AnchorChannelsConfig::per_channel_reserve_sats`] is available and will be retained before /// opening the channel. /// - /// Note that, regardless of the value of `announce_channel` passed, this function - /// checks that a node is configured to announce the channel to be openned and returns - /// an error if the configuration is wrong. Otherwise, calls `Node::open_channel_inner` - /// with `announced_channel` equals to `true`. - /// See `config::can_announce_channel` for more details. - /// /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. pub fn open_announced_channel( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, From 58f40b99bef88b5ac281a6438dc68d7debe37bec Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 7 Oct 2024 14:16:24 +0200 Subject: [PATCH 058/127] Replace `ChannelAnnouncementStatus` boilerplate with a simple bool flag We drop the previously-introduced `ChannelAnnouncementStatus`/`ChannelAnnouncementBlocker` types. While informative, they were a bit too much boilerplate. Instead we opt to simply return a `bool` from `may_announce_channel`, and don't spawn the node announcment task to begin with if we're not configured properly. 
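For illustration, a minimal, self-contained sketch of the simplified predicate and the cases the updated unit test exercises. The `Config` and `NodeAlias` types here are hypothetical stand-ins for illustration only, not the crate's real ones:

    #[allow(dead_code)]
    struct NodeAlias([u8; 32]);

    struct Config {
        node_alias: Option<NodeAlias>,
        listening_addresses: Option<Vec<String>>,
    }

    // Mirror of the simplified check: an alias *and* at least one listening
    // address are required before we consider the node announceable.
    fn may_announce_channel(config: &Config) -> bool {
        config.node_alias.is_some()
            && config.listening_addresses.as_ref().map_or(false, |addrs| !addrs.is_empty())
    }

    fn main() {
        let mut config = Config { node_alias: None, listening_addresses: None };
        assert!(!may_announce_channel(&config)); // neither alias nor addresses set

        let mut alias_bytes = [0u8; 32];
        alias_bytes[..8].copy_from_slice(b"ldk-node");
        config.node_alias = Some(NodeAlias(alias_bytes));
        assert!(!may_announce_channel(&config)); // alias set, addresses still missing

        config.listening_addresses = Some(vec![]);
        assert!(!may_announce_channel(&config)); // an empty address list is not enough

        config.listening_addresses = Some(vec!["127.0.0.1:9735".to_string()]);
        assert!(may_announce_channel(&config)); // alias plus non-empty addresses
    }

Callers can then simply gate on the returned bool instead of matching on the dropped status/blocker enums.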
--- src/config.rs | 84 +++++++-------------------------------------- src/lib.rs | 95 ++++++++++++++++++++++++--------------------------- 2 files changed, 57 insertions(+), 122 deletions(-) diff --git a/src/config.rs b/src/config.rs index e03f6775b..2ccfc2db9 100644 --- a/src/config.rs +++ b/src/config.rs @@ -278,47 +278,9 @@ pub fn default_config() -> Config { Config::default() } -/// Specifies reasons why a channel cannot be announced. -#[derive(Debug, PartialEq)] -pub(crate) enum ChannelAnnouncementBlocker { - /// The node alias is not set. - MissingNodeAlias, - /// The listening addresses are not set. - MissingListeningAddresses, - // This listening addresses is set but the vector is empty. - EmptyListeningAddresses, -} - -/// Enumeration defining the announcement status of a channel. -#[derive(Debug, PartialEq)] -pub(crate) enum ChannelAnnouncementStatus { - /// The channel is announceable. - Announceable, - /// The channel is not announceable. - Unannounceable(ChannelAnnouncementBlocker), -} - -/// Checks if a node is can announce a channel based on the configured values of both the node's -/// alias and its listening addresses. -/// -/// If either of them is unset, the node cannot announce the channel. This ability to announce/ -/// unannounce a channel is codified with `ChannelAnnouncementStatus` -pub(crate) fn can_announce_channel(config: &Config) -> ChannelAnnouncementStatus { - if config.node_alias.is_none() { - return ChannelAnnouncementStatus::Unannounceable( - ChannelAnnouncementBlocker::MissingNodeAlias, - ); - } - - match &config.listening_addresses { - None => ChannelAnnouncementStatus::Unannounceable( - ChannelAnnouncementBlocker::MissingListeningAddresses, - ), - Some(addresses) if addresses.is_empty() => ChannelAnnouncementStatus::Unannounceable( - ChannelAnnouncementBlocker::EmptyListeningAddresses, - ), - Some(_) => ChannelAnnouncementStatus::Announceable, - } +pub(crate) fn may_announce_channel(config: &Config) -> bool { + config.node_alias.is_some() + && config.listening_addresses.as_ref().map_or(false, |addrs| !addrs.is_empty()) } pub(crate) fn default_user_config(config: &Config) -> UserConfig { @@ -333,13 +295,10 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig { user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = config.anchor_channels_config.is_some(); - match can_announce_channel(config) { - ChannelAnnouncementStatus::Announceable => (), - ChannelAnnouncementStatus::Unannounceable(_) => { - user_config.accept_forwards_to_priv_channels = false; - user_config.channel_handshake_config.announced_channel = false; - user_config.channel_handshake_limits.force_announced_channel_preference = true; - }, + if !may_announce_channel(config) { + user_config.accept_forwards_to_priv_channels = false; + user_config.channel_handshake_config.announced_channel = false; + user_config.channel_handshake_limits.force_announced_channel_preference = true; } user_config @@ -349,23 +308,16 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig { mod tests { use std::str::FromStr; - use crate::config::ChannelAnnouncementStatus; - - use super::can_announce_channel; + use super::may_announce_channel; use super::Config; use super::NodeAlias; use super::SocketAddress; #[test] - fn node_can_announce_channel() { + fn node_announce_channel() { // Default configuration with node alias and listening addresses unset let mut node_config = Config::default(); - assert_eq!( - can_announce_channel(&node_config), - 
ChannelAnnouncementStatus::Unannounceable( - crate::config::ChannelAnnouncementBlocker::MissingNodeAlias - ) - ); + assert!(!may_announce_channel(&node_config)); // Set node alias with listening addresses unset let alias_frm_str = |alias: &str| { @@ -374,21 +326,11 @@ mod tests { NodeAlias(bytes) }; node_config.node_alias = Some(alias_frm_str("LDK_Node")); - assert_eq!( - can_announce_channel(&node_config), - ChannelAnnouncementStatus::Unannounceable( - crate::config::ChannelAnnouncementBlocker::MissingListeningAddresses - ) - ); + assert!(!may_announce_channel(&node_config)); // Set node alias with an empty list of listening addresses node_config.listening_addresses = Some(vec![]); - assert_eq!( - can_announce_channel(&node_config), - ChannelAnnouncementStatus::Unannounceable( - crate::config::ChannelAnnouncementBlocker::EmptyListeningAddresses - ) - ); + assert!(!may_announce_channel(&node_config)); // Set node alias with a non-empty list of listening addresses let socket_address = @@ -396,6 +338,6 @@ mod tests { if let Some(ref mut addresses) = node_config.listening_addresses { addresses.push(socket_address); } - assert_eq!(can_announce_channel(&node_config), ChannelAnnouncementStatus::Announceable); + assert!(may_announce_channel(&node_config)); } } diff --git a/src/lib.rs b/src/lib.rs index 4fe6ad280..7dd8bed03 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -122,8 +122,8 @@ pub use builder::BuildError; pub use builder::NodeBuilder as Builder; use config::{ - can_announce_channel, default_user_config, ChannelAnnouncementStatus, - LDK_WALLET_SYNC_TIMEOUT_SECS, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, + default_user_config, may_announce_channel, LDK_WALLET_SYNC_TIMEOUT_SECS, + NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, RGS_SYNC_INTERVAL, WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; @@ -605,20 +605,20 @@ impl Node { let bcast_ann_timestamp = Arc::clone(&self.latest_node_announcement_broadcast_timestamp); let mut stop_bcast = self.stop_sender.subscribe(); let node_alias = self.config.node_alias.clone(); - let can_announce_channel = can_announce_channel(&self.config); - runtime.spawn(async move { - // We check every 30 secs whether our last broadcast is NODE_ANN_BCAST_INTERVAL away. - #[cfg(not(test))] - let mut interval = tokio::time::interval(Duration::from_secs(30)); - #[cfg(test)] - let mut interval = tokio::time::interval(Duration::from_secs(5)); - loop { - tokio::select! { + if may_announce_channel(&self.config) { + runtime.spawn(async move { + // We check every 30 secs whether our last broadcast is NODE_ANN_BCAST_INTERVAL away. + #[cfg(not(test))] + let mut interval = tokio::time::interval(Duration::from_secs(30)); + #[cfg(test)] + let mut interval = tokio::time::interval(Duration::from_secs(5)); + loop { + tokio::select! { _ = stop_bcast.changed() => { log_trace!( bcast_logger, "Stopping broadcasting node announcements.", - ); + ); return; } _ = interval.tick() => { @@ -648,36 +648,37 @@ impl Node { continue; } - match can_announce_channel { - ChannelAnnouncementStatus::Unannounceable(_) => { - // Skip if we are not listening on any addresses or if the node alias is not set. 
- continue; - } - ChannelAnnouncementStatus::Announceable => { - let addresses = bcast_config.listening_addresses.clone().unwrap_or(Vec::new()); - if let Some(node_alias) = node_alias.as_ref() { - bcast_pm.broadcast_node_announcement([0; 3], node_alias.0, addresses); - } else { - continue - } - } - } + let addresses = if let Some(addresses) = bcast_config.listening_addresses.clone() { + addresses + } else { + debug_assert!(false, "We checked whether the node may announce, so listening addresses should always be set"); + continue; + }; + + if let Some(node_alias) = node_alias.as_ref() { + bcast_pm.broadcast_node_announcement([0; 3], node_alias.0, addresses); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *bcast_ann_timestamp.write().unwrap() = unix_time_secs_opt; + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + *bcast_ann_timestamp.write().unwrap() = unix_time_secs_opt; - if let Some(unix_time_secs) = unix_time_secs_opt { - io::utils::write_latest_node_ann_bcast_timestamp(unix_time_secs, Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) - .unwrap_or_else(|e| { - log_error!(bcast_logger, "Persistence failed: {}", e); - panic!("Persistence failed"); - }); + if let Some(unix_time_secs) = unix_time_secs_opt { + io::utils::write_latest_node_ann_bcast_timestamp(unix_time_secs, Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) + .unwrap_or_else(|e| { + log_error!(bcast_logger, "Persistence failed: {}", e); + panic!("Persistence failed"); + }); + } + } else { + debug_assert!(false, "We checked whether the node may announce, so node alias should always be set"); + continue } + } + } } - } - }); + }); + } let mut stop_tx_bcast = self.stop_sender.subscribe(); let tx_bcaster = Arc::clone(&self.tx_broadcaster); @@ -1341,26 +1342,18 @@ impl Node { &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, push_to_counterparty_msat: Option, channel_config: Option, ) -> Result { - match can_announce_channel(&self.config) { - config::ChannelAnnouncementStatus::Announceable => self.open_channel_inner( + if may_announce_channel(&self.config) { + self.open_channel_inner( node_id, address, channel_amount_sats, push_to_counterparty_msat, channel_config, true, - ), - config::ChannelAnnouncementStatus::Unannounceable(reason) => match reason { - config::ChannelAnnouncementBlocker::MissingNodeAlias => { - return Err(Error::InvalidNodeAlias) - }, - config::ChannelAnnouncementBlocker::MissingListeningAddresses => { - return Err(Error::InvalidSocketAddress) - }, - config::ChannelAnnouncementBlocker::EmptyListeningAddresses => { - return Err(Error::InvalidSocketAddress) - }, - }, + ) + } else { + log_error!(self.logger, "Failed to open announced channel as the node hasn't been sufficiently configured to act as a forwarding node. Please make sure to configure listening addresses and node alias"); + return Err(Error::ChannelCreationFailed); } } From 534b1ac6753c819ff148398b94ff7953b0d90614 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 7 Oct 2024 14:41:00 +0200 Subject: [PATCH 059/127] Make announcing channels in tests a choice again Previously, we always opened announced channels in tests, but it should be a deliberate choice depending on the scenario we're trying to test for.
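As a point of reference for the gating introduced above: a node now needs both a node alias and at least one listening address before `open_announced_channel` succeeds (otherwise it returns `Error::ChannelCreationFailed`). The following is a minimal illustrative sketch, not part of any patch in this series; it mirrors the `src/config.rs` test and assumes `Config`, `NodeAlias`, and `SocketAddress` are in scope as they are there.

```rust
use std::str::FromStr;

// Illustrative only: set both prerequisites checked by `may_announce_channel`.
let mut config = Config::default();

// A node alias, zero-padded to the 32-byte on-wire format.
let alias = "ldk-node";
let mut alias_bytes = [0u8; 32];
alias_bytes[..alias.len()].copy_from_slice(alias.as_bytes());
config.node_alias = Some(NodeAlias(alias_bytes));

// At least one listening address.
config.listening_addresses =
	Some(vec![SocketAddress::from_str("127.0.0.1:9735").expect("valid address")]);

assert!(may_announce_channel(&config));
```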
--- tests/common/mod.rs | 50 +++++++++++++++++++-------------- tests/integration_tests_rust.rs | 16 +++++------ 2 files changed, 37 insertions(+), 29 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index c0059b8f4..f8a9eae7a 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -203,16 +203,11 @@ pub(crate) fn random_listening_addresses() -> Vec { pub(crate) fn random_node_alias() -> Option { let mut rng = thread_rng(); - let ranged_val = rng.gen_range(0..10); - match ranged_val { - 0 => None, - val => { - let alias = format!("ldk-node-{}", val); - let mut bytes = [0u8; 32]; - bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); - Some(NodeAlias(bytes)) - }, - } + let rand_val = rng.gen_range(0..1000); + let alias = format!("ldk-node-{}", rand_val); + let mut bytes = [0u8; 32]; + bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + Some(NodeAlias(bytes)) } pub(crate) fn random_config(anchor_channels: bool) -> Config { @@ -404,17 +399,30 @@ pub(crate) fn premine_and_distribute_funds( } pub fn open_channel( - node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, electrsd: &ElectrsD, + node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, should_announce: bool, + electrsd: &ElectrsD, ) { - node_a - .open_announced_channel( - node_b.node_id(), - node_b.listening_addresses().unwrap().first().unwrap().clone(), - funding_amount_sat, - None, - None, - ) - .unwrap(); + if should_announce { + node_a + .open_announced_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + funding_amount_sat, + None, + None, + ) + .unwrap(); + } else { + node_a + .open_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + funding_amount_sat, + None, + None, + ) + .unwrap(); + } assert!(node_a.list_peers().iter().find(|c| { c.node_id == node_b.node_id() }).is_some()); let funding_txo_a = expect_channel_pending_event!(node_a, node_b.node_id()); @@ -451,7 +459,7 @@ pub(crate) fn do_channel_full_cycle( let funding_amount_sat = 2_080_000; let push_msat = (funding_amount_sat / 2) * 1000; // balance the channel node_a - .open_channel( + .open_announced_channel( node_b.node_id(), node_b.listening_addresses().unwrap().first().unwrap().clone(), funding_amount_sat, diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 68d1effbb..907e89084 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -131,16 +131,16 @@ fn multi_hop_sending() { // \ / // (1M:0)- N3 -(1M:0) - open_channel(&nodes[0], &nodes[1], 100_000, &electrsd); - open_channel(&nodes[1], &nodes[2], 1_000_000, &electrsd); + open_channel(&nodes[0], &nodes[1], 100_000, true, &electrsd); + open_channel(&nodes[1], &nodes[2], 1_000_000, true, &electrsd); // We need to sync wallets in-between back-to-back channel opens from the same node so BDK // wallet picks up on the broadcast funding tx and doesn't double-spend itself. // // TODO: Remove once fixed in BDK. 
nodes[1].sync_wallets().unwrap(); - open_channel(&nodes[1], &nodes[3], 1_000_000, &electrsd); - open_channel(&nodes[2], &nodes[4], 1_000_000, &electrsd); - open_channel(&nodes[3], &nodes[4], 1_000_000, &electrsd); + open_channel(&nodes[1], &nodes[3], 1_000_000, true, &electrsd); + open_channel(&nodes[2], &nodes[4], 1_000_000, true, &electrsd); + open_channel(&nodes[3], &nodes[4], 1_000_000, true, &electrsd); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); @@ -418,7 +418,7 @@ fn simple_bolt12_send_receive() { ); node_a.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 4_000_000, &electrsd); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); @@ -625,7 +625,7 @@ fn generate_bip21_uri() { ); node_a.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 4_000_000, &electrsd); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); node_a.sync_wallets().unwrap(); @@ -666,7 +666,7 @@ fn unified_qr_send_receive() { ); node_a.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 4_000_000, &electrsd); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); node_a.sync_wallets().unwrap(); From 85862f53e8d3098066d55855dc5af7a5a89fbae6 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 7 Oct 2024 14:43:28 +0200 Subject: [PATCH 060/127] Drop `open_announced_channel` from README .. as we generally want to ~discourage users from arbitrarily opening announced channels. They really only should do so if they are willing and able to run a proper 24/7 forwarding node. And node operators will likely know what to look for in the API. --- README.md | 2 +- src/lib.rs | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 7bcfb6f78..df072999d 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ A ready-to-go Lightning node library built using [LDK][ldk] and [BDK][bdk]. LDK Node is a self-custodial Lightning node in library form. Its central goal is to provide a small, simple, and straightforward interface that enables users to easily set up and run a Lightning node with an integrated on-chain wallet. While minimalism is at its core, LDK Node aims to be sufficiently modular and configurable to be useful for a variety of use cases. ## Getting Started -The primary abstraction of the library is the [`Node`][api_docs_node], which can be retrieved by setting up and configuring a [`Builder`][api_docs_builder] to your liking and calling one of the `build` methods. `Node` can then be controlled via commands such as `start`, `stop`, `open_channel`, `open_announced_channel`, `send`, etc. +The primary abstraction of the library is the [`Node`][api_docs_node], which can be retrieved by setting up and configuring a [`Builder`][api_docs_builder] to your liking and calling one of the `build` methods. `Node` can then be controlled via commands such as `start`, `stop`, `open_channel`, `send`, etc. ```rust use ldk_node::Builder; diff --git a/src/lib.rs b/src/lib.rs index 7dd8bed03..914dec4b1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,8 +20,7 @@ //! //! The primary abstraction of the library is the [`Node`], which can be retrieved by setting up //! and configuring a [`Builder`] to your liking and calling [`build`]. `Node` can then be -//! 
controlled via commands such as [`start`], [`stop`], [`open_channel`], [`open_announced_channel`] -//! [`send`], etc.: +//! controlled via commands such as [`start`], [`stop`], [`open_channel`], [`send`], etc.: //! //! ```no_run //! use ldk_node::Builder; @@ -64,7 +63,6 @@ //! [`start`]: Node::start //! [`stop`]: Node::stop //! [`open_channel`]: Node::open_channel -//! [`open_announced_channel`]: Node::open_announced_channel //! [`send`]: Bolt11Payment::send //! #![cfg_attr(not(feature = "uniffi"), deny(missing_docs))] From a225e1eac5543cd2962472757129a7ca0b3b4fe8 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 5 Sep 2024 09:37:18 +0200 Subject: [PATCH 061/127] Prefactor: Move `src/wallet.rs` to `src/wallet/mod.rs` We will be adding some wallet persistence/serialization related types in a separate module down the line, so here we prepare for it by already moving the wallet code to a module directory. --- src/{wallet.rs => wallet/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{wallet.rs => wallet/mod.rs} (100%) diff --git a/src/wallet.rs b/src/wallet/mod.rs similarity index 100% rename from src/wallet.rs rename to src/wallet/mod.rs From d0de1447a3d8c52e27a933aac6b21891beff68ff Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 19 Aug 2024 15:56:16 +0200 Subject: [PATCH 062/127] Update the world ... we update LDK, lightning-liquidity, BDK, rust-bitcoin, rust-esplora-client, rust-electrum-client, etc. --- Cargo.toml | 38 ++-- bindings/ldk_node.udl | 76 +++++-- src/balance.rs | 100 +++++++-- src/builder.rs | 118 ++++++----- src/config.rs | 4 +- src/error.rs | 35 +++- src/event.rs | 90 ++++---- src/fee_estimator.rs | 33 ++- src/graph.rs | 4 +- src/io/mod.rs | 42 ++++ src/io/sqlite_store/mod.rs | 30 ++- src/io/test_utils.rs | 3 +- src/io/utils.rs | 173 ++++++++++++++- src/io/vss_store.rs | 13 +- src/lib.rs | 99 +++++---- src/logger.rs | 2 +- src/message_handler.rs | 20 ++ src/payment/bolt11.rs | 19 +- src/payment/bolt12.rs | 46 ++-- src/payment/onchain.rs | 6 +- src/payment/store.rs | 8 +- src/payment/unified_qr.rs | 3 +- src/sweep.rs | 6 +- src/tx_broadcaster.rs | 11 +- src/types.rs | 25 +-- src/uniffi_types.rs | 3 +- src/wallet/mod.rs | 359 +++++++++++++++++--------------- src/wallet/persist.rs | 187 +++++++++++++++++ src/wallet/ser.rs | 346 ++++++++++++++++++++++++++++++ tests/common/mod.rs | 12 +- tests/integration_tests_rust.rs | 9 +- 31 files changed, 1439 insertions(+), 481 deletions(-) create mode 100644 src/wallet/persist.rs create mode 100644 src/wallet/ser.rs diff --git a/Cargo.toml b/Cargo.toml index 39f3b947d..89443e031 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,14 +28,14 @@ panic = 'abort' # Abort on panic default = [] [dependencies] -lightning = { version = "0.0.123", features = ["std"] } -lightning-invoice = { version = "0.31.0" } -lightning-net-tokio = { version = "0.0.123" } -lightning-persister = { version = "0.0.123" } -lightning-background-processor = { version = "0.0.123", features = ["futures"] } -lightning-rapid-gossip-sync = { version = "0.0.123" } -lightning-transaction-sync = { version = "0.0.123", features = ["esplora-async-https", "time"] } -lightning-liquidity = { version = "=0.1.0-alpha.4", features = ["std"] } +lightning = { version = "0.0.124", features = ["std"] } +lightning-invoice = { version = "0.32.0" } +lightning-net-tokio = { version = "0.0.124" } +lightning-persister = { version = "0.0.124" } +lightning-background-processor = { version = "0.0.124", features = ["futures"] } +lightning-rapid-gossip-sync = { 
version = "0.0.124" } +lightning-transaction-sync = { version = "0.0.124", features = ["esplora-async-https", "time"] } +lightning-liquidity = { version = "0.1.0-alpha.5", features = ["std"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std"] } #lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main" } @@ -55,18 +55,20 @@ lightning-liquidity = { version = "=0.1.0-alpha.4", features = ["std"] } #lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync", features = ["esplora-async"] } #lightning-liquidity = { path = "../lightning-liquidity", features = ["std"] } -bdk = { version = "0.29.0", default-features = false, features = ["std", "async-interface", "use-esplora-async", "sqlite-bundled", "keys-bip39"]} +bdk_chain = { version = "=0.19.0", default-features = false, features = ["std"] } +bdk_esplora = { version = "=0.18.0", default-features = false, features = ["async-https-rustls"]} +bdk_wallet = { version = "=1.0.0-beta.4", default-features = false, features = ["std", "keys-bip39"]} reqwest = { version = "0.11", default-features = false, features = ["json", "rustls-tls"] } rusqlite = { version = "0.28.0", features = ["bundled"] } -bitcoin = "0.30.2" +bitcoin = "0.32.2" bip39 = "2.0.0" -bip21 = { version = "0.3.1", features = ["std"], default-features = false } +bip21 = { version = "0.5", features = ["std"], default-features = false } rand = "0.8.5" chrono = { version = "0.4", default-features = false, features = ["clock"] } -tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync" ] } -esplora-client = { version = "0.6", default-features = false } +tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } +esplora-client = { version = "0.9", default-features = false } libc = "0.2" uniffi = { version = "0.26.0", features = ["build"], optional = true } @@ -78,18 +80,18 @@ prost = { version = "0.11.6", default-features = false} winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { version = "0.0.123", features = ["std", "_test_utils"] } +lightning = { version = "0.0.124", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } -electrum-client = { version = "0.15.1", default-features = true } -bitcoincore-rpc = { version = "0.17.0", default-features = false } +electrum-client = { version = "0.21.0", default-features = true } +bitcoincore-rpc = { version = "0.19.0", default-features = false } proptest = "1.0.0" regex = "1.5.6" [target.'cfg(not(no_download))'.dev-dependencies] -electrsd = { version = "0.26.0", features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] } +electrsd = { version = "0.29.0", features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] } [target.'cfg(no_download)'.dev-dependencies] -electrsd = { version = "0.26.0", features = ["legacy"] } +electrsd = { version = "0.29.0", features = ["legacy"] } [target.'cfg(cln_test)'.dev-dependencies] clightningrpc = { version = "0.3.0-beta.8", default-features = false } diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 6663604a2..96490f2b7 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -79,7 +79,7 @@ interface Node { [Throws=NodeError] void close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id); [Throws=NodeError] 
- void force_close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id); + void force_close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, string? reason); [Throws=NodeError] void update_channel_config([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, ChannelConfig channel_config); [Throws=NodeError] @@ -92,7 +92,6 @@ interface Node { sequence list_peers(); sequence list_channels(); NetworkGraph network_graph(); - [Throws=NodeError] string sign_message([ByRef]sequence msg); boolean verify_signature([ByRef]sequence msg, [ByRef]string sig, [ByRef]PublicKey pkey); }; @@ -130,9 +129,9 @@ interface Bolt12Payment { [Throws=NodeError] PaymentId send_using_amount([ByRef]Offer offer, u64 amount_msat, u64? quantity, string? payer_note); [Throws=NodeError] - Offer receive(u64 amount_msat, [ByRef]string description, u64? quantity); + Offer receive(u64 amount_msat, [ByRef]string description, u32? expiry_secs, u64? quantity); [Throws=NodeError] - Offer receive_variable_amount([ByRef]string description); + Offer receive_variable_amount([ByRef]string description, u32? expiry_secs); [Throws=NodeError] Bolt12Invoice request_refund_payment([ByRef]Refund refund); [Throws=NodeError] @@ -183,7 +182,6 @@ enum NodeError { "WalletOperationFailed", "WalletOperationTimeout", "OnchainTxSigningFailed", - "MessageSigningFailed", "TxSyncFailed", "TxSyncTimeout", "GossipUpdateFailed", @@ -251,7 +249,7 @@ enum BuildError { [Enum] interface Event { PaymentSuccessful(PaymentId? payment_id, PaymentHash payment_hash, u64? fee_paid_msat); - PaymentFailed(PaymentId? payment_id, PaymentHash payment_hash, PaymentFailureReason? reason); + PaymentFailed(PaymentId? payment_id, PaymentHash? payment_hash, PaymentFailureReason? reason); PaymentReceived(PaymentId? payment_id, PaymentHash payment_hash, u64 amount_msat); PaymentClaimable(PaymentId payment_id, PaymentHash payment_hash, u64 claimable_amount_msat, u32? claim_deadline); ChannelPending(ChannelId channel_id, UserChannelId user_channel_id, ChannelId former_temporary_channel_id, PublicKey counterparty_node_id, OutPoint funding_txo); @@ -266,12 +264,15 @@ enum PaymentFailureReason { "PaymentExpired", "RouteNotFound", "UnexpectedError", + "UnknownRequiredFeatures", + "InvoiceRequestExpired", + "InvoiceRequestRejected", }; [Enum] interface ClosureReason { CounterpartyForceClosed(UntrustedString peer_msg); - HolderForceClosed(); + HolderForceClosed(boolean? broadcasted_latest_txn); LegacyCooperativeClosure(); CounterpartyInitiatedCooperativeClosure(); LocallyInitiatedCooperativeClosure(); @@ -283,6 +284,7 @@ interface ClosureReason { CounterpartyCoopClosedUnfundedChannel(); FundingBatchClosure(); HTLCsTimedOut(); + PeerFeerateTooLow(u32 peer_feerate_sat_per_kw, u32 required_feerate_sat_per_kw); }; [Enum] @@ -368,7 +370,7 @@ dictionary ChannelDetails { boolean is_outbound; boolean is_channel_ready; boolean is_usable; - boolean is_public; + boolean is_announced; u16? cltv_expiry_delta; u64 counterparty_unspendable_punishment_reserve; u64? 
counterparty_outbound_htlc_minimum_msat; @@ -393,12 +395,58 @@ dictionary PeerDetails { [Enum] interface LightningBalance { - ClaimableOnChannelClose ( ChannelId channel_id, PublicKey counterparty_node_id, u64 amount_satoshis ); - ClaimableAwaitingConfirmations ( ChannelId channel_id, PublicKey counterparty_node_id, u64 amount_satoshis, u32 confirmation_height ); - ContentiousClaimable ( ChannelId channel_id, PublicKey counterparty_node_id, u64 amount_satoshis, u32 timeout_height, PaymentHash payment_hash, PaymentPreimage payment_preimage ); - MaybeTimeoutClaimableHTLC ( ChannelId channel_id, PublicKey counterparty_node_id, u64 amount_satoshis, u32 claimable_height, PaymentHash payment_hash); - MaybePreimageClaimableHTLC ( ChannelId channel_id, PublicKey counterparty_node_id, u64 amount_satoshis, u32 expiry_height, PaymentHash payment_hash); - CounterpartyRevokedOutputClaimable ( ChannelId channel_id, PublicKey counterparty_node_id, u64 amount_satoshis ); + ClaimableOnChannelClose ( + ChannelId channel_id, + PublicKey counterparty_node_id, + u64 amount_satoshis, + u64 transaction_fee_satoshis, + u64 outbound_payment_htlc_rounded_msat, + u64 outbound_forwarded_htlc_rounded_msat, + u64 inbound_claiming_htlc_rounded_msat, + u64 inbound_htlc_rounded_msat + ); + ClaimableAwaitingConfirmations ( + ChannelId channel_id, + PublicKey counterparty_node_id, + u64 amount_satoshis, + u32 confirmation_height, + BalanceSource source + ); + ContentiousClaimable ( + ChannelId channel_id, + PublicKey counterparty_node_id, + u64 amount_satoshis, + u32 timeout_height, + PaymentHash payment_hash, + PaymentPreimage payment_preimage + ); + MaybeTimeoutClaimableHTLC ( + ChannelId channel_id, + PublicKey counterparty_node_id, + u64 amount_satoshis, + u32 claimable_height, + PaymentHash payment_hash, + boolean outbound_payment + ); + MaybePreimageClaimableHTLC ( + ChannelId channel_id, + PublicKey counterparty_node_id, + u64 amount_satoshis, + u32 expiry_height, + PaymentHash payment_hash + ); + CounterpartyRevokedOutputClaimable ( + ChannelId channel_id, + PublicKey counterparty_node_id, + u64 amount_satoshis + ); +}; + +enum BalanceSource { + "HolderForceClosed", + "CounterpartyForceClosed", + "CoopClose", + "Htlc", }; [Enum] diff --git a/src/balance.rs b/src/balance.rs index 1f061cded..c43386d80 100644 --- a/src/balance.rs +++ b/src/balance.rs @@ -5,10 +5,12 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::sweep::value_satoshis_from_descriptor; +use crate::sweep::value_from_descriptor; use lightning::chain::channelmonitor::Balance as LdkBalance; -use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage}; +use lightning::chain::channelmonitor::BalanceSource; +use lightning::ln::types::ChannelId; +use lightning::ln::{PaymentHash, PaymentPreimage}; use lightning::util::sweep::{OutputSpendStatus, TrackedSpendableOutput}; use bitcoin::secp256k1::PublicKey; @@ -80,6 +82,49 @@ pub enum LightningBalance { /// The amount available to claim, in satoshis, excluding the on-chain fees which will be /// required to do so. amount_satoshis: u64, + /// The transaction fee we pay for the closing commitment transaction. This amount is not + /// included in the `amount_satoshis` value. + /// + /// Note that if this channel is inbound (and thus our counterparty pays the commitment + /// transaction fee) this value will be zero. 
For channels created prior to LDK Node 0.4 + /// the channel is always treated as outbound (and thus this value is never zero). + transaction_fee_satoshis: u64, + /// The amount of millisatoshis which has been burned to fees from HTLCs which are outbound + /// from us and are related to a payment which was sent by us. This is the sum of the + /// millisatoshis part of all HTLCs which are otherwise represented by + /// [`LightningBalance::MaybeTimeoutClaimableHTLC`] with their + /// [`LightningBalance::MaybeTimeoutClaimableHTLC::outbound_payment`] flag set, as well as + /// any dust HTLCs which would otherwise be represented the same. + /// + /// This amount (rounded up to a whole satoshi value) will not be included in `amount_satoshis`. + outbound_payment_htlc_rounded_msat: u64, + /// The amount of millisatoshis which has been burned to fees from HTLCs which are outbound + /// from us and are related to a forwarded HTLC. This is the sum of the millisatoshis part + /// of all HTLCs which are otherwise represented by + /// [`LightningBalance::MaybeTimeoutClaimableHTLC`] with their + /// [`LightningBalance::MaybeTimeoutClaimableHTLC::outbound_payment`] flag *not* set, as + /// well as any dust HTLCs which would otherwise be represented the same. + /// + /// This amount (rounded up to a whole satoshi value) will not be included in `amount_satoshis`. + outbound_forwarded_htlc_rounded_msat: u64, + /// The amount of millisatoshis which has been burned to fees from HTLCs which are inbound + /// to us and for which we know the preimage. This is the sum of the millisatoshis part of + /// all HTLCs which would be represented by [`LightningBalance::ContentiousClaimable`] on + /// channel close, but whose current value is included in `amount_satoshis`, as well as any + /// dust HTLCs which would otherwise be represented the same. + /// + /// This amount (rounded up to a whole satoshi value) will not be included in the counterparty's + /// `amount_satoshis`. + inbound_claiming_htlc_rounded_msat: u64, + /// The amount of millisatoshis which has been burned to fees from HTLCs which are inbound + /// to us and for which we do not know the preimage. This is the sum of the millisatoshis + /// part of all HTLCs which would be represented by + /// [`LightningBalance::MaybePreimageClaimableHTLC`] on channel close, as well as any dust + /// HTLCs which would otherwise be represented the same. + /// + /// This amount (rounded up to a whole satoshi value) will not be included in the + /// counterparty's `amount_satoshis`. + inbound_htlc_rounded_msat: u64, }, /// The channel has been closed, and the given balance is ours but awaiting confirmations until /// we consider it spendable. @@ -96,6 +141,8 @@ pub enum LightningBalance { /// /// [`Event::SpendableOutputs`]: lightning::events::Event::SpendableOutputs confirmation_height: u32, + /// Whether this balance is a result of cooperative close, a force-close, or an HTLC. + source: BalanceSource, }, /// The channel has been closed, and the given balance should be ours but awaiting spending /// transaction confirmation. If the spending transaction does not confirm in time, it is @@ -136,6 +183,8 @@ pub enum LightningBalance { claimable_height: u32, /// The payment hash whose preimage our counterparty needs to claim this HTLC. payment_hash: PaymentHash, + /// Indicates whether this HTLC represents a payment which was sent outbound from us. 
+ outbound_payment: bool, }, /// HTLCs which we received from our counterparty which are claimable with a preimage which we /// do not currently have. This will only be claimable if we receive the preimage from the node @@ -174,16 +223,33 @@ impl LightningBalance { channel_id: ChannelId, counterparty_node_id: PublicKey, balance: LdkBalance, ) -> Self { match balance { - LdkBalance::ClaimableOnChannelClose { amount_satoshis } => { - Self::ClaimableOnChannelClose { channel_id, counterparty_node_id, amount_satoshis } + LdkBalance::ClaimableOnChannelClose { + amount_satoshis, + transaction_fee_satoshis, + outbound_payment_htlc_rounded_msat, + outbound_forwarded_htlc_rounded_msat, + inbound_claiming_htlc_rounded_msat, + inbound_htlc_rounded_msat, + } => Self::ClaimableOnChannelClose { + channel_id, + counterparty_node_id, + amount_satoshis, + transaction_fee_satoshis, + outbound_payment_htlc_rounded_msat, + outbound_forwarded_htlc_rounded_msat, + inbound_claiming_htlc_rounded_msat, + inbound_htlc_rounded_msat, }, - LdkBalance::ClaimableAwaitingConfirmations { amount_satoshis, confirmation_height } => { - Self::ClaimableAwaitingConfirmations { - channel_id, - counterparty_node_id, - amount_satoshis, - confirmation_height, - } + LdkBalance::ClaimableAwaitingConfirmations { + amount_satoshis, + confirmation_height, + source, + } => Self::ClaimableAwaitingConfirmations { + channel_id, + counterparty_node_id, + amount_satoshis, + confirmation_height, + source, }, LdkBalance::ContentiousClaimable { amount_satoshis, @@ -202,12 +268,14 @@ impl LightningBalance { amount_satoshis, claimable_height, payment_hash, + outbound_payment, } => Self::MaybeTimeoutClaimableHTLC { channel_id, counterparty_node_id, amount_satoshis, claimable_height, payment_hash, + outbound_payment, }, LdkBalance::MaybePreimageClaimableHTLC { amount_satoshis, @@ -278,7 +346,7 @@ impl PendingSweepBalance { match output_info.status { OutputSpendStatus::PendingInitialBroadcast { .. } => { let channel_id = output_info.channel_id; - let amount_satoshis = value_satoshis_from_descriptor(&output_info.descriptor); + let amount_satoshis = value_from_descriptor(&output_info.descriptor).to_sat(); Self::PendingBroadcast { channel_id, amount_satoshis } }, OutputSpendStatus::PendingFirstConfirmation { @@ -287,8 +355,8 @@ impl PendingSweepBalance { .. } => { let channel_id = output_info.channel_id; - let amount_satoshis = value_satoshis_from_descriptor(&output_info.descriptor); - let latest_spending_txid = latest_spending_tx.txid(); + let amount_satoshis = value_from_descriptor(&output_info.descriptor).to_sat(); + let latest_spending_txid = latest_spending_tx.compute_txid(); Self::BroadcastAwaitingConfirmation { channel_id, latest_broadcast_height, @@ -303,8 +371,8 @@ impl PendingSweepBalance { .. } => { let channel_id = output_info.channel_id; - let amount_satoshis = value_satoshis_from_descriptor(&output_info.descriptor); - let latest_spending_txid = latest_spending_tx.txid(); + let amount_satoshis = value_from_descriptor(&output_info.descriptor).to_sat(); + let latest_spending_txid = latest_spending_tx.compute_txid(); Self::AwaitingThresholdConfirmations { channel_id, latest_spending_txid, diff --git a/src/builder.rs b/src/builder.rs index 9faf97714..f6b201c54 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -6,8 +6,8 @@ // accordance with one or both of these licenses. 
use crate::config::{ - default_user_config, Config, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, - DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, DEFAULT_ESPLORA_SERVER_URL, WALLET_KEYS_SEED_LEN, + default_user_config, Config, DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, DEFAULT_ESPLORA_SERVER_URL, + WALLET_KEYS_SEED_LEN, }; use crate::connection::ConnectionManager; use crate::event::EventQueue; @@ -15,6 +15,8 @@ use crate::fee_estimator::OnchainFeeEstimator; use crate::gossip::GossipSource; use crate::io; use crate::io::sqlite_store::SqliteStore; +#[cfg(any(vss, vss_test))] +use crate::io::vss_store::VssStore; use crate::liquidity::LiquiditySource; use crate::logger::{log_error, log_info, FilesystemLogger, Logger}; use crate::message_handler::NodeCustomMessageHandler; @@ -25,10 +27,12 @@ use crate::types::{ ChainMonitor, ChannelManager, DynStore, GossipSync, Graph, KeysManager, MessageRouter, OnionMessenger, PeerManager, }; +use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; use crate::{LogLevel, Node}; use lightning::chain::{chainmonitor, BestBlock, Watch}; +use lightning::io::Cursor; use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; @@ -53,12 +57,9 @@ use lightning_transaction_sync::EsploraSyncClient; use lightning_liquidity::lsps2::client::LSPS2ClientConfig; use lightning_liquidity::{LiquidityClientConfig, LiquidityManager}; -#[cfg(any(vss, vss_test))] -use crate::io::vss_store::VssStore; -use bdk::bitcoin::secp256k1::Secp256k1; -use bdk::blockchain::esplora::EsploraBlockchain; -use bdk::database::SqliteDatabase; -use bdk::template::Bip84; +use bdk_wallet::template::Bip84; +use bdk_wallet::KeychainKind; +use bdk_wallet::Wallet as BdkWallet; use bip39::Mnemonic; @@ -71,7 +72,6 @@ use std::convert::TryInto; use std::default::Default; use std::fmt; use std::fs; -use std::io::Cursor; use std::path::PathBuf; use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex, RwLock}; @@ -357,6 +357,8 @@ impl NodeBuilder { /// previously configured. 
#[cfg(any(vss, vss_test))] pub fn build_with_vss_store(&self, url: String, store_id: String) -> Result { + use bitcoin::key::Secp256k1; + let logger = setup_logger(&self.config)?; let seed_bytes = seed_bytes_from_config( @@ -366,14 +368,13 @@ impl NodeBuilder { )?; let config = Arc::new(self.config.clone()); - let xprv = bitcoin::bip32::ExtendedPrivKey::new_master(config.network.into(), &seed_bytes) - .map_err(|e| { - log_error!(logger, "Failed to derive master secret: {}", e); - BuildError::InvalidSeedBytes - })?; + let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { + log_error!(logger, "Failed to derive master secret: {}", e); + BuildError::InvalidSeedBytes + })?; let vss_xprv = xprv - .ckd_priv(&Secp256k1::new(), ChildNumber::Hardened { index: 877 }) + .derive_priv(&Secp256k1::new(), &[ChildNumber::Hardened { index: 877 }]) .map_err(|e| { log_error!(logger, "Failed to derive VSS secret: {}", e); BuildError::KVStoreSetupFailed @@ -555,38 +556,37 @@ fn build_with_store_internal( logger: Arc, kv_store: Arc, ) -> Result { // Initialize the on-chain wallet and chain access - let xprv = bitcoin::bip32::ExtendedPrivKey::new_master(config.network.into(), &seed_bytes) - .map_err(|e| { - log_error!(logger, "Failed to derive master secret: {}", e); - BuildError::InvalidSeedBytes - })?; - - let wallet_name = bdk::wallet::wallet_name_from_descriptor( - Bip84(xprv, bdk::KeychainKind::External), - Some(Bip84(xprv, bdk::KeychainKind::Internal)), - config.network.into(), - &Secp256k1::new(), - ) - .map_err(|e| { - log_error!(logger, "Failed to derive wallet name: {}", e); - BuildError::WalletSetupFailed + let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { + log_error!(logger, "Failed to derive master secret: {}", e); + BuildError::InvalidSeedBytes })?; - let database_path = format!("{}/bdk_wallet_{}.sqlite", config.storage_dir_path, wallet_name); - let database = SqliteDatabase::new(database_path); - - let bdk_wallet = bdk::Wallet::new( - Bip84(xprv, bdk::KeychainKind::External), - Some(Bip84(xprv, bdk::KeychainKind::Internal)), - config.network.into(), - database, - ) - .map_err(|e| { - log_error!(logger, "Failed to set up wallet: {}", e); - BuildError::WalletSetupFailed - })?; + let descriptor = Bip84(xprv, KeychainKind::External); + let change_descriptor = Bip84(xprv, KeychainKind::Internal); + let mut wallet_persister = + KVStoreWalletPersister::new(Arc::clone(&kv_store), Arc::clone(&logger)); + let wallet_opt = BdkWallet::load() + .descriptor(KeychainKind::External, Some(descriptor.clone())) + .descriptor(KeychainKind::Internal, Some(change_descriptor.clone())) + .extract_keys() + .check_network(config.network) + .load_wallet(&mut wallet_persister) + .map_err(|e| { + log_error!(logger, "Failed to set up wallet: {}", e); + BuildError::WalletSetupFailed + })?; + let bdk_wallet = match wallet_opt { + Some(wallet) => wallet, + None => BdkWallet::create(descriptor, change_descriptor) + .network(config.network) + .create_wallet(&mut wallet_persister) + .map_err(|e| { + log_error!(logger, "Failed to set up wallet: {}", e); + BuildError::WalletSetupFailed + })?, + }; - let (blockchain, tx_sync, tx_broadcaster, fee_estimator) = match chain_data_source_config { + let (esplora_client, tx_sync, tx_broadcaster, fee_estimator) = match chain_data_source_config { Some(ChainDataSourceConfig::Esplora(server_url)) => { let mut client_builder = esplora_client::Builder::new(&server_url.clone()); client_builder = 
client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); @@ -595,8 +595,6 @@ fn build_with_store_internal( esplora_client.clone(), Arc::clone(&logger), )); - let blockchain = EsploraBlockchain::from_client(esplora_client, BDK_CLIENT_STOP_GAP) - .with_concurrency(BDK_CLIENT_CONCURRENCY); let tx_broadcaster = Arc::new(TransactionBroadcaster::new( tx_sync.client().clone(), Arc::clone(&logger), @@ -606,15 +604,18 @@ fn build_with_store_internal( Arc::clone(&config), Arc::clone(&logger), )); - (blockchain, tx_sync, tx_broadcaster, fee_estimator) + (esplora_client, tx_sync, tx_broadcaster, fee_estimator) }, None => { // Default to Esplora client. let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); - let tx_sync = Arc::new(EsploraSyncClient::new(server_url, Arc::clone(&logger))); - let blockchain = - EsploraBlockchain::from_client(tx_sync.client().clone(), BDK_CLIENT_STOP_GAP) - .with_concurrency(BDK_CLIENT_CONCURRENCY); + let mut client_builder = esplora_client::Builder::new(&server_url); + client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + let esplora_client = client_builder.build_async().unwrap(); + let tx_sync = Arc::new(EsploraSyncClient::from_client( + esplora_client.clone(), + Arc::clone(&logger), + )); let tx_broadcaster = Arc::new(TransactionBroadcaster::new( tx_sync.client().clone(), Arc::clone(&logger), @@ -624,14 +625,15 @@ fn build_with_store_internal( Arc::clone(&config), Arc::clone(&logger), )); - (blockchain, tx_sync, tx_broadcaster, fee_estimator) + (esplora_client, tx_sync, tx_broadcaster, fee_estimator) }, }; let runtime = Arc::new(RwLock::new(None)); let wallet = Arc::new(Wallet::new( - blockchain, bdk_wallet, + wallet_persister, + esplora_client, Arc::clone(&tx_broadcaster), Arc::clone(&fee_estimator), Arc::clone(&logger), @@ -711,7 +713,7 @@ fn build_with_store_internal( ) { Ok(monitors) => monitors, Err(e) => { - if e.kind() == std::io::ErrorKind::NotFound { + if e.kind() == lightning::io::ErrorKind::NotFound { Vec::new() } else { log_error!(logger, "Failed to read channel monitors: {}", e.to_string()); @@ -764,7 +766,7 @@ fn build_with_store_internal( } else { // We're starting a fresh node. 
let genesis_block_hash = - bitcoin::blockdata::constants::genesis_block(config.network.into()).block_hash(); + bitcoin::blockdata::constants::genesis_block(config.network).block_hash(); let chain_params = ChainParameters { network: config.network.into(), @@ -808,6 +810,7 @@ fn build_with_store_internal( Arc::new(message_router), Arc::clone(&channel_manager), IgnoringMessageHandler {}, + IgnoringMessageHandler {}, )); let ephemeral_bytes: [u8; 32] = keys_manager.get_secure_random_bytes(); @@ -883,14 +886,14 @@ fn build_with_store_internal( chan_handler: Arc::clone(&channel_manager), route_handler: Arc::clone(&p2p_gossip_sync) as Arc, - onion_message_handler: onion_messenger, + onion_message_handler: Arc::clone(&onion_messenger), custom_message_handler, }, GossipSync::Rapid(_) => MessageHandler { chan_handler: Arc::clone(&channel_manager), route_handler: Arc::new(IgnoringMessageHandler {}) as Arc, - onion_message_handler: onion_messenger, + onion_message_handler: Arc::clone(&onion_messenger), custom_message_handler, }, GossipSync::None => { @@ -1018,6 +1021,7 @@ fn build_with_store_internal( chain_monitor, output_sweeper, peer_manager, + onion_messenger, connection_manager, keys_manager, network_graph, diff --git a/src/config.rs b/src/config.rs index 2ccfc2db9..b69e73ecf 100644 --- a/src/config.rs +++ b/src/config.rs @@ -32,7 +32,7 @@ const DEFAULT_ANCHOR_PER_CHANNEL_RESERVE_SATS: u64 = 25_000; pub(crate) const BDK_CLIENT_STOP_GAP: usize = 20; // The number of concurrent requests made against the API provider. -pub(crate) const BDK_CLIENT_CONCURRENCY: u8 = 4; +pub(crate) const BDK_CLIENT_CONCURRENCY: usize = 4; // The default Esplora server we're using. pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; @@ -297,7 +297,7 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig { if !may_announce_channel(config) { user_config.accept_forwards_to_priv_channels = false; - user_config.channel_handshake_config.announced_channel = false; + user_config.channel_handshake_config.announce_for_forwarding = false; user_config.channel_handshake_limits.force_announced_channel_preference = true; } diff --git a/src/error.rs b/src/error.rs index 807e1ca54..8caaaabdd 100644 --- a/src/error.rs +++ b/src/error.rs @@ -5,6 +5,11 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use bdk_chain::bitcoin::psbt::ExtractTxError as BdkExtractTxError; +use bdk_chain::local_chain::CannotConnectError as BdkChainConnectionError; +use bdk_wallet::error::CreateTxError as BdkCreateTxError; +use bdk_wallet::signer::SignerError as BdkSignerError; + use std::fmt; #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -48,8 +53,6 @@ pub enum Error { WalletOperationTimeout, /// A signing operation for transaction failed. OnchainTxSigningFailed, - /// A signing operation for message failed. - MessageSigningFailed, /// A transaction sync operation failed. TxSyncFailed, /// A transaction sync operation timed out. 
@@ -140,7 +143,6 @@ impl fmt::Display for Error { Self::WalletOperationFailed => write!(f, "Failed to conduct wallet operation."), Self::WalletOperationTimeout => write!(f, "A wallet operation timed out."), Self::OnchainTxSigningFailed => write!(f, "Failed to sign given transaction."), - Self::MessageSigningFailed => write!(f, "Failed to sign given message."), Self::TxSyncFailed => write!(f, "Failed to sync transactions."), Self::TxSyncTimeout => write!(f, "Syncing transactions timed out."), Self::GossipUpdateFailed => write!(f, "Failed to update gossip data."), @@ -187,12 +189,27 @@ impl fmt::Display for Error { impl std::error::Error for Error {} -impl From for Error { - fn from(e: bdk::Error) -> Self { - match e { - bdk::Error::Signer(_) => Self::OnchainTxSigningFailed, - _ => Self::WalletOperationFailed, - } +impl From for Error { + fn from(_: BdkSignerError) -> Self { + Self::OnchainTxSigningFailed + } +} + +impl From for Error { + fn from(_: BdkCreateTxError) -> Self { + Self::OnchainTxCreationFailed + } +} + +impl From for Error { + fn from(_: BdkExtractTxError) -> Self { + Self::OnchainTxCreationFailed + } +} + +impl From for Error { + fn from(_: BdkChainConnectionError) -> Self { + Self::WalletOperationFailed } } diff --git a/src/event.rs b/src/event.rs index 1f4b2e117..8d732d21a 100644 --- a/src/event.rs +++ b/src/event.rs @@ -27,11 +27,12 @@ use crate::io::{ use crate::logger::{log_debug, log_error, log_info, Logger}; use lightning::events::bump_transaction::BumpTransactionEvent; -use lightning::events::{ClosureReason, PaymentPurpose}; +use lightning::events::{ClosureReason, PaymentPurpose, ReplayEvent}; use lightning::events::{Event as LdkEvent, PaymentFailureReason}; use lightning::impl_writeable_tlv_based_enum; use lightning::ln::channelmanager::PaymentId; -use lightning::ln::{ChannelId, PaymentHash}; +use lightning::ln::types::ChannelId; +use lightning::ln::PaymentHash; use lightning::routing::gossip::NodeId; use lightning::util::errors::APIError; use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; @@ -40,7 +41,7 @@ use lightning_liquidity::lsps2::utils::compute_opening_fee; use bitcoin::blockdata::locktime::absolute::LockTime; use bitcoin::secp256k1::PublicKey; -use bitcoin::OutPoint; +use bitcoin::{Amount, OutPoint}; use rand::{thread_rng, Rng}; @@ -74,7 +75,12 @@ pub enum Event { /// Will only be `None` for events serialized with LDK Node v0.2.1 or prior. payment_id: Option, /// The hash of the payment. - payment_hash: PaymentHash, + /// + /// This will be `None` if the payment failed before receiving an invoice when paying a + /// BOLT12 [`Offer`]. + /// + /// [`Offer`]: lightning::offers::offer::Offer + payment_hash: Option, /// The reason why the payment failed. /// /// This will be `None` for events serialized by LDK Node v0.2.1 and prior. 
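// A consumer-side sketch (illustrative, not part of this diff) of the updated
// `Event::PaymentFailed`: `payment_hash` is now optional, as a BOLT12 send can fail before
// an invoice (and thus a payment hash) was ever received. `node` is an assumed running
// `ldk_node::Node`; as usual, call `event_handled` after processing the event.
if let Event::PaymentFailed { payment_id, payment_hash, reason } = node.wait_next_event() {
	match payment_hash {
		Some(hash) => println!("Payment {:?} (hash {:?}) failed: {:?}", payment_id, hash, reason),
		None => println!("Payment {:?} failed before receiving an invoice: {:?}", payment_id, reason),
	}
}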
@@ -159,8 +165,8 @@ impl_writeable_tlv_based_enum!(Event, (3, payment_id, option), }, (1, PaymentFailed) => { - (0, payment_hash, required), - (1, reason, option), + (0, payment_hash, option), + (1, reason, upgradable_option), (3, payment_id, option), }, (2, PaymentReceived) => { @@ -191,7 +197,7 @@ impl_writeable_tlv_based_enum!(Event, (2, payment_id, required), (4, claimable_amount_msat, required), (6, claim_deadline, option), - }; + } ); pub struct EventQueue @@ -394,7 +400,7 @@ where } } - pub async fn handle_event(&self, event: LdkEvent) { + pub async fn handle_event(&self, event: LdkEvent) -> Result<(), ReplayEvent> { match event { LdkEvent::FundingGenerationReady { temporary_channel_id, @@ -412,17 +418,18 @@ where let locktime = LockTime::from_height(cur_height).unwrap_or(LockTime::ZERO); // Sign the final funding transaction and broadcast it. + let channel_amount = Amount::from_sat(channel_value_satoshis); match self.wallet.create_funding_transaction( output_script, - channel_value_satoshis, + channel_amount, confirmation_target, locktime, ) { Ok(final_tx) => { // Give the funding transaction back to LDK for opening the channel. match self.channel_manager.funding_transaction_generated( - &temporary_channel_id, - &counterparty_node_id, + temporary_channel_id, + counterparty_node_id, final_tx, ) { Ok(()) => {}, @@ -452,6 +459,7 @@ where .force_close_without_broadcasting_txn( &temporary_channel_id, &counterparty_node_id, + "Failed to create funding transaction".to_string(), ) .unwrap_or_else(|e| { log_error!(self.logger, "Failed to force close channel after funding generation failed: {:?}", e); @@ -462,6 +470,9 @@ where }, } }, + LdkEvent::FundingTxBroadcastSafe { .. } => { + debug_assert!(false, "We currently only support safe funding, so this event should never be emitted."); + }, LdkEvent::PaymentClaimable { payment_hash, purpose, @@ -491,7 +502,7 @@ where log_error!(self.logger, "Failed to access payment store: {}", e); panic!("Failed to access payment store"); }); - return; + return Ok(()); } if info.status == PaymentStatus::Succeeded @@ -513,7 +524,7 @@ where log_error!(self.logger, "Failed to access payment store: {}", e); panic!("Failed to access payment store"); }); - return; + return Ok(()); } let max_total_opening_fee_msat = match info.kind { @@ -552,7 +563,7 @@ where log_error!(self.logger, "Failed to access payment store: {}", e); panic!("Failed to access payment store"); }); - return; + return Ok(()); } // If this is known by the store but ChannelManager doesn't know the preimage, @@ -581,7 +592,7 @@ where ); panic!("Failed to push to event queue"); }); - return; + return Ok(()); } }, _ => {}, @@ -717,6 +728,7 @@ where receiver_node_id: _, htlcs: _, sender_intended_total_msat: _, + onion_fields: _, } => { let payment_id = PaymentId(payment_hash.0); log_info!( @@ -810,7 +822,7 @@ where id } else { debug_assert!(false, "payment_id should always be set."); - return; + return Ok(()); }; let update = PaymentDetailsUpdate { @@ -855,13 +867,13 @@ where LdkEvent::PaymentFailed { payment_id, payment_hash, reason, .. 
} => { log_info!( self.logger, - "Failed to send payment to payment hash {:?} due to {:?}.", - hex_utils::to_string(&payment_hash.0), + "Failed to send payment with ID {} due to {:?}.", + payment_id, reason ); let update = PaymentDetailsUpdate { - hash: Some(Some(payment_hash)), + hash: Some(payment_hash), status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; @@ -916,9 +928,15 @@ where funding_satoshis, channel_type, push_msat: _, + is_announced: _, + params: _, } => { let anchor_channel = channel_type.requires_anchors_zero_fee_htlc_tx(); + // TODO: We should use `is_announced` flag above and reject announced channels if + // we're not a forwading node, once we add a 'forwarding mode' based on listening + // address / node alias being set. + if anchor_channel { if let Some(anchor_channels_config) = self.config.anchor_channels_config.as_ref() @@ -951,11 +969,12 @@ where .force_close_without_broadcasting_txn( &temporary_channel_id, &counterparty_node_id, + "Channel request rejected".to_string(), ) .unwrap_or_else(|e| { log_error!(self.logger, "Failed to reject channel: {:?}", e) }); - return; + return Ok(()); } } else { log_error!( @@ -967,11 +986,12 @@ where .force_close_without_broadcasting_txn( &temporary_channel_id, &counterparty_node_id, + "Channel request rejected".to_string(), ) .unwrap_or_else(|e| { log_error!(self.logger, "Failed to reject channel: {:?}", e) }); - return; + return Ok(()); } } @@ -1038,7 +1058,7 @@ where node.announcement_info .as_ref() .map_or("unnamed node".to_string(), |ann| { - format!("node {}", ann.alias) + format!("node {}", ann.alias()) }) }) }; @@ -1177,21 +1197,8 @@ where }, LdkEvent::DiscardFunding { .. } => {}, LdkEvent::HTLCIntercepted { .. } => {}, - LdkEvent::InvoiceRequestFailed { payment_id } => { - log_error!( - self.logger, - "Failed to request invoice for outbound BOLT12 payment {}", - payment_id - ); - let update = PaymentDetailsUpdate { - status: Some(PaymentStatus::Failed), - ..PaymentDetailsUpdate::new(payment_id) - }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); - return; + LdkEvent::InvoiceReceived { .. } => { + debug_assert!(false, "We currently don't handle BOLT12 invoices manually, so this event should never be emitted."); }, LdkEvent::ConnectionNeeded { node_id, addresses } => { let runtime_lock = self.runtime.read().unwrap(); @@ -1243,13 +1250,20 @@ where "Ignoring BumpTransactionEvent for channel {} due to trusted counterparty {}", channel_id, counterparty_node_id ); - return; + return Ok(()); } } self.bump_tx_event_handler.handle_event(&bte); }, + LdkEvent::OnionMessageIntercepted { .. } => { + debug_assert!(false, "We currently don't support onion message interception, so this event should never be emitted."); + }, + LdkEvent::OnionMessagePeerConnected { .. 
} => { + debug_assert!(false, "We currently don't support onion message interception, so this event should never be emitted."); + }, } + Ok(()) } } diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs index 857106aa3..62b4b8882 100644 --- a/src/fee_estimator.rs +++ b/src/fee_estimator.rs @@ -13,10 +13,9 @@ use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarge use lightning::chain::chaininterface::FeeEstimator as LdkFeeEstimator; use lightning::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW; -use bdk::FeeRate; +use bitcoin::FeeRate; use esplora_client::AsyncClient as EsploraClient; -use bitcoin::blockdata::weight::Weight; use bitcoin::Network; use std::collections::HashMap; @@ -90,7 +89,8 @@ where let confirmation_targets = vec![ ConfirmationTarget::OnchainPayment, ConfirmationTarget::ChannelFunding, - LdkConfirmationTarget::OnChainSweep.into(), + LdkConfirmationTarget::MaximumFeeEstimate.into(), + LdkConfirmationTarget::UrgentOnChainSweep.into(), LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee.into(), LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee.into(), LdkConfirmationTarget::AnchorChannelFee.into(), @@ -104,7 +104,8 @@ where ConfirmationTarget::OnchainPayment => 6, ConfirmationTarget::ChannelFunding => 12, ConfirmationTarget::Lightning(ldk_target) => match ldk_target { - LdkConfirmationTarget::OnChainSweep => 6, + LdkConfirmationTarget::MaximumFeeEstimate => 1, + LdkConfirmationTarget::UrgentOnChainSweep => 6, LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee => 1008, LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => 144, LdkConfirmationTarget::AnchorChannelFee => 1008, @@ -114,7 +115,7 @@ where }, }; - let converted_estimates = + let converted_estimate_sat_vb = esplora_client::convert_fee_rate(num_blocks, estimates.clone()).map_err(|e| { log_error!( self.logger, @@ -125,7 +126,7 @@ where Error::FeerateEstimationUpdateFailed })?; - let fee_rate = FeeRate::from_sat_per_vb(converted_estimates); + let fee_rate = FeeRate::from_sat_per_kwu((converted_estimate_sat_vb * 250.0) as u64); // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that // require some post-estimation adjustments to the fee rates, which we do here. @@ -133,9 +134,8 @@ where ConfirmationTarget::Lightning( LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee, ) => { - let slightly_less_than_background = - fee_rate.fee_wu(Weight::from_wu(1000)) - 250; - FeeRate::from_sat_per_kwu(slightly_less_than_background as f32) + let slightly_less_than_background = fee_rate.to_sat_per_kwu() - 250; + FeeRate::from_sat_per_kwu(slightly_less_than_background) }, _ => fee_rate, }; @@ -146,7 +146,7 @@ where self.logger, "Fee rate estimation updated for {:?}: {} sats/kwu", target, - adjusted_fee_rate.fee_wu(Weight::from_wu(1000)) + adjusted_fee_rate.to_sat_per_kwu(), ); } Ok(()) @@ -164,7 +164,8 @@ where ConfirmationTarget::OnchainPayment => 5000, ConfirmationTarget::ChannelFunding => 1000, ConfirmationTarget::Lightning(ldk_target) => match ldk_target { - LdkConfirmationTarget::OnChainSweep => 5000, + LdkConfirmationTarget::MaximumFeeEstimate => 8000, + LdkConfirmationTarget::UrgentOnChainSweep => 5000, LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee => { FEERATE_FLOOR_SATS_PER_KW }, @@ -179,17 +180,13 @@ where }; // We'll fall back on this, if we really don't have any other information. 
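// An illustrative aside (not part of this diff) on the fee-rate units used above:
// `esplora_client::convert_fee_rate` yields sat/vB, while `bitcoin::FeeRate` is denominated
// in sat per 1000 weight units. Since 1 vbyte == 4 weight units, 1 sat/vB == 250 sat/kWU,
// which is where the `* 250.0` factor comes from. For example, a 10 sat/vB estimate maps to
// 2500 sat/kWU:
let est_sat_per_vb: f32 = 10.0;
assert_eq!(FeeRate::from_sat_per_kwu((est_sat_per_vb * 250.0) as u64).to_sat_per_kwu(), 2_500);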
- let fallback_rate = FeeRate::from_sat_per_kwu(fallback_sats_kwu as f32); + let fallback_rate = FeeRate::from_sat_per_kwu(fallback_sats_kwu as u64); let estimate = *locked_fee_rate_cache.get(&confirmation_target).unwrap_or(&fallback_rate); // Currently we assume every transaction needs to at least be relayable, which is why we // enforce a lower bound of `FEERATE_FLOOR_SATS_PER_KW`. - let weight_units = Weight::from_wu(1000); - FeeRate::from_wu( - estimate.fee_wu(weight_units).max(FEERATE_FLOOR_SATS_PER_KW as u64), - weight_units, - ) + FeeRate::from_sat_per_kwu(estimate.to_sat_per_kwu().max(FEERATE_FLOOR_SATS_PER_KW as u64)) } } @@ -198,6 +195,6 @@ where L::Target: Logger, { fn get_est_sat_per_1000_weight(&self, confirmation_target: LdkConfirmationTarget) -> u32 { - self.estimate_fee_rate(confirmation_target.into()).fee_wu(Weight::from_wu(1000)) as u32 + self.estimate_fee_rate(confirmation_target.into()).to_sat_per_kwu() as u32 } } diff --git a/src/graph.rs b/src/graph.rs index 520be99db..3e4e58c88 100644 --- a/src/graph.rs +++ b/src/graph.rs @@ -165,8 +165,8 @@ pub struct NodeAnnouncementInfo { impl From for NodeAnnouncementInfo { fn from(value: lightning::routing::gossip::NodeAnnouncementInfo) -> Self { Self { - last_update: value.last_update, - alias: value.alias.to_string(), + last_update: value.last_update(), + alias: value.alias().to_string(), addresses: value.addresses().iter().cloned().collect(), } } diff --git a/src/io/mod.rs b/src/io/mod.rs index c65ab1d3b..22caff50f 100644 --- a/src/io/mod.rs +++ b/src/io/mod.rs @@ -42,3 +42,45 @@ pub(crate) const LATEST_RGS_SYNC_TIMESTAMP_KEY: &str = "latest_rgs_sync_timestam pub(crate) const LATEST_NODE_ANN_BCAST_TIMESTAMP_PRIMARY_NAMESPACE: &str = ""; pub(crate) const LATEST_NODE_ANN_BCAST_TIMESTAMP_SECONDARY_NAMESPACE: &str = ""; pub(crate) const LATEST_NODE_ANN_BCAST_TIMESTAMP_KEY: &str = "latest_node_ann_bcast_timestamp"; + +/// The BDK wallet's [`ChangeSet::descriptor`] will be persisted under this key. +/// +/// [`ChangeSet::descriptor`]: bdk_wallet::ChangeSet::descriptor +pub(crate) const BDK_WALLET_DESCRIPTOR_PRIMARY_NAMESPACE: &str = "bdk_wallet"; +pub(crate) const BDK_WALLET_DESCRIPTOR_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const BDK_WALLET_DESCRIPTOR_KEY: &str = "descriptor"; + +/// The BDK wallet's [`ChangeSet::change_descriptor`] will be persisted under this key. +/// +/// [`ChangeSet::change_descriptor`]: bdk_wallet::ChangeSet::change_descriptor +pub(crate) const BDK_WALLET_CHANGE_DESCRIPTOR_PRIMARY_NAMESPACE: &str = "bdk_wallet"; +pub(crate) const BDK_WALLET_CHANGE_DESCRIPTOR_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const BDK_WALLET_CHANGE_DESCRIPTOR_KEY: &str = "change_descriptor"; + +/// The BDK wallet's [`ChangeSet::network`] will be persisted under this key. +/// +/// [`ChangeSet::network`]: bdk_wallet::ChangeSet::network +pub(crate) const BDK_WALLET_NETWORK_PRIMARY_NAMESPACE: &str = "bdk_wallet"; +pub(crate) const BDK_WALLET_NETWORK_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const BDK_WALLET_NETWORK_KEY: &str = "network"; + +/// The BDK wallet's [`ChangeSet::local_chain`] will be persisted under this key. +/// +/// [`ChangeSet::local_chain`]: bdk_wallet::ChangeSet::local_chain +pub(crate) const BDK_WALLET_LOCAL_CHAIN_PRIMARY_NAMESPACE: &str = "bdk_wallet"; +pub(crate) const BDK_WALLET_LOCAL_CHAIN_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const BDK_WALLET_LOCAL_CHAIN_KEY: &str = "local_chain"; + +/// The BDK wallet's [`ChangeSet::tx_graph`] will be persisted under this key. 
+/// +/// [`ChangeSet::tx_graph`]: bdk_wallet::ChangeSet::tx_graph +pub(crate) const BDK_WALLET_TX_GRAPH_PRIMARY_NAMESPACE: &str = "bdk_wallet"; +pub(crate) const BDK_WALLET_TX_GRAPH_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const BDK_WALLET_TX_GRAPH_KEY: &str = "tx_graph"; + +/// The BDK wallet's [`ChangeSet::indexer`] will be persisted under this key. +/// +/// [`ChangeSet::indexer`]: bdk_wallet::ChangeSet::indexer +pub(crate) const BDK_WALLET_INDEXER_PRIMARY_NAMESPACE: &str = "bdk_wallet"; +pub(crate) const BDK_WALLET_INDEXER_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const BDK_WALLET_INDEXER_KEY: &str = "indexer"; diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index c1eac84b4..b72db5a2b 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -132,7 +132,7 @@ impl SqliteStore { impl KVStore for SqliteStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> std::io::Result> { + ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; let locked_conn = self.connection.lock().unwrap(); @@ -142,7 +142,7 @@ impl KVStore for SqliteStore { let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { let msg = format!("Failed to prepare statement: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?; let res = stmt @@ -162,7 +162,7 @@ impl KVStore for SqliteStore { PrintableString(secondary_namespace), PrintableString(key) ); - std::io::Error::new(std::io::ErrorKind::NotFound, msg) + io::Error::new(io::ErrorKind::NotFound, msg) }, e => { let msg = format!( @@ -172,7 +172,7 @@ impl KVStore for SqliteStore { PrintableString(key), e ); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) }, })?; Ok(res) @@ -180,7 +180,7 @@ impl KVStore for SqliteStore { fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], - ) -> std::io::Result<()> { + ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; let locked_conn = self.connection.lock().unwrap(); @@ -192,7 +192,7 @@ impl KVStore for SqliteStore { let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { let msg = format!("Failed to prepare statement: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?; stmt.execute(named_params! { @@ -210,13 +210,13 @@ impl KVStore for SqliteStore { PrintableString(key), e ); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) }) } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, - ) -> std::io::Result<()> { + ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; let locked_conn = self.connection.lock().unwrap(); @@ -225,7 +225,7 @@ impl KVStore for SqliteStore { let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { let msg = format!("Failed to prepare statement: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?; stmt.execute(named_params! 
{ @@ -241,14 +241,12 @@ impl KVStore for SqliteStore { PrintableString(key), e ); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?; Ok(()) } - fn list( - &self, primary_namespace: &str, secondary_namespace: &str, - ) -> std::io::Result> { + fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, None, "list")?; let locked_conn = self.connection.lock().unwrap(); @@ -259,7 +257,7 @@ impl KVStore for SqliteStore { ); let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { let msg = format!("Failed to prepare statement: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?; let mut keys = Vec::new(); @@ -274,13 +272,13 @@ impl KVStore for SqliteStore { ) .map_err(|e| { let msg = format!("Failed to retrieve queried rows: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?; for k in rows_iter { keys.push(k.map_err(|e| { let msg = format!("Failed to retrieve queried rows: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?); } diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index c4610b4f5..98b33fa5f 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -151,12 +151,13 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { .force_close_broadcasting_latest_txn( &nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), + "whoops".to_string(), ) .unwrap(); check_closed_event!( nodes[0], 1, - ClosureReason::HolderForceClosed, + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000 ); diff --git a/src/io/utils.rs b/src/io/utils.rs index 29484273c..f6fd10b41 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -12,8 +12,11 @@ use crate::logger::{log_error, FilesystemLogger}; use crate::peer_store::PeerStore; use crate::sweep::DeprecatedSpendableOutputInfo; use crate::types::{Broadcaster, ChainSource, DynStore, FeeEstimator, KeysManager, Sweeper}; +use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; use crate::{Error, EventQueue, PaymentDetails}; +use lightning::io::Cursor; +use lightning::ln::msgs::DecodeError; use lightning::routing::gossip::NetworkGraph; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringDecayParameters}; use lightning::util::logger::Logger; @@ -26,13 +29,21 @@ use lightning::util::persist::{ }; use lightning::util::ser::{Readable, ReadableArgs, Writeable}; use lightning::util::string::PrintableString; +use lightning::util::sweep::{OutputSpendStatus, OutputSweeper}; + +use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; +use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; +use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey}; +use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; +use bdk_chain::ConfirmationBlockTime; +use bdk_wallet::ChangeSet as BdkWalletChangeSet; use bip39::Mnemonic; -use lightning::util::sweep::{OutputSpendStatus, OutputSweeper}; +use bitcoin::Network; use rand::{thread_rng, RngCore}; use std::fs; -use std::io::{Cursor, Write}; +use std::io::Write; use std::ops::Deref; use std::path::Path; use std::sync::Arc; @@ -518,6 +529,164 @@ pub(crate) fn check_namespace_key_validity( Ok(()) } +macro_rules! 
impl_read_write_change_set_type { + ( $read_name: ident, $write_name: ident, $change_set_type:ty, $primary_namespace: expr, $secondary_namespace: expr, $key: expr ) => { + pub(crate) fn $read_name( + kv_store: Arc, logger: L, + ) -> Result, std::io::Error> + where + L::Target: Logger, + { + let bytes = match kv_store.read($primary_namespace, $secondary_namespace, $key) { + Ok(bytes) => bytes, + Err(e) => { + if e.kind() == lightning::io::ErrorKind::NotFound { + return Ok(None); + } else { + log_error!( + logger, + "Reading data from key {}/{}/{} failed due to: {}", + $primary_namespace, + $secondary_namespace, + $key, + e + ); + return Err(e.into()); + } + }, + }; + + let mut reader = Cursor::new(bytes); + let res: Result, DecodeError> = + Readable::read(&mut reader); + match res { + Ok(res) => Ok(Some(res.0)), + Err(e) => { + log_error!(logger, "Failed to deserialize BDK wallet field: {}", e); + Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Failed to deserialize BDK wallet field", + )) + }, + } + } + + pub(crate) fn $write_name( + value: &$change_set_type, kv_store: Arc, logger: L, + ) -> Result<(), std::io::Error> + where + L::Target: Logger, + { + let data = ChangeSetSerWrapper(value).encode(); + kv_store.write($primary_namespace, $secondary_namespace, $key, &data).map_err(|e| { + log_error!( + logger, + "Writing data to key {}/{}/{} failed due to: {}", + $primary_namespace, + $secondary_namespace, + $key, + e + ); + e.into() + }) + } + }; +} + +impl_read_write_change_set_type!( + read_bdk_wallet_descriptor, + write_bdk_wallet_descriptor, + Descriptor, + BDK_WALLET_DESCRIPTOR_PRIMARY_NAMESPACE, + BDK_WALLET_DESCRIPTOR_SECONDARY_NAMESPACE, + BDK_WALLET_DESCRIPTOR_KEY +); + +impl_read_write_change_set_type!( + read_bdk_wallet_change_descriptor, + write_bdk_wallet_change_descriptor, + Descriptor, + BDK_WALLET_CHANGE_DESCRIPTOR_PRIMARY_NAMESPACE, + BDK_WALLET_CHANGE_DESCRIPTOR_SECONDARY_NAMESPACE, + BDK_WALLET_CHANGE_DESCRIPTOR_KEY +); + +impl_read_write_change_set_type!( + read_bdk_wallet_network, + write_bdk_wallet_network, + Network, + BDK_WALLET_NETWORK_PRIMARY_NAMESPACE, + BDK_WALLET_NETWORK_SECONDARY_NAMESPACE, + BDK_WALLET_NETWORK_KEY +); + +impl_read_write_change_set_type!( + read_bdk_wallet_local_chain, + write_bdk_wallet_local_chain, + BdkLocalChainChangeSet, + BDK_WALLET_LOCAL_CHAIN_PRIMARY_NAMESPACE, + BDK_WALLET_LOCAL_CHAIN_SECONDARY_NAMESPACE, + BDK_WALLET_LOCAL_CHAIN_KEY +); + +impl_read_write_change_set_type!( + read_bdk_wallet_tx_graph, + write_bdk_wallet_tx_graph, + BdkTxGraphChangeSet, + BDK_WALLET_TX_GRAPH_PRIMARY_NAMESPACE, + BDK_WALLET_TX_GRAPH_SECONDARY_NAMESPACE, + BDK_WALLET_TX_GRAPH_KEY +); + +impl_read_write_change_set_type!( + read_bdk_wallet_indexer, + write_bdk_wallet_indexer, + BdkIndexerChangeSet, + BDK_WALLET_INDEXER_PRIMARY_NAMESPACE, + BDK_WALLET_INDEXER_SECONDARY_NAMESPACE, + BDK_WALLET_INDEXER_KEY +); + +// Reads the full BdkWalletChangeSet or returns default fields +pub(crate) fn read_bdk_wallet_change_set( + kv_store: Arc, logger: Arc, +) -> Result, std::io::Error> { + let mut change_set = BdkWalletChangeSet::default(); + + // We require a descriptor and return `None` to signal creation of a new wallet otherwise. + if let Some(descriptor) = + read_bdk_wallet_descriptor(Arc::clone(&kv_store), Arc::clone(&logger))? + { + change_set.descriptor = Some(descriptor); + } else { + return Ok(None); + } + + // We require a change_descriptor and return `None` to signal creation of a new wallet otherwise. 
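
As background for the required-field handling in this function: bdk_wallet stages the `descriptor`, `change_descriptor`, and `network` only in the `ChangeSet` written when a wallet is first created, and does not repeat them in later incremental change sets, so a missing value here should reliably mean that no wallet has ever been persisted, and returning `Ok(None)` signals the caller to create a fresh one. The tx graph, local chain, and indexer parts, by contrast, simply fall back to their empty defaults below.
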
+ if let Some(change_descriptor) = + read_bdk_wallet_change_descriptor(Arc::clone(&kv_store), Arc::clone(&logger))? + { + change_set.change_descriptor = Some(change_descriptor); + } else { + return Ok(None); + } + + // We require a network and return `None` to signal creation of a new wallet otherwise. + if let Some(network) = read_bdk_wallet_network(Arc::clone(&kv_store), Arc::clone(&logger))? { + change_set.network = Some(network); + } else { + return Ok(None); + } + + read_bdk_wallet_local_chain(Arc::clone(&kv_store), Arc::clone(&logger))? + .map(|local_chain| change_set.local_chain = local_chain); + read_bdk_wallet_tx_graph(Arc::clone(&kv_store), Arc::clone(&logger))? + .map(|tx_graph| change_set.tx_graph = tx_graph); + read_bdk_wallet_indexer(Arc::clone(&kv_store), Arc::clone(&logger))? + .map(|indexer| change_set.indexer = indexer); + Ok(Some(change_set)) +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index ba09b5988..474f7dbc7 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -5,9 +5,7 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use io::Error; -use std::io; -use std::io::ErrorKind; +use lightning::io::{self, Error, ErrorKind}; #[cfg(test)] use std::panic::RefUnwindSafe; use std::time::Duration; @@ -139,7 +137,14 @@ impl KVStore for VssStore { })?; // unwrap safety: resp.value must be always present for a non-erroneous VSS response, otherwise // it is an API-violation which is converted to [`VssError::InternalServerError`] in [`VssClient`] - let storable = Storable::decode(&resp.value.unwrap().value[..])?; + let storable = Storable::decode(&resp.value.unwrap().value[..]).map_err(|e| { + let msg = format!( + "Failed to decode data read from key {}/{}/{}: {}", + primary_namespace, secondary_namespace, key, e + ); + Error::new(ErrorKind::Other, msg) + })?; + Ok(self.storable_builder.deconstruct(storable)?.0) } diff --git a/src/lib.rs b/src/lib.rs index 914dec4b1..4a7d081c5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -138,7 +138,7 @@ use payment::{ use peer_store::{PeerInfo, PeerStore}; use types::{ Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, FeeEstimator, - Graph, KeysManager, PeerManager, Router, Scorer, Sweeper, Wallet, + Graph, KeysManager, OnionMessenger, PeerManager, Router, Scorer, Sweeper, Wallet, }; pub use types::{ChannelDetails, PeerDetails, UserChannelId}; @@ -146,7 +146,8 @@ use logger::{log_error, log_info, log_trace, FilesystemLogger, Logger}; use lightning::chain::{BestBlock, Confirm}; use lightning::events::bump_transaction::Wallet as LdkWallet; -use lightning::ln::channelmanager::{ChannelShutdownState, PaymentId}; +use lightning::ln::channel_state::ChannelShutdownState; +use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; @@ -186,6 +187,7 @@ pub struct Node { chain_monitor: Arc, output_sweeper: Arc, peer_manager: Arc, + onion_messenger: Arc, connection_manager: Arc>>, keys_manager: Arc, network_graph: Arc, @@ -279,49 +281,44 @@ impl Node { .config .onchain_wallet_sync_interval_secs .max(config::WALLET_SYNC_INTERVAL_MINIMUM_SECS); - std::thread::spawn(move || { - tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap().block_on( - async move { - let mut onchain_wallet_sync_interval = tokio::time::interval( - Duration::from_secs(onchain_wallet_sync_interval_secs), - ); - 
onchain_wallet_sync_interval - .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - loop { - tokio::select! { - _ = stop_sync.changed() => { + runtime.spawn(async move { + let mut onchain_wallet_sync_interval = + tokio::time::interval(Duration::from_secs(onchain_wallet_sync_interval_secs)); + onchain_wallet_sync_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + loop { + tokio::select! { + _ = stop_sync.changed() => { + log_trace!( + sync_logger, + "Stopping background syncing on-chain wallet.", + ); + return; + } + _ = onchain_wallet_sync_interval.tick() => { + let now = Instant::now(); + match wallet.sync().await { + Ok(()) => { log_trace!( sync_logger, - "Stopping background syncing on-chain wallet.", - ); - return; + "Background sync of on-chain wallet finished in {}ms.", + now.elapsed().as_millis() + ); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + *sync_onchain_wallet_timestamp.write().unwrap() = unix_time_secs_opt; } - _ = onchain_wallet_sync_interval.tick() => { - let now = Instant::now(); - match wallet.sync().await { - Ok(()) => { - log_trace!( - sync_logger, - "Background sync of on-chain wallet finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *sync_onchain_wallet_timestamp.write().unwrap() = unix_time_secs_opt; - } - Err(err) => { - log_error!( - sync_logger, - "Background sync of on-chain wallet failed: {}", - err - ) - } - } + Err(err) => { + log_error!( + sync_logger, + "Background sync of on-chain wallet failed: {}", + err + ) } } } - }, - ); + } + } }); let mut stop_fee_updates = self.stop_sender.subscribe(); @@ -636,7 +633,7 @@ impl Node { continue; } - if !bcast_cm.list_channels().iter().any(|chan| chan.is_public && chan.is_channel_ready) { + if !bcast_cm.list_channels().iter().any(|chan| chan.is_announced && chan.is_channel_ready) { // Skip if we don't have any public channels that are ready. continue; } @@ -730,6 +727,7 @@ impl Node { let background_chan_man = Arc::clone(&self.channel_manager); let background_gossip_sync = self.gossip_source.as_gossip_sync(); let background_peer_man = Arc::clone(&self.peer_manager); + let background_onion_messenger = Arc::clone(&self.onion_messenger); let background_logger = Arc::clone(&self.logger); let background_error_logger = Arc::clone(&self.logger); let background_scorer = Arc::clone(&self.scorer); @@ -762,6 +760,7 @@ impl Node { |e| background_event_handler.handle_event(e), background_chain_mon, background_chan_man, + Some(background_onion_messenger), background_gossip_sync, background_peer_man, background_logger, @@ -1188,7 +1187,7 @@ impl Node { fn open_channel_inner( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, push_to_counterparty_msat: Option, channel_config: Option, - announce_channel: bool, + announce_for_forwarding: bool, ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { @@ -1250,12 +1249,12 @@ impl Node { } let mut user_config = default_user_config(&self.config); - user_config.channel_handshake_config.announced_channel = announce_channel; + user_config.channel_handshake_config.announce_for_forwarding = announce_for_forwarding; user_config.channel_config = (channel_config.unwrap_or_default()).clone().into(); // We set the max inflight to 100% for private channels. 
// FIXME: LDK will default to this behavior soon, too, at which point we should drop this // manual override. - if !announce_channel { + if !announce_for_forwarding { user_config .channel_handshake_config .max_inbound_htlc_value_in_flight_percent_of_channel = 100; @@ -1484,7 +1483,7 @@ impl Node { pub fn close_channel( &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, ) -> Result<(), Error> { - self.close_channel_internal(user_channel_id, counterparty_node_id, false) + self.close_channel_internal(user_channel_id, counterparty_node_id, false, None) } /// Force-close a previously opened channel. @@ -1500,13 +1499,19 @@ impl Node { /// for more information). pub fn force_close_channel( &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, + reason: Option, ) -> Result<(), Error> { - self.close_channel_internal(user_channel_id, counterparty_node_id, true) + self.close_channel_internal(user_channel_id, counterparty_node_id, true, reason) } fn close_channel_internal( &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, force: bool, + force_close_reason: Option, ) -> Result<(), Error> { + debug_assert!( + force_close_reason.is_none() || force, + "Reason can only be set for force closures" + ); let open_channels = self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); if let Some(channel_details) = @@ -1520,6 +1525,7 @@ impl Node { .force_close_without_broadcasting_txn( &channel_details.channel_id, &counterparty_node_id, + force_close_reason.unwrap_or_default(), ) .map_err(|e| { log_error!( @@ -1534,6 +1540,7 @@ impl Node { .force_close_broadcasting_latest_txn( &channel_details.channel_id, &counterparty_node_id, + force_close_reason.unwrap_or_default(), ) .map_err(|e| { log_error!(self.logger, "Failed to force-close channel: {:?}", e); @@ -1727,7 +1734,7 @@ impl Node { /// can be sure that the signature was generated by the caller. /// Signatures are EC recoverable, meaning that given the message and the /// signature the `PublicKey` of the signer can be extracted. - pub fn sign_message(&self, msg: &[u8]) -> Result { + pub fn sign_message(&self, msg: &[u8]) -> String { self.keys_manager.sign_message(msg) } diff --git a/src/logger.rs b/src/logger.rs index 2be20a165..19df24367 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -9,11 +9,11 @@ pub(crate) use lightning::util::logger::Logger; pub(crate) use lightning::{log_bytes, log_debug, log_error, log_info, log_trace}; use lightning::util::logger::{Level, Record}; -use lightning::util::ser::Writer; use chrono::Utc; use std::fs; +use std::io::Write; #[cfg(not(target_os = "windows"))] use std::os::unix::fs::symlink; use std::path::Path; diff --git a/src/message_handler.rs b/src/message_handler.rs index 18dfa8637..38999512e 100644 --- a/src/message_handler.rs +++ b/src/message_handler.rs @@ -99,4 +99,24 @@ where }, } } + + fn peer_connected( + &self, their_node_id: &PublicKey, msg: &lightning::ln::msgs::Init, inbound: bool, + ) -> Result<(), ()> { + match self { + Self::Ignoring => Ok(()), + Self::Liquidity { liquidity_source, .. } => { + liquidity_source.liquidity_manager().peer_connected(their_node_id, msg, inbound) + }, + } + } + + fn peer_disconnected(&self, their_node_id: &PublicKey) { + match self { + Self::Ignoring => {}, + Self::Liquidity { liquidity_source, .. 
} => { + liquidity_source.liquidity_manager().peer_disconnected(their_node_id) + }, + } + } } diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index b7f72355b..708c127bd 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -23,10 +23,15 @@ use crate::peer_store::{PeerInfo, PeerStore}; use crate::types::{ChannelManager, KeysManager}; use lightning::ln::channelmanager::{PaymentId, RecipientOnionFields, Retry, RetryableSendFailure}; +use lightning::ln::invoice_utils::{ + create_invoice_from_channelmanager_and_duration_since_epoch, + create_invoice_from_channelmanager_and_duration_since_epoch_with_payment_hash, +}; use lightning::ln::{PaymentHash, PaymentPreimage}; use lightning::routing::router::{PaymentParameters, RouteParameters}; -use lightning_invoice::{payment, Bolt11Invoice, Currency}; +use lightning::ln::bolt11_payment; +use lightning_invoice::{Bolt11Invoice, Currency}; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; @@ -88,7 +93,7 @@ impl Bolt11Payment { return Err(Error::NotRunning); } - let (payment_hash, recipient_onion, mut route_params) = payment::payment_parameters_from_invoice(&invoice).map_err(|_| { + let (payment_hash, recipient_onion, mut route_params) = bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { log_error!(self.logger, "Failed to send payment due to the given invoice being \"zero-amount\". Please use send_using_amount instead."); Error::InvalidInvoice })?; @@ -471,7 +476,7 @@ impl Bolt11Payment { let invoice = { let invoice_res = if let Some(payment_hash) = manual_claim_payment_hash { - lightning_invoice::utils::create_invoice_from_channelmanager_and_duration_since_epoch_with_payment_hash( + create_invoice_from_channelmanager_and_duration_since_epoch_with_payment_hash( &self.channel_manager, keys_manager, Arc::clone(&self.logger), @@ -484,7 +489,7 @@ impl Bolt11Payment { None, ) } else { - lightning_invoice::utils::create_invoice_from_channelmanager_and_duration_since_epoch( + create_invoice_from_channelmanager_and_duration_since_epoch( &self.channel_manager, keys_manager, Arc::clone(&self.logger), @@ -696,7 +701,7 @@ impl Bolt11Payment { return Err(Error::NotRunning); } - let (_payment_hash, _recipient_onion, route_params) = payment::payment_parameters_from_invoice(&invoice).map_err(|_| { + let (_payment_hash, _recipient_onion, route_params) = bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { log_error!(self.logger, "Failed to send probes due to the given invoice being \"zero-amount\". Please use send_probes_using_amount instead."); Error::InvalidInvoice })?; @@ -738,12 +743,12 @@ impl Bolt11Payment { return Err(Error::InvalidAmount); } - payment::payment_parameters_from_invoice(&invoice).map_err(|_| { + bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { log_error!(self.logger, "Failed to send probes due to the given invoice unexpectedly being \"zero-amount\"."); Error::InvalidInvoice })? } else { - payment::payment_parameters_from_zero_amount_invoice(&invoice, amount_msat).map_err(|_| { + bolt11_payment::payment_parameters_from_zero_amount_invoice(&invoice, amount_msat).map_err(|_| { log_error!(self.logger, "Failed to send probes due to the given invoice unexpectedly being not \"zero-amount\"."); Error::InvalidInvoice })? 
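
For readers following the move from `lightning_invoice::payment` to `lightning::ln::bolt11_payment` above: both helpers still hand back the `(PaymentHash, RecipientOnionFields, RouteParameters)` triple that the send and probe paths destructure, with `payment_parameters_from_invoice` rejecting zero-amount invoices and `payment_parameters_from_zero_amount_invoice` requiring the caller to supply the amount. A minimal sketch of that selection logic, with the error mapping simplified and the function name purely illustrative:

    use lightning::ln::bolt11_payment;
    use lightning::routing::router::RouteParameters;
    use lightning_invoice::Bolt11Invoice;

    // Pick the appropriate helper depending on whether the invoice already
    // carries an amount; zero-amount invoices need an explicit amount_msat.
    fn route_params_for(
        invoice: &Bolt11Invoice, amount_msat: Option<u64>,
    ) -> Result<RouteParameters, ()> {
        let (_hash, _onion, params) = if invoice.amount_milli_satoshis().is_some() {
            bolt11_payment::payment_parameters_from_invoice(invoice).map_err(|_| ())?
        } else {
            let msat = amount_msat.ok_or(())?;
            bolt11_payment::payment_parameters_from_zero_amount_invoice(invoice, msat)
                .map_err(|_| ())?
        };
        Ok(params)
    }
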
diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 9ec7bde34..90024b7d3 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -112,7 +112,7 @@ impl Bolt12Payment { let payment = PaymentDetails::new( payment_id, kind, - Some(*offer_amount_msat), + Some(offer_amount_msat), PaymentDirection::Outbound, PaymentStatus::Pending, ); @@ -136,7 +136,7 @@ impl Bolt12Payment { let payment = PaymentDetails::new( payment_id, kind, - Some(*offer_amount_msat), + Some(offer_amount_msat), PaymentDirection::Outbound, PaymentStatus::Failed, ); @@ -172,7 +172,7 @@ impl Bolt12Payment { let max_total_routing_fee_msat = None; let offer_amount_msat = match offer.amount() { - Some(Amount::Bitcoin { amount_msats }) => *amount_msats, + Some(Amount::Bitcoin { amount_msats }) => amount_msats, Some(_) => { log_error!(self.logger, "Failed to send payment as the provided offer was denominated in an unsupported currency."); return Err(Error::UnsupportedCurrency); @@ -255,12 +255,19 @@ impl Bolt12Payment { /// Returns a payable offer that can be used to request and receive a payment of the amount /// given. pub fn receive( - &self, amount_msat: u64, description: &str, quantity: Option, + &self, amount_msat: u64, description: &str, expiry_secs: Option, quantity: Option, ) -> Result { - let offer_builder = self.channel_manager.create_offer_builder().map_err(|e| { - log_error!(self.logger, "Failed to create offer builder: {:?}", e); - Error::OfferCreationFailed - })?; + let absolute_expiry = expiry_secs.map(|secs| { + (SystemTime::now() + Duration::from_secs(secs as u64)) + .duration_since(UNIX_EPOCH) + .unwrap() + }); + + let offer_builder = + self.channel_manager.create_offer_builder(absolute_expiry).map_err(|e| { + log_error!(self.logger, "Failed to create offer builder: {:?}", e); + Error::OfferCreationFailed + })?; let mut offer = offer_builder.amount_msats(amount_msat).description(description.to_string()); @@ -284,11 +291,20 @@ impl Bolt12Payment { /// Returns a payable offer that can be used to request and receive a payment for which the /// amount is to be determined by the user, also known as a "zero-amount" offer. 
- pub fn receive_variable_amount(&self, description: &str) -> Result { - let offer_builder = self.channel_manager.create_offer_builder().map_err(|e| { - log_error!(self.logger, "Failed to create offer builder: {:?}", e); - Error::OfferCreationFailed - })?; + pub fn receive_variable_amount( + &self, description: &str, expiry_secs: Option, + ) -> Result { + let absolute_expiry = expiry_secs.map(|secs| { + (SystemTime::now() + Duration::from_secs(secs as u64)) + .duration_since(UNIX_EPOCH) + .unwrap() + }); + + let offer_builder = + self.channel_manager.create_offer_builder(absolute_expiry).map_err(|e| { + log_error!(self.logger, "Failed to create offer builder: {:?}", e); + Error::OfferCreationFailed + })?; let offer = offer_builder.description(description.to_string()).build().map_err(|e| { log_error!(self.logger, "Failed to create offer: {:?}", e); Error::OfferCreationFailed @@ -340,7 +356,7 @@ impl Bolt12Payment { rand::thread_rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); - let expiration = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) + let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) .duration_since(UNIX_EPOCH) .unwrap(); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); @@ -350,7 +366,7 @@ impl Bolt12Payment { .channel_manager .create_refund_builder( amount_msat, - expiration, + absolute_expiry, payment_id, retry_strategy, max_total_routing_fee_msat, diff --git a/src/payment/onchain.rs b/src/payment/onchain.rs index a3cc0d2f2..b43765a97 100644 --- a/src/payment/onchain.rs +++ b/src/payment/onchain.rs @@ -12,7 +12,7 @@ use crate::error::Error; use crate::logger::{log_error, log_info, FilesystemLogger, Logger}; use crate::types::{ChannelManager, Wallet}; -use bitcoin::{Address, Txid}; +use bitcoin::{Address, Amount, Txid}; use std::sync::{Arc, RwLock}; @@ -70,7 +70,9 @@ impl OnchainPayment { ); return Err(Error::InsufficientFunds); } - self.wallet.send_to_address(address, Some(amount_sats)) + + let amount = Amount::from_sat(amount_sats); + self.wallet.send_to_address(address, Some(amount)) } /// Send an on-chain payment to the given address, draining all the available funds. diff --git a/src/payment/store.rs b/src/payment/store.rs index 0cea18002..ee82544dc 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -150,7 +150,7 @@ pub enum PaymentDirection { impl_writeable_tlv_based_enum!(PaymentDirection, (0, Inbound) => {}, - (1, Outbound) => {}; + (1, Outbound) => {} ); /// Represents the current status of a payment. @@ -167,7 +167,7 @@ pub enum PaymentStatus { impl_writeable_tlv_based_enum!(PaymentStatus, (0, Pending) => {}, (2, Succeeded) => {}, - (4, Failed) => {}; + (4, Failed) => {} ); /// Represents the kind of a payment. @@ -293,7 +293,7 @@ impl_writeable_tlv_based_enum!(PaymentKind, (2, preimage, option), (3, quantity, option), (4, secret, option), - }; + } ); /// Limits applying to how much fee we allow an LSP to deduct from the payment amount. 
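
Circling back to the `expiry_secs` parameter added to `Bolt12Payment::receive` and `receive_variable_amount` above: the absolute expiry handed to `create_offer_builder` is simply "now plus `expiry_secs`", expressed as a `Duration` since the Unix epoch, and passing `None` presumably leaves the offer without an expiry. The same computation as a standalone worked example:

    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    // A relative expiry in seconds becomes an absolute timestamp since the epoch,
    // mirroring the conversion in `receive`/`receive_variable_amount`.
    fn absolute_expiry(expiry_secs: u32) -> Duration {
        (SystemTime::now() + Duration::from_secs(expiry_secs as u64))
            .duration_since(UNIX_EPOCH)
            .expect("system clock is after the Unix epoch")
    }

With `expiry_secs = 3600`, the resulting value is the current Unix time plus one hour.
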
@@ -499,11 +499,11 @@ where #[cfg(test)] mod tests { use super::*; + use bitcoin::io::Cursor; use lightning::util::{ ser::Readable, test_utils::{TestLogger, TestStore}, }; - use std::io::Cursor; use std::sync::Arc; /// We refactored `PaymentDetails` to hold a payment id and moved some required fields into diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs index 66488e232..88d372456 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified_qr.rs @@ -90,7 +90,8 @@ impl UnifiedQrPayment { let amount_msats = amount_sats * 1_000; - let bolt12_offer = match self.bolt12_payment.receive(amount_msats, description, None) { + let bolt12_offer = match self.bolt12_payment.receive(amount_msats, description, None, None) + { Ok(offer) => Some(offer), Err(e) => { log_error!(self.logger, "Failed to create offer: {}", e); diff --git a/src/sweep.rs b/src/sweep.rs index 5c1d62a20..ba10869b8 100644 --- a/src/sweep.rs +++ b/src/sweep.rs @@ -10,10 +10,10 @@ //! once sufficient time has passed for us to be confident any users completed the migration. use lightning::impl_writeable_tlv_based; -use lightning::ln::ChannelId; +use lightning::ln::types::ChannelId; use lightning::sign::SpendableOutputDescriptor; -use bitcoin::{BlockHash, Transaction}; +use bitcoin::{Amount, BlockHash, Transaction}; #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct DeprecatedSpendableOutputInfo { @@ -38,7 +38,7 @@ impl_writeable_tlv_based!(DeprecatedSpendableOutputInfo, { (14, confirmation_hash, option), }); -pub(crate) fn value_satoshis_from_descriptor(descriptor: &SpendableOutputDescriptor) -> u64 { +pub(crate) fn value_from_descriptor(descriptor: &SpendableOutputDescriptor) -> Amount { match &descriptor { SpendableOutputDescriptor::StaticOutput { output, .. } => output.value, SpendableOutputDescriptor::DelayedPaymentOutput(output) => output.output.value, diff --git a/src/tx_broadcaster.rs b/src/tx_broadcaster.rs index 88415ba46..37bd616dc 100644 --- a/src/tx_broadcaster.rs +++ b/src/tx_broadcaster.rs @@ -47,6 +47,7 @@ where let mut receiver = self.queue_receiver.lock().await; while let Some(next_package) = receiver.recv().await { for tx in &next_package { + let txid = tx.compute_txid(); let timeout_fut = tokio::time::timeout( Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), self.esplora_client.broadcast(tx), @@ -54,11 +55,7 @@ where match timeout_fut.await { Ok(res) => match res { Ok(()) => { - log_trace!( - self.logger, - "Successfully broadcast transaction {}", - tx.txid() - ); + log_trace!(self.logger, "Successfully broadcast transaction {}", txid); }, Err(e) => match e { esplora_client::Error::Reqwest(err) => { @@ -85,7 +82,7 @@ where log_error!( self.logger, "Failed to broadcast transaction {}: {}", - tx.txid(), + txid, e ); log_trace!( @@ -100,7 +97,7 @@ where log_error!( self.logger, "Failed to broadcast transaction due to timeout {}: {}", - tx.txid(), + txid, e ); log_trace!( diff --git a/src/types.rs b/src/types.rs index 591b73b4d..5005d93a6 100644 --- a/src/types.rs +++ b/src/types.rs @@ -9,11 +9,11 @@ use crate::logger::FilesystemLogger; use crate::message_handler::NodeCustomMessageHandler; use lightning::chain::chainmonitor; -use lightning::ln::channelmanager::ChannelDetails as LdkChannelDetails; +use lightning::ln::channel_state::ChannelDetails as LdkChannelDetails; use lightning::ln::msgs::RoutingMessageHandler; use lightning::ln::msgs::SocketAddress; use lightning::ln::peer_handler::IgnoringMessageHandler; -use lightning::ln::ChannelId; +use lightning::ln::types::ChannelId; use 
lightning::routing::gossip; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters}; @@ -72,19 +72,11 @@ pub(crate) type Broadcaster = crate::tx_broadcaster::TransactionBroadcaster>; -pub(crate) type Wallet = crate::wallet::Wallet< - bdk::database::SqliteDatabase, - Arc, - Arc, - Arc, ->; +pub(crate) type Wallet = + crate::wallet::Wallet, Arc, Arc>; -pub(crate) type KeysManager = crate::wallet::WalletKeysManager< - bdk::database::SqliteDatabase, - Arc, - Arc, - Arc, ->; +pub(crate) type KeysManager = + crate::wallet::WalletKeysManager, Arc, Arc>; pub(crate) type Router = DefaultRouter< Arc, @@ -121,6 +113,7 @@ pub(crate) type OnionMessenger = lightning::onion_message::messenger::OnionMesse Arc, Arc, IgnoringMessageHandler, + IgnoringMessageHandler, >; pub(crate) type MessageRouter = lightning::onion_message::messenger::DefaultMessageRouter< @@ -234,7 +227,7 @@ pub struct ChannelDetails { /// This is a strict superset of `is_channel_ready`. pub is_usable: bool, /// Returns `true` if this channel is (or will be) publicly-announced - pub is_public: bool, + pub is_announced: bool, /// The difference in the CLTV value between incoming HTLCs and an outbound HTLC forwarded over /// the channel. pub cltv_expiry_delta: Option, @@ -308,7 +301,7 @@ impl From for ChannelDetails { is_outbound: value.is_outbound, is_channel_ready: value.is_channel_ready, is_usable: value.is_usable, - is_public: value.is_public, + is_announced: value.is_announced, cltv_expiry_delta: value.config.map(|c| c.cltv_expiry_delta), counterparty_unspendable_punishment_reserve: value .counterparty diff --git a/src/uniffi_types.rs b/src/uniffi_types.rs index 2a6ac8da3..a66bcddea 100644 --- a/src/uniffi_types.rs +++ b/src/uniffi_types.rs @@ -14,8 +14,9 @@ pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, Nod pub use crate::payment::store::{LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus}; pub use crate::payment::{MaxTotalRoutingFeeLimit, QrPaymentResult, SendingParameters}; +pub use lightning::chain::channelmonitor::BalanceSource; pub use lightning::events::{ClosureReason, PaymentFailureReason}; -pub use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; +pub use lightning::ln::types::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; pub use lightning::offers::invoice::Bolt12Invoice; pub use lightning::offers::offer::{Offer, OfferId}; pub use lightning::offers::refund::Refund; diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 6da08715c..b1c053f66 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -5,9 +5,11 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
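
The remainder of this file is the core of the BDK 1.0 migration: the `EsploraBlockchain`/`SqliteDatabase`-backed `bdk::Wallet` becomes a `bdk_wallet::PersistedWallet` persisted through the new `KVStoreWalletPersister` (see `src/wallet/persist.rs` below, with the change-set serialization in `src/wallet/ser.rs`), on-chain syncing switches to `bdk_esplora`'s async `full_scan` flow, and the old balance cache workaround is dropped since `balance()` no longer goes through a fallible database call.
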
+use persist::KVStoreWalletPersister; + use crate::logger::{log_error, log_info, log_trace, Logger}; -use crate::config::BDK_WALLET_SYNC_TIMEOUT_SECS; +use crate::config::{BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS}; use crate::fee_estimator::{ConfirmationTarget, FeeEstimator}; use crate::Error; @@ -22,77 +24,70 @@ use lightning::sign::{ }; use lightning::util::message_signing; +use lightning_invoice::RawBolt11Invoice; -use bdk::blockchain::EsploraBlockchain; -use bdk::database::BatchDatabase; -use bdk::wallet::AddressIndex; -use bdk::{Balance, SignOptions, SyncOptions}; +use bdk_chain::ChainPosition; +use bdk_esplora::EsploraAsyncExt; +use bdk_wallet::{KeychainKind, PersistedWallet, SignOptions}; -use bitcoin::address::{Payload, WitnessVersion}; -use bitcoin::bech32::u5; use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR; use bitcoin::blockdata::locktime::absolute::LockTime; -use bitcoin::hash_types::WPubkeyHash; use bitcoin::hashes::Hash; use bitcoin::key::XOnlyPublicKey; -use bitcoin::psbt::PartiallySignedTransaction; +use bitcoin::psbt::Psbt; use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey, Signing}; -use bitcoin::{ScriptBuf, Transaction, TxOut, Txid}; +use bitcoin::{ + Amount, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, WitnessProgram, WitnessVersion, +}; + +use esplora_client::AsyncClient as EsploraAsyncClient; use std::ops::{Deref, DerefMut}; -use std::sync::{Arc, Mutex, RwLock}; +use std::sync::{Arc, Mutex}; use std::time::Duration; +pub(crate) mod persist; +pub(crate) mod ser; + enum WalletSyncStatus { Completed, InProgress { subscribers: tokio::sync::broadcast::Sender> }, } -pub(crate) struct Wallet +pub(crate) struct Wallet where - D: BatchDatabase, B::Target: BroadcasterInterface, E::Target: FeeEstimator, L::Target: Logger, { - // A BDK blockchain used for wallet sync. - blockchain: EsploraBlockchain, // A BDK on-chain wallet. - inner: Mutex>, - // A cache storing the most recently retrieved fee rate estimations. + inner: Mutex>, + persister: Mutex, + esplora_client: EsploraAsyncClient, broadcaster: B, fee_estimator: E, // A Mutex holding the current sync status. sync_status: Mutex, - // TODO: Drop this workaround after BDK 1.0 upgrade. 
- balance_cache: RwLock, logger: L, } -impl Wallet +impl Wallet where - D: BatchDatabase, B::Target: BroadcasterInterface, E::Target: FeeEstimator, L::Target: Logger, { pub(crate) fn new( - blockchain: EsploraBlockchain, wallet: bdk::Wallet, broadcaster: B, fee_estimator: E, - logger: L, + wallet: bdk_wallet::PersistedWallet, + wallet_persister: KVStoreWalletPersister, esplora_client: EsploraAsyncClient, + broadcaster: B, fee_estimator: E, logger: L, ) -> Self { - let start_balance = wallet.get_balance().unwrap_or(Balance { - immature: 0, - trusted_pending: 0, - untrusted_pending: 0, - confirmed: 0, - }); - let inner = Mutex::new(wallet); + let persister = Mutex::new(wallet_persister); let sync_status = Mutex::new(WalletSyncStatus::Completed); - let balance_cache = RwLock::new(start_balance); - Self { blockchain, inner, broadcaster, fee_estimator, sync_status, balance_cache, logger } + Self { inner, persister, esplora_client, broadcaster, fee_estimator, sync_status, logger } } pub(crate) async fn sync(&self) -> Result<(), Error> { @@ -106,41 +101,53 @@ where } let res = { - let wallet_lock = self.inner.lock().unwrap(); + let full_scan_request = self.inner.lock().unwrap().start_full_scan().build(); let wallet_sync_timeout_fut = tokio::time::timeout( Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - wallet_lock.sync(&self.blockchain, SyncOptions { progress: None }), + self.esplora_client.full_scan( + full_scan_request, + BDK_CLIENT_STOP_GAP, + BDK_CLIENT_CONCURRENCY, + ), ); match wallet_sync_timeout_fut.await { Ok(res) => match res { - Ok(()) => { - // TODO: Drop this workaround after BDK 1.0 upgrade. - // Update balance cache after syncing. - if let Ok(balance) = wallet_lock.get_balance() { - *self.balance_cache.write().unwrap() = balance; - } - Ok(()) - }, - Err(e) => match e { - bdk::Error::Esplora(ref be) => match **be { - bdk::blockchain::esplora::EsploraError::Reqwest(_) => { + Ok(update) => { + let mut locked_wallet = self.inner.lock().unwrap(); + match locked_wallet.apply_update(update) { + Ok(()) => { + let mut locked_persister = self.persister.lock().unwrap(); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + + Ok(()) + }, + Err(e) => { log_error!( self.logger, - "Sync failed due to HTTP connection error: {}", + "Sync failed due to chain connection error: {}", e ); - Err(From::from(e)) - }, - _ => { - log_error!(self.logger, "Sync failed due to Esplora error: {}", e); - Err(From::from(e)) + Err(Error::WalletOperationFailed) }, + } + }, + Err(e) => match *e { + esplora_client::Error::Reqwest(he) => { + log_error!( + self.logger, + "Sync failed due to HTTP connection error: {}", + he + ); + Err(Error::WalletOperationFailed) }, _ => { - log_error!(self.logger, "Wallet sync error: {}", e); - Err(From::from(e)) + log_error!(self.logger, "Sync failed due to Esplora error: {}", e); + Err(Error::WalletOperationFailed) }, }, }, @@ -157,22 +164,22 @@ where } pub(crate) fn create_funding_transaction( - &self, output_script: ScriptBuf, value_sats: u64, confirmation_target: ConfirmationTarget, + &self, output_script: ScriptBuf, amount: Amount, confirmation_target: ConfirmationTarget, locktime: LockTime, ) -> Result { let fee_rate = self.fee_estimator.estimate_fee_rate(confirmation_target); - let locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().unwrap(); let mut tx_builder = locked_wallet.build_tx(); tx_builder - .add_recipient(output_script, 
value_sats) + .add_recipient(output_script, amount) .fee_rate(fee_rate) .nlocktime(locktime) .enable_rbf(); let mut psbt = match tx_builder.finish() { - Ok((psbt, _)) => { + Ok(psbt) => { log_trace!(self.logger, "Created funding PSBT: {:?}", psbt); psbt }, @@ -194,39 +201,52 @@ where }, } - Ok(psbt.extract_tx()) + let mut locked_persister = self.persister.lock().unwrap(); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + + let tx = psbt.extract_tx().map_err(|e| { + log_error!(self.logger, "Failed to extract transaction: {}", e); + e + })?; + + Ok(tx) } pub(crate) fn get_new_address(&self) -> Result { - let address_info = self.inner.lock().unwrap().get_address(AddressIndex::New)?; + let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_persister = self.persister.lock().unwrap(); + + let address_info = locked_wallet.reveal_next_address(KeychainKind::External); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; Ok(address_info.address) } fn get_new_internal_address(&self) -> Result { - let address_info = - self.inner.lock().unwrap().get_internal_address(AddressIndex::LastUnused)?; + let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_persister = self.persister.lock().unwrap(); + + let address_info = locked_wallet.next_unused_address(KeychainKind::Internal); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; Ok(address_info.address) } pub(crate) fn get_balances( &self, total_anchor_channels_reserve_sats: u64, ) -> Result<(u64, u64), Error> { - // TODO: Drop this workaround after BDK 1.0 upgrade. - // We get the balance and update our cache if we can do so without blocking on the wallet - // Mutex. Otherwise, we return a cached value. - let balance = match self.inner.try_lock() { - Ok(wallet_lock) => { - // Update balance cache if we can. - let balance = wallet_lock.get_balance()?; - *self.balance_cache.write().unwrap() = balance.clone(); - balance - }, - Err(_) => self.balance_cache.read().unwrap().clone(), - }; + let balance = self.inner.lock().unwrap().balance(); let (total, spendable) = ( - balance.get_total(), - balance.get_spendable().saturating_sub(total_anchor_channels_reserve_sats), + balance.total().to_sat(), + balance.trusted_spendable().to_sat().saturating_sub(total_anchor_channels_reserve_sats), ); Ok((total, spendable)) @@ -243,18 +263,18 @@ where /// If `amount_msat_or_drain` is `None` the wallet will be drained, i.e., all available funds will be /// spent. 
pub(crate) fn send_to_address( - &self, address: &bitcoin::Address, amount_msat_or_drain: Option, + &self, address: &bitcoin::Address, amount_or_drain: Option, ) -> Result { let confirmation_target = ConfirmationTarget::OnchainPayment; let fee_rate = self.fee_estimator.estimate_fee_rate(confirmation_target); let tx = { - let locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().unwrap(); let mut tx_builder = locked_wallet.build_tx(); - if let Some(amount_sats) = amount_msat_or_drain { + if let Some(amount) = amount_or_drain { tx_builder - .add_recipient(address.script_pubkey(), amount_sats) + .add_recipient(address.script_pubkey(), amount) .fee_rate(fee_rate) .enable_rbf(); } else { @@ -266,7 +286,7 @@ where } let mut psbt = match tx_builder.finish() { - Ok((psbt, _)) => { + Ok(psbt) => { log_trace!(self.logger, "Created PSBT: {:?}", psbt); psbt }, @@ -287,19 +307,29 @@ where return Err(err.into()); }, } - psbt.extract_tx() + + let mut locked_persister = self.persister.lock().unwrap(); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + + psbt.extract_tx().map_err(|e| { + log_error!(self.logger, "Failed to extract transaction: {}", e); + e + })? }; self.broadcaster.broadcast_transactions(&[&tx]); - let txid = tx.txid(); + let txid = tx.compute_txid(); - if let Some(amount_sats) = amount_msat_or_drain { + if let Some(amount) = amount_or_drain { log_info!( self.logger, "Created new transaction {} sending {}sats on-chain to address {}", txid, - amount_sats, + amount.to_sat(), address ); } else { @@ -368,9 +398,8 @@ where } } -impl WalletSource for Wallet +impl WalletSource for Wallet where - D: BatchDatabase, B::Target: BroadcasterInterface, E::Target: FeeEstimator, L::Target: Logger, @@ -378,67 +407,57 @@ where fn list_confirmed_utxos(&self) -> Result, ()> { let locked_wallet = self.inner.lock().unwrap(); let mut utxos = Vec::new(); - let confirmed_txs: Vec = locked_wallet - .list_transactions(false) - .map_err(|e| { - log_error!(self.logger, "Failed to retrieve transactions from wallet: {}", e); - })? - .into_iter() - .filter(|t| t.confirmation_time.is_some()) + let confirmed_txs: Vec = locked_wallet + .transactions() + .filter(|t| matches!(t.chain_position, ChainPosition::Confirmed(_))) + .map(|t| t.tx_node.txid) .collect(); - let unspent_confirmed_utxos = locked_wallet - .list_unspent() - .map_err(|e| { - log_error!( - self.logger, - "Failed to retrieve unspent transactions from wallet: {}", - e - ); - })? 
- .into_iter() - .filter(|u| confirmed_txs.iter().find(|t| t.txid == u.outpoint.txid).is_some()); + let unspent_confirmed_utxos = + locked_wallet.list_unspent().filter(|u| confirmed_txs.contains(&u.outpoint.txid)); for u in unspent_confirmed_utxos { - let payload = Payload::from_script(&u.txout.script_pubkey).map_err(|e| { - log_error!(self.logger, "Failed to retrieve script payload: {}", e); - })?; + let script_pubkey = u.txout.script_pubkey; + match script_pubkey.witness_version() { + Some(version @ WitnessVersion::V0) => { + let witness_program = WitnessProgram::new(version, script_pubkey.as_bytes()) + .map_err(|e| { + log_error!(self.logger, "Failed to retrieve script payload: {}", e); + })?; - match payload { - Payload::WitnessProgram(program) => match program.version() { - WitnessVersion::V0 if program.program().len() == 20 => { - let wpkh = - WPubkeyHash::from_slice(program.program().as_bytes()).map_err(|e| { - log_error!(self.logger, "Failed to retrieve script payload: {}", e); - })?; - let utxo = Utxo::new_v0_p2wpkh(u.outpoint, u.txout.value, &wpkh); - utxos.push(utxo); - }, - WitnessVersion::V1 => { - XOnlyPublicKey::from_slice(program.program().as_bytes()).map_err(|e| { + let wpkh = WPubkeyHash::from_slice(&witness_program.program().as_bytes()) + .map_err(|e| { + log_error!(self.logger, "Failed to retrieve script payload: {}", e); + })?; + let utxo = Utxo::new_v0_p2wpkh(u.outpoint, u.txout.value, &wpkh); + utxos.push(utxo); + }, + Some(version @ WitnessVersion::V1) => { + let witness_program = WitnessProgram::new(version, script_pubkey.as_bytes()) + .map_err(|e| { log_error!(self.logger, "Failed to retrieve script payload: {}", e); })?; - let utxo = Utxo { - outpoint: u.outpoint, - output: TxOut { - value: u.txout.value, - script_pubkey: ScriptBuf::new_witness_program(&program), - }, - satisfaction_weight: 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64 + - 1 /* witness items */ + 1 /* schnorr sig len */ + 64, /* schnorr sig */ - }; - utxos.push(utxo); - }, - _ => { - log_error!( - self.logger, - "Unexpected witness version or length. Version: {}, Length: {}", - program.version(), - program.program().len() - ); - }, + XOnlyPublicKey::from_slice(&witness_program.program().as_bytes()).map_err( + |e| { + log_error!(self.logger, "Failed to retrieve script payload: {}", e); + }, + )?; + + let utxo = Utxo { + outpoint: u.outpoint, + output: TxOut { + value: u.txout.value, + script_pubkey: ScriptBuf::new_witness_program(&witness_program), + }, + satisfaction_weight: 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64 + + 1 /* witness items */ + 1 /* schnorr sig len */ + 64, /* schnorr sig */ + }; + utxos.push(utxo); }, - _ => { + Some(version) => { + log_error!(self.logger, "Unexpected witness version: {}", version,); + }, + None => { log_error!( self.logger, "Tried to use a non-witness script. This must never happen." 
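
One thing that may be worth double-checking in `list_confirmed_utxos`: `WitnessProgram::new(version, script_pubkey.as_bytes())` is handed the full scriptPubKey (version opcode, push byte, and program) rather than the bare 20- or 32-byte witness program. If `WitnessProgram::new` enforces the BIP141 program lengths for v0/v1, as I believe it does, these calls would fail for ordinary P2WPKH and P2TR outputs and the `?` would make the whole UTXO listing bail out. Slicing off the first two script bytes, or deriving the program via the corresponding address, may be what was intended here.
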
@@ -452,16 +471,18 @@ where } fn get_change_script(&self) -> Result { - let locked_wallet = self.inner.lock().unwrap(); - let address_info = - locked_wallet.get_internal_address(AddressIndex::LastUnused).map_err(|e| { - log_error!(self.logger, "Failed to retrieve new address from wallet: {}", e); - })?; + let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_persister = self.persister.lock().unwrap(); + let address_info = locked_wallet.next_unused_address(KeychainKind::Internal); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + () + })?; Ok(address_info.address.script_pubkey()) } - fn sign_psbt(&self, mut psbt: PartiallySignedTransaction) -> Result { + fn sign_psbt(&self, mut psbt: Psbt) -> Result { let locked_wallet = self.inner.lock().unwrap(); // While BDK populates both `witness_utxo` and `non_witness_utxo` fields, LDK does not. As @@ -482,27 +503,30 @@ where }, } - Ok(psbt.extract_tx()) + let tx = psbt.extract_tx().map_err(|e| { + log_error!(self.logger, "Failed to extract transaction: {}", e); + () + })?; + + Ok(tx) } } /// Similar to [`KeysManager`], but overrides the destination and shutdown scripts so they are /// directly spendable by the BDK wallet. -pub(crate) struct WalletKeysManager +pub(crate) struct WalletKeysManager where - D: BatchDatabase, B::Target: BroadcasterInterface, E::Target: FeeEstimator, L::Target: Logger, { inner: KeysManager, - wallet: Arc>, + wallet: Arc>, logger: L, } -impl WalletKeysManager +impl WalletKeysManager where - D: BatchDatabase, B::Target: BroadcasterInterface, E::Target: FeeEstimator, L::Target: Logger, @@ -513,15 +537,14 @@ where /// `starting_time_nanos`. pub fn new( seed: &[u8; 32], starting_time_secs: u64, starting_time_nanos: u32, - wallet: Arc>, logger: L, + wallet: Arc>, logger: L, ) -> Self { let inner = KeysManager::new(seed, starting_time_secs, starting_time_nanos); Self { inner, wallet, logger } } - pub fn sign_message(&self, msg: &[u8]) -> Result { + pub fn sign_message(&self, msg: &[u8]) -> String { message_signing::sign(msg, &self.inner.get_node_secret_key()) - .or(Err(Error::MessageSigningFailed)) } pub fn get_node_secret_key(&self) -> SecretKey { @@ -533,9 +556,8 @@ where } } -impl NodeSigner for WalletKeysManager +impl NodeSigner for WalletKeysManager where - D: BatchDatabase, B::Target: BroadcasterInterface, E::Target: FeeEstimator, L::Target: Logger, @@ -555,9 +577,9 @@ where } fn sign_invoice( - &self, hrp_bytes: &[u8], invoice_data: &[u5], recipient: Recipient, + &self, invoice: &RawBolt11Invoice, recipient: Recipient, ) -> Result { - self.inner.sign_invoice(hrp_bytes, invoice_data, recipient) + self.inner.sign_invoice(invoice, recipient) } fn sign_gossip_message(&self, msg: UnsignedGossipMessage<'_>) -> Result { @@ -577,9 +599,8 @@ where } } -impl OutputSpender for WalletKeysManager +impl OutputSpender for WalletKeysManager where - D: BatchDatabase, B::Target: BroadcasterInterface, E::Target: FeeEstimator, L::Target: Logger, @@ -601,9 +622,8 @@ where } } -impl EntropySource for WalletKeysManager +impl EntropySource for WalletKeysManager where - D: BatchDatabase, B::Target: BroadcasterInterface, E::Target: FeeEstimator, L::Target: Logger, @@ -613,9 +633,8 @@ where } } -impl SignerProvider for WalletKeysManager +impl SignerProvider for WalletKeysManager where - D: BatchDatabase, B::Target: BroadcasterInterface, E::Target: FeeEstimator, L::Target: Logger, @@ -650,11 +669,10 @@ where log_error!(self.logger, "Failed to retrieve new address 
from wallet: {}", e); })?; - match address.payload { - Payload::WitnessProgram(program) => ShutdownScript::new_witness_program(&program) - .map_err(|e| { - log_error!(self.logger, "Invalid shutdown script: {:?}", e); - }), + match address.witness_program() { + Some(program) => ShutdownScript::new_witness_program(&program).map_err(|e| { + log_error!(self.logger, "Invalid shutdown script: {:?}", e); + }), _ => { log_error!( self.logger, @@ -666,9 +684,8 @@ where } } -impl ChangeDestinationSource for WalletKeysManager +impl ChangeDestinationSource for WalletKeysManager where - D: BatchDatabase, B::Target: BroadcasterInterface, E::Target: FeeEstimator, L::Target: Logger, diff --git a/src/wallet/persist.rs b/src/wallet/persist.rs new file mode 100644 index 000000000..06af541a2 --- /dev/null +++ b/src/wallet/persist.rs @@ -0,0 +1,187 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use crate::io::utils::{ + read_bdk_wallet_change_set, write_bdk_wallet_change_descriptor, write_bdk_wallet_descriptor, + write_bdk_wallet_indexer, write_bdk_wallet_local_chain, write_bdk_wallet_network, + write_bdk_wallet_tx_graph, +}; +use crate::logger::{log_error, FilesystemLogger}; +use crate::types::DynStore; + +use lightning::util::logger::Logger; + +use bdk_chain::Merge; +use bdk_wallet::{ChangeSet, WalletPersister}; + +use std::sync::Arc; +pub(crate) struct KVStoreWalletPersister { + latest_change_set: Option, + kv_store: Arc, + logger: Arc, +} + +impl KVStoreWalletPersister { + pub(crate) fn new(kv_store: Arc, logger: Arc) -> Self { + Self { latest_change_set: None, kv_store, logger } + } +} + +impl WalletPersister for KVStoreWalletPersister { + type Error = std::io::Error; + + fn initialize(persister: &mut Self) -> Result { + // Return immediately if we have already been initialized. + if let Some(latest_change_set) = persister.latest_change_set.as_ref() { + return Ok(latest_change_set.clone()); + } + + let change_set_opt = read_bdk_wallet_change_set( + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + + let change_set = match change_set_opt { + Some(persisted_change_set) => persisted_change_set, + None => { + // BDK docs state: "The implementation must return all data currently stored in the + // persister. If there is no data, return an empty changeset (using + // ChangeSet::default())." + ChangeSet::default() + }, + }; + persister.latest_change_set = Some(change_set.clone()); + Ok(change_set) + } + + fn persist(persister: &mut Self, change_set: &ChangeSet) -> Result<(), Self::Error> { + if change_set.is_empty() { + return Ok(()); + } + + // We're allowed to fail here if we're not initialized, BDK docs state: "This method can fail if the + // persister is not initialized." + let latest_change_set = persister.latest_change_set.as_mut().ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::Other, + "Wallet must be initialized before calling persist", + ) + })?; + + // Check that we'd never accidentally override any persisted data if the change set doesn't + // match our descriptor/change_descriptor/network. 
+ if let Some(descriptor) = change_set.descriptor.as_ref() { + if latest_change_set.descriptor.is_some() + && latest_change_set.descriptor.as_ref() != Some(descriptor) + { + debug_assert!(false, "Wallet descriptor must never change"); + log_error!( + persister.logger, + "Wallet change set doesn't match persisted descriptor. This should never happen." + ); + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Wallet change set doesn't match persisted descriptor. This should never happen." + )); + } else { + latest_change_set.descriptor = Some(descriptor.clone()); + write_bdk_wallet_descriptor( + &descriptor, + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + } + } + + if let Some(change_descriptor) = change_set.change_descriptor.as_ref() { + if latest_change_set.change_descriptor.is_some() + && latest_change_set.change_descriptor.as_ref() != Some(change_descriptor) + { + debug_assert!(false, "Wallet change_descriptor must never change"); + log_error!( + persister.logger, + "Wallet change set doesn't match persisted change_descriptor. This should never happen." + ); + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Wallet change set doesn't match persisted change_descriptor. This should never happen." + )); + } else { + latest_change_set.change_descriptor = Some(change_descriptor.clone()); + write_bdk_wallet_change_descriptor( + &change_descriptor, + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + } + } + + if let Some(network) = change_set.network { + if latest_change_set.network.is_some() && latest_change_set.network != Some(network) { + debug_assert!(false, "Wallet network must never change"); + log_error!( + persister.logger, + "Wallet change set doesn't match persisted network. This should never happen." + ); + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Wallet change set doesn't match persisted network. This should never happen.", + )); + } else { + latest_change_set.network = Some(network); + write_bdk_wallet_network( + &network, + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + } + } + + debug_assert!( + latest_change_set.descriptor.is_some() + && latest_change_set.change_descriptor.is_some() + && latest_change_set.network.is_some(), + "descriptor, change_descriptor, and network are mandatory ChangeSet fields" + ); + + // Merge and persist the sub-changesets individually if necessary. + // + // According to the BDK team the individual sub-changesets can be persisted + // individually/non-atomically, "(h)owever, the localchain tip is used by block-by-block + // chain sources as a reference as to where to sync from, so I would persist that last", "I + // would write in this order: indexer, tx_graph, local_chain", which is why we follow this + // particular order. 
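
Two observations on the persistence flow that may help reviewers: the `locked_wallet.persist(&mut locked_persister)` calls added throughout `wallet/mod.rs` above are what ultimately invoke this `WalletPersister::persist` with the wallet's staged `ChangeSet`, while `initialize` should only be exercised when the `PersistedWallet` is first constructed from the store. And the three blocks below merge each incoming delta into `latest_change_set` before writing, so every key always holds the full accumulated sub-changeset rather than an incremental diff; readers only ever need the latest value per key, at the cost of rewriting the full aggregated value whenever a sub-changeset is touched.
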
+ if !change_set.indexer.is_empty() { + latest_change_set.indexer.merge(change_set.indexer.clone()); + write_bdk_wallet_indexer( + &latest_change_set.indexer, + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + } + + if !change_set.tx_graph.is_empty() { + latest_change_set.tx_graph.merge(change_set.tx_graph.clone()); + write_bdk_wallet_tx_graph( + &latest_change_set.tx_graph, + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + } + + if !change_set.local_chain.is_empty() { + latest_change_set.local_chain.merge(change_set.local_chain.clone()); + write_bdk_wallet_local_chain( + &latest_change_set.local_chain, + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + } + + Ok(()) + } +} diff --git a/src/wallet/ser.rs b/src/wallet/ser.rs new file mode 100644 index 000000000..2e33992a8 --- /dev/null +++ b/src/wallet/ser.rs @@ -0,0 +1,346 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use lightning::ln::msgs::DecodeError; +use lightning::util::ser::{BigSize, Readable, RequiredWrapper, Writeable, Writer}; +use lightning::{decode_tlv_stream, encode_tlv_stream, read_tlv_fields, write_tlv_fields}; + +use bdk_chain::bdk_core::{BlockId, ConfirmationBlockTime}; +use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; +use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; +use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; +use bdk_chain::DescriptorId; + +use bdk_wallet::descriptor::Descriptor; +use bdk_wallet::keys::DescriptorPublicKey; + +use bitcoin::hashes::sha256::Hash as Sha256Hash; +use bitcoin::p2p::Magic; +use bitcoin::{BlockHash, Network, OutPoint, Transaction, TxOut, Txid}; + +use std::collections::{BTreeMap, BTreeSet}; +use std::str::FromStr; +use std::sync::Arc; + +const CHANGESET_SERIALIZATION_VERSION: u8 = 1; + +pub(crate) struct ChangeSetSerWrapper<'a, T>(pub &'a T); +pub(crate) struct ChangeSetDeserWrapper(pub T); + +impl<'a> Writeable for ChangeSetSerWrapper<'a, Descriptor> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + CHANGESET_SERIALIZATION_VERSION.write(writer)?; + + self.0.to_string().write(writer) + } +} + +impl Readable for ChangeSetDeserWrapper> { + fn read(reader: &mut R) -> Result { + let version: u8 = Readable::read(reader)?; + if version != CHANGESET_SERIALIZATION_VERSION { + return Err(DecodeError::UnknownVersion); + } + + let descriptor_str: String = Readable::read(reader)?; + let descriptor = Descriptor::::from_str(&descriptor_str) + .map_err(|_| DecodeError::InvalidValue)?; + Ok(Self(descriptor)) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, Network> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + CHANGESET_SERIALIZATION_VERSION.write(writer)?; + + self.0.magic().to_bytes().write(writer) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + let version: u8 = Readable::read(reader)?; + if version != CHANGESET_SERIALIZATION_VERSION { + return Err(DecodeError::UnknownVersion); + } + + let buf: [u8; 4] = Readable::read(reader)?; + let magic = Magic::from_bytes(buf); + let network = Network::from_magic(magic).ok_or(DecodeError::InvalidValue)?; + Ok(Self(network)) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, 
BdkLocalChainChangeSet> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + CHANGESET_SERIALIZATION_VERSION.write(writer)?; + + encode_tlv_stream!(writer, { + (0, self.0.blocks, required), + }); + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + let version: u8 = Readable::read(reader)?; + if version != CHANGESET_SERIALIZATION_VERSION { + return Err(DecodeError::UnknownVersion); + } + + let mut blocks = RequiredWrapper(None); + decode_tlv_stream!(reader, { + (0, blocks, required), + }); + Ok(Self(BdkLocalChainChangeSet { blocks: blocks.0.unwrap() })) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BdkTxGraphChangeSet> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + CHANGESET_SERIALIZATION_VERSION.write(writer)?; + + encode_tlv_stream!(writer, { + (0, ChangeSetSerWrapper(&self.0.txs), required), + (2, self.0.txouts, required), + (4, ChangeSetSerWrapper(&self.0.anchors), required), + (6, self.0.last_seen, required), + }); + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper> { + fn read(reader: &mut R) -> Result { + let version: u8 = Readable::read(reader)?; + if version != CHANGESET_SERIALIZATION_VERSION { + return Err(DecodeError::UnknownVersion); + } + + let mut txs: RequiredWrapper>>> = + RequiredWrapper(None); + let mut txouts: RequiredWrapper> = RequiredWrapper(None); + let mut anchors: RequiredWrapper< + ChangeSetDeserWrapper>, + > = RequiredWrapper(None); + let mut last_seen: RequiredWrapper> = RequiredWrapper(None); + + decode_tlv_stream!(reader, { + (0, txs, required), + (2, txouts, required), + (4, anchors, required), + (6, last_seen, required), + }); + + Ok(Self(BdkTxGraphChangeSet { + txs: txs.0.unwrap().0, + txouts: txouts.0.unwrap(), + anchors: anchors.0.unwrap().0, + last_seen: last_seen.0.unwrap(), + })) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BTreeSet<(ConfirmationBlockTime, Txid)>> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + let len = BigSize(self.0.len() as u64); + len.write(writer)?; + for (time, txid) in self.0.iter() { + write_tlv_fields!(writer, { + (0, ChangeSetSerWrapper(time), required), + (2, txid, required), + }); + } + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper> { + fn read(reader: &mut R) -> Result { + let len: BigSize = Readable::read(reader)?; + let mut set = BTreeSet::new(); + for _ in 0..len.0 { + let mut time: RequiredWrapper> = + RequiredWrapper(None); + let mut txid: RequiredWrapper = RequiredWrapper(None); + read_tlv_fields!(reader, { + (0, time, required), + (2, txid, required), + }); + set.insert((time.0.unwrap().0, txid.0.unwrap())); + } + Ok(Self(set)) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BTreeSet>> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + let len = BigSize(self.0.len() as u64); + len.write(writer)?; + for tx in self.0.iter() { + write_tlv_fields!(writer, { + (0, tx, required), + }); + } + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper>> { + fn read(reader: &mut R) -> Result { + let len: BigSize = Readable::read(reader)?; + let mut set = BTreeSet::new(); + for _ in 0..len.0 { + let mut tx: RequiredWrapper = RequiredWrapper(None); + read_tlv_fields!(reader, { + (0, tx, required), + }); + set.insert(Arc::new(tx.0.unwrap())); + } + Ok(Self(set)) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, ConfirmationBlockTime> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + 
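+		// No version byte here: `ConfirmationBlockTime` (and the `BlockId` it contains) are only ever
+		// (de)serialized as part of an enclosing sub-changeset wrapper, which already carries
+		// CHANGESET_SERIALIZATION_VERSION.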
encode_tlv_stream!(writer, { + (0, ChangeSetSerWrapper(&self.0.block_id), required), + (2, self.0.confirmation_time, required), + }); + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + let mut block_id: RequiredWrapper> = RequiredWrapper(None); + let mut confirmation_time: RequiredWrapper = RequiredWrapper(None); + + decode_tlv_stream!(reader, { + (0, block_id, required), + (2, confirmation_time, required), + }); + + Ok(Self(ConfirmationBlockTime { + block_id: block_id.0.unwrap().0, + confirmation_time: confirmation_time.0.unwrap(), + })) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BlockId> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + encode_tlv_stream!(writer, { + (0, self.0.height, required), + (2, self.0.hash, required), + }); + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + let mut height: RequiredWrapper = RequiredWrapper(None); + let mut hash: RequiredWrapper = RequiredWrapper(None); + decode_tlv_stream!(reader, { + (0, height, required), + (2, hash, required), + }); + + Ok(Self(BlockId { height: height.0.unwrap(), hash: hash.0.unwrap() })) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BdkIndexerChangeSet> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + CHANGESET_SERIALIZATION_VERSION.write(writer)?; + + encode_tlv_stream!(writer, { (0, ChangeSetSerWrapper(&self.0.last_revealed), required) }); + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + let version: u8 = Readable::read(reader)?; + if version != CHANGESET_SERIALIZATION_VERSION { + return Err(DecodeError::UnknownVersion); + } + + let mut last_revealed: RequiredWrapper>> = + RequiredWrapper(None); + + decode_tlv_stream!(reader, { (0, last_revealed, required) }); + + Ok(Self(BdkIndexerChangeSet { last_revealed: last_revealed.0.unwrap().0 })) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BTreeMap> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + let len = BigSize(self.0.len() as u64); + len.write(writer)?; + for (descriptor_id, last_index) in self.0.iter() { + write_tlv_fields!(writer, { + (0, ChangeSetSerWrapper(descriptor_id), required), + (2, last_index, required), + }); + } + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper> { + fn read(reader: &mut R) -> Result { + let len: BigSize = Readable::read(reader)?; + let mut set = BTreeMap::new(); + for _ in 0..len.0 { + let mut descriptor_id: RequiredWrapper> = + RequiredWrapper(None); + let mut last_index: RequiredWrapper = RequiredWrapper(None); + read_tlv_fields!(reader, { + (0, descriptor_id, required), + (2, last_index, required), + }); + set.insert(descriptor_id.0.unwrap().0, last_index.0.unwrap()); + } + Ok(Self(set)) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, DescriptorId> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + encode_tlv_stream!(writer, { (0, ChangeSetSerWrapper(&self.0 .0), required) }); + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + let mut hash: RequiredWrapper> = RequiredWrapper(None); + + decode_tlv_stream!(reader, { (0, hash, required) }); + + Ok(Self(DescriptorId(hash.0.unwrap().0))) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, Sha256Hash> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + writer.write_all(&self.0[..]) + } +} + +impl Readable for 
ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + use bitcoin::hashes::Hash; + + let buf: [u8; 32] = Readable::read(reader)?; + Ok(Self(Sha256Hash::from_slice(&buf[..]).unwrap())) + } +} diff --git a/tests/common/mod.rs b/tests/common/mod.rs index f8a9eae7a..a7cd87323 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -759,7 +759,7 @@ pub(crate) fn do_channel_full_cycle( println!("\nB close_channel (force: {})", force_close); if force_close { std::thread::sleep(Duration::from_secs(1)); - node_a.force_close_channel(&user_channel_id, node_b.node_id()).unwrap(); + node_a.force_close_channel(&user_channel_id, node_b.node_id(), None).unwrap(); } else { node_a.close_channel(&user_channel_id, node_b.node_id()).unwrap(); } @@ -913,7 +913,7 @@ impl TestSyncStore { fn do_list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> std::io::Result> { + ) -> lightning::io::Result> { let fs_res = self.fs_store.list(primary_namespace, secondary_namespace); let sqlite_res = self.sqlite_store.list(primary_namespace, secondary_namespace); let test_res = self.test_store.list(primary_namespace, secondary_namespace); @@ -944,7 +944,7 @@ impl TestSyncStore { impl KVStore for TestSyncStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> std::io::Result> { + ) -> lightning::io::Result> { let _guard = self.serializer.read().unwrap(); let fs_res = self.fs_store.read(primary_namespace, secondary_namespace, key); @@ -969,7 +969,7 @@ impl KVStore for TestSyncStore { fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], - ) -> std::io::Result<()> { + ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); let fs_res = self.fs_store.write(primary_namespace, secondary_namespace, key, buf); let sqlite_res = self.sqlite_store.write(primary_namespace, secondary_namespace, key, buf); @@ -996,7 +996,7 @@ impl KVStore for TestSyncStore { fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> std::io::Result<()> { + ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); let fs_res = self.fs_store.remove(primary_namespace, secondary_namespace, key, lazy); let sqlite_res = @@ -1024,7 +1024,7 @@ impl KVStore for TestSyncStore { fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> std::io::Result> { + ) -> lightning::io::Result> { let _guard = self.serializer.read().unwrap(); self.do_list(primary_namespace, secondary_namespace) } diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 907e89084..6d33e80c6 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -312,7 +312,7 @@ fn sign_verify_msg() { // Tests arbitrary message signing and later verification let msg = "OK computer".as_bytes(); - let sig = node.sign_message(msg).unwrap(); + let sig = node.sign_message(msg); let pkey = node.node_id(); assert!(node.verify_signature(msg, sig.as_str(), &pkey)); } @@ -437,7 +437,8 @@ fn simple_bolt12_send_receive() { std::thread::sleep(std::time::Duration::from_secs(1)); let expected_amount_msat = 100_000_000; - let offer = node_b.bolt12_payment().receive(expected_amount_msat, "asdf", Some(1)).unwrap(); + let offer = + node_b.bolt12_payment().receive(expected_amount_msat, "asdf", None, Some(1)).unwrap(); let expected_quantity = Some(1); let expected_payer_note = Some("Test".to_string()); let payment_id = node_a @@ -491,7 +492,7 @@ fn 
simple_bolt12_send_receive() { let offer_amount_msat = 100_000_000; let less_than_offer_amount = offer_amount_msat - 10_000; let expected_amount_msat = offer_amount_msat + 10_000; - let offer = node_b.bolt12_payment().receive(offer_amount_msat, "asdf", Some(1)).unwrap(); + let offer = node_b.bolt12_payment().receive(offer_amount_msat, "asdf", None, Some(1)).unwrap(); let expected_quantity = Some(1); let expected_payer_note = Some("Test".to_string()); assert!(node_a @@ -642,7 +643,7 @@ fn generate_bip21_uri() { match uqr_payment.clone() { Ok(ref uri) => { println!("Generated URI: {}", uri); - assert!(uri.contains("BITCOIN:")); + assert!(uri.contains("bitcoin:")); assert!(uri.contains("lightning=")); assert!(uri.contains("lno=")); }, From 176b5c6bd45be6e073435d941cf53f269eca69ce Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 14 Oct 2024 16:56:37 +0200 Subject: [PATCH 063/127] Re-add `block_in_place` in `stop` ... which somehow was removed as part of the recent refactoring. --- src/lib.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 4a7d081c5..67e49fd38 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -846,12 +846,14 @@ impl Node { // FIXME: For now, we wait up to 100 secs (BDK_WALLET_SYNC_TIMEOUT_SECS + 10) to allow // event handling to exit gracefully even if it was blocked on the BDK wallet syncing. We // should drop this considerably post upgrading to BDK 1.0. - let timeout_res = runtime.block_on(async { - tokio::time::timeout( - Duration::from_secs(100), - event_handling_stopped_receiver.changed(), - ) - .await + let timeout_res = tokio::task::block_in_place(move || { + runtime.block_on(async { + tokio::time::timeout( + Duration::from_secs(100), + event_handling_stopped_receiver.changed(), + ) + .await + }) }); match timeout_res { From 789dbdbb572aa2759a4127c841401a3239d22997 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Mon, 14 Oct 2024 23:11:59 -0700 Subject: [PATCH 064/127] Upgrade LDK to v0.0.125 Since LDK v0.0.124 was yanked. 
--- Cargo.toml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 89443e031..081e2eeb2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,13 +28,13 @@ panic = 'abort' # Abort on panic default = [] [dependencies] -lightning = { version = "0.0.124", features = ["std"] } +lightning = { version = "0.0.125", features = ["std"] } lightning-invoice = { version = "0.32.0" } -lightning-net-tokio = { version = "0.0.124" } -lightning-persister = { version = "0.0.124" } -lightning-background-processor = { version = "0.0.124", features = ["futures"] } -lightning-rapid-gossip-sync = { version = "0.0.124" } -lightning-transaction-sync = { version = "0.0.124", features = ["esplora-async-https", "time"] } +lightning-net-tokio = { version = "0.0.125" } +lightning-persister = { version = "0.0.125" } +lightning-background-processor = { version = "0.0.125", features = ["futures"] } +lightning-rapid-gossip-sync = { version = "0.0.125" } +lightning-transaction-sync = { version = "0.0.125", features = ["esplora-async-https", "time"] } lightning-liquidity = { version = "0.1.0-alpha.5", features = ["std"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std"] } @@ -80,7 +80,7 @@ prost = { version = "0.11.6", default-features = false} winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { version = "0.0.124", features = ["std", "_test_utils"] } +lightning = { version = "0.0.125", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } electrum-client = { version = "0.21.0", default-features = true } bitcoincore-rpc = { version = "0.19.0", default-features = false } From 3ef3c3e8be1d1005b91c7325719565dddc8fd227 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Tue, 15 Oct 2024 00:25:41 -0700 Subject: [PATCH 065/127] Upgrade lightning-liquidity to v0.1.0-alpha.6 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 081e2eeb2..3d9310ec6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,7 @@ lightning-persister = { version = "0.0.125" } lightning-background-processor = { version = "0.0.125", features = ["futures"] } lightning-rapid-gossip-sync = { version = "0.0.125" } lightning-transaction-sync = { version = "0.0.125", features = ["esplora-async-https", "time"] } -lightning-liquidity = { version = "0.1.0-alpha.5", features = ["std"] } +lightning-liquidity = { version = "0.1.0-alpha.6", features = ["std"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std"] } #lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main" } From 0265c2d290a0568ef502f9f2974797632b3327f6 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 2 Oct 2024 10:44:30 +0200 Subject: [PATCH 066/127] Add `ChainSource` enum .. which will allow us to switch between different chain sources. --- src/builder.rs | 7 +++---- src/chain/mod.rs | 31 +++++++++++++++++++++++++++++++ src/config.rs | 6 ------ src/lib.rs | 1 + 4 files changed, 35 insertions(+), 10 deletions(-) create mode 100644 src/chain/mod.rs diff --git a/src/builder.rs b/src/builder.rs index f6b201c54..234e8521e 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -5,10 +5,9 @@ // http://opensource.org/licenses/MIT>, at your option. 
You may not use this file except in // accordance with one or both of these licenses. -use crate::config::{ - default_user_config, Config, DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, DEFAULT_ESPLORA_SERVER_URL, - WALLET_KEYS_SEED_LEN, -}; +use crate::chain::{DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, DEFAULT_ESPLORA_SERVER_URL}; +use crate::config::{default_user_config, Config, WALLET_KEYS_SEED_LEN}; + use crate::connection::ConnectionManager; use crate::event::EventQueue; use crate::fee_estimator::OnchainFeeEstimator; diff --git a/src/chain/mod.rs b/src/chain/mod.rs new file mode 100644 index 000000000..f0bdb6347 --- /dev/null +++ b/src/chain/mod.rs @@ -0,0 +1,31 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use crate::logger::FilesystemLogger; + +use esplora_client::AsyncClient as EsploraAsyncClient; + +use std::sync::Arc; + +// The default Esplora server we're using. +pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; + +// The default Esplora client timeout we're using. +pub(crate) const DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS: u64 = 10; + +pub(crate) enum ChainSource { + Esplora { esplora_client: EsploraAsyncClient, logger: Arc }, +} + +impl ChainSource { + pub(crate) fn new_esplora(server_url: String, logger: Arc) -> Self { + let mut client_builder = esplora_client::Builder::new(&server_url.clone()); + client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + let esplora_client = client_builder.build_async().unwrap(); + Self::Esplora { esplora_client, logger } + } +} diff --git a/src/config.rs b/src/config.rs index b69e73ecf..eccb3d437 100644 --- a/src/config.rs +++ b/src/config.rs @@ -34,12 +34,6 @@ pub(crate) const BDK_CLIENT_STOP_GAP: usize = 20; // The number of concurrent requests made against the API provider. pub(crate) const BDK_CLIENT_CONCURRENCY: usize = 4; -// The default Esplora server we're using. -pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; - -// The default Esplora client timeout we're using. -pub(crate) const DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS: u64 = 10; - // The timeout after which we abandon retrying failed payments. pub(crate) const LDK_PAYMENT_RETRY_TIMEOUT: Duration = Duration::from_secs(10); diff --git a/src/lib.rs b/src/lib.rs index 67e49fd38..7d786b854 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -74,6 +74,7 @@ mod balance; mod builder; +mod chain; mod config; mod connection; mod error; From da658350d2d5613e538e75db853272bfe0e75c85 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 2 Oct 2024 11:28:28 +0200 Subject: [PATCH 067/127] Move on-chain syncing to `ChainSource` --- src/builder.rs | 123 ++++++++++++++++++++-------------- src/chain/mod.rs | 143 +++++++++++++++++++++++++++++++++++++-- src/lib.rs | 10 +-- src/wallet/mod.rs | 167 +++++++--------------------------------------- 4 files changed, 241 insertions(+), 202 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 234e8521e..8e6f8e2b2 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -5,7 +5,7 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use crate::chain::{DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, DEFAULT_ESPLORA_SERVER_URL}; +use crate::chain::{ChainSource, DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, DEFAULT_ESPLORA_SERVER_URL}; use crate::config::{default_user_config, Config, WALLET_KEYS_SEED_LEN}; use crate::connection::ConnectionManager; @@ -585,58 +585,78 @@ fn build_with_store_internal( })?, }; - let (esplora_client, tx_sync, tx_broadcaster, fee_estimator) = match chain_data_source_config { - Some(ChainDataSourceConfig::Esplora(server_url)) => { - let mut client_builder = esplora_client::Builder::new(&server_url.clone()); - client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - let esplora_client = client_builder.build_async().unwrap(); - let tx_sync = Arc::new(EsploraSyncClient::from_client( - esplora_client.clone(), - Arc::clone(&logger), - )); - let tx_broadcaster = Arc::new(TransactionBroadcaster::new( - tx_sync.client().clone(), - Arc::clone(&logger), - )); - let fee_estimator = Arc::new(OnchainFeeEstimator::new( - tx_sync.client().clone(), - Arc::clone(&config), - Arc::clone(&logger), - )); - (esplora_client, tx_sync, tx_broadcaster, fee_estimator) - }, - None => { - // Default to Esplora client. - let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); - let mut client_builder = esplora_client::Builder::new(&server_url); - client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - let esplora_client = client_builder.build_async().unwrap(); - let tx_sync = Arc::new(EsploraSyncClient::from_client( - esplora_client.clone(), - Arc::clone(&logger), - )); - let tx_broadcaster = Arc::new(TransactionBroadcaster::new( - tx_sync.client().clone(), - Arc::clone(&logger), - )); - let fee_estimator = Arc::new(OnchainFeeEstimator::new( - tx_sync.client().clone(), - Arc::clone(&config), - Arc::clone(&logger), - )); - (esplora_client, tx_sync, tx_broadcaster, fee_estimator) - }, - }; + let (wallet, chain_source, tx_sync, tx_broadcaster, fee_estimator) = + match chain_data_source_config { + Some(ChainDataSourceConfig::Esplora(server_url)) => { + let mut client_builder = esplora_client::Builder::new(&server_url.clone()); + client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + let esplora_client = client_builder.build_async().unwrap(); + let tx_sync = + Arc::new(EsploraSyncClient::from_client(esplora_client, Arc::clone(&logger))); + let tx_broadcaster = Arc::new(TransactionBroadcaster::new( + tx_sync.client().clone(), + Arc::clone(&logger), + )); + let fee_estimator = Arc::new(OnchainFeeEstimator::new( + tx_sync.client().clone(), + Arc::clone(&config), + Arc::clone(&logger), + )); + + let wallet = Arc::new(Wallet::new( + bdk_wallet, + wallet_persister, + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + Arc::clone(&logger), + )); + + let chain_source = Arc::new(ChainSource::new_esplora( + server_url.clone(), + Arc::clone(&wallet), + Arc::clone(&logger), + )); + (wallet, chain_source, tx_sync, tx_broadcaster, fee_estimator) + }, + None => { + // Default to Esplora client. 
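+				// This arm mirrors the explicit Esplora arm above, just with
+				// DEFAULT_ESPLORA_SERVER_URL substituted for a user-provided server URL.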
+ let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); + let mut client_builder = esplora_client::Builder::new(&server_url); + client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + let esplora_client = client_builder.build_async().unwrap(); + let tx_sync = Arc::new(EsploraSyncClient::from_client( + esplora_client.clone(), + Arc::clone(&logger), + )); + let tx_broadcaster = Arc::new(TransactionBroadcaster::new( + tx_sync.client().clone(), + Arc::clone(&logger), + )); + let fee_estimator = Arc::new(OnchainFeeEstimator::new( + tx_sync.client().clone(), + Arc::clone(&config), + Arc::clone(&logger), + )); + + let wallet = Arc::new(Wallet::new( + bdk_wallet, + wallet_persister, + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + Arc::clone(&logger), + )); + + let chain_source = Arc::new(ChainSource::new_esplora( + server_url.clone(), + Arc::clone(&wallet), + Arc::clone(&logger), + )); + + (wallet, chain_source, tx_sync, tx_broadcaster, fee_estimator) + }, + }; let runtime = Arc::new(RwLock::new(None)); - let wallet = Arc::new(Wallet::new( - bdk_wallet, - wallet_persister, - esplora_client, - Arc::clone(&tx_broadcaster), - Arc::clone(&fee_estimator), - Arc::clone(&logger), - )); // Initialize the ChainMonitor let chain_monitor: Arc = Arc::new(chainmonitor::ChainMonitor::new( @@ -1012,6 +1032,7 @@ fn build_with_store_internal( event_handling_stopped_sender, config, wallet, + chain_source, tx_sync, tx_broadcaster, fee_estimator, diff --git a/src/chain/mod.rs b/src/chain/mod.rs index f0bdb6347..1b1f1c938 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -5,11 +5,17 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::logger::FilesystemLogger; +use crate::config::{BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS}; +use crate::logger::{log_error, log_info, FilesystemLogger, Logger}; +use crate::types::Wallet; +use crate::Error; + +use bdk_esplora::EsploraAsyncExt; use esplora_client::AsyncClient as EsploraAsyncClient; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; +use std::time::Duration; // The default Esplora server we're using. pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; @@ -17,15 +23,142 @@ pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/ap // The default Esplora client timeout we're using. pub(crate) const DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS: u64 = 10; +pub(crate) enum WalletSyncStatus { + Completed, + InProgress { subscribers: tokio::sync::broadcast::Sender> }, +} + +impl WalletSyncStatus { + fn register_or_subscribe_pending_sync( + &mut self, + ) -> Option>> { + match self { + WalletSyncStatus::Completed => { + // We're first to register for a sync. + let (tx, _) = tokio::sync::broadcast::channel(1); + *self = WalletSyncStatus::InProgress { subscribers: tx }; + None + }, + WalletSyncStatus::InProgress { subscribers } => { + // A sync is in-progress, we subscribe. + let rx = subscribers.subscribe(); + Some(rx) + }, + } + } + + fn propagate_result_to_subscribers(&mut self, res: Result<(), Error>) { + // Send the notification to any other tasks that might be waiting on it by now. + { + match self { + WalletSyncStatus::Completed => { + // No sync in-progress, do nothing. + return; + }, + WalletSyncStatus::InProgress { subscribers } => { + // A sync is in-progress, we notify subscribers. 
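+					// Only send if somebody is still listening: `broadcast::Sender::send` errors when
+					// there are no active receivers, which we'd otherwise flag as a bug below.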
+ if subscribers.receiver_count() > 0 { + match subscribers.send(res) { + Ok(_) => (), + Err(e) => { + debug_assert!( + false, + "Failed to send wallet sync result to subscribers: {:?}", + e + ); + }, + } + } + *self = WalletSyncStatus::Completed; + }, + } + } + } +} + pub(crate) enum ChainSource { - Esplora { esplora_client: EsploraAsyncClient, logger: Arc }, + Esplora { + esplora_client: EsploraAsyncClient, + onchain_wallet: Arc, + onchain_wallet_sync_status: Mutex, + logger: Arc, + }, } impl ChainSource { - pub(crate) fn new_esplora(server_url: String, logger: Arc) -> Self { + pub(crate) fn new_esplora( + server_url: String, onchain_wallet: Arc, logger: Arc, + ) -> Self { let mut client_builder = esplora_client::Builder::new(&server_url.clone()); client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); let esplora_client = client_builder.build_async().unwrap(); - Self::Esplora { esplora_client, logger } + let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + Self::Esplora { esplora_client, onchain_wallet, onchain_wallet_sync_status, logger } + } + + pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { + match self { + Self::Esplora { + esplora_client, + onchain_wallet, + onchain_wallet_sync_status, + logger, + .. + } => { + let receiver_res = { + let mut status_lock = onchain_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(logger, "Failed to receive wallet sync result: {:?}", e); + Error::WalletOperationFailed + })?; + } + + let res = { + let full_scan_request = onchain_wallet.get_full_scan_request(); + + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + esplora_client.full_scan( + full_scan_request, + BDK_CLIENT_STOP_GAP, + BDK_CLIENT_CONCURRENCY, + ), + ); + + match wallet_sync_timeout_fut.await { + Ok(res) => match res { + Ok(update) => onchain_wallet.apply_update(update), + Err(e) => match *e { + esplora_client::Error::Reqwest(he) => { + log_error!( + logger, + "Sync failed due to HTTP connection error: {}", + he + ); + Err(Error::WalletOperationFailed) + }, + _ => { + log_error!(logger, "Sync failed due to Esplora error: {}", e); + Err(Error::WalletOperationFailed) + }, + }, + }, + Err(e) => { + log_error!(logger, "On-chain wallet sync timed out: {}", e); + Err(Error::WalletOperationTimeout) + }, + } + }; + + onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + }, + } } } diff --git a/src/lib.rs b/src/lib.rs index 7d786b854..e1bb3e374 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -120,6 +120,7 @@ pub use builder::BuildError; #[cfg(not(feature = "uniffi"))] pub use builder::NodeBuilder as Builder; +use chain::ChainSource; use config::{ default_user_config, may_announce_channel, LDK_WALLET_SYNC_TIMEOUT_SECS, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, @@ -180,6 +181,7 @@ pub struct Node { event_handling_stopped_sender: tokio::sync::watch::Sender<()>, config: Arc, wallet: Arc, + chain_source: Arc, tx_sync: Arc>>, tx_broadcaster: Arc, fee_estimator: Arc, @@ -274,7 +276,7 @@ impl Node { })?; // Setup wallet sync - let wallet = Arc::clone(&self.wallet); + let chain_source = Arc::clone(&self.chain_source); let sync_logger = Arc::clone(&self.logger); let 
sync_onchain_wallet_timestamp = Arc::clone(&self.latest_onchain_wallet_sync_timestamp); let mut stop_sync = self.stop_sender.subscribe(); @@ -298,7 +300,7 @@ impl Node { } _ = onchain_wallet_sync_interval.tick() => { let now = Instant::now(); - match wallet.sync().await { + match chain_source.sync_onchain_wallet().await { Ok(()) => { log_trace!( sync_logger, @@ -1370,7 +1372,7 @@ impl Node { return Err(Error::NotRunning); } - let wallet = Arc::clone(&self.wallet); + let chain_source = Arc::clone(&self.chain_source); let tx_sync = Arc::clone(&self.tx_sync); let sync_cman = Arc::clone(&self.channel_manager); let archive_cman = Arc::clone(&self.channel_manager); @@ -1396,7 +1398,7 @@ impl Node { let now = Instant::now(); // We don't add an additional timeout here, as `Wallet::sync` already returns // after a timeout. - match wallet.sync().await { + match chain_source.sync_onchain_wallet().await { Ok(()) => { log_info!( sync_logger, diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index b1c053f66..88d057cca 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -9,7 +9,6 @@ use persist::KVStoreWalletPersister; use crate::logger::{log_error, log_info, log_trace, Logger}; -use crate::config::{BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS}; use crate::fee_estimator::{ConfirmationTarget, FeeEstimator}; use crate::Error; @@ -26,9 +25,9 @@ use lightning::sign::{ use lightning::util::message_signing; use lightning_invoice::RawBolt11Invoice; +use bdk_chain::spk_client::FullScanRequest; use bdk_chain::ChainPosition; -use bdk_esplora::EsploraAsyncExt; -use bdk_wallet::{KeychainKind, PersistedWallet, SignOptions}; +use bdk_wallet::{KeychainKind, PersistedWallet, SignOptions, Update}; use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR; use bitcoin::blockdata::locktime::absolute::LockTime; @@ -42,20 +41,12 @@ use bitcoin::{ Amount, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, WitnessProgram, WitnessVersion, }; -use esplora_client::AsyncClient as EsploraAsyncClient; - -use std::ops::{Deref, DerefMut}; +use std::ops::Deref; use std::sync::{Arc, Mutex}; -use std::time::Duration; pub(crate) mod persist; pub(crate) mod ser; -enum WalletSyncStatus { - Completed, - InProgress { subscribers: tokio::sync::broadcast::Sender> }, -} - pub(crate) struct Wallet where B::Target: BroadcasterInterface, @@ -65,11 +56,8 @@ where // A BDK on-chain wallet. inner: Mutex>, persister: Mutex, - esplora_client: EsploraAsyncClient, broadcaster: B, fee_estimator: E, - // A Mutex holding the current sync status. 
- sync_status: Mutex, logger: L, } @@ -81,86 +69,34 @@ where { pub(crate) fn new( wallet: bdk_wallet::PersistedWallet, - wallet_persister: KVStoreWalletPersister, esplora_client: EsploraAsyncClient, - broadcaster: B, fee_estimator: E, logger: L, + wallet_persister: KVStoreWalletPersister, broadcaster: B, fee_estimator: E, logger: L, ) -> Self { let inner = Mutex::new(wallet); let persister = Mutex::new(wallet_persister); - let sync_status = Mutex::new(WalletSyncStatus::Completed); - Self { inner, persister, esplora_client, broadcaster, fee_estimator, sync_status, logger } + Self { inner, persister, broadcaster, fee_estimator, logger } } - pub(crate) async fn sync(&self) -> Result<(), Error> { - if let Some(mut sync_receiver) = self.register_or_subscribe_pending_sync() { - log_info!(self.logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); - Error::WalletOperationFailed - })?; - } - - let res = { - let full_scan_request = self.inner.lock().unwrap().start_full_scan().build(); - - let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - self.esplora_client.full_scan( - full_scan_request, - BDK_CLIENT_STOP_GAP, - BDK_CLIENT_CONCURRENCY, - ), - ); - - match wallet_sync_timeout_fut.await { - Ok(res) => match res { - Ok(update) => { - let mut locked_wallet = self.inner.lock().unwrap(); - match locked_wallet.apply_update(update) { - Ok(()) => { - let mut locked_persister = self.persister.lock().unwrap(); - locked_wallet.persist(&mut locked_persister).map_err(|e| { - log_error!(self.logger, "Failed to persist wallet: {}", e); - Error::PersistenceFailed - })?; - - Ok(()) - }, - Err(e) => { - log_error!( - self.logger, - "Sync failed due to chain connection error: {}", - e - ); - Err(Error::WalletOperationFailed) - }, - } - }, - Err(e) => match *e { - esplora_client::Error::Reqwest(he) => { - log_error!( - self.logger, - "Sync failed due to HTTP connection error: {}", - he - ); - Err(Error::WalletOperationFailed) - }, - _ => { - log_error!(self.logger, "Sync failed due to Esplora error: {}", e); - Err(Error::WalletOperationFailed) - }, - }, - }, - Err(e) => { - log_error!(self.logger, "On-chain wallet sync timed out: {}", e); - Err(Error::WalletOperationTimeout) - }, - } - }; - - self.propagate_result_to_subscribers(res); + pub(crate) fn get_full_scan_request(&self) -> FullScanRequest { + self.inner.lock().unwrap().start_full_scan().build() + } - res + pub(crate) fn apply_update(&self, update: impl Into) -> Result<(), Error> { + let mut locked_wallet = self.inner.lock().unwrap(); + match locked_wallet.apply_update(update) { + Ok(()) => { + let mut locked_persister = self.persister.lock().unwrap(); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + + Ok(()) + }, + Err(e) => { + log_error!(self.logger, "Sync failed due to chain connection error: {}", e); + Err(Error::WalletOperationFailed) + }, + } } pub(crate) fn create_funding_transaction( @@ -343,59 +279,6 @@ where Ok(txid) } - - fn register_or_subscribe_pending_sync( - &self, - ) -> Option>> { - let mut sync_status_lock = self.sync_status.lock().unwrap(); - match sync_status_lock.deref_mut() { - WalletSyncStatus::Completed => { - // We're first to register for a sync. 
- let (tx, _) = tokio::sync::broadcast::channel(1); - *sync_status_lock = WalletSyncStatus::InProgress { subscribers: tx }; - None - }, - WalletSyncStatus::InProgress { subscribers } => { - // A sync is in-progress, we subscribe. - let rx = subscribers.subscribe(); - Some(rx) - }, - } - } - - fn propagate_result_to_subscribers(&self, res: Result<(), Error>) { - // Send the notification to any other tasks that might be waiting on it by now. - { - let mut sync_status_lock = self.sync_status.lock().unwrap(); - match sync_status_lock.deref_mut() { - WalletSyncStatus::Completed => { - // No sync in-progress, do nothing. - return; - }, - WalletSyncStatus::InProgress { subscribers } => { - // A sync is in-progress, we notify subscribers. - if subscribers.receiver_count() > 0 { - match subscribers.send(res) { - Ok(_) => (), - Err(e) => { - debug_assert!( - false, - "Failed to send wallet sync result to subscribers: {:?}", - e - ); - log_error!( - self.logger, - "Failed to send wallet sync result to subscribers: {:?}", - e - ); - }, - } - } - *sync_status_lock = WalletSyncStatus::Completed; - }, - } - } - } } impl WalletSource for Wallet From 4da77eb42d350395added4455a073ba652adcd27 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 2 Oct 2024 11:36:09 +0200 Subject: [PATCH 068/127] Move Lightning wallet syncing to `ChainSource` --- src/builder.rs | 130 +++++++++++++++++++++-------------------------- src/chain/mod.rs | 74 ++++++++++++++++++++++++++- src/io/utils.rs | 3 +- src/lib.rs | 126 +++++++++++++++++++-------------------------- src/types.rs | 4 +- 5 files changed, 186 insertions(+), 151 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 8e6f8e2b2..240f2c7ff 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -51,8 +51,6 @@ use lightning::util::sweep::OutputSweeper; use lightning_persister::fs_store::FilesystemStore; -use lightning_transaction_sync::EsploraSyncClient; - use lightning_liquidity::lsps2::client::LSPS2ClientConfig; use lightning_liquidity::{LiquidityClientConfig, LiquidityManager}; @@ -585,82 +583,71 @@ fn build_with_store_internal( })?, }; - let (wallet, chain_source, tx_sync, tx_broadcaster, fee_estimator) = - match chain_data_source_config { - Some(ChainDataSourceConfig::Esplora(server_url)) => { - let mut client_builder = esplora_client::Builder::new(&server_url.clone()); - client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - let esplora_client = client_builder.build_async().unwrap(); - let tx_sync = - Arc::new(EsploraSyncClient::from_client(esplora_client, Arc::clone(&logger))); - let tx_broadcaster = Arc::new(TransactionBroadcaster::new( - tx_sync.client().clone(), - Arc::clone(&logger), - )); - let fee_estimator = Arc::new(OnchainFeeEstimator::new( - tx_sync.client().clone(), - Arc::clone(&config), - Arc::clone(&logger), - )); + let (wallet, chain_source, tx_broadcaster, fee_estimator) = match chain_data_source_config { + Some(ChainDataSourceConfig::Esplora(server_url)) => { + let mut client_builder = esplora_client::Builder::new(&server_url.clone()); + client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + let esplora_client = client_builder.build_async().unwrap(); + let tx_broadcaster = + Arc::new(TransactionBroadcaster::new(esplora_client.clone(), Arc::clone(&logger))); + let fee_estimator = Arc::new(OnchainFeeEstimator::new( + esplora_client, + Arc::clone(&config), + Arc::clone(&logger), + )); - let wallet = Arc::new(Wallet::new( - bdk_wallet, - wallet_persister, - Arc::clone(&tx_broadcaster), - 
Arc::clone(&fee_estimator), - Arc::clone(&logger), - )); + let wallet = Arc::new(Wallet::new( + bdk_wallet, + wallet_persister, + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + Arc::clone(&logger), + )); - let chain_source = Arc::new(ChainSource::new_esplora( - server_url.clone(), - Arc::clone(&wallet), - Arc::clone(&logger), - )); - (wallet, chain_source, tx_sync, tx_broadcaster, fee_estimator) - }, - None => { - // Default to Esplora client. - let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); - let mut client_builder = esplora_client::Builder::new(&server_url); - client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - let esplora_client = client_builder.build_async().unwrap(); - let tx_sync = Arc::new(EsploraSyncClient::from_client( - esplora_client.clone(), - Arc::clone(&logger), - )); - let tx_broadcaster = Arc::new(TransactionBroadcaster::new( - tx_sync.client().clone(), - Arc::clone(&logger), - )); - let fee_estimator = Arc::new(OnchainFeeEstimator::new( - tx_sync.client().clone(), - Arc::clone(&config), - Arc::clone(&logger), - )); + let chain_source = Arc::new(ChainSource::new_esplora( + server_url.clone(), + Arc::clone(&wallet), + Arc::clone(&logger), + )); + (wallet, chain_source, tx_broadcaster, fee_estimator) + }, + None => { + // Default to Esplora client. + let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); + let mut client_builder = esplora_client::Builder::new(&server_url); + client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + let esplora_client = client_builder.build_async().unwrap(); + let tx_broadcaster = + Arc::new(TransactionBroadcaster::new(esplora_client.clone(), Arc::clone(&logger))); + let fee_estimator = Arc::new(OnchainFeeEstimator::new( + esplora_client, + Arc::clone(&config), + Arc::clone(&logger), + )); - let wallet = Arc::new(Wallet::new( - bdk_wallet, - wallet_persister, - Arc::clone(&tx_broadcaster), - Arc::clone(&fee_estimator), - Arc::clone(&logger), - )); + let wallet = Arc::new(Wallet::new( + bdk_wallet, + wallet_persister, + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + Arc::clone(&logger), + )); - let chain_source = Arc::new(ChainSource::new_esplora( - server_url.clone(), - Arc::clone(&wallet), - Arc::clone(&logger), - )); + let chain_source = Arc::new(ChainSource::new_esplora( + server_url.clone(), + Arc::clone(&wallet), + Arc::clone(&logger), + )); - (wallet, chain_source, tx_sync, tx_broadcaster, fee_estimator) - }, - }; + (wallet, chain_source, tx_broadcaster, fee_estimator) + }, + }; let runtime = Arc::new(RwLock::new(None)); // Initialize the ChainMonitor let chain_monitor: Arc = Arc::new(chainmonitor::ChainMonitor::new( - Some(Arc::clone(&tx_sync)), + Some(Arc::clone(&chain_source)), Arc::clone(&tx_broadcaster), Arc::clone(&logger), Arc::clone(&fee_estimator), @@ -876,7 +863,7 @@ fn build_with_store_internal( let liquidity_manager = Arc::new(LiquidityManager::new( Arc::clone(&keys_manager), Arc::clone(&channel_manager), - Some(Arc::clone(&tx_sync)), + Some(Arc::clone(&chain_source)), None, None, liquidity_client_config, @@ -944,7 +931,7 @@ fn build_with_store_internal( let output_sweeper = match io::utils::read_output_sweeper( Arc::clone(&tx_broadcaster), Arc::clone(&fee_estimator), - Arc::clone(&tx_sync), + Arc::clone(&chain_source), Arc::clone(&keys_manager), Arc::clone(&kv_store), Arc::clone(&logger), @@ -956,7 +943,7 @@ fn build_with_store_internal( channel_manager.current_best_block(), Arc::clone(&tx_broadcaster), Arc::clone(&fee_estimator), - 
Some(Arc::clone(&tx_sync)), + Some(Arc::clone(&chain_source)), Arc::clone(&keys_manager), Arc::clone(&keys_manager), Arc::clone(&kv_store), @@ -1033,7 +1020,6 @@ fn build_with_store_internal( config, wallet, chain_source, - tx_sync, tx_broadcaster, fee_estimator, event_queue, diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 1b1f1c938..160a31d2f 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -5,11 +5,18 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::config::{BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS}; +use crate::config::{ + BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, + LDK_WALLET_SYNC_TIMEOUT_SECS, +}; use crate::logger::{log_error, log_info, FilesystemLogger, Logger}; use crate::types::Wallet; use crate::Error; +use lightning::chain::{Confirm, Filter}; + +use lightning_transaction_sync::EsploraSyncClient; + use bdk_esplora::EsploraAsyncExt; use esplora_client::AsyncClient as EsploraAsyncClient; @@ -81,6 +88,8 @@ pub(crate) enum ChainSource { esplora_client: EsploraAsyncClient, onchain_wallet: Arc, onchain_wallet_sync_status: Mutex, + tx_sync: Arc>>, + lightning_wallet_sync_status: Mutex, logger: Arc, }, } @@ -92,8 +101,18 @@ impl ChainSource { let mut client_builder = esplora_client::Builder::new(&server_url.clone()); client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); let esplora_client = client_builder.build_async().unwrap(); + let tx_sync = + Arc::new(EsploraSyncClient::from_client(esplora_client.clone(), Arc::clone(&logger))); let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - Self::Esplora { esplora_client, onchain_wallet, onchain_wallet_sync_status, logger } + let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + Self::Esplora { + esplora_client, + onchain_wallet, + onchain_wallet_sync_status, + tx_sync, + lightning_wallet_sync_status, + logger, + } } pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { @@ -161,4 +180,55 @@ impl ChainSource { }, } } + + pub(crate) async fn sync_lightning_wallet( + &self, confirmables: Vec<&(dyn Confirm + Send + Sync)>, + ) -> Result<(), Error> { + match self { + Self::Esplora { tx_sync, lightning_wallet_sync_status, logger, .. } => { + let receiver_res = { + let mut status_lock = lightning_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(logger, "Failed to receive wallet sync result: {:?}", e); + Error::WalletOperationFailed + })?; + } + let res = { + let timeout_fut = tokio::time::timeout( + Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), + tx_sync.sync(confirmables), + ); + match timeout_fut.await { + Ok(res) => res.map_err(|_| Error::TxSyncFailed), + Err(e) => { + log_error!(logger, "Lightning wallet sync timed out: {}", e); + Err(Error::TxSyncTimeout) + }, + } + }; + + lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + }, + } + } +} + +impl Filter for ChainSource { + fn register_tx(&self, txid: &bitcoin::Txid, script_pubkey: &bitcoin::Script) { + match self { + Self::Esplora { tx_sync, .. 
} => tx_sync.register_tx(txid, script_pubkey), + } + } + fn register_output(&self, output: lightning::chain::WatchedOutput) { + match self { + Self::Esplora { tx_sync, .. } => tx_sync.register_output(output), + } + } } diff --git a/src/io/utils.rs b/src/io/utils.rs index f6fd10b41..366de152c 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -8,10 +8,11 @@ use super::*; use crate::config::WALLET_KEYS_SEED_LEN; +use crate::chain::ChainSource; use crate::logger::{log_error, FilesystemLogger}; use crate::peer_store::PeerStore; use crate::sweep::DeprecatedSpendableOutputInfo; -use crate::types::{Broadcaster, ChainSource, DynStore, FeeEstimator, KeysManager, Sweeper}; +use crate::types::{Broadcaster, DynStore, FeeEstimator, KeysManager, Sweeper}; use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; use crate::{Error, EventQueue, PaymentDetails}; diff --git a/src/lib.rs b/src/lib.rs index e1bb3e374..e25fee8f2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -122,8 +122,7 @@ pub use builder::NodeBuilder as Builder; use chain::ChainSource; use config::{ - default_user_config, may_announce_channel, LDK_WALLET_SYNC_TIMEOUT_SECS, - NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, + default_user_config, may_announce_channel, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, RGS_SYNC_INTERVAL, WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; @@ -157,8 +156,6 @@ pub use lightning::util::logger::Level as LogLevel; use lightning_background_processor::process_events_async; -use lightning_transaction_sync::EsploraSyncClient; - use bitcoin::secp256k1::PublicKey; use rand::Rng; @@ -182,7 +179,6 @@ pub struct Node { config: Arc, wallet: Arc, chain_source: Arc, - tx_sync: Arc>>, tx_broadcaster: Arc, fee_estimator: Arc, event_queue: Arc>>, @@ -372,7 +368,7 @@ impl Node { } }); - let tx_sync = Arc::clone(&self.tx_sync); + let chain_source = Arc::clone(&self.chain_source); let sync_cman = Arc::clone(&self.channel_manager); let archive_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); @@ -390,45 +386,40 @@ impl Node { wallet_sync_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); loop { tokio::select! 
{ - _ = stop_sync.changed() => { - log_trace!( - sync_logger, - "Stopping background syncing Lightning wallet.", - ); - return; - } - _ = wallet_sync_interval.tick() => { - let confirmables = vec![ - &*sync_cman as &(dyn Confirm + Sync + Send), - &*sync_cmon as &(dyn Confirm + Sync + Send), - &*sync_sweeper as &(dyn Confirm + Sync + Send), - ]; - let now = Instant::now(); - let timeout_fut = tokio::time::timeout(Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), tx_sync.sync(confirmables)); - match timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_trace!( - sync_logger, - "Background sync of Lightning wallet finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *sync_wallet_timestamp.write().unwrap() = unix_time_secs_opt; - - periodically_archive_fully_resolved_monitors( - Arc::clone(&archive_cman), - Arc::clone(&archive_cmon), - Arc::clone(&sync_monitor_archival_height) + _ = stop_sync.changed() => { + log_trace!( + sync_logger, + "Stopping background syncing Lightning wallet.", + ); + return; + } + _ = wallet_sync_interval.tick() => { + let confirmables = vec![ + &*sync_cman as &(dyn Confirm + Sync + Send), + &*sync_cmon as &(dyn Confirm + Sync + Send), + &*sync_sweeper as &(dyn Confirm + Sync + Send), + ]; + let now = Instant::now(); + + match chain_source.sync_lightning_wallet(confirmables).await { + Ok(()) => { + log_trace!( + sync_logger, + "Background sync of Lightning wallet finished in {}ms.", + now.elapsed().as_millis() ); - } - Err(e) => { - log_error!(sync_logger, "Background sync of Lightning wallet failed: {}", e) - } + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + *sync_wallet_timestamp.write().unwrap() = unix_time_secs_opt; + + periodically_archive_fully_resolved_monitors( + Arc::clone(&archive_cman), + Arc::clone(&archive_cmon), + Arc::clone(&sync_monitor_archival_height) + ); } Err(e) => { - log_error!(sync_logger, "Background sync of Lightning wallet timed out: {}", e) + log_error!(sync_logger, "Background sync of Lightning wallet failed: {}", e) } } } @@ -1373,7 +1364,6 @@ impl Node { } let chain_source = Arc::clone(&self.chain_source); - let tx_sync = Arc::clone(&self.tx_sync); let sync_cman = Arc::clone(&self.channel_manager); let archive_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); @@ -1440,40 +1430,30 @@ impl Node { } let now = Instant::now(); - let tx_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), - tx_sync.sync(confirmables), - ); - match tx_sync_timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_info!( - sync_logger, - "Sync of Lightning wallet finished in {}ms.", - now.elapsed().as_millis() - ); + match chain_source.sync_lightning_wallet(confirmables).await { + Ok(()) => { + log_info!( + sync_logger, + "Sync of Lightning wallet finished in {}ms.", + now.elapsed().as_millis() + ); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - *sync_wallet_timestamp.write().unwrap() = unix_time_secs_opt; + let unix_time_secs_opt = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .map(|d| d.as_secs()); + *sync_wallet_timestamp.write().unwrap() = unix_time_secs_opt; - periodically_archive_fully_resolved_monitors( - archive_cman, - archive_cmon, - sync_monitor_archival_height, - ); - Ok(()) - }, - Err(e) => { - log_error!(sync_logger, 
"Sync of Lightning wallet failed: {}", e); - Err(e.into()) - }, + periodically_archive_fully_resolved_monitors( + archive_cman, + archive_cmon, + sync_monitor_archival_height, + ); + Ok(()) }, Err(e) => { - log_error!(sync_logger, "Sync of Lightning wallet timed out: {}", e); - Err(Error::TxSyncTimeout) + log_error!(sync_logger, "Sync of Lightning wallet failed: {}", e); + Err(e.into()) }, } }, diff --git a/src/types.rs b/src/types.rs index 5005d93a6..7decafefc 100644 --- a/src/types.rs +++ b/src/types.rs @@ -5,6 +5,7 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use crate::chain::ChainSource; use crate::logger::FilesystemLogger; use crate::message_handler::NodeCustomMessageHandler; @@ -24,7 +25,6 @@ use lightning::util::persist::KVStore; use lightning::util::ser::{Readable, Writeable, Writer}; use lightning::util::sweep::OutputSweeper; use lightning_net_tokio::SocketDescriptor; -use lightning_transaction_sync::EsploraSyncClient; use bitcoin::secp256k1::PublicKey; use bitcoin::OutPoint; @@ -52,8 +52,6 @@ pub(crate) type PeerManager = lightning::ln::peer_handler::PeerManager< Arc, >; -pub(crate) type ChainSource = EsploraSyncClient>; - pub(crate) type LiquidityManager = lightning_liquidity::LiquidityManager, Arc, Arc>; From 9096382bd1a801669b1c3e108a22f9ff3c0ec3e8 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 2 Oct 2024 13:05:49 +0200 Subject: [PATCH 069/127] Prefactor: Move fee estimator defaults to util methods --- src/fee_estimator.rs | 121 ++++++++++++++++++++++++------------------- 1 file changed, 67 insertions(+), 54 deletions(-) diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs index 62b4b8882..d091b5f54 100644 --- a/src/fee_estimator.rs +++ b/src/fee_estimator.rs @@ -86,34 +86,9 @@ where return Err(Error::FeerateEstimationUpdateFailed); } - let confirmation_targets = vec![ - ConfirmationTarget::OnchainPayment, - ConfirmationTarget::ChannelFunding, - LdkConfirmationTarget::MaximumFeeEstimate.into(), - LdkConfirmationTarget::UrgentOnChainSweep.into(), - LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee.into(), - LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee.into(), - LdkConfirmationTarget::AnchorChannelFee.into(), - LdkConfirmationTarget::NonAnchorChannelFee.into(), - LdkConfirmationTarget::ChannelCloseMinimum.into(), - LdkConfirmationTarget::OutputSpendingFee.into(), - ]; - + let confirmation_targets = get_all_conf_targets(); for target in confirmation_targets { - let num_blocks = match target { - ConfirmationTarget::OnchainPayment => 6, - ConfirmationTarget::ChannelFunding => 12, - ConfirmationTarget::Lightning(ldk_target) => match ldk_target { - LdkConfirmationTarget::MaximumFeeEstimate => 1, - LdkConfirmationTarget::UrgentOnChainSweep => 6, - LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee => 1008, - LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => 144, - LdkConfirmationTarget::AnchorChannelFee => 1008, - LdkConfirmationTarget::NonAnchorChannelFee => 12, - LdkConfirmationTarget::ChannelCloseMinimum => 144, - LdkConfirmationTarget::OutputSpendingFee => 12, - }, - }; + let num_blocks = get_num_block_defaults_for_target(target); let converted_estimate_sat_vb = esplora_client::convert_fee_rate(num_blocks, estimates.clone()).map_err(|e| { @@ -130,15 +105,7 @@ where // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that // require some post-estimation adjustments to the fee rates, which we do here. 
- let adjusted_fee_rate = match target { - ConfirmationTarget::Lightning( - LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee, - ) => { - let slightly_less_than_background = fee_rate.to_sat_per_kwu() - 250; - FeeRate::from_sat_per_kwu(slightly_less_than_background) - }, - _ => fee_rate, - }; + let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); let mut locked_fee_rate_cache = self.fee_rate_cache.write().unwrap(); locked_fee_rate_cache.insert(target, adjusted_fee_rate); @@ -160,24 +127,7 @@ where fn estimate_fee_rate(&self, confirmation_target: ConfirmationTarget) -> FeeRate { let locked_fee_rate_cache = self.fee_rate_cache.read().unwrap(); - let fallback_sats_kwu = match confirmation_target { - ConfirmationTarget::OnchainPayment => 5000, - ConfirmationTarget::ChannelFunding => 1000, - ConfirmationTarget::Lightning(ldk_target) => match ldk_target { - LdkConfirmationTarget::MaximumFeeEstimate => 8000, - LdkConfirmationTarget::UrgentOnChainSweep => 5000, - LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee => { - FEERATE_FLOOR_SATS_PER_KW - }, - LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => { - FEERATE_FLOOR_SATS_PER_KW - }, - LdkConfirmationTarget::AnchorChannelFee => 500, - LdkConfirmationTarget::NonAnchorChannelFee => 1000, - LdkConfirmationTarget::ChannelCloseMinimum => 500, - LdkConfirmationTarget::OutputSpendingFee => 1000, - }, - }; + let fallback_sats_kwu = get_fallback_rate_for_target(confirmation_target); // We'll fall back on this, if we really don't have any other information. let fallback_rate = FeeRate::from_sat_per_kwu(fallback_sats_kwu as u64); @@ -198,3 +148,66 @@ where self.estimate_fee_rate(confirmation_target.into()).to_sat_per_kwu() as u32 } } + +pub(crate) fn get_num_block_defaults_for_target(target: ConfirmationTarget) -> usize { + match target { + ConfirmationTarget::OnchainPayment => 6, + ConfirmationTarget::ChannelFunding => 12, + ConfirmationTarget::Lightning(ldk_target) => match ldk_target { + LdkConfirmationTarget::MaximumFeeEstimate => 1, + LdkConfirmationTarget::UrgentOnChainSweep => 6, + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee => 1008, + LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => 144, + LdkConfirmationTarget::AnchorChannelFee => 1008, + LdkConfirmationTarget::NonAnchorChannelFee => 12, + LdkConfirmationTarget::ChannelCloseMinimum => 144, + LdkConfirmationTarget::OutputSpendingFee => 12, + }, + } +} + +pub(crate) fn get_fallback_rate_for_target(target: ConfirmationTarget) -> u32 { + match target { + ConfirmationTarget::OnchainPayment => 5000, + ConfirmationTarget::ChannelFunding => 1000, + ConfirmationTarget::Lightning(ldk_target) => match ldk_target { + LdkConfirmationTarget::MaximumFeeEstimate => 8000, + LdkConfirmationTarget::UrgentOnChainSweep => 5000, + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee => FEERATE_FLOOR_SATS_PER_KW, + LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => FEERATE_FLOOR_SATS_PER_KW, + LdkConfirmationTarget::AnchorChannelFee => 500, + LdkConfirmationTarget::NonAnchorChannelFee => 1000, + LdkConfirmationTarget::ChannelCloseMinimum => 500, + LdkConfirmationTarget::OutputSpendingFee => 1000, + }, + } +} + +pub(crate) fn get_all_conf_targets() -> [ConfirmationTarget; 10] { + [ + ConfirmationTarget::OnchainPayment, + ConfirmationTarget::ChannelFunding, + LdkConfirmationTarget::MaximumFeeEstimate.into(), + LdkConfirmationTarget::UrgentOnChainSweep.into(), + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee.into(), + 
LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee.into(), + LdkConfirmationTarget::AnchorChannelFee.into(), + LdkConfirmationTarget::NonAnchorChannelFee.into(), + LdkConfirmationTarget::ChannelCloseMinimum.into(), + LdkConfirmationTarget::OutputSpendingFee.into(), + ] +} + +pub(crate) fn apply_post_estimation_adjustments( + target: ConfirmationTarget, estimated_rate: FeeRate, +) -> FeeRate { + match target { + ConfirmationTarget::Lightning( + LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee, + ) => { + let slightly_less_than_background = estimated_rate.to_sat_per_kwu() - 250; + FeeRate::from_sat_per_kwu(slightly_less_than_background) + }, + _ => estimated_rate, + } +} From 3105a288dc39e37f06886d56e6551c48d2f12fd7 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 2 Oct 2024 13:37:57 +0200 Subject: [PATCH 070/127] Move fee rate cache updating to `ChainSource` --- src/builder.rs | 18 ++++----- src/chain/mod.rs | 88 ++++++++++++++++++++++++++++++++++++++-- src/fee_estimator.rs | 95 +++++--------------------------------------- src/io/utils.rs | 5 ++- src/lib.rs | 18 ++++----- src/types.rs | 18 +++++---- 6 files changed, 121 insertions(+), 121 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 240f2c7ff..d7ca18892 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -590,11 +590,7 @@ fn build_with_store_internal( let esplora_client = client_builder.build_async().unwrap(); let tx_broadcaster = Arc::new(TransactionBroadcaster::new(esplora_client.clone(), Arc::clone(&logger))); - let fee_estimator = Arc::new(OnchainFeeEstimator::new( - esplora_client, - Arc::clone(&config), - Arc::clone(&logger), - )); + let fee_estimator = Arc::new(OnchainFeeEstimator::new()); let wallet = Arc::new(Wallet::new( bdk_wallet, @@ -607,8 +603,11 @@ fn build_with_store_internal( let chain_source = Arc::new(ChainSource::new_esplora( server_url.clone(), Arc::clone(&wallet), + Arc::clone(&fee_estimator), + Arc::clone(&config), Arc::clone(&logger), )); + (wallet, chain_source, tx_broadcaster, fee_estimator) }, None => { @@ -619,11 +618,7 @@ fn build_with_store_internal( let esplora_client = client_builder.build_async().unwrap(); let tx_broadcaster = Arc::new(TransactionBroadcaster::new(esplora_client.clone(), Arc::clone(&logger))); - let fee_estimator = Arc::new(OnchainFeeEstimator::new( - esplora_client, - Arc::clone(&config), - Arc::clone(&logger), - )); + let fee_estimator = Arc::new(OnchainFeeEstimator::new()); let wallet = Arc::new(Wallet::new( bdk_wallet, @@ -636,6 +631,8 @@ fn build_with_store_internal( let chain_source = Arc::new(ChainSource::new_esplora( server_url.clone(), Arc::clone(&wallet), + Arc::clone(&fee_estimator), + Arc::clone(&config), Arc::clone(&logger), )); @@ -1021,7 +1018,6 @@ fn build_with_store_internal( wallet, chain_source, tx_broadcaster, - fee_estimator, event_queue, channel_manager, chain_monitor, diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 160a31d2f..42b48f0c6 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -6,10 +6,14 @@ // accordance with one or both of these licenses. 
use crate::config::{ - BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, - LDK_WALLET_SYNC_TIMEOUT_SECS, + Config, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, + FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, }; -use crate::logger::{log_error, log_info, FilesystemLogger, Logger}; +use crate::fee_estimator::{ + apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, + OnchainFeeEstimator, +}; +use crate::logger::{log_error, log_info, log_trace, FilesystemLogger, Logger}; use crate::types::Wallet; use crate::Error; @@ -21,6 +25,9 @@ use bdk_esplora::EsploraAsyncExt; use esplora_client::AsyncClient as EsploraAsyncClient; +use bitcoin::{FeeRate, Network}; + +use std::collections::HashMap; use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -90,13 +97,16 @@ pub(crate) enum ChainSource { onchain_wallet_sync_status: Mutex, tx_sync: Arc>>, lightning_wallet_sync_status: Mutex, + fee_estimator: Arc, + config: Arc, logger: Arc, }, } impl ChainSource { pub(crate) fn new_esplora( - server_url: String, onchain_wallet: Arc, logger: Arc, + server_url: String, onchain_wallet: Arc, fee_estimator: Arc, + config: Arc, logger: Arc, ) -> Self { let mut client_builder = esplora_client::Builder::new(&server_url.clone()); client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); @@ -111,6 +121,8 @@ impl ChainSource { onchain_wallet_sync_status, tx_sync, lightning_wallet_sync_status, + fee_estimator, + config, logger, } } @@ -218,6 +230,74 @@ impl ChainSource { }, } } + + pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { + match self { + Self::Esplora { esplora_client, fee_estimator, config, logger, .. } => { + let estimates = tokio::time::timeout( + Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + esplora_client.get_fee_estimates(), + ) + .await + .map_err(|e| { + log_error!(logger, "Updating fee rate estimates timed out: {}", e); + Error::FeerateEstimationUpdateTimeout + })? + .map_err(|e| { + log_error!(logger, "Failed to retrieve fee rate estimates: {}", e); + Error::FeerateEstimationUpdateFailed + })?; + + if estimates.is_empty() && config.network == Network::Bitcoin { + // Ensure we fail if we didn't receive any estimates. + log_error!( + logger, + "Failed to retrieve fee rate estimates: empty fee estimates are dissallowed on Mainnet.", + ); + return Err(Error::FeerateEstimationUpdateFailed); + } + + let confirmation_targets = get_all_conf_targets(); + + let mut new_fee_rate_cache = HashMap::with_capacity(10); + for target in confirmation_targets { + let num_blocks = get_num_block_defaults_for_target(target); + + let converted_estimate_sat_vb = + esplora_client::convert_fee_rate(num_blocks, estimates.clone()).map_err( + |e| { + log_error!( + logger, + "Failed to convert fee rate estimates for {:?}: {}", + target, + e + ); + Error::FeerateEstimationUpdateFailed + }, + )?; + + let fee_rate = + FeeRate::from_sat_per_kwu((converted_estimate_sat_vb * 250.0) as u64); + + // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that + // require some post-estimation adjustments to the fee rates, which we do here. 
+ let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); + + new_fee_rate_cache.insert(target, adjusted_fee_rate); + + log_trace!( + logger, + "Fee rate estimation updated for {:?}: {} sats/kwu", + target, + adjusted_fee_rate.to_sat_per_kwu(), + ); + } + + fee_estimator.set_fee_rate_cache(new_fee_rate_cache); + Ok(()) + }, + } + } } impl Filter for ChainSource { diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs index d091b5f54..0ecc71586 100644 --- a/src/fee_estimator.rs +++ b/src/fee_estimator.rs @@ -5,23 +5,14 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::config::FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS; -use crate::logger::{log_error, log_trace, Logger}; -use crate::{Config, Error}; - use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; use lightning::chain::chaininterface::FeeEstimator as LdkFeeEstimator; use lightning::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW; use bitcoin::FeeRate; -use esplora_client::AsyncClient as EsploraClient; - -use bitcoin::Network; use std::collections::HashMap; -use std::ops::Deref; -use std::sync::{Arc, RwLock}; -use std::time::Duration; +use std::sync::RwLock; #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub(crate) enum ConfirmationTarget { @@ -43,87 +34,22 @@ impl From for ConfirmationTarget { } } -pub(crate) struct OnchainFeeEstimator -where - L::Target: Logger, -{ +pub(crate) struct OnchainFeeEstimator { fee_rate_cache: RwLock>, - esplora_client: EsploraClient, - config: Arc, - logger: L, } -impl OnchainFeeEstimator -where - L::Target: Logger, -{ - pub(crate) fn new(esplora_client: EsploraClient, config: Arc, logger: L) -> Self { +impl OnchainFeeEstimator { + pub(crate) fn new() -> Self { let fee_rate_cache = RwLock::new(HashMap::new()); - Self { fee_rate_cache, esplora_client, config, logger } + Self { fee_rate_cache } } - pub(crate) async fn update_fee_estimates(&self) -> Result<(), Error> { - let estimates = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), - self.esplora_client.get_fee_estimates(), - ) - .await - .map_err(|e| { - log_error!(self.logger, "Updating fee rate estimates timed out: {}", e); - Error::FeerateEstimationUpdateTimeout - })? - .map_err(|e| { - log_error!(self.logger, "Failed to retrieve fee rate estimates: {}", e); - Error::FeerateEstimationUpdateFailed - })?; - - if estimates.is_empty() && self.config.network == Network::Bitcoin { - // Ensure we fail if we didn't receive any estimates. - log_error!( - self.logger, - "Failed to retrieve fee rate estimates: empty fee estimates are dissallowed on Mainnet.", - ); - return Err(Error::FeerateEstimationUpdateFailed); - } - - let confirmation_targets = get_all_conf_targets(); - for target in confirmation_targets { - let num_blocks = get_num_block_defaults_for_target(target); - - let converted_estimate_sat_vb = - esplora_client::convert_fee_rate(num_blocks, estimates.clone()).map_err(|e| { - log_error!( - self.logger, - "Failed to convert fee rate estimates for {:?}: {}", - target, - e - ); - Error::FeerateEstimationUpdateFailed - })?; - - let fee_rate = FeeRate::from_sat_per_kwu((converted_estimate_sat_vb * 250.0) as u64); - - // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that - // require some post-estimation adjustments to the fee rates, which we do here. 
- let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); - - let mut locked_fee_rate_cache = self.fee_rate_cache.write().unwrap(); - locked_fee_rate_cache.insert(target, adjusted_fee_rate); - log_trace!( - self.logger, - "Fee rate estimation updated for {:?}: {} sats/kwu", - target, - adjusted_fee_rate.to_sat_per_kwu(), - ); - } - Ok(()) + pub(crate) fn set_fee_rate_cache(&self, fee_rate_cache: HashMap) { + *self.fee_rate_cache.write().unwrap() = fee_rate_cache; } } -impl FeeEstimator for OnchainFeeEstimator -where - L::Target: Logger, -{ +impl FeeEstimator for OnchainFeeEstimator { fn estimate_fee_rate(&self, confirmation_target: ConfirmationTarget) -> FeeRate { let locked_fee_rate_cache = self.fee_rate_cache.read().unwrap(); @@ -140,10 +66,7 @@ where } } -impl LdkFeeEstimator for OnchainFeeEstimator -where - L::Target: Logger, -{ +impl LdkFeeEstimator for OnchainFeeEstimator { fn get_est_sat_per_1000_weight(&self, confirmation_target: LdkConfirmationTarget) -> u32 { self.estimate_fee_rate(confirmation_target.into()).to_sat_per_kwu() as u32 } diff --git a/src/io/utils.rs b/src/io/utils.rs index 366de152c..411928495 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -9,10 +9,11 @@ use super::*; use crate::config::WALLET_KEYS_SEED_LEN; use crate::chain::ChainSource; +use crate::fee_estimator::OnchainFeeEstimator; use crate::logger::{log_error, FilesystemLogger}; use crate::peer_store::PeerStore; use crate::sweep::DeprecatedSpendableOutputInfo; -use crate::types::{Broadcaster, DynStore, FeeEstimator, KeysManager, Sweeper}; +use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; use crate::{Error, EventQueue, PaymentDetails}; @@ -220,7 +221,7 @@ where /// Read `OutputSweeper` state from the store. 
pub(crate) fn read_output_sweeper( - broadcaster: Arc, fee_estimator: Arc, + broadcaster: Arc, fee_estimator: Arc, chain_data_source: Arc, keys_manager: Arc, kv_store: Arc, logger: Arc, ) -> Result { diff --git a/src/lib.rs b/src/lib.rs index e25fee8f2..68561ffb3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -138,8 +138,8 @@ use payment::{ }; use peer_store::{PeerInfo, PeerStore}; use types::{ - Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, FeeEstimator, - Graph, KeysManager, OnionMessenger, PeerManager, Router, Scorer, Sweeper, Wallet, + Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, Graph, + KeysManager, OnionMessenger, PeerManager, Router, Scorer, Sweeper, Wallet, }; pub use types::{ChannelDetails, PeerDetails, UserChannelId}; @@ -180,7 +180,6 @@ pub struct Node { wallet: Arc, chain_source: Arc, tx_broadcaster: Arc, - fee_estimator: Arc, event_queue: Arc>>, channel_manager: Arc, chain_monitor: Arc, @@ -243,7 +242,7 @@ impl Node { ); // Block to ensure we update our fee rate cache once on startup - let fee_estimator = Arc::clone(&self.fee_estimator); + let chain_source = Arc::clone(&self.chain_source); let sync_logger = Arc::clone(&self.logger); let sync_fee_rate_update_timestamp = Arc::clone(&self.latest_fee_rate_cache_update_timestamp); @@ -251,7 +250,7 @@ impl Node { tokio::task::block_in_place(move || { runtime_ref.block_on(async move { let now = Instant::now(); - match fee_estimator.update_fee_estimates().await { + match chain_source.update_fee_rate_estimates().await { Ok(()) => { log_info!( sync_logger, @@ -323,7 +322,7 @@ impl Node { let mut stop_fee_updates = self.stop_sender.subscribe(); let fee_update_logger = Arc::clone(&self.logger); let fee_update_timestamp = Arc::clone(&self.latest_fee_rate_cache_update_timestamp); - let fee_estimator = Arc::clone(&self.fee_estimator); + let chain_source = Arc::clone(&self.chain_source); let fee_rate_cache_update_interval_secs = self.config.fee_rate_cache_update_interval_secs.max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); runtime.spawn(async move { @@ -344,7 +343,7 @@ impl Node { } _ = fee_rate_update_interval.tick() => { let now = Instant::now(); - match fee_estimator.update_fee_estimates().await { + match chain_source.update_fee_rate_estimates().await { Ok(()) => { log_trace!( fee_update_logger, @@ -1368,7 +1367,6 @@ impl Node { let archive_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); let archive_cmon = Arc::clone(&self.chain_monitor); - let fee_estimator = Arc::clone(&self.fee_estimator); let sync_sweeper = Arc::clone(&self.output_sweeper); let sync_logger = Arc::clone(&self.logger); let confirmables = vec![ @@ -1409,8 +1407,8 @@ impl Node { let now = Instant::now(); // We don't add an additional timeout here, as - // `FeeEstimator::update_fee_estimates` already returns after a timeout. - match fee_estimator.update_fee_estimates().await { + // `ChainSource::update_fee_estimates` already returns after a timeout. + match chain_source.update_fee_rate_estimates().await { Ok(()) => { log_info!( sync_logger, diff --git a/src/types.rs b/src/types.rs index 7decafefc..5b28d99b8 100644 --- a/src/types.rs +++ b/src/types.rs @@ -6,6 +6,7 @@ // accordance with one or both of these licenses. 
use crate::chain::ChainSource; +use crate::fee_estimator::OnchainFeeEstimator; use crate::logger::FilesystemLogger; use crate::message_handler::NodeCustomMessageHandler; @@ -37,7 +38,7 @@ pub(crate) type ChainMonitor = chainmonitor::ChainMonitor< InMemorySigner, Arc, Arc, - Arc, + Arc, Arc, Arc, >; @@ -61,20 +62,21 @@ pub(crate) type ChannelManager = lightning::ln::channelmanager::ChannelManager< Arc, Arc, Arc, - Arc, + Arc, Arc, Arc, >; pub(crate) type Broadcaster = crate::tx_broadcaster::TransactionBroadcaster>; -pub(crate) type FeeEstimator = crate::fee_estimator::OnchainFeeEstimator>; - pub(crate) type Wallet = - crate::wallet::Wallet, Arc, Arc>; + crate::wallet::Wallet, Arc, Arc>; -pub(crate) type KeysManager = - crate::wallet::WalletKeysManager, Arc, Arc>; +pub(crate) type KeysManager = crate::wallet::WalletKeysManager< + Arc, + Arc, + Arc, +>; pub(crate) type Router = DefaultRouter< Arc, @@ -123,7 +125,7 @@ pub(crate) type MessageRouter = lightning::onion_message::messenger::DefaultMess pub(crate) type Sweeper = OutputSweeper< Arc, Arc, - Arc, + Arc, Arc, Arc, Arc, From fd0c26ff438d741aa57e95209859e8189b8ae650 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 2 Oct 2024 13:58:21 +0200 Subject: [PATCH 071/127] Move broadcast queue processing to `ChainSource` --- src/builder.rs | 68 +++++++++++---------------------- src/chain/mod.rs | 89 ++++++++++++++++++++++++++++++++++++++++--- src/lib.rs | 4 +- src/tx_broadcaster.rs | 83 +++------------------------------------- 4 files changed, 114 insertions(+), 130 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index d7ca18892..820c86306 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -5,7 +5,7 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use crate::chain::{ChainSource, DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, DEFAULT_ESPLORA_SERVER_URL}; +use crate::chain::{ChainSource, DEFAULT_ESPLORA_SERVER_URL}; use crate::config::{default_user_config, Config, WALLET_KEYS_SEED_LEN}; use crate::connection::ConnectionManager; @@ -583,60 +583,36 @@ fn build_with_store_internal( })?, }; - let (wallet, chain_source, tx_broadcaster, fee_estimator) = match chain_data_source_config { - Some(ChainDataSourceConfig::Esplora(server_url)) => { - let mut client_builder = esplora_client::Builder::new(&server_url.clone()); - client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - let esplora_client = client_builder.build_async().unwrap(); - let tx_broadcaster = - Arc::new(TransactionBroadcaster::new(esplora_client.clone(), Arc::clone(&logger))); - let fee_estimator = Arc::new(OnchainFeeEstimator::new()); - - let wallet = Arc::new(Wallet::new( - bdk_wallet, - wallet_persister, - Arc::clone(&tx_broadcaster), - Arc::clone(&fee_estimator), - Arc::clone(&logger), - )); - - let chain_source = Arc::new(ChainSource::new_esplora( - server_url.clone(), - Arc::clone(&wallet), - Arc::clone(&fee_estimator), - Arc::clone(&config), - Arc::clone(&logger), - )); + let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger))); + let fee_estimator = Arc::new(OnchainFeeEstimator::new()); + let wallet = Arc::new(Wallet::new( + bdk_wallet, + wallet_persister, + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + Arc::clone(&logger), + )); - (wallet, chain_source, tx_broadcaster, fee_estimator) - }, + let chain_source = match chain_data_source_config { + Some(ChainDataSourceConfig::Esplora(server_url)) => Arc::new(ChainSource::new_esplora( + server_url.clone(), + Arc::clone(&wallet), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&config), + Arc::clone(&logger), + )), None => { // Default to Esplora client. 
let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); - let mut client_builder = esplora_client::Builder::new(&server_url); - client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - let esplora_client = client_builder.build_async().unwrap(); - let tx_broadcaster = - Arc::new(TransactionBroadcaster::new(esplora_client.clone(), Arc::clone(&logger))); - let fee_estimator = Arc::new(OnchainFeeEstimator::new()); - - let wallet = Arc::new(Wallet::new( - bdk_wallet, - wallet_persister, - Arc::clone(&tx_broadcaster), - Arc::clone(&fee_estimator), - Arc::clone(&logger), - )); - - let chain_source = Arc::new(ChainSource::new_esplora( + Arc::new(ChainSource::new_esplora( server_url.clone(), Arc::clone(&wallet), Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), Arc::clone(&config), Arc::clone(&logger), - )); - - (wallet, chain_source, tx_broadcaster, fee_estimator) + )) }, }; diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 42b48f0c6..12f6d0259 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -7,17 +7,18 @@ use crate::config::{ Config, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, - FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, + FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, }; use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, OnchainFeeEstimator, }; -use crate::logger::{log_error, log_info, log_trace, FilesystemLogger, Logger}; -use crate::types::Wallet; +use crate::logger::{log_bytes, log_error, log_info, log_trace, FilesystemLogger, Logger}; +use crate::types::{Broadcaster, Wallet}; use crate::Error; use lightning::chain::{Confirm, Filter}; +use lightning::util::ser::Writeable; use lightning_transaction_sync::EsploraSyncClient; @@ -98,6 +99,7 @@ pub(crate) enum ChainSource { tx_sync: Arc>>, lightning_wallet_sync_status: Mutex, fee_estimator: Arc, + tx_broadcaster: Arc, config: Arc, logger: Arc, }, @@ -106,9 +108,9 @@ pub(crate) enum ChainSource { impl ChainSource { pub(crate) fn new_esplora( server_url: String, onchain_wallet: Arc, fee_estimator: Arc, - config: Arc, logger: Arc, + tx_broadcaster: Arc, config: Arc, logger: Arc, ) -> Self { - let mut client_builder = esplora_client::Builder::new(&server_url.clone()); + let mut client_builder = esplora_client::Builder::new(&server_url); client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); let esplora_client = client_builder.build_async().unwrap(); let tx_sync = @@ -122,6 +124,7 @@ impl ChainSource { tx_sync, lightning_wallet_sync_status, fee_estimator, + tx_broadcaster, config, logger, } @@ -298,6 +301,82 @@ impl ChainSource { }, } } + + pub(crate) async fn process_broadcast_queue(&self) { + match self { + Self::Esplora { esplora_client, tx_broadcaster, logger, .. 
} => { + let mut receiver = tx_broadcaster.get_broadcast_queue().await; + while let Some(next_package) = receiver.recv().await { + for tx in &next_package { + let txid = tx.compute_txid(); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + esplora_client.broadcast(tx), + ); + match timeout_fut.await { + Ok(res) => match res { + Ok(()) => { + log_trace!( + logger, + "Successfully broadcast transaction {}", + txid + ); + }, + Err(e) => match e { + esplora_client::Error::Reqwest(err) => { + if err.status() == reqwest::StatusCode::from_u16(400).ok() { + // Ignore 400, as this just means bitcoind already knows the + // transaction. + // FIXME: We can further differentiate here based on the error + // message which will be available with rust-esplora-client 0.7 and + // later. + } else { + log_error!( + logger, + "Failed to broadcast due to HTTP connection error: {}", + err + ); + } + log_trace!( + logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + _ => { + log_error!( + logger, + "Failed to broadcast transaction {}: {}", + txid, + e + ); + log_trace!( + logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + }, + }, + Err(e) => { + log_error!( + logger, + "Failed to broadcast transaction due to timeout {}: {}", + txid, + e + ); + log_trace!( + logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + } + } + } + }, + } + } } impl Filter for ChainSource { diff --git a/src/lib.rs b/src/lib.rs index 68561ffb3..6b7d5e86c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -669,7 +669,7 @@ impl Node { } let mut stop_tx_bcast = self.stop_sender.subscribe(); - let tx_bcaster = Arc::clone(&self.tx_broadcaster); + let chain_source = Arc::clone(&self.chain_source); let tx_bcast_logger = Arc::clone(&self.logger); runtime.spawn(async move { // Every second we try to clear our broadcasting queue. @@ -685,7 +685,7 @@ impl Node { return; } _ = interval.tick() => { - tx_bcaster.process_queue().await; + chain_source.process_broadcast_queue().await; } } } diff --git a/src/tx_broadcaster.rs b/src/tx_broadcaster.rs index 37bd616dc..5aded03c6 100644 --- a/src/tx_broadcaster.rs +++ b/src/tx_broadcaster.rs @@ -5,22 +5,16 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use crate::config::TX_BROADCAST_TIMEOUT_SECS; -use crate::logger::{log_bytes, log_error, log_trace, Logger}; +use crate::logger::{log_error, Logger}; use lightning::chain::chaininterface::BroadcasterInterface; -use lightning::util::ser::Writeable; - -use esplora_client::AsyncClient as EsploraClient; use bitcoin::Transaction; -use reqwest::StatusCode; use tokio::sync::mpsc; -use tokio::sync::Mutex; +use tokio::sync::{Mutex, MutexGuard}; use std::ops::Deref; -use std::time::Duration; const BCAST_PACKAGE_QUEUE_SIZE: usize = 50; @@ -30,7 +24,6 @@ where { queue_sender: mpsc::Sender>, queue_receiver: Mutex>>, - esplora_client: EsploraClient, logger: L, } @@ -38,77 +31,13 @@ impl TransactionBroadcaster where L::Target: Logger, { - pub(crate) fn new(esplora_client: EsploraClient, logger: L) -> Self { + pub(crate) fn new(logger: L) -> Self { let (queue_sender, queue_receiver) = mpsc::channel(BCAST_PACKAGE_QUEUE_SIZE); - Self { queue_sender, queue_receiver: Mutex::new(queue_receiver), esplora_client, logger } + Self { queue_sender, queue_receiver: Mutex::new(queue_receiver), logger } } - pub(crate) async fn process_queue(&self) { - let mut receiver = self.queue_receiver.lock().await; - while let Some(next_package) = receiver.recv().await { - for tx in &next_package { - let txid = tx.compute_txid(); - let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), - self.esplora_client.broadcast(tx), - ); - match timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_trace!(self.logger, "Successfully broadcast transaction {}", txid); - }, - Err(e) => match e { - esplora_client::Error::Reqwest(err) => { - if err.status() == StatusCode::from_u16(400).ok() { - // Ignore 400, as this just means bitcoind already knows the - // transaction. - // FIXME: We can further differentiate here based on the error - // message which will be available with rust-esplora-client 0.7 and - // later. - } else { - log_error!( - self.logger, - "Failed to broadcast due to HTTP connection error: {}", - err - ); - } - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - _ => { - log_error!( - self.logger, - "Failed to broadcast transaction {}: {}", - txid, - e - ); - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - }, - }, - Err(e) => { - log_error!( - self.logger, - "Failed to broadcast transaction due to timeout {}: {}", - txid, - e - ); - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - } - } - } + pub(crate) async fn get_broadcast_queue(&self) -> MutexGuard>> { + self.queue_receiver.lock().await } } From d66edd48d3018abc48811ac800cda90db5f3aa6c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 3 Oct 2024 12:07:15 +0200 Subject: [PATCH 072/127] Move syncing tasks to `ChainSource` .. which also gives us the opportunity to simplify and DRY up the logic between background and manual syncing. 
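
For illustration, the consolidated loop boils down to racing a single stop
signal against one interval timer per periodic job. Below is a minimal,
self-contained sketch of that shape (the interval durations, task layout, and
`println!` placeholders are illustrative assumptions, not the actual sync
calls made by `ChainSource`):

    use std::time::Duration;

    #[tokio::main]
    async fn main() {
        // Stop signal, mirroring the `stop_sender`/`subscribe()` pattern used by the node.
        let (stop_tx, mut stop_rx) = tokio::sync::watch::channel(());

        let sync_task = tokio::spawn(async move {
            // One interval per periodic job; `Skip` avoids tick bursts after long delays.
            let mut onchain_interval = tokio::time::interval(Duration::from_secs(2));
            onchain_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
            let mut fee_rate_interval = tokio::time::interval(Duration::from_secs(3));
            fee_rate_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
            let mut lightning_interval = tokio::time::interval(Duration::from_secs(5));
            lightning_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);

            loop {
                tokio::select! {
                    _ = stop_rx.changed() => {
                        println!("Stopping background syncing.");
                        return;
                    }
                    _ = onchain_interval.tick() => println!("Would sync the on-chain wallet here."),
                    _ = fee_rate_interval.tick() => println!("Would update the fee rate cache here."),
                    _ = lightning_interval.tick() => println!("Would sync the Lightning wallet here."),
                }
            }
        });

        // Let the loop run briefly, then signal shutdown and wait for the task to exit.
        tokio::time::sleep(Duration::from_secs(7)).await;
        let _ = stop_tx.send(());
        let _ = sync_task.await;
    }

Keeping all of these jobs in one task owned by `ChainSource` (rather than one
spawned task per job, as before) keeps the stop-signal handling in a single
place and lets background and manual syncing share the same code paths.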
--- src/builder.rs | 26 +++-- src/chain/mod.rs | 251 +++++++++++++++++++++++++++++++++++++------- src/lib.rs | 266 ++--------------------------------------------- 3 files changed, 239 insertions(+), 304 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 820c86306..4905d97d3 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -552,6 +552,15 @@ fn build_with_store_internal( liquidity_source_config: Option<&LiquiditySourceConfig>, seed_bytes: [u8; 64], logger: Arc, kv_store: Arc, ) -> Result { + // Initialize the status fields. + let is_listening = Arc::new(AtomicBool::new(false)); + let latest_wallet_sync_timestamp = Arc::new(RwLock::new(None)); + let latest_onchain_wallet_sync_timestamp = Arc::new(RwLock::new(None)); + let latest_fee_rate_cache_update_timestamp = Arc::new(RwLock::new(None)); + let latest_rgs_snapshot_timestamp = Arc::new(RwLock::new(None)); + let latest_node_announcement_broadcast_timestamp = Arc::new(RwLock::new(None)); + let latest_channel_monitor_archival_height = Arc::new(RwLock::new(None)); + // Initialize the on-chain wallet and chain access let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { log_error!(logger, "Failed to derive master secret: {}", e); @@ -601,6 +610,10 @@ fn build_with_store_internal( Arc::clone(&tx_broadcaster), Arc::clone(&config), Arc::clone(&logger), + Arc::clone(&latest_wallet_sync_timestamp), + Arc::clone(&latest_onchain_wallet_sync_timestamp), + Arc::clone(&latest_fee_rate_cache_update_timestamp), + latest_channel_monitor_archival_height, )), None => { // Default to Esplora client. @@ -612,6 +625,10 @@ fn build_with_store_internal( Arc::clone(&tx_broadcaster), Arc::clone(&config), Arc::clone(&logger), + Arc::clone(&latest_wallet_sync_timestamp), + Arc::clone(&latest_onchain_wallet_sync_timestamp), + Arc::clone(&latest_fee_rate_cache_update_timestamp), + latest_channel_monitor_archival_height, )) }, }; @@ -978,14 +995,6 @@ fn build_with_store_internal( let (stop_sender, _) = tokio::sync::watch::channel(()); let (event_handling_stopped_sender, _) = tokio::sync::watch::channel(()); - let is_listening = Arc::new(AtomicBool::new(false)); - let latest_wallet_sync_timestamp = Arc::new(RwLock::new(None)); - let latest_onchain_wallet_sync_timestamp = Arc::new(RwLock::new(None)); - let latest_fee_rate_cache_update_timestamp = Arc::new(RwLock::new(None)); - let latest_rgs_snapshot_timestamp = Arc::new(RwLock::new(None)); - let latest_node_announcement_broadcast_timestamp = Arc::new(RwLock::new(None)); - let latest_channel_monitor_archival_height = Arc::new(RwLock::new(None)); - Ok(Node { runtime, stop_sender, @@ -1017,7 +1026,6 @@ fn build_with_store_internal( latest_fee_rate_cache_update_timestamp, latest_rgs_snapshot_timestamp, latest_node_announcement_broadcast_timestamp, - latest_channel_monitor_archival_height, }) } diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 12f6d0259..d4ce30994 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -7,14 +7,16 @@ use crate::config::{ Config, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, - FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, + FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, TX_BROADCAST_TIMEOUT_SECS, + WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, OnchainFeeEstimator, }; use 
crate::logger::{log_bytes, log_error, log_info, log_trace, FilesystemLogger, Logger}; -use crate::types::{Broadcaster, Wallet}; +use crate::types::{Broadcaster, ChainMonitor, ChannelManager, Sweeper, Wallet}; use crate::Error; use lightning::chain::{Confirm, Filter}; @@ -29,8 +31,8 @@ use esplora_client::AsyncClient as EsploraAsyncClient; use bitcoin::{FeeRate, Network}; use std::collections::HashMap; -use std::sync::{Arc, Mutex}; -use std::time::Duration; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; // The default Esplora server we're using. pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; @@ -102,6 +104,10 @@ pub(crate) enum ChainSource { tx_broadcaster: Arc, config: Arc, logger: Arc, + latest_wallet_sync_timestamp: Arc>>, + latest_onchain_wallet_sync_timestamp: Arc>>, + latest_fee_rate_cache_update_timestamp: Arc>>, + latest_channel_monitor_archival_height: Arc>>, }, } @@ -109,6 +115,10 @@ impl ChainSource { pub(crate) fn new_esplora( server_url: String, onchain_wallet: Arc, fee_estimator: Arc, tx_broadcaster: Arc, config: Arc, logger: Arc, + latest_wallet_sync_timestamp: Arc>>, + latest_onchain_wallet_sync_timestamp: Arc>>, + latest_fee_rate_cache_update_timestamp: Arc>>, + latest_channel_monitor_archival_height: Arc>>, ) -> Self { let mut client_builder = esplora_client::Builder::new(&server_url); client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); @@ -127,6 +137,71 @@ impl ChainSource { tx_broadcaster, config, logger, + latest_wallet_sync_timestamp, + latest_onchain_wallet_sync_timestamp, + latest_fee_rate_cache_update_timestamp, + latest_channel_monitor_archival_height, + } + } + + pub(crate) async fn continuously_sync_wallets( + &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, + channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) { + match self { + Self::Esplora { config, logger, .. } => { + // Setup syncing intervals + let onchain_wallet_sync_interval_secs = + config.onchain_wallet_sync_interval_secs.max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); + let mut onchain_wallet_sync_interval = + tokio::time::interval(Duration::from_secs(onchain_wallet_sync_interval_secs)); + onchain_wallet_sync_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + let fee_rate_cache_update_interval_secs = config + .fee_rate_cache_update_interval_secs + .max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); + let mut fee_rate_update_interval = + tokio::time::interval(Duration::from_secs(fee_rate_cache_update_interval_secs)); + // When starting up, we just blocked on updating, so skip the first tick. + fee_rate_update_interval.reset(); + fee_rate_update_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + let wallet_sync_interval_secs = + config.wallet_sync_interval_secs.max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); + let mut wallet_sync_interval = + tokio::time::interval(Duration::from_secs(wallet_sync_interval_secs)); + wallet_sync_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + // Start the syncing loop. + loop { + tokio::select! 
{ + _ = stop_sync_receiver.changed() => { + log_trace!( + logger, + "Stopping background syncing on-chain wallet.", + ); + return; + } + _ = onchain_wallet_sync_interval.tick() => { + let _ = self.sync_onchain_wallet().await; + } + _ = fee_rate_update_interval.tick() => { + let _ = self.update_fee_rate_estimates().await; + } + _ = wallet_sync_interval.tick() => { + let _ = self.sync_lightning_wallet( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&output_sweeper), + ).await; + } + } + } + }, } } @@ -137,6 +212,7 @@ impl ChainSource { onchain_wallet, onchain_wallet_sync_status, logger, + latest_onchain_wallet_sync_timestamp, .. } => { let receiver_res = { @@ -152,42 +228,60 @@ impl ChainSource { })?; } - let res = { - let full_scan_request = onchain_wallet.get_full_scan_request(); - - let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - esplora_client.full_scan( - full_scan_request, - BDK_CLIENT_STOP_GAP, - BDK_CLIENT_CONCURRENCY, - ), - ); + let res = + { + let full_scan_request = onchain_wallet.get_full_scan_request(); + + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + esplora_client.full_scan( + full_scan_request, + BDK_CLIENT_STOP_GAP, + BDK_CLIENT_CONCURRENCY, + ), + ); - match wallet_sync_timeout_fut.await { - Ok(res) => match res { - Ok(update) => onchain_wallet.apply_update(update), - Err(e) => match *e { - esplora_client::Error::Reqwest(he) => { - log_error!( - logger, - "Sync failed due to HTTP connection error: {}", - he - ); - Err(Error::WalletOperationFailed) + let now = Instant::now(); + match wallet_sync_timeout_fut.await { + Ok(res) => match res { + Ok(update) => match onchain_wallet.apply_update(update) { + Ok(()) => { + log_info!( + logger, + "Sync of on-chain wallet finished in {}ms.", + now.elapsed().as_millis() + ); + let unix_time_secs_opt = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .map(|d| d.as_secs()); + *latest_onchain_wallet_sync_timestamp.write().unwrap() = + unix_time_secs_opt; + Ok(()) + }, + Err(e) => Err(e), }, - _ => { - log_error!(logger, "Sync failed due to Esplora error: {}", e); - Err(Error::WalletOperationFailed) + Err(e) => match *e { + esplora_client::Error::Reqwest(he) => { + log_error!( + logger, + "Sync failed due to HTTP connection error: {}", + he + ); + Err(Error::WalletOperationFailed) + }, + _ => { + log_error!(logger, "Sync of on-chain wallet failed due to Esplora error: {}", e); + Err(Error::WalletOperationFailed) + }, }, }, - }, - Err(e) => { - log_error!(logger, "On-chain wallet sync timed out: {}", e); - Err(Error::WalletOperationTimeout) - }, - } - }; + Err(e) => { + log_error!(logger, "On-chain wallet sync timed out: {}", e); + Err(Error::WalletOperationTimeout) + }, + } + }; onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); @@ -197,10 +291,27 @@ impl ChainSource { } pub(crate) async fn sync_lightning_wallet( - &self, confirmables: Vec<&(dyn Confirm + Send + Sync)>, + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, ) -> Result<(), Error> { match self { - Self::Esplora { tx_sync, lightning_wallet_sync_status, logger, .. } => { + Self::Esplora { + tx_sync, + lightning_wallet_sync_status, + logger, + latest_wallet_sync_timestamp, + latest_channel_monitor_archival_height, + .. 
+ } => { + let sync_cman = Arc::clone(&channel_manager); + let sync_cmon = Arc::clone(&chain_monitor); + let sync_sweeper = Arc::clone(&output_sweeper); + let confirmables = vec![ + &*sync_cman as &(dyn Confirm + Sync + Send), + &*sync_cmon as &(dyn Confirm + Sync + Send), + &*sync_sweeper as &(dyn Confirm + Sync + Send), + ]; + let receiver_res = { let mut status_lock = lightning_wallet_sync_status.lock().unwrap(); status_lock.register_or_subscribe_pending_sync() @@ -218,8 +329,34 @@ impl ChainSource { Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), tx_sync.sync(confirmables), ); + let now = Instant::now(); match timeout_fut.await { - Ok(res) => res.map_err(|_| Error::TxSyncFailed), + Ok(res) => match res { + Ok(()) => { + log_info!( + logger, + "Sync of Lightning wallet finished in {}ms.", + now.elapsed().as_millis() + ); + + let unix_time_secs_opt = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .map(|d| d.as_secs()); + *latest_wallet_sync_timestamp.write().unwrap() = unix_time_secs_opt; + + periodically_archive_fully_resolved_monitors( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&latest_channel_monitor_archival_height), + ); + Ok(()) + }, + Err(e) => { + log_error!(logger, "Sync of Lightning wallet failed: {}", e); + Err(e.into()) + }, + }, Err(e) => { log_error!(logger, "Lightning wallet sync timed out: {}", e); Err(Error::TxSyncTimeout) @@ -236,7 +373,15 @@ impl ChainSource { pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { match self { - Self::Esplora { esplora_client, fee_estimator, config, logger, .. } => { + Self::Esplora { + esplora_client, + fee_estimator, + config, + logger, + latest_fee_rate_cache_update_timestamp, + .. + } => { + let now = Instant::now(); let estimates = tokio::time::timeout( Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), esplora_client.get_fee_estimates(), @@ -297,6 +442,16 @@ impl ChainSource { } fee_estimator.set_fee_rate_cache(new_fee_rate_cache); + + log_info!( + logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + *latest_fee_rate_cache_update_timestamp.write().unwrap() = unix_time_secs_opt; + Ok(()) }, } @@ -391,3 +546,19 @@ impl Filter for ChainSource { } } } + +fn periodically_archive_fully_resolved_monitors( + channel_manager: Arc, chain_monitor: Arc, + latest_channel_monitor_archival_height: Arc>>, +) { + let mut latest_archival_height_lock = latest_channel_monitor_archival_height.write().unwrap(); + let cur_height = channel_manager.current_best_block().height; + let should_archive = latest_archival_height_lock + .as_ref() + .map_or(true, |h| cur_height >= h + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL); + + if should_archive { + chain_monitor.archive_fully_resolved_channel_monitors(); + *latest_archival_height_lock = Some(cur_height); + } +} diff --git a/src/lib.rs b/src/lib.rs index 6b7d5e86c..4b491ea27 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -122,9 +122,7 @@ pub use builder::NodeBuilder as Builder; use chain::ChainSource; use config::{ - default_user_config, may_announce_channel, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, - RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, RGS_SYNC_INTERVAL, - WALLET_SYNC_INTERVAL_MINIMUM_SECS, + default_user_config, may_announce_channel, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; use event::{EventHandler, EventQueue}; @@ 
-145,7 +143,7 @@ pub use types::{ChannelDetails, PeerDetails, UserChannelId}; use logger::{log_error, log_info, log_trace, FilesystemLogger, Logger}; -use lightning::chain::{BestBlock, Confirm}; +use lightning::chain::BestBlock; use lightning::events::bump_transaction::Wallet as LdkWallet; use lightning::ln::channel_state::ChannelShutdownState; use lightning::ln::channelmanager::PaymentId; @@ -203,7 +201,6 @@ pub struct Node { latest_fee_rate_cache_update_timestamp: Arc>>, latest_rgs_snapshot_timestamp: Arc>>, latest_node_announcement_broadcast_timestamp: Arc>>, - latest_channel_monitor_archival_height: Arc>>, } impl Node { @@ -270,160 +267,16 @@ impl Node { }) })?; - // Setup wallet sync - let chain_source = Arc::clone(&self.chain_source); - let sync_logger = Arc::clone(&self.logger); - let sync_onchain_wallet_timestamp = Arc::clone(&self.latest_onchain_wallet_sync_timestamp); - let mut stop_sync = self.stop_sender.subscribe(); - let onchain_wallet_sync_interval_secs = self - .config - .onchain_wallet_sync_interval_secs - .max(config::WALLET_SYNC_INTERVAL_MINIMUM_SECS); - runtime.spawn(async move { - let mut onchain_wallet_sync_interval = - tokio::time::interval(Duration::from_secs(onchain_wallet_sync_interval_secs)); - onchain_wallet_sync_interval - .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - loop { - tokio::select! { - _ = stop_sync.changed() => { - log_trace!( - sync_logger, - "Stopping background syncing on-chain wallet.", - ); - return; - } - _ = onchain_wallet_sync_interval.tick() => { - let now = Instant::now(); - match chain_source.sync_onchain_wallet().await { - Ok(()) => { - log_trace!( - sync_logger, - "Background sync of on-chain wallet finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *sync_onchain_wallet_timestamp.write().unwrap() = unix_time_secs_opt; - } - Err(err) => { - log_error!( - sync_logger, - "Background sync of on-chain wallet failed: {}", - err - ) - } - } - } - } - } - }); - - let mut stop_fee_updates = self.stop_sender.subscribe(); - let fee_update_logger = Arc::clone(&self.logger); - let fee_update_timestamp = Arc::clone(&self.latest_fee_rate_cache_update_timestamp); - let chain_source = Arc::clone(&self.chain_source); - let fee_rate_cache_update_interval_secs = - self.config.fee_rate_cache_update_interval_secs.max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); - runtime.spawn(async move { - let mut fee_rate_update_interval = - tokio::time::interval(Duration::from_secs(fee_rate_cache_update_interval_secs)); - // We just blocked on updating, so skip the first tick. - fee_rate_update_interval.reset(); - fee_rate_update_interval - .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - loop { - tokio::select! 
{ - _ = stop_fee_updates.changed() => { - log_trace!( - fee_update_logger, - "Stopping background updates of fee rate cache.", - ); - return; - } - _ = fee_rate_update_interval.tick() => { - let now = Instant::now(); - match chain_source.update_fee_rate_estimates().await { - Ok(()) => { - log_trace!( - fee_update_logger, - "Background update of fee rate cache finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *fee_update_timestamp.write().unwrap() = unix_time_secs_opt; - } - Err(err) => { - log_error!( - fee_update_logger, - "Background update of fee rate cache failed: {}", - err - ) - } - } - } - } - } - }); - + // Spawn background task continuously syncing onchain, lightning, and fee rate cache. + let stop_sync_receiver = self.stop_sender.subscribe(); let chain_source = Arc::clone(&self.chain_source); let sync_cman = Arc::clone(&self.channel_manager); - let archive_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); - let archive_cmon = Arc::clone(&self.chain_monitor); let sync_sweeper = Arc::clone(&self.output_sweeper); - let sync_logger = Arc::clone(&self.logger); - let sync_wallet_timestamp = Arc::clone(&self.latest_wallet_sync_timestamp); - let sync_monitor_archival_height = Arc::clone(&self.latest_channel_monitor_archival_height); - let mut stop_sync = self.stop_sender.subscribe(); - let wallet_sync_interval_secs = - self.config.wallet_sync_interval_secs.max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); runtime.spawn(async move { - let mut wallet_sync_interval = - tokio::time::interval(Duration::from_secs(wallet_sync_interval_secs)); - wallet_sync_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - loop { - tokio::select! 
{ - _ = stop_sync.changed() => { - log_trace!( - sync_logger, - "Stopping background syncing Lightning wallet.", - ); - return; - } - _ = wallet_sync_interval.tick() => { - let confirmables = vec![ - &*sync_cman as &(dyn Confirm + Sync + Send), - &*sync_cmon as &(dyn Confirm + Sync + Send), - &*sync_sweeper as &(dyn Confirm + Sync + Send), - ]; - let now = Instant::now(); - - match chain_source.sync_lightning_wallet(confirmables).await { - Ok(()) => { - log_trace!( - sync_logger, - "Background sync of Lightning wallet finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *sync_wallet_timestamp.write().unwrap() = unix_time_secs_opt; - - periodically_archive_fully_resolved_monitors( - Arc::clone(&archive_cman), - Arc::clone(&archive_cmon), - Arc::clone(&sync_monitor_archival_height) - ); - } - Err(e) => { - log_error!(sync_logger, "Background sync of Lightning wallet failed: {}", e) - } - } - } - } - } + chain_source + .continuously_sync_wallets(stop_sync_receiver, sync_cman, sync_cmon, sync_sweeper) + .await; }); if self.gossip_source.is_rgs() { @@ -1364,96 +1217,15 @@ impl Node { let chain_source = Arc::clone(&self.chain_source); let sync_cman = Arc::clone(&self.channel_manager); - let archive_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); - let archive_cmon = Arc::clone(&self.chain_monitor); let sync_sweeper = Arc::clone(&self.output_sweeper); - let sync_logger = Arc::clone(&self.logger); - let confirmables = vec![ - &*sync_cman as &(dyn Confirm + Sync + Send), - &*sync_cmon as &(dyn Confirm + Sync + Send), - &*sync_sweeper as &(dyn Confirm + Sync + Send), - ]; - let sync_wallet_timestamp = Arc::clone(&self.latest_wallet_sync_timestamp); - let sync_fee_rate_update_timestamp = - Arc::clone(&self.latest_fee_rate_cache_update_timestamp); - let sync_onchain_wallet_timestamp = Arc::clone(&self.latest_onchain_wallet_sync_timestamp); - let sync_monitor_archival_height = Arc::clone(&self.latest_channel_monitor_archival_height); - tokio::task::block_in_place(move || { tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap().block_on( async move { - let now = Instant::now(); - // We don't add an additional timeout here, as `Wallet::sync` already returns - // after a timeout. - match chain_source.sync_onchain_wallet().await { - Ok(()) => { - log_info!( - sync_logger, - "Sync of on-chain wallet finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - *sync_onchain_wallet_timestamp.write().unwrap() = unix_time_secs_opt; - }, - Err(e) => { - log_error!(sync_logger, "Sync of on-chain wallet failed: {}", e); - return Err(e); - }, - }; - - let now = Instant::now(); - // We don't add an additional timeout here, as - // `ChainSource::update_fee_estimates` already returns after a timeout. 
- match chain_source.update_fee_rate_estimates().await { - Ok(()) => { - log_info!( - sync_logger, - "Fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - *sync_fee_rate_update_timestamp.write().unwrap() = unix_time_secs_opt; - }, - Err(e) => { - log_error!(sync_logger, "Fee rate cache update failed: {}", e,); - return Err(e); - }, - } - - let now = Instant::now(); - match chain_source.sync_lightning_wallet(confirmables).await { - Ok(()) => { - log_info!( - sync_logger, - "Sync of Lightning wallet finished in {}ms.", - now.elapsed().as_millis() - ); - - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - *sync_wallet_timestamp.write().unwrap() = unix_time_secs_opt; - - periodically_archive_fully_resolved_monitors( - archive_cman, - archive_cmon, - sync_monitor_archival_height, - ); - Ok(()) - }, - Err(e) => { - log_error!(sync_logger, "Sync of Lightning wallet failed: {}", e); - Err(e.into()) - }, - } + chain_source.update_fee_rate_estimates().await?; + chain_source.sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper).await?; + chain_source.sync_onchain_wallet().await?; + Ok(()) }, ) }) @@ -1790,19 +1562,3 @@ pub(crate) fn total_anchor_channels_reserve_sats( * anchor_channels_config.per_channel_reserve_sats }) } - -fn periodically_archive_fully_resolved_monitors( - channel_manager: Arc, chain_monitor: Arc, - latest_channel_monitor_archival_height: Arc>>, -) { - let mut latest_archival_height_lock = latest_channel_monitor_archival_height.write().unwrap(); - let cur_height = channel_manager.current_best_block().height; - let should_archive = latest_archival_height_lock - .as_ref() - .map_or(true, |h| cur_height >= h + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL); - - if should_archive { - chain_monitor.archive_fully_resolved_channel_monitors(); - *latest_archival_height_lock = Some(cur_height); - } -} From 835baf4e63bc75dfdd52efc0d7a95f0557766763 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 3 Oct 2024 13:10:30 +0200 Subject: [PATCH 073/127] Implement incremental Esplora syncing for the on-chain wallet --- src/chain/mod.rs | 119 ++++++++++++++++++++++++++++------------------ src/lib.rs | 24 +--------- src/wallet/mod.rs | 6 ++- 3 files changed, 80 insertions(+), 69 deletions(-) diff --git a/src/chain/mod.rs b/src/chain/mod.rs index d4ce30994..6751c0837 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -228,10 +228,71 @@ impl ChainSource { })?; } - let res = - { - let full_scan_request = onchain_wallet.get_full_scan_request(); + let res = { + // If this is our first sync, do a full scan with the configured gap limit. + // Otherwise just do an incremental sync. + let incremental_sync = + latest_onchain_wallet_sync_timestamp.read().unwrap().is_some(); + + macro_rules! 
get_and_apply_wallet_update { + ($sync_future: expr) => {{ + let now = Instant::now(); + match $sync_future.await { + Ok(res) => match res { + Ok(update) => match onchain_wallet.apply_update(update) { + Ok(()) => { + log_info!( + logger, + "{} of on-chain wallet finished in {}ms.", + if incremental_sync { "Incremental sync" } else { "Sync" }, + now.elapsed().as_millis() + ); + let unix_time_secs_opt = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .map(|d| d.as_secs()); + *latest_onchain_wallet_sync_timestamp.write().unwrap() = + unix_time_secs_opt; + Ok(()) + }, + Err(e) => Err(e), + }, + Err(e) => match *e { + esplora_client::Error::Reqwest(he) => { + log_error!( + logger, + "{} of on-chain wallet failed due to HTTP connection error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + he + ); + Err(Error::WalletOperationFailed) + }, + _ => { + log_error!( + logger, + "{} of on-chain wallet failed due to Esplora error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + e + ); + Err(Error::WalletOperationFailed) + }, + }, + }, + Err(e) => { + log_error!( + logger, + "{} of on-chain wallet timed out: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + e + ); + Err(Error::WalletOperationTimeout) + }, + } + }} + } + if incremental_sync { + let full_scan_request = onchain_wallet.get_full_scan_request(); let wallet_sync_timeout_fut = tokio::time::timeout( Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), esplora_client.full_scan( @@ -240,48 +301,16 @@ impl ChainSource { BDK_CLIENT_CONCURRENCY, ), ); - - let now = Instant::now(); - match wallet_sync_timeout_fut.await { - Ok(res) => match res { - Ok(update) => match onchain_wallet.apply_update(update) { - Ok(()) => { - log_info!( - logger, - "Sync of on-chain wallet finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - *latest_onchain_wallet_sync_timestamp.write().unwrap() = - unix_time_secs_opt; - Ok(()) - }, - Err(e) => Err(e), - }, - Err(e) => match *e { - esplora_client::Error::Reqwest(he) => { - log_error!( - logger, - "Sync failed due to HTTP connection error: {}", - he - ); - Err(Error::WalletOperationFailed) - }, - _ => { - log_error!(logger, "Sync of on-chain wallet failed due to Esplora error: {}", e); - Err(Error::WalletOperationFailed) - }, - }, - }, - Err(e) => { - log_error!(logger, "On-chain wallet sync timed out: {}", e); - Err(Error::WalletOperationTimeout) - }, - } - }; + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } else { + let sync_request = onchain_wallet.get_incremental_sync_request(); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), + ); + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } + }; onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); diff --git a/src/lib.rs b/src/lib.rs index 4b491ea27..0c07944f5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -240,31 +240,9 @@ impl Node { // Block to ensure we update our fee rate cache once on startup let chain_source = Arc::clone(&self.chain_source); - let sync_logger = Arc::clone(&self.logger); - let sync_fee_rate_update_timestamp = - Arc::clone(&self.latest_fee_rate_cache_update_timestamp); let runtime_ref = &runtime; tokio::task::block_in_place(move || { - runtime_ref.block_on(async move { - let now = Instant::now(); - match 
chain_source.update_fee_rate_estimates().await { - Ok(()) => { - log_info!( - sync_logger, - "Initial fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *sync_fee_rate_update_timestamp.write().unwrap() = unix_time_secs_opt; - Ok(()) - }, - Err(e) => { - log_error!(sync_logger, "Initial fee rate cache update failed: {}", e,); - Err(e) - }, - } - }) + runtime_ref.block_on(async move { chain_source.update_fee_rate_estimates().await }) })?; // Spawn background task continuously syncing onchain, lightning, and fee rate cache. diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 88d057cca..30da1682d 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -25,7 +25,7 @@ use lightning::sign::{ use lightning::util::message_signing; use lightning_invoice::RawBolt11Invoice; -use bdk_chain::spk_client::FullScanRequest; +use bdk_chain::spk_client::{FullScanRequest, SyncRequest}; use bdk_chain::ChainPosition; use bdk_wallet::{KeychainKind, PersistedWallet, SignOptions, Update}; @@ -80,6 +80,10 @@ where self.inner.lock().unwrap().start_full_scan().build() } + pub(crate) fn get_incremental_sync_request(&self) -> SyncRequest<(KeychainKind, u32)> { + self.inner.lock().unwrap().start_sync_with_revealed_spks().build() + } + pub(crate) fn apply_update(&self, update: impl Into) -> Result<(), Error> { let mut locked_wallet = self.inner.lock().unwrap(); match locked_wallet.apply_update(update) { From 4ccc93a9c565c93c10584c2a66ee01b79d712ff6 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 3 Oct 2024 14:16:05 +0200 Subject: [PATCH 074/127] Introduce persisted `NodeMetrics` struct Previously, we persisted some of the `latest_` fields exposed via `NodeStatus`. Here, we now refactor this via a persisted `NodeMetrics` struct which allows to persist more fields across restarts. In particular, we now persist the latest time we sync the on-chain wallet, resulting in only doing a full scan on first initialization, and doing incremental syncing afterwards. As both of these operations are really really lightweight, we don't bother to migrate the old persisted timestamps for RGS updates and node announcement broadcasts over to the new data format. --- bindings/ldk_node.udl | 3 +- src/builder.rs | 66 +++++++++++----------- src/chain/mod.rs | 83 +++++++++++++++++---------- src/io/mod.rs | 13 ++--- src/io/utils.rs | 93 +++++++----------------------- src/lib.rs | 128 +++++++++++++++++++++++++++--------------- 6 files changed, 195 insertions(+), 191 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 96490f2b7..817fd06ee 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -218,11 +218,12 @@ dictionary NodeStatus { boolean is_running; boolean is_listening; BestBlock current_best_block; - u64? latest_wallet_sync_timestamp; + u64? latest_lightning_wallet_sync_timestamp; u64? latest_onchain_wallet_sync_timestamp; u64? latest_fee_rate_cache_update_timestamp; u64? latest_rgs_snapshot_timestamp; u64? latest_node_announcement_broadcast_timestamp; + u32? 
latest_channel_monitor_archival_height; }; dictionary BestBlock { diff --git a/src/builder.rs b/src/builder.rs index 4905d97d3..1cfd46eed 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -12,8 +12,8 @@ use crate::connection::ConnectionManager; use crate::event::EventQueue; use crate::fee_estimator::OnchainFeeEstimator; use crate::gossip::GossipSource; -use crate::io; use crate::io::sqlite_store::SqliteStore; +use crate::io::utils::{read_node_metrics, write_node_metrics}; #[cfg(any(vss, vss_test))] use crate::io::vss_store::VssStore; use crate::liquidity::LiquiditySource; @@ -28,6 +28,7 @@ use crate::types::{ }; use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; +use crate::{io, NodeMetrics}; use crate::{LogLevel, Node}; use lightning::chain::{chainmonitor, BestBlock, Watch}; @@ -554,12 +555,16 @@ fn build_with_store_internal( ) -> Result { // Initialize the status fields. let is_listening = Arc::new(AtomicBool::new(false)); - let latest_wallet_sync_timestamp = Arc::new(RwLock::new(None)); - let latest_onchain_wallet_sync_timestamp = Arc::new(RwLock::new(None)); - let latest_fee_rate_cache_update_timestamp = Arc::new(RwLock::new(None)); - let latest_rgs_snapshot_timestamp = Arc::new(RwLock::new(None)); - let latest_node_announcement_broadcast_timestamp = Arc::new(RwLock::new(None)); - let latest_channel_monitor_archival_height = Arc::new(RwLock::new(None)); + let node_metrics = match read_node_metrics(Arc::clone(&kv_store), Arc::clone(&logger)) { + Ok(metrics) => Arc::new(RwLock::new(metrics)), + Err(e) => { + if e.kind() == std::io::ErrorKind::NotFound { + Arc::new(RwLock::new(NodeMetrics::default())) + } else { + return Err(BuildError::ReadFailed); + } + }, + }; // Initialize the on-chain wallet and chain access let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { @@ -608,12 +613,10 @@ fn build_with_store_internal( Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), Arc::clone(&config), Arc::clone(&logger), - Arc::clone(&latest_wallet_sync_timestamp), - Arc::clone(&latest_onchain_wallet_sync_timestamp), - Arc::clone(&latest_fee_rate_cache_update_timestamp), - latest_channel_monitor_archival_height, + Arc::clone(&node_metrics), )), None => { // Default to Esplora client. 
@@ -623,12 +626,10 @@ fn build_with_store_internal( Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), Arc::clone(&config), Arc::clone(&logger), - Arc::clone(&latest_wallet_sync_timestamp), - Arc::clone(&latest_onchain_wallet_sync_timestamp), - Arc::clone(&latest_fee_rate_cache_update_timestamp), - latest_channel_monitor_archival_height, + Arc::clone(&node_metrics), )) }, }; @@ -820,23 +821,24 @@ fn build_with_store_internal( Arc::new(GossipSource::new_p2p(Arc::clone(&network_graph), Arc::clone(&logger))); // Reset the RGS sync timestamp in case we somehow switch gossip sources - io::utils::write_latest_rgs_sync_timestamp( - 0, - Arc::clone(&kv_store), - Arc::clone(&logger), - ) - .map_err(|e| { - log_error!(logger, "Failed writing to store: {}", e); - BuildError::WriteFailed - })?; + { + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_rgs_snapshot_timestamp = None; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + ) + .map_err(|e| { + log_error!(logger, "Failed writing to store: {}", e); + BuildError::WriteFailed + })?; + } p2p_source }, GossipSourceConfig::RapidGossipSync(rgs_server) => { - let latest_sync_timestamp = io::utils::read_latest_rgs_sync_timestamp( - Arc::clone(&kv_store), - Arc::clone(&logger), - ) - .unwrap_or(0); + let latest_sync_timestamp = + node_metrics.read().unwrap().latest_rgs_snapshot_timestamp.unwrap_or(0); Arc::new(GossipSource::new_rgs( rgs_server.clone(), latest_sync_timestamp, @@ -1021,11 +1023,7 @@ fn build_with_store_internal( peer_store, payment_store, is_listening, - latest_wallet_sync_timestamp, - latest_onchain_wallet_sync_timestamp, - latest_fee_rate_cache_update_timestamp, - latest_rgs_snapshot_timestamp, - latest_node_announcement_broadcast_timestamp, + node_metrics, }) } diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 6751c0837..267513e49 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -15,9 +15,10 @@ use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, OnchainFeeEstimator, }; +use crate::io::utils::write_node_metrics; use crate::logger::{log_bytes, log_error, log_info, log_trace, FilesystemLogger, Logger}; -use crate::types::{Broadcaster, ChainMonitor, ChannelManager, Sweeper, Wallet}; -use crate::Error; +use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; +use crate::{Error, NodeMetrics}; use lightning::chain::{Confirm, Filter}; use lightning::util::ser::Writeable; @@ -102,23 +103,18 @@ pub(crate) enum ChainSource { lightning_wallet_sync_status: Mutex, fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, logger: Arc, - latest_wallet_sync_timestamp: Arc>>, - latest_onchain_wallet_sync_timestamp: Arc>>, - latest_fee_rate_cache_update_timestamp: Arc>>, - latest_channel_monitor_archival_height: Arc>>, + node_metrics: Arc>, }, } impl ChainSource { pub(crate) fn new_esplora( server_url: String, onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, config: Arc, logger: Arc, - latest_wallet_sync_timestamp: Arc>>, - latest_onchain_wallet_sync_timestamp: Arc>>, - latest_fee_rate_cache_update_timestamp: Arc>>, - latest_channel_monitor_archival_height: Arc>>, + tx_broadcaster: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, ) -> Self { let mut client_builder = esplora_client::Builder::new(&server_url); client_builder = 
client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); @@ -135,12 +131,10 @@ impl ChainSource { lightning_wallet_sync_status, fee_estimator, tx_broadcaster, + kv_store, config, logger, - latest_wallet_sync_timestamp, - latest_onchain_wallet_sync_timestamp, - latest_fee_rate_cache_update_timestamp, - latest_channel_monitor_archival_height, + node_metrics, } } @@ -211,8 +205,9 @@ impl ChainSource { esplora_client, onchain_wallet, onchain_wallet_sync_status, + kv_store, logger, - latest_onchain_wallet_sync_timestamp, + node_metrics, .. } => { let receiver_res = { @@ -232,7 +227,7 @@ impl ChainSource { // If this is our first sync, do a full scan with the configured gap limit. // Otherwise just do an incremental sync. let incremental_sync = - latest_onchain_wallet_sync_timestamp.read().unwrap().is_some(); + node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); macro_rules! get_and_apply_wallet_update { ($sync_future: expr) => {{ @@ -251,8 +246,11 @@ impl ChainSource { .duration_since(UNIX_EPOCH) .ok() .map(|d| d.as_secs()); - *latest_onchain_wallet_sync_timestamp.write().unwrap() = - unix_time_secs_opt; + { + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + write_node_metrics(&*locked_node_metrics, Arc::clone(&kv_store), Arc::clone(&logger))?; + } Ok(()) }, Err(e) => Err(e), @@ -327,9 +325,9 @@ impl ChainSource { Self::Esplora { tx_sync, lightning_wallet_sync_status, + kv_store, logger, - latest_wallet_sync_timestamp, - latest_channel_monitor_archival_height, + node_metrics, .. } => { let sync_cman = Arc::clone(&channel_manager); @@ -372,13 +370,24 @@ impl ChainSource { .duration_since(UNIX_EPOCH) .ok() .map(|d| d.as_secs()); - *latest_wallet_sync_timestamp.write().unwrap() = unix_time_secs_opt; + { + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = + unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + )?; + } periodically_archive_fully_resolved_monitors( Arc::clone(&channel_manager), Arc::clone(&chain_monitor), - Arc::clone(&latest_channel_monitor_archival_height), - ); + Arc::clone(&kv_store), + Arc::clone(&logger), + Arc::clone(&node_metrics), + )?; Ok(()) }, Err(e) => { @@ -406,8 +415,9 @@ impl ChainSource { esplora_client, fee_estimator, config, + kv_store, logger, - latest_fee_rate_cache_update_timestamp, + node_metrics, .. 
} => { let now = Instant::now(); @@ -479,7 +489,15 @@ impl ChainSource { ); let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *latest_fee_rate_cache_update_timestamp.write().unwrap() = unix_time_secs_opt; + { + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + )?; + } Ok(()) }, @@ -578,16 +596,19 @@ impl Filter for ChainSource { fn periodically_archive_fully_resolved_monitors( channel_manager: Arc, chain_monitor: Arc, - latest_channel_monitor_archival_height: Arc>>, -) { - let mut latest_archival_height_lock = latest_channel_monitor_archival_height.write().unwrap(); + kv_store: Arc, logger: Arc, node_metrics: Arc>, +) -> Result<(), Error> { + let mut locked_node_metrics = node_metrics.write().unwrap(); let cur_height = channel_manager.current_best_block().height; - let should_archive = latest_archival_height_lock + let should_archive = locked_node_metrics + .latest_channel_monitor_archival_height .as_ref() .map_or(true, |h| cur_height >= h + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL); if should_archive { chain_monitor.archive_fully_resolved_channel_monitors(); - *latest_archival_height_lock = Some(cur_height); + locked_node_metrics.latest_channel_monitor_archival_height = Some(cur_height); + write_node_metrics(&*locked_node_metrics, kv_store, logger)?; } + Ok(()) } diff --git a/src/io/mod.rs b/src/io/mod.rs index 22caff50f..fab0a27f9 100644 --- a/src/io/mod.rs +++ b/src/io/mod.rs @@ -33,15 +33,10 @@ pub(crate) const DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_PRIMARY_NAMESPACE: "spendable_outputs"; pub(crate) const DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; -/// RapidGossipSync's `latest_sync_timestamp` will be persisted under this key. -pub(crate) const LATEST_RGS_SYNC_TIMESTAMP_PRIMARY_NAMESPACE: &str = ""; -pub(crate) const LATEST_RGS_SYNC_TIMESTAMP_SECONDARY_NAMESPACE: &str = ""; -pub(crate) const LATEST_RGS_SYNC_TIMESTAMP_KEY: &str = "latest_rgs_sync_timestamp"; - -/// The last time we broadcast a node announcement will be persisted under this key. -pub(crate) const LATEST_NODE_ANN_BCAST_TIMESTAMP_PRIMARY_NAMESPACE: &str = ""; -pub(crate) const LATEST_NODE_ANN_BCAST_TIMESTAMP_SECONDARY_NAMESPACE: &str = ""; -pub(crate) const LATEST_NODE_ANN_BCAST_TIMESTAMP_KEY: &str = "latest_node_ann_bcast_timestamp"; +/// The node metrics will be persisted under this key. +pub(crate) const NODE_METRICS_PRIMARY_NAMESPACE: &str = ""; +pub(crate) const NODE_METRICS_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const NODE_METRICS_KEY: &str = "node_metrics"; /// The BDK wallet's [`ChangeSet::descriptor`] will be persisted under this key. 
/// diff --git a/src/io/utils.rs b/src/io/utils.rs index 411928495..218fec473 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -10,12 +10,15 @@ use crate::config::WALLET_KEYS_SEED_LEN; use crate::chain::ChainSource; use crate::fee_estimator::OnchainFeeEstimator; +use crate::io::{ + NODE_METRICS_KEY, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, +}; use crate::logger::{log_error, FilesystemLogger}; use crate::peer_store::PeerStore; use crate::sweep::DeprecatedSpendableOutputInfo; use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; -use crate::{Error, EventQueue, PaymentDetails}; +use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; use lightning::io::Cursor; use lightning::ln::msgs::DecodeError; @@ -342,98 +345,44 @@ where Ok(()) } -pub(crate) fn read_latest_rgs_sync_timestamp( +pub(crate) fn read_node_metrics( kv_store: Arc, logger: L, -) -> Result +) -> Result where L::Target: Logger, { let mut reader = Cursor::new(kv_store.read( - LATEST_RGS_SYNC_TIMESTAMP_PRIMARY_NAMESPACE, - LATEST_RGS_SYNC_TIMESTAMP_SECONDARY_NAMESPACE, - LATEST_RGS_SYNC_TIMESTAMP_KEY, + NODE_METRICS_PRIMARY_NAMESPACE, + NODE_METRICS_SECONDARY_NAMESPACE, + NODE_METRICS_KEY, )?); - u32::read(&mut reader).map_err(|e| { - log_error!(logger, "Failed to deserialize latest RGS sync timestamp: {}", e); - std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Failed to deserialize latest RGS sync timestamp", - ) - }) -} - -pub(crate) fn write_latest_rgs_sync_timestamp( - updated_timestamp: u32, kv_store: Arc, logger: L, -) -> Result<(), Error> -where - L::Target: Logger, -{ - let data = updated_timestamp.encode(); - kv_store - .write( - LATEST_RGS_SYNC_TIMESTAMP_PRIMARY_NAMESPACE, - LATEST_RGS_SYNC_TIMESTAMP_SECONDARY_NAMESPACE, - LATEST_RGS_SYNC_TIMESTAMP_KEY, - &data, - ) - .map_err(|e| { - log_error!( - logger, - "Writing data to key {}/{}/{} failed due to: {}", - LATEST_RGS_SYNC_TIMESTAMP_PRIMARY_NAMESPACE, - LATEST_RGS_SYNC_TIMESTAMP_SECONDARY_NAMESPACE, - LATEST_RGS_SYNC_TIMESTAMP_KEY, - e - ); - Error::PersistenceFailed - }) -} - -pub(crate) fn read_latest_node_ann_bcast_timestamp( - kv_store: Arc, logger: L, -) -> Result -where - L::Target: Logger, -{ - let mut reader = Cursor::new(kv_store.read( - LATEST_NODE_ANN_BCAST_TIMESTAMP_PRIMARY_NAMESPACE, - LATEST_NODE_ANN_BCAST_TIMESTAMP_SECONDARY_NAMESPACE, - LATEST_NODE_ANN_BCAST_TIMESTAMP_KEY, - )?); - u64::read(&mut reader).map_err(|e| { - log_error!( - logger, - "Failed to deserialize latest node announcement broadcast timestamp: {}", - e - ); - std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Failed to deserialize latest node announcement broadcast timestamp", - ) + NodeMetrics::read(&mut reader).map_err(|e| { + log_error!(logger, "Failed to deserialize NodeMetrics: {}", e); + std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize NodeMetrics") }) } -pub(crate) fn write_latest_node_ann_bcast_timestamp( - updated_timestamp: u64, kv_store: Arc, logger: L, +pub(crate) fn write_node_metrics( + node_metrics: &NodeMetrics, kv_store: Arc, logger: L, ) -> Result<(), Error> where L::Target: Logger, { - let data = updated_timestamp.encode(); + let data = node_metrics.encode(); kv_store .write( - LATEST_NODE_ANN_BCAST_TIMESTAMP_PRIMARY_NAMESPACE, - LATEST_NODE_ANN_BCAST_TIMESTAMP_SECONDARY_NAMESPACE, - LATEST_NODE_ANN_BCAST_TIMESTAMP_KEY, + NODE_METRICS_PRIMARY_NAMESPACE, + NODE_METRICS_SECONDARY_NAMESPACE, + NODE_METRICS_KEY, 
&data, ) .map_err(|e| { log_error!( logger, "Writing data to key {}/{}/{} failed due to: {}", - LATEST_NODE_ANN_BCAST_TIMESTAMP_PRIMARY_NAMESPACE, - LATEST_NODE_ANN_BCAST_TIMESTAMP_SECONDARY_NAMESPACE, - LATEST_NODE_ANN_BCAST_TIMESTAMP_KEY, + NODE_METRICS_PRIMARY_NAMESPACE, + NODE_METRICS_SECONDARY_NAMESPACE, + NODE_METRICS_KEY, e ); Error::PersistenceFailed diff --git a/src/lib.rs b/src/lib.rs index 0c07944f5..12d38e194 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -122,12 +122,14 @@ pub use builder::NodeBuilder as Builder; use chain::ChainSource; use config::{ - default_user_config, may_announce_channel, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, + default_user_config, may_announce_channel, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, + RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; use event::{EventHandler, EventQueue}; use gossip::GossipSource; use graph::NetworkGraph; +use io::utils::write_node_metrics; use liquidity::LiquiditySource; use payment::store::PaymentStore; use payment::{ @@ -145,6 +147,7 @@ use logger::{log_error, log_info, log_trace, FilesystemLogger, Logger}; use lightning::chain::BestBlock; use lightning::events::bump_transaction::Wallet as LdkWallet; +use lightning::impl_writeable_tlv_based; use lightning::ln::channel_state::ChannelShutdownState; use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::SocketAddress; @@ -196,11 +199,7 @@ pub struct Node { peer_store: Arc>>, payment_store: Arc>>, is_listening: Arc, - latest_wallet_sync_timestamp: Arc>>, - latest_onchain_wallet_sync_timestamp: Arc>>, - latest_fee_rate_cache_update_timestamp: Arc>>, - latest_rgs_snapshot_timestamp: Arc>>, - latest_node_announcement_broadcast_timestamp: Arc>>, + node_metrics: Arc>, } impl Node { @@ -261,7 +260,7 @@ impl Node { let gossip_source = Arc::clone(&self.gossip_source); let gossip_sync_store = Arc::clone(&self.kv_store); let gossip_sync_logger = Arc::clone(&self.logger); - let gossip_rgs_sync_timestamp = Arc::clone(&self.latest_rgs_snapshot_timestamp); + let gossip_node_metrics = Arc::clone(&self.node_metrics); let mut stop_gossip_sync = self.stop_sender.subscribe(); runtime.spawn(async move { let mut interval = tokio::time::interval(RGS_SYNC_INTERVAL); @@ -284,22 +283,22 @@ impl Node { "Background sync of RGS gossip data finished in {}ms.", now.elapsed().as_millis() ); - io::utils::write_latest_rgs_sync_timestamp( - updated_timestamp, - Arc::clone(&gossip_sync_store), - Arc::clone(&gossip_sync_logger), - ) - .unwrap_or_else(|e| { - log_error!(gossip_sync_logger, "Persistence failed: {}", e); - panic!("Persistence failed"); - }); - *gossip_rgs_sync_timestamp.write().unwrap() = Some(updated_timestamp as u64); + { + let mut locked_node_metrics = gossip_node_metrics.write().unwrap(); + locked_node_metrics.latest_rgs_snapshot_timestamp = Some(updated_timestamp); + write_node_metrics(&*locked_node_metrics, Arc::clone(&gossip_sync_store), Arc::clone(&gossip_sync_logger)) + .unwrap_or_else(|e| { + log_error!(gossip_sync_logger, "Persistence failed: {}", e); + }); + } + } + Err(e) => { + log_error!( + gossip_sync_logger, + "Background sync of RGS gossip data failed: {}", + e + ) } - Err(e) => log_error!( - gossip_sync_logger, - "Background sync of RGS gossip data failed: {}", - e - ), } } } @@ -421,7 +420,7 @@ impl Node { let bcast_config = Arc::clone(&self.config); let bcast_store = Arc::clone(&self.kv_store); let bcast_logger = Arc::clone(&self.logger); - let bcast_ann_timestamp = 
Arc::clone(&self.latest_node_announcement_broadcast_timestamp); + let bcast_node_metrics = Arc::clone(&self.node_metrics); let mut stop_bcast = self.stop_sender.subscribe(); let node_alias = self.config.node_alias.clone(); if may_announce_channel(&self.config) { @@ -441,13 +440,13 @@ impl Node { return; } _ = interval.tick() => { - let skip_broadcast = match io::utils::read_latest_node_ann_bcast_timestamp(Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) { - Ok(latest_bcast_time_secs) => { + let skip_broadcast = match bcast_node_metrics.read().unwrap().latest_node_announcement_broadcast_timestamp { + Some(latest_bcast_time_secs) => { // Skip if the time hasn't elapsed yet. let next_bcast_unix_time = SystemTime::UNIX_EPOCH + Duration::from_secs(latest_bcast_time_secs) + NODE_ANN_BCAST_INTERVAL; next_bcast_unix_time.elapsed().is_err() } - Err(_) => { + None => { // Don't skip if we haven't broadcasted before. false } @@ -479,20 +478,18 @@ impl Node { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *bcast_ann_timestamp.write().unwrap() = unix_time_secs_opt; - - if let Some(unix_time_secs) = unix_time_secs_opt { - io::utils::write_latest_node_ann_bcast_timestamp(unix_time_secs, Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) + { + let mut locked_node_metrics = bcast_node_metrics.write().unwrap(); + locked_node_metrics.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt; + write_node_metrics(&*locked_node_metrics, Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) .unwrap_or_else(|e| { log_error!(bcast_logger, "Persistence failed: {}", e); - panic!("Persistence failed"); }); } } else { debug_assert!(false, "We checked whether the node may announce, so node alias should always be set"); continue } - } } } @@ -719,24 +716,30 @@ impl Node { let is_running = self.runtime.read().unwrap().is_some(); let is_listening = self.is_listening.load(Ordering::Acquire); let current_best_block = self.channel_manager.current_best_block().into(); - let latest_wallet_sync_timestamp = *self.latest_wallet_sync_timestamp.read().unwrap(); + let locked_node_metrics = self.node_metrics.read().unwrap(); + let latest_lightning_wallet_sync_timestamp = + locked_node_metrics.latest_lightning_wallet_sync_timestamp; let latest_onchain_wallet_sync_timestamp = - *self.latest_onchain_wallet_sync_timestamp.read().unwrap(); + locked_node_metrics.latest_onchain_wallet_sync_timestamp; let latest_fee_rate_cache_update_timestamp = - *self.latest_fee_rate_cache_update_timestamp.read().unwrap(); - let latest_rgs_snapshot_timestamp = *self.latest_rgs_snapshot_timestamp.read().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp; + let latest_rgs_snapshot_timestamp = + locked_node_metrics.latest_rgs_snapshot_timestamp.map(|val| val as u64); let latest_node_announcement_broadcast_timestamp = - *self.latest_node_announcement_broadcast_timestamp.read().unwrap(); + locked_node_metrics.latest_node_announcement_broadcast_timestamp; + let latest_channel_monitor_archival_height = + locked_node_metrics.latest_channel_monitor_archival_height; NodeStatus { is_running, is_listening, current_best_block, - latest_wallet_sync_timestamp, + latest_lightning_wallet_sync_timestamp, latest_onchain_wallet_sync_timestamp, latest_fee_rate_cache_update_timestamp, latest_rgs_snapshot_timestamp, latest_node_announcement_broadcast_timestamp, + latest_channel_monitor_archival_height, } } @@ -1497,30 +1500,67 @@ pub struct NodeStatus { /// The timestamp, in seconds since start 
of the UNIX epoch, when we last successfully synced /// our Lightning wallet to the chain tip. /// - /// Will be `None` if the wallet hasn't been synced since the [`Node`] was initialized. - pub latest_wallet_sync_timestamp: Option, + /// Will be `None` if the wallet hasn't been synced yet. + pub latest_lightning_wallet_sync_timestamp: Option, /// The timestamp, in seconds since start of the UNIX epoch, when we last successfully synced /// our on-chain wallet to the chain tip. /// - /// Will be `None` if the wallet hasn't been synced since the [`Node`] was initialized. + /// Will be `None` if the wallet hasn't been synced yet. pub latest_onchain_wallet_sync_timestamp: Option, /// The timestamp, in seconds since start of the UNIX epoch, when we last successfully update /// our fee rate cache. /// - /// Will be `None` if the cache hasn't been updated since the [`Node`] was initialized. + /// Will be `None` if the cache hasn't been updated yet. pub latest_fee_rate_cache_update_timestamp: Option, /// The timestamp, in seconds since start of the UNIX epoch, when the last rapid gossip sync /// (RGS) snapshot we successfully applied was generated. /// - /// Will be `None` if RGS isn't configured or the snapshot hasn't been updated since the [`Node`] was initialized. + /// Will be `None` if RGS isn't configured or the snapshot hasn't been updated yet. pub latest_rgs_snapshot_timestamp: Option, /// The timestamp, in seconds since start of the UNIX epoch, when we last broadcasted a node /// announcement. /// - /// Will be `None` if we have no public channels or we haven't broadcasted since the [`Node`] was initialized. + /// Will be `None` if we have no public channels or we haven't broadcasted yet. pub latest_node_announcement_broadcast_timestamp: Option, + /// The block height when we last archived closed channel monitor data. + /// + /// Will be `None` if we haven't archived any monitors of closed channels yet. + pub latest_channel_monitor_archival_height: Option, } +/// Status fields that are persisted across restarts. +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct NodeMetrics { + latest_lightning_wallet_sync_timestamp: Option, + latest_onchain_wallet_sync_timestamp: Option, + latest_fee_rate_cache_update_timestamp: Option, + latest_rgs_snapshot_timestamp: Option, + latest_node_announcement_broadcast_timestamp: Option, + latest_channel_monitor_archival_height: Option, +} + +impl Default for NodeMetrics { + fn default() -> Self { + Self { + latest_lightning_wallet_sync_timestamp: None, + latest_onchain_wallet_sync_timestamp: None, + latest_fee_rate_cache_update_timestamp: None, + latest_rgs_snapshot_timestamp: None, + latest_node_announcement_broadcast_timestamp: None, + latest_channel_monitor_archival_height: None, + } + } +} + +impl_writeable_tlv_based!(NodeMetrics, { + (0, latest_lightning_wallet_sync_timestamp, option), + (2, latest_onchain_wallet_sync_timestamp, option), + (4, latest_fee_rate_cache_update_timestamp, option), + (6, latest_rgs_snapshot_timestamp, option), + (8, latest_node_announcement_broadcast_timestamp, option), + (10, latest_channel_monitor_archival_height, option), +}); + pub(crate) fn total_anchor_channels_reserve_sats( channel_manager: &ChannelManager, config: &Config, ) -> u64 { From 66ca6be302c57bb0bc5b3835ae37b1d2c723d52c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 8 Oct 2024 12:52:56 +0200 Subject: [PATCH 075/127] Prefactor: Expose config objects via `config` module .. to further de-clutter the top-level docs. 
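For illustration, a minimal sketch of the resulting downstream usage, assuming an
application that builds its node from a `Config` (the application code itself is
hypothetical; the point of the change is only the `ldk_node::config` import path
and `Builder::from_config`):

    use ldk_node::bitcoin::Network;
    use ldk_node::config::Config;
    use ldk_node::Builder;

    // Start from the defaults and override only what is needed.
    let mut config = Config::default();
    config.network = Network::Testnet;

    // Chain source, gossip source, etc. can still be set on the returned builder
    // before calling `build()`.
    let builder = Builder::from_config(config);
    let node = builder.build().unwrap();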
--- src/config.rs | 2 ++ src/lib.rs | 16 +++++++++++----- src/uniffi_types.rs | 1 + tests/common/mod.rs | 5 ++--- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/config.rs b/src/config.rs index eccb3d437..0fd55abdf 100644 --- a/src/config.rs +++ b/src/config.rs @@ -5,6 +5,8 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +//! Objects for configuring the node. + use crate::payment::SendingParameters; use lightning::ln::msgs::SocketAddress; diff --git a/src/lib.rs b/src/lib.rs index 12d38e194..a3782983c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -75,7 +75,7 @@ mod balance; mod builder; mod chain; -mod config; +pub mod config; mod connection; mod error; mod event; @@ -102,7 +102,6 @@ pub use lightning; pub use lightning_invoice; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; -pub use config::{default_config, AnchorChannelsConfig, Config}; pub use error::Error as NodeError; use error::Error; @@ -122,8 +121,8 @@ pub use builder::NodeBuilder as Builder; use chain::ChainSource; use config::{ - default_user_config, may_announce_channel, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, - RGS_SYNC_INTERVAL, + default_user_config, may_announce_channel, Config, NODE_ANN_BCAST_INTERVAL, + PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; use event::{EventHandler, EventQueue}; @@ -1131,6 +1130,8 @@ impl Node { /// opening the channel. /// /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. + /// + /// [`AnchorChannelsConfig::per_channel_reserve_sats`]: crate::config::AnchorChannelsConfig::per_channel_reserve_sats pub fn open_channel( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, push_to_counterparty_msat: Option, channel_config: Option, @@ -1164,6 +1165,8 @@ impl Node { /// opening the channel. /// /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. + /// + /// [`AnchorChannelsConfig::per_channel_reserve_sats`]: crate::config::AnchorChannelsConfig::per_channel_reserve_sats pub fn open_announced_channel( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, push_to_counterparty_msat: Option, channel_config: Option, @@ -1233,6 +1236,8 @@ impl Node { /// Broadcasting the closing transactions will be omitted for Anchor channels if we trust the /// counterparty to broadcast for us (see [`AnchorChannelsConfig::trusted_peers_no_reserve`] /// for more information). + /// + /// [`AnchorChannelsConfig::trusted_peers_no_reserve`]: crate::config::AnchorChannelsConfig::trusted_peers_no_reserve pub fn force_close_channel( &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, reason: Option, @@ -1389,7 +1394,8 @@ impl Node { /// /// For example, you could retrieve all stored outbound payments as follows: /// ``` - /// # use ldk_node::{Builder, Config}; + /// # use ldk_node::Builder; + /// # use ldk_node::config::Config; /// # use ldk_node::payment::PaymentDirection; /// # use ldk_node::bitcoin::Network; /// # let mut config = Config::default(); diff --git a/src/uniffi_types.rs b/src/uniffi_types.rs index a66bcddea..c7d4320ee 100644 --- a/src/uniffi_types.rs +++ b/src/uniffi_types.rs @@ -10,6 +10,7 @@ // // Make sure to add any re-exported items that need to be used in uniffi below. 
+pub use crate::config::{default_config, AnchorChannelsConfig}; pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; pub use crate::payment::store::{LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus}; pub use crate::payment::{MaxTotalRoutingFeeLimit, QrPaymentResult, SendingParameters}; diff --git a/tests/common/mod.rs b/tests/common/mod.rs index a7cd87323..049f1e417 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -8,11 +8,10 @@ #![cfg(any(test, cln_test, vss_test))] #![allow(dead_code)] +use ldk_node::config::Config; use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; -use ldk_node::{ - Builder, Config, Event, LightningBalance, LogLevel, Node, NodeError, PendingSweepBalance, -}; +use ldk_node::{Builder, Event, LightningBalance, LogLevel, Node, NodeError, PendingSweepBalance}; use lightning::ln::msgs::SocketAddress; use lightning::ln::{PaymentHash, PaymentPreimage}; From ad60e072df2a8dfb3e2d5706580f85fe19a91263 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 8 Oct 2024 14:20:03 +0200 Subject: [PATCH 076/127] Prefactor: Move `ChannelConfig` to `config` .. to also expose it via the `config` module rather than at the top-level docs. --- src/config.rs | 118 +++++++++++++++++++++++++++++++++++++++++++ src/lib.rs | 3 +- src/types.rs | 119 +------------------------------------------- src/uniffi_types.rs | 2 +- 4 files changed, 121 insertions(+), 121 deletions(-) diff --git a/src/config.rs b/src/config.rs index 0fd55abdf..ea4a60f17 100644 --- a/src/config.rs +++ b/src/config.rs @@ -11,6 +11,8 @@ use crate::payment::SendingParameters; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; +use lightning::util::config::ChannelConfig as LdkChannelConfig; +use lightning::util::config::MaxDustHTLCExposure as LdkMaxDustHTLCExposure; use lightning::util::config::UserConfig; use lightning::util::logger::Level as LogLevel; @@ -300,6 +302,122 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig { user_config } +/// Options which apply on a per-channel basis and may change at runtime or based on negotiation +/// with our counterparty. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct ChannelConfig { + /// Amount (in millionths of a satoshi) charged per satoshi for payments forwarded outbound + /// over the channel. + /// This may be allowed to change at runtime in a later update, however doing so must result in + /// update messages sent to notify all nodes of our updated relay fee. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub forwarding_fee_proportional_millionths: u32, + /// Amount (in milli-satoshi) charged for payments forwarded outbound over the channel, in + /// excess of [`ChannelConfig::forwarding_fee_proportional_millionths`]. + /// This may be allowed to change at runtime in a later update, however doing so must result in + /// update messages sent to notify all nodes of our updated relay fee. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub forwarding_fee_base_msat: u32, + /// The difference in the CLTV value between incoming HTLCs and an outbound HTLC forwarded over + /// the channel this config applies to. + /// + /// Please refer to [`LdkChannelConfig`] for further details. 
+ pub cltv_expiry_delta: u16, + /// Limit our total exposure to potential loss to on-chain fees on close, including in-flight + /// HTLCs which are burned to fees as they are too small to claim on-chain and fees on + /// commitment transaction(s) broadcasted by our counterparty in excess of our own fee estimate. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub max_dust_htlc_exposure: MaxDustHTLCExposure, + /// The additional fee we're willing to pay to avoid waiting for the counterparty's + /// `to_self_delay` to reclaim funds. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub force_close_avoidance_max_fee_satoshis: u64, + /// If set, allows this channel's counterparty to skim an additional fee off this node's inbound + /// HTLCs. Useful for liquidity providers to offload on-chain channel costs to end users. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub accept_underpaying_htlcs: bool, +} + +impl From for ChannelConfig { + fn from(value: LdkChannelConfig) -> Self { + Self { + forwarding_fee_proportional_millionths: value.forwarding_fee_proportional_millionths, + forwarding_fee_base_msat: value.forwarding_fee_base_msat, + cltv_expiry_delta: value.cltv_expiry_delta, + max_dust_htlc_exposure: value.max_dust_htlc_exposure.into(), + force_close_avoidance_max_fee_satoshis: value.force_close_avoidance_max_fee_satoshis, + accept_underpaying_htlcs: value.accept_underpaying_htlcs, + } + } +} + +impl From for LdkChannelConfig { + fn from(value: ChannelConfig) -> Self { + Self { + forwarding_fee_proportional_millionths: value.forwarding_fee_proportional_millionths, + forwarding_fee_base_msat: value.forwarding_fee_base_msat, + cltv_expiry_delta: value.cltv_expiry_delta, + max_dust_htlc_exposure: value.max_dust_htlc_exposure.into(), + force_close_avoidance_max_fee_satoshis: value.force_close_avoidance_max_fee_satoshis, + accept_underpaying_htlcs: value.accept_underpaying_htlcs, + } + } +} + +impl Default for ChannelConfig { + fn default() -> Self { + LdkChannelConfig::default().into() + } +} + +/// Options for how to set the max dust exposure allowed on a channel. +/// +/// See [`LdkChannelConfig::max_dust_htlc_exposure`] for details. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum MaxDustHTLCExposure { + /// This sets a fixed limit on the total dust exposure in millisatoshis. + /// + /// Please refer to [`LdkMaxDustHTLCExposure`] for further details. + FixedLimit { + /// The fixed limit, in millisatoshis. + limit_msat: u64, + }, + /// This sets a multiplier on the feerate to determine the maximum allowed dust exposure. + /// + /// Please refer to [`LdkMaxDustHTLCExposure`] for further details. + FeeRateMultiplier { + /// The applied fee rate multiplier. 
+ multiplier: u64, + }, +} + +impl From for MaxDustHTLCExposure { + fn from(value: LdkMaxDustHTLCExposure) -> Self { + match value { + LdkMaxDustHTLCExposure::FixedLimitMsat(limit_msat) => Self::FixedLimit { limit_msat }, + LdkMaxDustHTLCExposure::FeeRateMultiplier(multiplier) => { + Self::FeeRateMultiplier { multiplier } + }, + } + } +} + +impl From for LdkMaxDustHTLCExposure { + fn from(value: MaxDustHTLCExposure) -> Self { + match value { + MaxDustHTLCExposure::FixedLimit { limit_msat } => Self::FixedLimitMsat(limit_msat), + MaxDustHTLCExposure::FeeRateMultiplier { multiplier } => { + Self::FeeRateMultiplier(multiplier) + }, + } + } +} + #[cfg(test)] mod tests { use std::str::FromStr; diff --git a/src/lib.rs b/src/lib.rs index a3782983c..e17a2980f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -106,7 +106,6 @@ pub use error::Error as NodeError; use error::Error; pub use event::Event; -pub use types::{ChannelConfig, MaxDustHTLCExposure}; pub use io::utils::generate_entropy_mnemonic; @@ -121,7 +120,7 @@ pub use builder::NodeBuilder as Builder; use chain::ChainSource; use config::{ - default_user_config, may_announce_channel, Config, NODE_ANN_BCAST_INTERVAL, + default_user_config, may_announce_channel, ChannelConfig, Config, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; diff --git a/src/types.rs b/src/types.rs index 5b28d99b8..9fae37e18 100644 --- a/src/types.rs +++ b/src/types.rs @@ -6,6 +6,7 @@ // accordance with one or both of these licenses. use crate::chain::ChainSource; +use crate::config::ChannelConfig; use crate::fee_estimator::OnchainFeeEstimator; use crate::logger::FilesystemLogger; use crate::message_handler::NodeCustomMessageHandler; @@ -20,8 +21,6 @@ use lightning::routing::gossip; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters}; use lightning::sign::InMemorySigner; -use lightning::util::config::ChannelConfig as LdkChannelConfig; -use lightning::util::config::MaxDustHTLCExposure as LdkMaxDustHTLCExposure; use lightning::util::persist::KVStore; use lightning::util::ser::{Readable, Writeable, Writer}; use lightning::util::sweep::OutputSweeper; @@ -349,119 +348,3 @@ pub struct PeerDetails { /// Indicates whether we currently have an active connection with the peer. pub is_connected: bool, } - -/// Options which apply on a per-channel basis and may change at runtime or based on negotiation -/// with our counterparty. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub struct ChannelConfig { - /// Amount (in millionths of a satoshi) charged per satoshi for payments forwarded outbound - /// over the channel. - /// This may be allowed to change at runtime in a later update, however doing so must result in - /// update messages sent to notify all nodes of our updated relay fee. - /// - /// Please refer to [`LdkChannelConfig`] for further details. - pub forwarding_fee_proportional_millionths: u32, - /// Amount (in milli-satoshi) charged for payments forwarded outbound over the channel, in - /// excess of [`ChannelConfig::forwarding_fee_proportional_millionths`]. - /// This may be allowed to change at runtime in a later update, however doing so must result in - /// update messages sent to notify all nodes of our updated relay fee. - /// - /// Please refer to [`LdkChannelConfig`] for further details. 
- pub forwarding_fee_base_msat: u32, - /// The difference in the CLTV value between incoming HTLCs and an outbound HTLC forwarded over - /// the channel this config applies to. - /// - /// Please refer to [`LdkChannelConfig`] for further details. - pub cltv_expiry_delta: u16, - /// Limit our total exposure to potential loss to on-chain fees on close, including in-flight - /// HTLCs which are burned to fees as they are too small to claim on-chain and fees on - /// commitment transaction(s) broadcasted by our counterparty in excess of our own fee estimate. - /// - /// Please refer to [`LdkChannelConfig`] for further details. - pub max_dust_htlc_exposure: MaxDustHTLCExposure, - /// The additional fee we're willing to pay to avoid waiting for the counterparty's - /// `to_self_delay` to reclaim funds. - /// - /// Please refer to [`LdkChannelConfig`] for further details. - pub force_close_avoidance_max_fee_satoshis: u64, - /// If set, allows this channel's counterparty to skim an additional fee off this node's inbound - /// HTLCs. Useful for liquidity providers to offload on-chain channel costs to end users. - /// - /// Please refer to [`LdkChannelConfig`] for further details. - pub accept_underpaying_htlcs: bool, -} - -impl From for ChannelConfig { - fn from(value: LdkChannelConfig) -> Self { - Self { - forwarding_fee_proportional_millionths: value.forwarding_fee_proportional_millionths, - forwarding_fee_base_msat: value.forwarding_fee_base_msat, - cltv_expiry_delta: value.cltv_expiry_delta, - max_dust_htlc_exposure: value.max_dust_htlc_exposure.into(), - force_close_avoidance_max_fee_satoshis: value.force_close_avoidance_max_fee_satoshis, - accept_underpaying_htlcs: value.accept_underpaying_htlcs, - } - } -} - -impl From for LdkChannelConfig { - fn from(value: ChannelConfig) -> Self { - Self { - forwarding_fee_proportional_millionths: value.forwarding_fee_proportional_millionths, - forwarding_fee_base_msat: value.forwarding_fee_base_msat, - cltv_expiry_delta: value.cltv_expiry_delta, - max_dust_htlc_exposure: value.max_dust_htlc_exposure.into(), - force_close_avoidance_max_fee_satoshis: value.force_close_avoidance_max_fee_satoshis, - accept_underpaying_htlcs: value.accept_underpaying_htlcs, - } - } -} - -impl Default for ChannelConfig { - fn default() -> Self { - LdkChannelConfig::default().into() - } -} - -/// Options for how to set the max dust exposure allowed on a channel. -/// -/// See [`LdkChannelConfig::max_dust_htlc_exposure`] for details. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum MaxDustHTLCExposure { - /// This sets a fixed limit on the total dust exposure in millisatoshis. - /// - /// Please refer to [`LdkMaxDustHTLCExposure`] for further details. - FixedLimit { - /// The fixed limit, in millisatoshis. - limit_msat: u64, - }, - /// This sets a multiplier on the feerate to determine the maximum allowed dust exposure. - /// - /// Please refer to [`LdkMaxDustHTLCExposure`] for further details. - FeeRateMultiplier { - /// The applied fee rate multiplier. 
- multiplier: u64, - }, -} - -impl From for MaxDustHTLCExposure { - fn from(value: LdkMaxDustHTLCExposure) -> Self { - match value { - LdkMaxDustHTLCExposure::FixedLimitMsat(limit_msat) => Self::FixedLimit { limit_msat }, - LdkMaxDustHTLCExposure::FeeRateMultiplier(multiplier) => { - Self::FeeRateMultiplier { multiplier } - }, - } - } -} - -impl From for LdkMaxDustHTLCExposure { - fn from(value: MaxDustHTLCExposure) -> Self { - match value { - MaxDustHTLCExposure::FixedLimit { limit_msat } => Self::FixedLimitMsat(limit_msat), - MaxDustHTLCExposure::FeeRateMultiplier { multiplier } => { - Self::FeeRateMultiplier(multiplier) - }, - } - } -} diff --git a/src/uniffi_types.rs b/src/uniffi_types.rs index c7d4320ee..15f01ead7 100644 --- a/src/uniffi_types.rs +++ b/src/uniffi_types.rs @@ -10,7 +10,7 @@ // // Make sure to add any re-exported items that need to be used in uniffi below. -pub use crate::config::{default_config, AnchorChannelsConfig}; +pub use crate::config::{default_config, AnchorChannelsConfig, MaxDustHTLCExposure}; pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; pub use crate::payment::store::{LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus}; pub use crate::payment::{MaxTotalRoutingFeeLimit, QrPaymentResult, SendingParameters}; From 94ff68f19acfe79782c701619f5d9c8c2da357c0 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 8 Oct 2024 12:44:06 +0200 Subject: [PATCH 077/127] Move Esplora-specific sync options to dedicated config .. as other upcoming chain sources might not have the same config options such as syncing intervals, or at least not with the same semantics. --- README.md | 2 +- .../lightningdevkit/ldknode/LibraryTest.kt | 4 +- bindings/ldk_node.udl | 11 ++-- bindings/python/src/ldk_node/test_ldk_node.py | 2 +- src/builder.rs | 49 ++++++++++++------ src/chain/mod.rs | 35 +++++++------ src/config.rs | 50 +++++++++++++------ src/lib.rs | 14 ++++-- src/uniffi_types.rs | 4 +- tests/common/mod.rs | 9 ++-- tests/integration_tests_cln.rs | 2 +- tests/integration_tests_rust.rs | 15 ++++-- tests/integration_tests_vss.rs | 4 +- 13 files changed, 130 insertions(+), 71 deletions(-) diff --git a/README.md b/README.md index df072999d..22ef1a1b2 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ use std::str::FromStr; fn main() { let mut builder = Builder::new(); builder.set_network(Network::Testnet); - builder.set_esplora_server("https://blockstream.info/testnet/api".to_string()); + builder.set_chain_source_esplora("https://blockstream.info/testnet/api".to_string(), None); builder.set_gossip_source_rgs("https://rapidsync.lightningdevkit.org/testnet/snapshot".to_string()); let node = builder.build().unwrap(); diff --git a/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt b/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt index e2bcd4c89..786534b84 100644 --- a/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt +++ b/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt @@ -130,9 +130,9 @@ class LibraryTest { println("Config 2: $config2") val builder1 = Builder.fromConfig(config1) - builder1.setEsploraServer(esploraEndpoint) + builder1.setChainSourceEsplora(esploraEndpoint, null) val builder2 = Builder.fromConfig(config2) - builder2.setEsploraServer(esploraEndpoint) + builder2.setChainSourceEsplora(esploraEndpoint, null) val node1 = builder1.build() val node2 = 
builder2.build() diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 817fd06ee..b4fc7ec79 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -9,9 +9,6 @@ dictionary Config { Network network; sequence? listening_addresses; NodeAlias? node_alias; - u64 onchain_wallet_sync_interval_secs; - u64 wallet_sync_interval_secs; - u64 fee_rate_cache_update_interval_secs; sequence trusted_peers_0conf; u64 probing_liquidity_limit_multiplier; LogLevel log_level; @@ -24,6 +21,12 @@ dictionary AnchorChannelsConfig { u64 per_channel_reserve_sats; }; +dictionary EsploraSyncConfig { + u64 onchain_wallet_sync_interval_secs; + u64 lightning_wallet_sync_interval_secs; + u64 fee_rate_cache_update_interval_secs; +}; + interface Builder { constructor(); [Name=from_config] @@ -32,7 +35,7 @@ interface Builder { [Throws=BuildError] void set_entropy_seed_bytes(sequence seed_bytes); void set_entropy_bip39_mnemonic(Mnemonic mnemonic, string? passphrase); - void set_esplora_server(string esplora_server_url); + void set_chain_source_esplora(string server_url, EsploraSyncConfig? config); void set_gossip_source_p2p(); void set_gossip_source_rgs(string rgs_server_url); void set_liquidity_source_lsps2(SocketAddress address, PublicKey node_id, string? token); diff --git a/bindings/python/src/ldk_node/test_ldk_node.py b/bindings/python/src/ldk_node/test_ldk_node.py index 4f2931440..82c493e32 100644 --- a/bindings/python/src/ldk_node/test_ldk_node.py +++ b/bindings/python/src/ldk_node/test_ldk_node.py @@ -84,7 +84,7 @@ def setup_node(tmp_dir, esplora_endpoint, listening_addresses): config = default_config() builder = Builder.from_config(config) builder.set_storage_dir_path(tmp_dir) - builder.set_esplora_server(esplora_endpoint) + builder.set_chain_source_esplora(esplora_endpoint, None) builder.set_network(DEFAULT_TEST_NETWORK) builder.set_listening_addresses(listening_addresses) return builder.build() diff --git a/src/builder.rs b/src/builder.rs index 1cfd46eed..43171db1f 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -6,7 +6,7 @@ // accordance with one or both of these licenses. use crate::chain::{ChainSource, DEFAULT_ESPLORA_SERVER_URL}; -use crate::config::{default_user_config, Config, WALLET_KEYS_SEED_LEN}; +use crate::config::{default_user_config, Config, EsploraSyncConfig, WALLET_KEYS_SEED_LEN}; use crate::connection::ConnectionManager; use crate::event::EventQueue; @@ -77,7 +77,7 @@ use std::time::SystemTime; #[derive(Debug, Clone)] enum ChainDataSourceConfig { - Esplora(String), + Esplora { server_url: String, sync_config: Option }, } #[derive(Debug, Clone)] @@ -237,8 +237,14 @@ impl NodeBuilder { } /// Configures the [`Node`] instance to source its chain data from the given Esplora server. - pub fn set_esplora_server(&mut self, esplora_server_url: String) -> &mut Self { - self.chain_data_source_config = Some(ChainDataSourceConfig::Esplora(esplora_server_url)); + /// + /// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more + /// information. + pub fn set_chain_source_esplora( + &mut self, server_url: String, sync_config: Option, + ) -> &mut Self { + self.chain_data_source_config = + Some(ChainDataSourceConfig::Esplora { server_url, sync_config }); self } @@ -464,8 +470,13 @@ impl ArcedNodeBuilder { } /// Configures the [`Node`] instance to source its chain data from the given Esplora server. 
- pub fn set_esplora_server(&self, esplora_server_url: String) { - self.inner.write().unwrap().set_esplora_server(esplora_server_url); + /// + /// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more + /// information. + pub fn set_chain_source_esplora( + &self, server_url: String, sync_config: Option, + ) { + self.inner.write().unwrap().set_chain_source_esplora(server_url, sync_config); } /// Configures the [`Node`] instance to source its gossip data from the Lightning peer-to-peer @@ -608,21 +619,27 @@ fn build_with_store_internal( )); let chain_source = match chain_data_source_config { - Some(ChainDataSourceConfig::Esplora(server_url)) => Arc::new(ChainSource::new_esplora( - server_url.clone(), - Arc::clone(&wallet), - Arc::clone(&fee_estimator), - Arc::clone(&tx_broadcaster), - Arc::clone(&kv_store), - Arc::clone(&config), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )), + Some(ChainDataSourceConfig::Esplora { server_url, sync_config }) => { + let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default()); + Arc::new(ChainSource::new_esplora( + server_url.clone(), + sync_config, + Arc::clone(&wallet), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), + Arc::clone(&config), + Arc::clone(&logger), + Arc::clone(&node_metrics), + )) + }, None => { // Default to Esplora client. let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); + let sync_config = EsploraSyncConfig::default(); Arc::new(ChainSource::new_esplora( server_url.clone(), + sync_config, Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 267513e49..7501c9809 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -6,8 +6,8 @@ // accordance with one or both of these licenses. use crate::config::{ - Config, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, - FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, + Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, + BDK_WALLET_SYNC_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, TX_BROADCAST_TIMEOUT_SECS, WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; @@ -96,6 +96,7 @@ impl WalletSyncStatus { pub(crate) enum ChainSource { Esplora { + sync_config: EsploraSyncConfig, esplora_client: EsploraAsyncClient, onchain_wallet: Arc, onchain_wallet_sync_status: Mutex, @@ -112,9 +113,10 @@ pub(crate) enum ChainSource { impl ChainSource { pub(crate) fn new_esplora( - server_url: String, onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, - logger: Arc, node_metrics: Arc>, + server_url: String, sync_config: EsploraSyncConfig, onchain_wallet: Arc, + fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, logger: Arc, + node_metrics: Arc>, ) -> Self { let mut client_builder = esplora_client::Builder::new(&server_url); client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); @@ -124,6 +126,7 @@ impl ChainSource { let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); Self::Esplora { + sync_config, esplora_client, onchain_wallet, onchain_wallet_sync_status, @@ -144,16 +147,17 @@ impl ChainSource { output_sweeper: Arc, ) { match self { - Self::Esplora { config, logger, .. } => { + Self::Esplora { sync_config, logger, .. 
} => { // Setup syncing intervals - let onchain_wallet_sync_interval_secs = - config.onchain_wallet_sync_interval_secs.max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); + let onchain_wallet_sync_interval_secs = sync_config + .onchain_wallet_sync_interval_secs + .max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); let mut onchain_wallet_sync_interval = tokio::time::interval(Duration::from_secs(onchain_wallet_sync_interval_secs)); onchain_wallet_sync_interval .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - let fee_rate_cache_update_interval_secs = config + let fee_rate_cache_update_interval_secs = sync_config .fee_rate_cache_update_interval_secs .max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); let mut fee_rate_update_interval = @@ -163,11 +167,12 @@ impl ChainSource { fee_rate_update_interval .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - let wallet_sync_interval_secs = - config.wallet_sync_interval_secs.max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); - let mut wallet_sync_interval = - tokio::time::interval(Duration::from_secs(wallet_sync_interval_secs)); - wallet_sync_interval + let lightning_wallet_sync_interval_secs = sync_config + .lightning_wallet_sync_interval_secs + .max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); + let mut lightning_wallet_sync_interval = + tokio::time::interval(Duration::from_secs(lightning_wallet_sync_interval_secs)); + lightning_wallet_sync_interval .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); // Start the syncing loop. @@ -186,7 +191,7 @@ impl ChainSource { _ = fee_rate_update_interval.tick() => { let _ = self.update_fee_rate_estimates().await; } - _ = wallet_sync_interval.tick() => { + _ = lightning_wallet_sync_interval.tick() => { let _ = self.sync_lightning_wallet( Arc::clone(&channel_manager), Arc::clone(&chain_monitor), diff --git a/src/config.rs b/src/config.rs index ea4a60f17..d82b64f32 100644 --- a/src/config.rs +++ b/src/config.rs @@ -121,18 +121,6 @@ pub struct Config { /// **Note**: We will only allow opening and accepting public channels if the `node_alias` and the /// `listening_addresses` are set. pub node_alias: Option, - /// The time in-between background sync attempts of the onchain wallet, in seconds. - /// - /// **Note:** A minimum of 10 seconds is always enforced. - pub onchain_wallet_sync_interval_secs: u64, - /// The time in-between background sync attempts of the LDK wallet, in seconds. - /// - /// **Note:** A minimum of 10 seconds is always enforced. - pub wallet_sync_interval_secs: u64, - /// The time in-between background update attempts to our fee rate cache, in seconds. - /// - /// **Note:** A minimum of 10 seconds is always enforced. - pub fee_rate_cache_update_interval_secs: u64, /// A list of peers that we allow to establish zero confirmation channels to us. 
/// /// **Note:** Allowing payments via zero-confirmation channels is potentially insecure if the @@ -182,9 +170,6 @@ impl Default for Config { log_dir_path: None, network: DEFAULT_NETWORK, listening_addresses: None, - onchain_wallet_sync_interval_secs: DEFAULT_BDK_WALLET_SYNC_INTERVAL_SECS, - wallet_sync_interval_secs: DEFAULT_LDK_WALLET_SYNC_INTERVAL_SECS, - fee_rate_cache_update_interval_secs: DEFAULT_FEE_RATE_CACHE_UPDATE_INTERVAL_SECS, trusted_peers_0conf: Vec::new(), probing_liquidity_limit_multiplier: DEFAULT_PROBING_LIQUIDITY_LIMIT_MULTIPLIER, log_level: DEFAULT_LOG_LEVEL, @@ -302,6 +287,41 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig { user_config } +/// Options related to syncing the Lightning and on-chain wallets via an Esplora backend. +/// +/// ### Defaults +/// +/// | Parameter | Value | +/// |----------------------------------------|--------------------| +/// | `onchain_wallet_sync_interval_secs` | 80 | +/// | `lightning_wallet_sync_interval_secs` | 30 | +/// | `fee_rate_cache_update_interval_secs` | 600 | +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct EsploraSyncConfig { + /// The time in-between background sync attempts of the onchain wallet, in seconds. + /// + /// **Note:** A minimum of 10 seconds is always enforced. + pub onchain_wallet_sync_interval_secs: u64, + /// The time in-between background sync attempts of the LDK wallet, in seconds. + /// + /// **Note:** A minimum of 10 seconds is always enforced. + pub lightning_wallet_sync_interval_secs: u64, + /// The time in-between background update attempts to our fee rate cache, in seconds. + /// + /// **Note:** A minimum of 10 seconds is always enforced. + pub fee_rate_cache_update_interval_secs: u64, +} + +impl Default for EsploraSyncConfig { + fn default() -> Self { + Self { + onchain_wallet_sync_interval_secs: DEFAULT_BDK_WALLET_SYNC_INTERVAL_SECS, + lightning_wallet_sync_interval_secs: DEFAULT_LDK_WALLET_SYNC_INTERVAL_SECS, + fee_rate_cache_update_interval_secs: DEFAULT_FEE_RATE_CACHE_UPDATE_INTERVAL_SECS, + } + } +} + /// Options which apply on a per-channel basis and may change at runtime or based on negotiation /// with our counterparty. #[derive(Copy, Clone, Debug, PartialEq, Eq)] diff --git a/src/lib.rs b/src/lib.rs index e17a2980f..42b99406a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -33,7 +33,7 @@ //! fn main() { //! let mut builder = Builder::new(); //! builder.set_network(Network::Testnet); -//! builder.set_esplora_server("https://blockstream.info/testnet/api".to_string()); +//! builder.set_chain_source_esplora("https://blockstream.info/testnet/api".to_string(), None); //! builder.set_gossip_source_rgs("https://rapidsync.lightningdevkit.org/testnet/snapshot".to_string()); //! //! let node = builder.build().unwrap(); @@ -1188,10 +1188,14 @@ impl Node { /// Manually sync the LDK and BDK wallets with the current chain state and update the fee rate /// cache. /// - /// **Note:** The wallets are regularly synced in the background, which is configurable via - /// [`Config::onchain_wallet_sync_interval_secs`] and [`Config::wallet_sync_interval_secs`]. - /// Therefore, using this blocking sync method is almost always redundant and should be avoided - /// where possible. + /// **Note:** The wallets are regularly synced in the background, which is configurable via the + /// respective config object, e.g., via + /// [`EsploraSyncConfig::onchain_wallet_sync_interval_secs`] and + /// [`EsploraSyncConfig::lightning_wallet_sync_interval_secs`]. 
Therefore, using this blocking + /// sync method is almost always redundant and should be avoided where possible. + /// + /// [`EsploraSyncConfig::onchain_wallet_sync_interval_secs`]: crate::config::EsploraSyncConfig::onchain_wallet_sync_interval_secs + /// [`EsploraSyncConfig::lightning_wallet_sync_interval_secs`]: crate::config::EsploraSyncConfig::lightning_wallet_sync_interval_secs pub fn sync_wallets(&self) -> Result<(), Error> { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { diff --git a/src/uniffi_types.rs b/src/uniffi_types.rs index 15f01ead7..894e5d739 100644 --- a/src/uniffi_types.rs +++ b/src/uniffi_types.rs @@ -10,7 +10,9 @@ // // Make sure to add any re-exported items that need to be used in uniffi below. -pub use crate::config::{default_config, AnchorChannelsConfig, MaxDustHTLCExposure}; +pub use crate::config::{ + default_config, AnchorChannelsConfig, EsploraSyncConfig, MaxDustHTLCExposure, +}; pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; pub use crate::payment::store::{LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus}; pub use crate::payment::{MaxTotalRoutingFeeLimit, QrPaymentResult, SendingParameters}; diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 049f1e417..9c712286a 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -8,7 +8,7 @@ #![cfg(any(test, cln_test, vss_test))] #![allow(dead_code)] -use ldk_node::config::Config; +use ldk_node::config::{Config, EsploraSyncConfig}; use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{Builder, Event, LightningBalance, LogLevel, Node, NodeError, PendingSweepBalance}; @@ -217,8 +217,6 @@ pub(crate) fn random_config(anchor_channels: bool) -> Config { } config.network = Network::Regtest; - config.onchain_wallet_sync_interval_secs = 100000; - config.wallet_sync_interval_secs = 100000; println!("Setting network: {}", config.network); let rand_dir = random_storage_path(); @@ -280,8 +278,11 @@ pub(crate) fn setup_two_nodes( pub(crate) fn setup_node(electrsd: &ElectrsD, config: Config) -> TestNode { let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + let mut sync_config = EsploraSyncConfig::default(); + sync_config.onchain_wallet_sync_interval_secs = 100000; + sync_config.lightning_wallet_sync_interval_secs = 100000; setup_builder!(builder, config); - builder.set_esplora_server(esplora_url.clone()); + builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); let test_sync_store = Arc::new(TestSyncStore::new(config.storage_dir_path.into())); let node = builder.build_with_store(test_sync_store).unwrap(); node.start().unwrap(); diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index 13b5c44c6..c3ade673f 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -45,7 +45,7 @@ fn test_cln() { // Setup LDK Node let config = common::random_config(true); let mut builder = Builder::from_config(config); - builder.set_esplora_server("http://127.0.0.1:3002".to_string()); + builder.set_chain_source_esplora("http://127.0.0.1:3002".to_string(), None); let node = builder.build().unwrap(); node.start().unwrap(); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 6d33e80c6..13f3ab0be 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -14,6 +14,7 @@ use common::{ setup_node, setup_two_nodes, wait_for_tx, TestSyncStore, }; 
+use ldk_node::config::EsploraSyncConfig; use ldk_node::payment::{PaymentKind, QrPaymentResult, SendingParameters}; use ldk_node::{Builder, Event, NodeError}; @@ -102,8 +103,11 @@ fn multi_hop_sending() { let mut nodes = Vec::new(); for _ in 0..5 { let config = random_config(true); + let mut sync_config = EsploraSyncConfig::default(); + sync_config.onchain_wallet_sync_interval_secs = 100000; + sync_config.lightning_wallet_sync_interval_secs = 100000; setup_builder!(builder, config); - builder.set_esplora_server(esplora_url.clone()); + builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); let node = builder.build().unwrap(); node.start().unwrap(); nodes.push(node); @@ -182,7 +186,7 @@ fn connect_to_public_testnet_esplora() { let mut config = random_config(true); config.network = Network::Testnet; setup_builder!(builder, config); - builder.set_esplora_server("https://blockstream.info/testnet/api".to_string()); + builder.set_chain_source_esplora("https://blockstream.info/testnet/api".to_string(), None); let node = builder.build().unwrap(); node.start().unwrap(); node.stop().unwrap(); @@ -198,8 +202,11 @@ fn start_stop_reinit() { let test_sync_store: Arc = Arc::new(TestSyncStore::new(config.storage_dir_path.clone().into())); + let mut sync_config = EsploraSyncConfig::default(); + sync_config.onchain_wallet_sync_interval_secs = 100000; + sync_config.lightning_wallet_sync_interval_secs = 100000; setup_builder!(builder, config); - builder.set_esplora_server(esplora_url.clone()); + builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); let node = builder.build_with_store(Arc::clone(&test_sync_store)).unwrap(); node.start().unwrap(); @@ -236,7 +243,7 @@ fn start_stop_reinit() { drop(node); setup_builder!(builder, config); - builder.set_esplora_server(esplora_url.clone()); + builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); let reinitialized_node = builder.build_with_store(Arc::clone(&test_sync_store)).unwrap(); reinitialized_node.start().unwrap(); diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index c572fbcd8..483902375 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -18,7 +18,7 @@ fn channel_full_cycle_with_vss_store() { let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); let config_a = common::random_config(true); let mut builder_a = Builder::from_config(config_a); - builder_a.set_esplora_server(esplora_url.clone()); + builder_a.set_chain_source_esplora(esplora_url.clone(), None); let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); let node_a = builder_a.build_with_vss_store(vss_base_url.clone(), "node_1_store".to_string()).unwrap(); @@ -27,7 +27,7 @@ fn channel_full_cycle_with_vss_store() { println!("\n== Node B =="); let config_b = common::random_config(true); let mut builder_b = Builder::from_config(config_b); - builder_b.set_esplora_server(esplora_url); + builder_b.set_chain_source_esplora(esplora_url.clone(), None); let node_b = builder_b.build_with_vss_store(vss_base_url, "node_2_store".to_string()).unwrap(); node_b.start().unwrap(); From 61673b144f6fc45947fd3405d73f0451f6d8dffb Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:03:08 -0700 Subject: [PATCH 078/127] Handle payment_store update failure in PaymentClaimable. 
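Instead of unwrapping (and panicking) when a `PaymentStore` update fails while handling `PaymentClaimable`, the handler now logs the error and returns LDK's `ReplayEvent`, so the background processor hands the event back to us and retries it later rather than aborting the node. A minimal, self-contained sketch of the pattern (simplified stand-in types, not the actual ldk-node internals):

```rust
// Sketch only: `ReplayEvent` and the store error below are simplified stand-ins.
#[derive(Debug)]
struct ReplayEvent();

#[derive(Debug)]
enum StoreError {
    Unavailable,
}

// Pretend persistence call that may fail transiently.
fn update_payment_store(succeed: bool) -> Result<(), StoreError> {
    if succeed {
        Ok(())
    } else {
        Err(StoreError::Unavailable)
    }
}

// Rather than `unwrap_or_else(|e| panic!(..))`, surface the failure so the
// event is replayed on a later pass.
fn handle_payment_claimable(succeed: bool) -> Result<(), ReplayEvent> {
    match update_payment_store(succeed) {
        Ok(_) => Ok(()),
        Err(e) => {
            eprintln!("Failed to access payment store: {:?}", e);
            Err(ReplayEvent())
        },
    }
}

fn main() {
    assert!(handle_payment_claimable(true).is_ok());
    assert!(handle_payment_claimable(false).is_err());
}
```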
--- src/event.rs | 49 +++++++++++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 20 deletions(-) diff --git a/src/event.rs b/src/event.rs index 8d732d21a..e157acadb 100644 --- a/src/event.rs +++ b/src/event.rs @@ -498,11 +498,13 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); - return Ok(()); + match self.payment_store.update(&update) { + Ok(_) => return Ok(()), + Err(e) => { + log_error!(self.logger, "Failed to access payment store: {}", e); + return Err(ReplayEvent()) + }, + }; } if info.status == PaymentStatus::Succeeded @@ -520,11 +522,13 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); - return Ok(()); + match self.payment_store.update(&update) { + Ok(_) => return Ok(()), + Err(e) => { + log_error!(self.logger, "Failed to access payment store: {}", e); + return Err(ReplayEvent()) + }, + }; } let max_total_opening_fee_msat = match info.kind { @@ -559,11 +563,13 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); - return Ok(()); + match self.payment_store.update(&update) { + Ok(_) => return Ok(()), + Err(e) => { + log_error!(self.logger, "Failed to access payment store: {}", e); + return Err(ReplayEvent()) + }, + }; } // If this is known by the store but ChannelManager doesn't know the preimage, @@ -715,10 +721,13 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); + match self.payment_store.update(&update) { + Ok(_) => return Ok(()), + Err(e) => { + log_error!(self.logger, "Failed to access payment store: {}", e); + return Err(ReplayEvent()) + }, + }; } }, LdkEvent::PaymentClaimed { @@ -796,7 +805,7 @@ where payment_id, e ); - panic!("Failed to access payment store"); + return Err(ReplayEvent()); }, } From 7e93628f33ed7da4f4e1cab14b5af229c66633d7 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:08:46 -0700 Subject: [PATCH 079/127] Handle payment_store update failure in PaymentFailed. 
--- src/event.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/event.rs b/src/event.rs index e157acadb..c43847137 100644 --- a/src/event.rs +++ b/src/event.rs @@ -886,10 +886,13 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); + match self.payment_store.update(&update) { + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to access payment store: {}", e); + return Err(ReplayEvent()); + }, + }; self.event_queue .add_event(Event::PaymentFailed { payment_id: Some(payment_id), From 07c34ef43c1c68b0eae2e9bcff476caabece70da Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:10:17 -0700 Subject: [PATCH 080/127] Handle payment_store update failure in PaymentSent. --- src/event.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/event.rs b/src/event.rs index c43847137..8506ba240 100644 --- a/src/event.rs +++ b/src/event.rs @@ -841,10 +841,13 @@ where ..PaymentDetailsUpdate::new(payment_id) }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); + match self.payment_store.update(&update) { + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to access payment store: {}", e); + return Err(ReplayEvent()); + }, + }; self.payment_store.get(&payment_id).map(|payment| { log_info!( From bf7436540036ccb66561e67522e188ce35197938 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:20:19 -0700 Subject: [PATCH 081/127] Handle event_queue push failure in PaymentClaimable. 
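For context, the event queue written to here is what `Node::next_event` / `Node::wait_next_event` serve to the user, so a failed push must not be dropped silently; returning `ReplayEvent` lets LDK replay the event until it actually lands in the queue. A consumer-side sketch (illustrative only; the names follow ldk-node's public API, but this snippet is not part of the patch):

```rust
// Illustrative consumer loop: process whatever made it into the event queue
// and acknowledge each event only after it was handled.
fn drain_events(node: &ldk_node::Node) {
    while let Some(event) = node.next_event() {
        match event {
            ldk_node::Event::PaymentClaimable { payment_id, claimable_amount_msat, .. } => {
                println!("Claimable payment {:?} for {} msat", payment_id, claimable_amount_msat);
            },
            other => println!("Unhandled event: {:?}", other),
        }
        // Mark the event as handled so the next one can be served.
        let _ = node.event_handled();
    }
}
```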
--- src/event.rs | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/src/event.rs b/src/event.rs index 8506ba240..066faa817 100644 --- a/src/event.rs +++ b/src/event.rs @@ -502,7 +502,7 @@ where Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to access payment store: {}", e); - return Err(ReplayEvent()) + return Err(ReplayEvent()); }, }; } @@ -526,7 +526,7 @@ where Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to access payment store: {}", e); - return Err(ReplayEvent()) + return Err(ReplayEvent()); }, }; } @@ -567,7 +567,7 @@ where Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to access payment store: {}", e); - return Err(ReplayEvent()) + return Err(ReplayEvent()); }, }; } @@ -583,22 +583,23 @@ where "We would have registered the preimage if we knew" ); - self.event_queue - .add_event(Event::PaymentClaimable { - payment_id, - payment_hash, - claimable_amount_msat: amount_msat, - claim_deadline, - }) - .unwrap_or_else(|e| { + let event = Event::PaymentClaimable { + payment_id, + payment_hash, + claimable_amount_msat: amount_msat, + claim_deadline, + }; + match self.event_queue.add_event(event) { + Ok(_) => return Ok(()), + Err(e) => { log_error!( self.logger, "Failed to push to event queue: {}", e ); - panic!("Failed to push to event queue"); - }); - return Ok(()); + return Err(ReplayEvent()); + }, + }; } }, _ => {}, @@ -725,7 +726,7 @@ where Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to access payment store: {}", e); - return Err(ReplayEvent()) + return Err(ReplayEvent()); }, }; } From 6eebd00d2bda6177da8c7ffb8653317c29a5d2e2 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:21:39 -0700 Subject: [PATCH 082/127] Handle event_queue push failure in PaymentClaimed. --- src/event.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/event.rs b/src/event.rs index 066faa817..0accb6b88 100644 --- a/src/event.rs +++ b/src/event.rs @@ -810,16 +810,18 @@ where }, } - self.event_queue - .add_event(Event::PaymentReceived { - payment_id: Some(payment_id), - payment_hash, - amount_msat, - }) - .unwrap_or_else(|e| { + let event = Event::PaymentReceived { + payment_id: Some(payment_id), + payment_hash, + amount_msat, + }; + match self.event_queue.add_event(event) { + Ok(_) => return Ok(()), + Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); - panic!("Failed to push to event queue"); - }); + return Err(ReplayEvent()); + }, + }; }, LdkEvent::PaymentSent { payment_id, From 374dd60e93c07621f113b452bcf323c7e27af957 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:23:02 -0700 Subject: [PATCH 083/127] Handle event_queue push failure in PaymentSent. 
--- src/event.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/event.rs b/src/event.rs index 0accb6b88..952018ccc 100644 --- a/src/event.rs +++ b/src/event.rs @@ -867,17 +867,19 @@ where hex_utils::to_string(&payment_preimage.0) ); }); + let event = Event::PaymentSuccessful { + payment_id: Some(payment_id), + payment_hash, + fee_paid_msat, + }; - self.event_queue - .add_event(Event::PaymentSuccessful { - payment_id: Some(payment_id), - payment_hash, - fee_paid_msat, - }) - .unwrap_or_else(|e| { + match self.event_queue.add_event(event) { + Ok(_) => return Ok(()), + Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); - panic!("Failed to push to event queue"); - }); + return Err(ReplayEvent()); + }, + }; }, LdkEvent::PaymentFailed { payment_id, payment_hash, reason, .. } => { log_info!( From 8fa90f021b0e7d837a52b4e30cb81e822cd4e98e Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:24:13 -0700 Subject: [PATCH 084/127] Handle event_queue push failure in PaymentFailed. --- src/event.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/event.rs b/src/event.rs index 952018ccc..388dd90a6 100644 --- a/src/event.rs +++ b/src/event.rs @@ -901,16 +901,16 @@ where return Err(ReplayEvent()); }, }; - self.event_queue - .add_event(Event::PaymentFailed { - payment_id: Some(payment_id), - payment_hash, - reason, - }) - .unwrap_or_else(|e| { + + let event = + Event::PaymentFailed { payment_id: Some(payment_id), payment_hash, reason }; + match self.event_queue.add_event(event) { + Ok(_) => return Ok(()), + Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); - panic!("Failed to push to event queue"); - }); + return Err(ReplayEvent()); + }, + }; }, LdkEvent::PaymentPathSuccessful { .. } => {}, From b7beff0f5affdc846036ea014190c47674170cec Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:26:33 -0700 Subject: [PATCH 085/127] Handle event_queue push failure in ChannelPending. --- src/event.rs | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/src/event.rs b/src/event.rs index 388dd90a6..cc1a13367 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1131,18 +1131,22 @@ where channel_id, counterparty_node_id, ); - self.event_queue - .add_event(Event::ChannelPending { - channel_id, - user_channel_id: UserChannelId(user_channel_id), - former_temporary_channel_id: former_temporary_channel_id.unwrap(), - counterparty_node_id, - funding_txo, - }) - .unwrap_or_else(|e| { + + let event = Event::ChannelPending { + channel_id, + user_channel_id: UserChannelId(user_channel_id), + former_temporary_channel_id: former_temporary_channel_id.unwrap(), + counterparty_node_id, + funding_txo, + }; + match self.event_queue.add_event(event) { + Ok(_) => {}, + Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); - panic!("Failed to push to event queue"); - }); + return Err(ReplayEvent()); + }, + }; + let network_graph = self.network_graph.read_only(); let channels = self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); From 8e6e9357e6d865188cca618cdcbf91493dc1d538 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:27:32 -0700 Subject: [PATCH 086/127] Handle event_queue push failure in ChannelReady. 
--- src/event.rs | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/src/event.rs b/src/event.rs index cc1a13367..568f44762 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1188,16 +1188,19 @@ where channel_id, counterparty_node_id, ); - self.event_queue - .add_event(Event::ChannelReady { - channel_id, - user_channel_id: UserChannelId(user_channel_id), - counterparty_node_id: Some(counterparty_node_id), - }) - .unwrap_or_else(|e| { + + let event = Event::ChannelReady { + channel_id, + user_channel_id: UserChannelId(user_channel_id), + counterparty_node_id: Some(counterparty_node_id), + }; + match self.event_queue.add_event(event) { + Ok(_) => {}, + Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); - panic!("Failed to push to event queue"); - }); + return Err(ReplayEvent()); + }, + }; }, LdkEvent::ChannelClosed { channel_id, From 7befec75c9b49957c22e77e638316d8b29f4e07d Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:30:07 -0700 Subject: [PATCH 087/127] Handle event_queue push failure in ChannelClosed. --- src/event.rs | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/event.rs b/src/event.rs index 568f44762..f84f8466c 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1210,17 +1210,21 @@ where .. } => { log_info!(self.logger, "Channel {} closed due to: {}", channel_id, reason); - self.event_queue - .add_event(Event::ChannelClosed { - channel_id, - user_channel_id: UserChannelId(user_channel_id), - counterparty_node_id, - reason: Some(reason), - }) - .unwrap_or_else(|e| { + + let event = Event::ChannelClosed { + channel_id, + user_channel_id: UserChannelId(user_channel_id), + counterparty_node_id, + reason: Some(reason), + }; + + match self.event_queue.add_event(event) { + Ok(_) => {}, + Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); - panic!("Failed to push to event queue"); - }); + return Err(ReplayEvent()); + }, + }; }, LdkEvent::DiscardFunding { .. } => {}, LdkEvent::HTLCIntercepted { .. } => {}, From 5594560dcd9fe746b9accb749bc2a0e5d312a79a Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:36:30 -0700 Subject: [PATCH 088/127] Handle output tracking failure in SpendableOutputs. --- src/event.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/event.rs b/src/event.rs index f84f8466c..3535f6390 100644 --- a/src/event.rs +++ b/src/event.rs @@ -935,12 +935,13 @@ where } }, LdkEvent::SpendableOutputs { outputs, channel_id } => { - self.output_sweeper - .track_spendable_outputs(outputs, channel_id, true, None) - .unwrap_or_else(|_| { + match self.output_sweeper.track_spendable_outputs(outputs, channel_id, true, None) { + Ok(_) => return Ok(()), + Err(_) => { log_error!(self.logger, "Failed to track spendable outputs"); - panic!("Failed to track spendable outputs"); - }); + return Err(ReplayEvent()); + }, + }; }, LdkEvent::OpenChannelRequest { temporary_channel_id, From 2c824ffa564ca7f2b3707b279dd3add5cc34c475 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Wed, 9 Oct 2024 17:29:45 -0700 Subject: [PATCH 089/127] Retry more aggressively in VssStore. Since a failed persistence might cause LDK to panic. 
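For a rough sense of what the new parameters buy us: assuming the policy roughly doubles the delay per attempt (an assumption about `ExponentialBackoffRetryPolicy`, not something stated in this patch), the worst-case retry budget grows from well under a second to roughly ten seconds, bounded by the 15s total-delay cap:

```rust
// Back-of-the-envelope estimate of the new retry budget. The doubling factor
// is an assumption about the backoff policy, not taken from this patch.
fn main() {
    let base_ms: u64 = 10;
    let max_attempts: u32 = 10;
    let max_total_ms: u64 = 15_000;

    let mut total_ms: u64 = 0;
    for attempt in 0..max_attempts {
        total_ms += base_ms << attempt; // 10ms, 20ms, 40ms, ...
    }
    total_ms = total_ms.min(max_total_ms);

    // ~10.2s of cumulative delay (plus jitter), versus roughly 0.7s with the
    // previous 100ms base / 3 attempts -- hence "retry more aggressively".
    println!("worst-case cumulative backoff: ~{} ms", total_ms);
}
```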
--- src/io/vss_store.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 474f7dbc7..470f3462c 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -46,10 +46,10 @@ impl VssStore { pub(crate) fn new(base_url: String, store_id: String, data_encryption_key: [u8; 32]) -> Self { let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); let storable_builder = StorableBuilder::new(data_encryption_key, RandEntropySource); - let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(100)) - .with_max_attempts(3) - .with_max_total_delay(Duration::from_secs(2)) - .with_max_jitter(Duration::from_millis(50)) + let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) + .with_max_attempts(10) + .with_max_total_delay(Duration::from_secs(15)) + .with_max_jitter(Duration::from_millis(10)) .skip_retry_on_error(Box::new(|e: &VssError| { matches!( e, From 0bec57936c9932a0adbdc2579dac5a5c6bea3997 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 9 Oct 2024 11:26:05 +0200 Subject: [PATCH 090/127] Implement `Listen` for `Wallet` .. which we'll use to feed blocks to it in following commits. --- src/wallet/mod.rs | 63 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 30da1682d..50f096e36 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -7,12 +7,13 @@ use persist::KVStoreWalletPersister; -use crate::logger::{log_error, log_info, log_trace, Logger}; +use crate::logger::{log_debug, log_error, log_info, log_trace, Logger}; use crate::fee_estimator::{ConfirmationTarget, FeeEstimator}; use crate::Error; use lightning::chain::chaininterface::BroadcasterInterface; +use lightning::chain::Listen; use lightning::events::bump_transaction::{Utxo, WalletSource}; use lightning::ln::msgs::{DecodeError, UnsignedGossipMessage}; @@ -285,6 +286,66 @@ where } } +impl Listen for Wallet +where + B::Target: BroadcasterInterface, + E::Target: FeeEstimator, + L::Target: Logger, +{ + fn filtered_block_connected( + &self, _header: &bitcoin::block::Header, + _txdata: &lightning::chain::transaction::TransactionData, _height: u32, + ) { + debug_assert!(false, "Syncing filtered blocks is currently not supported"); + // As far as we can tell this would be a no-op anyways as we don't have to tell BDK about + // the header chain of intermediate blocks. According to the BDK team, it's sufficient to + // only connect full blocks starting from the last point of disagreement. 
+ } + + fn block_connected(&self, block: &bitcoin::Block, height: u32) { + let mut locked_wallet = self.inner.lock().unwrap(); + + let pre_checkpoint = locked_wallet.latest_checkpoint(); + if pre_checkpoint.height() != height - 1 + || pre_checkpoint.hash() != block.header.prev_blockhash + { + log_debug!( + self.logger, + "Detected reorg while applying a connected block to on-chain wallet: new block with hash {} at height {}", + block.header.block_hash(), + height + ); + } + + match locked_wallet.apply_block(block, height) { + Ok(()) => (), + Err(e) => { + log_error!( + self.logger, + "Failed to apply connected block to on-chain wallet: {}", + e + ); + return; + }, + }; + + let mut locked_persister = self.persister.lock().unwrap(); + match locked_wallet.persist(&mut locked_persister) { + Ok(_) => (), + Err(e) => { + log_error!(self.logger, "Failed to persist on-chain wallet: {}", e); + return; + }, + }; + } + + fn block_disconnected(&self, _header: &bitcoin::block::Header, _height: u32) { + // This is a no-op as we don't have to tell BDK about disconnections. According to the BDK + // team, it's sufficient in case of a reorg to always connect blocks starting from the last + // point of disagreement. + } +} + impl WalletSource for Wallet where B::Target: BroadcasterInterface, From c59d781461e5d661ccf3c5966755600e8068f743 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 9 Oct 2024 13:51:13 +0200 Subject: [PATCH 091/127] Add `ChainSource::BitcoindRpc` variant and basic client struct --- Cargo.toml | 2 ++ src/chain/bitcoind_rpc.rs | 53 +++++++++++++++++++++++++++++++++++++++ src/chain/mod.rs | 41 ++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+) create mode 100644 src/chain/bitcoind_rpc.rs diff --git a/Cargo.toml b/Cargo.toml index 3d9310ec6..01ce08d73 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,6 +34,7 @@ lightning-net-tokio = { version = "0.0.125" } lightning-persister = { version = "0.0.125" } lightning-background-processor = { version = "0.0.125", features = ["futures"] } lightning-rapid-gossip-sync = { version = "0.0.125" } +lightning-block-sync = { version = "0.0.125", features = ["rpc-client", "tokio"] } lightning-transaction-sync = { version = "0.0.125", features = ["esplora-async-https", "time"] } lightning-liquidity = { version = "0.1.0-alpha.6", features = ["std"] } @@ -65,6 +66,7 @@ bitcoin = "0.32.2" bip39 = "2.0.0" bip21 = { version = "0.5", features = ["std"], default-features = false } +base64 = { version = "0.22.1", default-features = false, features = ["std"] } rand = "0.8.5" chrono = { version = "0.4", default-features = false, features = ["clock"] } tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } diff --git a/src/chain/bitcoind_rpc.rs b/src/chain/bitcoind_rpc.rs new file mode 100644 index 000000000..0abc834bc --- /dev/null +++ b/src/chain/bitcoind_rpc.rs @@ -0,0 +1,53 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ +use lightning_block_sync::http::HttpEndpoint; +use lightning_block_sync::rpc::RpcClient; +use lightning_block_sync::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource}; + +use bitcoin::BlockHash; + +use base64::prelude::{Engine, BASE64_STANDARD}; + +use std::sync::Arc; + +pub struct BitcoindRpcClient { + rpc_client: Arc, +} + +impl BitcoindRpcClient { + pub(crate) fn new(host: String, port: u16, rpc_user: String, rpc_password: String) -> Self { + let http_endpoint = HttpEndpoint::for_host(host.clone()).with_port(port); + let rpc_credentials = + BASE64_STANDARD.encode(format!("{}:{}", rpc_user.clone(), rpc_password.clone())); + + let rpc_client = Arc::new( + RpcClient::new(&rpc_credentials, http_endpoint) + .expect("RpcClient::new is actually infallible"), + ); + + Self { rpc_client } + } +} + +impl BlockSource for BitcoindRpcClient { + fn get_header<'a>( + &'a self, header_hash: &'a BlockHash, height_hint: Option, + ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { + Box::pin(async move { self.rpc_client.get_header(header_hash, height_hint).await }) + } + + fn get_block<'a>( + &'a self, header_hash: &'a BlockHash, + ) -> AsyncBlockSourceResult<'a, BlockData> { + Box::pin(async move { self.rpc_client.get_block(header_hash).await }) + } + + fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<(BlockHash, Option)> { + Box::pin(async move { self.rpc_client.get_best_block().await }) + } +} diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 7501c9809..3b832bf3d 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -5,6 +5,8 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +mod bitcoind_rpc; + use crate::config::{ Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, @@ -35,6 +37,8 @@ use std::collections::HashMap; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use self::bitcoind_rpc::BitcoindRpcClient; + // The default Esplora server we're using. pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; @@ -109,6 +113,16 @@ pub(crate) enum ChainSource { logger: Arc, node_metrics: Arc>, }, + BitcoindRpc { + bitcoind_rpc_client: Arc, + onchain_wallet: Arc, + fee_estimator: Arc, + tx_broadcaster: Arc, + kv_store: Arc, + config: Arc, + logger: Arc, + node_metrics: Arc>, + }, } impl ChainSource { @@ -141,6 +155,26 @@ impl ChainSource { } } + pub(crate) fn new_bitcoind_rpc( + host: String, port: u16, rpc_user: String, rpc_password: String, + onchain_wallet: Arc, fee_estimator: Arc, + tx_broadcaster: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, + ) -> Self { + let bitcoind_rpc_client = + Arc::new(BitcoindRpcClient::new(host, port, rpc_user, rpc_password)); + Self::BitcoindRpc { + bitcoind_rpc_client, + onchain_wallet, + fee_estimator, + tx_broadcaster, + kv_store, + config, + logger, + node_metrics, + } + } + pub(crate) async fn continuously_sync_wallets( &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, channel_manager: Arc, chain_monitor: Arc, @@ -201,6 +235,7 @@ impl ChainSource { } } }, + Self::BitcoindRpc { .. } => todo!(), } } @@ -319,6 +354,7 @@ impl ChainSource { res }, + Self::BitcoindRpc { .. } => todo!(), } } @@ -411,6 +447,7 @@ impl ChainSource { res }, + Self::BitcoindRpc { .. 
} => todo!(), } } @@ -506,6 +543,7 @@ impl ChainSource { Ok(()) }, + Self::BitcoindRpc { .. } => todo!(), } } @@ -582,6 +620,7 @@ impl ChainSource { } } }, + Self::BitcoindRpc { .. } => todo!(), } } } @@ -590,11 +629,13 @@ impl Filter for ChainSource { fn register_tx(&self, txid: &bitcoin::Txid, script_pubkey: &bitcoin::Script) { match self { Self::Esplora { tx_sync, .. } => tx_sync.register_tx(txid, script_pubkey), + Self::BitcoindRpc { .. } => (), } } fn register_output(&self, output: lightning::chain::WatchedOutput) { match self { Self::Esplora { tx_sync, .. } => tx_sync.register_output(output), + Self::BitcoindRpc { .. } => (), } } } From ab5a620485b1d4bbac1b22a965cdb4ec6552bc07 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 9 Oct 2024 16:23:14 +0200 Subject: [PATCH 092/127] Add initial polling logic We first initialize by synchronizing all `Listen` implementations, and then head into a loop continuously polling our RPC `BlockSource`. We also implement a `BoundedHeaderCache` to limit in-memory footprint. --- src/chain/bitcoind_rpc.rs | 82 +++++++++++- src/chain/mod.rs | 257 +++++++++++++++++++++++++++++++++++++- src/lib.rs | 18 ++- src/wallet/mod.rs | 7 +- 4 files changed, 355 insertions(+), 9 deletions(-) diff --git a/src/chain/bitcoind_rpc.rs b/src/chain/bitcoind_rpc.rs index 0abc834bc..30f1bd057 100644 --- a/src/chain/bitcoind_rpc.rs +++ b/src/chain/bitcoind_rpc.rs @@ -5,14 +5,22 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use crate::types::{ChainMonitor, ChannelManager, Sweeper, Wallet}; + +use lightning::chain::Listen; + use lightning_block_sync::http::HttpEndpoint; +use lightning_block_sync::poll::ValidatedBlockHeader; use lightning_block_sync::rpc::RpcClient; -use lightning_block_sync::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource}; +use lightning_block_sync::{ + AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, Cache, +}; use bitcoin::BlockHash; use base64::prelude::{Engine, BASE64_STANDARD}; +use std::collections::{HashMap, VecDeque}; use std::sync::Arc; pub struct BitcoindRpcClient { @@ -51,3 +59,75 @@ impl BlockSource for BitcoindRpcClient { Box::pin(async move { self.rpc_client.get_best_block().await }) } } + +const MAX_HEADER_CACHE_ENTRIES: usize = 100; + +pub(crate) struct BoundedHeaderCache { + header_map: HashMap, + recently_seen: VecDeque, +} + +impl BoundedHeaderCache { + pub(crate) fn new() -> Self { + let header_map = HashMap::new(); + let recently_seen = VecDeque::new(); + Self { header_map, recently_seen } + } +} + +impl Cache for BoundedHeaderCache { + fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader> { + self.header_map.get(block_hash) + } + + fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { + self.recently_seen.push_back(block_hash); + self.header_map.insert(block_hash, block_header); + + if self.header_map.len() >= MAX_HEADER_CACHE_ENTRIES { + // Keep dropping old entries until we've actually removed a header entry. 
+ while let Some(oldest_entry) = self.recently_seen.pop_front() { + if self.header_map.remove(&oldest_entry).is_some() { + break; + } + } + } + } + + fn block_disconnected(&mut self, block_hash: &BlockHash) -> Option { + self.recently_seen.retain(|e| e != block_hash); + self.header_map.remove(block_hash) + } +} + +pub(crate) struct ChainListener { + pub(crate) onchain_wallet: Arc, + pub(crate) channel_manager: Arc, + pub(crate) chain_monitor: Arc, + pub(crate) output_sweeper: Arc, +} + +impl Listen for ChainListener { + fn filtered_block_connected( + &self, header: &bitcoin::block::Header, + txdata: &lightning::chain::transaction::TransactionData, height: u32, + ) { + self.onchain_wallet.filtered_block_connected(header, txdata, height); + self.channel_manager.filtered_block_connected(header, txdata, height); + self.chain_monitor.filtered_block_connected(header, txdata, height); + self.output_sweeper.filtered_block_connected(header, txdata, height); + } + fn block_connected(&self, block: &bitcoin::Block, height: u32) { + self.onchain_wallet.block_connected(block, height); + self.channel_manager.block_connected(block, height); + self.chain_monitor.block_connected(block, height); + self.output_sweeper.block_connected(block, height); + } + + fn block_disconnected(&self, header: &bitcoin::block::Header, height: u32) { + self.onchain_wallet.block_disconnected(header, height); + self.channel_manager.block_disconnected(header, height); + self.chain_monitor.block_disconnected(header, height); + self.output_sweeper.block_disconnected(header, height); + } +} diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 3b832bf3d..2668311d8 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -7,6 +7,7 @@ mod bitcoind_rpc; +use crate::chain::bitcoind_rpc::{BitcoindRpcClient, BoundedHeaderCache, ChainListener}; use crate::config::{ Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, @@ -22,11 +23,15 @@ use crate::logger::{log_bytes, log_error, log_info, log_trace, FilesystemLogger, use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; -use lightning::chain::{Confirm, Filter}; +use lightning::chain::{Confirm, Filter, Listen}; use lightning::util::ser::Writeable; use lightning_transaction_sync::EsploraSyncClient; +use lightning_block_sync::init::{synchronize_listeners, validate_best_block_header}; +use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; +use lightning_block_sync::SpvClient; + use bdk_esplora::EsploraAsyncExt; use esplora_client::AsyncClient as EsploraAsyncClient; @@ -37,14 +42,14 @@ use std::collections::HashMap; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -use self::bitcoind_rpc::BitcoindRpcClient; - // The default Esplora server we're using. pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; // The default Esplora client timeout we're using. 
pub(crate) const DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS: u64 = 10;
+const CHAIN_POLLING_INTERVAL_SECS: u64 = 1;
+
 pub(crate) enum WalletSyncStatus { Completed, InProgress { subscribers: tokio::sync::broadcast::Sender> },
@@ -115,7 +120,10 @@ pub(crate) enum ChainSource { }, BitcoindRpc { bitcoind_rpc_client: Arc,
+ header_cache: tokio::sync::Mutex,
+ latest_chain_tip: RwLock>,
 onchain_wallet: Arc,
+ wallet_polling_status: Mutex,
 fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc,
@@ -163,9 +171,15 @@ impl ChainSource { ) -> Self { let bitcoind_rpc_client = Arc::new(BitcoindRpcClient::new(host, port, rpc_user, rpc_password));
+ let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new());
+ let latest_chain_tip = RwLock::new(None);
+ let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed);
 Self::BitcoindRpc { bitcoind_rpc_client,
+ header_cache,
+ latest_chain_tip,
 onchain_wallet,
+ wallet_polling_status,
 fee_estimator, tx_broadcaster, kv_store,
@@ -235,7 +249,129 @@ impl ChainSource { } } },
- Self::BitcoindRpc { .. } => todo!(),
+ Self::BitcoindRpc {
+ bitcoind_rpc_client,
+ header_cache,
+ latest_chain_tip,
+ onchain_wallet,
+ wallet_polling_status,
+ kv_store,
+ config,
+ logger,
+ node_metrics,
+ ..
+ } => {
+ // First register for the wallet polling status to make sure `Node::sync_wallets` calls
+ // wait on the result before proceeding.
+ {
+ let mut status_lock = wallet_polling_status.lock().unwrap();
+ if status_lock.register_or_subscribe_pending_sync().is_some() {
+ debug_assert!(false, "Sync already in progress. This should never happen.");
+ }
+ }
+
+ let channel_manager_best_block_hash =
+ channel_manager.current_best_block().block_hash;
+ let sweeper_best_block_hash = output_sweeper.current_best_block().block_hash;
+ let onchain_wallet_best_block_hash = onchain_wallet.current_best_block().block_hash;
+
+ let mut chain_listeners = vec![
+ (
+ onchain_wallet_best_block_hash,
+ &**onchain_wallet as &(dyn Listen + Send + Sync),
+ ),
+ (
+ channel_manager_best_block_hash,
+ &*channel_manager as &(dyn Listen + Send + Sync),
+ ),
+ (sweeper_best_block_hash, &*output_sweeper as &(dyn Listen + Send + Sync)),
+ ];
+
+ // TODO: Eventually we might want to see if we can synchronize `ChannelMonitor`s
+ // before giving them to `ChainMonitor` in the first place. However, this isn't
+ // trivial as we load them on initialization (in the `Builder`) and only gain
+ // network access during `start`. For now, we just make sure we get the worst known
+ // block hash and synchronize them via `ChainMonitor`.
+ if let Some(worst_channel_monitor_block_hash) = chain_monitor + .list_monitors() + .iter() + .flat_map(|(txo, _)| chain_monitor.get_monitor(*txo)) + .map(|m| m.current_best_block()) + .min_by_key(|b| b.height) + .map(|b| b.block_hash) + { + chain_listeners.push(( + worst_channel_monitor_block_hash, + &*chain_monitor as &(dyn Listen + Send + Sync), + )); + } + + loop { + let mut locked_header_cache = header_cache.lock().await; + match synchronize_listeners( + bitcoind_rpc_client.as_ref(), + config.network, + &mut *locked_header_cache, + chain_listeners.clone(), + ) + .await + { + Ok(chain_tip) => { + { + *latest_chain_tip.write().unwrap() = Some(chain_tip); + let unix_time_secs_opt = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .map(|d| d.as_secs()); + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = + unix_time_secs_opt; + locked_node_metrics.latest_onchain_wallet_sync_timestamp = + unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + ) + .unwrap_or_else(|e| { + log_error!(logger, "Failed to persist node metrics: {}", e); + }); + } + break; + }, + + Err(e) => { + log_error!(logger, "Failed to synchronize chain listeners: {:?}", e); + tokio::time::sleep(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)) + .await; + }, + } + } + + // Now propagate the initial result to unblock waiting subscribers. + wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(Ok(())); + + let mut chain_polling_interval = + tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); + chain_polling_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + // Start the polling loop. + loop { + tokio::select! { + _ = stop_sync_receiver.changed() => { + log_trace!( + logger, + "Stopping polling for new chain data.", + ); + return; + } + _ = chain_polling_interval.tick() => { + let _ = self.poll_and_update_listeners(Arc::clone(&channel_manager), Arc::clone(&chain_monitor), Arc::clone(&output_sweeper)).await; + } + } + } + }, } } @@ -451,6 +587,119 @@ impl ChainSource { } } + pub(crate) async fn poll_and_update_listeners( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + match self { + Self::Esplora { .. } => { + debug_assert!(false, "Polling should only be used with chain listeners"); + Ok(()) + }, + Self::BitcoindRpc { + bitcoind_rpc_client, + header_cache, + latest_chain_tip, + onchain_wallet, + wallet_polling_status, + kv_store, + config, + logger, + node_metrics, + .. 
+ } => { + let receiver_res = { + let mut status_lock = wallet_polling_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + + if let Some(mut sync_receiver) = receiver_res { + log_info!(logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet polling result: {:?}", e); + log_error!(logger, "Failed to receive wallet polling result: {:?}", e); + Error::WalletOperationFailed + })?; + } + + let latest_chain_tip_opt = latest_chain_tip.read().unwrap().clone(); + let chain_tip = if let Some(tip) = latest_chain_tip_opt { + tip + } else { + match validate_best_block_header(bitcoind_rpc_client.as_ref()).await { + Ok(tip) => { + *latest_chain_tip.write().unwrap() = Some(tip); + tip + }, + Err(e) => { + log_error!(logger, "Failed to poll for chain data: {:?}", e); + let res = Err(Error::TxSyncFailed); + wallet_polling_status + .lock() + .unwrap() + .propagate_result_to_subscribers(res); + return res; + }, + } + }; + + let mut locked_header_cache = header_cache.lock().await; + let chain_poller = + ChainPoller::new(Arc::clone(&bitcoind_rpc_client), config.network); + let chain_listener = ChainListener { + onchain_wallet: Arc::clone(&onchain_wallet), + channel_manager: Arc::clone(&channel_manager), + chain_monitor, + output_sweeper, + }; + let mut spv_client = SpvClient::new( + chain_tip, + chain_poller, + &mut *locked_header_cache, + &chain_listener, + ); + let mut chain_polling_interval = + tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); + chain_polling_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + match spv_client.poll_best_tip().await { + Ok((ChainTip::Better(tip), true)) => { + *latest_chain_tip.write().unwrap() = Some(tip); + }, + Ok(_) => {}, + Err(e) => { + log_error!(logger, "Failed to poll for chain data: {:?}", e); + let res = Err(Error::TxSyncFailed); + wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + + let res = { + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + ) + .map_err(|e| { + log_error!(logger, "Failed to persist node metrics: {}", e); + Error::PersistenceFailed + }) + }; + + wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + }, + } + } + pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { match self { Self::Esplora { diff --git a/src/lib.rs b/src/lib.rs index 42b99406a..abf9b8b36 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1209,9 +1209,21 @@ impl Node { tokio::task::block_in_place(move || { tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap().block_on( async move { - chain_source.update_fee_rate_estimates().await?; - chain_source.sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper).await?; - chain_source.sync_onchain_wallet().await?; + match chain_source.as_ref() { + ChainSource::Esplora { .. 
} => {
+ chain_source.update_fee_rate_estimates().await?;
+ chain_source
+ .sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper)
+ .await?;
+ chain_source.sync_onchain_wallet().await?;
+ },
+ ChainSource::BitcoindRpc { .. } => {
+ chain_source.update_fee_rate_estimates().await?;
+ chain_source
+ .poll_and_update_listeners(sync_cman, sync_cmon, sync_sweeper)
+ .await?;
+ },
+ }
 Ok(()) }, )
diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 50f096e36..77e04c138 100644
--- a/src/wallet/mod.rs
+++ b/src/wallet/mod.rs
@@ -13,7 +13,7 @@ use crate::fee_estimator::{ConfirmationTarget, FeeEstimator}; use crate::Error; use lightning::chain::chaininterface::BroadcasterInterface;
-use lightning::chain::Listen;
+use lightning::chain::{BestBlock, Listen};
 use lightning::events::bump_transaction::{Utxo, WalletSource}; use lightning::ln::msgs::{DecodeError, UnsignedGossipMessage};
@@ -85,6 +85,11 @@ where self.inner.lock().unwrap().start_sync_with_revealed_spks().build() }
+ pub(crate) fn current_best_block(&self) -> BestBlock {
+ let checkpoint = self.inner.lock().unwrap().latest_checkpoint();
+ BestBlock { block_hash: checkpoint.hash(), height: checkpoint.height() }
+ }
+
 pub(crate) fn apply_update(&self, update: impl Into) -> Result<(), Error> { let mut locked_wallet = self.inner.lock().unwrap(); match locked_wallet.apply_update(update) {

From b756d79b08df9400007c4fd26a50e2a583555552 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 10 Oct 2024 09:58:15 +0200
Subject: [PATCH 093/127] Add comments clarifying that `syncing` methods shouldn't be used for RPC

---
 src/chain/mod.rs | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 2668311d8..0fabaa312 100644
--- a/src/chain/mod.rs
+++ b/src/chain/mod.rs
@@ -375,6 +375,8 @@ impl ChainSource { } }
+ // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum,
+ // etc.)
 pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { match self { Self::Esplora {
@@ -490,10 +492,16 @@ impl ChainSource { res },
- Self::BitcoindRpc { .. } => todo!(),
+ Self::BitcoindRpc { .. } => {
+ // In BitcoindRpc mode we sync the lightning and onchain wallets in one go via
+ // `ChainPoller`. So nothing to do here.
+ unreachable!("Onchain wallet will be synced via chain polling")
+ },
 } }
+ // Synchronize the Lightning wallet via transaction-based protocols (i.e., Esplora, Electrum,
+ // etc.)
 pub(crate) async fn sync_lightning_wallet( &self, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc,
@@ -583,7 +591,11 @@ impl ChainSource { res },
- Self::BitcoindRpc { .. } => todo!(),
+ Self::BitcoindRpc { .. } => {
+ // In BitcoindRpc mode we sync the lightning and onchain wallets in one go via
+ // `ChainPoller`. So nothing to do here.
+ unreachable!("Lightning wallet will be synced via chain polling")
+ },
 } }
@@ -593,8 +605,9 @@ impl ChainSource { ) -> Result<(), Error> { match self { Self::Esplora { .. } => {
- debug_assert!(false, "Polling should only be used with chain listeners");
- Ok(())
+ // In Esplora mode we sync lightning and onchain wallets via
+ // `sync_onchain_wallet` and `sync_lightning_wallet`. So nothing to do here.
unreachable!("Listeners will be synced via transaction-based syncing") }, Self::BitcoindRpc {

From 935a17ef3cb62373926a7386967c234cc6e2c442 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 10 Oct 2024 10:35:08 +0200
Subject: [PATCH 094/127] Implement broadcast queue processing

---
 Cargo.toml | 1 +
 src/chain/bitcoind_rpc.rs | 8 +++++-
 src/chain/mod.rs | 55 ++++++++++++++++++++++++++++++++++++++-
 3 files changed, 62 insertions(+), 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml index 01ce08d73..07c8c4592 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -73,6 +73,7 @@ tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thr esplora-client = { version = "0.9", default-features = false } libc = "0.2" uniffi = { version = "0.26.0", features = ["build"], optional = true }
+serde_json = { version = "1.0.128", default-features = false, features = ["std"] }
 [target.'cfg(vss)'.dependencies] vss-client = "0.3"

diff --git a/src/chain/bitcoind_rpc.rs b/src/chain/bitcoind_rpc.rs index 30f1bd057..e1580ba0d 100644
--- a/src/chain/bitcoind_rpc.rs
+++ b/src/chain/bitcoind_rpc.rs
@@ -16,7 +16,7 @@ use lightning_block_sync::{ AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, Cache, };
-use bitcoin::BlockHash;
+use bitcoin::{BlockHash, Transaction, Txid};
 use base64::prelude::{Engine, BASE64_STANDARD};
@@ -40,6 +40,12 @@ impl BitcoindRpcClient { Self { rpc_client } }
+
+ pub(crate) async fn broadcast_transaction(&self, tx: &Transaction) -> std::io::Result {
+ let tx_serialized = bitcoin::consensus::encode::serialize_hex(tx);
+ let tx_json = serde_json::json!(tx_serialized);
+ self.rpc_client.call_method::("sendrawtransaction", &vec![tx_json]).await
+ }
 }
 impl BlockSource for BitcoindRpcClient {

diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 0fabaa312..0545c39bb 100644
--- a/src/chain/mod.rs
+++ b/src/chain/mod.rs
@@ -882,7 +882,60 @@ impl ChainSource { } } },
- Self::BitcoindRpc { .. } => todo!(),
+ Self::BitcoindRpc { bitcoind_rpc_client, tx_broadcaster, logger, .. } => {
+ // While it's a bit unclear when we'd be able to lean on Bitcoin Core >v28
+ // features, we should eventually switch to use `submitpackage` via the
+ // `rust-bitcoind-json-rpc` crate rather than just broadcasting individual
+ // transactions.
+ let mut receiver = tx_broadcaster.get_broadcast_queue().await;
+ while let Some(next_package) = receiver.recv().await {
+ for tx in &next_package {
+ let txid = tx.compute_txid();
+ let timeout_fut = tokio::time::timeout(
+ Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS),
+ bitcoind_rpc_client.broadcast_transaction(tx),
+ );
+ match timeout_fut.await {
+ Ok(res) => match res {
+ Ok(id) => {
+ debug_assert_eq!(id, txid);
+ log_trace!(
+ logger,
+ "Successfully broadcast transaction {}",
+ txid
+ );
+ },
+ Err(e) => {
+ log_error!(
+ logger,
+ "Failed to broadcast transaction {}: {}",
+ txid,
+ e
+ );
+ log_trace!(
+ logger,
+ "Failed broadcast transaction bytes: {}",
+ log_bytes!(tx.encode())
+ );
+ },
+ },
+ Err(e) => {
+ log_error!(
+ logger,
+ "Failed to broadcast transaction due to timeout {}: {}",
+ txid,
+ e
+ );
+ log_trace!(
+ logger,
+ "Failed broadcast transaction bytes: {}",
+ log_bytes!(tx.encode())
+ );
+ },
+ }
+ }
+ }
+ },
 } } }

From ce3606e5c38ab1ef6c1d5116429e9767e22f0b01 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 10 Oct 2024 12:07:02 +0200
Subject: [PATCH 095/127] Implement fee rate estimation updating

---
 Cargo.toml | 1 +
 src/chain/bitcoind_rpc.rs | 75 +++++++++++++++++++++++-
 src/chain/mod.rs | 120 +++++++++++++++++++++++++++++++++++++-
 src/fee_estimator.rs | 13 ++++-
 4 files changed, 203 insertions(+), 6 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml index 07c8c4592..48bc64cbb 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -73,6 +73,7 @@ tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thr esplora-client = { version = "0.9", default-features = false } libc = "0.2" uniffi = { version = "0.26.0", features = ["build"], optional = true }
+serde = { version = "1.0.210", default-features = false, features = ["std", "derive"] }
 serde_json = { version = "1.0.128", default-features = false, features = ["std"] } [target.'cfg(vss)'.dependencies] vss-client = "0.3"

diff --git a/src/chain/bitcoind_rpc.rs b/src/chain/bitcoind_rpc.rs index e1580ba0d..672854daf 100644
--- a/src/chain/bitcoind_rpc.rs
+++ b/src/chain/bitcoind_rpc.rs
@@ -10,13 +10,16 @@ use crate::types::{ChainMonitor, ChannelManager, Sweeper, Wallet}; use lightning::chain::Listen; use lightning_block_sync::http::HttpEndpoint;
+use lightning_block_sync::http::JsonResponse;
 use lightning_block_sync::poll::ValidatedBlockHeader; use lightning_block_sync::rpc::RpcClient; use lightning_block_sync::{ AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, Cache, };
-use bitcoin::{BlockHash, Transaction, Txid};
+use serde::Serialize;
+
+use bitcoin::{BlockHash, FeeRate, Transaction, Txid};
 use base64::prelude::{Engine, BASE64_STANDARD};
@@ -46,6 +49,27 @@ impl BitcoindRpcClient { let tx_json = serde_json::json!(tx_serialized); self.rpc_client.call_method::("sendrawtransaction", &vec![tx_json]).await }
+
+ pub(crate) async fn get_fee_estimate_for_target(
+ &self, num_blocks: usize, estimation_mode: FeeRateEstimationMode,
+ ) -> std::io::Result {
+ let num_blocks_json = serde_json::json!(num_blocks);
+ let estimation_mode_json = serde_json::json!(estimation_mode);
+ self.rpc_client
+ .call_method::(
+ "estimatesmartfee",
+ &vec![num_blocks_json, estimation_mode_json],
+ )
+ .await
+ .map(|resp| resp.0)
+ }
+
+ pub(crate) async fn get_mempool_minimum_fee_rate(&self) -> std::io::Result {
+ self.rpc_client
+ .call_method::("getmempoolinfo", &vec![])
+ .await
+ .map(|resp| resp.0)
+ }
 }
 impl BlockSource for BitcoindRpcClient {
@@ -66,6 +90,55 @@ impl BlockSource for BitcoindRpcClient { } }
+pub(crate) 
struct FeeResponse(pub FeeRate); + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + if !self.0["errors"].is_null() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + self.0["errors"].to_string(), + )); + } + let fee_rate_btc_per_kvbyte = self.0["feerate"] + .as_f64() + .ok_or(std::io::Error::new(std::io::ErrorKind::Other, "Failed to parse fee rate"))?; + // Bitcoin Core gives us a feerate in BTC/KvB. + // Thus, we multiply by 25_000_000 (10^8 / 4) to get satoshis/kwu. + let fee_rate = { + let fee_rate_sat_per_kwu = (fee_rate_btc_per_kvbyte * 25_000_000.0).round() as u64; + FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu) + }; + Ok(FeeResponse(fee_rate)) + } +} + +pub struct MempoolMinFeeResponse(pub FeeRate); + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + let fee_rate_btc_per_kvbyte = self.0["mempoolminfee"] + .as_f64() + .ok_or(std::io::Error::new(std::io::ErrorKind::Other, "Failed to parse fee rate"))?; + // Bitcoin Core gives us a feerate in BTC/KvB. + // Thus, we multiply by 25_000_000 (10^8 / 4) to get satoshis/kwu. + let fee_rate = { + let fee_rate_sat_per_kwu = (fee_rate_btc_per_kvbyte * 25_000_000.0).round() as u64; + FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu) + }; + Ok(MempoolMinFeeResponse(fee_rate)) + } +} + +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "UPPERCASE")] +pub(crate) enum FeeRateEstimationMode { + Economical, + Conservative, +} + const MAX_HEADER_CACHE_ENTRIES: usize = 100; pub(crate) struct BoundedHeaderCache { diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 0545c39bb..889f9af6c 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -7,7 +7,9 @@ mod bitcoind_rpc; -use crate::chain::bitcoind_rpc::{BitcoindRpcClient, BoundedHeaderCache, ChainListener}; +use crate::chain::bitcoind_rpc::{ + BitcoindRpcClient, BoundedHeaderCache, ChainListener, FeeRateEstimationMode, +}; use crate::config::{ Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, @@ -16,13 +18,14 @@ use crate::config::{ }; use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, - OnchainFeeEstimator, + ConfirmationTarget, OnchainFeeEstimator, }; use crate::io::utils::write_node_metrics; use crate::logger::{log_bytes, log_error, log_info, log_trace, FilesystemLogger, Logger}; use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; +use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; use lightning::chain::{Confirm, Filter, Listen}; use lightning::util::ser::Writeable; @@ -356,6 +359,13 @@ impl ChainSource { chain_polling_interval .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + let mut fee_rate_update_interval = + tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); + // When starting up, we just blocked on updating, so skip the first tick. + fee_rate_update_interval.reset(); + fee_rate_update_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + // Start the polling loop. loop { tokio::select! 
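// A quick worked example of the BTC/kvB -> sat/kwu conversion performed by `FeeResponse`
// and `MempoolMinFeeResponse` earlier in this patch (illustrative numbers only): an
// `estimatesmartfee` result of 0.00010000 BTC/kvB is 10_000 sats per 1_000 vbytes, i.e.
// 10 sat/vB; at 4 weight units per vbyte that is 2_500 sat/kwu, which is exactly
// 0.0001 * 25_000_000.
let fee_rate_btc_per_kvbyte = 0.0001_f64;
let fee_rate_sat_per_kwu = (fee_rate_btc_per_kvbyte * 25_000_000.0).round() as u64;
assert_eq!(fee_rate_sat_per_kwu, 2_500);
assert_eq!(fee_rate_sat_per_kwu * 4 / 1_000, 10); // back to sat/vB
let _fee_rate = FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu);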
{ @@ -369,6 +379,9 @@ impl ChainSource { _ = chain_polling_interval.tick() => { let _ = self.poll_and_update_listeners(Arc::clone(&channel_manager), Arc::clone(&chain_monitor), Arc::clone(&output_sweeper)).await; } + _ = fee_rate_update_interval.tick() => { + let _ = self.update_fee_rate_estimates().await; + } } } }, @@ -805,7 +818,108 @@ impl ChainSource { Ok(()) }, - Self::BitcoindRpc { .. } => todo!(), + Self::BitcoindRpc { + bitcoind_rpc_client, + fee_estimator, + kv_store, + logger, + node_metrics, + .. + } => { + macro_rules! get_fee_rate_update { + ($estimation_fut: expr) => {{ + tokio::time::timeout( + Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + $estimation_fut, + ) + .await + .map_err(|e| { + log_error!(logger, "Updating fee rate estimates timed out: {}", e); + Error::FeerateEstimationUpdateTimeout + })? + .map_err(|e| { + log_error!(logger, "Failed to retrieve fee rate estimates: {}", e); + Error::FeerateEstimationUpdateFailed + })? + }}; + } + let confirmation_targets = get_all_conf_targets(); + + let mut new_fee_rate_cache = HashMap::with_capacity(10); + let now = Instant::now(); + for target in confirmation_targets { + let fee_rate = match target { + ConfirmationTarget::Lightning( + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee, + ) => { + let estimation_fut = bitcoind_rpc_client.get_mempool_minimum_fee_rate(); + get_fee_rate_update!(estimation_fut) + }, + ConfirmationTarget::Lightning( + LdkConfirmationTarget::MaximumFeeEstimate, + ) => { + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Conservative; + let estimation_fut = bitcoind_rpc_client + .get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + ConfirmationTarget::Lightning( + LdkConfirmationTarget::UrgentOnChainSweep, + ) => { + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Conservative; + let estimation_fut = bitcoind_rpc_client + .get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + _ => { + // Otherwise, we default to economical block-target estimate. + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Economical; + let estimation_fut = bitcoind_rpc_client + .get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + }; + + // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that + // require some post-estimation adjustments to the fee rates, which we do here. + let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); + + new_fee_rate_cache.insert(target, adjusted_fee_rate); + + log_trace!( + logger, + "Fee rate estimation updated for {:?}: {} sats/kwu", + target, + adjusted_fee_rate.to_sat_per_kwu(), + ); + } + + if fee_estimator.set_fee_rate_cache(new_fee_rate_cache) { + // We only log if the values changed, as it might be very spammy otherwise. 
+ log_info!( + logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); + } + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + )?; + } + + Ok(()) + }, } } diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs index 0ecc71586..8db6a6050 100644 --- a/src/fee_estimator.rs +++ b/src/fee_estimator.rs @@ -44,8 +44,17 @@ impl OnchainFeeEstimator { Self { fee_rate_cache } } - pub(crate) fn set_fee_rate_cache(&self, fee_rate_cache: HashMap) { - *self.fee_rate_cache.write().unwrap() = fee_rate_cache; + // Updates the fee rate cache and returns if the new values changed. + pub(crate) fn set_fee_rate_cache( + &self, fee_rate_cache_update: HashMap, + ) -> bool { + let mut locked_fee_rate_cache = self.fee_rate_cache.write().unwrap(); + if fee_rate_cache_update != *locked_fee_rate_cache { + *locked_fee_rate_cache = fee_rate_cache_update; + true + } else { + false + } } } From 8e1dda17c87ff5e80a1d6b5cbb8d9001550f255d Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 14 Oct 2024 15:47:45 +0200 Subject: [PATCH 096/127] Retrieve and apply unconfirmed transactions from the mempool .. to allow the on-chain wallet to detect what's inflight. --- src/chain/bitcoind_rpc.rs | 186 +++++++++++++++++++++++++++++++++++++- src/chain/mod.rs | 52 +++++++---- src/wallet/mod.rs | 15 +++ 3 files changed, 235 insertions(+), 18 deletions(-) diff --git a/src/chain/bitcoind_rpc.rs b/src/chain/bitcoind_rpc.rs index 672854daf..6e7360601 100644 --- a/src/chain/bitcoind_rpc.rs +++ b/src/chain/bitcoind_rpc.rs @@ -12,7 +12,7 @@ use lightning::chain::Listen; use lightning_block_sync::http::HttpEndpoint; use lightning_block_sync::http::JsonResponse; use lightning_block_sync::poll::ValidatedBlockHeader; -use lightning_block_sync::rpc::RpcClient; +use lightning_block_sync::rpc::{RpcClient, RpcError}; use lightning_block_sync::{ AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, Cache, }; @@ -24,10 +24,12 @@ use bitcoin::{BlockHash, FeeRate, Transaction, Txid}; use base64::prelude::{Engine, BASE64_STANDARD}; use std::collections::{HashMap, VecDeque}; +use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; pub struct BitcoindRpcClient { rpc_client: Arc, + latest_mempool_timestamp: AtomicU64, } impl BitcoindRpcClient { @@ -41,7 +43,9 @@ impl BitcoindRpcClient { .expect("RpcClient::new is actually infallible"), ); - Self { rpc_client } + let latest_mempool_timestamp = AtomicU64::new(0); + + Self { rpc_client, latest_mempool_timestamp } } pub(crate) async fn broadcast_transaction(&self, tx: &Transaction) -> std::io::Result { @@ -70,6 +74,99 @@ impl BitcoindRpcClient { .await .map(|resp| resp.0) } + + pub(crate) async fn get_raw_transaction( + &self, txid: &Txid, + ) -> std::io::Result> { + let txid_hex = bitcoin::consensus::encode::serialize_hex(txid); + let txid_json = serde_json::json!(txid_hex); + match self + .rpc_client + .call_method::("getrawtransaction", &vec![txid_json]) + .await + { + Ok(resp) => Ok(Some(resp.0)), + Err(e) => match e.into_inner() { + Some(inner) => { + let rpc_error_res: Result, _> = inner.downcast(); + + match rpc_error_res { + Ok(rpc_error) => { + // Check if it's the 'not found' error code. 
+ if rpc_error.code == -5 { + Ok(None) + } else { + Err(std::io::Error::new(std::io::ErrorKind::Other, rpc_error)) + } + }, + Err(_) => Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to process getrawtransaction response", + )), + } + }, + None => Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to process getrawtransaction response", + )), + }, + } + } + + pub(crate) async fn get_raw_mempool(&self) -> std::io::Result> { + let verbose_flag_json = serde_json::json!(true); + self.rpc_client + .call_method::("getrawmempool", &vec![verbose_flag_json]) + .await + .map(|resp| resp.0) + } + + /// Get mempool transactions, alongside their first-seen unix timestamps. + /// + /// This method is an adapted version of `bdk_bitcoind_rpc::Emitter::mempool`. It emits each + /// transaction only once, unless we cannot assume the transaction's ancestors are already + /// emitted. + pub(crate) async fn get_mempool_transactions_and_timestamp_at_height( + &self, best_processed_height: u32, + ) -> std::io::Result> { + let prev_mempool_time = self.latest_mempool_timestamp.load(Ordering::Relaxed); + let mut latest_time = prev_mempool_time; + + let mempool_entries = self.get_raw_mempool().await?; + let mut txs_to_emit = Vec::new(); + + for entry in mempool_entries { + if entry.time > latest_time { + latest_time = entry.time; + } + + // Avoid emitting transactions that are already emitted if we can guarantee + // blocks containing ancestors are already emitted. The bitcoind rpc interface + // provides us with the block height that the tx is introduced to the mempool. + // If we have already emitted the block of height, we can assume that all + // ancestor txs have been processed by the receiver. + let ancestor_within_height = entry.height <= best_processed_height; + let is_already_emitted = entry.time <= prev_mempool_time; + if is_already_emitted && ancestor_within_height { + continue; + } + + match self.get_raw_transaction(&entry.txid).await { + Ok(Some(tx)) => { + txs_to_emit.push((tx, entry.time)); + }, + Ok(None) => { + continue; + }, + Err(e) => return Err(e), + }; + } + + if !txs_to_emit.is_empty() { + self.latest_mempool_timestamp.store(latest_time, Ordering::Release); + } + Ok(txs_to_emit) + } } impl BlockSource for BitcoindRpcClient { @@ -132,6 +229,91 @@ impl TryInto for JsonResponse { } } +pub struct GetRawTransactionResponse(pub Transaction); + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + let tx = self + .0 + .as_str() + .ok_or(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawtransaction response", + )) + .and_then(|s| { + bitcoin::consensus::encode::deserialize_hex(s).map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawtransaction response", + ) + }) + })?; + + Ok(GetRawTransactionResponse(tx)) + } +} + +pub struct GetRawMempoolResponse(Vec); + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + let mut mempool_transactions = Vec::new(); + let res = self.0.as_object().ok_or(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawmempool response", + ))?; + + for (k, v) in res { + let txid = match bitcoin::consensus::encode::deserialize_hex(k) { + Ok(txid) => txid, + Err(_) => { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawmempool response", + )); + }, + }; + + let time = match v["time"].as_u64() { + Some(time) => time, + None => { 
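// A worked example of the skip conditions in `get_mempool_transactions_and_timestamp_at_height`
// above (hypothetical values): with `latest_mempool_timestamp` at 1_000 from the previous poll
// and blocks processed up to height 800, an entry first seen at time 990 whose mempool-entry
// height 795 is at or below the best processed height was already emitted and its ancestors are
// covered by already-emitted blocks, so it is skipped; an entry first seen at time 1_050 is new,
// gets emitted, and advances the stored timestamp to 1_050.
let (prev_mempool_time, best_processed_height) = (1_000u64, 800u32);
let (entry_time, entry_height) = (990u64, 795u32);
let ancestor_within_height = entry_height <= best_processed_height;
let is_already_emitted = entry_time <= prev_mempool_time;
assert!(is_already_emitted && ancestor_within_height); // i.e., `continue` without re-emitting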
+ return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawmempool response", + )); + }, + }; + + let height = match v["height"].as_u64().and_then(|h| h.try_into().ok()) { + Some(height) => height, + None => { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawmempool response", + )); + }, + }; + let entry = RawMempoolEntry { txid, time, height }; + + mempool_transactions.push(entry); + } + + Ok(GetRawMempoolResponse(mempool_transactions)) + } +} + +#[derive(Debug, Clone)] +pub(crate) struct RawMempoolEntry { + /// The transaction id + txid: Txid, + /// Local time transaction entered pool in seconds since 1 Jan 1970 GMT + time: u64, + /// Block height when transaction entered pool + height: u32, +} + #[derive(Debug, Clone, Serialize)] #[serde(rename_all = "UPPERCASE")] pub(crate) enum FeeRateEstimationMode { diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 889f9af6c..b7440ba6b 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -702,25 +702,45 @@ impl ChainSource { }, } - let res = { - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - ) - .map_err(|e| { + let cur_height = channel_manager.current_best_block().height; + match bitcoind_rpc_client + .get_mempool_transactions_and_timestamp_at_height(cur_height) + .await + { + Ok(unconfirmed_txs) => { + let _ = onchain_wallet.apply_unconfirmed_txs(unconfirmed_txs); + }, + Err(e) => { + log_error!(logger, "Failed to poll for mempool transactions: {:?}", e); + let res = Err(Error::TxSyncFailed); + wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + + let write_res = write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + ); + match write_res { + Ok(()) => (), + Err(e) => { log_error!(logger, "Failed to persist node metrics: {}", e); - Error::PersistenceFailed - }) - }; + let res = Err(Error::PersistenceFailed); + wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + let res = Ok(()); wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - res }, } diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 77e04c138..494fcd768 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -109,6 +109,21 @@ where } } + pub(crate) fn apply_unconfirmed_txs( + &self, unconfirmed_txs: Vec<(Transaction, u64)>, + ) -> Result<(), Error> { + let mut locked_wallet = self.inner.lock().unwrap(); + locked_wallet.apply_unconfirmed_txs(unconfirmed_txs); + + let mut locked_persister = self.persister.lock().unwrap(); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + + Ok(()) + } + pub(crate) fn create_funding_transaction( &self, output_script: 
ScriptBuf, amount: Amount, confirmation_target: ConfirmationTarget, locktime: LockTime, From 6da0b985f335792d8f964ff55d1b7eb2310422be Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 10 Oct 2024 09:53:38 +0200 Subject: [PATCH 097/127] Allow to configure bitcoind RPC in `Builder` --- bindings/ldk_node.udl | 1 + src/builder.rs | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index b4fc7ec79..5deb36915 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -36,6 +36,7 @@ interface Builder { void set_entropy_seed_bytes(sequence seed_bytes); void set_entropy_bip39_mnemonic(Mnemonic mnemonic, string? passphrase); void set_chain_source_esplora(string server_url, EsploraSyncConfig? config); + void set_chain_source_bitcoind_rpc(string rpc_host, u16 rpc_port, string rpc_user, string rpc_password); void set_gossip_source_p2p(); void set_gossip_source_rgs(string rgs_server_url); void set_liquidity_source_lsps2(SocketAddress address, PublicKey node_id, string? token); diff --git a/src/builder.rs b/src/builder.rs index 43171db1f..733a99960 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -78,6 +78,7 @@ use std::time::SystemTime; #[derive(Debug, Clone)] enum ChainDataSourceConfig { Esplora { server_url: String, sync_config: Option }, + BitcoindRpc { rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String }, } #[derive(Debug, Clone)] @@ -248,6 +249,16 @@ impl NodeBuilder { self } + /// Configures the [`Node`] instance to source its chain data from the given Bitcoin Core RPC + /// endpoint. + pub fn set_chain_source_bitcoind_rpc( + &mut self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + ) -> &mut Self { + self.chain_data_source_config = + Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password }); + self + } + /// Configures the [`Node`] instance to source its gossip data from the Lightning peer-to-peer /// network. pub fn set_gossip_source_p2p(&mut self) -> &mut Self { @@ -479,6 +490,19 @@ impl ArcedNodeBuilder { self.inner.write().unwrap().set_chain_source_esplora(server_url, sync_config); } + /// Configures the [`Node`] instance to source its chain data from the given Bitcoin Core RPC + /// endpoint. + pub fn set_chain_source_bitcoind_rpc( + &self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + ) { + self.inner.write().unwrap().set_chain_source_bitcoind_rpc( + rpc_host, + rpc_port, + rpc_user, + rpc_password, + ); + } + /// Configures the [`Node`] instance to source its gossip data from the Lightning peer-to-peer /// network. pub fn set_gossip_source_p2p(&self) { @@ -633,6 +657,21 @@ fn build_with_store_internal( Arc::clone(&node_metrics), )) }, + Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password }) => { + Arc::new(ChainSource::new_bitcoind_rpc( + rpc_host.clone(), + *rpc_port, + rpc_user.clone(), + rpc_password.clone(), + Arc::clone(&wallet), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), + Arc::clone(&config), + Arc::clone(&logger), + Arc::clone(&node_metrics), + )) + }, None => { // Default to Esplora client. 
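As a usage sketch of the new setter (host, port, and credentials here are placeholders; cookie-derived credentials, as used by the test changes a few commits later, work just as well):

use ldk_node::Builder;

let mut builder = Builder::new();
builder.set_chain_source_bitcoind_rpc(
	"127.0.0.1".to_string(),
	8332,
	"rpcuser".to_string(),
	"rpcpassword".to_string(),
);
let node = builder.build().unwrap();
node.start().unwrap();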
let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); From e268b806b4472ffeab78d71b3f42c265efcb4219 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 15 Oct 2024 16:48:27 +0200 Subject: [PATCH 098/127] Only enforce successful fee rate cache updates on mainnet This behavior mirrors what we do in the Esplora case: we only enforce successful fee rate updates on mainnet. On regtest/signet/testnet we will just skip (i.e. return `Ok(())`) if we fail to retrieve the updates (e.g., when bitcoind's `estimatesmartfee` isn't sufficiently populated) and will either keep previously-retrieved values or worst case fallback to the fallback defaults. --- src/chain/mod.rs | 41 ++++++++++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/src/chain/mod.rs b/src/chain/mod.rs index b7440ba6b..af77e6bee 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -841,6 +841,7 @@ impl ChainSource { Self::BitcoindRpc { bitcoind_rpc_client, fee_estimator, + config, kv_store, logger, node_metrics, @@ -848,7 +849,7 @@ impl ChainSource { } => { macro_rules! get_fee_rate_update { ($estimation_fut: expr) => {{ - tokio::time::timeout( + let update_res = tokio::time::timeout( Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), $estimation_fut, ) @@ -856,11 +857,8 @@ impl ChainSource { .map_err(|e| { log_error!(logger, "Updating fee rate estimates timed out: {}", e); Error::FeerateEstimationUpdateTimeout - })? - .map_err(|e| { - log_error!(logger, "Failed to retrieve fee rate estimates: {}", e); - Error::FeerateEstimationUpdateFailed - })? + })?; + update_res }}; } let confirmation_targets = get_all_conf_targets(); @@ -868,7 +866,7 @@ impl ChainSource { let mut new_fee_rate_cache = HashMap::with_capacity(10); let now = Instant::now(); for target in confirmation_targets { - let fee_rate = match target { + let fee_rate_update_res = match target { ConfirmationTarget::Lightning( LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee, ) => { @@ -903,6 +901,35 @@ impl ChainSource { }, }; + let fee_rate = match (fee_rate_update_res, config.network) { + (Ok(rate), _) => rate, + (Err(e), Network::Bitcoin) => { + // Strictly fail on mainnet. + log_error!(logger, "Failed to retrieve fee rate estimates: {}", e); + return Err(Error::FeerateEstimationUpdateFailed); + }, + (Err(e), n) if n == Network::Regtest || n == Network::Signet => { + // On regtest/signet we just fall back to the usual 1 sat/vb == 250 + // sat/kwu default. + log_error!( + logger, + "Failed to retrieve fee rate estimates: {}. Falling back to default of 1 sat/vb.", + e, + ); + FeeRate::from_sat_per_kwu(250) + }, + (Err(e), _) => { + // On testnet `estimatesmartfee` can be unreliable so we just skip in + // case of a failure, which will have us falling back to defaults. + log_error!( + logger, + "Failed to retrieve fee rate estimates: {}. Falling back to defaults.", + e, + ); + return Ok(()); + }, + }; + // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that // require some post-estimation adjustments to the fee rates, which we do here. 
let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); From 0a65272b6195b2789ffaf300254ffecda6e12a7a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 10 Oct 2024 16:13:28 +0200 Subject: [PATCH 099/127] Add `ChainSource` support to tests --- tests/common/mod.rs | 37 ++++++++++++++++++------ tests/integration_tests_rust.rs | 51 +++++++++++++++++++++++---------- 2 files changed, 64 insertions(+), 24 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 9c712286a..e8c2d3892 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -241,6 +241,12 @@ type TestNode = Arc; #[cfg(not(feature = "uniffi"))] type TestNode = Node; +#[derive(Clone)] +pub(crate) enum TestChainSource<'a> { + Esplora(&'a ElectrsD), + BitcoindRpc(&'a BitcoinD), +} + macro_rules! setup_builder { ($builder: ident, $config: expr) => { #[cfg(feature = "uniffi")] @@ -253,11 +259,12 @@ macro_rules! setup_builder { pub(crate) use setup_builder; pub(crate) fn setup_two_nodes( - electrsd: &ElectrsD, allow_0conf: bool, anchor_channels: bool, anchors_trusted_no_reserve: bool, + chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool, + anchors_trusted_no_reserve: bool, ) -> (TestNode, TestNode) { println!("== Node A =="); let config_a = random_config(anchor_channels); - let node_a = setup_node(electrsd, config_a); + let node_a = setup_node(chain_source, config_a); println!("\n== Node B =="); let mut config_b = random_config(anchor_channels); @@ -272,17 +279,29 @@ pub(crate) fn setup_two_nodes( .trusted_peers_no_reserve .push(node_a.node_id()); } - let node_b = setup_node(electrsd, config_b); + let node_b = setup_node(chain_source, config_b); (node_a, node_b) } -pub(crate) fn setup_node(electrsd: &ElectrsD, config: Config) -> TestNode { - let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let mut sync_config = EsploraSyncConfig::default(); - sync_config.onchain_wallet_sync_interval_secs = 100000; - sync_config.lightning_wallet_sync_interval_secs = 100000; +pub(crate) fn setup_node(chain_source: &TestChainSource, config: Config) -> TestNode { setup_builder!(builder, config); - builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + match chain_source { + TestChainSource::Esplora(electrsd) => { + let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + let mut sync_config = EsploraSyncConfig::default(); + sync_config.onchain_wallet_sync_interval_secs = 100000; + sync_config.lightning_wallet_sync_interval_secs = 100000; + builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + }, + TestChainSource::BitcoindRpc(bitcoind) => { + let rpc_host = bitcoind.params.rpc_socket.ip().to_string(); + let rpc_port = bitcoind.params.rpc_socket.port(); + let values = bitcoind.params.get_cookie_values().unwrap().unwrap(); + let rpc_user = values.user; + let rpc_password = values.password; + builder.set_chain_source_bitcoind_rpc(rpc_host, rpc_port, rpc_user, rpc_password); + }, + } let test_sync_store = Arc::new(TestSyncStore::new(config.storage_dir_path.into())); let node = builder.build_with_store(test_sync_store).unwrap(); node.start().unwrap(); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 13f3ab0be..7dab40b16 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -11,7 +11,7 @@ use common::{ do_channel_full_cycle, expect_channel_ready_event, expect_event, expect_payment_received_event, expect_payment_successful_event, 
generate_blocks_and_wait, open_channel, premine_and_distribute_funds, random_config, setup_bitcoind_and_electrsd, setup_builder, - setup_node, setup_two_nodes, wait_for_tx, TestSyncStore, + setup_node, setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore, }; use ldk_node::config::EsploraSyncConfig; @@ -21,49 +21,63 @@ use ldk_node::{Builder, Event, NodeError}; use lightning::ln::channelmanager::PaymentId; use lightning::util::persist::KVStore; -use bitcoin::{Amount, Network}; +use bitcoin::Amount; use std::sync::Arc; #[test] fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); +} + +#[test] +fn channel_full_cycle_bitcoind() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::BitcoindRpc(&bitcoind); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); } #[test] fn channel_full_cycle_force_close() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true); } #[test] fn channel_full_cycle_force_close_trusted_no_reserve() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, true); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, true); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true); } #[test] fn channel_full_cycle_0conf() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, true, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, true, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, true, true, false) } #[test] fn channel_full_cycle_legacy_staticremotekey() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, false, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, false, false); } #[test] fn channel_open_fails_when_funds_insufficient() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let addr_a = node_a.onchain_payment().new_address().unwrap(); let addr_b = node_b.onchain_payment().new_address().unwrap(); @@ -266,7 +280,8 @@ fn start_stop_reinit() { #[test] fn onchain_spend_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = 
setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let addr_a = node_a.onchain_payment().new_address().unwrap(); let addr_b = node_b.onchain_payment().new_address().unwrap(); @@ -315,7 +330,8 @@ fn onchain_spend_receive() { fn sign_verify_msg() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let config = random_config(true); - let node = setup_node(&electrsd, config); + let chain_source = TestChainSource::Esplora(&electrsd); + let node = setup_node(&chain_source, config); // Tests arbitrary message signing and later verification let msg = "OK computer".as_bytes(); @@ -332,7 +348,8 @@ fn connection_restart_behavior() { fn do_connection_restart_behavior(persist: bool) { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, false, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); let node_id_a = node_a.node_id(); let node_id_b = node_b.node_id(); @@ -383,7 +400,8 @@ fn do_connection_restart_behavior(persist: bool) { #[test] fn concurrent_connections_succeed() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let node_a = Arc::new(node_a); let node_b = Arc::new(node_b); @@ -413,7 +431,8 @@ fn concurrent_connections_succeed() { #[test] fn simple_bolt12_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let address_a = node_a.onchain_payment().new_address().unwrap(); let premine_amount_sat = 5_000_000; @@ -620,7 +639,8 @@ fn simple_bolt12_send_receive() { #[test] fn generate_bip21_uri() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let address_a = node_a.onchain_payment().new_address().unwrap(); let premined_sats = 5_000_000; @@ -661,7 +681,8 @@ fn generate_bip21_uri() { #[test] fn unified_qr_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let address_a = node_a.onchain_payment().new_address().unwrap(); let premined_sats = 5_000_000; From 4a086c345dc26c48a97094a4fa658c623c2f1d5f Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 16 Oct 2024 18:40:27 +0200 Subject: [PATCH 100/127] Drop flaky `connect_to_public_esplora` test .. as it regularly makes CI fail and doesn't provide us anything really. 
--- tests/integration_tests_rust.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 7dab40b16..dc5c4b818 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -195,17 +195,6 @@ fn multi_hop_sending() { expect_payment_successful_event!(nodes[0], payment_id, Some(fee_paid_msat)); } -#[test] -fn connect_to_public_testnet_esplora() { - let mut config = random_config(true); - config.network = Network::Testnet; - setup_builder!(builder, config); - builder.set_chain_source_esplora("https://blockstream.info/testnet/api".to_string(), None); - let node = builder.build().unwrap(); - node.start().unwrap(); - node.stop().unwrap(); -} - #[test] fn start_stop_reinit() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); From f165c74c69a9207cb0a88250dfd4613c94d150cb Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 14 Oct 2024 15:52:01 +0200 Subject: [PATCH 101/127] Relax `onchain_fee_buffer` in tests slightly .. to account for slight differences in fee rate estmations between chain sources. --- tests/common/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index e8c2d3892..7c501d545 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -502,7 +502,7 @@ pub(crate) fn do_channel_full_cycle( node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); - let onchain_fee_buffer_sat = 1500; + let onchain_fee_buffer_sat = 5000; let node_a_anchor_reserve_sat = if expect_anchor_channel { 25_000 } else { 0 }; let node_a_upper_bound_sat = premine_amount_sat - node_a_anchor_reserve_sat - funding_amount_sat; From 7c629f24d9c41af12d1c5bf3fdd529d67734aa8c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 15 Oct 2024 17:32:42 +0200 Subject: [PATCH 102/127] Update README to reflect `bitcoind` RPC support --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 22ef1a1b2..02dcbf323 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ fn main() { LDK Node currently comes with a decidedly opinionated set of design choices: - On-chain data is handled by the integrated [BDK][bdk] wallet. -- Chain data may currently be sourced from an [Esplora][esplora] server, while support for Electrum and `bitcoind` RPC will follow soon. +- Chain data may currently be sourced from the Bitcoin Core RPC interface or an [Esplora][esplora] server, while support for Electrum will follow soon. - Wallet and channel state may be persisted to an [SQLite][sqlite] database, to file system, or to a custom back-end to be implemented by the user. - Gossip data may be sourced via Lightning's peer-to-peer network or the [Rapid Gossip Sync](https://docs.rs/lightning-rapid-gossip-sync/*/lightning_rapid_gossip_sync/) protocol. - Entropy for the Lightning and on-chain wallets may be sourced from raw bytes or a [BIP39](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki) mnemonic. In addition, LDK Node offers the means to generate and persist the entropy bytes to disk. 
From ef9810cc51294e06e0535d6f83d5802d49b1e03b Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 16 Oct 2024 11:06:33 +0200 Subject: [PATCH 103/127] Upgrade Uniffi version --- Cargo.toml | 4 ++-- bindings/uniffi-bindgen/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 48bc64cbb..e56faefb0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,7 +72,7 @@ chrono = { version = "0.4", default-features = false, features = ["clock"] } tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } esplora-client = { version = "0.9", default-features = false } libc = "0.2" -uniffi = { version = "0.26.0", features = ["build"], optional = true } +uniffi = { version = "0.27.3", features = ["build"], optional = true } serde = { version = "1.0.210", default-features = false, features = ["std", "derive"] } serde_json = { version = "1.0.128", default-features = false, features = ["std"] } @@ -101,7 +101,7 @@ electrsd = { version = "0.29.0", features = ["legacy"] } clightningrpc = { version = "0.3.0-beta.8", default-features = false } [build-dependencies] -uniffi = { version = "0.26.0", features = ["build"], optional = true } +uniffi = { version = "0.27.3", features = ["build"], optional = true } [profile.release] panic = "abort" diff --git a/bindings/uniffi-bindgen/Cargo.toml b/bindings/uniffi-bindgen/Cargo.toml index b823f397d..9a4c9d5da 100644 --- a/bindings/uniffi-bindgen/Cargo.toml +++ b/bindings/uniffi-bindgen/Cargo.toml @@ -6,4 +6,4 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -uniffi = { version = "0.26.0", features = ["cli"] } +uniffi = { version = "0.27.3", features = ["cli"] } From d0a14d2e0f263beabe10953f71ee6a2852f17239 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 16 Oct 2024 11:20:37 +0200 Subject: [PATCH 104/127] Bump kotlin-android `compileSdk` to 34 --- bindings/kotlin/ldk-node-android/lib/build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bindings/kotlin/ldk-node-android/lib/build.gradle.kts b/bindings/kotlin/ldk-node-android/lib/build.gradle.kts index 5e6775cdc..69d126b54 100644 --- a/bindings/kotlin/ldk-node-android/lib/build.gradle.kts +++ b/bindings/kotlin/ldk-node-android/lib/build.gradle.kts @@ -16,7 +16,7 @@ repositories { } android { - compileSdk = 33 + compileSdk = 34 defaultConfig { minSdk = 21 From 2646629f76c6fbe5791e6e26b6f48c0b44f75c72 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Thu, 10 Oct 2024 02:17:57 -0700 Subject: [PATCH 105/127] Enable using VssStore with VssHeaderProvider. 
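A short usage sketch of the fixed-headers variant added below (the URL, store id, and header value are placeholders, and an otherwise configured `builder` is assumed):

use std::collections::HashMap;

let mut headers = HashMap::new();
headers.insert("Authorization".to_string(), "Bearer <token>".to_string());

let node = builder
	.build_with_vss_store_and_fixed_headers(
		"https://vss.example.org/vss".to_string(),
		"my_store_id".to_string(),
		headers,
	)
	.unwrap();
node.start().unwrap();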
--- src/builder.rs | 73 ++++++++++++++++++++++++++-------- src/io/vss_store.rs | 15 +++++-- src/lib.rs | 2 + tests/integration_tests_vss.rs | 18 +++++++-- 4 files changed, 86 insertions(+), 22 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 43171db1f..1a6410241 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -65,7 +65,9 @@ use bitcoin::secp256k1::PublicKey; use bitcoin::{BlockHash, Network}; #[cfg(any(vss, vss_test))] -use bitcoin::bip32::ChildNumber; +use bitcoin::bip32::{ChildNumber, Xpriv}; +#[cfg(any(vss, vss_test))] +use std::collections::HashMap; use std::convert::TryInto; use std::default::Default; use std::fmt; @@ -74,6 +76,8 @@ use std::path::PathBuf; use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex, RwLock}; use std::time::SystemTime; +#[cfg(any(vss, vss_test))] +use vss_client::headers::{FixedHeaders, VssHeaderProvider}; #[derive(Debug, Clone)] enum ChainDataSourceConfig { @@ -357,12 +361,41 @@ impl NodeBuilder { self.build_with_store(kv_store) } - /// Builds a [`Node`] instance with a [`VssStore`] backend and according to the options + /// Builds a [`Node`] instance with a [VSS] backend and according to the options /// previously configured. + /// + /// Uses [`FixedHeaders`] as default method for authentication/authorization. + /// Given `fixed_headers` are included as it is in all the requests made to VSS. + /// + /// **Caution**: VSS support is in **alpha** and is considered experimental. + /// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are + /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. + /// + /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md #[cfg(any(vss, vss_test))] - pub fn build_with_vss_store(&self, url: String, store_id: String) -> Result { - use bitcoin::key::Secp256k1; + pub fn build_with_vss_store_and_fixed_headers( + &self, vss_url: String, store_id: String, fixed_headers: HashMap, + ) -> Result { + let header_provider = Arc::new(FixedHeaders::new(fixed_headers)); + + self.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider) + } + /// Builds a [`Node`] instance with a [VSS] backend and according to the options + /// previously configured. + /// + /// Given `header_provider` is used to attach headers to every request made + /// to VSS. + /// + /// **Caution**: VSS support is in **alpha** and is considered experimental. + /// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are + /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. 
+ /// + /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + #[cfg(any(vss, vss_test))] + pub fn build_with_vss_store_and_header_provider( + &self, vss_url: String, store_id: String, header_provider: Arc, + ) -> Result { let logger = setup_logger(&self.config)?; let seed_bytes = seed_bytes_from_config( @@ -370,23 +403,14 @@ impl NodeBuilder { self.entropy_source_config.as_ref(), Arc::clone(&logger), )?; - let config = Arc::new(self.config.clone()); - let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { - log_error!(logger, "Failed to derive master secret: {}", e); - BuildError::InvalidSeedBytes - })?; + let config = Arc::new(self.config.clone()); - let vss_xprv = xprv - .derive_priv(&Secp256k1::new(), &[ChildNumber::Hardened { index: 877 }]) - .map_err(|e| { - log_error!(logger, "Failed to derive VSS secret: {}", e); - BuildError::KVStoreSetupFailed - })?; + let vss_xprv = derive_vss_xprv(config.clone(), &seed_bytes, Arc::clone(&logger))?; let vss_seed_bytes: [u8; 32] = vss_xprv.private_key.secret_bytes(); - let vss_store = Arc::new(VssStore::new(url, store_id, vss_seed_bytes)); + let vss_store = Arc::new(VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider)); build_with_store_internal( config, self.chain_data_source_config.as_ref(), @@ -1079,6 +1103,23 @@ fn seed_bytes_from_config( } } +#[cfg(any(vss, vss_test))] +fn derive_vss_xprv( + config: Arc, seed_bytes: &[u8; 64], logger: Arc, +) -> Result { + use bitcoin::key::Secp256k1; + + let xprv = Xpriv::new_master(config.network, seed_bytes).map_err(|e| { + log_error!(logger, "Failed to derive master secret: {}", e); + BuildError::InvalidSeedBytes + })?; + + xprv.derive_priv(&Secp256k1::new(), &[ChildNumber::Hardened { index: 877 }]).map_err(|e| { + log_error!(logger, "Failed to derive VSS secret: {}", e); + BuildError::KVStoreSetupFailed + }) +} + /// Sanitize the user-provided node alias to ensure that it is a valid protocol-specified UTF-8 string. 
pub(crate) fn sanitize_alias(alias_str: &str) -> Result { let alias = alias_str.trim(); diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 474f7dbc7..fbed7f7cf 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -8,6 +8,7 @@ use lightning::io::{self, Error, ErrorKind}; #[cfg(test)] use std::panic::RefUnwindSafe; +use std::sync::Arc; use std::time::Duration; use crate::io::utils::check_namespace_key_validity; @@ -17,6 +18,7 @@ use rand::RngCore; use tokio::runtime::Runtime; use vss_client::client::VssClient; use vss_client::error::VssError; +use vss_client::headers::VssHeaderProvider; use vss_client::types::{ DeleteObjectRequest, GetObjectRequest, KeyValue, ListKeyVersionsRequest, PutObjectRequest, Storable, @@ -43,7 +45,10 @@ pub struct VssStore { } impl VssStore { - pub(crate) fn new(base_url: String, store_id: String, data_encryption_key: [u8; 32]) -> Self { + pub(crate) fn new( + base_url: String, store_id: String, data_encryption_key: [u8; 32], + header_provider: Arc, + ) -> Self { let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); let storable_builder = StorableBuilder::new(data_encryption_key, RandEntropySource); let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(100)) @@ -59,7 +64,7 @@ impl VssStore { ) }) as _); - let client = VssClient::new(base_url, retry_policy); + let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); Self { client, store_id, runtime, storable_builder } } @@ -238,6 +243,8 @@ mod tests { use crate::io::test_utils::do_read_write_remove_list_persist; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng, RngCore}; + use std::collections::HashMap; + use vss_client::headers::FixedHeaders; #[test] fn read_write_remove_list_persist() { @@ -246,7 +253,9 @@ mod tests { let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); let mut data_encryption_key = [0u8; 32]; rng.fill_bytes(&mut data_encryption_key); - let vss_store = VssStore::new(vss_base_url, rand_store_id, data_encryption_key); + let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, data_encryption_key, header_provider); do_read_write_remove_list_persist(&vss_store); } diff --git a/src/lib.rs b/src/lib.rs index 42b99406a..8213712b8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -100,6 +100,8 @@ pub use bip39; pub use bitcoin; pub use lightning; pub use lightning_invoice; +#[cfg(any(vss, vss_test))] +pub use vss_client; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; pub use error::Error as NodeError; diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index 483902375..525c1f1f1 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -10,6 +10,7 @@ mod common; use ldk_node::Builder; +use std::collections::HashMap; #[test] fn channel_full_cycle_with_vss_store() { @@ -20,15 +21,26 @@ fn channel_full_cycle_with_vss_store() { let mut builder_a = Builder::from_config(config_a); builder_a.set_chain_source_esplora(esplora_url.clone(), None); let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); - let node_a = - builder_a.build_with_vss_store(vss_base_url.clone(), "node_1_store".to_string()).unwrap(); + let node_a = builder_a + .build_with_vss_store_and_fixed_headers( + vss_base_url.clone(), + "node_1_store".to_string(), + HashMap::new(), + ) + .unwrap(); node_a.start().unwrap(); println!("\n== Node B =="); 
let config_b = common::random_config(true); let mut builder_b = Builder::from_config(config_b); builder_b.set_chain_source_esplora(esplora_url.clone(), None); - let node_b = builder_b.build_with_vss_store(vss_base_url, "node_2_store".to_string()).unwrap(); + let node_b = builder_b + .build_with_vss_store_and_fixed_headers( + vss_base_url, + "node_2_store".to_string(), + HashMap::new(), + ) + .unwrap(); node_b.start().unwrap(); common::do_channel_full_cycle( From ffea164662187218a4f0dc2e5ff71c343485ccd7 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Wed, 16 Oct 2024 15:24:54 -0700 Subject: [PATCH 106/127] Default to LnUrlJWT auth for using VSS. build_with_vss_store now uses LNURL-Auth as the default method for authentication/authorization. --- src/builder.rs | 58 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/src/builder.rs b/src/builder.rs index 1a6410241..a4b9a6eab 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -77,7 +77,7 @@ use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex, RwLock}; use std::time::SystemTime; #[cfg(any(vss, vss_test))] -use vss_client::headers::{FixedHeaders, VssHeaderProvider}; +use vss_client::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider}; #[derive(Debug, Clone)] enum ChainDataSourceConfig { @@ -361,10 +361,66 @@ impl NodeBuilder { self.build_with_store(kv_store) } + /// Builds a [`Node`] instance with a [VSS] backend and according to the options + /// previously configured. + /// + /// Uses [LNURL-auth] based authentication scheme as default method for authentication/authorization. + /// + /// The LNURL challenge will be retrieved by making a request to the given `lnurl_auth_server_url`. + /// The returned JWT token in response to the signed LNURL request, will be used for + /// authentication/authorization of all the requests made to VSS. + /// + /// `fixed_headers` are included as it is in all the requests made to VSS and LNURL auth server. + /// + /// **Caution**: VSS support is in **alpha** and is considered experimental. + /// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are + /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. 
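For reference, a minimal sketch of calling the LNURL-auth based `build_with_vss_store` defined just below (URLs and store id are placeholders, and an otherwise configured `builder` is assumed):

use std::collections::HashMap;

let node = builder
	.build_with_vss_store(
		"https://vss.example.org/vss".to_string(),
		"my_store_id".to_string(),
		"https://lnurl-auth.example.org/auth".to_string(),
		HashMap::new(),
	)
	.unwrap();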
+ /// + /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + /// [LNURL-auth]: https://github.com/lnurl/luds/blob/luds/04.md + #[cfg(any(vss, vss_test))] + pub fn build_with_vss_store( + &self, vss_url: String, store_id: String, lnurl_auth_server_url: String, + fixed_headers: HashMap, + ) -> Result { + use bitcoin::key::Secp256k1; + + let logger = setup_logger(&self.config)?; + + let seed_bytes = seed_bytes_from_config( + &self.config, + self.entropy_source_config.as_ref(), + Arc::clone(&logger), + )?; + + let config = Arc::new(self.config.clone()); + + let vss_xprv = derive_vss_xprv(config, &seed_bytes, Arc::clone(&logger))?; + + let lnurl_auth_xprv = vss_xprv + .derive_priv(&Secp256k1::new(), &[ChildNumber::Hardened { index: 138 }]) + .map_err(|e| { + log_error!(logger, "Failed to derive VSS secret: {}", e); + BuildError::KVStoreSetupFailed + })?; + + let lnurl_auth_jwt_provider = + LnurlAuthToJwtProvider::new(lnurl_auth_xprv, lnurl_auth_server_url, fixed_headers) + .map_err(|e| { + log_error!(logger, "Failed to create LnurlAuthToJwtProvider: {}", e); + BuildError::KVStoreSetupFailed + })?; + + let header_provider = Arc::new(lnurl_auth_jwt_provider); + + self.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider) + } + /// Builds a [`Node`] instance with a [VSS] backend and according to the options /// previously configured. /// /// Uses [`FixedHeaders`] as default method for authentication/authorization. + /// /// Given `fixed_headers` are included as it is in all the requests made to VSS. /// /// **Caution**: VSS support is in **alpha** and is considered experimental. From cf91516000cd897baa175a039e8c96a8be1e79b9 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Tue, 15 Oct 2024 00:00:30 -0700 Subject: [PATCH 107/127] Add build_with_vss* methods for ArcedNodeBuilder. --- src/builder.rs | 74 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/src/builder.rs b/src/builder.rs index a4b9a6eab..4fa8f53f5 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -631,6 +631,80 @@ impl ArcedNodeBuilder { self.inner.read().unwrap().build_with_fs_store().map(Arc::new) } + /// Builds a [`Node`] instance with a [VSS] backend and according to the options + /// previously configured. + /// + /// Uses [LNURL-auth] based authentication scheme as default method for authentication/authorization. + /// + /// The LNURL challenge will be retrieved by making a request to the given `lnurl_auth_server_url`. + /// The returned JWT token in response to the signed LNURL request, will be used for + /// authentication/authorization of all the requests made to VSS. + /// + /// `fixed_headers` are included as it is in all the requests made to VSS and LNURL auth server. + /// + /// **Caution**: VSS support is in **alpha** and is considered experimental. + /// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are + /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. 
+ /// + /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + /// [LNURL-auth]: https://github.com/lnurl/luds/blob/luds/04.md + #[cfg(any(vss, vss_test))] + pub fn build_with_vss_store( + &self, vss_url: String, store_id: String, lnurl_auth_server_url: String, + fixed_headers: HashMap, + ) -> Result, BuildError> { + self.inner + .read() + .unwrap() + .build_with_vss_store(vss_url, store_id, lnurl_auth_server_url, fixed_headers) + .map(Arc::new) + } + + /// Builds a [`Node`] instance with a [VSS] backend and according to the options + /// previously configured. + /// + /// Uses [`FixedHeaders`] as default method for authentication/authorization. + /// + /// Given `fixed_headers` are included as it is in all the requests made to VSS. + /// + /// **Caution**: VSS support is in **alpha** and is considered experimental. + /// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are + /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. + /// + /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + #[cfg(any(vss, vss_test))] + pub fn build_with_vss_store_and_fixed_headers( + &self, vss_url: String, store_id: String, fixed_headers: HashMap, + ) -> Result, BuildError> { + self.inner + .read() + .unwrap() + .build_with_vss_store_and_fixed_headers(vss_url, store_id, fixed_headers) + .map(Arc::new) + } + + /// Builds a [`Node`] instance with a [VSS] backend and according to the options + /// previously configured. + /// + /// Given `header_provider` is used to attach headers to every request made + /// to VSS. + /// + /// **Caution**: VSS support is in **alpha** and is considered experimental. + /// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are + /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. + /// + /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + #[cfg(any(vss, vss_test))] + pub fn build_with_vss_store_and_header_provider( + &self, vss_url: String, store_id: String, header_provider: Arc, + ) -> Result, BuildError> { + self.inner + .read() + .unwrap() + .build_with_vss_store_and_header_provider(vss_url, store_id, header_provider) + .map(Arc::new) + } + /// Builds a [`Node`] instance according to the options previously configured. pub fn build_with_store(&self, kv_store: Arc) -> Result, BuildError> { self.inner.read().unwrap().build_with_store(kv_store).map(Arc::new) From 0ae0fc12eb6dc52e61d2c9e9261793095b30d9a7 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Tue, 15 Oct 2024 15:32:39 -0700 Subject: [PATCH 108/127] Use KeyObfuscator in VssStore. For client-side key obfuscation, improving privacy and security. 
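Concretely, this changes what the VSS server gets to see as keys, roughly along these lines (namespace and key names are hypothetical, a configured `store: VssStore` is assumed, and the obfuscated part is whatever `KeyObfuscator` produces for the configured obfuscation master key):

// Before: the full key was stored in cleartext, e.g.
//   "some_namespace#some_sub_namespace#channel_monitor_123"
// After: only the namespaces remain readable, while the key itself becomes an opaque
// ciphertext that `extract_key` transparently reverses on reads via the helpers updated
// below, e.g.
//   "some_namespace#some_sub_namespace#AbO3dGhl..."
let unified_key =
	store.build_key("some_namespace", "some_sub_namespace", "channel_monitor_123").unwrap();
assert_eq!(store.extract_key(&unified_key).unwrap(), "channel_monitor_123");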
--- src/builder.rs | 8 +++++-- src/io/vss_store.rs | 52 ++++++++++++++++++++++++++++++++------------- 2 files changed, 43 insertions(+), 17 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index e24e3b498..d088adf14 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -477,7 +477,11 @@ impl NodeBuilder { let vss_seed_bytes: [u8; 32] = vss_xprv.private_key.secret_bytes(); - let vss_store = Arc::new(VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider)); + let vss_store = + VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider).map_err(|e| { + log_error!(logger, "Failed to setup VssStore: {}", e); + BuildError::KVStoreSetupFailed + })?; build_with_store_internal( config, self.chain_data_source_config.as_ref(), @@ -485,7 +489,7 @@ impl NodeBuilder { self.liquidity_source_config.as_ref(), seed_bytes, logger, - vss_store, + Arc::new(vss_store), ) } diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index e1b2a2a70..296eaabe3 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -5,16 +5,16 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use crate::io::utils::check_namespace_key_validity; +use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; use lightning::io::{self, Error, ErrorKind}; +use lightning::util::persist::KVStore; +use prost::Message; +use rand::RngCore; #[cfg(test)] use std::panic::RefUnwindSafe; use std::sync::Arc; use std::time::Duration; - -use crate::io::utils::check_namespace_key_validity; -use lightning::util::persist::KVStore; -use prost::Message; -use rand::RngCore; use tokio::runtime::Runtime; use vss_client::client::VssClient; use vss_client::error::VssError; @@ -23,6 +23,7 @@ use vss_client::types::{ DeleteObjectRequest, GetObjectRequest, KeyValue, ListKeyVersionsRequest, PutObjectRequest, Storable, }; +use vss_client::util::key_obfuscator::KeyObfuscator; use vss_client::util::retry::{ ExponentialBackoffRetryPolicy, FilteredRetryPolicy, JitteredRetryPolicy, MaxAttemptsRetryPolicy, MaxTotalDelayRetryPolicy, RetryPolicy, @@ -42,14 +43,18 @@ pub struct VssStore { store_id: String, runtime: Runtime, storable_builder: StorableBuilder, + key_obfuscator: KeyObfuscator, } impl VssStore { pub(crate) fn new( - base_url: String, store_id: String, data_encryption_key: [u8; 32], + base_url: String, store_id: String, vss_seed: [u8; 32], header_provider: Arc, - ) -> Self { - let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); + ) -> io::Result { + let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build()?; + let (data_encryption_key, obfuscation_master_key) = + derive_data_encryption_and_obfuscation_keys(&vss_seed); + let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); let storable_builder = StorableBuilder::new(data_encryption_key, RandEntropySource); let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) .with_max_attempts(10) @@ -65,16 +70,17 @@ impl VssStore { }) as _); let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); - Self { client, store_id, runtime, storable_builder } + Ok(Self { client, store_id, runtime, storable_builder, key_obfuscator }) } fn build_key( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result { + let obfuscated_key = self.key_obfuscator.obfuscate(key); if primary_namespace.is_empty() { - Ok(key.to_string()) + Ok(obfuscated_key) } else { - 
Ok(format!("{}#{}#{}", primary_namespace, secondary_namespace, key)) + Ok(format!("{}#{}#{}", primary_namespace, secondary_namespace, obfuscated_key)) } } @@ -82,7 +88,10 @@ impl VssStore { let mut parts = unified_key.splitn(3, '#'); let (_primary_namespace, _secondary_namespace) = (parts.next(), parts.next()); match parts.next() { - Some(actual_key) => Ok(actual_key.to_string()), + Some(obfuscated_key) => { + let actual_key = self.key_obfuscator.deobfuscate(obfuscated_key)?; + Ok(actual_key) + }, None => Err(Error::new(ErrorKind::InvalidData, "Invalid key format")), } } @@ -224,6 +233,19 @@ impl KVStore for VssStore { } } +fn derive_data_encryption_and_obfuscation_keys(vss_seed: &[u8; 32]) -> ([u8; 32], [u8; 32]) { + let hkdf = |initial_key_material: &[u8], salt: &[u8]| -> [u8; 32] { + let mut engine = HmacEngine::::new(salt); + engine.input(initial_key_material); + Hmac::from_engine(engine).to_byte_array() + }; + + let prk = hkdf(vss_seed, b"pseudo_random_key"); + let k1 = hkdf(&prk, b"data_encryption_key"); + let k2 = hkdf(&prk, &[&k1[..], b"obfuscation_key"].concat()); + (k1, k2) +} + /// A source for generating entropy/randomness using [`rand`]. pub(crate) struct RandEntropySource; @@ -251,11 +273,11 @@ mod tests { let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); let mut rng = thread_rng(); let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); - let mut data_encryption_key = [0u8; 32]; - rng.fill_bytes(&mut data_encryption_key); + let mut vss_seed = [0u8; 32]; + rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); let vss_store = - VssStore::new(vss_base_url, rand_store_id, data_encryption_key, header_provider); + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); do_read_write_remove_list_persist(&vss_store); } From b05d99b8c866ece729f3b47019f76fcf7895b990 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Wed, 16 Oct 2024 17:01:06 -0700 Subject: [PATCH 109/127] Launch VSS, remove cfg flag for it. --- .github/workflows/vss-integration.yml | 4 ++-- Cargo.toml | 2 -- src/builder.rs | 11 ----------- src/io/mod.rs | 1 - src/lib.rs | 1 - 5 files changed, 2 insertions(+), 17 deletions(-) diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml index 44b7f445d..83544313b 100644 --- a/.github/workflows/vss-integration.yml +++ b/.github/workflows/vss-integration.yml @@ -74,8 +74,8 @@ jobs: run: | cd ldk-node export TEST_VSS_BASE_URL="http://localhost:8080/vss" - RUSTFLAGS="--cfg vss_test --cfg vss" cargo build --verbose --color always - RUSTFLAGS="--cfg vss_test --cfg vss" cargo test --test integration_tests_vss + RUSTFLAGS="--cfg vss_test" cargo build --verbose --color always + RUSTFLAGS="--cfg vss_test" cargo test --test integration_tests_vss - name: Cleanup run: | diff --git a/Cargo.toml b/Cargo.toml index e56faefb0..df68c49a5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,6 @@ uniffi = { version = "0.27.3", features = ["build"], optional = true } serde = { version = "1.0.210", default-features = false, features = ["std", "derive"] } serde_json = { version = "1.0.128", default-features = false, features = ["std"] } -[target.'cfg(vss)'.dependencies] vss-client = "0.3" prost = { version = "0.11.6", default-features = false} @@ -113,7 +112,6 @@ panic = "abort" level = "forbid" # When adding a new cfg attribute, ensure that it is added to this list. 
check-cfg = [ - "cfg(vss)", "cfg(vss_test)", "cfg(ldk_bench)", "cfg(tokio_unstable)", diff --git a/src/builder.rs b/src/builder.rs index d088adf14..c14ffcf5a 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -14,7 +14,6 @@ use crate::fee_estimator::OnchainFeeEstimator; use crate::gossip::GossipSource; use crate::io::sqlite_store::SqliteStore; use crate::io::utils::{read_node_metrics, write_node_metrics}; -#[cfg(any(vss, vss_test))] use crate::io::vss_store::VssStore; use crate::liquidity::LiquiditySource; use crate::logger::{log_error, log_info, FilesystemLogger, Logger}; @@ -64,9 +63,7 @@ use bip39::Mnemonic; use bitcoin::secp256k1::PublicKey; use bitcoin::{BlockHash, Network}; -#[cfg(any(vss, vss_test))] use bitcoin::bip32::{ChildNumber, Xpriv}; -#[cfg(any(vss, vss_test))] use std::collections::HashMap; use std::convert::TryInto; use std::default::Default; @@ -76,7 +73,6 @@ use std::path::PathBuf; use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex, RwLock}; use std::time::SystemTime; -#[cfg(any(vss, vss_test))] use vss_client::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider}; #[derive(Debug, Clone)] @@ -389,7 +385,6 @@ impl NodeBuilder { /// /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md /// [LNURL-auth]: https://github.com/lnurl/luds/blob/luds/04.md - #[cfg(any(vss, vss_test))] pub fn build_with_vss_store( &self, vss_url: String, store_id: String, lnurl_auth_server_url: String, fixed_headers: HashMap, @@ -439,7 +434,6 @@ impl NodeBuilder { /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. /// /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md - #[cfg(any(vss, vss_test))] pub fn build_with_vss_store_and_fixed_headers( &self, vss_url: String, store_id: String, fixed_headers: HashMap, ) -> Result { @@ -459,7 +453,6 @@ impl NodeBuilder { /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. /// /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md - #[cfg(any(vss, vss_test))] pub fn build_with_vss_store_and_header_provider( &self, vss_url: String, store_id: String, header_provider: Arc, ) -> Result { @@ -676,7 +669,6 @@ impl ArcedNodeBuilder { /// /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md /// [LNURL-auth]: https://github.com/lnurl/luds/blob/luds/04.md - #[cfg(any(vss, vss_test))] pub fn build_with_vss_store( &self, vss_url: String, store_id: String, lnurl_auth_server_url: String, fixed_headers: HashMap, @@ -700,7 +692,6 @@ impl ArcedNodeBuilder { /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. /// /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md - #[cfg(any(vss, vss_test))] pub fn build_with_vss_store_and_fixed_headers( &self, vss_url: String, store_id: String, fixed_headers: HashMap, ) -> Result, BuildError> { @@ -722,7 +713,6 @@ impl ArcedNodeBuilder { /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. 
/// /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md - #[cfg(any(vss, vss_test))] pub fn build_with_vss_store_and_header_provider( &self, vss_url: String, store_id: String, header_provider: Arc, ) -> Result, BuildError> { @@ -1276,7 +1266,6 @@ fn seed_bytes_from_config( } } -#[cfg(any(vss, vss_test))] fn derive_vss_xprv( config: Arc, seed_bytes: &[u8; 64], logger: Arc, ) -> Result { diff --git a/src/io/mod.rs b/src/io/mod.rs index fab0a27f9..3192dbb86 100644 --- a/src/io/mod.rs +++ b/src/io/mod.rs @@ -11,7 +11,6 @@ pub mod sqlite_store; #[cfg(test)] pub(crate) mod test_utils; pub(crate) mod utils; -#[cfg(any(vss, vss_test))] pub(crate) mod vss_store; /// The event queue will be persisted under this key. diff --git a/src/lib.rs b/src/lib.rs index 232ab4e1a..8fc3972e1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -100,7 +100,6 @@ pub use bip39; pub use bitcoin; pub use lightning; pub use lightning_invoice; -#[cfg(any(vss, vss_test))] pub use vss_client; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; From 5bb4855cdba7a6c1d43bf19471885d534b8022f6 Mon Sep 17 00:00:00 2001 From: G8XSU <3442979+G8XSU@users.noreply.github.com> Date: Tue, 15 Oct 2024 00:02:16 -0700 Subject: [PATCH 110/127] Expose build_with_vss_* methods in bindings. --- bindings/ldk_node.udl | 20 ++++++++++++++++++++ src/uniffi_types.rs | 2 ++ 2 files changed, 22 insertions(+) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 5deb36915..6ec7208b2 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -50,6 +50,12 @@ interface Builder { Node build(); [Throws=BuildError] Node build_with_fs_store(); + [Throws=BuildError] + Node build_with_vss_store(string vss_url, string store_id, string lnurl_auth_server_url, record fixed_headers); + [Throws=BuildError] + Node build_with_vss_store_and_fixed_headers(string vss_url, string store_id, record fixed_headers); + [Throws=BuildError] + Node build_with_vss_store_and_header_provider(string vss_url, string store_id, VssHeaderProvider header_provider); }; interface Node { @@ -251,6 +257,20 @@ enum BuildError { "LoggerSetupFailed", }; +[Trait] +interface VssHeaderProvider { + [Async, Throws=VssHeaderProviderError] + record get_headers([ByRef]sequence request); +}; + +[Error] +enum VssHeaderProviderError { + "InvalidData", + "RequestError", + "AuthorizationError", + "InternalError", +}; + [Enum] interface Event { PaymentSuccessful(PaymentId? payment_id, PaymentHash payment_hash, u64? 
fee_paid_msat); diff --git a/src/uniffi_types.rs b/src/uniffi_types.rs index 894e5d739..c8c84dbed 100644 --- a/src/uniffi_types.rs +++ b/src/uniffi_types.rs @@ -32,6 +32,8 @@ pub use bitcoin::{Address, BlockHash, Network, OutPoint, Txid}; pub use bip39::Mnemonic; +pub use vss_client::headers::{VssHeaderProvider, VssHeaderProviderError}; + use crate::UniffiCustomTypeConverter; use crate::builder::sanitize_alias; From cfb85651ecc84ffcee6cb968d94c23027028eddd Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 17 Oct 2024 12:45:52 +0200 Subject: [PATCH 111/127] Bump version number to 0.4.0 --- Cargo.toml | 2 +- Package.swift | 2 +- bindings/kotlin/ldk-node-android/gradle.properties | 2 +- bindings/kotlin/ldk-node-jvm/gradle.properties | 2 +- bindings/python/pyproject.toml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index df68c49a5..9602b1016 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ldk-node" -version = "0.3.0" +version = "0.4.0" authors = ["Elias Rohrer "] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" diff --git a/Package.swift b/Package.swift index 67c02dd8b..7adf1e158 100644 --- a/Package.swift +++ b/Package.swift @@ -3,7 +3,7 @@ import PackageDescription -let tag = "v0.3.0" +let tag = "v0.4.0" let checksum = "07c8741768956bf1a51d1c25f751b5e29d1ae9ee2fd786c4282031c9a8a92f0c" let url = "https://github.com/lightningdevkit/ldk-node/releases/download/\(tag)/LDKNodeFFI.xcframework.zip" diff --git a/bindings/kotlin/ldk-node-android/gradle.properties b/bindings/kotlin/ldk-node-android/gradle.properties index 70f5823b6..c84f2c46c 100644 --- a/bindings/kotlin/ldk-node-android/gradle.properties +++ b/bindings/kotlin/ldk-node-android/gradle.properties @@ -2,4 +2,4 @@ org.gradle.jvmargs=-Xmx1536m android.useAndroidX=true android.enableJetifier=true kotlin.code.style=official -libraryVersion=0.3.0 +libraryVersion=0.4.0 diff --git a/bindings/kotlin/ldk-node-jvm/gradle.properties b/bindings/kotlin/ldk-node-jvm/gradle.properties index 4ed588117..a84d6e412 100644 --- a/bindings/kotlin/ldk-node-jvm/gradle.properties +++ b/bindings/kotlin/ldk-node-jvm/gradle.properties @@ -1,3 +1,3 @@ org.gradle.jvmargs=-Xmx1536m kotlin.code.style=official -libraryVersion=0.3.0 +libraryVersion=0.4.0 diff --git a/bindings/python/pyproject.toml b/bindings/python/pyproject.toml index c8ff0a79d..7d24d7884 100644 --- a/bindings/python/pyproject.toml +++ b/bindings/python/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ldk_node" -version = "0.3.0" +version = "0.4.0" authors = [ { name="Elias Rohrer", email="dev@tnull.de" }, ] From ff47a976a15528c1121e1793f6bdef28195ca606 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 17 Oct 2024 13:41:36 +0200 Subject: [PATCH 112/127] Update CHANGELOG for v0.4 --- CHANGELOG.md | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a3894899..7597ce36f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,45 @@ +# 0.4.0 - Oct 17, 2024 + +Besides numerous API improvements and bugfixes, this fourth minor release notably adds support for sourcing chain and fee rate data from a Bitcoin Core RPC backend, as well as experimental support for the [VSS] remote storage backend. + +## Feature and API updates +- Support for multiple chain sources has been added. To this end, Esplora-specific configuration options can now be given via `EsploraSyncConfig` to `Builder::set_chain_source_esplora`.
Furthermore, all configuration objects (including the main `Config`) are now exposed via the `config` sub-module (#365). +- Support for sourcing chain and fee estimation data from a Bitcoin Core RPC backend has been added (#370). +- Initial experimental support for an encrypted [VSS] remote storage backend has been added (#369, #376, #378). + - **Caution**: VSS support is in **alpha** and is considered experimental. Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. +- Support for setting the `NodeAlias` in public node announcements has been added. We now ensure that announced channels can only be opened and accepted when the required configuration options to operate as a public forwarding node are set (listening addresses and node alias). As part of this, `Node::connect_open_channel` was split into `open_channel` and `open_announced_channel` API methods (#330, #366). +- The `Node` can now be started via a new `Node::start_with_runtime` call that allows reusing an outer `tokio` runtime context, avoiding runtime stacking when run in `async` environments (#319; see the usage sketch below). +- Support for generating and paying unified QR codes has been added (#302). +- Support for `quantity` and `payer_note` fields when sending or receiving BOLT12 payments has been added (#327). +- Support for setting additional parameters when sending BOLT11 payments has been added (#336, #351). + +## Bug Fixes +- The `ChannelConfig` object has been refactored, now allowing users to query the currently applied `MaxDustHTLCExposure` limit (#350). +- A bug potentially leading to panicking on shutdown when stacking `tokio` runtime contexts has been fixed (#373). +- We now no longer panic when hitting a persistence failure during event handling. Instead, events will be replayed until successful (#374). + +## Compatibility Notes +- The LDK dependency has been updated to version 0.0.125 (#358, #375). +- The BDK dependency has been updated to version 1.0-beta.4 (#358). + - Going forward, the BDK state will be persisted in the configured `KVStore` backend. + - **Note**: The old descriptor state will *not* be automatically migrated on upgrade, potentially leading to address reuse. Privacy-conscious users might want to manually advance the descriptor by requesting new addresses until it reaches the previously observed height. + - After the node has been successfully upgraded, users may safely delete `bdk_wallet_*.sqlite` from the storage path. +- The `rust-bitcoin` dependency has been updated to version 0.32.2 (#358). +- The UniFFI dependency has been updated to version 0.27.3 (#379). +- The `bip21` dependency has been updated to version 0.5 (#358). +- The `rust-esplora-client` has been updated to version 0.9 (#358).
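The following sketch, referenced from the `Node::start_with_runtime` bullet above, illustrates reusing an outer Tokio runtime. The storage path and Esplora URL are placeholder values, the configuration is reduced to the essentials, and a `tokio` dependency in the embedding application is assumed; treat this as an illustrative sketch, not the project's canonical example.

```rust
use std::sync::Arc;

use ldk_node::bitcoin::Network;
use ldk_node::Builder;

fn main() {
    // A pre-existing multi-threaded Tokio runtime, e.g. one the embedding
    // application already uses for its own async tasks.
    let runtime =
        Arc::new(tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap());

    let mut builder = Builder::new();
    builder.set_network(Network::Testnet);
    builder.set_storage_dir_path("/tmp/ldk_node".to_string());
    // Placeholder Esplora endpoint; `None` uses the default sync configuration.
    builder.set_chain_source_esplora("https://blockstream.info/testnet/api".to_string(), None);

    let node = builder.build().expect("failed to build node");

    // Reuse the outer runtime instead of letting `start()` spawn its own,
    // avoiding stacked runtime contexts in async environments.
    node.start_with_runtime(Arc::clone(&runtime)).expect("failed to start node");

    // ... operate the node ...

    node.stop().expect("failed to stop node");
}
```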
+ +In total, this release features 55 files changed, 6134 insertions, 2184 deletions in 166 commits from 6 authors, in alphabetical order: + +- Elias Rohrer +- elnosh +- Enigbe Ochekliye +- G8XSU +- Ian Slane +- jbesraa + +[VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + # 0.3.0 - June 21, 2024 This third minor release notably adds support for BOLT12 payments, Anchor From 86b22ef9ce5a12debe7b48f38350d2921f617744 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 17 Oct 2024 14:14:29 +0200 Subject: [PATCH 113/127] Update Swift files for v0.4.0 --- Package.swift | 2 +- bindings/swift/Sources/LDKNode/LDKNode.swift | 3615 +++++++++++------- 2 files changed, 2269 insertions(+), 1348 deletions(-) diff --git a/Package.swift b/Package.swift index 7adf1e158..253db6e68 100644 --- a/Package.swift +++ b/Package.swift @@ -4,7 +4,7 @@ import PackageDescription let tag = "v0.4.0" -let checksum = "07c8741768956bf1a51d1c25f751b5e29d1ae9ee2fd786c4282031c9a8a92f0c" +let checksum = "5dcdfdd6e3331062d649786fa6e758487227f6037d9881353fe0c293a3a4c7e0" let url = "https://github.com/lightningdevkit/ldk-node/releases/download/\(tag)/LDKNodeFFI.xcframework.zip" let package = Package( diff --git a/bindings/swift/Sources/LDKNode/LDKNode.swift b/bindings/swift/Sources/LDKNode/LDKNode.swift index 5937c9050..835816b9f 100644 --- a/bindings/swift/Sources/LDKNode/LDKNode.swift +++ b/bindings/swift/Sources/LDKNode/LDKNode.swift @@ -1,7 +1,9 @@ // This file was autogenerated by some hot garbage in the `uniffi` crate. // Trust me, you don't want to mess with it! -import Foundation + import SystemConfiguration +// swiftlint:disable all +import Foundation // Depending on the consumer's build setup, the low-level FFI code // might be in a separate module, or it might be compiled inline into
@@ -295,6 +309,76 @@ private func uniffiCheckCallStatus( } } +private func uniffiTraitInterfaceCall( + callStatus: UnsafeMutablePointer, + makeCall: () throws -> T, + writeReturn: (T) -> () +) { + do { + try writeReturn(makeCall()) + } catch let error { + callStatus.pointee.code = CALL_UNEXPECTED_ERROR + callStatus.pointee.errorBuf = FfiConverterString.lower(String(describing: error)) + } +} + +private func uniffiTraitInterfaceCallWithError( + callStatus: UnsafeMutablePointer, + makeCall: () throws -> T, + writeReturn: (T) -> (), + lowerError: (E) -> RustBuffer +) { + do { + try writeReturn(makeCall()) + } catch let error as E { + callStatus.pointee.code = CALL_ERROR + callStatus.pointee.errorBuf = lowerError(error) + } catch { + callStatus.pointee.code = CALL_UNEXPECTED_ERROR + callStatus.pointee.errorBuf = FfiConverterString.lower(String(describing: error)) + } +} +fileprivate class UniffiHandleMap { + private var map: [UInt64: T] = [:] + private let lock = NSLock() + private var currentHandle: UInt64 = 1 + + func insert(obj: T) -> UInt64 { + lock.withLock { + let handle = currentHandle + currentHandle += 1 + map[handle] = obj + return handle + } + } + + func get(handle: UInt64) throws -> T { + try lock.withLock { + guard let obj = map[handle] else { + throw UniffiInternalError.unexpectedStaleHandle + } + return obj + } + } + + @discardableResult + func remove(handle: UInt64) throws -> T { + try lock.withLock { + guard let obj = map.removeValue(forKey: handle) else { + throw UniffiInternalError.unexpectedStaleHandle + } + return obj + } + } + + var count: Int { + get { + map.count + } + } +} + + // Public interface members begin here. @@ -430,168 +514,168 @@ public protocol Bolt11PaymentProtocol : AnyObject { func receiveViaJitChannel(amountMsat: UInt64, description: String, expirySecs: UInt32, maxLspFeeLimitMsat: UInt64?) throws -> Bolt11Invoice - func send(invoice: Bolt11Invoice) throws -> PaymentId + func send(invoice: Bolt11Invoice, sendingParameters: SendingParameters?) throws -> PaymentId func sendProbes(invoice: Bolt11Invoice) throws func sendProbesUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64) throws - func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64) throws -> PaymentId + func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64, sendingParameters: SendingParameters?) throws -> PaymentId } -public class Bolt11Payment: +open class Bolt11Payment: Bolt11PaymentProtocol { - fileprivate let pointer: UnsafeMutableRawPointer + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. 
+ public init(noPointer: NoPointer) { + self.pointer = nil + } + public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_bolt11payment(self.pointer, $0) } } + // No primary constructor declared for this class. deinit { + guard let pointer = pointer else { + return + } + try! rustCall { uniffi_ldk_node_fn_free_bolt11payment(pointer, $0) } } - - public func claimForHash(paymentHash: PaymentHash, claimableAmountMsat: UInt64, preimage: PaymentPreimage) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_claim_for_hash(self.uniffiClonePointer(), +open func claimForHash(paymentHash: PaymentHash, claimableAmountMsat: UInt64, preimage: PaymentPreimage)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_claim_for_hash(self.uniffiClonePointer(), FfiConverterTypePaymentHash.lower(paymentHash), FfiConverterUInt64.lower(claimableAmountMsat), FfiConverterTypePaymentPreimage.lower(preimage),$0 ) } - } - public func failForHash(paymentHash: PaymentHash) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_fail_for_hash(self.uniffiClonePointer(), +} + +open func failForHash(paymentHash: PaymentHash)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_fail_for_hash(self.uniffiClonePointer(), FfiConverterTypePaymentHash.lower(paymentHash),$0 ) } - } - public func receive(amountMsat: UInt64, description: String, expirySecs: UInt32) throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_receive(self.uniffiClonePointer(), +} + +open func receive(amountMsat: UInt64, description: String, expirySecs: UInt32)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), FfiConverterString.lower(description), FfiConverterUInt32.lower(expirySecs),$0 ) +}) } - ) - } - public func receiveForHash(amountMsat: UInt64, description: String, expirySecs: UInt32, paymentHash: PaymentHash) throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_receive_for_hash(self.uniffiClonePointer(), + +open func receiveForHash(amountMsat: UInt64, description: String, expirySecs: UInt32, paymentHash: PaymentHash)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive_for_hash(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), FfiConverterString.lower(description), FfiConverterUInt32.lower(expirySecs), FfiConverterTypePaymentHash.lower(paymentHash),$0 ) +}) } - ) - } - public func receiveVariableAmount(description: String, expirySecs: UInt32) throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount(self.uniffiClonePointer(), + +open func receiveVariableAmount(description: String, expirySecs: UInt32)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try 
rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount(self.uniffiClonePointer(), FfiConverterString.lower(description), FfiConverterUInt32.lower(expirySecs),$0 ) +}) } - ) - } - public func receiveVariableAmountForHash(description: String, expirySecs: UInt32, paymentHash: PaymentHash) throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount_for_hash(self.uniffiClonePointer(), + +open func receiveVariableAmountForHash(description: String, expirySecs: UInt32, paymentHash: PaymentHash)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount_for_hash(self.uniffiClonePointer(), FfiConverterString.lower(description), FfiConverterUInt32.lower(expirySecs), FfiConverterTypePaymentHash.lower(paymentHash),$0 ) +}) } - ) - } - public func receiveVariableAmountViaJitChannel(description: String, expirySecs: UInt32, maxProportionalLspFeeLimitPpmMsat: UInt64?) throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount_via_jit_channel(self.uniffiClonePointer(), + +open func receiveVariableAmountViaJitChannel(description: String, expirySecs: UInt32, maxProportionalLspFeeLimitPpmMsat: UInt64?)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount_via_jit_channel(self.uniffiClonePointer(), FfiConverterString.lower(description), FfiConverterUInt32.lower(expirySecs), FfiConverterOptionUInt64.lower(maxProportionalLspFeeLimitPpmMsat),$0 ) +}) } - ) - } - public func receiveViaJitChannel(amountMsat: UInt64, description: String, expirySecs: UInt32, maxLspFeeLimitMsat: UInt64?) 
throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_receive_via_jit_channel(self.uniffiClonePointer(), + +open func receiveViaJitChannel(amountMsat: UInt64, description: String, expirySecs: UInt32, maxLspFeeLimitMsat: UInt64?)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive_via_jit_channel(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), FfiConverterString.lower(description), FfiConverterUInt32.lower(expirySecs), FfiConverterOptionUInt64.lower(maxLspFeeLimitMsat),$0 ) +}) } - ) - } - public func send(invoice: Bolt11Invoice) throws -> PaymentId { - return try FfiConverterTypePaymentId.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_send(self.uniffiClonePointer(), - FfiConverterTypeBolt11Invoice.lower(invoice),$0 + +open func send(invoice: Bolt11Invoice, sendingParameters: SendingParameters?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_send(self.uniffiClonePointer(), + FfiConverterTypeBolt11Invoice.lower(invoice), + FfiConverterOptionTypeSendingParameters.lower(sendingParameters),$0 ) +}) } - ) - } - public func sendProbes(invoice: Bolt11Invoice) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_send_probes(self.uniffiClonePointer(), + +open func sendProbes(invoice: Bolt11Invoice)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_send_probes(self.uniffiClonePointer(), FfiConverterTypeBolt11Invoice.lower(invoice),$0 ) } - } - public func sendProbesUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_send_probes_using_amount(self.uniffiClonePointer(), +} + +open func sendProbesUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_send_probes_using_amount(self.uniffiClonePointer(), FfiConverterTypeBolt11Invoice.lower(invoice), FfiConverterUInt64.lower(amountMsat),$0 ) } - } - public func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64) throws -> PaymentId { - return try FfiConverterTypePaymentId.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_send_using_amount(self.uniffiClonePointer(), +} + +open func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64, sendingParameters: SendingParameters?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_send_using_amount(self.uniffiClonePointer(), FfiConverterTypeBolt11Invoice.lower(invoice), - FfiConverterUInt64.lower(amountMsat),$0 + FfiConverterUInt64.lower(amountMsat), + FfiConverterOptionTypeSendingParameters.lower(sendingParameters),$0 ) +}) } - ) - } + } @@ -627,6 +711,8 @@ public struct FfiConverterTypeBolt11Payment: FfiConverter { } + + public func FfiConverterTypeBolt11Payment_lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt11Payment { return try 
FfiConverterTypeBolt11Payment.lift(pointer) } @@ -640,108 +726,121 @@ public func FfiConverterTypeBolt11Payment_lower(_ value: Bolt11Payment) -> Unsaf public protocol Bolt12PaymentProtocol : AnyObject { - func initiateRefund(amountMsat: UInt64, expirySecs: UInt32) throws -> Refund + func initiateRefund(amountMsat: UInt64, expirySecs: UInt32, quantity: UInt64?, payerNote: String?) throws -> Refund - func receive(amountMsat: UInt64, description: String) throws -> Offer + func receive(amountMsat: UInt64, description: String, expirySecs: UInt32?, quantity: UInt64?) throws -> Offer - func receiveVariableAmount(description: String) throws -> Offer + func receiveVariableAmount(description: String, expirySecs: UInt32?) throws -> Offer func requestRefundPayment(refund: Refund) throws -> Bolt12Invoice - func send(offer: Offer, payerNote: String?) throws -> PaymentId + func send(offer: Offer, quantity: UInt64?, payerNote: String?) throws -> PaymentId - func sendUsingAmount(offer: Offer, payerNote: String?, amountMsat: UInt64) throws -> PaymentId + func sendUsingAmount(offer: Offer, amountMsat: UInt64, quantity: UInt64?, payerNote: String?) throws -> PaymentId } -public class Bolt12Payment: +open class Bolt12Payment: Bolt12PaymentProtocol { - fileprivate let pointer: UnsafeMutableRawPointer + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_bolt12payment(self.pointer, $0) } } + // No primary constructor declared for this class. deinit { + guard let pointer = pointer else { + return + } + try! 
rustCall { uniffi_ldk_node_fn_free_bolt12payment(pointer, $0) } } - - public func initiateRefund(amountMsat: UInt64, expirySecs: UInt32) throws -> Refund { - return try FfiConverterTypeRefund.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_initiate_refund(self.uniffiClonePointer(), +open func initiateRefund(amountMsat: UInt64, expirySecs: UInt32, quantity: UInt64?, payerNote: String?)throws -> Refund { + return try FfiConverterTypeRefund.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_initiate_refund(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), - FfiConverterUInt32.lower(expirySecs),$0 + FfiConverterUInt32.lower(expirySecs), + FfiConverterOptionUInt64.lower(quantity), + FfiConverterOptionString.lower(payerNote),$0 ) +}) } - ) - } - public func receive(amountMsat: UInt64, description: String) throws -> Offer { - return try FfiConverterTypeOffer.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_receive(self.uniffiClonePointer(), + +open func receive(amountMsat: UInt64, description: String, expirySecs: UInt32?, quantity: UInt64?)throws -> Offer { + return try FfiConverterTypeOffer.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_receive(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), - FfiConverterString.lower(description),$0 + FfiConverterString.lower(description), + FfiConverterOptionUInt32.lower(expirySecs), + FfiConverterOptionUInt64.lower(quantity),$0 ) +}) } - ) - } - public func receiveVariableAmount(description: String) throws -> Offer { - return try FfiConverterTypeOffer.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_receive_variable_amount(self.uniffiClonePointer(), - FfiConverterString.lower(description),$0 + +open func receiveVariableAmount(description: String, expirySecs: UInt32?)throws -> Offer { + return try FfiConverterTypeOffer.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_receive_variable_amount(self.uniffiClonePointer(), + FfiConverterString.lower(description), + FfiConverterOptionUInt32.lower(expirySecs),$0 ) +}) } - ) - } - public func requestRefundPayment(refund: Refund) throws -> Bolt12Invoice { - return try FfiConverterTypeBolt12Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_request_refund_payment(self.uniffiClonePointer(), + +open func requestRefundPayment(refund: Refund)throws -> Bolt12Invoice { + return try FfiConverterTypeBolt12Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_request_refund_payment(self.uniffiClonePointer(), FfiConverterTypeRefund.lower(refund),$0 ) +}) } - ) - } - public func send(offer: Offer, payerNote: String?) 
throws -> PaymentId { - return try FfiConverterTypePaymentId.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_send(self.uniffiClonePointer(), + +open func send(offer: Offer, quantity: UInt64?, payerNote: String?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_send(self.uniffiClonePointer(), FfiConverterTypeOffer.lower(offer), + FfiConverterOptionUInt64.lower(quantity), FfiConverterOptionString.lower(payerNote),$0 ) +}) } - ) - } - public func sendUsingAmount(offer: Offer, payerNote: String?, amountMsat: UInt64) throws -> PaymentId { - return try FfiConverterTypePaymentId.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_send_using_amount(self.uniffiClonePointer(), + +open func sendUsingAmount(offer: Offer, amountMsat: UInt64, quantity: UInt64?, payerNote: String?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_send_using_amount(self.uniffiClonePointer(), FfiConverterTypeOffer.lower(offer), - FfiConverterOptionString.lower(payerNote), - FfiConverterUInt64.lower(amountMsat),$0 + FfiConverterUInt64.lower(amountMsat), + FfiConverterOptionUInt64.lower(quantity), + FfiConverterOptionString.lower(payerNote),$0 ) +}) } - ) - } + } @@ -777,6 +876,8 @@ public struct FfiConverterTypeBolt12Payment: FfiConverter { } + + public func FfiConverterTypeBolt12Payment_lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt12Payment { return try FfiConverterTypeBolt12Payment.lift(pointer) } @@ -794,14 +895,22 @@ public protocol BuilderProtocol : AnyObject { func buildWithFsStore() throws -> Node + func buildWithVssStore(vssUrl: String, storeId: String, lnurlAuthServerUrl: String, fixedHeaders: [String: String]) throws -> Node + + func buildWithVssStoreAndFixedHeaders(vssUrl: String, storeId: String, fixedHeaders: [String: String]) throws -> Node + + func buildWithVssStoreAndHeaderProvider(vssUrl: String, storeId: String, headerProvider: VssHeaderProvider) throws -> Node + + func setChainSourceBitcoindRpc(rpcHost: String, rpcPort: UInt16, rpcUser: String, rpcPassword: String) + + func setChainSourceEsplora(serverUrl: String, config: EsploraSyncConfig?) + func setEntropyBip39Mnemonic(mnemonic: Mnemonic, passphrase: String?) func setEntropySeedBytes(seedBytes: [UInt8]) throws func setEntropySeedPath(seedPath: String) - func setEsploraServer(esploraServerUrl: String) - func setGossipSourceP2p() func setGossipSourceRgs(rgsServerUrl: String) @@ -812,154 +921,203 @@ public protocol BuilderProtocol : AnyObject { func setNetwork(network: Network) + func setNodeAlias(nodeAlias: String) throws + func setStorageDirPath(storageDirPath: String) } -public class Builder: +open class Builder: BuilderProtocol { - fileprivate let pointer: UnsafeMutableRawPointer + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. 
- required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_builder(self.pointer, $0) } } - public convenience init() { - self.init(unsafeFromRawPointer: try! rustCall() { - uniffi_ldk_node_fn_constructor_builder_new($0) -}) - } +public convenience init() { + let pointer = + try! rustCall() { + uniffi_ldk_node_fn_constructor_builder_new($0 + ) +} + self.init(unsafeFromRawPointer: pointer) +} deinit { + guard let pointer = pointer else { + return + } + try! rustCall { uniffi_ldk_node_fn_free_builder(pointer, $0) } } - public static func fromConfig(config: Config) -> Builder { - return Builder(unsafeFromRawPointer: try! rustCall() { +public static func fromConfig(config: Config) -> Builder { + return try! FfiConverterTypeBuilder.lift(try! rustCall() { uniffi_ldk_node_fn_constructor_builder_from_config( - FfiConverterTypeConfig.lower(config),$0) + FfiConverterTypeConfig.lower(config),$0 + ) }) - } - +} +open func build()throws -> Node { + return try FfiConverterTypeNode.lift(try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_build(self.uniffiClonePointer(),$0 + ) +}) +} - public func build() throws -> Node { - return try FfiConverterTypeNode.lift( - try - rustCallWithError(FfiConverterTypeBuildError.lift) { - uniffi_ldk_node_fn_method_builder_build(self.uniffiClonePointer(), $0 +open func buildWithFsStore()throws -> Node { + return try FfiConverterTypeNode.lift(try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_build_with_fs_store(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func buildWithFsStore() throws -> Node { - return try FfiConverterTypeNode.lift( - try - rustCallWithError(FfiConverterTypeBuildError.lift) { - uniffi_ldk_node_fn_method_builder_build_with_fs_store(self.uniffiClonePointer(), $0 + +open func buildWithVssStore(vssUrl: String, storeId: String, lnurlAuthServerUrl: String, fixedHeaders: [String: String])throws -> Node { + return try FfiConverterTypeNode.lift(try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_build_with_vss_store(self.uniffiClonePointer(), + FfiConverterString.lower(vssUrl), + FfiConverterString.lower(storeId), + FfiConverterString.lower(lnurlAuthServerUrl), + FfiConverterDictionaryStringString.lower(fixedHeaders),$0 ) +}) +} + +open func buildWithVssStoreAndFixedHeaders(vssUrl: String, storeId: String, fixedHeaders: [String: String])throws -> Node { + return try FfiConverterTypeNode.lift(try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_build_with_vss_store_and_fixed_headers(self.uniffiClonePointer(), + FfiConverterString.lower(vssUrl), + FfiConverterString.lower(storeId), + FfiConverterDictionaryStringString.lower(fixedHeaders),$0 + ) +}) +} + +open func 
buildWithVssStoreAndHeaderProvider(vssUrl: String, storeId: String, headerProvider: VssHeaderProvider)throws -> Node { + return try FfiConverterTypeNode.lift(try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_build_with_vss_store_and_header_provider(self.uniffiClonePointer(), + FfiConverterString.lower(vssUrl), + FfiConverterString.lower(storeId), + FfiConverterTypeVssHeaderProvider.lower(headerProvider),$0 + ) +}) +} + +open func setChainSourceBitcoindRpc(rpcHost: String, rpcPort: UInt16, rpcUser: String, rpcPassword: String) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_chain_source_bitcoind_rpc(self.uniffiClonePointer(), + FfiConverterString.lower(rpcHost), + FfiConverterUInt16.lower(rpcPort), + FfiConverterString.lower(rpcUser), + FfiConverterString.lower(rpcPassword),$0 + ) +} } - ) - } - public func setEntropyBip39Mnemonic(mnemonic: Mnemonic, passphrase: String?) { - try! - rustCall() { - uniffi_ldk_node_fn_method_builder_set_entropy_bip39_mnemonic(self.uniffiClonePointer(), +open func setChainSourceEsplora(serverUrl: String, config: EsploraSyncConfig?) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_chain_source_esplora(self.uniffiClonePointer(), + FfiConverterString.lower(serverUrl), + FfiConverterOptionTypeEsploraSyncConfig.lower(config),$0 + ) +} +} + +open func setEntropyBip39Mnemonic(mnemonic: Mnemonic, passphrase: String?) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_entropy_bip39_mnemonic(self.uniffiClonePointer(), FfiConverterTypeMnemonic.lower(mnemonic), FfiConverterOptionString.lower(passphrase),$0 ) } - } - public func setEntropySeedBytes(seedBytes: [UInt8]) throws { - try - rustCallWithError(FfiConverterTypeBuildError.lift) { - uniffi_ldk_node_fn_method_builder_set_entropy_seed_bytes(self.uniffiClonePointer(), +} + +open func setEntropySeedBytes(seedBytes: [UInt8])throws {try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_set_entropy_seed_bytes(self.uniffiClonePointer(), FfiConverterSequenceUInt8.lower(seedBytes),$0 ) } - } - public func setEntropySeedPath(seedPath: String) { - try! - rustCall() { +} - uniffi_ldk_node_fn_method_builder_set_entropy_seed_path(self.uniffiClonePointer(), +open func setEntropySeedPath(seedPath: String) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_entropy_seed_path(self.uniffiClonePointer(), FfiConverterString.lower(seedPath),$0 ) } - } - public func setEsploraServer(esploraServerUrl: String) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_builder_set_esplora_server(self.uniffiClonePointer(), - FfiConverterString.lower(esploraServerUrl),$0 - ) } - } - public func setGossipSourceP2p() { - try! - rustCall() { - uniffi_ldk_node_fn_method_builder_set_gossip_source_p2p(self.uniffiClonePointer(), $0 +open func setGossipSourceP2p() {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_gossip_source_p2p(self.uniffiClonePointer(),$0 ) } - } - public func setGossipSourceRgs(rgsServerUrl: String) { - try! - rustCall() { +} - uniffi_ldk_node_fn_method_builder_set_gossip_source_rgs(self.uniffiClonePointer(), +open func setGossipSourceRgs(rgsServerUrl: String) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_gossip_source_rgs(self.uniffiClonePointer(), FfiConverterString.lower(rgsServerUrl),$0 ) } - } - public func setLiquiditySourceLsps2(address: SocketAddress, nodeId: PublicKey, token: String?) { - try! 
- rustCall() { +} - uniffi_ldk_node_fn_method_builder_set_liquidity_source_lsps2(self.uniffiClonePointer(), +open func setLiquiditySourceLsps2(address: SocketAddress, nodeId: PublicKey, token: String?) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_liquidity_source_lsps2(self.uniffiClonePointer(), FfiConverterTypeSocketAddress.lower(address), FfiConverterTypePublicKey.lower(nodeId), FfiConverterOptionString.lower(token),$0 ) } - } - public func setListeningAddresses(listeningAddresses: [SocketAddress]) throws { - try - rustCallWithError(FfiConverterTypeBuildError.lift) { - uniffi_ldk_node_fn_method_builder_set_listening_addresses(self.uniffiClonePointer(), +} + +open func setListeningAddresses(listeningAddresses: [SocketAddress])throws {try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_set_listening_addresses(self.uniffiClonePointer(), FfiConverterSequenceTypeSocketAddress.lower(listeningAddresses),$0 ) } - } - public func setNetwork(network: Network) { - try! - rustCall() { +} - uniffi_ldk_node_fn_method_builder_set_network(self.uniffiClonePointer(), +open func setNetwork(network: Network) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_network(self.uniffiClonePointer(), FfiConverterTypeNetwork.lower(network),$0 ) } - } - public func setStorageDirPath(storageDirPath: String) { - try! - rustCall() { +} - uniffi_ldk_node_fn_method_builder_set_storage_dir_path(self.uniffiClonePointer(), +open func setNodeAlias(nodeAlias: String)throws {try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_set_node_alias(self.uniffiClonePointer(), + FfiConverterString.lower(nodeAlias),$0 + ) +} +} + +open func setStorageDirPath(storageDirPath: String) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_storage_dir_path(self.uniffiClonePointer(), FfiConverterString.lower(storageDirPath),$0 ) } - } +} + } @@ -995,6 +1153,8 @@ public struct FfiConverterTypeBuilder: FfiConverter { } + + public func FfiConverterTypeBuilder_lift(_ pointer: UnsafeMutableRawPointer) throws -> Builder { return try FfiConverterTypeBuilder.lift(pointer) } @@ -1006,299 +1166,90 @@ public func FfiConverterTypeBuilder_lower(_ value: Builder) -> UnsafeMutableRawP -public protocol ChannelConfigProtocol : AnyObject { - - func acceptUnderpayingHtlcs() -> Bool - - func cltvExpiryDelta() -> UInt16 - - func forceCloseAvoidanceMaxFeeSatoshis() -> UInt64 - - func forwardingFeeBaseMsat() -> UInt32 - - func forwardingFeeProportionalMillionths() -> UInt32 - - func setAcceptUnderpayingHtlcs(value: Bool) - - func setCltvExpiryDelta(value: UInt16) - - func setForceCloseAvoidanceMaxFeeSatoshis(valueSat: UInt64) +public protocol NetworkGraphProtocol : AnyObject { - func setForwardingFeeBaseMsat(feeMsat: UInt32) + func channel(shortChannelId: UInt64) -> ChannelInfo? - func setForwardingFeeProportionalMillionths(value: UInt32) + func listChannels() -> [UInt64] - func setMaxDustHtlcExposureFromFeeRateMultiplier(multiplier: UInt64) + func listNodes() -> [NodeId] - func setMaxDustHtlcExposureFromFixedLimit(limitMsat: UInt64) + func node(nodeId: NodeId) -> NodeInfo? } -public class ChannelConfig: - ChannelConfigProtocol { - fileprivate let pointer: UnsafeMutableRawPointer +open class NetworkGraph: + NetworkGraphProtocol { + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. 
+ public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } - public func uniffiClonePointer() -> UnsafeMutableRawPointer { - return try! rustCall { uniffi_ldk_node_fn_clone_channelconfig(self.pointer, $0) } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil } - public convenience init() { - self.init(unsafeFromRawPointer: try! rustCall() { - uniffi_ldk_node_fn_constructor_channelconfig_new($0) -}) + + public func uniffiClonePointer() -> UnsafeMutableRawPointer { + return try! rustCall { uniffi_ldk_node_fn_clone_networkgraph(self.pointer, $0) } } + // No primary constructor declared for this class. deinit { - try! rustCall { uniffi_ldk_node_fn_free_channelconfig(pointer, $0) } + guard let pointer = pointer else { + return + } + + try! rustCall { uniffi_ldk_node_fn_free_networkgraph(pointer, $0) } } - - public func acceptUnderpayingHtlcs() -> Bool { - return try! FfiConverterBool.lift( - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_accept_underpaying_htlcs(self.uniffiClonePointer(), $0 +open func channel(shortChannelId: UInt64) -> ChannelInfo? { + return try! FfiConverterOptionTypeChannelInfo.lift(try! rustCall() { + uniffi_ldk_node_fn_method_networkgraph_channel(self.uniffiClonePointer(), + FfiConverterUInt64.lower(shortChannelId),$0 ) +}) } - ) - } - public func cltvExpiryDelta() -> UInt16 { - return try! FfiConverterUInt16.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_channelconfig_cltv_expiry_delta(self.uniffiClonePointer(), $0 +open func listChannels() -> [UInt64] { + return try! FfiConverterSequenceUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_networkgraph_list_channels(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func forceCloseAvoidanceMaxFeeSatoshis() -> UInt64 { - return try! FfiConverterUInt64.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_channelconfig_force_close_avoidance_max_fee_satoshis(self.uniffiClonePointer(), $0 +open func listNodes() -> [NodeId] { + return try! FfiConverterSequenceTypeNodeId.lift(try! rustCall() { + uniffi_ldk_node_fn_method_networkgraph_list_nodes(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func forwardingFeeBaseMsat() -> UInt32 { - return try! FfiConverterUInt32.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_channelconfig_forwarding_fee_base_msat(self.uniffiClonePointer(), $0 +open func node(nodeId: NodeId) -> NodeInfo? { + return try! FfiConverterOptionTypeNodeInfo.lift(try! rustCall() { + uniffi_ldk_node_fn_method_networkgraph_node(self.uniffiClonePointer(), + FfiConverterTypeNodeId.lower(nodeId),$0 ) +}) } - ) - } - public func forwardingFeeProportionalMillionths() -> UInt32 { - return try! FfiConverterUInt32.lift( - try! 
- rustCall() { - uniffi_ldk_node_fn_method_channelconfig_forwarding_fee_proportional_millionths(self.uniffiClonePointer(), $0 - ) -} - ) - } - public func setAcceptUnderpayingHtlcs(value: Bool) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_accept_underpaying_htlcs(self.uniffiClonePointer(), - FfiConverterBool.lower(value),$0 - ) -} - } - public func setCltvExpiryDelta(value: UInt16) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_cltv_expiry_delta(self.uniffiClonePointer(), - FfiConverterUInt16.lower(value),$0 - ) -} - } - public func setForceCloseAvoidanceMaxFeeSatoshis(valueSat: UInt64) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_force_close_avoidance_max_fee_satoshis(self.uniffiClonePointer(), - FfiConverterUInt64.lower(valueSat),$0 - ) -} - } - public func setForwardingFeeBaseMsat(feeMsat: UInt32) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_forwarding_fee_base_msat(self.uniffiClonePointer(), - FfiConverterUInt32.lower(feeMsat),$0 - ) -} - } - public func setForwardingFeeProportionalMillionths(value: UInt32) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_forwarding_fee_proportional_millionths(self.uniffiClonePointer(), - FfiConverterUInt32.lower(value),$0 - ) -} - } - public func setMaxDustHtlcExposureFromFeeRateMultiplier(multiplier: UInt64) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_max_dust_htlc_exposure_from_fee_rate_multiplier(self.uniffiClonePointer(), - FfiConverterUInt64.lower(multiplier),$0 - ) -} - } - public func setMaxDustHtlcExposureFromFixedLimit(limitMsat: UInt64) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_max_dust_htlc_exposure_from_fixed_limit(self.uniffiClonePointer(), - FfiConverterUInt64.lower(limitMsat),$0 - ) -} - } - -} - -public struct FfiConverterTypeChannelConfig: FfiConverter { - - typealias FfiType = UnsafeMutableRawPointer - typealias SwiftType = ChannelConfig - - public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> ChannelConfig { - return ChannelConfig(unsafeFromRawPointer: pointer) - } - - public static func lower(_ value: ChannelConfig) -> UnsafeMutableRawPointer { - return value.uniffiClonePointer() - } - - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelConfig { - let v: UInt64 = try readInt(&buf) - // The Rust code won't compile if a pointer won't fit in a UInt64. - // We have to go via `UInt` because that's the thing that's the size of a pointer. - let ptr = UnsafeMutableRawPointer(bitPattern: UInt(truncatingIfNeeded: v)) - if (ptr == nil) { - throw UniffiInternalError.unexpectedNullPointer - } - return try lift(ptr!) - } - - public static func write(_ value: ChannelConfig, into buf: inout [UInt8]) { - // This fiddling is because `Int` is the thing that's the same size as a pointer. - // The Rust code won't compile if a pointer won't fit in a `UInt64`. - writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) - } -} - - -public func FfiConverterTypeChannelConfig_lift(_ pointer: UnsafeMutableRawPointer) throws -> ChannelConfig { - return try FfiConverterTypeChannelConfig.lift(pointer) -} - -public func FfiConverterTypeChannelConfig_lower(_ value: ChannelConfig) -> UnsafeMutableRawPointer { - return FfiConverterTypeChannelConfig.lower(value) -} - - - - -public protocol NetworkGraphProtocol : AnyObject { - - func channel(shortChannelId: UInt64) -> ChannelInfo? 
- - func listChannels() -> [UInt64] - - func listNodes() -> [NodeId] - - func node(nodeId: NodeId) -> NodeInfo? - -} - -public class NetworkGraph: - NetworkGraphProtocol { - fileprivate let pointer: UnsafeMutableRawPointer - - // TODO: We'd like this to be `private` but for Swifty reasons, - // we can't implement `FfiConverter` without making this `required` and we can't - // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { - self.pointer = pointer - } - - public func uniffiClonePointer() -> UnsafeMutableRawPointer { - return try! rustCall { uniffi_ldk_node_fn_clone_networkgraph(self.pointer, $0) } - } - - deinit { - try! rustCall { uniffi_ldk_node_fn_free_networkgraph(pointer, $0) } - } - - - - - - public func channel(shortChannelId: UInt64) -> ChannelInfo? { - return try! FfiConverterOptionTypeChannelInfo.lift( - try! - rustCall() { - - uniffi_ldk_node_fn_method_networkgraph_channel(self.uniffiClonePointer(), - FfiConverterUInt64.lower(shortChannelId),$0 - ) -} - ) - } - public func listChannels() -> [UInt64] { - return try! FfiConverterSequenceUInt64.lift( - try! - rustCall() { - - uniffi_ldk_node_fn_method_networkgraph_list_channels(self.uniffiClonePointer(), $0 - ) -} - ) - } - public func listNodes() -> [NodeId] { - return try! FfiConverterSequenceTypeNodeId.lift( - try! - rustCall() { - - uniffi_ldk_node_fn_method_networkgraph_list_nodes(self.uniffiClonePointer(), $0 - ) -} - ) - } - public func node(nodeId: NodeId) -> NodeInfo? { - return try! FfiConverterOptionTypeNodeInfo.lift( - try! - rustCall() { - - uniffi_ldk_node_fn_method_networkgraph_node(self.uniffiClonePointer(), - FfiConverterTypeNodeId.lower(nodeId),$0 - ) -} - ) - } - + } public struct FfiConverterTypeNetworkGraph: FfiConverter { @@ -1333,6 +1284,8 @@ public struct FfiConverterTypeNetworkGraph: FfiConverter { } + + public func FfiConverterTypeNetworkGraph_lift(_ pointer: UnsafeMutableRawPointer) throws -> NetworkGraph { return try FfiConverterTypeNetworkGraph.lift(pointer) } @@ -1356,13 +1309,11 @@ public protocol NodeProtocol : AnyObject { func connect(nodeId: PublicKey, address: SocketAddress, persist: Bool) throws - func connectOpenChannel(nodeId: PublicKey, address: SocketAddress, channelAmountSats: UInt64, pushToCounterpartyMsat: UInt64?, channelConfig: ChannelConfig?, announceChannel: Bool) throws -> UserChannelId - func disconnect(nodeId: PublicKey) throws func eventHandled() - func forceCloseChannel(userChannelId: UserChannelId, counterpartyNodeId: PublicKey) throws + func forceCloseChannel(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, reason: String?) throws func listBalances() -> BalanceDetails @@ -1380,15 +1331,21 @@ public protocol NodeProtocol : AnyObject { func nextEventAsync() async -> Event + func nodeAlias() -> NodeAlias? + func nodeId() -> PublicKey func onchainPayment() -> OnchainPayment + func openAnnouncedChannel(nodeId: PublicKey, address: SocketAddress, channelAmountSats: UInt64, pushToCounterpartyMsat: UInt64?, channelConfig: ChannelConfig?) throws -> UserChannelId + + func openChannel(nodeId: PublicKey, address: SocketAddress, channelAmountSats: UInt64, pushToCounterpartyMsat: UInt64?, channelConfig: ChannelConfig?) throws -> UserChannelId + func payment(paymentId: PaymentId) -> PaymentDetails? 
func removePayment(paymentId: PaymentId) throws - func signMessage(msg: [UInt8]) throws -> String + func signMessage(msg: [UInt8]) -> String func spontaneousPayment() -> SpontaneousPayment @@ -1400,6 +1357,8 @@ public protocol NodeProtocol : AnyObject { func syncWallets() throws + func unifiedQrPayment() -> UnifiedQrPayment + func updateChannelConfig(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, channelConfig: ChannelConfig) throws func verifySignature(msg: [UInt8], sig: String, pkey: PublicKey) -> Bool @@ -1408,193 +1367,163 @@ public protocol NodeProtocol : AnyObject { } -public class Node: +open class Node: NodeProtocol { - fileprivate let pointer: UnsafeMutableRawPointer + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_node(self.pointer, $0) } } + // No primary constructor declared for this class. deinit { + guard let pointer = pointer else { + return + } + try! rustCall { uniffi_ldk_node_fn_free_node(pointer, $0) } } - - public func bolt11Payment() -> Bolt11Payment { - return try! FfiConverterTypeBolt11Payment.lift( - try! - rustCall() { - - uniffi_ldk_node_fn_method_node_bolt11_payment(self.uniffiClonePointer(), $0 +open func bolt11Payment() -> Bolt11Payment { + return try! FfiConverterTypeBolt11Payment.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_bolt11_payment(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func bolt12Payment() -> Bolt12Payment { - return try! FfiConverterTypeBolt12Payment.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_bolt12_payment(self.uniffiClonePointer(), $0 +open func bolt12Payment() -> Bolt12Payment { + return try! FfiConverterTypeBolt12Payment.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_bolt12_payment(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func closeChannel(userChannelId: UserChannelId, counterpartyNodeId: PublicKey) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_close_channel(self.uniffiClonePointer(), + +open func closeChannel(userChannelId: UserChannelId, counterpartyNodeId: PublicKey)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_close_channel(self.uniffiClonePointer(), FfiConverterTypeUserChannelId.lower(userChannelId), FfiConverterTypePublicKey.lower(counterpartyNodeId),$0 ) } - } - public func config() -> Config { - return try! FfiConverterTypeConfig.lift( - try! 
- rustCall() { +} - uniffi_ldk_node_fn_method_node_config(self.uniffiClonePointer(), $0 +open func config() -> Config { + return try! FfiConverterTypeConfig.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_config(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func connect(nodeId: PublicKey, address: SocketAddress, persist: Bool) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_connect(self.uniffiClonePointer(), + +open func connect(nodeId: PublicKey, address: SocketAddress, persist: Bool)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_connect(self.uniffiClonePointer(), FfiConverterTypePublicKey.lower(nodeId), FfiConverterTypeSocketAddress.lower(address), FfiConverterBool.lower(persist),$0 ) } - } - public func connectOpenChannel(nodeId: PublicKey, address: SocketAddress, channelAmountSats: UInt64, pushToCounterpartyMsat: UInt64?, channelConfig: ChannelConfig?, announceChannel: Bool) throws -> UserChannelId { - return try FfiConverterTypeUserChannelId.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_connect_open_channel(self.uniffiClonePointer(), - FfiConverterTypePublicKey.lower(nodeId), - FfiConverterTypeSocketAddress.lower(address), - FfiConverterUInt64.lower(channelAmountSats), - FfiConverterOptionUInt64.lower(pushToCounterpartyMsat), - FfiConverterOptionTypeChannelConfig.lower(channelConfig), - FfiConverterBool.lower(announceChannel),$0 - ) } - ) - } - public func disconnect(nodeId: PublicKey) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_disconnect(self.uniffiClonePointer(), + +open func disconnect(nodeId: PublicKey)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_disconnect(self.uniffiClonePointer(), FfiConverterTypePublicKey.lower(nodeId),$0 ) } - } - public func eventHandled() { - try! - rustCall() { +} - uniffi_ldk_node_fn_method_node_event_handled(self.uniffiClonePointer(), $0 +open func eventHandled() {try! rustCall() { + uniffi_ldk_node_fn_method_node_event_handled(self.uniffiClonePointer(),$0 ) } - } - public func forceCloseChannel(userChannelId: UserChannelId, counterpartyNodeId: PublicKey) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_force_close_channel(self.uniffiClonePointer(), +} + +open func forceCloseChannel(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, reason: String?)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_force_close_channel(self.uniffiClonePointer(), FfiConverterTypeUserChannelId.lower(userChannelId), - FfiConverterTypePublicKey.lower(counterpartyNodeId),$0 + FfiConverterTypePublicKey.lower(counterpartyNodeId), + FfiConverterOptionString.lower(reason),$0 ) } - } - public func listBalances() -> BalanceDetails { - return try! FfiConverterTypeBalanceDetails.lift( - try! - rustCall() { +} - uniffi_ldk_node_fn_method_node_list_balances(self.uniffiClonePointer(), $0 +open func listBalances() -> BalanceDetails { + return try! FfiConverterTypeBalanceDetails.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_list_balances(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func listChannels() -> [ChannelDetails] { - return try! FfiConverterSequenceTypeChannelDetails.lift( - try! 
- rustCall() { - uniffi_ldk_node_fn_method_node_list_channels(self.uniffiClonePointer(), $0 +open func listChannels() -> [ChannelDetails] { + return try! FfiConverterSequenceTypeChannelDetails.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_list_channels(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func listPayments() -> [PaymentDetails] { - return try! FfiConverterSequenceTypePaymentDetails.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_list_payments(self.uniffiClonePointer(), $0 +open func listPayments() -> [PaymentDetails] { + return try! FfiConverterSequenceTypePaymentDetails.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_list_payments(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func listPeers() -> [PeerDetails] { - return try! FfiConverterSequenceTypePeerDetails.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_list_peers(self.uniffiClonePointer(), $0 +open func listPeers() -> [PeerDetails] { + return try! FfiConverterSequenceTypePeerDetails.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_list_peers(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func listeningAddresses() -> [SocketAddress]? { - return try! FfiConverterOptionSequenceTypeSocketAddress.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_listening_addresses(self.uniffiClonePointer(), $0 +open func listeningAddresses() -> [SocketAddress]? { + return try! FfiConverterOptionSequenceTypeSocketAddress.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_listening_addresses(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func networkGraph() -> NetworkGraph { - return try! FfiConverterTypeNetworkGraph.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_network_graph(self.uniffiClonePointer(), $0 +open func networkGraph() -> NetworkGraph { + return try! FfiConverterTypeNetworkGraph.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_network_graph(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func nextEvent() -> Event? { - return try! FfiConverterOptionTypeEvent.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_next_event(self.uniffiClonePointer(), $0 +open func nextEvent() -> Event? { + return try! FfiConverterOptionTypeEvent.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_next_event(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func nextEventAsync() async -> Event { - return try! await uniffiRustCallAsync( + +open func nextEventAsync()async -> Event { + return + try! await uniffiRustCallAsync( rustFutureFunc: { uniffi_ldk_node_fn_method_node_next_event_async( self.uniffiClonePointer() + ) }, pollFunc: ffi_ldk_node_rust_future_poll_rust_buffer, @@ -1604,132 +1533,141 @@ public class Node: errorHandler: nil ) - } - +} - public func nodeId() -> PublicKey { - return try! FfiConverterTypePublicKey.lift( - try! - rustCall() { +open func nodeAlias() -> NodeAlias? { + return try! FfiConverterOptionTypeNodeAlias.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_node_alias(self.uniffiClonePointer(),$0 + ) +}) +} - uniffi_ldk_node_fn_method_node_node_id(self.uniffiClonePointer(), $0 +open func nodeId() -> PublicKey { + return try! FfiConverterTypePublicKey.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_node_id(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func onchainPayment() -> OnchainPayment { - return try! FfiConverterTypeOnchainPayment.lift( - try! 
- rustCall() { - uniffi_ldk_node_fn_method_node_onchain_payment(self.uniffiClonePointer(), $0 +open func onchainPayment() -> OnchainPayment { + return try! FfiConverterTypeOnchainPayment.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_onchain_payment(self.uniffiClonePointer(),$0 ) +}) +} + +open func openAnnouncedChannel(nodeId: PublicKey, address: SocketAddress, channelAmountSats: UInt64, pushToCounterpartyMsat: UInt64?, channelConfig: ChannelConfig?)throws -> UserChannelId { + return try FfiConverterTypeUserChannelId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_open_announced_channel(self.uniffiClonePointer(), + FfiConverterTypePublicKey.lower(nodeId), + FfiConverterTypeSocketAddress.lower(address), + FfiConverterUInt64.lower(channelAmountSats), + FfiConverterOptionUInt64.lower(pushToCounterpartyMsat), + FfiConverterOptionTypeChannelConfig.lower(channelConfig),$0 + ) +}) +} + +open func openChannel(nodeId: PublicKey, address: SocketAddress, channelAmountSats: UInt64, pushToCounterpartyMsat: UInt64?, channelConfig: ChannelConfig?)throws -> UserChannelId { + return try FfiConverterTypeUserChannelId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_open_channel(self.uniffiClonePointer(), + FfiConverterTypePublicKey.lower(nodeId), + FfiConverterTypeSocketAddress.lower(address), + FfiConverterUInt64.lower(channelAmountSats), + FfiConverterOptionUInt64.lower(pushToCounterpartyMsat), + FfiConverterOptionTypeChannelConfig.lower(channelConfig),$0 + ) +}) } - ) - } - public func payment(paymentId: PaymentId) -> PaymentDetails? { - return try! FfiConverterOptionTypePaymentDetails.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_payment(self.uniffiClonePointer(), +open func payment(paymentId: PaymentId) -> PaymentDetails? { + return try! FfiConverterOptionTypePaymentDetails.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_payment(self.uniffiClonePointer(), FfiConverterTypePaymentId.lower(paymentId),$0 ) +}) } - ) - } - public func removePayment(paymentId: PaymentId) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_remove_payment(self.uniffiClonePointer(), + +open func removePayment(paymentId: PaymentId)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_remove_payment(self.uniffiClonePointer(), FfiConverterTypePaymentId.lower(paymentId),$0 ) } - } - public func signMessage(msg: [UInt8]) throws -> String { - return try FfiConverterString.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_sign_message(self.uniffiClonePointer(), +} + +open func signMessage(msg: [UInt8]) -> String { + return try! FfiConverterString.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_sign_message(self.uniffiClonePointer(), FfiConverterSequenceUInt8.lower(msg),$0 ) +}) +} + +open func spontaneousPayment() -> SpontaneousPayment { + return try! FfiConverterTypeSpontaneousPayment.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_spontaneous_payment(self.uniffiClonePointer(),$0 + ) +}) } - ) - } - public func spontaneousPayment() -> SpontaneousPayment { - return try! FfiConverterTypeSpontaneousPayment.lift( - try! 
- rustCall() { - uniffi_ldk_node_fn_method_node_spontaneous_payment(self.uniffiClonePointer(), $0 +open func start()throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_start(self.uniffiClonePointer(),$0 ) } - ) - } - public func start() throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_start(self.uniffiClonePointer(), $0 +} + +open func status() -> NodeStatus { + return try! FfiConverterTypeNodeStatus.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_status(self.uniffiClonePointer(),$0 ) +}) } - } - public func status() -> NodeStatus { - return try! FfiConverterTypeNodeStatus.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_status(self.uniffiClonePointer(), $0 +open func stop()throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_stop(self.uniffiClonePointer(),$0 ) } - ) - } - public func stop() throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_stop(self.uniffiClonePointer(), $0 +} + +open func syncWallets()throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_sync_wallets(self.uniffiClonePointer(),$0 ) } - } - public func syncWallets() throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_sync_wallets(self.uniffiClonePointer(), $0 +} + +open func unifiedQrPayment() -> UnifiedQrPayment { + return try! FfiConverterTypeUnifiedQrPayment.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_unified_qr_payment(self.uniffiClonePointer(),$0 ) +}) } - } - public func updateChannelConfig(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, channelConfig: ChannelConfig) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_update_channel_config(self.uniffiClonePointer(), + +open func updateChannelConfig(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, channelConfig: ChannelConfig)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_update_channel_config(self.uniffiClonePointer(), FfiConverterTypeUserChannelId.lower(userChannelId), FfiConverterTypePublicKey.lower(counterpartyNodeId), FfiConverterTypeChannelConfig.lower(channelConfig),$0 ) } - } - public func verifySignature(msg: [UInt8], sig: String, pkey: PublicKey) -> Bool { - return try! FfiConverterBool.lift( - try! - rustCall() { +} - uniffi_ldk_node_fn_method_node_verify_signature(self.uniffiClonePointer(), +open func verifySignature(msg: [UInt8], sig: String, pkey: PublicKey) -> Bool { + return try! FfiConverterBool.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_verify_signature(self.uniffiClonePointer(), FfiConverterSequenceUInt8.lower(msg), FfiConverterString.lower(sig), FfiConverterTypePublicKey.lower(pkey),$0 ) +}) } - ) - } - public func waitNextEvent() -> Event { - return try! FfiConverterTypeEvent.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_wait_next_event(self.uniffiClonePointer(), $0 +open func waitNextEvent() -> Event { + return try! FfiConverterTypeEvent.lift(try! 
rustCall() { + uniffi_ldk_node_fn_method_node_wait_next_event(self.uniffiClonePointer(),$0 ) +}) } - ) - } + } @@ -1765,6 +1703,8 @@ public struct FfiConverterTypeNode: FfiConverter { } + + public func FfiConverterTypeNode_lift(_ pointer: UnsafeMutableRawPointer) throws -> Node { return try FfiConverterTypeNode.lift(pointer) } @@ -1782,63 +1722,75 @@ public protocol OnchainPaymentProtocol : AnyObject { func sendAllToAddress(address: Address) throws -> Txid - func sendToAddress(address: Address, amountMsat: UInt64) throws -> Txid + func sendToAddress(address: Address, amountSats: UInt64) throws -> Txid } -public class OnchainPayment: +open class OnchainPayment: OnchainPaymentProtocol { - fileprivate let pointer: UnsafeMutableRawPointer + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_onchainpayment(self.pointer, $0) } } + // No primary constructor declared for this class. deinit { + guard let pointer = pointer else { + return + } + try! 
rustCall { uniffi_ldk_node_fn_free_onchainpayment(pointer, $0) } } - - public func newAddress() throws -> Address { - return try FfiConverterTypeAddress.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_onchainpayment_new_address(self.uniffiClonePointer(), $0 +open func newAddress()throws -> Address { + return try FfiConverterTypeAddress.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_onchainpayment_new_address(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func sendAllToAddress(address: Address) throws -> Txid { - return try FfiConverterTypeTxid.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_onchainpayment_send_all_to_address(self.uniffiClonePointer(), + +open func sendAllToAddress(address: Address)throws -> Txid { + return try FfiConverterTypeTxid.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_onchainpayment_send_all_to_address(self.uniffiClonePointer(), FfiConverterTypeAddress.lower(address),$0 ) +}) } - ) - } - public func sendToAddress(address: Address, amountMsat: UInt64) throws -> Txid { - return try FfiConverterTypeTxid.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_onchainpayment_send_to_address(self.uniffiClonePointer(), + +open func sendToAddress(address: Address, amountSats: UInt64)throws -> Txid { + return try FfiConverterTypeTxid.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_onchainpayment_send_to_address(self.uniffiClonePointer(), FfiConverterTypeAddress.lower(address), - FfiConverterUInt64.lower(amountMsat),$0 + FfiConverterUInt64.lower(amountSats),$0 ) +}) } - ) - } + } @@ -1874,6 +1826,8 @@ public struct FfiConverterTypeOnchainPayment: FfiConverter { } + + public func FfiConverterTypeOnchainPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> OnchainPayment { return try FfiConverterTypeOnchainPayment.lift(pointer) } @@ -1887,55 +1841,71 @@ public func FfiConverterTypeOnchainPayment_lower(_ value: OnchainPayment) -> Uns public protocol SpontaneousPaymentProtocol : AnyObject { - func send(amountMsat: UInt64, nodeId: PublicKey) throws -> PaymentId + func send(amountMsat: UInt64, nodeId: PublicKey, sendingParameters: SendingParameters?) throws -> PaymentId func sendProbes(amountMsat: UInt64, nodeId: PublicKey) throws } -public class SpontaneousPayment: +open class SpontaneousPayment: SpontaneousPaymentProtocol { - fileprivate let pointer: UnsafeMutableRawPointer + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. 
Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_spontaneouspayment(self.pointer, $0) } } + // No primary constructor declared for this class. deinit { + guard let pointer = pointer else { + return + } + try! rustCall { uniffi_ldk_node_fn_free_spontaneouspayment(pointer, $0) } } - - public func send(amountMsat: UInt64, nodeId: PublicKey) throws -> PaymentId { - return try FfiConverterTypePaymentId.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_spontaneouspayment_send(self.uniffiClonePointer(), +open func send(amountMsat: UInt64, nodeId: PublicKey, sendingParameters: SendingParameters?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_spontaneouspayment_send(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), - FfiConverterTypePublicKey.lower(nodeId),$0 + FfiConverterTypePublicKey.lower(nodeId), + FfiConverterOptionTypeSendingParameters.lower(sendingParameters),$0 ) +}) } - ) - } - public func sendProbes(amountMsat: UInt64, nodeId: PublicKey) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_spontaneouspayment_send_probes(self.uniffiClonePointer(), + +open func sendProbes(amountMsat: UInt64, nodeId: PublicKey)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_spontaneouspayment_send_probes(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), FfiConverterTypePublicKey.lower(nodeId),$0 ) } - } +} + } @@ -1971,12 +1941,241 @@ public struct FfiConverterTypeSpontaneousPayment: FfiConverter { } -public func FfiConverterTypeSpontaneousPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> SpontaneousPayment { - return try FfiConverterTypeSpontaneousPayment.lift(pointer) + + +public func FfiConverterTypeSpontaneousPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> SpontaneousPayment { + return try FfiConverterTypeSpontaneousPayment.lift(pointer) +} + +public func FfiConverterTypeSpontaneousPayment_lower(_ value: SpontaneousPayment) -> UnsafeMutableRawPointer { + return FfiConverterTypeSpontaneousPayment.lower(value) +} + + + + +public protocol UnifiedQrPaymentProtocol : AnyObject { + + func receive(amountSats: UInt64, message: String, expirySec: UInt32) throws -> String + + func send(uriStr: String) throws -> QrPaymentResult + +} + +open class UnifiedQrPayment: + UnifiedQrPaymentProtocol { + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } + + // TODO: We'd like this to be `private` but for Swifty reasons, + // we can't implement `FfiConverter` without making this `required` and we can't + // make it `required` without making it `public`. + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + self.pointer = pointer + } + + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. 
+ /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + + public func uniffiClonePointer() -> UnsafeMutableRawPointer { + return try! rustCall { uniffi_ldk_node_fn_clone_unifiedqrpayment(self.pointer, $0) } + } + // No primary constructor declared for this class. + + deinit { + guard let pointer = pointer else { + return + } + + try! rustCall { uniffi_ldk_node_fn_free_unifiedqrpayment(pointer, $0) } + } + + + + +open func receive(amountSats: UInt64, message: String, expirySec: UInt32)throws -> String { + return try FfiConverterString.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_unifiedqrpayment_receive(self.uniffiClonePointer(), + FfiConverterUInt64.lower(amountSats), + FfiConverterString.lower(message), + FfiConverterUInt32.lower(expirySec),$0 + ) +}) +} + +open func send(uriStr: String)throws -> QrPaymentResult { + return try FfiConverterTypeQrPaymentResult.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_unifiedqrpayment_send(self.uniffiClonePointer(), + FfiConverterString.lower(uriStr),$0 + ) +}) +} + + +} + +public struct FfiConverterTypeUnifiedQrPayment: FfiConverter { + + typealias FfiType = UnsafeMutableRawPointer + typealias SwiftType = UnifiedQrPayment + + public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> UnifiedQrPayment { + return UnifiedQrPayment(unsafeFromRawPointer: pointer) + } + + public static func lower(_ value: UnifiedQrPayment) -> UnsafeMutableRawPointer { + return value.uniffiClonePointer() + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> UnifiedQrPayment { + let v: UInt64 = try readInt(&buf) + // The Rust code won't compile if a pointer won't fit in a UInt64. + // We have to go via `UInt` because that's the thing that's the size of a pointer. + let ptr = UnsafeMutableRawPointer(bitPattern: UInt(truncatingIfNeeded: v)) + if (ptr == nil) { + throw UniffiInternalError.unexpectedNullPointer + } + return try lift(ptr!) + } + + public static func write(_ value: UnifiedQrPayment, into buf: inout [UInt8]) { + // This fiddling is because `Int` is the thing that's the same size as a pointer. + // The Rust code won't compile if a pointer won't fit in a `UInt64`. + writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) + } +} + + + + +public func FfiConverterTypeUnifiedQrPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> UnifiedQrPayment { + return try FfiConverterTypeUnifiedQrPayment.lift(pointer) +} + +public func FfiConverterTypeUnifiedQrPayment_lower(_ value: UnifiedQrPayment) -> UnsafeMutableRawPointer { + return FfiConverterTypeUnifiedQrPayment.lower(value) +} + + + + +public protocol VssHeaderProviderProtocol : AnyObject { + + func getHeaders(request: [UInt8]) async throws -> [String: String] + +} + +open class VssHeaderProvider: + VssHeaderProviderProtocol { + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } + + // TODO: We'd like this to be `private` but for Swifty reasons, + // we can't implement `FfiConverter` without making this `required` and we can't + // make it `required` without making it `public`. 
+ required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + self.pointer = pointer + } + + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + + public func uniffiClonePointer() -> UnsafeMutableRawPointer { + return try! rustCall { uniffi_ldk_node_fn_clone_vssheaderprovider(self.pointer, $0) } + } + // No primary constructor declared for this class. + + deinit { + guard let pointer = pointer else { + return + } + + try! rustCall { uniffi_ldk_node_fn_free_vssheaderprovider(pointer, $0) } + } + + + + +open func getHeaders(request: [UInt8])async throws -> [String: String] { + return + try await uniffiRustCallAsync( + rustFutureFunc: { + uniffi_ldk_node_fn_method_vssheaderprovider_get_headers( + self.uniffiClonePointer(), + FfiConverterSequenceUInt8.lower(request) + ) + }, + pollFunc: ffi_ldk_node_rust_future_poll_rust_buffer, + completeFunc: ffi_ldk_node_rust_future_complete_rust_buffer, + freeFunc: ffi_ldk_node_rust_future_free_rust_buffer, + liftFunc: FfiConverterDictionaryStringString.lift, + errorHandler: FfiConverterTypeVssHeaderProviderError.lift + ) +} + + +} + +public struct FfiConverterTypeVssHeaderProvider: FfiConverter { + + typealias FfiType = UnsafeMutableRawPointer + typealias SwiftType = VssHeaderProvider + + public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> VssHeaderProvider { + return VssHeaderProvider(unsafeFromRawPointer: pointer) + } + + public static func lower(_ value: VssHeaderProvider) -> UnsafeMutableRawPointer { + return value.uniffiClonePointer() + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> VssHeaderProvider { + let v: UInt64 = try readInt(&buf) + // The Rust code won't compile if a pointer won't fit in a UInt64. + // We have to go via `UInt` because that's the thing that's the size of a pointer. + let ptr = UnsafeMutableRawPointer(bitPattern: UInt(truncatingIfNeeded: v)) + if (ptr == nil) { + throw UniffiInternalError.unexpectedNullPointer + } + return try lift(ptr!) + } + + public static func write(_ value: VssHeaderProvider, into buf: inout [UInt8]) { + // This fiddling is because `Int` is the thing that's the same size as a pointer. + // The Rust code won't compile if a pointer won't fit in a `UInt64`. + writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) + } +} + + + + +public func FfiConverterTypeVssHeaderProvider_lift(_ pointer: UnsafeMutableRawPointer) throws -> VssHeaderProvider { + return try FfiConverterTypeVssHeaderProvider.lift(pointer) } -public func FfiConverterTypeSpontaneousPayment_lower(_ value: SpontaneousPayment) -> UnsafeMutableRawPointer { - return FfiConverterTypeSpontaneousPayment.lower(value) +public func FfiConverterTypeVssHeaderProvider_lower(_ value: VssHeaderProvider) -> UnsafeMutableRawPointer { + return FfiConverterTypeVssHeaderProvider.lower(value) } @@ -1986,15 +2185,14 @@ public struct AnchorChannelsConfig { // Default memberwise initializers are never public by default, so we // declare one manually. 
- public init( - trustedPeersNoReserve: [PublicKey], - perChannelReserveSats: UInt64) { + public init(trustedPeersNoReserve: [PublicKey], perChannelReserveSats: UInt64) { self.trustedPeersNoReserve = trustedPeersNoReserve self.perChannelReserveSats = perChannelReserveSats } } + extension AnchorChannelsConfig: Equatable, Hashable { public static func ==(lhs: AnchorChannelsConfig, rhs: AnchorChannelsConfig) -> Bool { if lhs.trustedPeersNoReserve != rhs.trustedPeersNoReserve { @@ -2048,13 +2246,7 @@ public struct BalanceDetails { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - totalOnchainBalanceSats: UInt64, - spendableOnchainBalanceSats: UInt64, - totalAnchorChannelsReserveSats: UInt64, - totalLightningBalanceSats: UInt64, - lightningBalances: [LightningBalance], - pendingBalancesFromChannelClosures: [PendingSweepBalance]) { + public init(totalOnchainBalanceSats: UInt64, spendableOnchainBalanceSats: UInt64, totalAnchorChannelsReserveSats: UInt64, totalLightningBalanceSats: UInt64, lightningBalances: [LightningBalance], pendingBalancesFromChannelClosures: [PendingSweepBalance]) { self.totalOnchainBalanceSats = totalOnchainBalanceSats self.spendableOnchainBalanceSats = spendableOnchainBalanceSats self.totalAnchorChannelsReserveSats = totalAnchorChannelsReserveSats @@ -2065,6 +2257,7 @@ public struct BalanceDetails { } + extension BalanceDetails: Equatable, Hashable { public static func ==(lhs: BalanceDetails, rhs: BalanceDetails) -> Bool { if lhs.totalOnchainBalanceSats != rhs.totalOnchainBalanceSats { @@ -2138,15 +2331,14 @@ public struct BestBlock { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - blockHash: BlockHash, - height: UInt32) { + public init(blockHash: BlockHash, height: UInt32) { self.blockHash = blockHash self.height = height } } + extension BestBlock: Equatable, Hashable { public static func ==(lhs: BestBlock, rhs: BestBlock) -> Bool { if lhs.blockHash != rhs.blockHash { @@ -2190,6 +2382,95 @@ public func FfiConverterTypeBestBlock_lower(_ value: BestBlock) -> RustBuffer { } +public struct ChannelConfig { + public var forwardingFeeProportionalMillionths: UInt32 + public var forwardingFeeBaseMsat: UInt32 + public var cltvExpiryDelta: UInt16 + public var maxDustHtlcExposure: MaxDustHtlcExposure + public var forceCloseAvoidanceMaxFeeSatoshis: UInt64 + public var acceptUnderpayingHtlcs: Bool + + // Default memberwise initializers are never public by default, so we + // declare one manually. 
+ public init(forwardingFeeProportionalMillionths: UInt32, forwardingFeeBaseMsat: UInt32, cltvExpiryDelta: UInt16, maxDustHtlcExposure: MaxDustHtlcExposure, forceCloseAvoidanceMaxFeeSatoshis: UInt64, acceptUnderpayingHtlcs: Bool) { + self.forwardingFeeProportionalMillionths = forwardingFeeProportionalMillionths + self.forwardingFeeBaseMsat = forwardingFeeBaseMsat + self.cltvExpiryDelta = cltvExpiryDelta + self.maxDustHtlcExposure = maxDustHtlcExposure + self.forceCloseAvoidanceMaxFeeSatoshis = forceCloseAvoidanceMaxFeeSatoshis + self.acceptUnderpayingHtlcs = acceptUnderpayingHtlcs + } +} + + + +extension ChannelConfig: Equatable, Hashable { + public static func ==(lhs: ChannelConfig, rhs: ChannelConfig) -> Bool { + if lhs.forwardingFeeProportionalMillionths != rhs.forwardingFeeProportionalMillionths { + return false + } + if lhs.forwardingFeeBaseMsat != rhs.forwardingFeeBaseMsat { + return false + } + if lhs.cltvExpiryDelta != rhs.cltvExpiryDelta { + return false + } + if lhs.maxDustHtlcExposure != rhs.maxDustHtlcExposure { + return false + } + if lhs.forceCloseAvoidanceMaxFeeSatoshis != rhs.forceCloseAvoidanceMaxFeeSatoshis { + return false + } + if lhs.acceptUnderpayingHtlcs != rhs.acceptUnderpayingHtlcs { + return false + } + return true + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(forwardingFeeProportionalMillionths) + hasher.combine(forwardingFeeBaseMsat) + hasher.combine(cltvExpiryDelta) + hasher.combine(maxDustHtlcExposure) + hasher.combine(forceCloseAvoidanceMaxFeeSatoshis) + hasher.combine(acceptUnderpayingHtlcs) + } +} + + +public struct FfiConverterTypeChannelConfig: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelConfig { + return + try ChannelConfig( + forwardingFeeProportionalMillionths: FfiConverterUInt32.read(from: &buf), + forwardingFeeBaseMsat: FfiConverterUInt32.read(from: &buf), + cltvExpiryDelta: FfiConverterUInt16.read(from: &buf), + maxDustHtlcExposure: FfiConverterTypeMaxDustHTLCExposure.read(from: &buf), + forceCloseAvoidanceMaxFeeSatoshis: FfiConverterUInt64.read(from: &buf), + acceptUnderpayingHtlcs: FfiConverterBool.read(from: &buf) + ) + } + + public static func write(_ value: ChannelConfig, into buf: inout [UInt8]) { + FfiConverterUInt32.write(value.forwardingFeeProportionalMillionths, into: &buf) + FfiConverterUInt32.write(value.forwardingFeeBaseMsat, into: &buf) + FfiConverterUInt16.write(value.cltvExpiryDelta, into: &buf) + FfiConverterTypeMaxDustHTLCExposure.write(value.maxDustHtlcExposure, into: &buf) + FfiConverterUInt64.write(value.forceCloseAvoidanceMaxFeeSatoshis, into: &buf) + FfiConverterBool.write(value.acceptUnderpayingHtlcs, into: &buf) + } +} + + +public func FfiConverterTypeChannelConfig_lift(_ buf: RustBuffer) throws -> ChannelConfig { + return try FfiConverterTypeChannelConfig.lift(buf) +} + +public func FfiConverterTypeChannelConfig_lower(_ value: ChannelConfig) -> RustBuffer { + return FfiConverterTypeChannelConfig.lower(value) +} + + public struct ChannelDetails { public var channelId: ChannelId public var counterpartyNodeId: PublicKey @@ -2205,7 +2486,7 @@ public struct ChannelDetails { public var isOutbound: Bool public var isChannelReady: Bool public var isUsable: Bool - public var isPublic: Bool + public var isAnnounced: Bool public var cltvExpiryDelta: UInt16? public var counterpartyUnspendablePunishmentReserve: UInt64 public var counterpartyOutboundHtlcMinimumMsat: UInt64? 
@@ -2222,35 +2503,7 @@ public struct ChannelDetails { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - fundingTxo: OutPoint?, - channelValueSats: UInt64, - unspendablePunishmentReserve: UInt64?, - userChannelId: UserChannelId, - feerateSatPer1000Weight: UInt32, - outboundCapacityMsat: UInt64, - inboundCapacityMsat: UInt64, - confirmationsRequired: UInt32?, - confirmations: UInt32?, - isOutbound: Bool, - isChannelReady: Bool, - isUsable: Bool, - isPublic: Bool, - cltvExpiryDelta: UInt16?, - counterpartyUnspendablePunishmentReserve: UInt64, - counterpartyOutboundHtlcMinimumMsat: UInt64?, - counterpartyOutboundHtlcMaximumMsat: UInt64?, - counterpartyForwardingInfoFeeBaseMsat: UInt32?, - counterpartyForwardingInfoFeeProportionalMillionths: UInt32?, - counterpartyForwardingInfoCltvExpiryDelta: UInt16?, - nextOutboundHtlcLimitMsat: UInt64, - nextOutboundHtlcMinimumMsat: UInt64, - forceCloseSpendDelay: UInt16?, - inboundHtlcMinimumMsat: UInt64, - inboundHtlcMaximumMsat: UInt64?, - config: ChannelConfig) { + public init(channelId: ChannelId, counterpartyNodeId: PublicKey, fundingTxo: OutPoint?, channelValueSats: UInt64, unspendablePunishmentReserve: UInt64?, userChannelId: UserChannelId, feerateSatPer1000Weight: UInt32, outboundCapacityMsat: UInt64, inboundCapacityMsat: UInt64, confirmationsRequired: UInt32?, confirmations: UInt32?, isOutbound: Bool, isChannelReady: Bool, isUsable: Bool, isAnnounced: Bool, cltvExpiryDelta: UInt16?, counterpartyUnspendablePunishmentReserve: UInt64, counterpartyOutboundHtlcMinimumMsat: UInt64?, counterpartyOutboundHtlcMaximumMsat: UInt64?, counterpartyForwardingInfoFeeBaseMsat: UInt32?, counterpartyForwardingInfoFeeProportionalMillionths: UInt32?, counterpartyForwardingInfoCltvExpiryDelta: UInt16?, nextOutboundHtlcLimitMsat: UInt64, nextOutboundHtlcMinimumMsat: UInt64, forceCloseSpendDelay: UInt16?, inboundHtlcMinimumMsat: UInt64, inboundHtlcMaximumMsat: UInt64?, config: ChannelConfig) { self.channelId = channelId self.counterpartyNodeId = counterpartyNodeId self.fundingTxo = fundingTxo @@ -2265,7 +2518,7 @@ public struct ChannelDetails { self.isOutbound = isOutbound self.isChannelReady = isChannelReady self.isUsable = isUsable - self.isPublic = isPublic + self.isAnnounced = isAnnounced self.cltvExpiryDelta = cltvExpiryDelta self.counterpartyUnspendablePunishmentReserve = counterpartyUnspendablePunishmentReserve self.counterpartyOutboundHtlcMinimumMsat = counterpartyOutboundHtlcMinimumMsat @@ -2284,6 +2537,128 @@ public struct ChannelDetails { +extension ChannelDetails: Equatable, Hashable { + public static func ==(lhs: ChannelDetails, rhs: ChannelDetails) -> Bool { + if lhs.channelId != rhs.channelId { + return false + } + if lhs.counterpartyNodeId != rhs.counterpartyNodeId { + return false + } + if lhs.fundingTxo != rhs.fundingTxo { + return false + } + if lhs.channelValueSats != rhs.channelValueSats { + return false + } + if lhs.unspendablePunishmentReserve != rhs.unspendablePunishmentReserve { + return false + } + if lhs.userChannelId != rhs.userChannelId { + return false + } + if lhs.feerateSatPer1000Weight != rhs.feerateSatPer1000Weight { + return false + } + if lhs.outboundCapacityMsat != rhs.outboundCapacityMsat { + return false + } + if lhs.inboundCapacityMsat != rhs.inboundCapacityMsat { + return false + } + if lhs.confirmationsRequired != rhs.confirmationsRequired { + return false + } + if lhs.confirmations != rhs.confirmations { + 
return false + } + if lhs.isOutbound != rhs.isOutbound { + return false + } + if lhs.isChannelReady != rhs.isChannelReady { + return false + } + if lhs.isUsable != rhs.isUsable { + return false + } + if lhs.isAnnounced != rhs.isAnnounced { + return false + } + if lhs.cltvExpiryDelta != rhs.cltvExpiryDelta { + return false + } + if lhs.counterpartyUnspendablePunishmentReserve != rhs.counterpartyUnspendablePunishmentReserve { + return false + } + if lhs.counterpartyOutboundHtlcMinimumMsat != rhs.counterpartyOutboundHtlcMinimumMsat { + return false + } + if lhs.counterpartyOutboundHtlcMaximumMsat != rhs.counterpartyOutboundHtlcMaximumMsat { + return false + } + if lhs.counterpartyForwardingInfoFeeBaseMsat != rhs.counterpartyForwardingInfoFeeBaseMsat { + return false + } + if lhs.counterpartyForwardingInfoFeeProportionalMillionths != rhs.counterpartyForwardingInfoFeeProportionalMillionths { + return false + } + if lhs.counterpartyForwardingInfoCltvExpiryDelta != rhs.counterpartyForwardingInfoCltvExpiryDelta { + return false + } + if lhs.nextOutboundHtlcLimitMsat != rhs.nextOutboundHtlcLimitMsat { + return false + } + if lhs.nextOutboundHtlcMinimumMsat != rhs.nextOutboundHtlcMinimumMsat { + return false + } + if lhs.forceCloseSpendDelay != rhs.forceCloseSpendDelay { + return false + } + if lhs.inboundHtlcMinimumMsat != rhs.inboundHtlcMinimumMsat { + return false + } + if lhs.inboundHtlcMaximumMsat != rhs.inboundHtlcMaximumMsat { + return false + } + if lhs.config != rhs.config { + return false + } + return true + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(channelId) + hasher.combine(counterpartyNodeId) + hasher.combine(fundingTxo) + hasher.combine(channelValueSats) + hasher.combine(unspendablePunishmentReserve) + hasher.combine(userChannelId) + hasher.combine(feerateSatPer1000Weight) + hasher.combine(outboundCapacityMsat) + hasher.combine(inboundCapacityMsat) + hasher.combine(confirmationsRequired) + hasher.combine(confirmations) + hasher.combine(isOutbound) + hasher.combine(isChannelReady) + hasher.combine(isUsable) + hasher.combine(isAnnounced) + hasher.combine(cltvExpiryDelta) + hasher.combine(counterpartyUnspendablePunishmentReserve) + hasher.combine(counterpartyOutboundHtlcMinimumMsat) + hasher.combine(counterpartyOutboundHtlcMaximumMsat) + hasher.combine(counterpartyForwardingInfoFeeBaseMsat) + hasher.combine(counterpartyForwardingInfoFeeProportionalMillionths) + hasher.combine(counterpartyForwardingInfoCltvExpiryDelta) + hasher.combine(nextOutboundHtlcLimitMsat) + hasher.combine(nextOutboundHtlcMinimumMsat) + hasher.combine(forceCloseSpendDelay) + hasher.combine(inboundHtlcMinimumMsat) + hasher.combine(inboundHtlcMaximumMsat) + hasher.combine(config) + } +} + + public struct FfiConverterTypeChannelDetails: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelDetails { return @@ -2302,7 +2677,7 @@ public struct FfiConverterTypeChannelDetails: FfiConverterRustBuffer { isOutbound: FfiConverterBool.read(from: &buf), isChannelReady: FfiConverterBool.read(from: &buf), isUsable: FfiConverterBool.read(from: &buf), - isPublic: FfiConverterBool.read(from: &buf), + isAnnounced: FfiConverterBool.read(from: &buf), cltvExpiryDelta: FfiConverterOptionUInt16.read(from: &buf), counterpartyUnspendablePunishmentReserve: FfiConverterUInt64.read(from: &buf), counterpartyOutboundHtlcMinimumMsat: FfiConverterOptionUInt64.read(from: &buf), @@ -2334,7 +2709,7 @@ public struct FfiConverterTypeChannelDetails: 
FfiConverterRustBuffer { FfiConverterBool.write(value.isOutbound, into: &buf) FfiConverterBool.write(value.isChannelReady, into: &buf) FfiConverterBool.write(value.isUsable, into: &buf) - FfiConverterBool.write(value.isPublic, into: &buf) + FfiConverterBool.write(value.isAnnounced, into: &buf) FfiConverterOptionUInt16.write(value.cltvExpiryDelta, into: &buf) FfiConverterUInt64.write(value.counterpartyUnspendablePunishmentReserve, into: &buf) FfiConverterOptionUInt64.write(value.counterpartyOutboundHtlcMinimumMsat, into: &buf) @@ -2370,12 +2745,7 @@ public struct ChannelInfo { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - nodeOne: NodeId, - oneToTwo: ChannelUpdateInfo?, - nodeTwo: NodeId, - twoToOne: ChannelUpdateInfo?, - capacitySats: UInt64?) { + public init(nodeOne: NodeId, oneToTwo: ChannelUpdateInfo?, nodeTwo: NodeId, twoToOne: ChannelUpdateInfo?, capacitySats: UInt64?) { self.nodeOne = nodeOne self.oneToTwo = oneToTwo self.nodeTwo = nodeTwo @@ -2385,6 +2755,7 @@ public struct ChannelInfo { } + extension ChannelInfo: Equatable, Hashable { public static func ==(lhs: ChannelInfo, rhs: ChannelInfo) -> Bool { if lhs.nodeOne != rhs.nodeOne { @@ -2456,13 +2827,7 @@ public struct ChannelUpdateInfo { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - lastUpdate: UInt32, - enabled: Bool, - cltvExpiryDelta: UInt16, - htlcMinimumMsat: UInt64, - htlcMaximumMsat: UInt64, - fees: RoutingFees) { + public init(lastUpdate: UInt32, enabled: Bool, cltvExpiryDelta: UInt16, htlcMinimumMsat: UInt64, htlcMaximumMsat: UInt64, fees: RoutingFees) { self.lastUpdate = lastUpdate self.enabled = enabled self.cltvExpiryDelta = cltvExpiryDelta @@ -2473,6 +2838,7 @@ public struct ChannelUpdateInfo { } + extension ChannelUpdateInfo: Equatable, Hashable { public static func ==(lhs: ChannelUpdateInfo, rhs: ChannelUpdateInfo) -> Bool { if lhs.lastUpdate != rhs.lastUpdate { @@ -2545,46 +2911,31 @@ public struct Config { public var logDirPath: String? public var network: Network public var listeningAddresses: [SocketAddress]? - public var defaultCltvExpiryDelta: UInt32 - public var onchainWalletSyncIntervalSecs: UInt64 - public var walletSyncIntervalSecs: UInt64 - public var feeRateCacheUpdateIntervalSecs: UInt64 + public var nodeAlias: NodeAlias? public var trustedPeers0conf: [PublicKey] public var probingLiquidityLimitMultiplier: UInt64 public var logLevel: LogLevel public var anchorChannelsConfig: AnchorChannelsConfig? + public var sendingParameters: SendingParameters? // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - storageDirPath: String, - logDirPath: String?, - network: Network, - listeningAddresses: [SocketAddress]?, - defaultCltvExpiryDelta: UInt32, - onchainWalletSyncIntervalSecs: UInt64, - walletSyncIntervalSecs: UInt64, - feeRateCacheUpdateIntervalSecs: UInt64, - trustedPeers0conf: [PublicKey], - probingLiquidityLimitMultiplier: UInt64, - logLevel: LogLevel, - anchorChannelsConfig: AnchorChannelsConfig?) { + public init(storageDirPath: String, logDirPath: String?, network: Network, listeningAddresses: [SocketAddress]?, nodeAlias: NodeAlias?, trustedPeers0conf: [PublicKey], probingLiquidityLimitMultiplier: UInt64, logLevel: LogLevel, anchorChannelsConfig: AnchorChannelsConfig?, sendingParameters: SendingParameters?) 
{ self.storageDirPath = storageDirPath self.logDirPath = logDirPath self.network = network self.listeningAddresses = listeningAddresses - self.defaultCltvExpiryDelta = defaultCltvExpiryDelta - self.onchainWalletSyncIntervalSecs = onchainWalletSyncIntervalSecs - self.walletSyncIntervalSecs = walletSyncIntervalSecs - self.feeRateCacheUpdateIntervalSecs = feeRateCacheUpdateIntervalSecs + self.nodeAlias = nodeAlias self.trustedPeers0conf = trustedPeers0conf self.probingLiquidityLimitMultiplier = probingLiquidityLimitMultiplier self.logLevel = logLevel self.anchorChannelsConfig = anchorChannelsConfig + self.sendingParameters = sendingParameters } } + extension Config: Equatable, Hashable { public static func ==(lhs: Config, rhs: Config) -> Bool { if lhs.storageDirPath != rhs.storageDirPath { @@ -2599,16 +2950,7 @@ extension Config: Equatable, Hashable { if lhs.listeningAddresses != rhs.listeningAddresses { return false } - if lhs.defaultCltvExpiryDelta != rhs.defaultCltvExpiryDelta { - return false - } - if lhs.onchainWalletSyncIntervalSecs != rhs.onchainWalletSyncIntervalSecs { - return false - } - if lhs.walletSyncIntervalSecs != rhs.walletSyncIntervalSecs { - return false - } - if lhs.feeRateCacheUpdateIntervalSecs != rhs.feeRateCacheUpdateIntervalSecs { + if lhs.nodeAlias != rhs.nodeAlias { return false } if lhs.trustedPeers0conf != rhs.trustedPeers0conf { @@ -2623,6 +2965,9 @@ extension Config: Equatable, Hashable { if lhs.anchorChannelsConfig != rhs.anchorChannelsConfig { return false } + if lhs.sendingParameters != rhs.sendingParameters { + return false + } return true } @@ -2631,14 +2976,12 @@ extension Config: Equatable, Hashable { hasher.combine(logDirPath) hasher.combine(network) hasher.combine(listeningAddresses) - hasher.combine(defaultCltvExpiryDelta) - hasher.combine(onchainWalletSyncIntervalSecs) - hasher.combine(walletSyncIntervalSecs) - hasher.combine(feeRateCacheUpdateIntervalSecs) + hasher.combine(nodeAlias) hasher.combine(trustedPeers0conf) hasher.combine(probingLiquidityLimitMultiplier) hasher.combine(logLevel) hasher.combine(anchorChannelsConfig) + hasher.combine(sendingParameters) } } @@ -2651,14 +2994,12 @@ public struct FfiConverterTypeConfig: FfiConverterRustBuffer { logDirPath: FfiConverterOptionString.read(from: &buf), network: FfiConverterTypeNetwork.read(from: &buf), listeningAddresses: FfiConverterOptionSequenceTypeSocketAddress.read(from: &buf), - defaultCltvExpiryDelta: FfiConverterUInt32.read(from: &buf), - onchainWalletSyncIntervalSecs: FfiConverterUInt64.read(from: &buf), - walletSyncIntervalSecs: FfiConverterUInt64.read(from: &buf), - feeRateCacheUpdateIntervalSecs: FfiConverterUInt64.read(from: &buf), + nodeAlias: FfiConverterOptionTypeNodeAlias.read(from: &buf), trustedPeers0conf: FfiConverterSequenceTypePublicKey.read(from: &buf), probingLiquidityLimitMultiplier: FfiConverterUInt64.read(from: &buf), logLevel: FfiConverterTypeLogLevel.read(from: &buf), - anchorChannelsConfig: FfiConverterOptionTypeAnchorChannelsConfig.read(from: &buf) + anchorChannelsConfig: FfiConverterOptionTypeAnchorChannelsConfig.read(from: &buf), + sendingParameters: FfiConverterOptionTypeSendingParameters.read(from: &buf) ) } @@ -2667,14 +3008,12 @@ public struct FfiConverterTypeConfig: FfiConverterRustBuffer { FfiConverterOptionString.write(value.logDirPath, into: &buf) FfiConverterTypeNetwork.write(value.network, into: &buf) FfiConverterOptionSequenceTypeSocketAddress.write(value.listeningAddresses, into: &buf) - FfiConverterUInt32.write(value.defaultCltvExpiryDelta, into: &buf) - 
FfiConverterUInt64.write(value.onchainWalletSyncIntervalSecs, into: &buf) - FfiConverterUInt64.write(value.walletSyncIntervalSecs, into: &buf) - FfiConverterUInt64.write(value.feeRateCacheUpdateIntervalSecs, into: &buf) + FfiConverterOptionTypeNodeAlias.write(value.nodeAlias, into: &buf) FfiConverterSequenceTypePublicKey.write(value.trustedPeers0conf, into: &buf) FfiConverterUInt64.write(value.probingLiquidityLimitMultiplier, into: &buf) FfiConverterTypeLogLevel.write(value.logLevel, into: &buf) FfiConverterOptionTypeAnchorChannelsConfig.write(value.anchorChannelsConfig, into: &buf) + FfiConverterOptionTypeSendingParameters.write(value.sendingParameters, into: &buf) } } @@ -2688,21 +3027,85 @@ public func FfiConverterTypeConfig_lower(_ value: Config) -> RustBuffer { } +public struct EsploraSyncConfig { + public var onchainWalletSyncIntervalSecs: UInt64 + public var lightningWalletSyncIntervalSecs: UInt64 + public var feeRateCacheUpdateIntervalSecs: UInt64 + + // Default memberwise initializers are never public by default, so we + // declare one manually. + public init(onchainWalletSyncIntervalSecs: UInt64, lightningWalletSyncIntervalSecs: UInt64, feeRateCacheUpdateIntervalSecs: UInt64) { + self.onchainWalletSyncIntervalSecs = onchainWalletSyncIntervalSecs + self.lightningWalletSyncIntervalSecs = lightningWalletSyncIntervalSecs + self.feeRateCacheUpdateIntervalSecs = feeRateCacheUpdateIntervalSecs + } +} + + + +extension EsploraSyncConfig: Equatable, Hashable { + public static func ==(lhs: EsploraSyncConfig, rhs: EsploraSyncConfig) -> Bool { + if lhs.onchainWalletSyncIntervalSecs != rhs.onchainWalletSyncIntervalSecs { + return false + } + if lhs.lightningWalletSyncIntervalSecs != rhs.lightningWalletSyncIntervalSecs { + return false + } + if lhs.feeRateCacheUpdateIntervalSecs != rhs.feeRateCacheUpdateIntervalSecs { + return false + } + return true + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(onchainWalletSyncIntervalSecs) + hasher.combine(lightningWalletSyncIntervalSecs) + hasher.combine(feeRateCacheUpdateIntervalSecs) + } +} + + +public struct FfiConverterTypeEsploraSyncConfig: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> EsploraSyncConfig { + return + try EsploraSyncConfig( + onchainWalletSyncIntervalSecs: FfiConverterUInt64.read(from: &buf), + lightningWalletSyncIntervalSecs: FfiConverterUInt64.read(from: &buf), + feeRateCacheUpdateIntervalSecs: FfiConverterUInt64.read(from: &buf) + ) + } + + public static func write(_ value: EsploraSyncConfig, into buf: inout [UInt8]) { + FfiConverterUInt64.write(value.onchainWalletSyncIntervalSecs, into: &buf) + FfiConverterUInt64.write(value.lightningWalletSyncIntervalSecs, into: &buf) + FfiConverterUInt64.write(value.feeRateCacheUpdateIntervalSecs, into: &buf) + } +} + + +public func FfiConverterTypeEsploraSyncConfig_lift(_ buf: RustBuffer) throws -> EsploraSyncConfig { + return try FfiConverterTypeEsploraSyncConfig.lift(buf) +} + +public func FfiConverterTypeEsploraSyncConfig_lower(_ value: EsploraSyncConfig) -> RustBuffer { + return FfiConverterTypeEsploraSyncConfig.lower(value) +} + + public struct LspFeeLimits { public var maxTotalOpeningFeeMsat: UInt64? public var maxProportionalOpeningFeePpmMsat: UInt64? // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - maxTotalOpeningFeeMsat: UInt64?, - maxProportionalOpeningFeePpmMsat: UInt64?) 
{ + public init(maxTotalOpeningFeeMsat: UInt64?, maxProportionalOpeningFeePpmMsat: UInt64?) { self.maxTotalOpeningFeeMsat = maxTotalOpeningFeeMsat self.maxProportionalOpeningFeePpmMsat = maxProportionalOpeningFeePpmMsat } } + extension LspFeeLimits: Equatable, Hashable { public static func ==(lhs: LspFeeLimits, rhs: LspFeeLimits) -> Bool { if lhs.maxTotalOpeningFeeMsat != rhs.maxTotalOpeningFeeMsat { @@ -2753,10 +3156,7 @@ public struct NodeAnnouncementInfo { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - lastUpdate: UInt32, - alias: String, - addresses: [SocketAddress]) { + public init(lastUpdate: UInt32, alias: String, addresses: [SocketAddress]) { self.lastUpdate = lastUpdate self.alias = alias self.addresses = addresses @@ -2764,6 +3164,7 @@ public struct NodeAnnouncementInfo { } + extension NodeAnnouncementInfo: Equatable, Hashable { public static func ==(lhs: NodeAnnouncementInfo, rhs: NodeAnnouncementInfo) -> Bool { if lhs.lastUpdate != rhs.lastUpdate { @@ -2819,15 +3220,14 @@ public struct NodeInfo { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - channels: [UInt64], - announcementInfo: NodeAnnouncementInfo?) { + public init(channels: [UInt64], announcementInfo: NodeAnnouncementInfo?) { self.channels = channels self.announcementInfo = announcementInfo } } + extension NodeInfo: Equatable, Hashable { public static func ==(lhs: NodeInfo, rhs: NodeInfo) -> Bool { if lhs.channels != rhs.channels { @@ -2875,35 +3275,30 @@ public struct NodeStatus { public var isRunning: Bool public var isListening: Bool public var currentBestBlock: BestBlock - public var latestWalletSyncTimestamp: UInt64? + public var latestLightningWalletSyncTimestamp: UInt64? public var latestOnchainWalletSyncTimestamp: UInt64? public var latestFeeRateCacheUpdateTimestamp: UInt64? public var latestRgsSnapshotTimestamp: UInt64? public var latestNodeAnnouncementBroadcastTimestamp: UInt64? + public var latestChannelMonitorArchivalHeight: UInt32? // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - isRunning: Bool, - isListening: Bool, - currentBestBlock: BestBlock, - latestWalletSyncTimestamp: UInt64?, - latestOnchainWalletSyncTimestamp: UInt64?, - latestFeeRateCacheUpdateTimestamp: UInt64?, - latestRgsSnapshotTimestamp: UInt64?, - latestNodeAnnouncementBroadcastTimestamp: UInt64?) { + public init(isRunning: Bool, isListening: Bool, currentBestBlock: BestBlock, latestLightningWalletSyncTimestamp: UInt64?, latestOnchainWalletSyncTimestamp: UInt64?, latestFeeRateCacheUpdateTimestamp: UInt64?, latestRgsSnapshotTimestamp: UInt64?, latestNodeAnnouncementBroadcastTimestamp: UInt64?, latestChannelMonitorArchivalHeight: UInt32?) 
{ self.isRunning = isRunning self.isListening = isListening self.currentBestBlock = currentBestBlock - self.latestWalletSyncTimestamp = latestWalletSyncTimestamp + self.latestLightningWalletSyncTimestamp = latestLightningWalletSyncTimestamp self.latestOnchainWalletSyncTimestamp = latestOnchainWalletSyncTimestamp self.latestFeeRateCacheUpdateTimestamp = latestFeeRateCacheUpdateTimestamp self.latestRgsSnapshotTimestamp = latestRgsSnapshotTimestamp self.latestNodeAnnouncementBroadcastTimestamp = latestNodeAnnouncementBroadcastTimestamp + self.latestChannelMonitorArchivalHeight = latestChannelMonitorArchivalHeight } } + extension NodeStatus: Equatable, Hashable { public static func ==(lhs: NodeStatus, rhs: NodeStatus) -> Bool { if lhs.isRunning != rhs.isRunning { @@ -2915,7 +3310,7 @@ extension NodeStatus: Equatable, Hashable { if lhs.currentBestBlock != rhs.currentBestBlock { return false } - if lhs.latestWalletSyncTimestamp != rhs.latestWalletSyncTimestamp { + if lhs.latestLightningWalletSyncTimestamp != rhs.latestLightningWalletSyncTimestamp { return false } if lhs.latestOnchainWalletSyncTimestamp != rhs.latestOnchainWalletSyncTimestamp { @@ -2930,6 +3325,9 @@ extension NodeStatus: Equatable, Hashable { if lhs.latestNodeAnnouncementBroadcastTimestamp != rhs.latestNodeAnnouncementBroadcastTimestamp { return false } + if lhs.latestChannelMonitorArchivalHeight != rhs.latestChannelMonitorArchivalHeight { + return false + } return true } @@ -2937,11 +3335,12 @@ extension NodeStatus: Equatable, Hashable { hasher.combine(isRunning) hasher.combine(isListening) hasher.combine(currentBestBlock) - hasher.combine(latestWalletSyncTimestamp) + hasher.combine(latestLightningWalletSyncTimestamp) hasher.combine(latestOnchainWalletSyncTimestamp) hasher.combine(latestFeeRateCacheUpdateTimestamp) hasher.combine(latestRgsSnapshotTimestamp) hasher.combine(latestNodeAnnouncementBroadcastTimestamp) + hasher.combine(latestChannelMonitorArchivalHeight) } } @@ -2953,11 +3352,12 @@ public struct FfiConverterTypeNodeStatus: FfiConverterRustBuffer { isRunning: FfiConverterBool.read(from: &buf), isListening: FfiConverterBool.read(from: &buf), currentBestBlock: FfiConverterTypeBestBlock.read(from: &buf), - latestWalletSyncTimestamp: FfiConverterOptionUInt64.read(from: &buf), + latestLightningWalletSyncTimestamp: FfiConverterOptionUInt64.read(from: &buf), latestOnchainWalletSyncTimestamp: FfiConverterOptionUInt64.read(from: &buf), latestFeeRateCacheUpdateTimestamp: FfiConverterOptionUInt64.read(from: &buf), latestRgsSnapshotTimestamp: FfiConverterOptionUInt64.read(from: &buf), - latestNodeAnnouncementBroadcastTimestamp: FfiConverterOptionUInt64.read(from: &buf) + latestNodeAnnouncementBroadcastTimestamp: FfiConverterOptionUInt64.read(from: &buf), + latestChannelMonitorArchivalHeight: FfiConverterOptionUInt32.read(from: &buf) ) } @@ -2965,11 +3365,12 @@ public struct FfiConverterTypeNodeStatus: FfiConverterRustBuffer { FfiConverterBool.write(value.isRunning, into: &buf) FfiConverterBool.write(value.isListening, into: &buf) FfiConverterTypeBestBlock.write(value.currentBestBlock, into: &buf) - FfiConverterOptionUInt64.write(value.latestWalletSyncTimestamp, into: &buf) + FfiConverterOptionUInt64.write(value.latestLightningWalletSyncTimestamp, into: &buf) FfiConverterOptionUInt64.write(value.latestOnchainWalletSyncTimestamp, into: &buf) FfiConverterOptionUInt64.write(value.latestFeeRateCacheUpdateTimestamp, into: &buf) FfiConverterOptionUInt64.write(value.latestRgsSnapshotTimestamp, into: &buf) 
FfiConverterOptionUInt64.write(value.latestNodeAnnouncementBroadcastTimestamp, into: &buf) + FfiConverterOptionUInt32.write(value.latestChannelMonitorArchivalHeight, into: &buf) } } @@ -2989,15 +3390,14 @@ public struct OutPoint { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - txid: Txid, - vout: UInt32) { + public init(txid: Txid, vout: UInt32) { self.txid = txid self.vout = vout } } + extension OutPoint: Equatable, Hashable { public static func ==(lhs: OutPoint, rhs: OutPoint) -> Bool { if lhs.txid != rhs.txid { @@ -3051,13 +3451,7 @@ public struct PaymentDetails { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - id: PaymentId, - kind: PaymentKind, - amountMsat: UInt64?, - direction: PaymentDirection, - status: PaymentStatus, - latestUpdateTimestamp: UInt64) { + public init(id: PaymentId, kind: PaymentKind, amountMsat: UInt64?, direction: PaymentDirection, status: PaymentStatus, latestUpdateTimestamp: UInt64) { self.id = id self.kind = kind self.amountMsat = amountMsat @@ -3068,6 +3462,7 @@ public struct PaymentDetails { } + extension PaymentDetails: Equatable, Hashable { public static func ==(lhs: PaymentDetails, rhs: PaymentDetails) -> Bool { if lhs.id != rhs.id { @@ -3143,11 +3538,7 @@ public struct PeerDetails { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - nodeId: PublicKey, - address: SocketAddress, - isPersisted: Bool, - isConnected: Bool) { + public init(nodeId: PublicKey, address: SocketAddress, isPersisted: Bool, isConnected: Bool) { self.nodeId = nodeId self.address = address self.isPersisted = isPersisted @@ -3156,6 +3547,7 @@ public struct PeerDetails { } + extension PeerDetails: Equatable, Hashable { public static func ==(lhs: PeerDetails, rhs: PeerDetails) -> Bool { if lhs.nodeId != rhs.nodeId { @@ -3206,69 +3598,210 @@ public func FfiConverterTypePeerDetails_lift(_ buf: RustBuffer) throws -> PeerDe return try FfiConverterTypePeerDetails.lift(buf) } -public func FfiConverterTypePeerDetails_lower(_ value: PeerDetails) -> RustBuffer { - return FfiConverterTypePeerDetails.lower(value) +public func FfiConverterTypePeerDetails_lower(_ value: PeerDetails) -> RustBuffer { + return FfiConverterTypePeerDetails.lower(value) +} + + +public struct RoutingFees { + public var baseMsat: UInt32 + public var proportionalMillionths: UInt32 + + // Default memberwise initializers are never public by default, so we + // declare one manually. 
+ public init(baseMsat: UInt32, proportionalMillionths: UInt32) { + self.baseMsat = baseMsat + self.proportionalMillionths = proportionalMillionths + } +} + + + +extension RoutingFees: Equatable, Hashable { + public static func ==(lhs: RoutingFees, rhs: RoutingFees) -> Bool { + if lhs.baseMsat != rhs.baseMsat { + return false + } + if lhs.proportionalMillionths != rhs.proportionalMillionths { + return false + } + return true + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(baseMsat) + hasher.combine(proportionalMillionths) + } +} + + +public struct FfiConverterTypeRoutingFees: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> RoutingFees { + return + try RoutingFees( + baseMsat: FfiConverterUInt32.read(from: &buf), + proportionalMillionths: FfiConverterUInt32.read(from: &buf) + ) + } + + public static func write(_ value: RoutingFees, into buf: inout [UInt8]) { + FfiConverterUInt32.write(value.baseMsat, into: &buf) + FfiConverterUInt32.write(value.proportionalMillionths, into: &buf) + } +} + + +public func FfiConverterTypeRoutingFees_lift(_ buf: RustBuffer) throws -> RoutingFees { + return try FfiConverterTypeRoutingFees.lift(buf) +} + +public func FfiConverterTypeRoutingFees_lower(_ value: RoutingFees) -> RustBuffer { + return FfiConverterTypeRoutingFees.lower(value) } -public struct RoutingFees { - public var baseMsat: UInt32 - public var proportionalMillionths: UInt32 +public struct SendingParameters { + public var maxTotalRoutingFeeMsat: MaxTotalRoutingFeeLimit? + public var maxTotalCltvExpiryDelta: UInt32? + public var maxPathCount: UInt8? + public var maxChannelSaturationPowerOfHalf: UInt8? // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - baseMsat: UInt32, - proportionalMillionths: UInt32) { - self.baseMsat = baseMsat - self.proportionalMillionths = proportionalMillionths + public init(maxTotalRoutingFeeMsat: MaxTotalRoutingFeeLimit?, maxTotalCltvExpiryDelta: UInt32?, maxPathCount: UInt8?, maxChannelSaturationPowerOfHalf: UInt8?) 
{ + self.maxTotalRoutingFeeMsat = maxTotalRoutingFeeMsat + self.maxTotalCltvExpiryDelta = maxTotalCltvExpiryDelta + self.maxPathCount = maxPathCount + self.maxChannelSaturationPowerOfHalf = maxChannelSaturationPowerOfHalf } } -extension RoutingFees: Equatable, Hashable { - public static func ==(lhs: RoutingFees, rhs: RoutingFees) -> Bool { - if lhs.baseMsat != rhs.baseMsat { + +extension SendingParameters: Equatable, Hashable { + public static func ==(lhs: SendingParameters, rhs: SendingParameters) -> Bool { + if lhs.maxTotalRoutingFeeMsat != rhs.maxTotalRoutingFeeMsat { return false } - if lhs.proportionalMillionths != rhs.proportionalMillionths { + if lhs.maxTotalCltvExpiryDelta != rhs.maxTotalCltvExpiryDelta { + return false + } + if lhs.maxPathCount != rhs.maxPathCount { + return false + } + if lhs.maxChannelSaturationPowerOfHalf != rhs.maxChannelSaturationPowerOfHalf { return false } return true } public func hash(into hasher: inout Hasher) { - hasher.combine(baseMsat) - hasher.combine(proportionalMillionths) + hasher.combine(maxTotalRoutingFeeMsat) + hasher.combine(maxTotalCltvExpiryDelta) + hasher.combine(maxPathCount) + hasher.combine(maxChannelSaturationPowerOfHalf) } } -public struct FfiConverterTypeRoutingFees: FfiConverterRustBuffer { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> RoutingFees { +public struct FfiConverterTypeSendingParameters: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SendingParameters { return - try RoutingFees( - baseMsat: FfiConverterUInt32.read(from: &buf), - proportionalMillionths: FfiConverterUInt32.read(from: &buf) + try SendingParameters( + maxTotalRoutingFeeMsat: FfiConverterOptionTypeMaxTotalRoutingFeeLimit.read(from: &buf), + maxTotalCltvExpiryDelta: FfiConverterOptionUInt32.read(from: &buf), + maxPathCount: FfiConverterOptionUInt8.read(from: &buf), + maxChannelSaturationPowerOfHalf: FfiConverterOptionUInt8.read(from: &buf) ) } - public static func write(_ value: RoutingFees, into buf: inout [UInt8]) { - FfiConverterUInt32.write(value.baseMsat, into: &buf) - FfiConverterUInt32.write(value.proportionalMillionths, into: &buf) + public static func write(_ value: SendingParameters, into buf: inout [UInt8]) { + FfiConverterOptionTypeMaxTotalRoutingFeeLimit.write(value.maxTotalRoutingFeeMsat, into: &buf) + FfiConverterOptionUInt32.write(value.maxTotalCltvExpiryDelta, into: &buf) + FfiConverterOptionUInt8.write(value.maxPathCount, into: &buf) + FfiConverterOptionUInt8.write(value.maxChannelSaturationPowerOfHalf, into: &buf) } } -public func FfiConverterTypeRoutingFees_lift(_ buf: RustBuffer) throws -> RoutingFees { - return try FfiConverterTypeRoutingFees.lift(buf) +public func FfiConverterTypeSendingParameters_lift(_ buf: RustBuffer) throws -> SendingParameters { + return try FfiConverterTypeSendingParameters.lift(buf) } -public func FfiConverterTypeRoutingFees_lower(_ value: RoutingFees) -> RustBuffer { - return FfiConverterTypeRoutingFees.lower(value) +public func FfiConverterTypeSendingParameters_lower(_ value: SendingParameters) -> RustBuffer { + return FfiConverterTypeSendingParameters.lower(value) +} + +// Note that we don't yet support `indirect` for enums. +// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
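The wallet-sync intervals dropped from `Config` above now live in the new `EsploraSyncConfig`, while `Config` gains the optional `nodeAlias` and `sendingParameters` fields. A minimal sketch of constructing the two new records from Swift; the field values are illustrative only, and how the structs are handed to the builder/node is assumed to be covered elsewhere in the bindings:

let syncConfig = EsploraSyncConfig(
    onchainWalletSyncIntervalSecs: 60,       // on-chain wallet sync cadence
    lightningWalletSyncIntervalSecs: 30,     // lightning wallet sync cadence
    feeRateCacheUpdateIntervalSecs: 600      // fee-rate cache refresh cadence
)

let defaultSendingParameters = SendingParameters(
    maxTotalRoutingFeeMsat: nil,             // no explicit override of the routing-fee limit
    maxTotalCltvExpiryDelta: 1008,           // illustrative CLTV expiry budget (blocks)
    maxPathCount: 3,                         // split across at most three paths
    maxChannelSaturationPowerOfHalf: 2       // use at most 1/4 of a channel's capacity per path
)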
+ +public enum BalanceSource { + + case holderForceClosed + case counterpartyForceClosed + case coopClose + case htlc +} + + +public struct FfiConverterTypeBalanceSource: FfiConverterRustBuffer { + typealias SwiftType = BalanceSource + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> BalanceSource { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .holderForceClosed + + case 2: return .counterpartyForceClosed + + case 3: return .coopClose + + case 4: return .htlc + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: BalanceSource, into buf: inout [UInt8]) { + switch value { + + + case .holderForceClosed: + writeInt(&buf, Int32(1)) + + + case .counterpartyForceClosed: + writeInt(&buf, Int32(2)) + + + case .coopClose: + writeInt(&buf, Int32(3)) + + + case .htlc: + writeInt(&buf, Int32(4)) + + } + } +} + + +public func FfiConverterTypeBalanceSource_lift(_ buf: RustBuffer) throws -> BalanceSource { + return try FfiConverterTypeBalanceSource.lift(buf) +} + +public func FfiConverterTypeBalanceSource_lower(_ value: BalanceSource) -> RustBuffer { + return FfiConverterTypeBalanceSource.lower(value) } + +extension BalanceSource: Equatable, Hashable {} + + + + public enum BuildError { @@ -3283,6 +3816,8 @@ public enum BuildError { case InvalidListeningAddresses(message: String) + case InvalidNodeAlias(message: String) + case ReadFailed(message: String) case WriteFailed(message: String) @@ -3295,10 +3830,6 @@ public enum BuildError { case LoggerSetupFailed(message: String) - - fileprivate static func uniffiErrorHandler(_ error: RustBuffer) throws -> Error { - return try FfiConverterTypeBuildError.lift(error) - } } @@ -3332,27 +3863,31 @@ public struct FfiConverterTypeBuildError: FfiConverterRustBuffer { message: try FfiConverterString.read(from: &buf) ) - case 6: return .ReadFailed( + case 6: return .InvalidNodeAlias( message: try FfiConverterString.read(from: &buf) ) - case 7: return .WriteFailed( + case 7: return .ReadFailed( message: try FfiConverterString.read(from: &buf) ) - case 8: return .StoragePathAccessFailed( + case 8: return .WriteFailed( message: try FfiConverterString.read(from: &buf) ) - case 9: return .KvStoreSetupFailed( + case 9: return .StoragePathAccessFailed( message: try FfiConverterString.read(from: &buf) ) - case 10: return .WalletSetupFailed( + case 10: return .KvStoreSetupFailed( message: try FfiConverterString.read(from: &buf) ) - case 11: return .LoggerSetupFailed( + case 11: return .WalletSetupFailed( + message: try FfiConverterString.read(from: &buf) + ) + + case 12: return .LoggerSetupFailed( message: try FfiConverterString.read(from: &buf) ) @@ -3377,18 +3912,20 @@ public struct FfiConverterTypeBuildError: FfiConverterRustBuffer { writeInt(&buf, Int32(4)) case .InvalidListeningAddresses(_ /* message is ignored*/): writeInt(&buf, Int32(5)) - case .ReadFailed(_ /* message is ignored*/): + case .InvalidNodeAlias(_ /* message is ignored*/): writeInt(&buf, Int32(6)) - case .WriteFailed(_ /* message is ignored*/): + case .ReadFailed(_ /* message is ignored*/): writeInt(&buf, Int32(7)) - case .StoragePathAccessFailed(_ /* message is ignored*/): + case .WriteFailed(_ /* message is ignored*/): writeInt(&buf, Int32(8)) - case .KvStoreSetupFailed(_ /* message is ignored*/): + case .StoragePathAccessFailed(_ /* message is ignored*/): writeInt(&buf, Int32(9)) - case .WalletSetupFailed(_ /* message is ignored*/): + case .KvStoreSetupFailed(_ /* message is ignored*/): 
writeInt(&buf, Int32(10)) - case .LoggerSetupFailed(_ /* message is ignored*/): + case .WalletSetupFailed(_ /* message is ignored*/): writeInt(&buf, Int32(11)) + case .LoggerSetupFailed(_ /* message is ignored*/): + writeInt(&buf, Int32(12)) } @@ -3402,27 +3939,30 @@ extension BuildError: Error { } // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + public enum ClosureReason { - case counterpartyForceClosed( - peerMsg: UntrustedString + case counterpartyForceClosed(peerMsg: UntrustedString + ) + case holderForceClosed(broadcastedLatestTxn: Bool? ) - case holderForceClosed case legacyCooperativeClosure case counterpartyInitiatedCooperativeClosure case locallyInitiatedCooperativeClosure case commitmentTxConfirmed case fundingTimedOut - case processingError( - err: String + case processingError(err: String ) case disconnectedPeer case outdatedChannelManager case counterpartyCoopClosedUnfundedChannel case fundingBatchClosure case htlCsTimedOut + case peerFeerateTooLow(peerFeerateSatPerKw: UInt32, requiredFeerateSatPerKw: UInt32 + ) } + public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { typealias SwiftType = ClosureReason @@ -3430,11 +3970,11 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { let variant: Int32 = try readInt(&buf) switch variant { - case 1: return .counterpartyForceClosed( - peerMsg: try FfiConverterTypeUntrustedString.read(from: &buf) + case 1: return .counterpartyForceClosed(peerMsg: try FfiConverterTypeUntrustedString.read(from: &buf) ) - case 2: return .holderForceClosed + case 2: return .holderForceClosed(broadcastedLatestTxn: try FfiConverterOptionBool.read(from: &buf) + ) case 3: return .legacyCooperativeClosure @@ -3446,8 +3986,7 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { case 7: return .fundingTimedOut - case 8: return .processingError( - err: try FfiConverterString.read(from: &buf) + case 8: return .processingError(err: try FfiConverterString.read(from: &buf) ) case 9: return .disconnectedPeer @@ -3460,6 +3999,9 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { case 13: return .htlCsTimedOut + case 14: return .peerFeerateTooLow(peerFeerateSatPerKw: try FfiConverterUInt32.read(from: &buf), requiredFeerateSatPerKw: try FfiConverterUInt32.read(from: &buf) + ) + default: throw UniffiInternalError.unexpectedEnumCase } } @@ -3473,9 +4015,10 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { FfiConverterTypeUntrustedString.write(peerMsg, into: &buf) - case .holderForceClosed: + case let .holderForceClosed(broadcastedLatestTxn): writeInt(&buf, Int32(2)) - + FfiConverterOptionBool.write(broadcastedLatestTxn, into: &buf) + case .legacyCooperativeClosure: writeInt(&buf, Int32(3)) @@ -3521,6 +4064,12 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { case .htlCsTimedOut: writeInt(&buf, Int32(13)) + + case let .peerFeerateTooLow(peerFeerateSatPerKw,requiredFeerateSatPerKw): + writeInt(&buf, Int32(14)) + FfiConverterUInt32.write(peerFeerateSatPerKw, into: &buf) + FfiConverterUInt32.write(requiredFeerateSatPerKw, into: &buf) + } } } @@ -3535,55 +4084,33 @@ public func FfiConverterTypeClosureReason_lower(_ value: ClosureReason) -> RustB } + extension ClosureReason: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
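`ClosureReason.holderForceClosed` now carries whether the latest commitment transaction was broadcast, and the new `peerFeerateTooLow` case exposes the offending fee rates. A small, purely illustrative match over the changed cases; the `describeClosure` helper is hypothetical and not part of the generated API:

func describeClosure(_ reason: ClosureReason) -> String {
    switch reason {
    case let .holderForceClosed(broadcastedLatestTxn):
        // The new field is optional; older serialized events may not carry it.
        return "holder force-close, latest tx broadcast: \(String(describing: broadcastedLatestTxn))"
    case let .peerFeerateTooLow(peerFeerateSatPerKw, requiredFeerateSatPerKw):
        return "peer fee rate \(peerFeerateSatPerKw) sat/kw below required \(requiredFeerateSatPerKw) sat/kw"
    case let .counterpartyForceClosed(peerMsg):
        return "counterparty force-close: \(String(describing: peerMsg))"
    default:
        return "closed: \(reason)"
    }
}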
+ public enum Event { - case paymentSuccessful( - paymentId: PaymentId?, - paymentHash: PaymentHash, - feePaidMsat: UInt64? + case paymentSuccessful(paymentId: PaymentId?, paymentHash: PaymentHash, feePaidMsat: UInt64? ) - case paymentFailed( - paymentId: PaymentId?, - paymentHash: PaymentHash, - reason: PaymentFailureReason? + case paymentFailed(paymentId: PaymentId?, paymentHash: PaymentHash?, reason: PaymentFailureReason? ) - case paymentReceived( - paymentId: PaymentId?, - paymentHash: PaymentHash, - amountMsat: UInt64 + case paymentReceived(paymentId: PaymentId?, paymentHash: PaymentHash, amountMsat: UInt64 ) - case paymentClaimable( - paymentId: PaymentId, - paymentHash: PaymentHash, - claimableAmountMsat: UInt64, - claimDeadline: UInt32? + case paymentClaimable(paymentId: PaymentId, paymentHash: PaymentHash, claimableAmountMsat: UInt64, claimDeadline: UInt32? ) - case channelPending( - channelId: ChannelId, - userChannelId: UserChannelId, - formerTemporaryChannelId: ChannelId, - counterpartyNodeId: PublicKey, - fundingTxo: OutPoint + case channelPending(channelId: ChannelId, userChannelId: UserChannelId, formerTemporaryChannelId: ChannelId, counterpartyNodeId: PublicKey, fundingTxo: OutPoint ) - case channelReady( - channelId: ChannelId, - userChannelId: UserChannelId, - counterpartyNodeId: PublicKey? + case channelReady(channelId: ChannelId, userChannelId: UserChannelId, counterpartyNodeId: PublicKey? ) - case channelClosed( - channelId: ChannelId, - userChannelId: UserChannelId, - counterpartyNodeId: PublicKey?, - reason: ClosureReason? + case channelClosed(channelId: ChannelId, userChannelId: UserChannelId, counterpartyNodeId: PublicKey?, reason: ClosureReason? ) } + public struct FfiConverterTypeEvent: FfiConverterRustBuffer { typealias SwiftType = Event @@ -3591,50 +4118,25 @@ public struct FfiConverterTypeEvent: FfiConverterRustBuffer { let variant: Int32 = try readInt(&buf) switch variant { - case 1: return .paymentSuccessful( - paymentId: try FfiConverterOptionTypePaymentId.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), - feePaidMsat: try FfiConverterOptionUInt64.read(from: &buf) + case 1: return .paymentSuccessful(paymentId: try FfiConverterOptionTypePaymentId.read(from: &buf), paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), feePaidMsat: try FfiConverterOptionUInt64.read(from: &buf) ) - case 2: return .paymentFailed( - paymentId: try FfiConverterOptionTypePaymentId.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), - reason: try FfiConverterOptionTypePaymentFailureReason.read(from: &buf) + case 2: return .paymentFailed(paymentId: try FfiConverterOptionTypePaymentId.read(from: &buf), paymentHash: try FfiConverterOptionTypePaymentHash.read(from: &buf), reason: try FfiConverterOptionTypePaymentFailureReason.read(from: &buf) ) - case 3: return .paymentReceived( - paymentId: try FfiConverterOptionTypePaymentId.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), - amountMsat: try FfiConverterUInt64.read(from: &buf) + case 3: return .paymentReceived(paymentId: try FfiConverterOptionTypePaymentId.read(from: &buf), paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), amountMsat: try FfiConverterUInt64.read(from: &buf) ) - case 4: return .paymentClaimable( - paymentId: try FfiConverterTypePaymentId.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), - claimableAmountMsat: try FfiConverterUInt64.read(from: &buf), - claimDeadline: 
try FfiConverterOptionUInt32.read(from: &buf) + case 4: return .paymentClaimable(paymentId: try FfiConverterTypePaymentId.read(from: &buf), paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), claimableAmountMsat: try FfiConverterUInt64.read(from: &buf), claimDeadline: try FfiConverterOptionUInt32.read(from: &buf) ) - case 5: return .channelPending( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), - formerTemporaryChannelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - fundingTxo: try FfiConverterTypeOutPoint.read(from: &buf) + case 5: return .channelPending(channelId: try FfiConverterTypeChannelId.read(from: &buf), userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), formerTemporaryChannelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), fundingTxo: try FfiConverterTypeOutPoint.read(from: &buf) ) - case 6: return .channelReady( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterOptionTypePublicKey.read(from: &buf) + case 6: return .channelReady(channelId: try FfiConverterTypeChannelId.read(from: &buf), userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterOptionTypePublicKey.read(from: &buf) ) - case 7: return .channelClosed( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterOptionTypePublicKey.read(from: &buf), - reason: try FfiConverterOptionTypeClosureReason.read(from: &buf) + case 7: return .channelClosed(channelId: try FfiConverterTypeChannelId.read(from: &buf), userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterOptionTypePublicKey.read(from: &buf), reason: try FfiConverterOptionTypeClosureReason.read(from: &buf) ) default: throw UniffiInternalError.unexpectedEnumCase @@ -3655,7 +4157,7 @@ public struct FfiConverterTypeEvent: FfiConverterRustBuffer { case let .paymentFailed(paymentId,paymentHash,reason): writeInt(&buf, Int32(2)) FfiConverterOptionTypePaymentId.write(paymentId, into: &buf) - FfiConverterTypePaymentHash.write(paymentHash, into: &buf) + FfiConverterOptionTypePaymentHash.write(paymentHash, into: &buf) FfiConverterOptionTypePaymentFailureReason.write(reason, into: &buf) @@ -3711,54 +4213,31 @@ public func FfiConverterTypeEvent_lower(_ value: Event) -> RustBuffer { } + extension Event: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
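`Event.paymentFailed` now reports an optional `paymentHash`, so consumers have to unwrap it before use. A hedged sketch of an event handler over the changed case; the `handleEvent` function and the logging are illustrative, not part of the bindings:

func handleEvent(_ event: Event) {
    switch event {
    case let .paymentFailed(paymentId, paymentHash, reason):
        // `paymentHash` may be absent, e.g. when a BOLT12 payment fails before an invoice is received.
        let hash = paymentHash.map { String(describing: $0) } ?? "<unknown>"
        print("payment \(String(describing: paymentId)) failed, hash: \(hash), reason: \(String(describing: reason))")
    case let .paymentSuccessful(_, paymentHash, feePaidMsat):
        print("payment \(String(describing: paymentHash)) succeeded, fee: \(feePaidMsat ?? 0) msat")
    default:
        break
    }
}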
+ public enum LightningBalance { - case claimableOnChannelClose( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - amountSatoshis: UInt64 + case claimableOnChannelClose(channelId: ChannelId, counterpartyNodeId: PublicKey, amountSatoshis: UInt64, transactionFeeSatoshis: UInt64, outboundPaymentHtlcRoundedMsat: UInt64, outboundForwardedHtlcRoundedMsat: UInt64, inboundClaimingHtlcRoundedMsat: UInt64, inboundHtlcRoundedMsat: UInt64 ) - case claimableAwaitingConfirmations( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - amountSatoshis: UInt64, - confirmationHeight: UInt32 + case claimableAwaitingConfirmations(channelId: ChannelId, counterpartyNodeId: PublicKey, amountSatoshis: UInt64, confirmationHeight: UInt32, source: BalanceSource ) - case contentiousClaimable( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - amountSatoshis: UInt64, - timeoutHeight: UInt32, - paymentHash: PaymentHash, - paymentPreimage: PaymentPreimage + case contentiousClaimable(channelId: ChannelId, counterpartyNodeId: PublicKey, amountSatoshis: UInt64, timeoutHeight: UInt32, paymentHash: PaymentHash, paymentPreimage: PaymentPreimage ) - case maybeTimeoutClaimableHtlc( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - amountSatoshis: UInt64, - claimableHeight: UInt32, - paymentHash: PaymentHash + case maybeTimeoutClaimableHtlc(channelId: ChannelId, counterpartyNodeId: PublicKey, amountSatoshis: UInt64, claimableHeight: UInt32, paymentHash: PaymentHash, outboundPayment: Bool ) - case maybePreimageClaimableHtlc( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - amountSatoshis: UInt64, - expiryHeight: UInt32, - paymentHash: PaymentHash + case maybePreimageClaimableHtlc(channelId: ChannelId, counterpartyNodeId: PublicKey, amountSatoshis: UInt64, expiryHeight: UInt32, paymentHash: PaymentHash ) - case counterpartyRevokedOutputClaimable( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - amountSatoshis: UInt64 + case counterpartyRevokedOutputClaimable(channelId: ChannelId, counterpartyNodeId: PublicKey, amountSatoshis: UInt64 ) } + public struct FfiConverterTypeLightningBalance: FfiConverterRustBuffer { typealias SwiftType = LightningBalance @@ -3766,48 +4245,22 @@ public struct FfiConverterTypeLightningBalance: FfiConverterRustBuffer { let variant: Int32 = try readInt(&buf) switch variant { - case 1: return .claimableOnChannelClose( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf) + case 1: return .claimableOnChannelClose(channelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf), transactionFeeSatoshis: try FfiConverterUInt64.read(from: &buf), outboundPaymentHtlcRoundedMsat: try FfiConverterUInt64.read(from: &buf), outboundForwardedHtlcRoundedMsat: try FfiConverterUInt64.read(from: &buf), inboundClaimingHtlcRoundedMsat: try FfiConverterUInt64.read(from: &buf), inboundHtlcRoundedMsat: try FfiConverterUInt64.read(from: &buf) ) - case 2: return .claimableAwaitingConfirmations( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf), - confirmationHeight: try FfiConverterUInt32.read(from: &buf) + case 2: return .claimableAwaitingConfirmations(channelId: try 
FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf), confirmationHeight: try FfiConverterUInt32.read(from: &buf), source: try FfiConverterTypeBalanceSource.read(from: &buf) ) - case 3: return .contentiousClaimable( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf), - timeoutHeight: try FfiConverterUInt32.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), - paymentPreimage: try FfiConverterTypePaymentPreimage.read(from: &buf) + case 3: return .contentiousClaimable(channelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf), timeoutHeight: try FfiConverterUInt32.read(from: &buf), paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), paymentPreimage: try FfiConverterTypePaymentPreimage.read(from: &buf) ) - case 4: return .maybeTimeoutClaimableHtlc( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf), - claimableHeight: try FfiConverterUInt32.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf) + case 4: return .maybeTimeoutClaimableHtlc(channelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf), claimableHeight: try FfiConverterUInt32.read(from: &buf), paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), outboundPayment: try FfiConverterBool.read(from: &buf) ) - case 5: return .maybePreimageClaimableHtlc( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf), - expiryHeight: try FfiConverterUInt32.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf) + case 5: return .maybePreimageClaimableHtlc(channelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf), expiryHeight: try FfiConverterUInt32.read(from: &buf), paymentHash: try FfiConverterTypePaymentHash.read(from: &buf) ) - case 6: return .counterpartyRevokedOutputClaimable( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf) + case 6: return .counterpartyRevokedOutputClaimable(channelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf) ) default: throw UniffiInternalError.unexpectedEnumCase @@ -3818,19 +4271,25 @@ public struct FfiConverterTypeLightningBalance: FfiConverterRustBuffer { switch value { - case let .claimableOnChannelClose(channelId,counterpartyNodeId,amountSatoshis): + case let .claimableOnChannelClose(channelId,counterpartyNodeId,amountSatoshis,transactionFeeSatoshis,outboundPaymentHtlcRoundedMsat,outboundForwardedHtlcRoundedMsat,inboundClaimingHtlcRoundedMsat,inboundHtlcRoundedMsat): 
writeInt(&buf, Int32(1)) FfiConverterTypeChannelId.write(channelId, into: &buf) FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) FfiConverterUInt64.write(amountSatoshis, into: &buf) + FfiConverterUInt64.write(transactionFeeSatoshis, into: &buf) + FfiConverterUInt64.write(outboundPaymentHtlcRoundedMsat, into: &buf) + FfiConverterUInt64.write(outboundForwardedHtlcRoundedMsat, into: &buf) + FfiConverterUInt64.write(inboundClaimingHtlcRoundedMsat, into: &buf) + FfiConverterUInt64.write(inboundHtlcRoundedMsat, into: &buf) - case let .claimableAwaitingConfirmations(channelId,counterpartyNodeId,amountSatoshis,confirmationHeight): + case let .claimableAwaitingConfirmations(channelId,counterpartyNodeId,amountSatoshis,confirmationHeight,source): writeInt(&buf, Int32(2)) FfiConverterTypeChannelId.write(channelId, into: &buf) FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) FfiConverterUInt64.write(amountSatoshis, into: &buf) FfiConverterUInt32.write(confirmationHeight, into: &buf) + FfiConverterTypeBalanceSource.write(source, into: &buf) case let .contentiousClaimable(channelId,counterpartyNodeId,amountSatoshis,timeoutHeight,paymentHash,paymentPreimage): @@ -3843,130 +4302,255 @@ public struct FfiConverterTypeLightningBalance: FfiConverterRustBuffer { FfiConverterTypePaymentPreimage.write(paymentPreimage, into: &buf) - case let .maybeTimeoutClaimableHtlc(channelId,counterpartyNodeId,amountSatoshis,claimableHeight,paymentHash): + case let .maybeTimeoutClaimableHtlc(channelId,counterpartyNodeId,amountSatoshis,claimableHeight,paymentHash,outboundPayment): writeInt(&buf, Int32(4)) FfiConverterTypeChannelId.write(channelId, into: &buf) FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) FfiConverterUInt64.write(amountSatoshis, into: &buf) FfiConverterUInt32.write(claimableHeight, into: &buf) FfiConverterTypePaymentHash.write(paymentHash, into: &buf) + FfiConverterBool.write(outboundPayment, into: &buf) + + + case let .maybePreimageClaimableHtlc(channelId,counterpartyNodeId,amountSatoshis,expiryHeight,paymentHash): + writeInt(&buf, Int32(5)) + FfiConverterTypeChannelId.write(channelId, into: &buf) + FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) + FfiConverterUInt64.write(amountSatoshis, into: &buf) + FfiConverterUInt32.write(expiryHeight, into: &buf) + FfiConverterTypePaymentHash.write(paymentHash, into: &buf) + + + case let .counterpartyRevokedOutputClaimable(channelId,counterpartyNodeId,amountSatoshis): + writeInt(&buf, Int32(6)) + FfiConverterTypeChannelId.write(channelId, into: &buf) + FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) + FfiConverterUInt64.write(amountSatoshis, into: &buf) + } + } +} + + +public func FfiConverterTypeLightningBalance_lift(_ buf: RustBuffer) throws -> LightningBalance { + return try FfiConverterTypeLightningBalance.lift(buf) +} + +public func FfiConverterTypeLightningBalance_lower(_ value: LightningBalance) -> RustBuffer { + return FfiConverterTypeLightningBalance.lower(value) +} + + + +extension LightningBalance: Equatable, Hashable {} + + + +// Note that we don't yet support `indirect` for enums. +// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
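`LightningBalance.claimableOnChannelClose` now also reports the commitment-transaction fee and the rounded-out dust HTLC amounts, `claimableAwaitingConfirmations` gains a `BalanceSource`, and `maybeTimeoutClaimableHtlc` flags whether the HTLC belongs to an outbound payment. A small illustrative helper over the new fields; the function itself is hypothetical:

func summarize(_ balances: [LightningBalance]) {
    for balance in balances {
        switch balance {
        case let .claimableOnChannelClose(channelId, _, amountSatoshis, transactionFeeSatoshis, _, _, _, _):
            // `transactionFeeSatoshis` is the commitment-tx fee the holder would pay on a force close.
            print("\(channelId): \(amountSatoshis) sat claimable (force-close fee \(transactionFeeSatoshis) sat)")
        case let .claimableAwaitingConfirmations(channelId, _, amountSatoshis, confirmationHeight, source):
            // `source` records which kind of close produced this balance.
            print("\(channelId): \(amountSatoshis) sat confirming at height \(confirmationHeight), source \(source)")
        case let .maybeTimeoutClaimableHtlc(channelId, _, amountSatoshis, _, _, outboundPayment):
            print("\(channelId): \(amountSatoshis) sat HTLC, outbound payment: \(outboundPayment)")
        default:
            break
        }
    }
}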
+ +public enum LogLevel { + + case gossip + case trace + case debug + case info + case warn + case error +} + + +public struct FfiConverterTypeLogLevel: FfiConverterRustBuffer { + typealias SwiftType = LogLevel + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> LogLevel { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .gossip + + case 2: return .trace + + case 3: return .debug + + case 4: return .info + + case 5: return .warn + + case 6: return .error + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: LogLevel, into buf: inout [UInt8]) { + switch value { + + + case .gossip: + writeInt(&buf, Int32(1)) + + + case .trace: + writeInt(&buf, Int32(2)) + + + case .debug: + writeInt(&buf, Int32(3)) + + + case .info: + writeInt(&buf, Int32(4)) + + + case .warn: + writeInt(&buf, Int32(5)) + + + case .error: + writeInt(&buf, Int32(6)) + + } + } +} + + +public func FfiConverterTypeLogLevel_lift(_ buf: RustBuffer) throws -> LogLevel { + return try FfiConverterTypeLogLevel.lift(buf) +} + +public func FfiConverterTypeLogLevel_lower(_ value: LogLevel) -> RustBuffer { + return FfiConverterTypeLogLevel.lower(value) +} + + + +extension LogLevel: Equatable, Hashable {} + + + +// Note that we don't yet support `indirect` for enums. +// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + +public enum MaxDustHtlcExposure { + + case fixedLimit(limitMsat: UInt64 + ) + case feeRateMultiplier(multiplier: UInt64 + ) +} + + +public struct FfiConverterTypeMaxDustHTLCExposure: FfiConverterRustBuffer { + typealias SwiftType = MaxDustHtlcExposure + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> MaxDustHtlcExposure { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .fixedLimit(limitMsat: try FfiConverterUInt64.read(from: &buf) + ) + + case 2: return .feeRateMultiplier(multiplier: try FfiConverterUInt64.read(from: &buf) + ) + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: MaxDustHtlcExposure, into buf: inout [UInt8]) { + switch value { - case let .maybePreimageClaimableHtlc(channelId,counterpartyNodeId,amountSatoshis,expiryHeight,paymentHash): - writeInt(&buf, Int32(5)) - FfiConverterTypeChannelId.write(channelId, into: &buf) - FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) - FfiConverterUInt64.write(amountSatoshis, into: &buf) - FfiConverterUInt32.write(expiryHeight, into: &buf) - FfiConverterTypePaymentHash.write(paymentHash, into: &buf) + + case let .fixedLimit(limitMsat): + writeInt(&buf, Int32(1)) + FfiConverterUInt64.write(limitMsat, into: &buf) - case let .counterpartyRevokedOutputClaimable(channelId,counterpartyNodeId,amountSatoshis): - writeInt(&buf, Int32(6)) - FfiConverterTypeChannelId.write(channelId, into: &buf) - FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) - FfiConverterUInt64.write(amountSatoshis, into: &buf) + case let .feeRateMultiplier(multiplier): + writeInt(&buf, Int32(2)) + FfiConverterUInt64.write(multiplier, into: &buf) } } } -public func FfiConverterTypeLightningBalance_lift(_ buf: RustBuffer) throws -> LightningBalance { - return try FfiConverterTypeLightningBalance.lift(buf) +public func FfiConverterTypeMaxDustHTLCExposure_lift(_ buf: RustBuffer) throws -> MaxDustHtlcExposure { + return try FfiConverterTypeMaxDustHTLCExposure.lift(buf) } -public func FfiConverterTypeLightningBalance_lower(_ 
value: LightningBalance) -> RustBuffer { - return FfiConverterTypeLightningBalance.lower(value) +public func FfiConverterTypeMaxDustHTLCExposure_lower(_ value: MaxDustHtlcExposure) -> RustBuffer { + return FfiConverterTypeMaxDustHTLCExposure.lower(value) } -extension LightningBalance: Equatable, Hashable {} + +extension MaxDustHtlcExposure: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. -public enum LogLevel { + +public enum MaxTotalRoutingFeeLimit { - case gossip - case trace - case debug - case info - case warn - case error + case none + case some(amountMsat: UInt64 + ) } -public struct FfiConverterTypeLogLevel: FfiConverterRustBuffer { - typealias SwiftType = LogLevel - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> LogLevel { +public struct FfiConverterTypeMaxTotalRoutingFeeLimit: FfiConverterRustBuffer { + typealias SwiftType = MaxTotalRoutingFeeLimit + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> MaxTotalRoutingFeeLimit { let variant: Int32 = try readInt(&buf) switch variant { - case 1: return .gossip - - case 2: return .trace - - case 3: return .debug - - case 4: return .info - - case 5: return .warn + case 1: return .none - case 6: return .error + case 2: return .some(amountMsat: try FfiConverterUInt64.read(from: &buf) + ) default: throw UniffiInternalError.unexpectedEnumCase } } - public static func write(_ value: LogLevel, into buf: inout [UInt8]) { + public static func write(_ value: MaxTotalRoutingFeeLimit, into buf: inout [UInt8]) { switch value { - case .gossip: + case .none: writeInt(&buf, Int32(1)) - case .trace: + case let .some(amountMsat): writeInt(&buf, Int32(2)) - - - case .debug: - writeInt(&buf, Int32(3)) - - - case .info: - writeInt(&buf, Int32(4)) - - - case .warn: - writeInt(&buf, Int32(5)) - - - case .error: - writeInt(&buf, Int32(6)) - + FfiConverterUInt64.write(amountMsat, into: &buf) + } } } -public func FfiConverterTypeLogLevel_lift(_ buf: RustBuffer) throws -> LogLevel { - return try FfiConverterTypeLogLevel.lift(buf) +public func FfiConverterTypeMaxTotalRoutingFeeLimit_lift(_ buf: RustBuffer) throws -> MaxTotalRoutingFeeLimit { + return try FfiConverterTypeMaxTotalRoutingFeeLimit.lift(buf) } -public func FfiConverterTypeLogLevel_lower(_ value: LogLevel) -> RustBuffer { - return FfiConverterTypeLogLevel.lower(value) +public func FfiConverterTypeMaxTotalRoutingFeeLimit_lower(_ value: MaxTotalRoutingFeeLimit) -> RustBuffer { + return FfiConverterTypeMaxTotalRoutingFeeLimit.lower(value) } -extension LogLevel: Equatable, Hashable {} + +extension MaxTotalRoutingFeeLimit: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
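`MaxTotalRoutingFeeLimit` is the value expected by `SendingParameters.maxTotalRoutingFeeMsat`. Leaving the field `nil` keeps the node's default behavior, whereas passing the enum lets a caller pin an explicit cap or drop the limit entirely; that reading of `nil` vs. `.none` mirrors LDK's route-parameter semantics and is stated here as an assumption. An illustrative construction:

// Cap total routing fees for a payment at 5,000 msat.
let cappedFee = SendingParameters(
    maxTotalRoutingFeeMsat: MaxTotalRoutingFeeLimit.some(amountMsat: 5_000),
    maxTotalCltvExpiryDelta: nil,
    maxPathCount: nil,
    maxChannelSaturationPowerOfHalf: nil
)

// Explicitly lift the routing-fee limit instead of using the default.
let noFeeCap: MaxTotalRoutingFeeLimit = .none
let uncappedFee = SendingParameters(
    maxTotalRoutingFeeMsat: noFeeCap,
    maxTotalCltvExpiryDelta: nil,
    maxPathCount: nil,
    maxChannelSaturationPowerOfHalf: nil
)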
+ public enum Network { case bitcoin @@ -3975,6 +4559,7 @@ public enum Network { case regtest } + public struct FfiConverterTypeNetwork: FfiConverterRustBuffer { typealias SwiftType = Network @@ -4027,6 +4612,7 @@ public func FfiConverterTypeNetwork_lower(_ value: Network) -> RustBuffer { } + extension Network: Equatable, Hashable {} @@ -4074,8 +4660,6 @@ public enum NodeError { case OnchainTxSigningFailed(message: String) - case MessageSigningFailed(message: String) - case TxSyncFailed(message: String) case TxSyncTimeout(message: String) @@ -4086,6 +4670,8 @@ public enum NodeError { case LiquidityRequestFailed(message: String) + case UriParameterParsingFailed(message: String) + case InvalidAddress(message: String) case InvalidSocketAddress(message: String) @@ -4118,6 +4704,12 @@ public enum NodeError { case InvalidNetwork(message: String) + case InvalidUri(message: String) + + case InvalidQuantity(message: String) + + case InvalidNodeAlias(message: String) + case DuplicatePayment(message: String) case UnsupportedCurrency(message: String) @@ -4128,10 +4720,6 @@ public enum NodeError { case LiquidityFeeTooHigh(message: String) - - fileprivate static func uniffiErrorHandler(_ error: RustBuffer) throws -> Error { - return try FfiConverterTypeNodeError.lift(error) - } } @@ -4221,27 +4809,27 @@ public struct FfiConverterTypeNodeError: FfiConverterRustBuffer { message: try FfiConverterString.read(from: &buf) ) - case 20: return .MessageSigningFailed( + case 20: return .TxSyncFailed( message: try FfiConverterString.read(from: &buf) ) - case 21: return .TxSyncFailed( + case 21: return .TxSyncTimeout( message: try FfiConverterString.read(from: &buf) ) - case 22: return .TxSyncTimeout( + case 22: return .GossipUpdateFailed( message: try FfiConverterString.read(from: &buf) ) - case 23: return .GossipUpdateFailed( + case 23: return .GossipUpdateTimeout( message: try FfiConverterString.read(from: &buf) ) - case 24: return .GossipUpdateTimeout( + case 24: return .LiquidityRequestFailed( message: try FfiConverterString.read(from: &buf) ) - case 25: return .LiquidityRequestFailed( + case 25: return .UriParameterParsingFailed( message: try FfiConverterString.read(from: &buf) ) @@ -4309,23 +4897,35 @@ public struct FfiConverterTypeNodeError: FfiConverterRustBuffer { message: try FfiConverterString.read(from: &buf) ) - case 42: return .DuplicatePayment( + case 42: return .InvalidUri( + message: try FfiConverterString.read(from: &buf) + ) + + case 43: return .InvalidQuantity( + message: try FfiConverterString.read(from: &buf) + ) + + case 44: return .InvalidNodeAlias( + message: try FfiConverterString.read(from: &buf) + ) + + case 45: return .DuplicatePayment( message: try FfiConverterString.read(from: &buf) ) - case 43: return .UnsupportedCurrency( + case 46: return .UnsupportedCurrency( message: try FfiConverterString.read(from: &buf) ) - case 44: return .InsufficientFunds( + case 47: return .InsufficientFunds( message: try FfiConverterString.read(from: &buf) ) - case 45: return .LiquiditySourceUnavailable( + case 48: return .LiquiditySourceUnavailable( message: try FfiConverterString.read(from: &buf) ) - case 46: return .LiquidityFeeTooHigh( + case 49: return .LiquidityFeeTooHigh( message: try FfiConverterString.read(from: &buf) ) @@ -4378,17 +4978,17 @@ public struct FfiConverterTypeNodeError: FfiConverterRustBuffer { writeInt(&buf, Int32(18)) case .OnchainTxSigningFailed(_ /* message is ignored*/): writeInt(&buf, Int32(19)) - case .MessageSigningFailed(_ /* message is ignored*/): - writeInt(&buf, 
Int32(20)) case .TxSyncFailed(_ /* message is ignored*/): - writeInt(&buf, Int32(21)) + writeInt(&buf, Int32(20)) case .TxSyncTimeout(_ /* message is ignored*/): - writeInt(&buf, Int32(22)) + writeInt(&buf, Int32(21)) case .GossipUpdateFailed(_ /* message is ignored*/): - writeInt(&buf, Int32(23)) + writeInt(&buf, Int32(22)) case .GossipUpdateTimeout(_ /* message is ignored*/): - writeInt(&buf, Int32(24)) + writeInt(&buf, Int32(23)) case .LiquidityRequestFailed(_ /* message is ignored*/): + writeInt(&buf, Int32(24)) + case .UriParameterParsingFailed(_ /* message is ignored*/): writeInt(&buf, Int32(25)) case .InvalidAddress(_ /* message is ignored*/): writeInt(&buf, Int32(26)) @@ -4422,16 +5022,22 @@ public struct FfiConverterTypeNodeError: FfiConverterRustBuffer { writeInt(&buf, Int32(40)) case .InvalidNetwork(_ /* message is ignored*/): writeInt(&buf, Int32(41)) - case .DuplicatePayment(_ /* message is ignored*/): + case .InvalidUri(_ /* message is ignored*/): writeInt(&buf, Int32(42)) - case .UnsupportedCurrency(_ /* message is ignored*/): + case .InvalidQuantity(_ /* message is ignored*/): writeInt(&buf, Int32(43)) - case .InsufficientFunds(_ /* message is ignored*/): + case .InvalidNodeAlias(_ /* message is ignored*/): writeInt(&buf, Int32(44)) - case .LiquiditySourceUnavailable(_ /* message is ignored*/): + case .DuplicatePayment(_ /* message is ignored*/): writeInt(&buf, Int32(45)) - case .LiquidityFeeTooHigh(_ /* message is ignored*/): + case .UnsupportedCurrency(_ /* message is ignored*/): writeInt(&buf, Int32(46)) + case .InsufficientFunds(_ /* message is ignored*/): + writeInt(&buf, Int32(47)) + case .LiquiditySourceUnavailable(_ /* message is ignored*/): + writeInt(&buf, Int32(48)) + case .LiquidityFeeTooHigh(_ /* message is ignored*/): + writeInt(&buf, Int32(49)) } @@ -4445,12 +5051,14 @@ extension NodeError: Error { } // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + public enum PaymentDirection { case inbound case outbound } + public struct FfiConverterTypePaymentDirection: FfiConverterRustBuffer { typealias SwiftType = PaymentDirection @@ -4491,12 +5099,14 @@ public func FfiConverterTypePaymentDirection_lower(_ value: PaymentDirection) -> } + extension PaymentDirection: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
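`NodeError` drops `MessageSigningFailed` and gains `UriParameterParsingFailed`, `InvalidUri`, `InvalidQuantity`, and `InvalidNodeAlias`, which shifts the variant numbering above. A minimal sketch of grouping the new validation errors; the `isInputError` helper is hypothetical:

func isInputError(_ error: NodeError) -> Bool {
    switch error {
    case .InvalidUri, .InvalidQuantity, .InvalidNodeAlias, .UriParameterParsingFailed:
        // Newly added variants for parameter validation.
        return true
    default:
        return false
    }
}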
+ public enum PaymentFailureReason { case recipientRejected @@ -4505,8 +5115,12 @@ public enum PaymentFailureReason { case paymentExpired case routeNotFound case unexpectedError + case unknownRequiredFeatures + case invoiceRequestExpired + case invoiceRequestRejected } + public struct FfiConverterTypePaymentFailureReason: FfiConverterRustBuffer { typealias SwiftType = PaymentFailureReason @@ -4526,6 +5140,12 @@ public struct FfiConverterTypePaymentFailureReason: FfiConverterRustBuffer { case 6: return .unexpectedError + case 7: return .unknownRequiredFeatures + + case 8: return .invoiceRequestExpired + + case 9: return .invoiceRequestRejected + default: throw UniffiInternalError.unexpectedEnumCase } } @@ -4557,6 +5177,18 @@ public struct FfiConverterTypePaymentFailureReason: FfiConverterRustBuffer { case .unexpectedError: writeInt(&buf, Int32(6)) + + case .unknownRequiredFeatures: + writeInt(&buf, Int32(7)) + + + case .invoiceRequestExpired: + writeInt(&buf, Int32(8)) + + + case .invoiceRequestRejected: + writeInt(&buf, Int32(9)) + } } } @@ -4571,43 +5203,30 @@ public func FfiConverterTypePaymentFailureReason_lower(_ value: PaymentFailureRe } + extension PaymentFailureReason: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + public enum PaymentKind { case onchain - case bolt11( - hash: PaymentHash, - preimage: PaymentPreimage?, - secret: PaymentSecret? + case bolt11(hash: PaymentHash, preimage: PaymentPreimage?, secret: PaymentSecret? ) - case bolt11Jit( - hash: PaymentHash, - preimage: PaymentPreimage?, - secret: PaymentSecret?, - lspFeeLimits: LspFeeLimits + case bolt11Jit(hash: PaymentHash, preimage: PaymentPreimage?, secret: PaymentSecret?, lspFeeLimits: LspFeeLimits ) - case bolt12Offer( - hash: PaymentHash?, - preimage: PaymentPreimage?, - secret: PaymentSecret?, - offerId: OfferId + case bolt12Offer(hash: PaymentHash?, preimage: PaymentPreimage?, secret: PaymentSecret?, offerId: OfferId, payerNote: UntrustedString?, quantity: UInt64? ) - case bolt12Refund( - hash: PaymentHash?, - preimage: PaymentPreimage?, - secret: PaymentSecret? + case bolt12Refund(hash: PaymentHash?, preimage: PaymentPreimage?, secret: PaymentSecret?, payerNote: UntrustedString?, quantity: UInt64? ) - case spontaneous( - hash: PaymentHash, - preimage: PaymentPreimage? + case spontaneous(hash: PaymentHash, preimage: PaymentPreimage? 
) } + public struct FfiConverterTypePaymentKind: FfiConverterRustBuffer { typealias SwiftType = PaymentKind @@ -4617,35 +5236,19 @@ public struct FfiConverterTypePaymentKind: FfiConverterRustBuffer { case 1: return .onchain - case 2: return .bolt11( - hash: try FfiConverterTypePaymentHash.read(from: &buf), - preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), - secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf) + case 2: return .bolt11(hash: try FfiConverterTypePaymentHash.read(from: &buf), preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf) ) - case 3: return .bolt11Jit( - hash: try FfiConverterTypePaymentHash.read(from: &buf), - preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), - secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf), - lspFeeLimits: try FfiConverterTypeLSPFeeLimits.read(from: &buf) + case 3: return .bolt11Jit(hash: try FfiConverterTypePaymentHash.read(from: &buf), preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf), lspFeeLimits: try FfiConverterTypeLSPFeeLimits.read(from: &buf) ) - case 4: return .bolt12Offer( - hash: try FfiConverterOptionTypePaymentHash.read(from: &buf), - preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), - secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf), - offerId: try FfiConverterTypeOfferId.read(from: &buf) + case 4: return .bolt12Offer(hash: try FfiConverterOptionTypePaymentHash.read(from: &buf), preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf), offerId: try FfiConverterTypeOfferId.read(from: &buf), payerNote: try FfiConverterOptionTypeUntrustedString.read(from: &buf), quantity: try FfiConverterOptionUInt64.read(from: &buf) ) - case 5: return .bolt12Refund( - hash: try FfiConverterOptionTypePaymentHash.read(from: &buf), - preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), - secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf) + case 5: return .bolt12Refund(hash: try FfiConverterOptionTypePaymentHash.read(from: &buf), preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf), payerNote: try FfiConverterOptionTypeUntrustedString.read(from: &buf), quantity: try FfiConverterOptionUInt64.read(from: &buf) ) - case 6: return .spontaneous( - hash: try FfiConverterTypePaymentHash.read(from: &buf), - preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf) + case 6: return .spontaneous(hash: try FfiConverterTypePaymentHash.read(from: &buf), preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf) ) default: throw UniffiInternalError.unexpectedEnumCase @@ -4675,19 +5278,23 @@ public struct FfiConverterTypePaymentKind: FfiConverterRustBuffer { FfiConverterTypeLSPFeeLimits.write(lspFeeLimits, into: &buf) - case let .bolt12Offer(hash,preimage,secret,offerId): + case let .bolt12Offer(hash,preimage,secret,offerId,payerNote,quantity): writeInt(&buf, Int32(4)) FfiConverterOptionTypePaymentHash.write(hash, into: &buf) FfiConverterOptionTypePaymentPreimage.write(preimage, into: &buf) FfiConverterOptionTypePaymentSecret.write(secret, into: &buf) FfiConverterTypeOfferId.write(offerId, into: &buf) + FfiConverterOptionTypeUntrustedString.write(payerNote, into: &buf) + 
FfiConverterOptionUInt64.write(quantity, into: &buf) - case let .bolt12Refund(hash,preimage,secret): + case let .bolt12Refund(hash,preimage,secret,payerNote,quantity): writeInt(&buf, Int32(5)) FfiConverterOptionTypePaymentHash.write(hash, into: &buf) FfiConverterOptionTypePaymentPreimage.write(preimage, into: &buf) FfiConverterOptionTypePaymentSecret.write(secret, into: &buf) + FfiConverterOptionTypeUntrustedString.write(payerNote, into: &buf) + FfiConverterOptionUInt64.write(quantity, into: &buf) case let .spontaneous(hash,preimage): @@ -4709,12 +5316,14 @@ public func FfiConverterTypePaymentKind_lower(_ value: PaymentKind) -> RustBuffe } + extension PaymentKind: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + public enum PaymentStatus { case pending @@ -4722,6 +5331,7 @@ public enum PaymentStatus { case failed } + public struct FfiConverterTypePaymentStatus: FfiConverterRustBuffer { typealias SwiftType = PaymentStatus @@ -4768,33 +5378,25 @@ public func FfiConverterTypePaymentStatus_lower(_ value: PaymentStatus) -> RustB } + extension PaymentStatus: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + public enum PendingSweepBalance { - case pendingBroadcast( - channelId: ChannelId?, - amountSatoshis: UInt64 + case pendingBroadcast(channelId: ChannelId?, amountSatoshis: UInt64 ) - case broadcastAwaitingConfirmation( - channelId: ChannelId?, - latestBroadcastHeight: UInt32, - latestSpendingTxid: Txid, - amountSatoshis: UInt64 + case broadcastAwaitingConfirmation(channelId: ChannelId?, latestBroadcastHeight: UInt32, latestSpendingTxid: Txid, amountSatoshis: UInt64 ) - case awaitingThresholdConfirmations( - channelId: ChannelId?, - latestSpendingTxid: Txid, - confirmationHash: BlockHash, - confirmationHeight: UInt32, - amountSatoshis: UInt64 + case awaitingThresholdConfirmations(channelId: ChannelId?, latestSpendingTxid: Txid, confirmationHash: BlockHash, confirmationHeight: UInt32, amountSatoshis: UInt64 ) } + public struct FfiConverterTypePendingSweepBalance: FfiConverterRustBuffer { typealias SwiftType = PendingSweepBalance @@ -4802,24 +5404,13 @@ public struct FfiConverterTypePendingSweepBalance: FfiConverterRustBuffer { let variant: Int32 = try readInt(&buf) switch variant { - case 1: return .pendingBroadcast( - channelId: try FfiConverterOptionTypeChannelId.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf) + case 1: return .pendingBroadcast(channelId: try FfiConverterOptionTypeChannelId.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf) ) - case 2: return .broadcastAwaitingConfirmation( - channelId: try FfiConverterOptionTypeChannelId.read(from: &buf), - latestBroadcastHeight: try FfiConverterUInt32.read(from: &buf), - latestSpendingTxid: try FfiConverterTypeTxid.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf) + case 2: return .broadcastAwaitingConfirmation(channelId: try FfiConverterOptionTypeChannelId.read(from: &buf), latestBroadcastHeight: try FfiConverterUInt32.read(from: &buf), latestSpendingTxid: try FfiConverterTypeTxid.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf) ) - case 3: return .awaitingThresholdConfirmations( - channelId: try FfiConverterOptionTypeChannelId.read(from: &buf), - latestSpendingTxid: try FfiConverterTypeTxid.read(from: &buf), - 
confirmationHash: try FfiConverterTypeBlockHash.read(from: &buf), - confirmationHeight: try FfiConverterUInt32.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf) + case 3: return .awaitingThresholdConfirmations(channelId: try FfiConverterOptionTypeChannelId.read(from: &buf), latestSpendingTxid: try FfiConverterTypeTxid.read(from: &buf), confirmationHash: try FfiConverterTypeBlockHash.read(from: &buf), confirmationHeight: try FfiConverterUInt32.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf) ) default: throw UniffiInternalError.unexpectedEnumCase @@ -4844,31 +5435,196 @@ public struct FfiConverterTypePendingSweepBalance: FfiConverterRustBuffer { FfiConverterUInt64.write(amountSatoshis, into: &buf) - case let .awaitingThresholdConfirmations(channelId,latestSpendingTxid,confirmationHash,confirmationHeight,amountSatoshis): + case let .awaitingThresholdConfirmations(channelId,latestSpendingTxid,confirmationHash,confirmationHeight,amountSatoshis): + writeInt(&buf, Int32(3)) + FfiConverterOptionTypeChannelId.write(channelId, into: &buf) + FfiConverterTypeTxid.write(latestSpendingTxid, into: &buf) + FfiConverterTypeBlockHash.write(confirmationHash, into: &buf) + FfiConverterUInt32.write(confirmationHeight, into: &buf) + FfiConverterUInt64.write(amountSatoshis, into: &buf) + + } + } +} + + +public func FfiConverterTypePendingSweepBalance_lift(_ buf: RustBuffer) throws -> PendingSweepBalance { + return try FfiConverterTypePendingSweepBalance.lift(buf) +} + +public func FfiConverterTypePendingSweepBalance_lower(_ value: PendingSweepBalance) -> RustBuffer { + return FfiConverterTypePendingSweepBalance.lower(value) +} + + + +extension PendingSweepBalance: Equatable, Hashable {} + + + +// Note that we don't yet support `indirect` for enums. +// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
+ +public enum QrPaymentResult { + + case onchain(txid: Txid + ) + case bolt11(paymentId: PaymentId + ) + case bolt12(paymentId: PaymentId + ) +} + + +public struct FfiConverterTypeQrPaymentResult: FfiConverterRustBuffer { + typealias SwiftType = QrPaymentResult + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> QrPaymentResult { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .onchain(txid: try FfiConverterTypeTxid.read(from: &buf) + ) + + case 2: return .bolt11(paymentId: try FfiConverterTypePaymentId.read(from: &buf) + ) + + case 3: return .bolt12(paymentId: try FfiConverterTypePaymentId.read(from: &buf) + ) + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: QrPaymentResult, into buf: inout [UInt8]) { + switch value { + + + case let .onchain(txid): + writeInt(&buf, Int32(1)) + FfiConverterTypeTxid.write(txid, into: &buf) + + + case let .bolt11(paymentId): + writeInt(&buf, Int32(2)) + FfiConverterTypePaymentId.write(paymentId, into: &buf) + + + case let .bolt12(paymentId): + writeInt(&buf, Int32(3)) + FfiConverterTypePaymentId.write(paymentId, into: &buf) + + } + } +} + + +public func FfiConverterTypeQrPaymentResult_lift(_ buf: RustBuffer) throws -> QrPaymentResult { + return try FfiConverterTypeQrPaymentResult.lift(buf) +} + +public func FfiConverterTypeQrPaymentResult_lower(_ value: QrPaymentResult) -> RustBuffer { + return FfiConverterTypeQrPaymentResult.lower(value) +} + + + +extension QrPaymentResult: Equatable, Hashable {} + + + + +public enum VssHeaderProviderError { + + + + case InvalidData(message: String) + + case RequestError(message: String) + + case AuthorizationError(message: String) + + case InternalError(message: String) + +} + + +public struct FfiConverterTypeVssHeaderProviderError: FfiConverterRustBuffer { + typealias SwiftType = VssHeaderProviderError + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> VssHeaderProviderError { + let variant: Int32 = try readInt(&buf) + switch variant { + + + + + case 1: return .InvalidData( + message: try FfiConverterString.read(from: &buf) + ) + + case 2: return .RequestError( + message: try FfiConverterString.read(from: &buf) + ) + + case 3: return .AuthorizationError( + message: try FfiConverterString.read(from: &buf) + ) + + case 4: return .InternalError( + message: try FfiConverterString.read(from: &buf) + ) + + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: VssHeaderProviderError, into buf: inout [UInt8]) { + switch value { + + + + + case .InvalidData(_ /* message is ignored*/): + writeInt(&buf, Int32(1)) + case .RequestError(_ /* message is ignored*/): + writeInt(&buf, Int32(2)) + case .AuthorizationError(_ /* message is ignored*/): writeInt(&buf, Int32(3)) - FfiConverterOptionTypeChannelId.write(channelId, into: &buf) - FfiConverterTypeTxid.write(latestSpendingTxid, into: &buf) - FfiConverterTypeBlockHash.write(confirmationHash, into: &buf) - FfiConverterUInt32.write(confirmationHeight, into: &buf) - FfiConverterUInt64.write(amountSatoshis, into: &buf) - + case .InternalError(_ /* message is ignored*/): + writeInt(&buf, Int32(4)) + + } } } -public func FfiConverterTypePendingSweepBalance_lift(_ buf: RustBuffer) throws -> PendingSweepBalance { - return try FfiConverterTypePendingSweepBalance.lift(buf) -} - -public func FfiConverterTypePendingSweepBalance_lower(_ value: PendingSweepBalance) -> RustBuffer { - return 
FfiConverterTypePendingSweepBalance.lower(value) -} +extension VssHeaderProviderError: Equatable, Hashable {} +extension VssHeaderProviderError: Error { } -extension PendingSweepBalance: Equatable, Hashable {} +fileprivate struct FfiConverterOptionUInt8: FfiConverterRustBuffer { + typealias SwiftType = UInt8? + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterUInt8.write(value, into: &buf) + } + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterUInt8.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} fileprivate struct FfiConverterOptionUInt16: FfiConverterRustBuffer { typealias SwiftType = UInt16? @@ -4933,8 +5689,8 @@ fileprivate struct FfiConverterOptionUInt64: FfiConverterRustBuffer { } } -fileprivate struct FfiConverterOptionString: FfiConverterRustBuffer { - typealias SwiftType = String? +fileprivate struct FfiConverterOptionBool: FfiConverterRustBuffer { + typealias SwiftType = Bool? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -4942,20 +5698,20 @@ fileprivate struct FfiConverterOptionString: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterString.write(value, into: &buf) + FfiConverterBool.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterString.read(from: &buf) + case 1: return try FfiConverterBool.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeChannelConfig: FfiConverterRustBuffer { - typealias SwiftType = ChannelConfig? +fileprivate struct FfiConverterOptionString: FfiConverterRustBuffer { + typealias SwiftType = String? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -4963,13 +5719,13 @@ fileprivate struct FfiConverterOptionTypeChannelConfig: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterTypeChannelConfig.write(value, into: &buf) + FfiConverterString.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeChannelConfig.read(from: &buf) + case 1: return try FfiConverterString.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } @@ -4996,6 +5752,27 @@ fileprivate struct FfiConverterOptionTypeAnchorChannelsConfig: FfiConverterRustB } } +fileprivate struct FfiConverterOptionTypeChannelConfig: FfiConverterRustBuffer { + typealias SwiftType = ChannelConfig? 
+ + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeChannelConfig.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeChannelConfig.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + fileprivate struct FfiConverterOptionTypeChannelInfo: FfiConverterRustBuffer { typealias SwiftType = ChannelInfo? @@ -5038,6 +5815,27 @@ fileprivate struct FfiConverterOptionTypeChannelUpdateInfo: FfiConverterRustBuff } } +fileprivate struct FfiConverterOptionTypeEsploraSyncConfig: FfiConverterRustBuffer { + typealias SwiftType = EsploraSyncConfig? + + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeEsploraSyncConfig.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeEsploraSyncConfig.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + fileprivate struct FfiConverterOptionTypeNodeAnnouncementInfo: FfiConverterRustBuffer { typealias SwiftType = NodeAnnouncementInfo? @@ -5122,6 +5920,27 @@ fileprivate struct FfiConverterOptionTypePaymentDetails: FfiConverterRustBuffer } } +fileprivate struct FfiConverterOptionTypeSendingParameters: FfiConverterRustBuffer { + typealias SwiftType = SendingParameters? + + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeSendingParameters.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeSendingParameters.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + fileprivate struct FfiConverterOptionTypeClosureReason: FfiConverterRustBuffer { typealias SwiftType = ClosureReason? @@ -5164,6 +5983,27 @@ fileprivate struct FfiConverterOptionTypeEvent: FfiConverterRustBuffer { } } +fileprivate struct FfiConverterOptionTypeMaxTotalRoutingFeeLimit: FfiConverterRustBuffer { + typealias SwiftType = MaxTotalRoutingFeeLimit? + + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeMaxTotalRoutingFeeLimit.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeMaxTotalRoutingFeeLimit.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + fileprivate struct FfiConverterOptionTypePaymentFailureReason: FfiConverterRustBuffer { typealias SwiftType = PaymentFailureReason? 
@@ -5227,6 +6067,27 @@ fileprivate struct FfiConverterOptionTypeChannelId: FfiConverterRustBuffer { } } +fileprivate struct FfiConverterOptionTypeNodeAlias: FfiConverterRustBuffer { + typealias SwiftType = NodeAlias? + + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeNodeAlias.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeNodeAlias.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + fileprivate struct FfiConverterOptionTypePaymentHash: FfiConverterRustBuffer { typealias SwiftType = PaymentHash? @@ -5332,6 +6193,27 @@ fileprivate struct FfiConverterOptionTypePublicKey: FfiConverterRustBuffer { } } +fileprivate struct FfiConverterOptionTypeUntrustedString: FfiConverterRustBuffer { + typealias SwiftType = UntrustedString? + + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeUntrustedString.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeUntrustedString.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + fileprivate struct FfiConverterSequenceUInt8: FfiConverterRustBuffer { typealias SwiftType = [UInt8] @@ -5552,6 +6434,29 @@ fileprivate struct FfiConverterSequenceTypeSocketAddress: FfiConverterRustBuffer } } +fileprivate struct FfiConverterDictionaryStringString: FfiConverterRustBuffer { + public static func write(_ value: [String: String], into buf: inout [UInt8]) { + let len = Int32(value.count) + writeInt(&buf, len) + for (key, value) in value { + FfiConverterString.write(key, into: &buf) + FfiConverterString.write(value, into: &buf) + } + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> [String: String] { + let len: Int32 = try readInt(&buf) + var dict = [String: String]() + dict.reserveCapacity(Int(len)) + for _ in 0.. RustBuffer { +/** + * Typealias from the type name used in the UDL file to the builtin type. This + * is needed because the UDL type name is used in function/method signatures. + */ +public typealias NodeAlias = String +public struct FfiConverterTypeNodeAlias: FfiConverter { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> NodeAlias { + return try FfiConverterString.read(from: &buf) + } + + public static func write(_ value: NodeAlias, into buf: inout [UInt8]) { + return FfiConverterString.write(value, into: &buf) + } + + public static func lift(_ value: RustBuffer) throws -> NodeAlias { + return try FfiConverterString.lift(value) + } + + public static func lower(_ value: NodeAlias) -> RustBuffer { + return FfiConverterString.lower(value) + } +} + + +public func FfiConverterTypeNodeAlias_lift(_ value: RustBuffer) throws -> NodeAlias { + return try FfiConverterTypeNodeAlias.lift(value) +} + +public func FfiConverterTypeNodeAlias_lower(_ value: NodeAlias) -> RustBuffer { + return FfiConverterTypeNodeAlias.lower(value) +} + + + /** * Typealias from the type name used in the UDL file to the builtin type. 
This * is needed because the UDL type name is used in function/method signatures. @@ -6200,11 +7139,13 @@ public func FfiConverterTypeUserChannelId_lower(_ value: UserChannelId) -> RustB private let UNIFFI_RUST_FUTURE_POLL_READY: Int8 = 0 private let UNIFFI_RUST_FUTURE_POLL_MAYBE_READY: Int8 = 1 +fileprivate let uniffiContinuationHandleMap = UniffiHandleMap>() + fileprivate func uniffiRustCallAsync( - rustFutureFunc: () -> UnsafeMutableRawPointer, - pollFunc: (UnsafeMutableRawPointer, @escaping UniFfiRustFutureContinuation, UnsafeMutableRawPointer) -> (), - completeFunc: (UnsafeMutableRawPointer, UnsafeMutablePointer) -> F, - freeFunc: (UnsafeMutableRawPointer) -> (), + rustFutureFunc: () -> UInt64, + pollFunc: (UInt64, @escaping UniffiRustFutureContinuationCallback, UInt64) -> (), + completeFunc: (UInt64, UnsafeMutablePointer) -> F, + freeFunc: (UInt64) -> (), liftFunc: (F) throws -> T, errorHandler: ((RustBuffer) throws -> Error)? ) async throws -> T { @@ -6218,7 +7159,11 @@ fileprivate func uniffiRustCallAsync( var pollResult: Int8; repeat { pollResult = await withUnsafeContinuation { - pollFunc(rustFuture, uniffiFutureContinuationCallback, ContinuationHolder($0).toOpaque()) + pollFunc( + rustFuture, + uniffiFutureContinuationCallback, + uniffiContinuationHandleMap.insert(obj: $0) + ) } } while pollResult != UNIFFI_RUST_FUTURE_POLL_READY @@ -6230,44 +7175,24 @@ fileprivate func uniffiRustCallAsync( // Callback handlers for an async calls. These are invoked by Rust when the future is ready. They // lift the return value or error and resume the suspended function. -fileprivate func uniffiFutureContinuationCallback(ptr: UnsafeMutableRawPointer, pollResult: Int8) { - ContinuationHolder.fromOpaque(ptr).resume(pollResult) -} - -// Wraps UnsafeContinuation in a class so that we can use reference counting when passing it across -// the FFI -fileprivate class ContinuationHolder { - let continuation: UnsafeContinuation - - init(_ continuation: UnsafeContinuation) { - self.continuation = continuation - } - - func resume(_ pollResult: Int8) { - self.continuation.resume(returning: pollResult) - } - - func toOpaque() -> UnsafeMutableRawPointer { - return Unmanaged.passRetained(self).toOpaque() +fileprivate func uniffiFutureContinuationCallback(handle: UInt64, pollResult: Int8) { + if let continuation = try? uniffiContinuationHandleMap.remove(handle: handle) { + continuation.resume(returning: pollResult) + } else { + print("uniffiFutureContinuationCallback invalid handle") } - - static func fromOpaque(_ ptr: UnsafeRawPointer) -> ContinuationHolder { - return Unmanaged.fromOpaque(ptr).takeRetainedValue() - } -} -public func defaultConfig() -> Config { - return try! FfiConverterTypeConfig.lift( - try! rustCall() { - uniffi_ldk_node_fn_func_default_config($0) } +public func defaultConfig() -> Config { + return try! FfiConverterTypeConfig.lift(try! rustCall() { + uniffi_ldk_node_fn_func_default_config($0 ) +}) } -public func generateEntropyMnemonic() -> Mnemonic { - return try! FfiConverterTypeMnemonic.lift( - try! rustCall() { - uniffi_ldk_node_fn_func_generate_entropy_mnemonic($0) -} +public func generateEntropyMnemonic() -> Mnemonic { + return try! FfiConverterTypeMnemonic.lift(try! rustCall() { + uniffi_ldk_node_fn_func_generate_entropy_mnemonic($0 ) +}) } private enum InitializationResult { @@ -6279,7 +7204,7 @@ private enum InitializationResult { // the code inside is only computed once. 
private var initializationResult: InitializationResult { // Get the bindings contract version from our ComponentInterface - let bindings_contract_version = 25 + let bindings_contract_version = 26 // Get the scaffolding contract version by calling the into the dylib let scaffolding_contract_version = ffi_ldk_node_uniffi_contract_version() if bindings_contract_version != scaffolding_contract_version { @@ -6315,7 +7240,7 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_bolt11payment_receive_via_jit_channel() != 50555) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_send() != 35346) { + if (uniffi_ldk_node_checksum_method_bolt11payment_send() != 39133) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_bolt11payment_send_probes() != 39625) { @@ -6324,25 +7249,25 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_bolt11payment_send_probes_using_amount() != 25010) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_send_using_amount() != 15471) { + if (uniffi_ldk_node_checksum_method_bolt11payment_send_using_amount() != 19557) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_initiate_refund() != 15379) { + if (uniffi_ldk_node_checksum_method_bolt12payment_initiate_refund() != 38039) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_receive() != 20864) { + if (uniffi_ldk_node_checksum_method_bolt12payment_receive() != 15049) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_receive_variable_amount() != 10863) { + if (uniffi_ldk_node_checksum_method_bolt12payment_receive_variable_amount() != 7279) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_bolt12payment_request_refund_payment() != 61945) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_send() != 15282) { + if (uniffi_ldk_node_checksum_method_bolt12payment_send() != 56449) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_send_using_amount() != 21384) { + if (uniffi_ldk_node_checksum_method_bolt12payment_send_using_amount() != 26006) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_builder_build() != 785) { @@ -6351,70 +7276,49 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_builder_build_with_fs_store() != 61304) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_builder_set_entropy_bip39_mnemonic() != 827) { - return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_entropy_seed_bytes() != 44799) { - return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_entropy_seed_path() != 64056) { - return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_esplora_server() != 7044) { - return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_gossip_source_p2p() != 9279) { - return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_gossip_source_rgs() != 64312) { - return 
InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_liquidity_source_lsps2() != 2667) { - return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_listening_addresses() != 14051) { + if (uniffi_ldk_node_checksum_method_builder_build_with_vss_store() != 2871) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_builder_set_network() != 27539) { + if (uniffi_ldk_node_checksum_method_builder_build_with_vss_store_and_fixed_headers() != 24910) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_builder_set_storage_dir_path() != 59019) { + if (uniffi_ldk_node_checksum_method_builder_build_with_vss_store_and_header_provider() != 9090) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_accept_underpaying_htlcs() != 45655) { + if (uniffi_ldk_node_checksum_method_builder_set_chain_source_bitcoind_rpc() != 2111) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_cltv_expiry_delta() != 19044) { + if (uniffi_ldk_node_checksum_method_builder_set_chain_source_esplora() != 1781) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_force_close_avoidance_max_fee_satoshis() != 69) { + if (uniffi_ldk_node_checksum_method_builder_set_entropy_bip39_mnemonic() != 827) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_forwarding_fee_base_msat() != 3400) { + if (uniffi_ldk_node_checksum_method_builder_set_entropy_seed_bytes() != 44799) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_forwarding_fee_proportional_millionths() != 31794) { + if (uniffi_ldk_node_checksum_method_builder_set_entropy_seed_path() != 64056) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_accept_underpaying_htlcs() != 27275) { + if (uniffi_ldk_node_checksum_method_builder_set_gossip_source_p2p() != 9279) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_cltv_expiry_delta() != 40735) { + if (uniffi_ldk_node_checksum_method_builder_set_gossip_source_rgs() != 64312) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_force_close_avoidance_max_fee_satoshis() != 48479) { + if (uniffi_ldk_node_checksum_method_builder_set_liquidity_source_lsps2() != 2667) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_forwarding_fee_base_msat() != 29831) { + if (uniffi_ldk_node_checksum_method_builder_set_listening_addresses() != 14051) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_forwarding_fee_proportional_millionths() != 65060) { + if (uniffi_ldk_node_checksum_method_builder_set_network() != 27539) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_max_dust_htlc_exposure_from_fee_rate_multiplier() != 4707) { + if (uniffi_ldk_node_checksum_method_builder_set_node_alias() != 18342) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_max_dust_htlc_exposure_from_fixed_limit() != 16864) { + if 
(uniffi_ldk_node_checksum_method_builder_set_storage_dir_path() != 59019) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_networkgraph_channel() != 38070) { @@ -6444,16 +7348,13 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_node_connect() != 34120) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_node_connect_open_channel() != 64763) { - return InitializationResult.apiChecksumMismatch - } if (uniffi_ldk_node_checksum_method_node_disconnect() != 43538) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_node_event_handled() != 47939) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_node_force_close_channel() != 44813) { + if (uniffi_ldk_node_checksum_method_node_force_close_channel() != 48831) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_node_list_balances() != 57528) { @@ -6480,19 +7381,28 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_node_next_event_async() != 25426) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_method_node_node_alias() != 29526) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_method_node_node_id() != 51489) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_node_onchain_payment() != 6092) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_method_node_open_announced_channel() != 36623) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_node_open_channel() != 40283) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_method_node_payment() != 60296) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_node_remove_payment() != 47952) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_node_sign_message() != 51392) { + if (uniffi_ldk_node_checksum_method_node_sign_message() != 49319) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_node_spontaneous_payment() != 37403) { @@ -6510,7 +7420,10 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_node_sync_wallets() != 32474) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_node_update_channel_config() != 38109) { + if (uniffi_ldk_node_checksum_method_node_unified_qr_payment() != 9837) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_node_update_channel_config() != 37852) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_node_verify_signature() != 20486) { @@ -6525,22 +7438,28 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_onchainpayment_send_all_to_address() != 20046) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_onchainpayment_send_to_address() != 34782) { + if (uniffi_ldk_node_checksum_method_onchainpayment_send_to_address() != 55731) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_spontaneouspayment_send() != 16613) { + if (uniffi_ldk_node_checksum_method_spontaneouspayment_send() != 48210) { return 
InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_spontaneouspayment_send_probes() != 25937) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_constructor_builder_from_config() != 64393) { + if (uniffi_ldk_node_checksum_method_unifiedqrpayment_receive() != 913) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_unifiedqrpayment_send() != 53900) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_constructor_builder_new() != 48442) { + if (uniffi_ldk_node_checksum_method_vssheaderprovider_get_headers() != 7788) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_constructor_channelconfig_new() != 24987) { + if (uniffi_ldk_node_checksum_constructor_builder_from_config() != 994) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_constructor_builder_new() != 40499) { return InitializationResult.apiChecksumMismatch } @@ -6556,4 +7475,6 @@ private func uniffiEnsureInitialized() { case .apiChecksumMismatch: fatalError("UniFFI API checksum mismatch: try cleaning and rebuilding your project") } -} \ No newline at end of file +} + +// swiftlint:enable all \ No newline at end of file From 3ae579ef86779add695a36659980ca25b0479e93 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 17 Oct 2024 15:09:48 +0200 Subject: [PATCH 114/127] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7597ce36f..fa0003538 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # 0.4.0 - Oct 17, 2024 -Besides numerous API improvements and bugfixes this this fourth minor release notably adds support for sourcing chain and fee rate data from a Bitcoin Core RPC backend, as well as experimental support for the [VSS] remote storage backend. +Besides numerous API improvements and bugfixes this fourth minor release notably adds support for sourcing chain and fee rate data from a Bitcoin Core RPC backend, as well as experimental support for the [VSS] remote storage backend. ## Feature and API updates - Support for multiple chain sources has been added. To this end, Esplora-specific configuration options can now be given via `EsploraSyncConfig` to `Builder::set_chain_source_esplora`. Furthermore, all configuration objects (including the main `Config`) is now exposed via the `config` sub-module (#365). 
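The 0.4.0 notes above mention that Esplora-specific options are now passed via `EsploraSyncConfig` to `Builder::set_chain_source_esplora`. As a minimal usage sketch only — the server URL is a placeholder and the exact setter signature is assumed from the `builder.rs` changes later in this series — wiring up an Esplora chain source looks roughly like this:

```rust
use ldk_node::Builder;

fn main() {
    let mut builder = Builder::new();
    // Passing `None` lets the builder fall back to `EsploraSyncConfig::default()`.
    builder.set_chain_source_esplora("https://blockstream.info/api".to_string(), None);

    let node = builder.build().expect("failed to build node");
    node.start().expect("failed to start node");
    // ... use the node, then shut down ...
    node.stop().expect("failed to stop node");
}
```
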
From 3684f29b4bc98e384bbb4bb645013327207fc6f7 Mon Sep 17 00:00:00 2001 From: Tommy Volk Date: Fri, 18 Oct 2024 12:11:40 +0100 Subject: [PATCH 115/127] fix: incremental and full scans are performed correctly --- src/chain/mod.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/chain/mod.rs b/src/chain/mod.rs index af77e6bee..a7906fc0f 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -481,6 +481,13 @@ impl ChainSource { } if incremental_sync { + let sync_request = onchain_wallet.get_incremental_sync_request(); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), + ); + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } else { let full_scan_request = onchain_wallet.get_full_scan_request(); let wallet_sync_timeout_fut = tokio::time::timeout( Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), @@ -491,13 +498,6 @@ impl ChainSource { ), ); get_and_apply_wallet_update!(wallet_sync_timeout_fut) - } else { - let sync_request = onchain_wallet.get_incremental_sync_request(); - let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), - ); - get_and_apply_wallet_update!(wallet_sync_timeout_fut) } }; From 296f390a1b49ef20ad1ab665c27fa2346c2a32fc Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Fri, 18 Oct 2024 12:27:41 -0500 Subject: [PATCH 116/127] Bump version number to 0.4.1 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 9602b1016..f5cae6b95 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ldk-node" -version = "0.4.0" +version = "0.4.1" authors = ["Elias Rohrer "] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" From c5719605943b797126ad12ad351a9d708bec2cd7 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Fri, 18 Oct 2024 12:29:55 -0500 Subject: [PATCH 117/127] Update CHANGELOG for v0.4.1 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa0003538..353d4744f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.4.1 - Oct 18, 2024 + +Fixes a wallet syncing issue where full syncs were used instead of incremental syncs, and vice versa (#383). + # 0.4.0 - Oct 17, 2024 Besides numerous API improvements and bugfixes this fourth minor release notably adds support for sourcing chain and fee rate data from a Bitcoin Core RPC backend, as well as experimental support for the [VSS] remote storage backend. 
From ec26f069ac30776316bb13c2bfaf24e4ad91d66d Mon Sep 17 00:00:00 2001 From: Fuyin Date: Wed, 23 Oct 2024 21:30:22 +0800 Subject: [PATCH 118/127] Fix invalid witness program length --- src/wallet/mod.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 494fcd768..271007bf9 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -387,10 +387,12 @@ where let script_pubkey = u.txout.script_pubkey; match script_pubkey.witness_version() { Some(version @ WitnessVersion::V0) => { - let witness_program = WitnessProgram::new(version, script_pubkey.as_bytes()) - .map_err(|e| { - log_error!(self.logger, "Failed to retrieve script payload: {}", e); - })?; + let witness_program = + WitnessProgram::new(version, &script_pubkey.as_bytes()[2..]).map_err( + |e| { + log_error!(self.logger, "Failed to retrieve script payload: {}", e); + }, + )?; let wpkh = WPubkeyHash::from_slice(&witness_program.program().as_bytes()) .map_err(|e| { @@ -400,10 +402,12 @@ where utxos.push(utxo); }, Some(version @ WitnessVersion::V1) => { - let witness_program = WitnessProgram::new(version, script_pubkey.as_bytes()) - .map_err(|e| { - log_error!(self.logger, "Failed to retrieve script payload: {}", e); - })?; + let witness_program = + WitnessProgram::new(version, &script_pubkey.as_bytes()[2..]).map_err( + |e| { + log_error!(self.logger, "Failed to retrieve script payload: {}", e); + }, + )?; XOnlyPublicKey::from_slice(&witness_program.program().as_bytes()).map_err( |e| { From 3b5f00b450de213044d904ca3a045f6a78acde02 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 28 Oct 2024 11:16:47 +0100 Subject: [PATCH 119/127] Add comments and debug assertions for `list_unspent_utxos` We previously erroneously included the version byte trying to construct a `WitnessProgram`, which was was recently fixed. Here we add some more comments to the code explaining what went wrong, and also add a debug assertion checking `list_unspent_utxos` retrieves at least one `Utxo` when we see any confirmed balances. --- src/wallet/mod.rs | 52 ++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 10 deletions(-) diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 271007bf9..d237e8c6a 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -205,6 +205,16 @@ where ) -> Result<(u64, u64), Error> { let balance = self.inner.lock().unwrap().balance(); + // Make sure `list_confirmed_utxos` returns at least one `Utxo` we could use to spend/bump + // Anchors if we have any confirmed amounts. + #[cfg(debug_assertions)] + if balance.confirmed != Amount::ZERO { + debug_assert!( + self.list_confirmed_utxos().map_or(false, |v| !v.is_empty()), + "Confirmed amounts should always be available for Anchor spending" + ); + } + let (total, spendable) = ( balance.total().to_sat(), balance.trusted_spendable().to_sat().saturating_sub(total_anchor_channels_reserve_sats), @@ -387,12 +397,23 @@ where let script_pubkey = u.txout.script_pubkey; match script_pubkey.witness_version() { Some(version @ WitnessVersion::V0) => { + // According to the SegWit rules of [BIP 141] a witness program is defined as: + // > A scriptPubKey (or redeemScript as defined in BIP16/P2SH) that consists of + // > a 1-byte push opcode (one of OP_0,OP_1,OP_2,.. .,OP_16) followed by a direct + // > data push between 2 and 40 bytes gets a new special meaning. The value of + // > the first push is called the "version byte". 
The following byte vector + // > pushed is called the "witness program"." + // + // We therefore skip the first byte we just read via `witness_version` and use + // the rest (i.e., the data push) as the raw bytes to construct the + // `WitnessProgram` below. + // + // [BIP 141]: https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#witness-program + let witness_bytes = &script_pubkey.as_bytes()[2..]; let witness_program = - WitnessProgram::new(version, &script_pubkey.as_bytes()[2..]).map_err( - |e| { - log_error!(self.logger, "Failed to retrieve script payload: {}", e); - }, - )?; + WitnessProgram::new(version, witness_bytes).map_err(|e| { + log_error!(self.logger, "Failed to retrieve script payload: {}", e); + })?; let wpkh = WPubkeyHash::from_slice(&witness_program.program().as_bytes()) .map_err(|e| { @@ -402,12 +423,23 @@ where utxos.push(utxo); }, Some(version @ WitnessVersion::V1) => { + // According to the SegWit rules of [BIP 141] a witness program is defined as: + // > A scriptPubKey (or redeemScript as defined in BIP16/P2SH) that consists of + // > a 1-byte push opcode (one of OP_0,OP_1,OP_2,.. .,OP_16) followed by a direct + // > data push between 2 and 40 bytes gets a new special meaning. The value of + // > the first push is called the "version byte". The following byte vector + // > pushed is called the "witness program"." + // + // We therefore skip the first byte we just read via `witness_version` and use + // the rest (i.e., the data push) as the raw bytes to construct the + // `WitnessProgram` below. + // + // [BIP 141]: https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#witness-program + let witness_bytes = &script_pubkey.as_bytes()[2..]; let witness_program = - WitnessProgram::new(version, &script_pubkey.as_bytes()[2..]).map_err( - |e| { - log_error!(self.logger, "Failed to retrieve script payload: {}", e); - }, - )?; + WitnessProgram::new(version, witness_bytes).map_err(|e| { + log_error!(self.logger, "Failed to retrieve script payload: {}", e); + })?; XOnlyPublicKey::from_slice(&witness_program.program().as_bytes()).map_err( |e| { From c228e7fb9ed0a0dee9340ec14a23396611e784e0 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 28 Oct 2024 11:38:14 +0100 Subject: [PATCH 120/127] Update `CHANGELOG` for v0.4.2 --- CHANGELOG.md | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 353d4744f..7cc63ec27 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,22 @@ +# 0.4.2 - Oct 28, 2024 + +This patch release fixes an issue that prohibited the node from using available confirmed on-chain funds to spend/bump Anchor outputs (#387). + +In total, this release features 1 files changed, 40 insertions, 4 deletions in 3 commits from 3 authors, in alphabetical order: + +- Fuyin +- Elias Rohrer + + # 0.4.1 - Oct 18, 2024 -Fixes a wallet syncing issue where full syncs were used instead of incremental syncs, and vice versa (#383). +This patch release fixes a wallet syncing issue where full syncs were used instead of incremental syncs, and vice versa (#383). 
+ +In total, this release features 3 files changed, 13 insertions, 9 deletions in 6 commits from 3 authors, in alphabetical order: + +- Jeffrey Czyz +- Elias Rohrer +- Tommy Volk # 0.4.0 - Oct 17, 2024 From e177de52c1453a44b6f33de0c9da0fdd08154f10 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 28 Oct 2024 11:40:12 +0100 Subject: [PATCH 121/127] Bump version number to v0.4.2 --- Cargo.toml | 2 +- Package.swift | 2 +- bindings/kotlin/ldk-node-android/gradle.properties | 2 +- bindings/kotlin/ldk-node-jvm/gradle.properties | 2 +- bindings/python/pyproject.toml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f5cae6b95..73fe0a1b7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ldk-node" -version = "0.4.1" +version = "0.4.2" authors = ["Elias Rohrer "] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" diff --git a/Package.swift b/Package.swift index 253db6e68..a6a66c2df 100644 --- a/Package.swift +++ b/Package.swift @@ -3,7 +3,7 @@ import PackageDescription -let tag = "v0.4.0" +let tag = "v0.4.2" let checksum = "5dcdfdd6e3331062d649786fa6e758487227f6037d9881353fe0c293a3a4c7e0" let url = "https://github.com/lightningdevkit/ldk-node/releases/download/\(tag)/LDKNodeFFI.xcframework.zip" diff --git a/bindings/kotlin/ldk-node-android/gradle.properties b/bindings/kotlin/ldk-node-android/gradle.properties index c84f2c46c..44a51cfaf 100644 --- a/bindings/kotlin/ldk-node-android/gradle.properties +++ b/bindings/kotlin/ldk-node-android/gradle.properties @@ -2,4 +2,4 @@ org.gradle.jvmargs=-Xmx1536m android.useAndroidX=true android.enableJetifier=true kotlin.code.style=official -libraryVersion=0.4.0 +libraryVersion=0.4.2 diff --git a/bindings/kotlin/ldk-node-jvm/gradle.properties b/bindings/kotlin/ldk-node-jvm/gradle.properties index a84d6e412..338b60d96 100644 --- a/bindings/kotlin/ldk-node-jvm/gradle.properties +++ b/bindings/kotlin/ldk-node-jvm/gradle.properties @@ -1,3 +1,3 @@ org.gradle.jvmargs=-Xmx1536m kotlin.code.style=official -libraryVersion=0.4.0 +libraryVersion=0.4.2 diff --git a/bindings/python/pyproject.toml b/bindings/python/pyproject.toml index 7d24d7884..781542ec3 100644 --- a/bindings/python/pyproject.toml +++ b/bindings/python/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ldk_node" -version = "0.4.0" +version = "0.4.2" authors = [ { name="Elias Rohrer", email="dev@tnull.de" }, ] From 8a7a591456bea5a5ae08dd35a4d757b7a44b22e6 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 28 Oct 2024 11:52:11 +0100 Subject: [PATCH 122/127] Update Swift files for v0.4.2 --- Package.swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Package.swift b/Package.swift index a6a66c2df..059052492 100644 --- a/Package.swift +++ b/Package.swift @@ -4,7 +4,7 @@ import PackageDescription let tag = "v0.4.2" -let checksum = "5dcdfdd6e3331062d649786fa6e758487227f6037d9881353fe0c293a3a4c7e0" +let checksum = "95ea5307eb3a99203e39cfa21d962bfe3e879e62429e8c7cdf5292cae5dc35cc" let url = "https://github.com/lightningdevkit/ldk-node/releases/download/\(tag)/LDKNodeFFI.xcframework.zip" let package = Package( From 9bd2cd49bf3b76825e2413029e3b44a6621926ae Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Sat, 2 Nov 2024 14:45:57 +0700 Subject: [PATCH 123/127] chore: log sync start times --- src/chain/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/chain/mod.rs b/src/chain/mod.rs index a7906fc0f..097a04ca1 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -420,6 +420,8 @@ 
impl ChainSource { let incremental_sync = node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + log_info!(logger, "Starting onchain wallet sync"); + macro_rules! get_and_apply_wallet_update { ($sync_future: expr) => {{ let now = Instant::now(); @@ -555,6 +557,7 @@ impl ChainSource { tx_sync.sync(confirmables), ); let now = Instant::now(); + log_info!(logger, "Starting lightning wallet sync"); match timeout_fut.await { Ok(res) => match res { Ok(()) => { @@ -758,6 +761,7 @@ impl ChainSource { .. } => { let now = Instant::now(); + log_info!(logger, "Starting fee estimates sync"); let estimates = tokio::time::timeout( Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), esplora_client.get_fee_estimates(), From d8d0557c9f139c34588011fb767055c55eb1b41e Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Sat, 2 Nov 2024 15:03:01 +0700 Subject: [PATCH 124/127] alby: always do full onchain sync --- src/chain/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 097a04ca1..e951fc10f 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -417,8 +417,8 @@ impl ChainSource { let res = { // If this is our first sync, do a full scan with the configured gap limit. // Otherwise just do an incremental sync. - let incremental_sync = - node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + // Alby: always do full sync + let incremental_sync = false; //node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); log_info!(logger, "Starting onchain wallet sync"); From 500df72b2c7e9f2f86dbcaa28ad9db570832b499 Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Sat, 2 Nov 2024 15:03:13 +0700 Subject: [PATCH 125/127] chore: update log copy --- src/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/builder.rs b/src/builder.rs index 4651a832e..d6f6bee60 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -818,7 +818,7 @@ fn build_with_store_internal( let chain_source = match chain_data_source_config { Some(ChainDataSourceConfig::Esplora { server_url, sync_config }) => { - log_info!(logger, "Using custom esplora server: {}", server_url); + log_info!(logger, "Using esplora server: {}", server_url); let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default()); Arc::new(ChainSource::new_esplora( server_url.clone(), From 1b02d2b481a559f64514678a7c5d45f7ce008246 Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Mon, 4 Nov 2024 13:40:52 +0700 Subject: [PATCH 126/127] fix: decrease BDK_CLIENT_STOP_GAP, re-enable incremental sync --- src/chain/mod.rs | 4 ++-- src/config.rs | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/chain/mod.rs b/src/chain/mod.rs index e951fc10f..097a04ca1 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -417,8 +417,8 @@ impl ChainSource { let res = { // If this is our first sync, do a full scan with the configured gap limit. // Otherwise just do an incremental sync. 
- // Alby: always do full sync - let incremental_sync = false; //node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + let incremental_sync = + node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); log_info!(logger, "Starting onchain wallet sync"); diff --git a/src/config.rs b/src/config.rs index 473261059..8624b9293 100644 --- a/src/config.rs +++ b/src/config.rs @@ -36,12 +36,15 @@ pub(crate) const ENABLE_BACKGROUND_SYNC: bool = false; // The 'stop gap' parameter used by BDK's wallet sync. This seems to configure the threshold // number of derivation indexes after which BDK stops looking for new scripts belonging to the wallet. -pub(crate) const BDK_CLIENT_STOP_GAP: usize = 20; +// Alby: decreased to do less unnecessary requests. The actual amount is twice this (for internal + external addresses) +// but also is based on the BDK_CLIENT_CONCURRENCY below (using less than 4 has no effect). +pub(crate) const BDK_CLIENT_STOP_GAP: usize = 1; // 20 // The number of concurrent requests made against the API provider. pub(crate) const BDK_CLIENT_CONCURRENCY: usize = 4; // The timeout after which we abandon retrying failed payments. +// Alby: increase this for extra retries pub(crate) const LDK_PAYMENT_RETRY_TIMEOUT: Duration = Duration::from_secs(50); // (10); // The interval (in block height) after which we retry archiving fully resolved channel monitors. From 0c70be776b1966a5dbfd39834b273d264dedb3c6 Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Mon, 4 Nov 2024 16:13:35 +0700 Subject: [PATCH 127/127] fix: revert change to BDK_CLIENT_STOP_GAP --- src/config.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/config.rs b/src/config.rs index 8624b9293..fd48eece4 100644 --- a/src/config.rs +++ b/src/config.rs @@ -36,9 +36,8 @@ pub(crate) const ENABLE_BACKGROUND_SYNC: bool = false; // The 'stop gap' parameter used by BDK's wallet sync. This seems to configure the threshold // number of derivation indexes after which BDK stops looking for new scripts belonging to the wallet. -// Alby: decreased to do less unnecessary requests. The actual amount is twice this (for internal + external addresses) -// but also is based on the BDK_CLIENT_CONCURRENCY below (using less than 4 has no effect). -pub(crate) const BDK_CLIENT_STOP_GAP: usize = 1; // 20 +// Alby: this is only used in the first ever sync. Afterward, it only checks ~4 external and interal addresses (until more addresses are generated) +pub(crate) const BDK_CLIENT_STOP_GAP: usize = 20; // The number of concurrent requests made against the API provider. pub(crate) const BDK_CLIENT_CONCURRENCY: usize = 4;
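
Circling back to the witness-program fix from the `list_confirmed_utxos` patches above: the following is a small, self-contained sketch of the failure mode, assuming the rust-bitcoin API those diffs rely on (`ScriptBuf::new_p2wpkh`, `Script::witness_version`, `WitnessProgram::new`) and a dummy 20-byte pubkey hash:

```rust
use bitcoin::hashes::Hash;
use bitcoin::{ScriptBuf, WPubkeyHash, WitnessProgram};

fn main() {
    // Dummy 20-byte pubkey hash, only used to build an example P2WPKH scriptPubKey.
    let wpkh = WPubkeyHash::from_slice(&[0x11; 20]).expect("20 bytes");
    let script_pubkey = ScriptBuf::new_p2wpkh(&wpkh);

    // A v0 P2WPKH scriptPubKey is `OP_0 OP_PUSHBYTES_20 <20-byte program>`, i.e., 22 bytes.
    assert_eq!(script_pubkey.len(), 22);
    let version = script_pubkey.witness_version().expect("segwit output");

    // The original bug: handing the *full* scriptPubKey to `WitnessProgram::new` fails,
    // since a v0 witness program must be exactly 20 or 32 bytes of push data.
    assert!(WitnessProgram::new(version, script_pubkey.as_bytes()).is_err());

    // The fix: skip the version byte and the push opcode, keeping only the 20-byte program.
    let program = WitnessProgram::new(version, &script_pubkey.as_bytes()[2..])
        .expect("valid witness program");
    assert_eq!(program.program().as_bytes(), &[0x11u8; 20][..]);
}
```

This mirrors the debug assertion added in the same series: whenever confirmed on-chain funds exist, `list_confirmed_utxos` should be able to reconstruct at least one spendable `Utxo` for Anchor spending, which only works once the witness program is parsed from the push data rather than from the whole scriptPubKey.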