@@ -1517,10 +1517,10 @@ where
 				return false;
 			}
 		}
-		!self
-			.channel_by_id
-			.iter()
-			.any(|(_, channel)| channel.is_funded() || channel.funding().is_outbound())
+		let chan_is_funded_or_outbound = |(_, channel): (_, &Channel<SP>)| {
+			channel.is_funded() || channel.funding().is_outbound()
+		};
+		!self.channel_by_id.iter().any(chan_is_funded_or_outbound)
 			&& self.monitor_update_blocked_actions.is_empty()
 			&& self.closed_channel_monitor_update_ids.is_empty()
 	}
@@ -3313,17 +3313,14 @@ macro_rules! emit_funding_tx_broadcast_safe_event {
 macro_rules! emit_channel_pending_event {
 	($locked_events: expr, $channel: expr) => {
 		if $channel.context.should_emit_channel_pending_event() {
+			let funding_txo = $channel.funding.get_funding_txo().unwrap();
 			$locked_events.push_back((
 				events::Event::ChannelPending {
 					channel_id: $channel.context.channel_id(),
 					former_temporary_channel_id: $channel.context.temporary_channel_id(),
 					counterparty_node_id: $channel.context.get_counterparty_node_id(),
 					user_channel_id: $channel.context.get_user_id(),
-					funding_txo: $channel
-						.funding
-						.get_funding_txo()
-						.unwrap()
-						.into_bitcoin_outpoint(),
+					funding_txo: funding_txo.into_bitcoin_outpoint(),
 					channel_type: Some($channel.funding.get_channel_type().clone()),
 				},
 				None,
@@ -3798,8 +3795,8 @@ where
 		let mut outbound_scid_alias = 0;
 		let mut i = 0;
 		loop {
+			// fuzzing chacha20 doesn't use the key at all so we always get the same alias
 			if cfg!(fuzzing) {
-				// fuzzing chacha20 doesn't use the key at all so we always get the same alias
 				outbound_scid_alias += 1;
 			} else {
 				outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(
@@ -3931,22 +3928,17 @@ where
 			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state = &mut *peer_state_lock;
-				res.extend(
-					peer_state
-						.channel_by_id
-						.iter()
-						// Only `Channels` in the `Channel::Funded` phase can be considered funded.
-						.filter(|(_, chan)| chan.is_funded())
-						.filter(f)
-						.map(|(_channel_id, channel)| {
-							ChannelDetails::from_channel(
-								channel,
-								best_block_height,
-								peer_state.latest_features.clone(),
-								&self.fee_estimator,
-							)
-						}),
-				);
+				// Only `Channels` in the `Channel::Funded` phase can be considered funded.
+				let filtered_chan_by_id =
+					peer_state.channel_by_id.iter().filter(|(_, chan)| chan.is_funded()).filter(f);
+				res.extend(filtered_chan_by_id.map(|(_channel_id, channel)| {
+					ChannelDetails::from_channel(
+						channel,
+						best_block_height,
+						peer_state.latest_features.clone(),
+						&self.fee_estimator,
+					)
+				}));
 			}
 		}
 		res
@@ -4013,12 +4005,8 @@ where
 					&self.fee_estimator,
 				)
 			};
-			return peer_state
-				.channel_by_id
-				.iter()
-				.map(|(_, chan)| (chan))
-				.map(channel_to_details)
-				.collect();
+			let chan_by_id = peer_state.channel_by_id.iter();
+			return chan_by_id.map(|(_, chan)| (chan)).map(channel_to_details).collect();
 		}
 		vec![]
 	}
@@ -8919,9 +8907,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	) -> Result<(), MsgHandleErrInternal> {
 		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
 			match channel.as_unfunded_v2_mut() {
-				Some(unfunded_channel) => Ok(unfunded_channel
-					.tx_add_output(msg)
-					.into_msg_send_event(counterparty_node_id)),
+				Some(unfunded_channel) => {
+					let msg_send_event = unfunded_channel
+						.tx_add_output(msg)
+						.into_msg_send_event(counterparty_node_id);
+					Ok(msg_send_event)
+				},
 				None => Err("tx_add_output"),
 			}
 		})
@@ -8932,9 +8923,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	) -> Result<(), MsgHandleErrInternal> {
 		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
 			match channel.as_unfunded_v2_mut() {
-				Some(unfunded_channel) => Ok(unfunded_channel
-					.tx_remove_input(msg)
-					.into_msg_send_event(counterparty_node_id)),
+				Some(unfunded_channel) => {
+					let msg_send_event = unfunded_channel
+						.tx_remove_input(msg)
+						.into_msg_send_event(counterparty_node_id);
+					Ok(msg_send_event)
+				},
 				None => Err("tx_remove_input"),
 			}
 		})
@@ -8945,9 +8939,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	) -> Result<(), MsgHandleErrInternal> {
 		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
 			match channel.as_unfunded_v2_mut() {
-				Some(unfunded_channel) => Ok(unfunded_channel
-					.tx_remove_output(msg)
-					.into_msg_send_event(counterparty_node_id)),
+				Some(unfunded_channel) => {
+					let msg_send_event = unfunded_channel
+						.tx_remove_output(msg)
+						.into_msg_send_event(counterparty_node_id);
+					Ok(msg_send_event)
+				},
 				None => Err("tx_remove_output"),
 			}
 		})
@@ -9657,13 +9654,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
 		let num_forward_events = pending_events
 			.iter()
-			.filter(|(ev, _)| {
-				if let events::Event::PendingHTLCsForwardable { .. } = ev {
-					true
-				} else {
-					false
-				}
-			})
+			.filter(|(ev, _)| matches!(ev, events::Event::PendingHTLCsForwardable { .. }))
 			.count();
 		// We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
 		// events is done in batches and they are not removed until we're done processing each
@@ -10954,30 +10945,32 @@ where
 		payer_note: Option<String>, payment_id: PaymentId, retry_strategy: Retry,
 		route_params_config: RouteParametersConfig,
 	) -> Result<(), Bolt12SemanticError> {
+		let create_pending_payment_fn = |invoice_request: &InvoiceRequest, nonce| {
+			let expiration = StaleExpiration::TimerTicks(1);
+			let retryable_invoice_request = RetryableInvoiceRequest {
+				invoice_request: invoice_request.clone(),
+				nonce,
+				needs_retry: true,
+			};
+			self.pending_outbound_payments
+				.add_new_awaiting_invoice(
+					payment_id,
+					expiration,
+					retry_strategy,
+					route_params_config,
+					Some(retryable_invoice_request),
+				)
+				.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
+		};
+
 		self.pay_for_offer_intern(
 			offer,
 			quantity,
 			amount_msats,
 			payer_note,
 			payment_id,
 			None,
-			|invoice_request, nonce| {
-				let expiration = StaleExpiration::TimerTicks(1);
-				let retryable_invoice_request = RetryableInvoiceRequest {
-					invoice_request: invoice_request.clone(),
-					nonce,
-					needs_retry: true,
-				};
-				self.pending_outbound_payments
-					.add_new_awaiting_invoice(
-						payment_id,
-						expiration,
-						retry_strategy,
-						route_params_config,
-						Some(retryable_invoice_request),
-					)
-					.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
-			},
+			create_pending_payment_fn,
 		)
 	}
 
@@ -11281,9 +11274,8 @@ where
 	}
 
 	fn get_peers_for_blinded_path(&self) -> Vec<MessageForwardNode> {
-		self.per_peer_state
-			.read()
-			.unwrap()
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		per_peer_state
 			.iter()
 			.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
 			.filter(|(_, peer)| peer.is_connected)
@@ -12103,13 +12095,10 @@ where
 		self.do_chain_event(None, |channel| {
 			if let Some(funding_txo) = channel.funding.get_funding_txo() {
 				if funding_txo.txid == *txid {
-					channel
-						.funding_transaction_unconfirmed(&&WithChannelContext::from(
-							&self.logger,
-							&channel.context,
-							None,
-						))
-						.map(|()| (None, Vec::new(), None))
+					let chan_context =
+						WithChannelContext::from(&self.logger, &channel.context, None);
+					let res = channel.funding_transaction_unconfirmed(&&chan_context);
+					res.map(|()| (None, Vec::new(), None))
 				} else {
 					Ok((None, Vec::new(), None))
 				}
@@ -12436,13 +12425,13 @@ where
 	MR::Target: MessageRouter,
 	L::Target: Logger,
 {
-	fn handle_open_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannel) {
+	fn handle_open_channel(&self, counterparty_node_id: PublicKey, message: &msgs::OpenChannel) {
 		// Note that we never need to persist the updated ChannelManager for an inbound
 		// open_channel message - pre-funded channels are never written so there should be no
 		// change to the contents.
 		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
-			let res =
-				self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V1(msg));
+			let msg = OpenChannelMessageRef::V1(message);
+			let res = self.internal_open_channel(&counterparty_node_id, msg);
 			let persist = match &res {
 				Err(e) if e.closes_channel() => {
 					debug_assert!(false, "We shouldn't close a new channel");
@@ -12951,16 +12940,10 @@ where
 	{
 		let RetryableInvoiceRequest { invoice_request, nonce, .. } = retryable_invoice_request;
 
-		if self
-			.flow
-			.enqueue_invoice_request(
-				invoice_request,
-				payment_id,
-				nonce,
-				self.get_peers_for_blinded_path(),
-			)
-			.is_err()
-		{
+		let peers = self.get_peers_for_blinded_path();
+		let enqueue_invreq_res =
+			self.flow.enqueue_invoice_request(invoice_request, payment_id, nonce, peers);
+		if enqueue_invreq_res.is_err() {
 			log_warn!(
 				self.logger,
 				"Retry failed for invoice request with payment_id {}",
@@ -14069,11 +14052,9 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
 	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
 		let len: u64 = Readable::read(reader)?;
 		const MAX_ALLOC_SIZE: u64 = 1024 * 16;
-		let mut events: Self = VecDeque::with_capacity(cmp::min(
-			MAX_ALLOC_SIZE
-				/ mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64,
-			len,
-		) as usize);
+		let event_size = mem::size_of::<(events::Event, Option<EventCompletionAction>)>();
+		let mut events: Self =
+			VecDeque::with_capacity(cmp::min(MAX_ALLOC_SIZE / event_size as u64, len) as usize);
 		for _ in 0..len {
 			let ev_opt = MaybeReadable::read(reader)?;
 			let action = Readable::read(reader)?;