@@ -3372,8 +3372,12 @@ macro_rules! process_events_body {
33723372
33733373 // TODO: This behavior should be documented. It's unintuitive that we query
33743374 // ChannelMonitors when clearing other events.
3375- if $self.process_pending_monitor_events() {
3376- result = NotifyOption::DoPersist;
3375+ match $self.process_pending_monitor_events() {
3376+ NotifyOption::DoPersist => result = NotifyOption::DoPersist,
3377+ NotifyOption::SkipPersistHandleEvents
3378+ if result == NotifyOption::SkipPersistNoEvents =>
3379+ result = NotifyOption::SkipPersistHandleEvents,
3380+ _ => {},
33773381 }
33783382 }
33793383
@@ -10165,13 +10169,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1016510169 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
1016610170 let update_completed = self.handle_monitor_update_res(update_res, logger);
1016710171 if update_completed {
10168-				Some(self.try_resume_channel_post_monitor_update(
10172+ let (data, _needs_persist) = self.try_resume_channel_post_monitor_update(
1016910173 in_flight_monitor_updates,
1017010174 monitor_update_blocked_actions,
1017110175 pending_msg_events,
1017210176 is_connected,
1017310177 chan,
10174- ))
10178+ );
10179+ Some(data)
1017510180 } else {
1017610181 None
1017710182 }
@@ -10231,13 +10236,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1023110236 );
1023210237
1023310238 let completion_data = if all_updates_complete {
10234-			Some(self.try_resume_channel_post_monitor_update(
10239+ let (data, _needs_persist) = self.try_resume_channel_post_monitor_update(
1023510240 in_flight_monitor_updates,
1023610241 monitor_update_blocked_actions,
1023710242 pending_msg_events,
1023810243 is_connected,
1023910244 chan,
10240- ))
10245+ );
10246+ Some(data)
1024110247 } else {
1024210248 None
1024310249 };
@@ -10262,7 +10268,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1026210268 >,
1026310269 pending_msg_events: &mut Vec<MessageSendEvent>, is_connected: bool,
1026410270 chan: &mut FundedChannel<SP>,
10265- ) -> PostMonitorUpdateChanResume {
10271+	) -> (PostMonitorUpdateChanResume, bool) {
1026610272 let chan_id = chan.context.channel_id();
1026710273 let outbound_alias = chan.context.outbound_scid_alias();
1026810274 let counterparty_node_id = chan.context.get_counterparty_node_id();
@@ -10280,7 +10286,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1028010286
1028110287 if chan.blocked_monitor_updates_pending() != 0 {
1028210288 log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked");
10283- PostMonitorUpdateChanResume::Blocked { update_actions }
10289+ let needs_persist = !update_actions.is_empty();
10290+ (PostMonitorUpdateChanResume::Blocked { update_actions }, needs_persist)
1028410291 } else {
1028510292 log_debug!(logger, "Channel is open and awaiting update, resuming it");
1028610293 let updates = chan.monitor_updating_restored(
@@ -10311,6 +10318,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1031110318 None
1031210319 };
1031310320
10321+ // Checked before handle_channel_resumption moves these fields.
10322+ let has_state_changes = updates.funding_broadcastable.is_some()
10323+ || updates.channel_ready.is_some()
10324+ || updates.announcement_sigs.is_some();
10325+
1031410326 let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
1031510327 pending_msg_events,
1031610328 chan,
@@ -10333,19 +10345,32 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1033310345 let unbroadcasted_batch_funding_txid =
1033410346 chan.context.unbroadcasted_batch_funding_txid(&chan.funding);
1033510347
10336- PostMonitorUpdateChanResume::Unblocked {
10337- channel_id: chan_id,
10338- counterparty_node_id,
10339- funding_txo: chan.funding_outpoint(),
10340- user_channel_id: chan.context.get_user_id(),
10341- unbroadcasted_batch_funding_txid,
10342- update_actions,
10343- htlc_forwards,
10344- decode_update_add_htlcs,
10345- finalized_claimed_htlcs: updates.finalized_claimed_htlcs,
10346- failed_htlcs: updates.failed_htlcs,
10347- committed_outbound_htlc_sources: updates.committed_outbound_htlc_sources,
10348- }
10348+ // Queuing outbound messages (commitment_update, raa) alone does
10349+ // not require ChannelManager persistence.
10350+ let needs_persist = has_state_changes
10351+ || !updates.finalized_claimed_htlcs.is_empty()
10352+ || !updates.failed_htlcs.is_empty()
10353+ || !update_actions.is_empty()
10354+ || unbroadcasted_batch_funding_txid.is_some()
10355+ || !htlc_forwards.is_empty()
10356+ || decode_update_add_htlcs.is_some();
10357+
10358+ (
10359+ PostMonitorUpdateChanResume::Unblocked {
10360+ channel_id: chan_id,
10361+ counterparty_node_id,
10362+ funding_txo: chan.funding_outpoint(),
10363+ user_channel_id: chan.context.get_user_id(),
10364+ unbroadcasted_batch_funding_txid,
10365+ update_actions,
10366+ htlc_forwards,
10367+ decode_update_add_htlcs,
10368+ finalized_claimed_htlcs: updates.finalized_claimed_htlcs,
10369+ failed_htlcs: updates.failed_htlcs,
10370+ committed_outbound_htlc_sources: updates.committed_outbound_htlc_sources,
10371+ },
10372+ needs_persist,
10373+ )
1034910374 }
1035010375 }
1035110376
@@ -10614,13 +10639,14 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1061410639 }
1061510640
1061610641 #[rustfmt::skip]
10617- fn channel_monitor_updated(&self, channel_id: &ChannelId, highest_applied_update_id: Option<u64>, counterparty_node_id: &PublicKey) {
10642+ fn channel_monitor_updated(&self, channel_id: &ChannelId, highest_applied_update_id: Option<u64>, counterparty_node_id: &PublicKey) -> bool {
1061810643 debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
1061910644
1062010645 let per_peer_state = self.per_peer_state.read().unwrap();
1062110646 let mut peer_state_lock;
1062210647 let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
10623- if peer_state_mutex_opt.is_none() { return }
10648+ // Peer is gone; conservatively request persistence.
10649+ if peer_state_mutex_opt.is_none() { return true }
1062410650 peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
1062510651 let peer_state = &mut *peer_state_lock;
1062610652
@@ -10652,15 +10678,15 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1065210678 } else { 0 };
1065310679
1065410680 if remaining_in_flight != 0 {
10655- return;
10681			return false;
1065610682 }
1065710683
1065810684 if let Some(chan) = peer_state.channel_by_id
1065910685 .get_mut(channel_id)
1066010686 .and_then(Channel::as_funded_mut)
1066110687 {
1066210688 if chan.is_awaiting_monitor_update() {
10663- let completion_data = self.try_resume_channel_post_monitor_update(
10689				let (completion_data, needs_persist) = self.try_resume_channel_post_monitor_update(
1066410690 &mut peer_state.in_flight_monitor_updates,
1066510691 &mut peer_state.monitor_update_blocked_actions,
1066610692 &mut peer_state.pending_msg_events,
@@ -10675,16 +10701,20 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1067510701
1067610702 self.handle_post_monitor_update_chan_resume(completion_data);
1067710703 self.handle_holding_cell_free_result(holding_cell_res);
10704+ needs_persist
1067810705 } else {
1067910706 log_trace!(logger, "Channel is open but not awaiting update");
10707+ false
1068010708 }
1068110709 } else {
1068210710 let update_actions = peer_state.monitor_update_blocked_actions
1068310711 .remove(channel_id).unwrap_or(Vec::new());
10712+ let needs_persist = !update_actions.is_empty();
1068410713 log_trace!(logger, "Channel is closed, applying {} post-update actions", update_actions.len());
1068510714 mem::drop(peer_state_lock);
1068610715 mem::drop(per_peer_state);
1068710716 self.handle_monitor_update_completion_actions(update_actions);
10717+ needs_persist
1068810718 }
1068910719 }
1069010720
@@ -13013,19 +13043,21 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1301313043 Ok(())
1301413044 }
1301513045
13016- /// Process pending events from the [`chain::Watch`], returning whether any events were processed.
13017- fn process_pending_monitor_events(&self) -> bool {
13046+ /// Process pending events from the [`chain::Watch`], returning the appropriate
13047+ /// [`NotifyOption`] for persistence and event handling.
13048+ fn process_pending_monitor_events(&self) -> NotifyOption {
1301813049 debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
1301913050
1302013051 let mut failed_channels: Vec<(Result<Infallible, _>, _)> = Vec::new();
1302113052 let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
13022-		let has_pending_monitor_events = !pending_monitor_events.is_empty();
13053		let mut result = NotifyOption::SkipPersistNoEvents;
1302313054 for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in
1302413055 pending_monitor_events.drain(..)
1302513056 {
1302613057 for monitor_event in monitor_events.drain(..) {
1302713058 match monitor_event {
1302813059 MonitorEvent::HTLCEvent(htlc_update) => {
13060+ result = NotifyOption::DoPersist;
1302913061 let logger = WithContext::from(
1303013062 &self.logger,
1303113063 Some(counterparty_node_id),
@@ -13078,6 +13110,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1307813110 },
1307913111 MonitorEvent::HolderForceClosed(_)
1308013112 | MonitorEvent::HolderForceClosedWithInfo { .. } => {
13113+ result = NotifyOption::DoPersist;
1308113114 let per_peer_state = self.per_peer_state.read().unwrap();
1308213115 if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
1308313116 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -13110,6 +13143,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1311013143 }
1311113144 },
1311213145 MonitorEvent::CommitmentTxConfirmed(_) => {
13146+ result = NotifyOption::DoPersist;
1311313147 let per_peer_state = self.per_peer_state.read().unwrap();
1311413148 if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
1311513149 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -13131,11 +13165,15 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1313113165 }
1313213166 },
1313313167 MonitorEvent::Completed { channel_id, monitor_update_id, .. } => {
13134- self.channel_monitor_updated(
13168+ if self.channel_monitor_updated(
1313513169 &channel_id,
1313613170 Some(monitor_update_id),
1313713171 &counterparty_node_id,
13138- );
13172+ ) {
13173+ result = NotifyOption::DoPersist;
13174+ } else if result == NotifyOption::SkipPersistNoEvents {
13175+ result = NotifyOption::SkipPersistHandleEvents;
13176+ }
1313913177 },
1314013178 }
1314113179 }
@@ -13145,7 +13183,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1314513183 let _ = self.handle_error(err, counterparty_node_id);
1314613184 }
1314713185
13148- has_pending_monitor_events
13186+ result
1314913187 }
1315013188
1315113189 fn handle_holding_cell_free_result(&self, result: FreeHoldingCellsResult) {
@@ -15171,8 +15209,6 @@ impl<
1517115209 fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
1517215210 let events = RefCell::new(Vec::new());
1517315211 PersistenceNotifierGuard::optionally_notify(self, || {
15174- let mut result = NotifyOption::SkipPersistNoEvents;
15175-
1517615212 // This method is quite performance-sensitive. Not only is it called very often, but it
1517715213 // *is* the critical path between generating a message for a peer and giving it to the
1517815214 // `PeerManager` to send. Thus, we should avoid adding any more logic here than we
@@ -15181,9 +15217,7 @@ impl<
1518115217
1518215218 // TODO: This behavior should be documented. It's unintuitive that we query
1518315219 // ChannelMonitors when clearing other events.
15184- if self.process_pending_monitor_events() {
15185- result = NotifyOption::DoPersist;
15186- }
15220+ let mut result = self.process_pending_monitor_events();
1518715221
1518815222 if self.maybe_generate_initial_closing_signed() {
1518915223 result = NotifyOption::DoPersist;
0 commit comments