@@ -3370,42 +3370,10 @@ macro_rules! handle_monitor_update_completion {
 	}};
 }
 
-/// Returns whether the monitor update is completed, `false` if the update is in-progress.
-fn handle_monitor_update_res<CM: AChannelManager, LG: Logger>(
-	cm: &CM, update_res: ChannelMonitorUpdateStatus, logger: LG,
-) -> bool {
-	debug_assert!(cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire));
-	match update_res {
-		ChannelMonitorUpdateStatus::UnrecoverableError => {
-			let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
-			log_error!(logger, "{}", err_str);
-			panic!("{}", err_str);
-		},
-		ChannelMonitorUpdateStatus::InProgress => {
-			#[cfg(not(any(test, feature = "_externalize_tests")))]
-			if cm.get_cm().monitor_update_type.swap(1, Ordering::Relaxed) == 2 {
-				panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
-			}
-			log_debug!(
-				logger,
-				"ChannelMonitor update in flight, holding messages until the update completes.",
-			);
-			false
-		},
-		ChannelMonitorUpdateStatus::Completed => {
-			#[cfg(not(any(test, feature = "_externalize_tests")))]
-			if cm.get_cm().monitor_update_type.swap(2, Ordering::Relaxed) == 1 {
-				panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
-			}
-			true
-		},
-	}
-}
-
 macro_rules! handle_initial_monitor {
 	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => {
 		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
-		let update_completed = handle_monitor_update_res($self, $update_res, logger);
+		let update_completed = $self.handle_monitor_update_res($update_res, logger);
 		if update_completed {
 			handle_monitor_update_completion!(
 				$self,
@@ -3418,69 +3386,17 @@ macro_rules! handle_initial_monitor {
 	};
 }
 
-fn handle_new_monitor_update_internal<CM: AChannelManager, LG: Logger>(
-	cm: &CM,
-	in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
-	channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey,
-	new_update: ChannelMonitorUpdate, logger: LG,
-) -> (bool, bool) {
-	let in_flight_updates = &mut in_flight_monitor_updates
-		.entry(channel_id)
-		.or_insert_with(|| (funding_txo, Vec::new()))
-		.1;
-	// During startup, we push monitor updates as background events through to here in
-	// order to replay updates that were in-flight when we shut down. Thus, we have to
-	// filter for uniqueness here.
-	let update_idx =
-		in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| {
-			in_flight_updates.push(new_update);
-			in_flight_updates.len() - 1
-		});
-
-	if cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire) {
-		let update_res =
-			cm.get_cm().chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]);
-		let update_completed = handle_monitor_update_res(cm, update_res, logger);
-		if update_completed {
-			let _ = in_flight_updates.remove(update_idx);
-		}
-		(update_completed, update_completed && in_flight_updates.is_empty())
-	} else {
-		// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
-		// fail to persist it. This is a fairly safe assumption, however, since anything we do
-		// during the startup sequence should be replayed exactly if we immediately crash.
-		let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-			counterparty_node_id,
-			funding_txo,
-			channel_id,
-			update: in_flight_updates[update_idx].clone(),
-		};
-		// We want to track the in-flight update both in `in_flight_monitor_updates` and in
-		// `pending_background_events` to avoid a race condition during
-		// `pending_background_events` processing where we complete one
-		// `ChannelMonitorUpdate` (but there are more pending as background events) but we
-		// conclude that all pending `ChannelMonitorUpdate`s have completed and it's safe to
-		// run post-completion actions.
-		// We could work around that with some effort, but it's simpler to just track updates
-		// twice.
-		cm.get_cm().pending_background_events.lock().unwrap().push(event);
-		(false, false)
-	}
-}
-
 macro_rules! handle_post_close_monitor_update {
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
 		$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr
 	) => {{
-		let (update_completed, all_updates_complete) = handle_new_monitor_update_internal(
-			$self,
+		let (update_completed, all_updates_complete) = $self.handle_new_monitor_update_internal(
 			&mut $peer_state.in_flight_monitor_updates,
 			$channel_id,
 			$funding_txo,
 			$counterparty_node_id,
 			$update,
-			WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None),
 		);
 		if all_updates_complete {
 			let update_actions = $peer_state
@@ -3510,14 +3426,12 @@ macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller {
 	(
 		$self: ident, $funding_txo: expr, $update: expr, $in_flight_monitor_updates: expr, $chan_context: expr
 	) => {{
-		let (update_completed, _all_updates_complete) = handle_new_monitor_update_internal(
-			$self,
+		let (update_completed, _all_updates_complete) = $self.handle_new_monitor_update_internal(
 			$in_flight_monitor_updates,
 			$chan_context.channel_id(),
 			$funding_txo,
 			$chan_context.get_counterparty_node_id(),
 			$update,
-			WithChannelContext::from(&$self.logger, &$chan_context, None),
 		);
 		update_completed
 	}};
@@ -3528,14 +3442,12 @@ macro_rules! handle_new_monitor_update {
 		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
 		$per_peer_state_lock: expr, $chan: expr
 	) => {{
-		let (update_completed, all_updates_complete) = handle_new_monitor_update_internal(
-			$self,
+		let (update_completed, all_updates_complete) = $self.handle_new_monitor_update_internal(
 			&mut $peer_state.in_flight_monitor_updates,
 			$chan.context.channel_id(),
 			$funding_txo,
 			$chan.context.get_counterparty_node_id(),
 			$update,
-			WithChannelContext::from(&$self.logger, &$chan.context, None),
 		);
 		if all_updates_complete {
 			handle_monitor_update_completion!(
@@ -9795,6 +9707,90 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		}
 	}
 
+	fn handle_new_monitor_update_internal(
+		&self,
+		in_flight_monitor_updates: &mut BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
+		channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey,
+		new_update: ChannelMonitorUpdate,
+	) -> (bool, bool) {
+		let in_flight_updates = &mut in_flight_monitor_updates
+			.entry(channel_id)
+			.or_insert_with(|| (funding_txo, Vec::new()))
+			.1;
+		// During startup, we push monitor updates as background events through to here in
+		// order to replay updates that were in-flight when we shut down. Thus, we have to
+		// filter for uniqueness here.
+		let update_idx =
+			in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| {
+				in_flight_updates.push(new_update);
+				in_flight_updates.len() - 1
+			});
+
+		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
+			let update_res =
+				self.chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]);
+			let logger =
+				WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
+			let update_completed = self.handle_monitor_update_res(update_res, logger);
+			if update_completed {
+				let _ = in_flight_updates.remove(update_idx);
+			}
+			(update_completed, update_completed && in_flight_updates.is_empty())
+		} else {
+			// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
+			// fail to persist it. This is a fairly safe assumption, however, since anything we do
+			// during the startup sequence should be replayed exactly if we immediately crash.
+			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+				counterparty_node_id,
+				funding_txo,
+				channel_id,
+				update: in_flight_updates[update_idx].clone(),
+			};
+			// We want to track the in-flight update both in `in_flight_monitor_updates` and in
+			// `pending_background_events` to avoid a race condition during
+			// `pending_background_events` processing where we complete one
+			// `ChannelMonitorUpdate` (but there are more pending as background events) but we
+			// conclude that all pending `ChannelMonitorUpdate`s have completed and it's safe to
+			// run post-completion actions.
+			// We could work around that with some effort, but it's simpler to just track updates
+			// twice.
+			self.pending_background_events.lock().unwrap().push(event);
+			(false, false)
+		}
+	}
+
+	/// Returns whether the monitor update is completed, `false` if the update is in-progress.
+	fn handle_monitor_update_res<LG: Logger>(
+		&self, update_res: ChannelMonitorUpdateStatus, logger: LG,
+	) -> bool {
+		debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire));
+		match update_res {
+			ChannelMonitorUpdateStatus::UnrecoverableError => {
+				let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
+				log_error!(logger, "{}", err_str);
+				panic!("{}", err_str);
+			},
+			ChannelMonitorUpdateStatus::InProgress => {
+				#[cfg(not(any(test, feature = "_externalize_tests")))]
+				if self.monitor_update_type.swap(1, Ordering::Relaxed) == 2 {
+					panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
+				}
+				log_debug!(
+					logger,
+					"ChannelMonitor update in flight, holding messages until the update completes.",
+				);
+				false
+			},
+			ChannelMonitorUpdateStatus::Completed => {
+				#[cfg(not(any(test, feature = "_externalize_tests")))]
+				if self.monitor_update_type.swap(2, Ordering::Relaxed) == 1 {
+					panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
+				}
+				true
+			},
+		}
+	}
+
 	/// Handles a channel reentering a functional state, either due to reconnect or a monitor
 	/// update completion.
 	#[rustfmt::skip]
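
A note on the uniqueness filter at the top of the new `handle_new_monitor_update_internal` method: because startup replays in-flight monitor updates as background events, the same `ChannelMonitorUpdate` can reach this code twice. Stripped of the LDK-specific types, the pattern reduces to the following sketch (the `dedup_index` name is illustrative, not part of the codebase):

```rust
/// Returns the index of `new_update` in `in_flight`, pushing it only if no
/// identical update is already queued (as happens when startup replays an
/// update that was in flight at shutdown).
fn dedup_index<T: PartialEq>(in_flight: &mut Vec<T>, new_update: T) -> usize {
	in_flight.iter().position(|upd| upd == &new_update).unwrap_or_else(|| {
		in_flight.push(new_update);
		in_flight.len() - 1
	})
}
```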
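
For context on the three variants matched in `handle_monitor_update_res`: the status comes back from the `ChannelMonitor` persistence layer via `chain_monitor.update_channel`. A minimal sketch of how a persister might pick a variant, assuming hypothetical `write_queued` and `fatal` outcomes that are not part of any LDK API:

```rust
use lightning::chain::ChannelMonitorUpdateStatus;

// Illustrative mapping from hypothetical persistence outcomes onto the
// variants handled above.
fn status_for_outcome(write_queued: bool, fatal: bool) -> ChannelMonitorUpdateStatus {
	if fatal {
		// Handled above by logging and panicking: the node cannot continue safely.
		ChannelMonitorUpdateStatus::UnrecoverableError
	} else if write_queued {
		// Handled above by returning `false`: messages are held until the
		// persister later signals completion.
		ChannelMonitorUpdateStatus::InProgress
	} else {
		// Handled above by returning `true`: the update is already durable.
		ChannelMonitorUpdateStatus::Completed
	}
}
```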
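
The `monitor_update_type.swap(..)` checks implement a small mutual-exclusion guard: outside of tests, a single run may use the `InProgress` flow or the `Completed` flow, but never both. A self-contained sketch of the same pattern, with illustrative names that are not LDK API:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// 0 = no update handled yet, 1 = InProgress seen, 2 = Completed seen.
struct UpdateModeGuard(AtomicUsize);

impl UpdateModeGuard {
	fn note_in_progress(&self) {
		// `swap` records the new mode and returns the previous one, so a mix
		// of modes is detected no matter which order they occur in.
		if self.0.swap(1, Ordering::Relaxed) == 2 {
			panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
		}
	}
	fn note_completed(&self) {
		if self.0.swap(2, Ordering::Relaxed) == 1 {
			panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
		}
	}
}
```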