Commit dcc24d1

Test if a given mutex is locked by the current thread in tests
In anticipation of the next commit(s) adding threaded tests, we need to ensure our lockorder checks work correctly with multiple threads. Sadly, we currently have tests of the form `assert!(mutex.try_lock().is_ok())` to assert that a given mutex is not locked by the caller of a function. Such a check fails as soon as any other thread happens to hold the mutex, even though the calling thread holds nothing. The fix is rather simple given we already track the mutexes held by each thread in our `debug_sync` logic: replace the check with a new extension trait which (for test builds) determines the locked state by looking only at what was locked by the current thread.
1 parent 6967cc4 · commit dcc24d1
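
The failure mode is easy to reproduce outside of LDK. Below is a minimal, self-contained sketch (illustrative only; the thread-local LOCKS_HELD set and held_by_current_thread helper are made-up stand-ins for debug_sync's real tracking): a try_lock()-based assertion trips whenever any thread holds the mutex, while a per-thread record keeps answering the question the tests actually ask.

use std::cell::RefCell;
use std::collections::HashSet;
use std::sync::{mpsc, Arc, Mutex};

thread_local! {
    // Toy stand-in for debug_sync's per-thread tracking: the set of lock IDs
    // held by the *current* thread only.
    static LOCKS_HELD: RefCell<HashSet<u64>> = RefCell::new(HashSet::new());
}

fn held_by_current_thread(lock_id: u64) -> bool {
    LOCKS_HELD.with(|held| held.borrow().contains(&lock_id))
}

fn main() {
    let mutex = Arc::new(Mutex::new(()));
    let m2 = Arc::clone(&mutex);
    let (locked_tx, locked_rx) = mpsc::channel();
    let (release_tx, release_rx) = mpsc::channel::<()>();

    // Another thread takes the lock and holds it until told to release.
    let handle = std::thread::spawn(move || {
        let _guard = m2.lock().unwrap();
        locked_tx.send(()).unwrap();
        release_rx.recv().unwrap();
    });
    locked_rx.recv().unwrap();

    // The old-style check now reports a "deadlock" even though this thread
    // holds nothing: try_lock() fails because *some* thread holds the mutex.
    assert!(mutex.try_lock().is_err());
    // The per-thread answer remains correct: this thread never recorded
    // holding lock ID 1.
    assert!(!held_by_current_thread(1));

    release_tx.send(()).unwrap();
    handle.join().unwrap();
}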

File tree: 5 files changed (+95, -19 lines)

lightning/src/ln/channelmanager.rs

Lines changed: 13 additions & 19 deletions

@@ -70,7 +70,7 @@ use crate::prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
 use crate::io::Read;
-use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock};
+use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
 use core::ops::Deref;
@@ -1173,13 +1173,10 @@ macro_rules! handle_error {
 		match $internal {
 			Ok(msg) => Ok(msg),
 			Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
-				#[cfg(any(feature = "_test_utils", test))]
-				{
-					// In testing, ensure there are no deadlocks where the lock is already held upon
-					// entering the macro.
-					debug_assert!($self.pending_events.try_lock().is_ok());
-					debug_assert!($self.per_peer_state.try_write().is_ok());
-				}
+				// In testing, ensure there are no deadlocks where the lock is already held upon
+				// entering the macro.
+				debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+				debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
 
 				let mut msg_events = Vec::with_capacity(2);
 
@@ -3636,17 +3633,14 @@ where
 	/// Fails an HTLC backwards to the sender of it to us.
 	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
 	fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
-		#[cfg(any(feature = "_test_utils", test))]
-		{
-			// Ensure that no peer state channel storage lock is not held when calling this
-			// function.
-			// This ensures that future code doesn't introduce a lock_order requirement for
-			// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
-			// this function with any `per_peer_state` peer lock aquired would.
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			for (_, peer) in per_peer_state.iter() {
-				debug_assert!(peer.try_lock().is_ok());
-			}
+		// Ensure that no peer state channel storage lock is not held when calling this
+		// function.
+		// This ensures that future code doesn't introduce a lock_order requirement for
+		// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
+		// this function with any `per_peer_state` peer lock aquired would.
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		for (_, peer) in per_peer_state.iter() {
+			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
 		}
 
 		//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
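
One detail worth noting (my reading, not spelled out in the commit): the new assertions use debug_assert_ne! against LockHeldState::HeldByThread rather than debug_assert_eq! against NotHeldByThread. That matters because the non-test std implementations (in lightning/src/sync/mod.rs below) return LockHeldState::Unsupported, which must also pass. A tiny sketch of the passing cases:

#[derive(Debug, PartialEq, Eq)]
enum LockHeldState { HeldByThread, NotHeldByThread, Unsupported }

fn main() {
    for state in [LockHeldState::NotHeldByThread, LockHeldState::Unsupported] {
        // Both the definite "not held" answer (test builds) and the
        // "can't tell" answer (builds where tracking is compiled out)
        // satisfy the assertion; only an actual HeldByThread trips it.
        debug_assert_ne!(state, LockHeldState::HeldByThread);
    }
}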

lightning/src/sync/debug_sync.rs

Lines changed: 28 additions & 0 deletions

@@ -14,6 +14,8 @@ use std::sync::Condvar as StdCondvar;
 
 use crate::prelude::HashMap;
 
+use super::{LockTestExt, LockHeldState};
+
 #[cfg(feature = "backtrace")]
 use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};
 
@@ -168,6 +170,18 @@ impl LockMetadata {
 	fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
 	fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }
 
+	fn held_by_thread(this: &Arc<LockMetadata>) -> LockHeldState {
+		let mut res = LockHeldState::NotHeldByThread;
+		LOCKS_HELD.with(|held| {
+			for (locked_idx, _locked) in held.borrow().iter() {
+				if *locked_idx == this.lock_idx {
+					res = LockHeldState::HeldByThread;
+				}
+			}
+		});
+		res
+	}
+
 	fn try_locked(this: &Arc<LockMetadata>) {
 		LOCKS_HELD.with(|held| {
 			// Since a try-lock will simply fail if the lock is held already, we do not
@@ -248,6 +262,13 @@ impl<T> Mutex<T> {
 	}
 }
 
+impl <T> LockTestExt for Mutex<T> {
+	#[inline]
+	fn held_by_thread(&self) -> LockHeldState {
+		LockMetadata::held_by_thread(&self.deps)
+	}
+}
+
 pub struct RwLock<T: Sized> {
 	inner: StdRwLock<T>,
 	deps: Arc<LockMetadata>,
@@ -332,4 +353,11 @@ impl<T> RwLock<T> {
 	}
 }
 
+impl <T> LockTestExt for RwLock<T> {
+	#[inline]
+	fn held_by_thread(&self) -> LockHeldState {
+		LockMetadata::held_by_thread(&self.deps)
+	}
+}
+
 pub type FairRwLock<T> = RwLock<T>;
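
A usage sketch for the test-build implementation above (a hypothetical test, not part of this commit, assuming this module's Mutex, LockTestExt, and LockHeldState are in scope, and that the guard's Drop removes the lock from LOCKS_HELD as debug_sync's existing tracking does):

#[test]
fn held_by_thread_follows_the_guard() {
    let mutex = Mutex::new(());
    assert_eq!(mutex.held_by_thread(), LockHeldState::NotHeldByThread);

    let guard = mutex.lock().unwrap();
    // lock() recorded this lock's lock_idx in the thread-local LOCKS_HELD,
    // so the same thread now sees it as held.
    assert_eq!(mutex.held_by_thread(), LockHeldState::HeldByThread);

    drop(guard);
    assert_eq!(mutex.held_by_thread(), LockHeldState::NotHeldByThread);
}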

lightning/src/sync/fairrwlock.rs

Lines changed: 9 additions & 0 deletions

@@ -1,5 +1,6 @@
 use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockResult};
 use std::sync::atomic::{AtomicUsize, Ordering};
+use super::{LockHeldState, LockTestExt};
 
 /// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
 /// Linux with pthreads under the hood, readers trivially and completely starve writers).
@@ -48,3 +49,11 @@ impl<T> FairRwLock<T> {
 		self.lock.try_write()
 	}
 }
+
+impl<T> LockTestExt for FairRwLock<T> {
+	#[inline]
+	fn held_by_thread(&self) -> LockHeldState {
+		// fairrwlock is only built in non-test modes, so we should never support tests.
+		LockHeldState::Unsupported
+	}
+}

lightning/src/sync/mod.rs

Lines changed: 28 additions & 0 deletions

@@ -1,3 +1,15 @@
+#[allow(dead_code)] // Depending on the compilation flags some variants are never used
+#[derive(Debug, PartialEq, Eq)]
+pub(crate) enum LockHeldState {
+	HeldByThread,
+	NotHeldByThread,
+	Unsupported,
+}
+
+pub(crate) trait LockTestExt {
+	fn held_by_thread(&self) -> LockHeldState;
+}
+
 #[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
 mod debug_sync;
 #[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
@@ -6,11 +18,27 @@ pub use debug_sync::*;
 // Note that to make debug_sync's regex work this must not contain `debug_string` in the module name
 mod test_lockorder_checks;
 
+#[cfg(all(any(feature = "_bench_unstable", not(test)), feature = "std"))]
+
+
 #[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
 pub(crate) mod fairrwlock;
 #[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
 pub use {std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}, fairrwlock::FairRwLock};
 
+#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
+mod ext_impl {
+	use super::*;
+	impl<T> LockTestExt for Mutex<T> {
+		#[inline]
+		fn held_by_thread(&self) -> LockHeldState { LockHeldState::Unsupported }
+	}
+	impl<T> LockTestExt for RwLock<T> {
+		#[inline]
+		fn held_by_thread(&self) -> LockHeldState { LockHeldState::Unsupported }
+	}
+}
+
 #[cfg(not(feature = "std"))]
 mod nostd_sync;
 #[cfg(not(feature = "std"))]
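
A side note on the ext_impl pattern above (my gloss, not from the commit): Rust trait impls apply crate-wide regardless of which module contains them, so the private mod ext_impl exists only to scope its use super::*; import; the impls themselves still take effect everywhere. A standalone illustration:

trait Flag {
    fn set(&self) -> bool;
}

mod ext_impl {
    // The impl lives in a private module, yet is usable everywhere:
    // trait-impl coherence ignores module privacy.
    impl super::Flag for u8 {
        fn set(&self) -> bool { *self != 0 }
    }
}

fn main() {
    assert!(1u8.set()); // works despite ext_impl being private
}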

lightning/src/sync/nostd_sync.rs

Lines changed: 17 additions & 0 deletions

@@ -2,6 +2,7 @@ pub use ::alloc::sync::Arc;
 use core::ops::{Deref, DerefMut};
 use core::time::Duration;
 use core::cell::{RefCell, Ref, RefMut};
+use super::{LockTestExt, LockHeldState};
 
 pub type LockResult<Guard> = Result<Guard, ()>;
 
@@ -61,6 +62,14 @@ impl<T> Mutex<T> {
 	}
 }
 
+impl<T> LockTestExt for Mutex<T> {
+	#[inline]
+	fn held_by_thread(&self) -> LockHeldState {
+		if self.lock().is_err() { return LockHeldState::HeldByThread; }
+		else { return LockHeldState::NotHeldByThread; }
+	}
+}
+
 pub struct RwLock<T: ?Sized> {
 	inner: RefCell<T>
 }
@@ -116,4 +125,12 @@ impl<T> RwLock<T> {
 	}
 }
 
+impl<T> LockTestExt for RwLock<T> {
+	#[inline]
+	fn held_by_thread(&self) -> LockHeldState {
+		if self.write().is_err() { return LockHeldState::HeldByThread; }
+		else { return LockHeldState::NotHeldByThread; }
+	}
+}
+
 pub type FairRwLock<T> = RwLock<T>;
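
The no-std implementations above can rely on a plain lock attempt, presumably because a no-std build runs on a single thread: "held by anyone" and "held by this thread" then coincide, and a failed re-borrow of the underlying RefCell is a reliable signal. A standalone sketch of that property using RefCell directly (not the crate's wrapper):

use core::cell::RefCell;

fn main() {
    let cell = RefCell::new(0u8);

    // Nothing outstanding: a mutable borrow succeeds (lock "not held").
    assert!(cell.try_borrow_mut().is_ok());

    let guard = cell.try_borrow_mut().unwrap();
    // While the guard lives, further mutable borrows fail; this is the
    // error that nostd_sync's held_by_thread() maps to HeldByThread.
    assert!(cell.try_borrow_mut().is_err());
    drop(guard);
}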
