use crate::arch::wasm32;
use crate::cmp;
use crate::mem;
use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use crate::sys::locks::Mutex;
use crate::time::Duration;
// On this platform the "movable" condvar is the condvar type itself: no
// separate boxed/indirect form is needed.
pub type MovableCondvar = Condvar;
14 // Condition variables are implemented with a simple counter internally that is
15 // likely to cause spurious wakeups. Blocking on a condition variable will first
16 // read the value of the internal counter, unlock the given mutex, and then
17 // block if and only if the counter's value is still the same. Notifying a
18 // condition variable will modify the counter (add one for now) and then wake up
19 // a thread waiting on the address of the counter.
21 // A thread waiting on the condition variable will as a result avoid going to
22 // sleep if it's notified after the lock is unlocked but before it fully goes to
23 // sleep. A sleeping thread is guaranteed to be woken up at some point as it can
24 // only be woken up with a call to `wake`.
26 // Note that it's possible for 2 or more threads to be woken up by a call to
27 // `notify_one` with this implementation. That can happen where the modification
28 // of `cnt` causes any threads in the middle of `wait` to avoid going to sleep,
29 // and the subsequent `wake` may wake up a thread that's actually blocking. We
30 // consider this a spurious wakeup, though, which all users of condition
31 // variables must already be prepared to handle. As a result, this source of
// spurious wakeups is currently thought to be ok, although it may be problematic
33 // later on if it causes too many spurious wakeups.
36 pub const fn new() -> Condvar
{
37 Condvar { cnt: AtomicUsize::new(0) }
41 pub unsafe fn init(&mut self) {
45 pub unsafe fn notify_one(&self) {
46 self.cnt
.fetch_add(1, SeqCst
);
47 // SAFETY: ptr() is always valid
49 wasm32
::memory_atomic_notify(self.ptr(), 1);
54 pub unsafe fn notify_all(&self) {
55 self.cnt
.fetch_add(1, SeqCst
);
56 // SAFETY: ptr() is always valid
58 wasm32
::memory_atomic_notify(self.ptr(), u32::MAX
); // -1 == "wake everyone"
62 pub unsafe fn wait(&self, mutex
: &Mutex
) {
63 // "atomically block and unlock" implemented by loading our current
64 // counter's value, unlocking the mutex, and blocking if the counter
65 // still has the same value.
67 // Notifications happen by incrementing the counter and then waking a
68 // thread. Incrementing the counter after we unlock the mutex will
69 // prevent us from sleeping and otherwise the call to `wake` will
70 // wake us up once we're asleep.
71 let ticket
= self.cnt
.load(SeqCst
) as i32;
73 let val
= wasm32
::memory_atomic_wait32(self.ptr(), ticket
, -1);
74 // 0 == woken, 1 == not equal to `ticket`, 2 == timeout (shouldn't happen)
75 debug_assert
!(val
== 0 || val
== 1);
79 pub unsafe fn wait_timeout(&self, mutex
: &Mutex
, dur
: Duration
) -> bool
{
80 let ticket
= self.cnt
.load(SeqCst
) as i32;
82 let nanos
= dur
.as_nanos();
83 let nanos
= cmp
::min(i64::MAX
as u128
, nanos
);
85 // If the return value is 2 then a timeout happened, so we return
86 // `false` as we weren't actually notified.
87 let ret
= wasm32
::memory_atomic_wait32(self.ptr(), ticket
, nanos
as i64) != 2;
93 pub unsafe fn destroy(&self) {
98 fn ptr(&self) -> *mut i32 {
99 assert_eq
!(mem
::size_of
::<usize>(), mem
::size_of
::<i32>());
100 self.cnt
.as_mut_ptr() as *mut i32