use crate::arch::wasm32;
use crate::cell::UnsafeCell;
use crate::mem;
use crate::sync::atomic::{AtomicU32, AtomicUsize, Ordering::SeqCst};
use crate::sys::thread;

pub struct Mutex {
    locked: AtomicUsize,
}

pub type MovableMutex = Mutex;

// Mutexes have a pretty simple implementation where they contain an `i32`
// internally that is 0 when unlocked and 1 when the mutex is locked.
// Acquisition has a fast path where it attempts to cmpxchg the 0 to a 1, and
// if it fails it then waits for a notification. Releasing a lock is then done
// by swapping in 0 and then notifying any waiters, if present.
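//
// As a sketch of the state machine described above (`wait32`/`notify` refer
// to the wasm intrinsics used in the implementation below):
//
//   locked: 0 --cmpxchg(0 -> 1) succeeds--> 1    (fast-path acquire)
//   locked: 1 --wait32(expect 1)----------> .    (thread blocks until notified)
//   locked: 1 --swap(0), notify(1)--------> 0    (release, wake one waiter)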

impl Mutex {
    pub const fn new() -> Mutex {
        Mutex { locked: AtomicUsize::new(0) }
    }

    #[inline]
    pub unsafe fn init(&mut self) {
        // nothing to do
    }

    pub unsafe fn lock(&self) {
        while !self.try_lock() {
            // SAFETY: the caller must uphold the safety contract for `memory_atomic_wait32`.
            let val = unsafe {
                wasm32::memory_atomic_wait32(
                    self.ptr(),
                    1,  // we expect our mutex is locked
                    -1, // wait infinitely
                )
            };
            // we should have either been woken up (0) or observed a not-equal
            // value due to a race (1). We should never time out (2).
            debug_assert!(val == 0 || val == 1);
        }
    }

    pub unsafe fn unlock(&self) {
        let prev = self.locked.swap(0, SeqCst);
        debug_assert_eq!(prev, 1);
        wasm32::memory_atomic_notify(self.ptr(), 1); // wake up one waiter, if any
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
    }

    #[inline]
    pub unsafe fn destroy(&self) {
        // nothing to do
    }

    #[inline]
    fn ptr(&self) -> *mut i32 {
        // On wasm32 `usize` is 32 bits, so the `AtomicUsize` can be viewed as
        // the `i32` that the wait/notify intrinsics expect.
        assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
        self.locked.as_mut_ptr() as *mut i32
    }
}

pub struct ReentrantMutex {
    owner: AtomicU32,
    recursions: UnsafeCell<u32>,
}

unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}

// Reentrant mutexes are implemented similarly to the mutexes above, except
// that instead of "1" meaning locked we use the id of a thread to represent
// whether it has locked a mutex. That way we have an atomic counter which
// always holds the id of the thread that currently holds the lock (or 0 if the
// lock is unlocked).
//
// Once a thread acquires a lock recursively, which it detects by looking at
// the value that's already there, it will update a local `recursions` counter
// in a nonatomic fashion (as we hold the lock). The lock is then fully
// released when this recursion counter reaches 0.
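//
// A sketch of the `owner` field's encoding (derived from `_try_lock` below,
// which stores `thread id + 1` so that a thread id of 0 cannot collide with
// the "unlocked" sentinel):
//
//   owner == 0          => unlocked
//   owner == my_id + 1  => locked by us; bump `recursions` on re-acquisition
//   owner == other + 1  => locked by another thread; wait32 on `owner`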

impl ReentrantMutex {
    pub const unsafe fn uninitialized() -> ReentrantMutex {
        ReentrantMutex { owner: AtomicU32::new(0), recursions: UnsafeCell::new(0) }
    }

    pub unsafe fn init(&self) {
        // nothing to do...
    }

    pub unsafe fn lock(&self) {
        let me = thread::my_id();
        while let Err(owner) = self._try_lock(me) {
            // SAFETY: the caller must guarantee that `self.ptr()` and `owner`
            // are valid i32 values.
            let val = unsafe { wasm32::memory_atomic_wait32(self.ptr(), owner as i32, -1) };
            // as above: woken up (0) or raced (1), never a timeout (2)
            debug_assert!(val == 0 || val == 1);
        }
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        unsafe { self._try_lock(thread::my_id()).is_ok() }
    }

    #[inline]
    unsafe fn _try_lock(&self, id: u32) -> Result<(), u32> {
        // Offset the id by one so that a thread id of 0 can never be confused
        // with the 0 that means "unlocked".
        let id = id.checked_add(1).unwrap();
        match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
            // we transitioned from unlocked to locked
            Ok(_) => {
                debug_assert_eq!(*self.recursions.get(), 0);
                Ok(())
            }

            // we currently own this lock, so bump our recursion count and
            // report success
            Err(n) if n == id => {
                *self.recursions.get() += 1;
                Ok(())
            }

            // Someone else owns the lock, let our caller take care of it
            Err(other) => Err(other),
        }
    }

    pub unsafe fn unlock(&self) {
        // If we didn't ever recursively lock the lock then we fully unlock the
        // mutex and wake up a waiter, if any. Otherwise we decrement our
        // recursion counter and let someone else take care of the zero.
        match *self.recursions.get() {
            0 => {
                self.owner.swap(0, SeqCst);
                // SAFETY: the caller must guarantee that `self.ptr()` is a
                // valid i32 to pass to `memory_atomic_notify`.
                unsafe {
                    wasm32::memory_atomic_notify(self.ptr(), 1); // wake up one waiter, if any
                }
            }
            ref mut n => *n -= 1,
        }
    }

    pub unsafe fn destroy(&self) {
        // nothing to do...
    }

    #[inline]
    fn ptr(&self) -> *mut i32 {
        self.owner.as_mut_ptr() as *mut i32
    }
}
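
// A minimal single-threaded smoke test sketching how the raw API above is
// meant to be driven. This module is an illustration added here, not part of
// the original file; with only one thread involved, the `unsafe` contracts
// (balanced lock/unlock calls on a live mutex) are trivially upheld.
#[cfg(test)]
mod smoke_tests {
    use super::*;

    #[test]
    fn mutex_lock_unlock() {
        let m = Mutex::new();
        unsafe {
            m.lock();
            assert!(!m.try_lock()); // second acquisition fails while held
            m.unlock();
            assert!(m.try_lock()); // free again, fast path succeeds
            m.unlock();
        }
    }

    #[test]
    fn reentrant_mutex_recursion() {
        unsafe {
            let m = ReentrantMutex::uninitialized();
            m.init();
            assert!(m.try_lock()); // first acquisition: owner = my_id + 1
            assert!(m.try_lock()); // reentrant acquisition: recursions = 1
            m.unlock(); // recursions back to 0, still owned
            assert!(m.try_lock()); // same thread may re-acquire again
            m.unlock();
            m.unlock(); // fully released: owner back to 0
            m.destroy();
        }
    }
}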