use crate::arch::wasm32;
use crate::cell::UnsafeCell;
use crate::mem;
use crate::sync::atomic::{AtomicUsize, AtomicU32, Ordering::SeqCst};
use crate::sys::thread;

pub struct Mutex {
    locked: AtomicUsize,
}

// Mutexes have a pretty simple implementation: they contain an `i32` that is
// 0 when unlocked and 1 when locked. Acquisition has a fast path that
// attempts to cmpxchg the 0 to a 1; if that fails, the thread waits for a
// notification. Releasing the lock swaps the 0 back in and then wakes one
// waiter, if any are present.

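// A minimal usage sketch of the protocol described above. This helper is
// hypothetical (not part of this module's API) and assumes a single thread,
// so no waiting actually occurs; it only exercises the 0 <-> 1 transitions.
#[allow(dead_code)]
fn _mutex_usage_sketch() {
    let mut m = Mutex::new();
    unsafe {
        m.init();
        assert!(m.try_lock()); // fast path: cmpxchg flips 0 to 1
        assert!(!m.try_lock()); // already locked: the cmpxchg sees 1 and fails
        m.unlock(); // swaps 0 back in and notifies one waiter, if any
        m.destroy();
    }
}
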
impl Mutex {
    pub const fn new() -> Mutex {
        Mutex { locked: AtomicUsize::new(0) }
    }

    #[inline]
    pub unsafe fn init(&mut self) {
        // nothing to do
    }

    pub unsafe fn lock(&self) {
        while !self.try_lock() {
            let val = wasm32::i32_atomic_wait(
                self.ptr(),
                1,  // we expect our mutex is locked
                -1, // wait infinitely
            );
            // We should either have woken up (0) or hit a not-equal due to a
            // race (1). We should never time out (2).
            debug_assert!(val == 0 || val == 1);
        }
    }

    pub unsafe fn unlock(&self) {
        let prev = self.locked.swap(0, SeqCst);
        debug_assert_eq!(prev, 1); // we should have held the lock
        wasm32::atomic_notify(self.ptr(), 1); // wake up one waiter, if any
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
    }

    #[inline]
    pub unsafe fn destroy(&self) {
        // nothing to do
    }

    #[inline]
    fn ptr(&self) -> *mut i32 {
        // The wait/notify intrinsics operate on an `i32`, so this cast is
        // only valid while `usize` is 32 bits wide, which the assert checks.
        assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
        self.locked.as_mut_ptr() as *mut i32
    }
}

pub struct ReentrantMutex {
    owner: AtomicU32,
    recursions: UnsafeCell<u32>,
}

unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}

// Reentrant mutexes are implemented much like the mutexes above, except that
// instead of storing a 1 when locked we store the id of the thread holding
// the lock. That way the atomic always holds the id of the owning thread (or
// 0 if the lock is unlocked).
//
// When a thread acquires the lock recursively, which it detects by seeing its
// own id already stored there, it updates a local `recursions` counter in a
// nonatomic fashion (which is fine, since we hold the lock). The lock is only
// fully released once this recursion counter reaches 0.

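// An illustrative sketch of the owner-id protocol (a hypothetical helper, not
// part of this module's API): the same thread may lock repeatedly, and only
// the final `unlock` clears `owner` and notifies a waiter.
#[allow(dead_code)]
fn _reentrant_usage_sketch() {
    unsafe {
        let mut m = ReentrantMutex::uninitialized();
        m.init();
        m.lock(); // owner: 0 -> my_id + 1, recursions stays 0
        m.lock(); // same thread re-enters: recursions 0 -> 1
        m.unlock(); // recursions 1 -> 0; the lock is still held
        m.unlock(); // recursions == 0: owner -> 0, wake one waiter
        m.destroy();
    }
}
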
impl ReentrantMutex {
    pub unsafe fn uninitialized() -> ReentrantMutex {
        ReentrantMutex {
            owner: AtomicU32::new(0),
            recursions: UnsafeCell::new(0),
        }
    }

    pub unsafe fn init(&mut self) {
        // nothing to do...
    }

    pub unsafe fn lock(&self) {
        let me = thread::my_id();
        while let Err(owner) = self._try_lock(me) {
            // as above: we should have woken up (0) or raced (1), and we
            // should never time out (2)
            let val = wasm32::i32_atomic_wait(self.ptr(), owner as i32, -1);
            debug_assert!(val == 0 || val == 1);
        }
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        self._try_lock(thread::my_id()).is_ok()
    }
107 | #[inline] | |
108 | unsafe fn _try_lock(&self, id: u32) -> Result<(), u32> { | |
109 | let id = id.checked_add(1).unwrap(); // make sure `id` isn't 0 | |
110 | match self.owner.compare_exchange(0, id, SeqCst, SeqCst) { | |
111 | // we transitioned from unlocked to locked | |
112 | Ok(_) => { | |
113 | debug_assert_eq!(*self.recursions.get(), 0); | |
114 | Ok(()) | |
115 | } | |
116 | ||
117 | // we currently own this lock, so let's update our count and return | |
118 | // true. | |
119 | Err(n) if n == id => { | |
120 | *self.recursions.get() += 1; | |
121 | Ok(()) | |
122 | } | |
123 | ||
124 | // Someone else owns the lock, let our caller take care of it | |
125 | Err(other) => Err(other), | |
126 | } | |
127 | } | |

    pub unsafe fn unlock(&self) {
        // If we never locked this mutex recursively then we fully unlock it
        // and wake up a waiter, if any. Otherwise we decrement our recursion
        // counter and let a later `unlock` take care of the zero.
        match *self.recursions.get() {
            0 => {
                self.owner.swap(0, SeqCst);
                wasm32::atomic_notify(self.ptr(), 1); // wake up one waiter, if any
            }
            ref mut n => *n -= 1,
        }
    }

    pub unsafe fn destroy(&self) {
        // nothing to do...
    }

    #[inline]
    fn ptr(&self) -> *mut i32 {
        self.owner.as_mut_ptr() as *mut i32
    }
}