rustc.git: vendor/parking_lot_core/src/thread_parker/cloudabi.rs (upstream version 1.44.1+dfsg1)
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use cloudabi as abi;
use core::{
    cell::Cell,
    mem::{self, MaybeUninit},
    sync::atomic::{AtomicU32, Ordering},
};
use std::{convert::TryFrom, thread, time::Instant};

extern "C" {
    // The current thread's ID, maintained by the C runtime; used to tag
    // write-locked lock words with their owner.
    #[thread_local]
    static __pthread_thread_id: abi::tid;
}

/// A CloudABI lock: a single `u32` word that is `LOCK_UNLOCKED` when free and
/// otherwise holds the owner's thread ID with the `LOCK_WRLOCKED` bit set.
/// The kernel sets `LOCK_KERNEL_MANAGED` once it starts tracking waiters.
struct Lock {
    lock: AtomicU32,
}

impl Lock {
    pub fn new() -> Self {
        Lock {
            lock: AtomicU32::new(abi::LOCK_UNLOCKED.0),
        }
    }

    /// # Safety
    ///
    /// See `Lock::lock`.
    unsafe fn try_lock(&self) -> Option<LockGuard> {
        // Attempt to acquire the lock with a single compare-and-swap.
        if let Err(old) = self.lock.compare_exchange(
            abi::LOCK_UNLOCKED.0,
            __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
            Ordering::Acquire,
            Ordering::Relaxed,
        ) {
            // Failure. Crash upon recursive acquisition.
            debug_assert_ne!(
                old & !abi::LOCK_KERNEL_MANAGED.0,
                __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
                "Attempted to recursively write-lock a lock",
            );
            None
        } else {
            Some(LockGuard { lock: &self.lock })
        }
    }

    /// # Safety
    ///
    /// This method is unsafe because the `LockGuard` holds a raw pointer into
    /// this `Lock` that it dereferences on drop to unlock the lock. Make sure
    /// the `LockGuard` goes out of scope before the `Lock` it came from moves
    /// or goes out of scope.
    pub unsafe fn lock(&self) -> LockGuard {
        self.try_lock().unwrap_or_else(|| {
            // Contended: call into the kernel to acquire a write lock.
            let subscription = abi::subscription {
                type_: abi::eventtype::LOCK_WRLOCK,
                union: abi::subscription_union {
                    lock: abi::subscription_lock {
                        lock: self.ptr(),
                        lock_scope: abi::scope::PRIVATE,
                    },
                },
                ..mem::zeroed()
            };
            let mut event = MaybeUninit::<abi::event>::uninit();
            let mut nevents: usize = 0;
            let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, &mut nevents);
            debug_assert_eq!(ret, abi::errno::SUCCESS);
            debug_assert_eq!(event.assume_init().error, abi::errno::SUCCESS);

            LockGuard { lock: &self.lock }
        })
    }

    fn ptr(&self) -> *mut abi::lock {
        &self.lock as *const AtomicU32 as *mut abi::lock
    }
}
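
// --- Illustrative sketch (added for exposition; not part of the upstream file) ---
// A minimal example of the `Lock` contract stated above: the guard returned
// by `lock()` holds a raw pointer back into the `Lock`, so the `Lock` must
// stay alive and in place until the guard drops.
#[allow(dead_code)]
unsafe fn lock_usage_sketch() {
    let lock = Lock::new();
    {
        // Fast path: a userspace compare-and-swap. Slow path: a kernel
        // `poll()` on a LOCK_WRLOCK subscription.
        let _guard = lock.lock();
        // ... critical section; `lock` must outlive `_guard` ...
    } // `_guard` drops here, unlocking (via `lock_unlock` if kernel-managed).
}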

/// Guard returned by `Lock::lock`; releases the lock on drop.
struct LockGuard {
    lock: *const AtomicU32,
}

impl LockGuard {
    fn ptr(&self) -> *mut abi::lock {
        self.lock as *mut abi::lock
    }
}

impl Drop for LockGuard {
    fn drop(&mut self) {
        let lock = unsafe { &*self.lock };
        debug_assert_eq!(
            lock.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
            unsafe { __pthread_thread_id.0 } | abi::LOCK_WRLOCKED.0,
            "This lock is not write-locked by this thread"
        );

        if !lock
            .compare_exchange(
                unsafe { __pthread_thread_id.0 } | abi::LOCK_WRLOCKED.0,
                abi::LOCK_UNLOCKED.0,
                Ordering::Release,
                Ordering::Relaxed,
            )
            .is_ok()
        {
            // The lock is managed by kernelspace. Call into the kernel
            // to unblock waiting threads.
            let ret = unsafe { abi::lock_unlock(self.lock as *mut abi::lock, abi::scope::PRIVATE) };
            debug_assert_eq!(ret, abi::errno::SUCCESS);
        }
    }
}

/// A CloudABI condition variable, used together with `Lock` above.
struct Condvar {
    condvar: AtomicU32,
}

impl Condvar {
    pub fn new() -> Self {
        Condvar {
            condvar: AtomicU32::new(abi::CONDVAR_HAS_NO_WAITERS.0),
        }
    }

    pub fn wait(&self, lock: &LockGuard) {
        unsafe {
            let subscription = abi::subscription {
                type_: abi::eventtype::CONDVAR,
                union: abi::subscription_union {
                    condvar: abi::subscription_condvar {
                        condvar: self.ptr(),
                        condvar_scope: abi::scope::PRIVATE,
                        lock: lock.ptr(),
                        lock_scope: abi::scope::PRIVATE,
                    },
                },
                ..mem::zeroed()
            };
            let mut event = MaybeUninit::<abi::event>::uninit();
            let mut nevents: usize = 0;

            let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, &mut nevents);
            debug_assert_eq!(ret, abi::errno::SUCCESS);
            debug_assert_eq!(event.assume_init().error, abi::errno::SUCCESS);
        }
    }

    /// Waits for a signal on the condvar.
    /// Returns `false` if the timeout elapsed before we were notified.
    pub fn wait_timeout(&self, lock: &LockGuard, timeout: abi::timestamp) -> bool {
        unsafe {
            let subscriptions = [
                abi::subscription {
                    type_: abi::eventtype::CONDVAR,
                    union: abi::subscription_union {
                        condvar: abi::subscription_condvar {
                            condvar: self.ptr(),
                            condvar_scope: abi::scope::PRIVATE,
                            lock: lock.ptr(),
                            lock_scope: abi::scope::PRIVATE,
                        },
                    },
                    ..mem::zeroed()
                },
                abi::subscription {
                    type_: abi::eventtype::CLOCK,
                    union: abi::subscription_union {
                        clock: abi::subscription_clock {
                            clock_id: abi::clockid::MONOTONIC,
                            timeout,
                            ..mem::zeroed()
                        },
                    },
                    ..mem::zeroed()
                },
            ];
            let mut events = MaybeUninit::<[abi::event; 2]>::uninit();
            let mut nevents: usize = 0;

            let ret = abi::poll(
                subscriptions.as_ptr(),
                events.as_mut_ptr() as *mut _,
                2,
                &mut nevents,
            );
            debug_assert_eq!(ret, abi::errno::SUCCESS);
            let events = events.assume_init();
            for i in 0..nevents {
                debug_assert_eq!(events[i].error, abi::errno::SUCCESS);
                if events[i].type_ == abi::eventtype::CONDVAR {
                    return true;
                }
            }
        }
        false
    }

    pub fn notify(&self) {
        let ret = unsafe { abi::condvar_signal(self.ptr(), abi::scope::PRIVATE, 1) };
        debug_assert_eq!(ret, abi::errno::SUCCESS);
    }

    fn ptr(&self) -> *mut abi::condvar {
        &self.condvar as *const AtomicU32 as *mut abi::condvar
    }
}
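
// --- Illustrative sketch (added for exposition; not part of the upstream file) ---
// The usual condvar loop over `Lock` and `Condvar`: hold the lock, re-check
// the predicate after every wakeup, and let `wait` atomically release and
// reacquire the lock around the sleep. `ready` is a hypothetical predicate,
// protected by `lock` just as `ThreadParker` protects `should_park` below.
#[allow(dead_code)]
unsafe fn condvar_usage_sketch(lock: &Lock, condvar: &Condvar, ready: &Cell<bool>) {
    let guard = lock.lock();
    while !ready.get() {
        condvar.wait(&guard);
    }
    // The lock is still held here; dropping `guard` releases it.
}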

// Helper type for putting a thread to sleep until some other thread wakes it up.
pub struct ThreadParker {
    should_park: Cell<bool>,
    lock: Lock,
    condvar: Condvar,
}

impl super::ThreadParkerT for ThreadParker {
    type UnparkHandle = UnparkHandle;

    const IS_CHEAP_TO_CONSTRUCT: bool = true;

    fn new() -> ThreadParker {
        ThreadParker {
            should_park: Cell::new(false),
            lock: Lock::new(),
            condvar: Condvar::new(),
        }
    }

    unsafe fn prepare_park(&self) {
        self.should_park.set(true);
    }

    unsafe fn timed_out(&self) -> bool {
        // We need to grab the lock here because another thread may be
        // concurrently executing UnparkHandle::unpark, which is done without
        // holding the queue lock.
        let _guard = self.lock.lock();
        self.should_park.get()
    }

    unsafe fn park(&self) {
        let guard = self.lock.lock();
        while self.should_park.get() {
            self.condvar.wait(&guard);
        }
    }

    unsafe fn park_until(&self, timeout: Instant) -> bool {
        let guard = self.lock.lock();
        while self.should_park.get() {
            if let Some(duration_left) = timeout.checked_duration_since(Instant::now()) {
                if let Ok(nanos_left) = abi::timestamp::try_from(duration_left.as_nanos()) {
                    self.condvar.wait_timeout(&guard, nanos_left);
                } else {
                    // The remaining timeout overflows an abi::timestamp; sleep indefinitely.
                    self.condvar.wait(&guard);
                }
            } else {
                // We timed out.
                return false;
            }
        }
        true
    }

    unsafe fn unpark_lock(&self) -> UnparkHandle {
        let _lock_guard = self.lock.lock();

        UnparkHandle {
            thread_parker: self,
            _lock_guard,
        }
    }
}
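
// --- Illustrative sketch (added for exposition; not part of the upstream file) ---
// The timed-park path in isolation: `prepare_park` sets `should_park`, and
// `park_until` returns `false` if the deadline passes before any unpark.
// The 50ms deadline is an arbitrary value chosen for illustration.
#[allow(dead_code)]
unsafe fn timed_park_sketch(parker: &ThreadParker) -> bool {
    use super::ThreadParkerT;
    use std::time::Duration;
    parker.prepare_park();
    // Returns true if unparked in time; false on timeout, in which case the
    // caller must follow up with `timed_out()` per the parking protocol.
    parker.park_until(Instant::now() + Duration::from_millis(50))
}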

/// Handle used to unpark a parked thread; holds the parker's lock until the
/// unpark is complete.
pub struct UnparkHandle {
    thread_parker: *const ThreadParker,
    _lock_guard: LockGuard,
}

impl super::UnparkHandleT for UnparkHandle {
    unsafe fn unpark(self) {
        (*self.thread_parker).should_park.set(false);

        // We notify while holding the lock here to avoid races with the target
        // thread. In particular, the thread could exit after we unlock the
        // mutex, which would make the condvar access invalid memory.
        (*self.thread_parker).condvar.notify();
    }
}

#[inline]
pub fn thread_yield() {
    thread::yield_now();
}
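
// --- Illustrative sketch (added for exposition; not part of the upstream file) ---
// The two halves of the park/unpark handshake, assuming the `ThreadParkerT`
// and `UnparkHandleT` traits from the parent module. In the real crate the
// parker lives in a queue node that stays in place, so the raw pointer inside
// `UnparkHandle` remains valid while the waker uses it.
#[allow(dead_code)]
unsafe fn waiter_sketch(parker: &ThreadParker) {
    use super::ThreadParkerT;
    parker.prepare_park(); // announce intent to sleep before publishing the parker
    parker.park(); // blocks until a waker clears `should_park` and notifies
}

#[allow(dead_code)]
unsafe fn waker_sketch(parker: &ThreadParker) {
    use super::{ThreadParkerT, UnparkHandleT};
    // `unpark_lock` takes the parker's internal lock; `unpark` then clears
    // `should_park` and signals the condvar while still holding that lock,
    // which prevents racing with a waiter that is timing out.
    parker.unpark_lock().unpark();
}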