use crate::cell::UnsafeCell;
use crate::mem;
use crate::mem::MaybeUninit;
use crate::sync::atomic::{AtomicU32, Ordering};
use crate::sys::cloudabi::abi;
use crate::sys::rwlock::{self, RWLock};

extern "C" {
    #[thread_local]
    static __pthread_thread_id: abi::tid;
}

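// CloudABI lock words are compared against the id of the current thread:
// a write-locked lock stores the owner's tid ORed with abi::LOCK_WRLOCKED,
// and the kernel sets abi::LOCK_KERNEL_MANAGED once it has to track
// threads blocked on the lock (see ReentrantMutex below).
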
// Implement Mutex using an RWLock. This doesn't introduce any
// performance overhead in this environment, as the operations would be
// implemented identically.
pub struct Mutex(RWLock);

pub unsafe fn raw(m: &Mutex) -> *mut AtomicU32 {
    rwlock::raw(&m.0)
}

impl Mutex {
    pub const fn new() -> Mutex {
        Mutex(RWLock::new())
    }

    pub unsafe fn init(&mut self) {
        // This function should normally reinitialize the mutex after
        // moving it to a different memory address. This implementation
        // does not require adjustments after moving.
    }

    pub unsafe fn try_lock(&self) -> bool {
        self.0.try_write()
    }

    pub unsafe fn lock(&self) {
        self.0.write()
    }

    pub unsafe fn unlock(&self) {
        self.0.write_unlock()
    }

    pub unsafe fn destroy(&self) {
        self.0.destroy()
    }
}

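// Both fields are MaybeUninit because a ReentrantMutex is created through
// the const fn uninitialized() and only becomes usable once init() has
// stored proper initial values.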
pub struct ReentrantMutex {
    lock: UnsafeCell<MaybeUninit<AtomicU32>>,
    recursion: UnsafeCell<MaybeUninit<u32>>,
}

impl ReentrantMutex {
    pub const unsafe fn uninitialized() -> ReentrantMutex {
        ReentrantMutex {
            lock: UnsafeCell::new(MaybeUninit::uninit()),
            recursion: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }

    pub unsafe fn init(&self) {
        *self.lock.get() = MaybeUninit::new(AtomicU32::new(abi::LOCK_UNLOCKED.0));
        *self.recursion.get() = MaybeUninit::new(0);
    }

    pub unsafe fn try_lock(&self) -> bool {
        // Attempt to acquire the lock.
        let lock = (*self.lock.get()).as_mut_ptr();
        let recursion = (*self.recursion.get()).as_mut_ptr();
        if let Err(old) = (*lock).compare_exchange(
            abi::LOCK_UNLOCKED.0,
            __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
            Ordering::Acquire,
            Ordering::Relaxed,
        ) {
            // If we fail to acquire the lock, it may be the case
            // that we've already acquired it and may need to recurse.
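            // LOCK_KERNEL_MANAGED is masked off here because the kernel
            // may have set it while other threads queued up on the lock;
            // the lock can still be owned by the current thread.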
            if old & !abi::LOCK_KERNEL_MANAGED.0 == __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 {
                *recursion += 1;
                true
            } else {
                false
            }
        } else {
            // Success.
            assert_eq!(*recursion, 0, "Mutex has invalid recursion count");
            true
        }
    }

    pub unsafe fn lock(&self) {
        if !self.try_lock() {
            // Call into the kernel to acquire a write lock.
            let lock = self.lock.get();
            let subscription = abi::subscription {
                type_: abi::eventtype::LOCK_WRLOCK,
                union: abi::subscription_union {
                    lock: abi::subscription_lock {
                        lock: lock as *mut abi::lock,
                        lock_scope: abi::scope::PRIVATE,
                    },
                },
                ..mem::zeroed()
            };
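            // poll() with this single LOCK_WRLOCK subscription blocks the
            // thread until the kernel has granted it the write lock.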
            let mut event = MaybeUninit::<abi::event>::uninit();
            let mut nevents = MaybeUninit::<usize>::uninit();
            let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, nevents.as_mut_ptr());
            assert_eq!(ret, abi::errno::SUCCESS, "Failed to acquire mutex");
            let event = event.assume_init();
            assert_eq!(event.error, abi::errno::SUCCESS, "Failed to acquire mutex");
        }
    }

    pub unsafe fn unlock(&self) {
        let lock = (*self.lock.get()).as_mut_ptr();
        let recursion = (*self.recursion.get()).as_mut_ptr();
        assert_eq!(
            (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
            __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
            "This mutex is locked by a different thread"
        );

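        // Either drop one level of recursion, or release the lock. An
        // uncontended lock is released entirely in userspace by a single
        // compare-exchange back to LOCK_UNLOCKED.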
        if *recursion > 0 {
            *recursion -= 1;
        } else if (*lock)
            .compare_exchange(
                __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
                abi::LOCK_UNLOCKED.0,
                Ordering::Release,
                Ordering::Relaxed,
            )
            .is_err()
        {
            // Lock is managed by kernelspace. Call into the kernel
            // to unblock waiting threads.
            let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE);
            assert_eq!(ret, abi::errno::SUCCESS, "Failed to unlock a mutex");
        }
    }

    pub unsafe fn destroy(&self) {
        let lock = (*self.lock.get()).as_mut_ptr();
        let recursion = (*self.recursion.get()).as_mut_ptr();
        assert_eq!(
            (*lock).load(Ordering::Relaxed),
            abi::LOCK_UNLOCKED.0,
            "Attempted to destroy locked mutex"
        );
        assert_eq!(*recursion, 0, "Recursion counter invalid");
    }
}
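
// A minimal usage sketch (hypothetical; inside libstd this type is driven
// through the platform-independent sys_common wrappers rather than used
// directly):
//
//     unsafe {
//         let m = ReentrantMutex::uninitialized();
//         m.init();
//         m.lock();
//         m.lock(); // The owning thread may re-enter.
//         m.unlock();
//         m.unlock();
//         m.destroy();
//     }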