]> git.proxmox.com Git - rustc.git/blob - library/std/src/sys/sgx/rwlock.rs
New upstream version 1.62.1+dfsg1
[rustc.git] / library / std / src / sys / sgx / rwlock.rs
1 #[cfg(test)]
2 mod tests;
3
4 use crate::num::NonZeroUsize;
5
6 use super::waitqueue::{
7 try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable,
8 };
9 use crate::mem;
10
/// SGX reader-writer lock.
///
/// Each field pairs the lock state with a wait queue (`WaitVariable`),
/// protected by a spin lock:
/// - `readers`: `Some(n)` while `n` threads hold the lock for reading,
///   `None` when no reader is active; its queue holds blocked readers.
/// - `writer`: `true` while a thread holds the lock for writing; its
///   queue holds blocked writers.
pub struct RwLock {
    readers: SpinMutex<WaitVariable<Option<NonZeroUsize>>>,
    writer: SpinMutex<WaitVariable<bool>>,
}
15
// Boxed so the lock has a stable address even if its owner moves.
// NOTE(review): presumably required because blocked threads reference the
// lock's address while waiting — confirm against the waitqueue module.
pub type MovableRwLock = Box<RwLock>;
17
// Check at compile time that RwLock size matches C definition (see test_c_rwlock_initializer below)
//
// # Safety
// Never called, as it is a compile time check.
#[allow(dead_code)]
unsafe fn rw_lock_size_assert(r: RwLock) {
    // `transmute` only type-checks when source and destination have the same
    // size, so this fails to compile if `RwLock` is not exactly 144 bytes —
    // the size assumed by the C-side users of the `__rust_rwlock_*` symbols.
    unsafe { mem::transmute::<RwLock, [u8; 144]>(r) };
}
26
27 impl RwLock {
28 pub const fn new() -> RwLock {
29 RwLock {
30 readers: SpinMutex::new(WaitVariable::new(None)),
31 writer: SpinMutex::new(WaitVariable::new(false)),
32 }
33 }
34
35 #[inline]
36 pub unsafe fn read(&self) {
37 let mut rguard = self.readers.lock();
38 let wguard = self.writer.lock();
39 if *wguard.lock_var() || !wguard.queue_empty() {
40 // Another thread has or is waiting for the write lock, wait
41 drop(wguard);
42 WaitQueue::wait(rguard, || {});
43 // Another thread has passed the lock to us
44 } else {
45 // No waiting writers, acquire the read lock
46 *rguard.lock_var_mut() =
47 NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
48 }
49 }
50
51 #[inline]
52 pub unsafe fn try_read(&self) -> bool {
53 let mut rguard = try_lock_or_false!(self.readers);
54 let wguard = try_lock_or_false!(self.writer);
55 if *wguard.lock_var() || !wguard.queue_empty() {
56 // Another thread has or is waiting for the write lock
57 false
58 } else {
59 // No waiting writers, acquire the read lock
60 *rguard.lock_var_mut() =
61 NonZeroUsize::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
62 true
63 }
64 }
65
66 #[inline]
67 pub unsafe fn write(&self) {
68 let rguard = self.readers.lock();
69 let mut wguard = self.writer.lock();
70 if *wguard.lock_var() || rguard.lock_var().is_some() {
71 // Another thread has the lock, wait
72 drop(rguard);
73 WaitQueue::wait(wguard, || {});
74 // Another thread has passed the lock to us
75 } else {
76 // We are just now obtaining the lock
77 *wguard.lock_var_mut() = true;
78 }
79 }
80
81 #[inline]
82 pub unsafe fn try_write(&self) -> bool {
83 let rguard = try_lock_or_false!(self.readers);
84 let mut wguard = try_lock_or_false!(self.writer);
85 if *wguard.lock_var() || rguard.lock_var().is_some() {
86 // Another thread has the lock
87 false
88 } else {
89 // We are just now obtaining the lock
90 *wguard.lock_var_mut() = true;
91 true
92 }
93 }
94
95 #[inline]
96 unsafe fn __read_unlock(
97 &self,
98 mut rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZeroUsize>>>,
99 wguard: SpinMutexGuard<'_, WaitVariable<bool>>,
100 ) {
101 *rguard.lock_var_mut() = NonZeroUsize::new(rguard.lock_var().unwrap().get() - 1);
102 if rguard.lock_var().is_some() {
103 // There are other active readers
104 } else {
105 if let Ok(mut wguard) = WaitQueue::notify_one(wguard) {
106 // A writer was waiting, pass the lock
107 *wguard.lock_var_mut() = true;
108 wguard.drop_after(rguard);
109 } else {
110 // No writers were waiting, the lock is released
111 rtassert!(rguard.queue_empty());
112 }
113 }
114 }
115
116 #[inline]
117 pub unsafe fn read_unlock(&self) {
118 let rguard = self.readers.lock();
119 let wguard = self.writer.lock();
120 unsafe { self.__read_unlock(rguard, wguard) };
121 }
122
123 #[inline]
124 unsafe fn __write_unlock(
125 &self,
126 rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZeroUsize>>>,
127 wguard: SpinMutexGuard<'_, WaitVariable<bool>>,
128 ) {
129 match WaitQueue::notify_one(wguard) {
130 Err(mut wguard) => {
131 // No writers waiting, release the write lock
132 *wguard.lock_var_mut() = false;
133 if let Ok(mut rguard) = WaitQueue::notify_all(rguard) {
134 // One or more readers were waiting, pass the lock to them
135 if let NotifiedTcs::All { count } = rguard.notified_tcs() {
136 *rguard.lock_var_mut() = Some(count)
137 } else {
138 unreachable!() // called notify_all
139 }
140 rguard.drop_after(wguard);
141 } else {
142 // No readers waiting, the lock is released
143 }
144 }
145 Ok(wguard) => {
146 // There was a thread waiting for write, just pass the lock
147 wguard.drop_after(rguard);
148 }
149 }
150 }
151
152 #[inline]
153 pub unsafe fn write_unlock(&self) {
154 let rguard = self.readers.lock();
155 let wguard = self.writer.lock();
156 unsafe { self.__write_unlock(rguard, wguard) };
157 }
158
159 // only used by __rust_rwlock_unlock below
160 #[inline]
161 #[cfg_attr(test, allow(dead_code))]
162 unsafe fn unlock(&self) {
163 let rguard = self.readers.lock();
164 let wguard = self.writer.lock();
165 if *wguard.lock_var() == true {
166 unsafe { self.__write_unlock(rguard, wguard) };
167 } else {
168 unsafe { self.__read_unlock(rguard, wguard) };
169 }
170 }
171
172 #[inline]
173 pub unsafe fn destroy(&self) {}
174 }
175
// The following functions are needed by libunwind. These symbols are named
// in pre-link args for the target specification, so keep that in sync.
#[cfg(not(test))]
const EINVAL: i32 = 22; // POSIX EINVAL ("invalid argument"), returned on null pointers
180
181 #[cfg(not(test))]
182 #[no_mangle]
183 pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RwLock) -> i32 {
184 if p.is_null() {
185 return EINVAL;
186 }
187 unsafe { (*p).read() };
188 return 0;
189 }
190
191 #[cfg(not(test))]
192 #[no_mangle]
193 pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RwLock) -> i32 {
194 if p.is_null() {
195 return EINVAL;
196 }
197 unsafe { (*p).write() };
198 return 0;
199 }
200 #[cfg(not(test))]
201 #[no_mangle]
202 pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 {
203 if p.is_null() {
204 return EINVAL;
205 }
206 unsafe { (*p).unlock() };
207 return 0;
208 }