use crate::cell::UnsafeCell;
use crate::collections::VecDeque;
use crate::ffi::c_void;
use crate::hint;
use crate::ops::{Deref, DerefMut, Drop};
use crate::ptr;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys::hermit::abi;

/// This type provides a lock based on busy waiting to realize mutual exclusion
///
/// # Description
///
/// This structure behaves a lot like a common mutex. There are some differences:
///
/// - By using busy waiting, it can be used outside the runtime.
/// - It is a so-called ticket lock and is completely fair.
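///
/// # Example
///
/// A minimal usage sketch of the ticket lock (the `u32` payload and the call
/// pattern are illustrative only, not taken from this module):
///
/// ```ignore (illustrative sketch of a private type)
/// let lock = Spinlock::new(0u32);
/// unsafe {
///     let mut guard = lock.lock(); // draws a ticket and spins until it is served
///     *guard += 1;                 // exclusive access to the protected data
/// }                                // dropping the guard advances `dequeue`
/// ```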
#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
struct Spinlock<T: ?Sized> {
    queue: AtomicUsize,
    dequeue: AtomicUsize,
    data: UnsafeCell<T>,
}

unsafe impl<T: ?Sized + Send> Sync for Spinlock<T> {}
unsafe impl<T: ?Sized + Send> Send for Spinlock<T> {}

/// A guard through which the protected data can be accessed
///
/// When the guard falls out of scope it will release the lock.
struct SpinlockGuard<'a, T: ?Sized + 'a> {
    dequeue: &'a AtomicUsize,
    data: &'a mut T,
}

impl<T> Spinlock<T> {
    pub const fn new(user_data: T) -> Spinlock<T> {
        Spinlock {
            queue: AtomicUsize::new(0),
            dequeue: AtomicUsize::new(1),
            data: UnsafeCell::new(user_data),
        }
    }

    #[inline]
    fn obtain_lock(&self) {
        // Draw the next ticket and spin until the "now serving" counter reaches it.
        let ticket = self.queue.fetch_add(1, Ordering::SeqCst) + 1;
        while self.dequeue.load(Ordering::SeqCst) != ticket {
            hint::spin_loop();
        }
    }

    #[inline]
    pub unsafe fn lock(&self) -> SpinlockGuard<'_, T> {
        self.obtain_lock();
        SpinlockGuard { dequeue: &self.dequeue, data: &mut *self.data.get() }
    }
}

impl<T: ?Sized + Default> Default for Spinlock<T> {
    fn default() -> Spinlock<T> {
        Spinlock::new(Default::default())
    }
}

impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        &*self.data
    }
}

impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut *self.data
    }
}

impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
    /// Dropping the `SpinlockGuard` releases the lock it was created from.
    fn drop(&mut self) {
        self.dequeue.fetch_add(1, Ordering::SeqCst);
    }
}

/// A priority queue for tasks, with one FIFO queue per priority level
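///
/// A minimal sketch of the intended push/pop pattern (`prio` and `tid` stand in
/// for an `abi::Priority` and an `abi::Tid` obtained elsewhere):
///
/// ```ignore (illustrative sketch of a private type)
/// let mut blocked = PriorityQueue::new();
/// blocked.push(prio, tid);  // enqueue a task id under its priority
/// let next = blocked.pop(); // highest-priority entry, or `None` if empty
/// ```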
struct PriorityQueue {
    queues: [Option<VecDeque<abi::Tid>>; abi::NO_PRIORITIES],
    /// One bit per priority level; a set bit means `queues[i]` currently holds
    /// at least one task.
    prio_bitmap: u64,
}

impl PriorityQueue {
    pub const fn new() -> PriorityQueue {
        PriorityQueue {
            queues: [
                None, None, None, None, None, None, None, None, None, None, None, None, None, None,
                None, None, None, None, None, None, None, None, None, None, None, None, None, None,
                None, None, None,
            ],
            prio_bitmap: 0,
        }
    }

    /// Add a task id to the queue of its priority
    pub fn push(&mut self, prio: abi::Priority, id: abi::Tid) {
        let i: usize = prio.into().into();
        // Mark this priority level as non-empty.
        self.prio_bitmap |= (1 << i) as u64;
        if let Some(queue) = &mut self.queues[i] {
            queue.push_back(id);
        } else {
            let mut queue = VecDeque::new();
            queue.push_back(id);
            self.queues[i] = Some(queue);
        }
    }

    fn pop_from_queue(&mut self, queue_index: usize) -> Option<abi::Tid> {
        if let Some(queue) = &mut self.queues[queue_index] {
            let id = queue.pop_front();

            if queue.is_empty() {
                // The last entry of this priority level was removed; clear its bit.
                self.prio_bitmap &= !(1 << queue_index as u64);
            }

            id
        } else {
            None
        }
    }

    /// Pop the task handle with the highest priority from the queue
    pub fn pop(&mut self) -> Option<abi::Tid> {
        for i in 0..abi::NO_PRIORITIES {
            if self.prio_bitmap & (1 << i) != 0 {
                return self.pop_from_queue(i);
            }
        }

        None
    }
}

struct MutexInner {
    locked: bool,
    blocked_task: PriorityQueue,
}

impl MutexInner {
    pub const fn new() -> MutexInner {
        MutexInner { locked: false, blocked_task: PriorityQueue::new() }
    }
}

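/// A blocking mutex for Hermit tasks.
///
/// The inner state (the `locked` flag and the queue of blocked tasks) is
/// protected by the ticket spinlock above. If `lock` finds the mutex already
/// held, the calling task enqueues itself under its priority, blocks itself,
/// and yields; `unlock` wakes one blocked task, which then retries the lock.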
pub struct Mutex {
    inner: Spinlock<MutexInner>,
}

pub type MovableMutex = Box<Mutex>;

unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}

impl Mutex {
    pub const fn new() -> Mutex {
        Mutex { inner: Spinlock::new(MutexInner::new()) }
    }

    #[inline]
    pub unsafe fn init(&mut self) {
        self.inner = Spinlock::new(MutexInner::new());
    }

    #[inline]
    pub unsafe fn lock(&self) {
        loop {
            let mut guard = self.inner.lock();
            if !guard.locked {
                guard.locked = true;
                return;
            } else {
                // The mutex is already held: enqueue this task under its priority
                // and mark it blocked *before* releasing the spinlock, so that a
                // concurrent `unlock` cannot miss the wakeup.
                let prio = abi::get_priority();
                let id = abi::getpid();

                guard.blocked_task.push(prio, id);
                abi::block_current_task();
                drop(guard);
                abi::yield_now();
            }
        }
    }

    #[inline]
    pub unsafe fn unlock(&self) {
        let mut guard = self.inner.lock();
        guard.locked = false;
        // Wake one blocked task (the highest-priority one, per `PriorityQueue::pop`);
        // it will then retry `lock`.
        if let Some(tid) = guard.blocked_task.pop() {
            abi::wakeup_task(tid);
        }
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        let mut guard = self.inner.lock();
        if guard.locked {
            // Already held by another task: report failure rather than claiming
            // a lock that was not acquired.
            false
        } else {
            guard.locked = true;
            true
        }
    }

    #[inline]
    pub unsafe fn destroy(&self) {}
}

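/// A reentrant (recursive) mutex that delegates locking to the kernel's
/// `recmutex_*` ABI calls.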
pub struct ReentrantMutex {
    inner: *const c_void,
}

impl ReentrantMutex {
    pub const unsafe fn uninitialized() -> ReentrantMutex {
        ReentrantMutex { inner: ptr::null() }
    }

    #[inline]
    pub unsafe fn init(&self) {
        let _ = abi::recmutex_init(&self.inner as *const *const c_void as *mut _);
    }

    #[inline]
    pub unsafe fn lock(&self) {
        let _ = abi::recmutex_lock(self.inner);
    }

    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        // No try-lock call is made here; acquisition is unconditionally reported
        // as successful.
        true
    }

    #[inline]
    pub unsafe fn unlock(&self) {
        let _ = abi::recmutex_unlock(self.inner);
    }

    #[inline]
    pub unsafe fn destroy(&self) {
        let _ = abi::recmutex_destroy(self.inner);
    }
}