]>
Commit | Line | Data |
---|---|---|
f035d41b XL |
1 | // Copyright 2018 Amanieu d'Antras |
2 | // | |
3 | // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or | |
4 | // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or | |
5 | // http://opensource.org/licenses/MIT>, at your option. This file may not be | |
6 | // copied, modified, or distributed except according to those terms. | |
7 | ||
8 | use crate::{ | |
9 | mutex::{RawMutex, RawMutexFair, RawMutexTimed}, | |
10 | GuardNoSend, | |
11 | }; | |
12 | use core::{ | |
13 | cell::{Cell, UnsafeCell}, | |
14 | fmt, | |
15 | marker::PhantomData, | |
16 | mem, | |
17 | num::NonZeroUsize, | |
18 | ops::Deref, | |
19 | sync::atomic::{AtomicUsize, Ordering}, | |
20 | }; | |
21 | ||
22 | #[cfg(feature = "owning_ref")] | |
23 | use owning_ref::StableAddress; | |
24 | ||
25 | #[cfg(feature = "serde")] | |
26 | use serde::{Deserialize, Deserializer, Serialize, Serializer}; | |
27 | ||
/// Helper trait which returns a non-zero thread ID.
///
/// The simplest way to implement this trait is to return the address of a
/// thread-local variable.
///
/// # Safety
///
/// Implementations of this trait must ensure that no two active threads share
/// the same thread ID. However the ID of a thread that has exited can be
/// re-used since that thread is no longer active.
pub unsafe trait GetThreadId {
    /// Initial value.
    // A "non-constant" const item is a legacy way to supply an initialized value to downstream
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self;

    /// Returns a non-zero thread ID which identifies the current thread of
    /// execution.
    ///
    /// The ID is compared against the stored owner ID to detect recursive
    /// locking, so it must be stable for the lifetime of the thread.
    fn nonzero_thread_id(&self) -> NonZeroUsize;
}
49 | ||
/// Internal state backing `ReentrantMutex`: an ordinary raw mutex plus
/// book-keeping tracking which thread owns it and how many times that thread
/// has recursively locked it.
struct RawReentrantMutex<R, G> {
    // Thread ID of the current owner, or 0 when unlocked. Accessed with
    // `Relaxed` ordering throughout this file; cross-thread synchronization
    // comes from the underlying mutex's lock/unlock.
    owner: AtomicUsize,
    // Recursive lock depth. Only touched by the owning thread, hence `Cell`.
    lock_count: Cell<usize>,
    // The underlying non-reentrant mutex.
    mutex: R,
    // Helper used to obtain the current thread's non-zero ID.
    get_thread_id: G,
}
56 | ||
impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Shared locking logic: `try_lock` is the strategy used to acquire the
    /// underlying mutex when the current thread does not already own it.
    ///
    /// Returns `true` if the lock is held by the current thread on exit.
    #[inline]
    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
        if self.owner.load(Ordering::Relaxed) == id {
            // Recursive acquisition by the owning thread: just bump the
            // count. `checked_add` turns depth overflow into a panic rather
            // than corrupting the lock state.
            self.lock_count.set(
                self.lock_count
                    .get()
                    .checked_add(1)
                    .expect("ReentrantMutex lock count overflow"),
            );
        } else {
            if !try_lock() {
                return false;
            }
            // We now hold the underlying mutex; record ourselves as owner
            // and start the count at one.
            self.owner.store(id, Ordering::Relaxed);
            debug_assert_eq!(self.lock_count.get(), 0);
            self.lock_count.set(1);
        }
        true
    }

    /// Acquires the lock, blocking until available. Always succeeds, so the
    /// result of `lock_internal` is ignored.
    #[inline]
    fn lock(&self) {
        self.lock_internal(|| {
            self.mutex.lock();
            true
        });
    }

    /// Attempts to acquire the lock without blocking.
    #[inline]
    fn try_lock(&self) -> bool {
        self.lock_internal(|| self.mutex.try_lock())
    }

    /// Releases one level of the lock; the underlying mutex is unlocked only
    /// when the recursion count drops to zero.
    #[inline]
    fn unlock(&self) {
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            // Clear the owner before unlocking so a thread that acquires the
            // mutex next never observes a stale owner ID.
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock();
        }
    }
}
102 | ||
impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Releases one level of the lock using the underlying mutex's fair
    /// unlock protocol; only the outermost release actually unlocks it.
    #[inline]
    fn unlock_fair(&self) {
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            // Clear the owner before releasing, same as `unlock`.
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock_fair();
        }
    }

    /// Temporarily yields the underlying mutex to a waiting thread, if any.
    #[inline]
    fn bump(&self) {
        // Only bump at the outermost lock level; handing the mutex to another
        // thread while inner recursive guards are held would be incorrect.
        if self.lock_count.get() == 1 {
            let id = self.owner.load(Ordering::Relaxed);
            // The owner field is cleared around `bump` because another thread
            // may briefly acquire (and release) the underlying mutex.
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.bump();
            self.owner.store(id, Ordering::Relaxed);
        }
    }
}
124 | ||
125 | impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> { | |
126 | #[inline] | |
127 | fn try_lock_until(&self, timeout: R::Instant) -> bool { | |
128 | self.lock_internal(|| self.mutex.try_lock_until(timeout)) | |
129 | } | |
130 | ||
131 | #[inline] | |
132 | fn try_lock_for(&self, timeout: R::Duration) -> bool { | |
133 | self.lock_internal(|| self.mutex.try_lock_for(timeout)) | |
134 | } | |
135 | } | |
136 | ||
/// A mutex which can be recursively locked by a single thread.
///
/// This type is identical to `Mutex` except for the following points:
///
/// - Locking multiple times from the same thread will work correctly instead of
///   deadlocking.
/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
///   Use a `RefCell` if you need this.
///
/// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
/// primitive.
pub struct ReentrantMutex<R, G, T: ?Sized> {
    // Reentrancy book-keeping plus the raw mutex itself.
    raw: RawReentrantMutex<R, G>,
    // The protected data; `UnsafeCell` because guards hand out references to
    // it while the mutex is only borrowed shared.
    data: UnsafeCell<T>,
}
152 | ||
// SAFETY: the mutex can move to another thread when the raw mutex, the
// thread-ID helper and the protected data are all `Send`.
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
    for ReentrantMutex<R, G, T>
{
}
// SAFETY: the lock serializes access so only one thread at a time touches the
// data, which is why `T: Send` (rather than `T: Sync`) is the bound here.
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
    for ReentrantMutex<R, G, T>
{
}
161 | ||
162 | impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> { | |
163 | /// Creates a new reentrant mutex in an unlocked state ready for use. | |
164 | #[cfg(feature = "nightly")] | |
165 | #[inline] | |
166 | pub const fn new(val: T) -> ReentrantMutex<R, G, T> { | |
167 | ReentrantMutex { | |
168 | data: UnsafeCell::new(val), | |
169 | raw: RawReentrantMutex { | |
170 | owner: AtomicUsize::new(0), | |
171 | lock_count: Cell::new(0), | |
172 | mutex: R::INIT, | |
173 | get_thread_id: G::INIT, | |
174 | }, | |
175 | } | |
176 | } | |
177 | ||
178 | /// Creates a new reentrant mutex in an unlocked state ready for use. | |
179 | #[cfg(not(feature = "nightly"))] | |
180 | #[inline] | |
181 | pub fn new(val: T) -> ReentrantMutex<R, G, T> { | |
182 | ReentrantMutex { | |
183 | data: UnsafeCell::new(val), | |
184 | raw: RawReentrantMutex { | |
185 | owner: AtomicUsize::new(0), | |
186 | lock_count: Cell::new(0), | |
187 | mutex: R::INIT, | |
188 | get_thread_id: G::INIT, | |
189 | }, | |
190 | } | |
191 | } | |
192 | ||
193 | /// Consumes this mutex, returning the underlying data. | |
194 | #[inline] | |
195 | pub fn into_inner(self) -> T { | |
196 | self.data.into_inner() | |
197 | } | |
198 | } | |
199 | ||
200 | impl<R, G, T> ReentrantMutex<R, G, T> { | |
201 | /// Creates a new reentrant mutex based on a pre-existing raw mutex and a | |
202 | /// helper to get the thread ID. | |
203 | /// | |
204 | /// This allows creating a reentrant mutex in a constant context on stable | |
205 | /// Rust. | |
206 | #[inline] | |
207 | pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> { | |
208 | ReentrantMutex { | |
209 | data: UnsafeCell::new(val), | |
210 | raw: RawReentrantMutex { | |
211 | owner: AtomicUsize::new(0), | |
212 | lock_count: Cell::new(0), | |
213 | mutex: raw_mutex, | |
214 | get_thread_id, | |
215 | }, | |
216 | } | |
217 | } | |
218 | } | |
219 | ||
220 | impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> { | |
221 | /// # Safety | |
222 | /// | |
223 | /// The lock must be held when calling this method. | |
224 | #[inline] | |
225 | unsafe fn guard(&self) -> ReentrantMutexGuard<'_, R, G, T> { | |
226 | ReentrantMutexGuard { | |
227 | remutex: &self, | |
228 | marker: PhantomData, | |
229 | } | |
230 | } | |
231 | ||
232 | /// Acquires a reentrant mutex, blocking the current thread until it is able | |
233 | /// to do so. | |
234 | /// | |
235 | /// If the mutex is held by another thread then this function will block the | |
236 | /// local thread until it is available to acquire the mutex. If the mutex is | |
237 | /// already held by the current thread then this function will increment the | |
238 | /// lock reference count and return immediately. Upon returning, | |
239 | /// the thread is the only thread with the mutex held. An RAII guard is | |
240 | /// returned to allow scoped unlock of the lock. When the guard goes out of | |
241 | /// scope, the mutex will be unlocked. | |
242 | #[inline] | |
243 | pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> { | |
244 | self.raw.lock(); | |
245 | // SAFETY: The lock is held, as required. | |
246 | unsafe { self.guard() } | |
247 | } | |
248 | ||
249 | /// Attempts to acquire this lock. | |
250 | /// | |
251 | /// If the lock could not be acquired at this time, then `None` is returned. | |
252 | /// Otherwise, an RAII guard is returned. The lock will be unlocked when the | |
253 | /// guard is dropped. | |
254 | /// | |
255 | /// This function does not block. | |
256 | #[inline] | |
257 | pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> { | |
258 | if self.raw.try_lock() { | |
259 | // SAFETY: The lock is held, as required. | |
260 | Some(unsafe { self.guard() }) | |
261 | } else { | |
262 | None | |
263 | } | |
264 | } | |
265 | ||
266 | /// Returns a mutable reference to the underlying data. | |
267 | /// | |
268 | /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to | |
269 | /// take place---the mutable borrow statically guarantees no locks exist. | |
270 | #[inline] | |
271 | pub fn get_mut(&mut self) -> &mut T { | |
272 | unsafe { &mut *self.data.get() } | |
273 | } | |
274 | ||
275 | /// Forcibly unlocks the mutex. | |
276 | /// | |
277 | /// This is useful when combined with `mem::forget` to hold a lock without | |
278 | /// the need to maintain a `ReentrantMutexGuard` object alive, for example when | |
279 | /// dealing with FFI. | |
280 | /// | |
281 | /// # Safety | |
282 | /// | |
283 | /// This method must only be called if the current thread logically owns a | |
284 | /// `ReentrantMutexGuard` but that guard has be discarded using `mem::forget`. | |
285 | /// Behavior is undefined if a mutex is unlocked when not locked. | |
286 | #[inline] | |
287 | pub unsafe fn force_unlock(&self) { | |
288 | self.raw.unlock(); | |
289 | } | |
290 | ||
291 | /// Returns the underlying raw mutex object. | |
292 | /// | |
293 | /// Note that you will most likely need to import the `RawMutex` trait from | |
294 | /// `lock_api` to be able to call functions on the raw mutex. | |
295 | /// | |
296 | /// # Safety | |
297 | /// | |
298 | /// This method is unsafe because it allows unlocking a mutex while | |
299 | /// still holding a reference to a `ReentrantMutexGuard`. | |
300 | #[inline] | |
301 | pub unsafe fn raw(&self) -> &R { | |
302 | &self.raw.mutex | |
303 | } | |
304 | } | |
305 | ||
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Forcibly unlocks the mutex using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a mutex is unlocked when not locked.
    #[inline]
    pub unsafe fn force_unlock_fair(&self) {
        self.raw.unlock_fair();
    }
}
323 | ||
324 | impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> { | |
325 | /// Attempts to acquire this lock until a timeout is reached. | |
326 | /// | |
327 | /// If the lock could not be acquired before the timeout expired, then | |
328 | /// `None` is returned. Otherwise, an RAII guard is returned. The lock will | |
329 | /// be unlocked when the guard is dropped. | |
330 | #[inline] | |
331 | pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> { | |
332 | if self.raw.try_lock_for(timeout) { | |
333 | // SAFETY: The lock is held, as required. | |
334 | Some(unsafe { self.guard() }) | |
335 | } else { | |
336 | None | |
337 | } | |
338 | } | |
339 | ||
340 | /// Attempts to acquire this lock until a timeout is reached. | |
341 | /// | |
342 | /// If the lock could not be acquired before the timeout expired, then | |
343 | /// `None` is returned. Otherwise, an RAII guard is returned. The lock will | |
344 | /// be unlocked when the guard is dropped. | |
345 | #[inline] | |
346 | pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> { | |
347 | if self.raw.try_lock_until(timeout) { | |
348 | // SAFETY: The lock is held, as required. | |
349 | Some(unsafe { self.guard() }) | |
350 | } else { | |
351 | None | |
352 | } | |
353 | } | |
354 | } | |
355 | ||
356 | impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> { | |
357 | #[inline] | |
358 | fn default() -> ReentrantMutex<R, G, T> { | |
359 | ReentrantMutex::new(Default::default()) | |
360 | } | |
361 | } | |
362 | ||
363 | impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> { | |
364 | #[inline] | |
365 | fn from(t: T) -> ReentrantMutex<R, G, T> { | |
366 | ReentrantMutex::new(t) | |
367 | } | |
368 | } | |
369 | ||
370 | impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> { | |
371 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | |
372 | match self.try_lock() { | |
373 | Some(guard) => f | |
374 | .debug_struct("ReentrantMutex") | |
375 | .field("data", &&*guard) | |
376 | .finish(), | |
377 | None => { | |
378 | struct LockedPlaceholder; | |
379 | impl fmt::Debug for LockedPlaceholder { | |
380 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | |
381 | f.write_str("<locked>") | |
382 | } | |
383 | } | |
384 | ||
385 | f.debug_struct("ReentrantMutex") | |
386 | .field("data", &LockedPlaceholder) | |
387 | .finish() | |
388 | } | |
389 | } | |
390 | } | |
391 | } | |
392 | ||
// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R: RawMutex, G: GetThreadId, T: Serialize + ?Sized> Serialize for ReentrantMutex<R, G, T> {
    /// Serializes the protected value, holding the lock for the duration of
    /// the serialization.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        self.lock().serialize(serializer)
    }
}
408 | ||
#[cfg(feature = "serde")]
impl<'de, R: RawMutex, G: GetThreadId, T: Deserialize<'de> + ?Sized> Deserialize<'de>
    for ReentrantMutex<R, G, T>
{
    /// Deserializes a value of `T` and wraps it in a fresh, unlocked mutex.
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let value = T::deserialize(deserializer)?;
        Ok(ReentrantMutex::new(value))
    }
}
423 | ||
/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
/// is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` implementation.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    // The mutex this guard unlocks (one level) when dropped.
    remutex: &'a ReentrantMutex<R, G, T>,
    // The `GuardNoSend` marker keeps this guard from being sent to another
    // thread: the lock must be released on the thread that acquired it.
    marker: PhantomData<(&'a T, GuardNoSend)>,
}
434 | ||
// SAFETY: sharing the guard across threads only exposes `&T` (via `Deref`),
// so requiring `T: Sync` makes the guard itself safe to share.
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for ReentrantMutexGuard<'a, R, G, T>
{
}
439 | ||
440 | impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> { | |
441 | /// Returns a reference to the original `ReentrantMutex` object. | |
442 | pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> { | |
443 | s.remutex | |
444 | } | |
445 | ||
446 | /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data. | |
447 | /// | |
448 | /// This operation cannot fail as the `ReentrantMutexGuard` passed | |
449 | /// in already locked the mutex. | |
450 | /// | |
451 | /// This is an associated function that needs to be | |
452 | /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of | |
453 | /// the same name on the contents of the locked data. | |
454 | #[inline] | |
455 | pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U> | |
456 | where | |
457 | F: FnOnce(&T) -> &U, | |
458 | { | |
459 | let raw = &s.remutex.raw; | |
460 | let data = f(unsafe { &*s.remutex.data.get() }); | |
461 | mem::forget(s); | |
462 | MappedReentrantMutexGuard { | |
463 | raw, | |
464 | data, | |
465 | marker: PhantomData, | |
466 | } | |
467 | } | |
468 | ||
469 | /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the | |
470 | /// locked data. The original guard is return if the closure returns `None`. | |
471 | /// | |
472 | /// This operation cannot fail as the `ReentrantMutexGuard` passed | |
473 | /// in already locked the mutex. | |
474 | /// | |
475 | /// This is an associated function that needs to be | |
476 | /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of | |
477 | /// the same name on the contents of the locked data. | |
478 | #[inline] | |
479 | pub fn try_map<U: ?Sized, F>( | |
480 | s: Self, | |
481 | f: F, | |
482 | ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self> | |
483 | where | |
484 | F: FnOnce(&mut T) -> Option<&mut U>, | |
485 | { | |
486 | let raw = &s.remutex.raw; | |
487 | let data = match f(unsafe { &mut *s.remutex.data.get() }) { | |
488 | Some(data) => data, | |
489 | None => return Err(s), | |
490 | }; | |
491 | mem::forget(s); | |
492 | Ok(MappedReentrantMutexGuard { | |
493 | raw, | |
494 | data, | |
495 | marker: PhantomData, | |
496 | }) | |
497 | } | |
498 | ||
499 | /// Temporarily unlocks the mutex to execute the given function. | |
500 | /// | |
501 | /// This is safe because `&mut` guarantees that there exist no other | |
502 | /// references to the data protected by the mutex. | |
503 | #[inline] | |
504 | pub fn unlocked<F, U>(s: &mut Self, f: F) -> U | |
505 | where | |
506 | F: FnOnce() -> U, | |
507 | { | |
508 | s.remutex.raw.unlock(); | |
509 | defer!(s.remutex.raw.lock()); | |
510 | f() | |
511 | } | |
512 | } | |
513 | ||
impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    ReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the mutex for a long time. This is the
    /// default because it allows much higher throughput as it avoids forcing a
    /// context switch on every mutex unlock. This can result in one thread
    /// acquiring a mutex many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.remutex.raw.unlock_fair();
        // Skip the destructor, which would otherwise unlock a second time.
        mem::forget(s);
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// The mutex is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.remutex.raw.unlock_fair();
        // Re-acquire the lock when `f` returns — or panics.
        defer!(s.remutex.raw.lock());
        f()
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `lock`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        s.remutex.raw.bump();
    }
}
561 | ||
562 | impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref | |
563 | for ReentrantMutexGuard<'a, R, G, T> | |
564 | { | |
565 | type Target = T; | |
566 | #[inline] | |
567 | fn deref(&self) -> &T { | |
568 | unsafe { &*self.remutex.data.get() } | |
569 | } | |
570 | } | |
571 | ||
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for ReentrantMutexGuard<'a, R, G, T>
{
    #[inline]
    fn drop(&mut self) {
        // Release one level of the reentrant lock; the underlying mutex is
        // only unlocked once the outermost guard is dropped.
        self.remutex.raw.unlock();
    }
}
580 | ||
581 | impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug | |
582 | for ReentrantMutexGuard<'a, R, G, T> | |
583 | { | |
584 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | |
585 | fmt::Debug::fmt(&**self, f) | |
586 | } | |
587 | } | |
588 | ||
589 | impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display | |
590 | for ReentrantMutexGuard<'a, R, G, T> | |
591 | { | |
592 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | |
593 | (**self).fmt(f) | |
594 | } | |
595 | } | |
596 | ||
#[cfg(feature = "owning_ref")]
// SAFETY: the guard dereferences to data stored inside the mutex; that
// address does not change while the guard is alive.
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for ReentrantMutexGuard<'a, R, G, T>
{
}
602 | ||
/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    // The shared raw lock state; used only to unlock on drop.
    raw: &'a RawReentrantMutex<R, G>,
    // Pointer to the mapped component of the protected data. A raw pointer
    // also makes this guard `!Send`, matching `ReentrantMutexGuard`.
    data: *const T,
    marker: PhantomData<&'a T>,
}
616 | ||
// SAFETY: sharing the guard across threads only exposes `&T` (via `Deref`),
// so requiring `T: Sync` makes the guard itself safe to share.
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}
621 | ||
622 | impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> | |
623 | MappedReentrantMutexGuard<'a, R, G, T> | |
624 | { | |
625 | /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data. | |
626 | /// | |
627 | /// This operation cannot fail as the `MappedReentrantMutexGuard` passed | |
628 | /// in already locked the mutex. | |
629 | /// | |
630 | /// This is an associated function that needs to be | |
631 | /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of | |
632 | /// the same name on the contents of the locked data. | |
633 | #[inline] | |
634 | pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U> | |
635 | where | |
636 | F: FnOnce(&T) -> &U, | |
637 | { | |
638 | let raw = s.raw; | |
639 | let data = f(unsafe { &*s.data }); | |
640 | mem::forget(s); | |
641 | MappedReentrantMutexGuard { | |
642 | raw, | |
643 | data, | |
644 | marker: PhantomData, | |
645 | } | |
646 | } | |
647 | ||
648 | /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the | |
649 | /// locked data. The original guard is return if the closure returns `None`. | |
650 | /// | |
651 | /// This operation cannot fail as the `MappedReentrantMutexGuard` passed | |
652 | /// in already locked the mutex. | |
653 | /// | |
654 | /// This is an associated function that needs to be | |
655 | /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of | |
656 | /// the same name on the contents of the locked data. | |
657 | #[inline] | |
658 | pub fn try_map<U: ?Sized, F>( | |
659 | s: Self, | |
660 | f: F, | |
661 | ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self> | |
662 | where | |
663 | F: FnOnce(&T) -> Option<&U>, | |
664 | { | |
665 | let raw = s.raw; | |
666 | let data = match f(unsafe { &*s.data }) { | |
667 | Some(data) => data, | |
668 | None => return Err(s), | |
669 | }; | |
670 | mem::forget(s); | |
671 | Ok(MappedReentrantMutexGuard { | |
672 | raw, | |
673 | data, | |
674 | marker: PhantomData, | |
675 | }) | |
676 | } | |
677 | } | |
678 | ||
679 | impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> | |
680 | MappedReentrantMutexGuard<'a, R, G, T> | |
681 | { | |
682 | /// Unlocks the mutex using a fair unlock protocol. | |
683 | /// | |
684 | /// By default, mutexes are unfair and allow the current thread to re-lock | |
685 | /// the mutex before another has the chance to acquire the lock, even if | |
686 | /// that thread has been blocked on the mutex for a long time. This is the | |
687 | /// default because it allows much higher throughput as it avoids forcing a | |
688 | /// context switch on every mutex unlock. This can result in one thread | |
689 | /// acquiring a mutex many more times than other threads. | |
690 | /// | |
691 | /// However in some cases it can be beneficial to ensure fairness by forcing | |
692 | /// the lock to pass on to a waiting thread if there is one. This is done by | |
693 | /// using this method instead of dropping the `ReentrantMutexGuard` normally. | |
694 | #[inline] | |
695 | pub fn unlock_fair(s: Self) { | |
696 | s.raw.unlock_fair(); | |
697 | mem::forget(s); | |
698 | } | |
699 | } | |
700 | ||
701 | impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref | |
702 | for MappedReentrantMutexGuard<'a, R, G, T> | |
703 | { | |
704 | type Target = T; | |
705 | #[inline] | |
706 | fn deref(&self) -> &T { | |
707 | unsafe { &*self.data } | |
708 | } | |
709 | } | |
710 | ||
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    #[inline]
    fn drop(&mut self) {
        // Release one level of the reentrant lock; the underlying mutex is
        // only unlocked once the outermost guard is dropped.
        self.raw.unlock();
    }
}
719 | ||
720 | impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug | |
721 | for MappedReentrantMutexGuard<'a, R, G, T> | |
722 | { | |
723 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | |
724 | fmt::Debug::fmt(&**self, f) | |
725 | } | |
726 | } | |
727 | ||
728 | impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display | |
729 | for MappedReentrantMutexGuard<'a, R, G, T> | |
730 | { | |
731 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | |
732 | (**self).fmt(f) | |
733 | } | |
734 | } | |
735 | ||
#[cfg(feature = "owning_ref")]
// SAFETY: the guard dereferences through a stored pointer into the protected
// data; that address does not change while the guard is alive.
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}