//! This module defines types which are thread-safe if cfg!(parallel_compiler) is true.
//!
//! `Lrc` is an alias of either `Rc` or `Arc`.
//!
//! `Lock` is a mutex.
//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `RwLock` is a read-write lock.
//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
//!
//! `MTRef` is an immutable reference if cfg!(parallel_compiler) is true, and a mutable
//! reference otherwise.
//!
//! `rustc_erase_owner!` erases an `OwningRef` owner into `Erased` or `Erased + Send + Sync`
//! depending on the value of cfg!(parallel_compiler).

use std::collections::HashMap;
use std::hash::{Hash, BuildHasher};
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};

use crate::owning_ref::{Erased, OwningRef};

pub fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
    where A: FnOnce() -> RA,
          B: FnOnce() -> RB
{
    (oper_a(), oper_b())
}

pub struct SerialScope;

impl SerialScope {
    pub fn spawn<F>(&self, f: F)
        where F: FnOnce(&SerialScope)
    {
        f(self)
    }
}

pub fn serial_scope<F, R>(f: F) -> R
    where F: FnOnce(&SerialScope) -> R
{
    f(&SerialScope)
}

pub use std::sync::atomic::Ordering::SeqCst;
pub use std::sync::atomic::Ordering;

cfg_if! {
    if #[cfg(not(parallel_compiler))] {
        pub auto trait Send {}
        pub auto trait Sync {}

        impl<T: ?Sized> Send for T {}
        impl<T: ?Sized> Sync for T {}

        #[macro_export]
        macro_rules! rustc_erase_owner {
            ($v:expr) => {
                $v.erase_owner()
            }
        }

        use std::ops::Add;
        use std::panic::{resume_unwind, catch_unwind, AssertUnwindSafe};

        /// This is a single-threaded variant of `AtomicCell` provided by crossbeam.
        /// Unlike `Atomic` this is intended for all `Copy` types,
        /// but it lacks the explicit ordering arguments.
        #[derive(Debug)]
        pub struct AtomicCell<T: Copy>(Cell<T>);

        impl<T: Copy> AtomicCell<T> {
            #[inline]
            pub fn new(v: T) -> Self {
                AtomicCell(Cell::new(v))
            }

            #[inline]
            pub fn get_mut(&mut self) -> &mut T {
                self.0.get_mut()
            }
        }

        impl<T: Copy> AtomicCell<T> {
            #[inline]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline]
            pub fn load(&self) -> T {
                self.0.get()
            }

            #[inline]
            pub fn store(&self, val: T) {
                self.0.set(val)
            }

            #[inline]
            pub fn swap(&self, val: T) -> T {
                self.0.replace(val)
            }
        }

        /// This is a single-threaded variant of `AtomicU64`, `AtomicUsize`, etc.
        /// It differs from `AtomicCell` in that it has explicit ordering arguments
        /// and is only intended for use with the native atomic types.
        /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases
        /// as it's not intended to be used separately.
        #[derive(Debug)]
        pub struct Atomic<T: Copy>(Cell<T>);

        impl<T: Copy> Atomic<T> {
            #[inline]
            pub fn new(v: T) -> Self {
                Atomic(Cell::new(v))
            }
        }

        impl<T: Copy> Atomic<T> {
            #[inline]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline]
            pub fn load(&self, _: Ordering) -> T {
                self.0.get()
            }

            #[inline]
            pub fn store(&self, val: T, _: Ordering) {
                self.0.set(val)
            }

            #[inline]
            pub fn swap(&self, val: T, _: Ordering) -> T {
                self.0.replace(val)
            }
        }

        impl<T: Copy + PartialEq> Atomic<T> {
            #[inline]
            pub fn compare_exchange(&self,
                                    current: T,
                                    new: T,
                                    _: Ordering,
                                    _: Ordering)
                                    -> Result<T, T> {
                let read = self.0.get();
                if read == current {
                    self.0.set(new);
                    Ok(read)
                } else {
                    Err(read)
                }
            }
        }

        impl<T: Add<Output=T> + Copy> Atomic<T> {
            #[inline]
            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
                let old = self.0.get();
                self.0.set(old + val);
                old
            }
        }

        pub type AtomicUsize = Atomic<usize>;
        pub type AtomicBool = Atomic<bool>;
        pub type AtomicU32 = Atomic<u32>;
        pub type AtomicU64 = Atomic<u64>;

        pub use self::serial_join as join;
        pub use self::serial_scope as scope;

        #[macro_export]
        macro_rules! parallel {
            ($($blocks:tt),*) => {
                // We catch panics here ensuring that all the blocks execute.
                // This makes behavior consistent with the parallel compiler.
                let mut panic = None;
                $(
                    if let Err(p) = ::std::panic::catch_unwind(
                        ::std::panic::AssertUnwindSafe(|| $blocks)
                    ) {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                    }
                )*
                if let Some(panic) = panic {
                    ::std::panic::resume_unwind(panic);
                }
            }
        }

        pub use std::iter::Iterator as ParallelIterator;

        pub fn par_iter<T: IntoIterator>(t: T) -> T::IntoIter {
            t.into_iter()
        }

        pub fn par_for_each_in<T: IntoIterator>(
            t: T,
            for_each:
                impl Fn(<<T as IntoIterator>::IntoIter as Iterator>::Item) + Sync + Send
        ) {
            // We catch panics here ensuring that all the loop iterations execute.
            // This makes behavior consistent with the parallel compiler.
            let mut panic = None;
            t.into_iter().for_each(|i| {
                if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
                    if panic.is_none() {
                        panic = Some(p);
                    }
                }
            });
            if let Some(panic) = panic {
                resume_unwind(panic);
            }
        }

        pub type MetadataRef = OwningRef<Box<dyn Erased>, [u8]>;

        pub use std::rc::Rc as Lrc;
        pub use std::rc::Weak as Weak;
        pub use std::cell::Ref as ReadGuard;
        pub use std::cell::Ref as MappedReadGuard;
        pub use std::cell::RefMut as WriteGuard;
        pub use std::cell::RefMut as MappedWriteGuard;
        pub use std::cell::RefMut as LockGuard;
        pub use std::cell::RefMut as MappedLockGuard;

        use std::cell::RefCell as InnerRwLock;
        use std::cell::RefCell as InnerLock;

        use std::cell::Cell;

        #[derive(Debug)]
        pub struct WorkerLocal<T>(OneThread<T>);

        impl<T> WorkerLocal<T> {
            /// Creates a new worker local where the given closure computes the
            /// value this worker local should take for each thread in the thread pool.
            #[inline]
            pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
                WorkerLocal(OneThread::new(f(0)))
            }

            /// Returns the worker-local value for each thread.
            #[inline]
            pub fn into_inner(self) -> Vec<T> {
                vec![OneThread::into_inner(self.0)]
            }
        }

        impl<T> Deref for WorkerLocal<T> {
            type Target = T;

            #[inline(always)]
            fn deref(&self) -> &T {
                &*self.0
            }
        }

        pub type MTRef<'a, T> = &'a mut T;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(T);

        impl<T> MTLock<T> {
            #[inline(always)]
            pub fn new(inner: T) -> Self {
                MTLock(inner)
            }

            #[inline(always)]
            pub fn into_inner(self) -> T {
                self.0
            }

            #[inline(always)]
            pub fn get_mut(&mut self) -> &mut T {
                &mut self.0
            }

            #[inline(always)]
            pub fn lock(&self) -> &T {
                &self.0
            }

            #[inline(always)]
            pub fn lock_mut(&mut self) -> &mut T {
                &mut self.0
            }
        }

        // FIXME: Probably a bad idea (in the threaded case)
        impl<T: Clone> Clone for MTLock<T> {
            #[inline]
            fn clone(&self) -> Self {
                MTLock(self.0.clone())
            }
        }
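
        // A minimal sketch (added for illustration, not part of the original
        // source): without the parallel compiler, `MTLock` is a zero-cost
        // wrapper and "locking" is just a borrow of the inner value.
        #[cfg(test)]
        mod mtlock_example {
            use super::MTLock;

            #[test]
            fn lock_is_a_plain_borrow() {
                let mut l = MTLock::new(vec![1, 2]);
                assert_eq!(l.lock().len(), 2);
                l.lock_mut().push(3);
                assert_eq!(l.into_inner(), [1, 2, 3]);
            }
        }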
    } else {
        pub use std::marker::Send as Send;
        pub use std::marker::Sync as Sync;

        pub use parking_lot::RwLockReadGuard as ReadGuard;
        pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
        pub use parking_lot::RwLockWriteGuard as WriteGuard;
        pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;

        pub use parking_lot::MutexGuard as LockGuard;
        pub use parking_lot::MappedMutexGuard as MappedLockGuard;

        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};

        pub use crossbeam_utils::atomic::AtomicCell;

        pub use std::sync::Arc as Lrc;
        pub use std::sync::Weak as Weak;

        pub type MTRef<'a, T> = &'a T;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(Lock<T>);

        impl<T> MTLock<T> {
            #[inline(always)]
            pub fn new(inner: T) -> Self {
                MTLock(Lock::new(inner))
            }

            #[inline(always)]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline(always)]
            pub fn get_mut(&mut self) -> &mut T {
                self.0.get_mut()
            }

            #[inline(always)]
            pub fn lock(&self) -> LockGuard<'_, T> {
                self.0.lock()
            }

            #[inline(always)]
            pub fn lock_mut(&self) -> LockGuard<'_, T> {
                self.lock()
            }
        }

        use parking_lot::Mutex as InnerLock;
        use parking_lot::RwLock as InnerRwLock;

        use std;
        use std::thread;
        pub use rayon::{join, scope};

        /// Runs a list of blocks in parallel. The first block is executed immediately on
        /// the current thread. Use that for the longest running block.
        #[macro_export]
        macro_rules! parallel {
            (impl $fblock:tt [$($c:tt,)*] [$block:tt $(, $rest:tt)*]) => {
                parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
            };
            (impl $fblock:tt [$($blocks:tt,)*] []) => {
                ::rustc_data_structures::sync::scope(|s| {
                    $(
                        s.spawn(|_| $blocks);
                    )*
                    $fblock;
                })
            };
            ($fblock:tt, $($blocks:tt),*) => {
                // Reverse the order of the later blocks since Rayon executes them in reverse order
                // when using a single thread. This ensures the execution order matches that
                // of a single-threaded rustc.
                parallel!(impl $fblock [] [$($blocks),*]);
            };
        }

        pub use rayon_core::WorkerLocal;

        pub use rayon::iter::ParallelIterator;
        use rayon::iter::IntoParallelIterator;

        pub fn par_iter<T: IntoParallelIterator>(t: T) -> T::Iter {
            t.into_par_iter()
        }

        pub fn par_for_each_in<T: IntoParallelIterator>(
            t: T,
            for_each: impl Fn(
                <<T as IntoParallelIterator>::Iter as ParallelIterator>::Item
            ) + Sync + Send
        ) {
            t.into_par_iter().for_each(for_each)
        }

        pub type MetadataRef = OwningRef<Box<dyn Erased + Send + Sync>, [u8]>;

        /// This makes locks panic if they are already held.
        /// It is only useful when you are running in a single thread.
        const ERROR_CHECKING: bool = false;

        #[macro_export]
        macro_rules! rustc_erase_owner {
            ($v:expr) => {{
                let v = $v;
                ::rustc_data_structures::sync::assert_send_val(&v);
                v.erase_send_sync_owner()
            }}
        }
    }
}
426 | ||
427 | pub fn assert_sync<T: ?Sized + Sync>() {} | |
0731742a | 428 | pub fn assert_send<T: ?Sized + Send>() {} |
0531ce1d | 429 | pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {} |
ff7c6d11 XL |
430 | pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {} |
431 | ||
pub trait HashMapExt<K, V> {
    /// Same as HashMap::insert, but it may panic if there's already an
    /// entry for `key` with a value not equal to `value`.
    fn insert_same(&mut self, key: K, value: V);
}

impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
    fn insert_same(&mut self, key: K, value: V) {
        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
    }
}

/// A type whose inner value can be written once and then will stay read-only.
// This contains a PhantomData<T> since this type conceptually owns a T outside the Mutex once
// initialized. This ensures that Once<T> is Sync only if T is. If we did not have PhantomData<T>
// we could send a &Once<Cell<bool>> to multiple threads and call `get` on it to get access
// to &Cell<bool> on those threads.
pub struct Once<T>(Lock<Option<T>>, PhantomData<T>);

impl<T> Once<T> {
    /// Creates an Once value which is uninitialized.
    #[inline(always)]
    pub fn new() -> Self {
        Once(Lock::new(None), PhantomData)
    }

    /// Consumes the value and returns Some(T) if it was initialized.
    #[inline(always)]
    pub fn into_inner(self) -> Option<T> {
        self.0.into_inner()
    }

    /// Tries to initialize the inner value to `value`.
    /// Returns `None` if the inner value was uninitialized and `value` was consumed setting it;
    /// otherwise, if the inner value was already set, it returns `value` back to the caller.
    #[inline]
    pub fn try_set(&self, value: T) -> Option<T> {
        let mut lock = self.0.lock();
        if lock.is_some() {
            return Some(value);
        }
        *lock = Some(value);
        None
    }

    /// Tries to initialize the inner value to `value`.
    /// Returns `None` if the inner value was uninitialized and `value` was consumed setting it;
    /// otherwise, if the inner value was already set, it asserts that `value` is equal to the
    /// inner value and then returns `value` back to the caller.
    #[inline]
    pub fn try_set_same(&self, value: T) -> Option<T> where T: Eq {
        let mut lock = self.0.lock();
        if let Some(ref inner) = *lock {
            assert!(*inner == value);
            return Some(value);
        }
        *lock = Some(value);
        None
    }

    /// Tries to initialize the inner value to `value` and panics if it was already initialized.
    #[inline]
    pub fn set(&self, value: T) {
        assert!(self.try_set(value).is_none());
    }

    /// Tries to initialize the inner value by calling the closure while ensuring that no one
    /// else can access the value in the meantime by holding a lock for the duration of the
    /// closure. If the value was already initialized the closure is not called and `false` is
    /// returned; otherwise, the value from the closure initializes the inner value and `true`
    /// is returned.
    #[inline]
    pub fn init_locking<F: FnOnce() -> T>(&self, f: F) -> bool {
        let mut lock = self.0.lock();
        if lock.is_some() {
            return false;
        }
        *lock = Some(f());
        true
    }

    /// Tries to initialize the inner value by calling the closure without ensuring that no one
    /// else can access it. This means that when this is called from multiple threads, multiple
    /// closures may concurrently be computing a value which the inner value should take.
    /// Only one of these closures is used to actually initialize the value.
    /// If some other closure already set the value,
    /// we return the value our closure computed wrapped in an `Option`.
    /// If our closure set the value, `None` is returned.
    /// If the value is already initialized, the closure is not called and `None` is returned.
    #[inline]
    pub fn init_nonlocking<F: FnOnce() -> T>(&self, f: F) -> Option<T> {
        if self.0.lock().is_some() {
            None
        } else {
            self.try_set(f())
        }
    }

    /// Tries to initialize the inner value by calling the closure without ensuring that no one
    /// else can access it. This means that when this is called from multiple threads, multiple
    /// closures may concurrently be computing a value which the inner value should take.
    /// Only one of these closures is used to actually initialize the value.
    /// If some other closure already set the value, we assert that the value our closure
    /// computed is equal to the value already set and then
    /// we return the value our closure computed wrapped in an `Option`.
    /// If our closure set the value, `None` is returned.
    /// If the value is already initialized, the closure is not called and `None` is returned.
    #[inline]
    pub fn init_nonlocking_same<F: FnOnce() -> T>(&self, f: F) -> Option<T> where T: Eq {
        if self.0.lock().is_some() {
            None
        } else {
            self.try_set_same(f())
        }
    }

    /// Tries to get a reference to the inner value; returns `None` if it is not yet initialized.
    #[inline(always)]
    pub fn try_get(&self) -> Option<&T> {
        let lock = &*self.0.lock();
        if let Some(ref inner) = *lock {
            // This is safe since we won't mutate the inner value
            unsafe { Some(&*(inner as *const T)) }
        } else {
            None
        }
    }

    /// Gets a reference to the inner value; panics if it is not yet initialized.
    #[inline(always)]
    pub fn get(&self) -> &T {
        self.try_get().expect("value was not set")
    }

    /// Gets a reference to the inner value; panics if it is not yet initialized.
    #[inline(always)]
    pub fn borrow(&self) -> &T {
        self.get()
    }
}

#[derive(Debug)]
pub struct Lock<T>(InnerLock<T>);

impl<T> Lock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        Lock(InnerLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_lock()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_borrow_mut().ok()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn lock(&self) -> LockGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_lock().expect("lock was already held")
        } else {
            self.0.lock()
        }
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn lock(&self) -> LockGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[inline(always)]
    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.lock())
    }

    #[inline(always)]
    pub fn borrow(&self) -> LockGuard<'_, T> {
        self.lock()
    }

    #[inline(always)]
    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
        self.lock()
    }
}

impl<T: Default> Default for Lock<T> {
    #[inline]
    fn default() -> Self {
        Lock::new(T::default())
    }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for Lock<T> {
    #[inline]
    fn clone(&self) -> Self {
        Lock::new(self.borrow().clone())
    }
}

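// A minimal usage sketch (added for illustration, not part of the original
// source): the same `Lock` API works in both builds; only the guard type and
// the blocking behavior differ.
#[cfg(test)]
mod lock_example {
    use super::Lock;

    #[test]
    fn lock_and_with_lock() {
        let lock = Lock::new(0);
        *lock.lock() += 1;
        lock.with_lock(|v| *v += 1);
        assert_eq!(lock.into_inner(), 2);
        // Note: a second `lock()` while a guard is alive would deadlock
        // (parking_lot) or panic (RefCell), so guards are kept short-lived.
    }
}
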
#[derive(Debug)]
pub struct RwLock<T>(InnerRwLock<T>);

impl<T> RwLock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        RwLock(InnerRwLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        self.0.borrow()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_read().expect("lock was already held")
        } else {
            self.0.read()
        }
    }

    #[inline(always)]
    pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
        f(&*self.read())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_borrow_mut().map_err(|_| ())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_write().ok_or(())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_write().expect("lock was already held")
        } else {
            self.0.write()
        }
    }

    #[inline(always)]
    pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.write())
    }

    #[inline(always)]
    pub fn borrow(&self) -> ReadGuard<'_, T> {
        self.read()
    }

    #[inline(always)]
    pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
        self.write()
    }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for RwLock<T> {
    #[inline]
    fn clone(&self) -> Self {
        RwLock::new(self.borrow().clone())
    }
}

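// A minimal usage sketch (added for illustration, not part of the original
// source): several read guards may be held at once; in the RefCell build a
// write while any guard is alive panics rather than blocking.
#[cfg(test)]
mod rwlock_example {
    use super::RwLock;

    #[test]
    fn readers_then_writer() {
        let lock = RwLock::new(vec![1, 2]);
        {
            let r1 = lock.read();
            let r2 = lock.read(); // concurrent reads are allowed
            assert_eq!(r1.len() + r2.len(), 4);
        } // all read guards dropped before writing
        lock.write().push(3);
        assert_eq!(lock.into_inner(), [1, 2, 3]);
    }
}
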
/// A type which only allows its inner value to be used in one thread.
/// It will panic if it is used on multiple threads.
#[derive(Copy, Clone, Hash, Debug, Eq, PartialEq)]
pub struct OneThread<T> {
    #[cfg(parallel_compiler)]
    thread: thread::ThreadId,
    inner: T,
}

#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Sync for OneThread<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Send for OneThread<T> {}

impl<T> OneThread<T> {
    #[inline(always)]
    fn check(&self) {
        #[cfg(parallel_compiler)]
        assert_eq!(thread::current().id(), self.thread);
    }

    #[inline(always)]
    pub fn new(inner: T) -> Self {
        OneThread {
            #[cfg(parallel_compiler)]
            thread: thread::current().id(),
            inner,
        }
    }

    #[inline(always)]
    pub fn into_inner(value: Self) -> T {
        value.check();
        value.inner
    }
}

impl<T> Deref for OneThread<T> {
    type Target = T;

    fn deref(&self) -> &T {
        self.check();
        &self.inner
    }
}

impl<T> DerefMut for OneThread<T> {
    fn deref_mut(&mut self) -> &mut T {
        self.check();
        &mut self.inner
    }
}
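
// A minimal usage sketch (added for illustration, not part of the original
// source): `OneThread` records the creating thread (in the parallel build)
// and asserts on every access that it is still on that thread.
#[cfg(test)]
mod one_thread_example {
    use super::OneThread;

    #[test]
    fn access_on_owning_thread_is_fine() {
        let mut v = OneThread::new(vec![1]);
        v.push(2); // DerefMut runs the thread check, then forwards
        assert_eq!(OneThread::into_inner(v), [1, 2]);
    }
}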