//! This module defines various operations and types that are implemented in
//! one way for the serial compiler, and another way for the parallel compiler.
//!
//! Operations
//! ----------
//! The parallel versions of operations use Rayon to execute code in parallel,
//! while the serial versions degenerate straightforwardly to serial execution.
//! The operations include `join`, `parallel`, `par_iter`, and `par_for_each`.
//!
//! Types
//! -----
//! The parallel versions of types provide various kinds of synchronization,
//! while the serial compiler versions do not.
//!
//! The following table shows how the types are implemented internally. Except
//! where noted otherwise, the type in column one is defined as a
//! newtype around the type from column two or three.
//!
//! | Type                    | Serial version      | Parallel version                |
//! | ----------------------- | ------------------- | ------------------------------- |
//! | `Lrc<T>`                | `rc::Rc<T>`         | `sync::Arc<T>`                  |
//! | `Weak<T>`               | `rc::Weak<T>`       | `sync::Weak<T>`                 |
//! |                         |                     |                                 |
//! | `AtomicBool`            | `Cell<bool>`        | `atomic::AtomicBool`            |
//! | `AtomicU32`             | `Cell<u32>`         | `atomic::AtomicU32`             |
//! | `AtomicU64`             | `Cell<u64>`         | `atomic::AtomicU64`             |
//! | `AtomicUsize`           | `Cell<usize>`       | `atomic::AtomicUsize`           |
//! |                         |                     |                                 |
//! | `Lock<T>`               | `RefCell<T>`        | `parking_lot::Mutex<T>`         |
//! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
//! | `MTLock<T>` [^1]        | `T`                 | `Lock<T>`                       |
//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
//! |                         |                     |                                 |
//! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
//!
//! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost
//! of a `RefCell`. This is appropriate when interior mutability is not
//! required.
//!
//! [^2]: `MTLockRef` is a typedef.
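//!
//! An illustrative sketch of how these wrappers read at a use site; the same
//! code compiles against `Rc`/`RefCell` in the serial compiler and
//! `Arc`/`Mutex` in the parallel one (the values are hypothetical):
//!
//! ```ignore (illustrative-sketch)
//! use rustc_data_structures::sync::{join, Lock, Lrc};
//!
//! let shared = Lrc::new(Lock::new(Vec::new()));
//! let (a, b) = join(|| 1 + 1, || 2 + 2);
//! shared.lock().push(a + b);
//! assert_eq!(*shared.lock(), vec![6]);
//! ```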

pub use crate::marker::*;
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::ops::{Deref, DerefMut};
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};

mod worker_local;
pub use worker_local::{Registry, WorkerLocal};

pub use std::sync::atomic::Ordering;
pub use std::sync::atomic::Ordering::SeqCst;

pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};

mod vec;

mod mode {
    use super::Ordering;
    use std::sync::atomic::AtomicU8;

    const UNINITIALIZED: u8 = 0;
    const DYN_NOT_THREAD_SAFE: u8 = 1;
    const DYN_THREAD_SAFE: u8 = 2;

    static DYN_THREAD_SAFE_MODE: AtomicU8 = AtomicU8::new(UNINITIALIZED);

    // Whether thread safety is enabled (due to running under multiple threads).
    #[inline]
    pub fn is_dyn_thread_safe() -> bool {
        match DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) {
            DYN_NOT_THREAD_SAFE => false,
            DYN_THREAD_SAFE => true,
            _ => panic!("uninitialized dyn_thread_safe mode!"),
        }
    }

    // Only set by the `-Z threads` compile option.
    pub fn set_dyn_thread_safe_mode(mode: bool) {
        let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE };
        let previous = DYN_THREAD_SAFE_MODE.compare_exchange(
            UNINITIALIZED,
            set,
            Ordering::Relaxed,
            Ordering::Relaxed,
        );

        // Check that the mode was either uninitialized or was already set to the requested mode.
        assert!(previous.is_ok() || previous == Err(set));
    }
}

pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
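
// A hypothetical call sequence, assuming the driver maps `-Z threads=N` to a
// boolean before anything queries the mode; reading the mode before it is set
// panics, and re-initializing it with a different value trips the assertion in
// `set_dyn_thread_safe_mode`:
//
//     rustc_data_structures::sync::set_dyn_thread_safe_mode(threads > 1);
//     if rustc_data_structures::sync::is_dyn_thread_safe() {
//         // take the Rayon-backed code paths
//     }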

cfg_if! {
    if #[cfg(not(parallel_compiler))] {
        pub unsafe auto trait Send {}
        pub unsafe auto trait Sync {}

        unsafe impl<T> Send for T {}
        unsafe impl<T> Sync for T {}

        use std::ops::Add;

        /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
        /// It has explicit ordering arguments and is only intended for use with
        /// the native atomic types.
        /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases
        /// as it's not intended to be used separately.
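        ///
        /// A minimal illustrative sketch of the shared surface; the calls below
        /// mirror the methods defined in this block (the values are hypothetical):
        ///
        /// ```ignore (illustrative-sketch)
        /// let x = AtomicUsize::new(0);
        /// x.fetch_add(1, Ordering::SeqCst);
        /// assert_eq!(x.load(Ordering::SeqCst), 1);
        /// ```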
        #[derive(Debug, Default)]
        pub struct Atomic<T: Copy>(Cell<T>);

        impl<T: Copy> Atomic<T> {
            #[inline]
            pub fn new(v: T) -> Self {
                Atomic(Cell::new(v))
            }

            #[inline]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline]
            pub fn load(&self, _: Ordering) -> T {
                self.0.get()
            }

            #[inline]
            pub fn store(&self, val: T, _: Ordering) {
                self.0.set(val)
            }

            #[inline]
            pub fn swap(&self, val: T, _: Ordering) -> T {
                self.0.replace(val)
            }
        }

        impl Atomic<bool> {
            pub fn fetch_or(&self, val: bool, _: Ordering) -> bool {
                let result = self.0.get() | val;
                // Store the OR of the old value and `val` (not just `val`), so
                // the semantics match `AtomicBool::fetch_or`.
                self.0.set(result);
                result
            }
        }

        impl<T: Copy + PartialEq> Atomic<T> {
            #[inline]
            pub fn compare_exchange(&self,
                                    current: T,
                                    new: T,
                                    _: Ordering,
                                    _: Ordering)
                                    -> Result<T, T> {
                let read = self.0.get();
                if read == current {
                    self.0.set(new);
                    Ok(read)
                } else {
                    Err(read)
                }
            }
        }

        impl<T: Add<Output=T> + Copy> Atomic<T> {
            #[inline]
            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
                let old = self.0.get();
                self.0.set(old + val);
                old
            }
        }

        pub type AtomicUsize = Atomic<usize>;
        pub type AtomicBool = Atomic<bool>;
        pub type AtomicU32 = Atomic<u32>;
        pub type AtomicU64 = Atomic<u64>;

        pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
            where A: FnOnce() -> RA,
                  B: FnOnce() -> RB
        {
            (oper_a(), oper_b())
        }

        #[macro_export]
        macro_rules! parallel {
            ($($blocks:block),*) => {
                // We catch panics here ensuring that all the blocks execute.
                // This makes behavior consistent with the parallel compiler.
                let mut panic = None;
                $(
                    if let Err(p) = ::std::panic::catch_unwind(
                        ::std::panic::AssertUnwindSafe(|| $blocks)
                    ) {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                    }
                )*
                if let Some(panic) = panic {
                    ::std::panic::resume_unwind(panic);
                }
            }
        }

        pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
            // We catch panics here ensuring that all the loop iterations execute.
            // This makes behavior consistent with the parallel compiler.
            let mut panic = None;
            t.into_iter().for_each(|i| {
                if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
                    if panic.is_none() {
                        panic = Some(p);
                    }
                }
            });
            if let Some(panic) = panic {
                resume_unwind(panic);
            }
        }

        pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
            t: T,
            mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
        ) -> C {
            // We catch panics here ensuring that all the loop iterations execute.
            let mut panic = None;
            let r = t.into_iter().filter_map(|i| {
                match catch_unwind(AssertUnwindSafe(|| map(i))) {
                    Ok(r) => Some(r),
                    Err(p) => {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                        None
                    }
                }
            }).collect();
            if let Some(panic) = panic {
                resume_unwind(panic);
            }
            r
        }

        pub use std::rc::Rc as Lrc;
        pub use std::rc::Weak as Weak;
        pub use std::cell::Ref as ReadGuard;
        pub use std::cell::Ref as MappedReadGuard;
        pub use std::cell::RefMut as WriteGuard;
        pub use std::cell::RefMut as MappedWriteGuard;
        pub use std::cell::RefMut as LockGuard;
        pub use std::cell::RefMut as MappedLockGuard;

        pub use std::cell::OnceCell;

        use std::cell::RefCell as InnerRwLock;
        use std::cell::RefCell as InnerLock;

        use std::cell::Cell;

        pub type MTLockRef<'a, T> = &'a mut MTLock<T>;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(T);

        impl<T> MTLock<T> {
            #[inline(always)]
            pub fn new(inner: T) -> Self {
                MTLock(inner)
            }

            #[inline(always)]
            pub fn into_inner(self) -> T {
                self.0
            }

            #[inline(always)]
            pub fn get_mut(&mut self) -> &mut T {
                &mut self.0
            }

            #[inline(always)]
            pub fn lock(&self) -> &T {
                &self.0
            }

            #[inline(always)]
            pub fn lock_mut(&mut self) -> &mut T {
                &mut self.0
            }
        }

        // FIXME: Probably a bad idea (in the threaded case)
        impl<T: Clone> Clone for MTLock<T> {
            #[inline]
            fn clone(&self) -> Self {
                MTLock(self.0.clone())
            }
        }
    } else {
        pub use std::marker::Send as Send;
        pub use std::marker::Sync as Sync;

        pub use parking_lot::RwLockReadGuard as ReadGuard;
        pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
        pub use parking_lot::RwLockWriteGuard as WriteGuard;
        pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;

        pub use parking_lot::MutexGuard as LockGuard;
        pub use parking_lot::MappedMutexGuard as MappedLockGuard;

        pub use std::sync::OnceLock as OnceCell;

        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};

        pub use std::sync::Arc as Lrc;
        pub use std::sync::Weak as Weak;

        pub type MTLockRef<'a, T> = &'a MTLock<T>;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(Lock<T>);

        impl<T> MTLock<T> {
            #[inline(always)]
            pub fn new(inner: T) -> Self {
                MTLock(Lock::new(inner))
            }

            #[inline(always)]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline(always)]
            pub fn get_mut(&mut self) -> &mut T {
                self.0.get_mut()
            }

            #[inline(always)]
            pub fn lock(&self) -> LockGuard<'_, T> {
                self.0.lock()
            }

            #[inline(always)]
            pub fn lock_mut(&self) -> LockGuard<'_, T> {
                self.lock()
            }
        }

        use parking_lot::Mutex as InnerLock;
        use parking_lot::RwLock as InnerRwLock;

        use std::thread;

        #[inline]
        pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
        where
            A: FnOnce() -> RA + DynSend,
            B: FnOnce() -> RB + DynSend,
        {
            if mode::is_dyn_thread_safe() {
                let oper_a = FromDyn::from(oper_a);
                let oper_b = FromDyn::from(oper_b);
                let (a, b) = rayon::join(
                    move || FromDyn::from(oper_a.into_inner()()),
                    move || FromDyn::from(oper_b.into_inner()()),
                );
                (a.into_inner(), b.into_inner())
            } else {
                (oper_a(), oper_b())
            }
        }

        // This function only works when `mode::is_dyn_thread_safe()`.
        pub fn scope<'scope, OP, R>(op: OP) -> R
        where
            OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
            R: DynSend,
        {
            let op = FromDyn::from(op);
            rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
        }

        /// Runs a list of blocks in parallel. The first block is executed immediately on
        /// the current thread. Use that for the longest running block.
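        ///
        /// An illustrative call shape (the block bodies are hypothetical):
        ///
        /// ```ignore (illustrative-sketch)
        /// parallel!({ check_crate() }, { lint_crate() }, { collect_items() });
        /// ```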
        #[macro_export]
        macro_rules! parallel {
            (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
                parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
            };
            (impl $fblock:block [$($blocks:expr,)*] []) => {
                ::rustc_data_structures::sync::scope(|s| {
                    $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
                    s.spawn(move |_| block.into_inner()());)*
                    (|| $fblock)();
                });
            };
            ($fblock:block, $($blocks:block),*) => {
                if rustc_data_structures::sync::is_dyn_thread_safe() {
                    // Reverse the order of the later blocks since Rayon executes them in reverse order
                    // when using a single thread. This ensures the execution order matches that
                    // of a single threaded rustc.
                    parallel!(impl $fblock [] [$($blocks),*]);
                } else {
                    // We catch panics here ensuring that all the blocks execute.
                    // This makes behavior consistent with the parallel compiler.
                    let mut panic = None;
                    if let Err(p) = ::std::panic::catch_unwind(
                        ::std::panic::AssertUnwindSafe(|| $fblock)
                    ) {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                    }
                    $(
                        if let Err(p) = ::std::panic::catch_unwind(
                            ::std::panic::AssertUnwindSafe(|| $blocks)
                        ) {
                            if panic.is_none() {
                                panic = Some(p);
                            }
                        }
                    )*
                    if let Some(panic) = panic {
                        ::std::panic::resume_unwind(panic);
                    }
                }
            };
        }

        use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};

        pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
            t: T,
            for_each: impl Fn(I) + DynSync + DynSend
        ) {
            if mode::is_dyn_thread_safe() {
                let for_each = FromDyn::from(for_each);
                let panic: Lock<Option<_>> = Lock::new(None);
                t.into_par_iter().for_each(|i| if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
                    let mut l = panic.lock();
                    if l.is_none() {
                        *l = Some(p)
                    }
                });

                if let Some(panic) = panic.into_inner() {
                    resume_unwind(panic);
                }
            } else {
                // We catch panics here ensuring that all the loop iterations execute.
                // This makes behavior consistent with the parallel compiler.
                let mut panic = None;
                t.into_iter().for_each(|i| {
                    if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                    }
                });
                if let Some(panic) = panic {
                    resume_unwind(panic);
                }
            }
        }

        pub fn par_map<
            I,
            T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
            R: std::marker::Send,
            C: FromIterator<R> + FromParallelIterator<R>
        >(
            t: T,
            map: impl Fn(I) -> R + DynSync + DynSend
        ) -> C {
            if mode::is_dyn_thread_safe() {
                let panic: Lock<Option<_>> = Lock::new(None);
                let map = FromDyn::from(map);
                // We catch panics here ensuring that all the loop iterations execute.
                let r = t.into_par_iter().filter_map(|i| {
                    match catch_unwind(AssertUnwindSafe(|| map(i))) {
                        Ok(r) => Some(r),
                        Err(p) => {
                            let mut l = panic.lock();
                            if l.is_none() {
                                *l = Some(p);
                            }
                            None
                        },
                    }
                }).collect();

                if let Some(panic) = panic.into_inner() {
                    resume_unwind(panic);
                }
                r
            } else {
                // We catch panics here ensuring that all the loop iterations execute.
                let mut panic = None;
                let r = t.into_iter().filter_map(|i| {
                    match catch_unwind(AssertUnwindSafe(|| map(i))) {
                        Ok(r) => Some(r),
                        Err(p) => {
                            if panic.is_none() {
                                panic = Some(p);
                            }
                            None
                        }
                    }
                }).collect();
                if let Some(panic) = panic {
                    resume_unwind(panic);
                }
                r
            }
        }

        /// This makes locks panic if they are already held.
        /// It is only useful when you are running in a single thread.
        const ERROR_CHECKING: bool = false;
    }
}

#[derive(Default)]
#[cfg_attr(parallel_compiler, repr(align(64)))]
pub struct CacheAligned<T>(pub T);

pub trait HashMapExt<K, V> {
    /// Same as HashMap::insert, but it may panic if there's already an
    /// entry for `key` with a value not equal to `value`.
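    ///
    /// An illustrative sketch of the contract (the values are hypothetical):
    ///
    /// ```ignore (illustrative-sketch)
    /// let mut map = std::collections::HashMap::new();
    /// map.insert_same("key", 1);
    /// map.insert_same("key", 1); // fine: the existing entry has an equal value
    /// // map.insert_same("key", 2); // would panic: the values differ
    /// ```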
    fn insert_same(&mut self, key: K, value: V);
}

impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
    fn insert_same(&mut self, key: K, value: V) {
        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
    }
}

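/// An illustrative sketch of typical use; in the serial compiler this wraps a
/// `RefCell`, in the parallel compiler a `parking_lot::Mutex` (the values are
/// hypothetical):
///
/// ```ignore (illustrative-sketch)
/// let counter = Lock::new(0u32);
/// *counter.lock() += 1;
/// assert_eq!(counter.into_inner(), 1);
/// ```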
#[derive(Debug)]
pub struct Lock<T>(InnerLock<T>);

impl<T> Lock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        Lock(InnerLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_lock()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_borrow_mut().ok()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    #[track_caller]
    pub fn lock(&self) -> LockGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_lock().expect("lock was already held")
        } else {
            self.0.lock()
        }
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn lock(&self) -> LockGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.lock())
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow(&self) -> LockGuard<'_, T> {
        self.lock()
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
        self.lock()
    }
}

impl<T: Default> Default for Lock<T> {
    #[inline]
    fn default() -> Self {
        Lock::new(T::default())
    }
}

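/// An illustrative sketch of typical use; in the serial compiler both `read`
/// and `write` are `RefCell` borrows, in the parallel compiler they take a
/// `parking_lot::RwLock` (the values are hypothetical):
///
/// ```ignore (illustrative-sketch)
/// let table = RwLock::new(vec![1, 2, 3]);
/// assert_eq!(table.read().len(), 3);
/// table.write().push(4);
/// ```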
#[derive(Debug, Default)]
pub struct RwLock<T>(InnerRwLock<T>);

impl<T> RwLock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        RwLock(InnerRwLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn read(&self) -> ReadGuard<'_, T> {
        self.0.borrow()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_read().expect("lock was already held")
        } else {
            self.0.read()
        }
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
        f(&*self.read())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_borrow_mut().map_err(|_| ())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_write().ok_or(())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn write(&self) -> WriteGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_write().expect("lock was already held")
        } else {
            self.0.write()
        }
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.write())
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow(&self) -> ReadGuard<'_, T> {
        self.read()
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
        self.write()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn leak(&self) -> &T {
        ReadGuard::leak(self.read())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn leak(&self) -> &T {
        let guard = self.read();
        let ret = unsafe { &*(&*guard as *const T) };
        std::mem::forget(guard);
        ret
    }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for RwLock<T> {
    #[inline]
    fn clone(&self) -> Self {
        RwLock::new(self.borrow().clone())
    }
}

/// A type which only allows its inner value to be used in one thread.
/// It will panic if it is used on multiple threads.
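///
/// An illustrative sketch; the thread check is only compiled in for the
/// parallel compiler, and the value below is hypothetical:
///
/// ```ignore (illustrative-sketch)
/// let name = OneThread::new(String::from("rustc"));
/// assert_eq!(name.len(), 5); // `Deref` asserts we are still on the creating thread
/// ```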
#[derive(Debug)]
pub struct OneThread<T> {
    #[cfg(parallel_compiler)]
    thread: thread::ThreadId,
    inner: T,
}

#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Sync for OneThread<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Send for OneThread<T> {}

impl<T> OneThread<T> {
    #[inline(always)]
    fn check(&self) {
        #[cfg(parallel_compiler)]
        assert_eq!(thread::current().id(), self.thread);
    }

    #[inline(always)]
    pub fn new(inner: T) -> Self {
        OneThread {
            #[cfg(parallel_compiler)]
            thread: thread::current().id(),
            inner,
        }
    }

    #[inline(always)]
    pub fn into_inner(value: Self) -> T {
        value.check();
        value.inner
    }
}

impl<T> Deref for OneThread<T> {
    type Target = T;

    fn deref(&self) -> &T {
        self.check();
        &self.inner
    }
}

impl<T> DerefMut for OneThread<T> {
    fn deref_mut(&mut self) -> &mut T {
        self.check();
        &mut self.inner
    }
}