//! This module defines types which are thread safe if cfg!(parallel_compiler) is true.
//!
//! `Lrc` is an alias of `Arc` if cfg!(parallel_compiler) is true, `Rc` otherwise.
//!
//! `Lock` is a mutex.
//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `RwLock` is a read-write lock.
//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
//! `RefCell` otherwise.
//!
//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
//!
//! `MTRef` is an immutable reference if cfg!(parallel_compiler), and a mutable reference otherwise.
//!
//! `rustc_erase_owner!` erases an OwningRef owner into Erased or Erased + Send + Sync
//! depending on the value of cfg!(parallel_compiler).
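//!
//! For illustration only, a hypothetical user of these abstractions might write the
//! following (not compiled as a doctest; `compute` is a stand-in name):
//!
//! ```ignore (illustrative)
//! use rustc_data_structures::sync::{Lock, Lrc};
//!
//! let shared = Lrc::new(Lock::new(Vec::new()));
//! shared.lock().push(compute());
//! // With cfg!(parallel_compiler) this is an `Arc<parking_lot::Mutex<_>>`,
//! // otherwise an `Rc<RefCell<_>>` with the same interface.
//! ```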

use crate::owning_ref::{Erased, OwningRef};
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::ops::{Deref, DerefMut};
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};

pub use std::sync::atomic::Ordering;
pub use std::sync::atomic::Ordering::SeqCst;

pub use vec::AppendOnlyVec;

mod vec;

cfg_if! {
    if #[cfg(not(parallel_compiler))] {
        pub auto trait Send {}
        pub auto trait Sync {}

        impl<T> Send for T {}
        impl<T> Sync for T {}

        #[macro_export]
        macro_rules! rustc_erase_owner {
            ($v:expr) => {
                $v.erase_owner()
            }
        }

        use std::ops::Add;

        /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
        /// It has explicit ordering arguments and is only intended for use with
        /// the native atomic types.
        /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases
        /// as it's not intended to be used separately.
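        ///
        /// For illustration only (hypothetical snippet, not compiled as a doctest):
        ///
        /// ```ignore (illustrative)
        /// let n = AtomicUsize::new(0);
        /// n.fetch_add(2, Ordering::SeqCst); // the ordering argument is accepted but ignored
        /// assert_eq!(n.load(Ordering::SeqCst), 2);
        /// ```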
        #[derive(Debug, Default)]
        pub struct Atomic<T: Copy>(Cell<T>);

        impl<T: Copy> Atomic<T> {
            #[inline]
            pub fn new(v: T) -> Self {
                Atomic(Cell::new(v))
            }

            #[inline]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline]
            pub fn load(&self, _: Ordering) -> T {
                self.0.get()
            }

            #[inline]
            pub fn store(&self, val: T, _: Ordering) {
                self.0.set(val)
            }

            #[inline]
            pub fn swap(&self, val: T, _: Ordering) -> T {
                self.0.replace(val)
            }
        }

        impl<T: Copy + PartialEq> Atomic<T> {
            #[inline]
            pub fn compare_exchange(&self,
                                    current: T,
                                    new: T,
                                    _: Ordering,
                                    _: Ordering)
                                    -> Result<T, T> {
                let read = self.0.get();
                if read == current {
                    self.0.set(new);
                    Ok(read)
                } else {
                    Err(read)
                }
            }
        }

        impl<T: Add<Output=T> + Copy> Atomic<T> {
            #[inline]
            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
                let old = self.0.get();
                self.0.set(old + val);
                old
            }
        }

        pub type AtomicUsize = Atomic<usize>;
        pub type AtomicBool = Atomic<bool>;
        pub type AtomicU32 = Atomic<u32>;
        pub type AtomicU64 = Atomic<u64>;

        pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
            where A: FnOnce() -> RA,
                  B: FnOnce() -> RB
        {
            (oper_a(), oper_b())
        }

        #[macro_export]
        macro_rules! parallel {
            ($($blocks:tt),*) => {
                // We catch panics here ensuring that all the blocks execute.
                // This makes behavior consistent with the parallel compiler.
                let mut panic = None;
                $(
                    if let Err(p) = ::std::panic::catch_unwind(
                        ::std::panic::AssertUnwindSafe(|| $blocks)
                    ) {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                    }
                )*
                if let Some(panic) = panic {
                    ::std::panic::resume_unwind(panic);
                }
            }
        }

        pub use Iterator as ParallelIterator;

        pub fn par_iter<T: IntoIterator>(t: T) -> T::IntoIter {
            t.into_iter()
        }

        pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
            // We catch panics here ensuring that all the loop iterations execute.
            // This makes behavior consistent with the parallel compiler.
            let mut panic = None;
            t.into_iter().for_each(|i| {
                if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
                    if panic.is_none() {
                        panic = Some(p);
                    }
                }
            });
            if let Some(panic) = panic {
                resume_unwind(panic);
            }
        }
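        // Illustrative sketch (hypothetical call, not part of this module): even if
        // one item panics, the remaining iterations still run, and the first panic is
        // re-raised afterwards, matching the parallel implementation below:
        //
        //     par_for_each_in(0..4, |i| {
        //         if i == 1 { panic!("boom") } // the other iterations still execute
        //     });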

        pub type MetadataRef = OwningRef<Box<dyn Erased>, [u8]>;

        pub use std::rc::Rc as Lrc;
        pub use std::rc::Weak as Weak;
        pub use std::cell::Ref as ReadGuard;
        pub use std::cell::Ref as MappedReadGuard;
        pub use std::cell::RefMut as WriteGuard;
        pub use std::cell::RefMut as MappedWriteGuard;
        pub use std::cell::RefMut as LockGuard;
        pub use std::cell::RefMut as MappedLockGuard;

        pub use std::cell::OnceCell;

        use std::cell::RefCell as InnerRwLock;
        use std::cell::RefCell as InnerLock;

        use std::cell::Cell;

        #[derive(Debug)]
        pub struct WorkerLocal<T>(OneThread<T>);

        impl<T> WorkerLocal<T> {
            /// Creates a new worker local where the `initial` closure computes the
            /// value this worker local should take for each thread in the thread pool.
            #[inline]
            pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
                WorkerLocal(OneThread::new(f(0)))
            }

            /// Returns the worker-local value for each thread
            #[inline]
            pub fn into_inner(self) -> Vec<T> {
                vec![OneThread::into_inner(self.0)]
            }
        }

        impl<T> Deref for WorkerLocal<T> {
            type Target = T;

            #[inline(always)]
            fn deref(&self) -> &T {
                &self.0
            }
        }

        pub type MTRef<'a, T> = &'a mut T;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(T);

        impl<T> MTLock<T> {
            #[inline(always)]
            pub fn new(inner: T) -> Self {
                MTLock(inner)
            }

            #[inline(always)]
            pub fn into_inner(self) -> T {
                self.0
            }

            #[inline(always)]
            pub fn get_mut(&mut self) -> &mut T {
                &mut self.0
            }

            #[inline(always)]
            pub fn lock(&self) -> &T {
                &self.0
            }

            #[inline(always)]
            pub fn lock_mut(&mut self) -> &mut T {
                &mut self.0
            }
        }

        // FIXME: Probably a bad idea (in the threaded case)
        impl<T: Clone> Clone for MTLock<T> {
            #[inline]
            fn clone(&self) -> Self {
                MTLock(self.0.clone())
            }
        }
    } else {
        pub use std::marker::Send as Send;
        pub use std::marker::Sync as Sync;

        pub use parking_lot::RwLockReadGuard as ReadGuard;
        pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
        pub use parking_lot::RwLockWriteGuard as WriteGuard;
        pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;

        pub use parking_lot::MutexGuard as LockGuard;
        pub use parking_lot::MappedMutexGuard as MappedLockGuard;

        pub use std::sync::OnceLock as OnceCell;

        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};

        pub use std::sync::Arc as Lrc;
        pub use std::sync::Weak as Weak;

        pub type MTRef<'a, T> = &'a T;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(Lock<T>);

        impl<T> MTLock<T> {
            #[inline(always)]
            pub fn new(inner: T) -> Self {
                MTLock(Lock::new(inner))
            }

            #[inline(always)]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline(always)]
            pub fn get_mut(&mut self) -> &mut T {
                self.0.get_mut()
            }

            #[inline(always)]
            pub fn lock(&self) -> LockGuard<'_, T> {
                self.0.lock()
            }

            #[inline(always)]
            pub fn lock_mut(&self) -> LockGuard<'_, T> {
                self.lock()
            }
        }

        use parking_lot::Mutex as InnerLock;
        use parking_lot::RwLock as InnerRwLock;

        use std::thread;
        pub use rayon::{join, scope};

        /// Runs a list of blocks in parallel. The first block is executed immediately on
        /// the current thread. Use that for the longest running block.
        #[macro_export]
        macro_rules! parallel {
            (impl $fblock:tt [$($c:tt,)*] [$block:tt $(, $rest:tt)*]) => {
                parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
            };
            (impl $fblock:tt [$($blocks:tt,)*] []) => {
                ::rustc_data_structures::sync::scope(|s| {
                    $(
                        s.spawn(|_| $blocks);
                    )*
                    $fblock;
                })
            };
            ($fblock:tt, $($blocks:tt),*) => {
                // Reverse the order of the later blocks since Rayon executes them in reverse order
                // when using a single thread. This ensures the execution order matches that
                // of a single threaded rustc
                parallel!(impl $fblock [] [$($blocks),*]);
            };
        }
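        // Illustrative invocation (hypothetical blocks and names, not part of this
        // module): the first block runs on the current thread while the rest are
        // spawned onto the Rayon scope, so put the longest running work first:
        //
        //     parallel!({ check_crate(tcx) }, { collect_items(tcx) }, { run_lints(tcx) });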

        pub use rayon_core::WorkerLocal;

        pub use rayon::iter::ParallelIterator;
        use rayon::iter::IntoParallelIterator;

        pub fn par_iter<T: IntoParallelIterator>(t: T) -> T::Iter {
            t.into_par_iter()
        }

        pub fn par_for_each_in<T: IntoParallelIterator>(
            t: T,
            for_each: impl Fn(T::Item) + Sync + Send,
        ) {
            let ps: Vec<_> = t.into_par_iter().map(|i| catch_unwind(AssertUnwindSafe(|| for_each(i)))).collect();
            ps.into_iter().for_each(|p| if let Err(panic) = p {
                resume_unwind(panic)
            });
        }

        pub type MetadataRef = OwningRef<Box<dyn Erased + Send + Sync>, [u8]>;

        /// This makes locks panic if they are already held.
        /// It is only useful when you are running in a single thread
        const ERROR_CHECKING: bool = false;

        #[macro_export]
        macro_rules! rustc_erase_owner {
            ($v:expr) => {{
                let v = $v;
                ::rustc_data_structures::sync::assert_send_val(&v);
                v.erase_send_sync_owner()
            }}
        }
    }
}

pub fn assert_sync<T: ?Sized + Sync>() {}
pub fn assert_send<T: ?Sized + Send>() {}
pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {}
pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {}

pub trait HashMapExt<K, V> {
    /// Same as HashMap::insert, but it may panic if there's already an
    /// entry for `key` with a value not equal to `value`
    fn insert_same(&mut self, key: K, value: V);
}

impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
    fn insert_same(&mut self, key: K, value: V) {
        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
    }
}
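// Illustrative usage (hypothetical map, not part of this module): inserting an equal
// value is a no-op, while a conflicting value trips the assertion:
//
//     let mut map = HashMap::new();
//     map.insert_same("key", 1);
//     map.insert_same("key", 1); // fine, same value
//     // map.insert_same("key", 2); // would panic: existing value differs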

#[derive(Debug)]
pub struct Lock<T>(InnerLock<T>);

impl<T> Lock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        Lock(InnerLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_lock()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_borrow_mut().ok()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    #[track_caller]
    pub fn lock(&self) -> LockGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_lock().expect("lock was already held")
        } else {
            self.0.lock()
        }
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn lock(&self) -> LockGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.lock())
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow(&self) -> LockGuard<'_, T> {
        self.lock()
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
        self.lock()
    }
}

impl<T: Default> Default for Lock<T> {
    #[inline]
    fn default() -> Self {
        Lock::new(T::default())
    }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for Lock<T> {
    #[inline]
    fn clone(&self) -> Self {
        Lock::new(self.borrow().clone())
    }
}
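// Illustrative usage (hypothetical caller, not part of this module): the same calls
// compile against either the `parking_lot::Mutex` or the `RefCell` backend:
//
//     let cache = Lock::new(Vec::new());
//     cache.lock().push(1);
//     let sum: i32 = cache.with_lock(|v| v.iter().sum());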

#[derive(Debug, Default)]
pub struct RwLock<T>(InnerRwLock<T>);

impl<T> RwLock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        RwLock(InnerRwLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn read(&self) -> ReadGuard<'_, T> {
        self.0.borrow()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_read().expect("lock was already held")
        } else {
            self.0.read()
        }
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
        f(&*self.read())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_borrow_mut().map_err(|_| ())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_write().ok_or(())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn write(&self) -> WriteGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_write().expect("lock was already held")
        } else {
            self.0.write()
        }
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.write())
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow(&self) -> ReadGuard<'_, T> {
        self.read()
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
        self.write()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn clone_guard<'a>(rg: &ReadGuard<'a, T>) -> ReadGuard<'a, T> {
        ReadGuard::clone(rg)
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn clone_guard<'a>(rg: &ReadGuard<'a, T>) -> ReadGuard<'a, T> {
        ReadGuard::rwlock(&rg).read()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn leak(&self) -> &T {
        ReadGuard::leak(self.read())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn leak(&self) -> &T {
        let guard = self.read();
        let ret = unsafe { &*(&*guard as *const T) };
        // Forgetting the guard means the read lock is never released, so the data
        // can no longer be mutated through this `RwLock` and the reference stays valid.
        std::mem::forget(guard);
        ret
    }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for RwLock<T> {
    #[inline]
    fn clone(&self) -> Self {
        RwLock::new(self.borrow().clone())
    }
}
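// Illustrative usage (hypothetical caller, not part of this module): several readers
// may hold the lock at once under cfg!(parallel_compiler); the `RefCell` backend
// enforces the same borrow rules dynamically on a single thread:
//
//     let stats = RwLock::new(0usize);
//     *stats.write() += 1;
//     assert_eq!(*stats.read(), 1);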

/// A type which only allows its inner value to be used in one thread.
/// It will panic if it is used on multiple threads.
#[derive(Debug)]
pub struct OneThread<T> {
    #[cfg(parallel_compiler)]
    thread: thread::ThreadId,
    inner: T,
}

#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Sync for OneThread<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Send for OneThread<T> {}

impl<T> OneThread<T> {
    #[inline(always)]
    fn check(&self) {
        #[cfg(parallel_compiler)]
        assert_eq!(thread::current().id(), self.thread);
    }

    #[inline(always)]
    pub fn new(inner: T) -> Self {
        OneThread {
            #[cfg(parallel_compiler)]
            thread: thread::current().id(),
            inner,
        }
    }

    #[inline(always)]
    pub fn into_inner(value: Self) -> T {
        value.check();
        value.inner
    }
}

impl<T> Deref for OneThread<T> {
    type Target = T;

    fn deref(&self) -> &T {
        self.check();
        &self.inner
    }
}

impl<T> DerefMut for OneThread<T> {
    fn deref_mut(&mut self) -> &mut T {
        self.check();
        &mut self.inner
    }
}