#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
use core::sync::atomic::compiler_fence;
use core::sync::atomic::Ordering;

/// Trait which allows reading from primitive atomic types with "consume" ordering.
pub trait AtomicConsume {
    /// Type returned by `load_consume`.
    type Val;

    /// Loads a value from the atomic using a "consume" memory ordering.
    ///
    /// This is similar to the "acquire" ordering, except that an ordering is
    /// only guaranteed with operations that "depend on" the result of the load.
    /// However, consume loads are usually much faster than acquire loads on
    /// architectures with a weak memory model, since they don't require memory
    /// fence instructions.
    ///
    /// The exact definition of "depend on" is a bit vague, but it works as you
    /// would expect in practice, since a lot of software, especially the Linux
    /// kernel, relies on this behavior.
    ///
    /// This is currently only implemented on ARM and AArch64, where a fence
    /// can be avoided. On other architectures this will fall back to a simple
    /// `load(Ordering::Acquire)`.
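    ///
    /// # Examples
    ///
    /// A minimal usage sketch (illustrative; it assumes this trait is
    /// re-exported as `crossbeam_utils::atomic::AtomicConsume`):
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicConsume;
    /// use std::sync::atomic::AtomicUsize;
    ///
    /// let a = AtomicUsize::new(7);
    /// assert_eq!(a.load_consume(), 7);
    /// ```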
    fn load_consume(&self) -> Self::Val;
}

#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
macro_rules! impl_consume {
    () => {
        #[inline]
        fn load_consume(&self) -> Self::Val {
            // On ARM and AArch64, loads that carry a data dependency are
            // already ordered by the hardware, so a relaxed load followed by
            // a compiler-only fence is sufficient; no fence instruction is
            // emitted.
            let result = self.load(Ordering::Relaxed);
            compiler_fence(Ordering::Acquire);
            result
        }
    };
}

#[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
macro_rules! impl_consume {
    () => {
        #[inline]
        fn load_consume(&self) -> Self::Val {
            // On other architectures, fall back to an ordinary acquire load.
            self.load(Ordering::Acquire)
        }
    };
}

macro_rules! impl_atomic {
    ($atomic:ident, $val:ty) => {
        impl AtomicConsume for ::core::sync::atomic::$atomic {
            type Val = $val;
            impl_consume!();
        }
    };
}

impl_atomic!(AtomicBool, bool);
impl_atomic!(AtomicUsize, usize);
impl_atomic!(AtomicIsize, isize);
#[cfg(all(feature = "nightly", target_has_atomic = "8"))]
impl_atomic!(AtomicU8, u8);
#[cfg(all(feature = "nightly", target_has_atomic = "8"))]
impl_atomic!(AtomicI8, i8);
#[cfg(all(feature = "nightly", target_has_atomic = "16"))]
impl_atomic!(AtomicU16, u16);
#[cfg(all(feature = "nightly", target_has_atomic = "16"))]
impl_atomic!(AtomicI16, i16);
#[cfg(all(feature = "nightly", target_has_atomic = "32"))]
impl_atomic!(AtomicU32, u32);
#[cfg(all(feature = "nightly", target_has_atomic = "32"))]
impl_atomic!(AtomicI32, i32);
#[cfg(all(feature = "nightly", target_has_atomic = "64"))]
impl_atomic!(AtomicU64, u64);
#[cfg(all(feature = "nightly", target_has_atomic = "64"))]
impl_atomic!(AtomicI64, i64);

impl<T> AtomicConsume for ::core::sync::atomic::AtomicPtr<T> {
    type Val = *mut T;
    impl_consume!();
}
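
// A small sanity-check sketch (added for illustration, not part of the
// upstream crate): it exercises `load_consume` on a couple of the types
// implemented above and assumes only the standard `#[test]` harness.
#[cfg(test)]
mod tests {
    use super::AtomicConsume;
    use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

    #[test]
    fn load_consume_returns_last_stored_value() {
        let counter = AtomicUsize::new(0);
        counter.store(42, Ordering::Release);
        assert_eq!(counter.load_consume(), 42);
    }

    #[test]
    fn load_consume_works_for_pointers() {
        let mut value = 7_i32;
        let raw = &mut value as *mut i32;
        let ptr = AtomicPtr::new(raw);
        assert_eq!(ptr.load_consume(), raw);
    }
}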