// Necessary for implementing atomic methods for `AtomicUnit`
#![allow(clippy::unit_arg)]

use crate::primitive::sync::atomic::{self, AtomicBool};
use core::cell::UnsafeCell;
use core::cmp;
use core::fmt;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ptr;
use core::sync::atomic::Ordering;

#[cfg(feature = "std")]
use std::panic::{RefUnwindSafe, UnwindSafe};

use super::seq_lock::SeqLock;
/// A thread-safe mutable memory location.
///
/// This type is equivalent to [`Cell`], except it can also be shared among multiple threads.
///
/// Operations on `AtomicCell`s use atomic instructions whenever possible, and synchronize using
/// global locks otherwise. You can call [`AtomicCell::<T>::is_lock_free()`] to check whether
/// atomic instructions or locks will be used.
///
/// Atomic loads use the [`Acquire`] ordering and atomic stores use the [`Release`] ordering.
///
/// [`Cell`]: std::cell::Cell
/// [`AtomicCell::<T>::is_lock_free()`]: AtomicCell::is_lock_free
/// [`Acquire`]: std::sync::atomic::Ordering::Acquire
/// [`Release`]: std::sync::atomic::Ordering::Release
pub struct AtomicCell<T> {
    /// If this value can be transmuted into a primitive atomic type, it will be treated as such.
    /// Otherwise, all potentially concurrent operations on this data will be protected by a global
    /// lock.
    ///
    /// Using `MaybeUninit` to prevent code outside the cell from observing partially initialized
    /// state: <https://github.com/crossbeam-rs/crossbeam/issues/833>
    ///
    /// Note:
    /// - we'll never store uninitialized `T` due to our API only using initialized `T`.
    /// - this `MaybeUninit` does *not* fix <https://github.com/crossbeam-rs/crossbeam/issues/315>.
    value: UnsafeCell<MaybeUninit<T>>,
}
unsafe impl<T: Send> Send for AtomicCell<T> {}
unsafe impl<T: Send> Sync for AtomicCell<T> {}

#[cfg(feature = "std")]
impl<T> UnwindSafe for AtomicCell<T> {}
#[cfg(feature = "std")]
impl<T> RefUnwindSafe for AtomicCell<T> {}
impl<T> AtomicCell<T> {
    /// Creates a new atomic cell initialized with `val`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// ```
    pub const fn new(val: T) -> AtomicCell<T> {
        AtomicCell {
            value: UnsafeCell::new(MaybeUninit::new(val)),
        }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// let v = a.into_inner();
    ///
    /// assert_eq!(v, 7);
    /// ```
    pub fn into_inner(self) -> T {
        let this = ManuallyDrop::new(self);

        // SAFETY:
        // - passing `self` by value guarantees that no other threads are concurrently
        //   accessing the atomic data
        // - the raw pointer passed in is valid because we got it from an owned value.
        // - `ManuallyDrop` prevents double dropping `T`
        unsafe { this.as_ptr().read() }
    }
    /// Returns `true` if operations on values of this type are lock-free.
    ///
    /// If the compiler or the platform doesn't support the necessary atomic instructions,
    /// `AtomicCell<T>` will use global locks for every potentially concurrent atomic operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// // This type is internally represented as `AtomicUsize` so we can just use atomic
    /// // operations provided by it.
    /// assert_eq!(AtomicCell::<usize>::is_lock_free(), true);
    ///
    /// // A wrapper struct around `isize`.
    /// struct Foo {
    ///     bar: isize,
    /// }
    /// // `AtomicCell<Foo>` will be internally represented as `AtomicIsize`.
    /// assert_eq!(AtomicCell::<Foo>::is_lock_free(), true);
    ///
    /// // Operations on zero-sized types are always lock-free.
    /// assert_eq!(AtomicCell::<()>::is_lock_free(), true);
    ///
    /// // Very large types cannot be represented as any of the standard atomic types, so atomic
    /// // operations on them will have to use global locks for synchronization.
    /// assert_eq!(AtomicCell::<[u8; 1000]>::is_lock_free(), false);
    /// ```
    pub const fn is_lock_free() -> bool {
        atomic_is_lock_free::<T>()
    }
    /// Stores `val` into the atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// a.store(8);
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn store(&self, val: T) {
        if mem::needs_drop::<T>() {
            drop(self.swap(val));
        } else {
            unsafe {
                atomic_store(self.as_ptr(), val);
            }
        }
    }
    /// Stores `val` into the atomic cell and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// assert_eq!(a.swap(8), 7);
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn swap(&self, val: T) -> T {
        unsafe { atomic_swap(self.as_ptr(), val) }
    }
    /// Returns a raw pointer to the underlying data in this atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(5);
    ///
    /// let ptr = a.as_ptr();
    /// ```
    #[inline]
    pub fn as_ptr(&self) -> *mut T {
        self.value.get() as *mut T
    }
}
impl<T: Default> AtomicCell<T> {
    /// Takes the value of the atomic cell, leaving `Default::default()` in its place.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(5);
    /// let five = a.take();
    ///
    /// assert_eq!(five, 5);
    /// assert_eq!(a.into_inner(), 0);
    /// ```
    pub fn take(&self) -> T {
        self.swap(Default::default())
    }
}
impl<T: Copy> AtomicCell<T> {
    /// Loads a value from the atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// ```
    pub fn load(&self) -> T {
        unsafe { atomic_load(self.as_ptr()) }
    }
}
impl<T: Copy + Eq> AtomicCell<T> {
    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_and_swap(2, 3), 1);
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_and_swap(1, 2), 1);
    /// assert_eq!(a.load(), 2);
    /// ```
    // TODO: remove in the next major version.
    #[deprecated(note = "Use `compare_exchange` instead")]
    pub fn compare_and_swap(&self, current: T, new: T) -> T {
        match self.compare_exchange(current, new) {
            Ok(v) => v,
            Err(v) => v,
        }
    }
    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_exchange(2, 3), Err(1));
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_exchange(1, 2), Ok(1));
    /// assert_eq!(a.load(), 2);
    /// ```
    pub fn compare_exchange(&self, current: T, new: T) -> Result<T, T> {
        unsafe { atomic_compare_exchange_weak(self.as_ptr(), current, new) }
    }
    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`,
    /// else `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been changed from other
    /// threads in the meantime, as long as the function returns `Some(_)`, but the function will
    /// have been applied only once to the stored value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// assert_eq!(a.fetch_update(|_| None), Err(7));
    /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(7));
    /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(8));
    /// assert_eq!(a.load(), 9);
    /// ```
    pub fn fetch_update<F>(&self, mut f: F) -> Result<T, T>
    where
        F: FnMut(T) -> Option<T>,
    {
        let mut prev = self.load();
        while let Some(next) = f(prev) {
            match self.compare_exchange(prev, next) {
                x @ Ok(_) => return x,
                Err(next_prev) => prev = next_prev,
            }
        }
        Err(prev)
    }
}
// `MaybeUninit` prevents `T` from being dropped, so we need to implement `Drop`
// for `AtomicCell` to avoid leaks of non-`Copy` types.
impl<T> Drop for AtomicCell<T> {
    fn drop(&mut self) {
        if mem::needs_drop::<T>() {
            // SAFETY:
            // - the mutable reference guarantees that no other threads are concurrently accessing the atomic data
            // - the raw pointer passed in is valid because we got it from a reference
            // - `MaybeUninit` prevents double dropping `T`
            unsafe {
                self.as_ptr().drop_in_place();
            }
        }
    }
}
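
// A minimal sketch (added for illustration, not part of the original file) of why the
// `Drop` impl above matters: a non-`Copy` payload stored through `MaybeUninit` would
// otherwise leak. Assumes a std test context; `String` is just an example payload,
// and the module name is hypothetical.
#[cfg(test)]
mod drop_sketch {
    use super::AtomicCell;

    #[test]
    fn drops_previous_and_final_values() {
        let cell = AtomicCell::new(String::from("first"));
        // `swap` hands the previous value back to the caller, who drops it.
        let prev = cell.swap(String::from("second"));
        assert_eq!(prev, "first");
        // Dropping the cell runs `String`'s destructor on "second" exactly once,
        // via `drop_in_place` in `Drop for AtomicCell`.
        drop(cell);
    }
}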
macro_rules! impl_arithmetic {
    ($t:ty, fallback, $example:tt) => {
        impl AtomicCell<$t> {
            /// Increments the current value by `val` and returns the previous value.
            ///
            /// The addition wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_add(3), 7);
            /// assert_eq!(a.load(), 10);
            /// ```
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = value.wrapping_add(val);
                old
            }

            /// Decrements the current value by `val` and returns the previous value.
            ///
            /// The subtraction wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_sub(3), 7);
            /// assert_eq!(a.load(), 4);
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = value.wrapping_sub(val);
                old
            }

            /// Applies bitwise "and" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_and(3), 7);
            /// assert_eq!(a.load(), 3);
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value &= val;
                old
            }

            /// Applies bitwise "nand" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_nand(3), 7);
            /// assert_eq!(a.load(), !(7 & 3));
            /// ```
            #[inline]
            pub fn fetch_nand(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = !(old & val);
                old
            }

            /// Applies bitwise "or" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_or(16), 7);
            /// assert_eq!(a.load(), 23);
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value |= val;
                old
            }

            /// Applies bitwise "xor" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_xor(2), 7);
            /// assert_eq!(a.load(), 5);
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value ^= val;
                old
            }

            /// Compares and sets the maximum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_max(2), 7);
            /// assert_eq!(a.load(), 7);
            /// ```
            #[inline]
            pub fn fetch_max(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = cmp::max(old, val);
                old
            }

            /// Compares and sets the minimum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_min(2), 7);
            /// assert_eq!(a.load(), 2);
            /// ```
            #[inline]
            pub fn fetch_min(&self, val: $t) -> $t {
                let _guard = lock(self.as_ptr() as usize).write();
                let value = unsafe { &mut *(self.as_ptr()) };
                let old = *value;
                *value = cmp::min(old, val);
                old
            }
        }
    };
    ($t:ty, $atomic:ty, $example:tt) => {
        impl AtomicCell<$t> {
            /// Increments the current value by `val` and returns the previous value.
            ///
            /// The addition wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_add(3), 7);
            /// assert_eq!(a.load(), 10);
            /// ```
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_add(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = value.wrapping_add(val);
                    old
                }
            }

            /// Decrements the current value by `val` and returns the previous value.
            ///
            /// The subtraction wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_sub(3), 7);
            /// assert_eq!(a.load(), 4);
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_sub(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = value.wrapping_sub(val);
                    old
                }
            }

            /// Applies bitwise "and" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_and(3), 7);
            /// assert_eq!(a.load(), 3);
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_and(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value &= val;
                    old
                }
            }

            /// Applies bitwise "nand" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_nand(3), 7);
            /// assert_eq!(a.load(), !(7 & 3));
            /// ```
            #[inline]
            pub fn fetch_nand(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_nand(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = !(old & val);
                    old
                }
            }

            /// Applies bitwise "or" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_or(16), 7);
            /// assert_eq!(a.load(), 23);
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_or(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value |= val;
                    old
                }
            }

            /// Applies bitwise "xor" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_xor(2), 7);
            /// assert_eq!(a.load(), 5);
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_xor(val, Ordering::AcqRel)
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value ^= val;
                    old
                }
            }

            /// Compares and sets the maximum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_max(9), 7);
            /// assert_eq!(a.load(), 9);
            /// ```
            #[inline]
            pub fn fetch_max(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    // TODO: Atomic*::fetch_max requires Rust 1.45.
                    self.fetch_update(|old| Some(cmp::max(old, val))).unwrap()
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = cmp::max(old, val);
                    old
                }
            }

            /// Compares and sets the minimum of the current value and `val`,
            /// and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_min(2), 7);
            /// assert_eq!(a.load(), 2);
            /// ```
            #[inline]
            pub fn fetch_min(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
                    // TODO: Atomic*::fetch_min requires Rust 1.45.
                    self.fetch_update(|old| Some(cmp::min(old, val))).unwrap()
                } else {
                    let _guard = lock(self.as_ptr() as usize).write();
                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = cmp::min(old, val);
                    old
                }
            }
        }
    };
}
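
// Illustrative sketch (added, not in the original file): the methods generated by
// `impl_arithmetic!` return the previous value and wrap on overflow, matching the
// doc comments above. The module name is hypothetical.
#[cfg(test)]
mod impl_arithmetic_sketch {
    use super::AtomicCell;

    #[test]
    fn fetch_add_wraps_and_returns_previous() {
        let a = AtomicCell::new(255u8);
        assert_eq!(a.fetch_add(1), 255); // the previous value comes back
        assert_eq!(a.load(), 0); // the addition wrapped around
    }
}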
impl_arithmetic!(u8, atomic::AtomicU8, "let a = AtomicCell::new(7u8);");
impl_arithmetic!(i8, atomic::AtomicI8, "let a = AtomicCell::new(7i8);");
impl_arithmetic!(u16, atomic::AtomicU16, "let a = AtomicCell::new(7u16);");
impl_arithmetic!(i16, atomic::AtomicI16, "let a = AtomicCell::new(7i16);");
impl_arithmetic!(u32, atomic::AtomicU32, "let a = AtomicCell::new(7u32);");
impl_arithmetic!(i32, atomic::AtomicI32, "let a = AtomicCell::new(7i32);");
#[cfg(not(crossbeam_no_atomic_64))]
impl_arithmetic!(u64, atomic::AtomicU64, "let a = AtomicCell::new(7u64);");
#[cfg(not(crossbeam_no_atomic_64))]
impl_arithmetic!(i64, atomic::AtomicI64, "let a = AtomicCell::new(7i64);");
#[cfg(crossbeam_no_atomic_64)]
impl_arithmetic!(u64, fallback, "let a = AtomicCell::new(7u64);");
#[cfg(crossbeam_no_atomic_64)]
impl_arithmetic!(i64, fallback, "let a = AtomicCell::new(7i64);");
// TODO: AtomicU128 is unstable
// impl_arithmetic!(u128, atomic::AtomicU128, "let a = AtomicCell::new(7u128);");
// impl_arithmetic!(i128, atomic::AtomicI128, "let a = AtomicCell::new(7i128);");
impl_arithmetic!(u128, fallback, "let a = AtomicCell::new(7u128);");
impl_arithmetic!(i128, fallback, "let a = AtomicCell::new(7i128);");
impl_arithmetic!(
    usize,
    atomic::AtomicUsize,
    "let a = AtomicCell::new(7usize);"
);
impl_arithmetic!(
    isize,
    atomic::AtomicIsize,
    "let a = AtomicCell::new(7isize);"
);
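
// Sketch (added for illustration): `u128` has no stable primitive atomic, so its
// `impl_arithmetic!` instantiation above uses the `fallback` arm and every operation
// goes through the global `SeqLock` table; the observable behavior is the same.
// The module name is hypothetical.
#[cfg(test)]
mod fallback_arithmetic_sketch {
    use super::AtomicCell;

    #[test]
    fn u128_arithmetic_goes_through_locks() {
        let a = AtomicCell::new(7u128);
        assert_eq!(a.fetch_add(3), 7);
        assert_eq!(a.fetch_max(100), 10);
        assert_eq!(a.load(), 100);
        // No primitive atomic matches a 16-byte value, so this is never lock-free.
        assert!(!AtomicCell::<u128>::is_lock_free());
    }
}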
impl AtomicCell<bool> {
    /// Applies logical "and" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_and(true), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_and(false), true);
    /// assert_eq!(a.load(), false);
    /// ```
    #[inline]
    pub fn fetch_and(&self, val: bool) -> bool {
        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
        a.fetch_and(val, Ordering::AcqRel)
    }

    /// Applies logical "nand" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_nand(false), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_nand(true), true);
    /// assert_eq!(a.load(), false);
    ///
    /// assert_eq!(a.fetch_nand(false), false);
    /// assert_eq!(a.load(), true);
    /// ```
    #[inline]
    pub fn fetch_nand(&self, val: bool) -> bool {
        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
        a.fetch_nand(val, Ordering::AcqRel)
    }

    /// Applies logical "or" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(false);
    ///
    /// assert_eq!(a.fetch_or(false), false);
    /// assert_eq!(a.load(), false);
    ///
    /// assert_eq!(a.fetch_or(true), false);
    /// assert_eq!(a.load(), true);
    /// ```
    #[inline]
    pub fn fetch_or(&self, val: bool) -> bool {
        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
        a.fetch_or(val, Ordering::AcqRel)
    }

    /// Applies logical "xor" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_xor(false), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_xor(true), true);
    /// assert_eq!(a.load(), false);
    /// ```
    #[inline]
    pub fn fetch_xor(&self, val: bool) -> bool {
        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
        a.fetch_xor(val, Ordering::AcqRel)
    }
}
impl<T: Default> Default for AtomicCell<T> {
    fn default() -> AtomicCell<T> {
        AtomicCell::new(T::default())
    }
}
impl<T> From<T> for AtomicCell<T> {
    #[inline]
    fn from(val: T) -> AtomicCell<T> {
        AtomicCell::new(val)
    }
}
impl<T: Copy + fmt::Debug> fmt::Debug for AtomicCell<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("AtomicCell")
            .field("value", &self.load())
            .finish()
    }
}
/// Returns `true` if values of type `A` can be transmuted into values of type `B`.
const fn can_transmute<A, B>() -> bool {
    // Sizes must be equal, but alignment of `A` must be greater or equal than that of `B`.
    (mem::size_of::<A>() == mem::size_of::<B>()) & (mem::align_of::<A>() >= mem::align_of::<B>())
}
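
// Illustrative check (added, not in the original file): `can_transmute` is a pure
// layout test. `u32` matches `AtomicU32` in size and alignment on all supported
// platforms, while a 3-byte array can never stand in for a 4-byte atomic.
// The module name is hypothetical.
#[cfg(test)]
mod can_transmute_sketch {
    use super::{atomic, can_transmute};

    #[test]
    fn layout_checks() {
        assert!(can_transmute::<u32, atomic::AtomicU32>());
        assert!(!can_transmute::<[u8; 3], atomic::AtomicU32>());
    }
}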
/// Returns a reference to the global lock associated with the `AtomicCell` at address `addr`.
///
/// This function is used to protect atomic data which doesn't fit into any of the primitive atomic
/// types in `std::sync::atomic`. Operations on such atomics must therefore use a global lock.
///
/// However, there is not only one global lock but an array of many locks, and one of them is
/// picked based on the given address. Having many locks reduces contention and improves
/// scalability.
#[inline]
fn lock(addr: usize) -> &'static SeqLock {
    // The number of locks is a prime number because we want to make sure `addr % LEN` gets
    // dispersed across all locks.
    //
    // Note that addresses are always aligned to some power of 2, depending on type `T` in
    // `AtomicCell<T>`. If `LEN` was an even number, then `addr % LEN` would be an even number,
    // too, which means only half of the locks would get utilized!
    //
    // It is also possible for addresses to accidentally get aligned to a number that is not a
    // power of 2. Consider this example:
    //
    // ```
    // #[repr(C)]
    // struct Foo {
    //     a: AtomicCell<u8>,
    //     b: u8,
    //     c: u8,
    // }
    // ```
    //
    // Now, if we have a slice of type `&[Foo]`, it is possible that field `a` in all items gets
    // stored at addresses that are multiples of 3. It'd be too bad if `LEN` was divisible by 3.
    // In order to protect from such cases, we simply choose a large prime number for `LEN`.
    const LEN: usize = 97;
    #[allow(clippy::declare_interior_mutable_const)]
    const L: SeqLock = SeqLock::new();
    static LOCKS: [SeqLock; LEN] = [
        L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L,
        L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L,
        L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L,
        L, L, L, L, L, L, L,
    ];

    // If the modulus is a constant number, the compiler will use crazy math to transform this into
    // a sequence of cheap arithmetic operations rather than using the slow modulo instruction.
    &LOCKS[addr % LEN]
}
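
// Sketch (added for illustration): `lock` is deterministic, so the two halves of a
// lock-protected operation always agree on which `SeqLock` guards a given address,
// while nearby addresses spread across the 97 entries. The module name is hypothetical.
#[cfg(test)]
mod lock_table_sketch {
    use super::lock;

    #[test]
    fn same_address_same_lock() {
        let x = 0u64;
        let addr = &x as *const _ as usize;
        // Two lookups of the same address must return the same `SeqLock`.
        assert!(core::ptr::eq(lock(addr), lock(addr)));
        // Consecutive addresses map to different slots of the lock table.
        assert!(!core::ptr::eq(lock(0), lock(1)));
    }
}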
/// An atomic `()`.
///
/// All operations are noops.
struct AtomicUnit;

impl AtomicUnit {
    #[inline]
    fn load(&self, _order: Ordering) {}

    #[inline]
    fn store(&self, _val: (), _order: Ordering) {}

    #[inline]
    fn swap(&self, _val: (), _order: Ordering) {}

    #[inline]
    fn compare_exchange_weak(
        &self,
        _current: (),
        _new: (),
        _success: Ordering,
        _failure: Ordering,
    ) -> Result<(), ()> {
        Ok(())
    }
}
macro_rules! atomic {
    // If values of type `$t` can be transmuted into values of the primitive atomic type `$atomic`,
    // declares variable `$a` of type `$atomic` and executes `$atomic_op`, breaking out of the loop.
    (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => {
        if can_transmute::<$t, $atomic>() {
            let $a: &$atomic;
            break $atomic_op;
        }
    };

    // If values of type `$t` can be transmuted into values of a primitive atomic type, declares
    // variable `$a` of that type and executes `$atomic_op`. Otherwise, just executes
    // `$fallback_op`.
    ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => {
        loop {
            atomic!(@check, $t, AtomicUnit, $a, $atomic_op);

            atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op);
            atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op);
            atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op);
            #[cfg(not(crossbeam_no_atomic_64))]
            atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op);
            // TODO: AtomicU128 is unstable
            // atomic!(@check, $t, atomic::AtomicU128, $a, $atomic_op);

            break $fallback_op;
        }
    };
}
/// Returns `true` if operations on `AtomicCell<T>` are lock-free.
const fn atomic_is_lock_free<T>() -> bool {
    // HACK(taiki-e): This is equivalent to `atomic! { T, _a, true, false }`, but can be used in
    // const fn even in Rust 1.36.
    let is_lock_free = can_transmute::<T, AtomicUnit>()
        | can_transmute::<T, atomic::AtomicU8>()
        | can_transmute::<T, atomic::AtomicU16>()
        | can_transmute::<T, atomic::AtomicU32>();
    #[cfg(not(crossbeam_no_atomic_64))]
    let is_lock_free = is_lock_free | can_transmute::<T, atomic::AtomicU64>();
    // TODO: AtomicU128 is unstable
    // let is_lock_free = is_lock_free | can_transmute::<T, atomic::AtomicU128>();
    is_lock_free
}
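
// Sketch (added): because `atomic_is_lock_free` is a `const fn`, the lock-free
// decision can be made entirely at compile time, e.g. in a constant.
// The module name is hypothetical.
#[cfg(test)]
mod const_lock_free_sketch {
    use super::atomic_is_lock_free;

    #[test]
    fn evaluated_at_compile_time() {
        const USIZE_LOCK_FREE: bool = atomic_is_lock_free::<usize>();
        const BIG_LOCK_FREE: bool = atomic_is_lock_free::<[u8; 1000]>();
        assert!(USIZE_LOCK_FREE);
        assert!(!BIG_LOCK_FREE);
    }
}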
/// Atomically reads data from `src`.
///
/// This operation uses the `Acquire` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_load<T>(src: *mut T) -> T
where
    T: Copy,
{
    atomic! {
        T, a,
        {
            a = &*(src as *const _ as *const _);
            mem::transmute_copy(&a.load(Ordering::Acquire))
        },
        {
            let lock = lock(src as usize);

            // Try doing an optimistic read first.
            if let Some(stamp) = lock.optimistic_read() {
                // We need a volatile read here because other threads might concurrently modify the
                // value. In theory, data races are *always* UB, even if we use volatile reads and
                // discard the data when a data race is detected. The proper solution would be to
                // do atomic reads and atomic writes, but we can't atomically read and write all
                // kinds of data since `AtomicU8` is not available on stable Rust yet.
                let val = ptr::read_volatile(src);

                if lock.validate_read(stamp) {
                    return val;
                }
            }

            // Grab a regular write lock so that writers don't starve this load.
            let guard = lock.write();
            let val = ptr::read(src);
            // The value hasn't been changed. Drop the guard without incrementing the stamp.
            guard.abort();
            val
        }
    }
}
/// Atomically writes `val` to `dst`.
///
/// This operation uses the `Release` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_store<T>(dst: *mut T, val: T) {
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            a.store(mem::transmute_copy(&val), Ordering::Release);
            mem::forget(val);
        },
        {
            let _guard = lock(dst as usize).write();
            ptr::write(dst, val);
        }
    }
}
/// Atomically swaps data at `dst` with `val`.
///
/// This operation uses the `AcqRel` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::AcqRel));
            mem::forget(val);
            res
        },
        {
            let _guard = lock(dst as usize).write();
            ptr::replace(dst, val)
        }
    }
}
/// Atomically compares data at `dst` to `current` and, if equal byte-for-byte, exchanges data at
/// `dst` with `new`.
///
/// Returns the old value on success, or the current value at `dst` on failure.
///
/// This operation uses the `AcqRel` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T, mut current: T, new: T) -> Result<T, T>
where
    T: Copy + Eq,
{
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let mut current_raw = mem::transmute_copy(&current);
            let new_raw = mem::transmute_copy(&new);

            loop {
                match a.compare_exchange_weak(
                    current_raw,
                    new_raw,
                    Ordering::AcqRel,
                    Ordering::Acquire,
                ) {
                    Ok(_) => break Ok(current),
                    Err(previous_raw) => {
                        let previous = mem::transmute_copy(&previous_raw);

                        if !T::eq(&previous, &current) {
                            break Err(previous);
                        }

                        // The compare-exchange operation has failed and didn't store `new`. The
                        // failure is either spurious, or `previous` was semantically equal to
                        // `current` but not byte-equal. Let's retry with `previous` as the new
                        // `current`.
                        current = previous;
                        current_raw = previous_raw;
                    }
                }
            }
        },
        {
            let guard = lock(dst as usize).write();

            if T::eq(&*dst, &current) {
                Ok(ptr::replace(dst, new))
            } else {
                let val = ptr::read(dst);
                // The value hasn't been changed. Drop the guard without incrementing the stamp.
                guard.abort();
                Err(val)
            }
        }
    }
}
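
// Sketch (added for illustration): the retry loop above distinguishes semantic
// equality (`Eq`) from byte equality. The hypothetical `Tagged` type below has an
// `Eq` that ignores one field, so a byte-level CAS can fail even though
// `compare_exchange` should succeed; the loop then retries with the previous raw
// bytes as the new comparand. The module and type names are illustrative only.
#[cfg(test)]
mod semantic_eq_sketch {
    use super::AtomicCell;

    // `align(2)` gives this 2-byte type the layout of `AtomicU16`, so the cell
    // takes the lock-free compare-exchange path rather than the lock fallback.
    #[derive(Clone, Copy, Debug)]
    #[repr(align(2))]
    struct Tagged {
        key: u8,
        tag: u8, // ignored by `Eq`
    }

    impl PartialEq for Tagged {
        fn eq(&self, other: &Self) -> bool {
            self.key == other.key
        }
    }
    impl Eq for Tagged {}

    #[test]
    fn compare_exchange_uses_semantic_equality() {
        let cell = AtomicCell::new(Tagged { key: 1, tag: 7 });
        // `current` is semantically equal to the stored value but differs in `tag`,
        // so the first byte-level CAS fails and the loop retries.
        let current = Tagged { key: 1, tag: 9 };
        let new = Tagged { key: 2, tag: 0 };
        assert!(cell.compare_exchange(current, new).is_ok());
        assert_eq!(cell.load().key, 2);
    }
}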