1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
13 //! Atomic types provide primitive shared-memory communication between
14 //! threads, and are the building blocks of other concurrent
17 //! This module defines atomic versions of a select number of primitive
18 //! types, including [`AtomicBool`], [`AtomicIsize`], and [`AtomicUsize`].
19 //! Atomic types present operations that, when used correctly, synchronize
20 //! updates between threads.
22 //! [`AtomicBool`]: struct.AtomicBool.html
23 //! [`AtomicIsize`]: struct.AtomicIsize.html
24 //! [`AtomicUsize`]: struct.AtomicUsize.html
26 //! Each method takes an [`Ordering`] which represents the strength of
27 //! the memory barrier for that operation. These orderings are the
28 //! same as [LLVM atomic orderings][1]. For more information see the [nomicon][2].
30 //! [`Ordering`]: enum.Ordering.html
32 //! [1]: https://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
33 //! [2]: ../../../nomicon/atomics.html
35 //! Atomic variables are safe to share between threads (they implement [`Sync`])
36 //! but they do not themselves provide the mechanism for sharing and follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
38 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
39 //! atomically-reference-counted shared pointer).
41 //! [`Sync`]: ../../marker/trait.Sync.html
42 //! [arc]: ../../../std/sync/struct.Arc.html
44 //! Most atomic types may be stored in static variables, initialized using
45 //! the provided static initializers like [`ATOMIC_BOOL_INIT`]. Atomic statics
46 //! are often used for lazy global initialization.
48 //! [`ATOMIC_BOOL_INIT`]: constant.ATOMIC_BOOL_INIT.html
52 //! A simple spinlock:
55 //! use std::sync::Arc;
56 //! use std::sync::atomic::{AtomicUsize, Ordering};
60 //! let spinlock = Arc::new(AtomicUsize::new(1));
62 //! let spinlock_clone = spinlock.clone();
63 //! let thread = thread::spawn(move|| {
64 //! spinlock_clone.store(0, Ordering::SeqCst);
67 //! // Wait for the other thread to release the lock
68 //! while spinlock.load(Ordering::SeqCst) != 0 {}
70 //! if let Err(panic) = thread.join() {
71 //! println!("Thread had an error: {:?}", panic);
76 //! Keep a global count of live threads:
79 //! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
81 //! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
83 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
84 //! println!("live threads: {}", old_thread_count + 1);
87 #![stable(feature = "rust1", since = "1.0.0")]
88 #![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
89 #![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]
91 use self::Ordering
::*;
97 /// Save power or switch hyperthreads in a busy-wait spin-loop.
99 /// This function is deliberately more primitive than
100 /// [`std::thread::yield_now`](../../../std/thread/fn.yield_now.html) and
101 /// does not directly yield to the system's scheduler.
102 /// In some cases it might be useful to use a combination of both functions.
103 /// Careful benchmarking is advised.
105 /// On some platforms this function may not do anything at all.
107 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
108 pub fn spin_loop_hint() {
109 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
111 asm
!("pause" ::: "memory" : "volatile");
114 #[cfg(target_arch = "aarch64")]
116 asm
!("yield" ::: "memory" : "volatile");
120 /// A boolean type which can be safely shared between threads.
122 /// This type has the same in-memory representation as a [`bool`].
124 /// [`bool`]: ../../../std/primitive.bool.html
125 #[cfg(target_has_atomic = "8")]
126 #[stable(feature = "rust1", since = "1.0.0")]
127 pub struct AtomicBool
{
131 #[cfg(target_has_atomic = "8")]
132 #[stable(feature = "rust1", since = "1.0.0")]
133 impl Default
for AtomicBool
{
134 /// Creates an `AtomicBool` initialized to `false`.
135 fn default() -> Self {
140 // Send is implicitly implemented for AtomicBool.
141 #[cfg(target_has_atomic = "8")]
142 #[stable(feature = "rust1", since = "1.0.0")]
143 unsafe impl Sync
for AtomicBool {}
145 /// A raw pointer type which can be safely shared between threads.
147 /// This type has the same in-memory representation as a `*mut T`.
148 #[cfg(target_has_atomic = "ptr")]
149 #[stable(feature = "rust1", since = "1.0.0")]
150 pub struct AtomicPtr
<T
> {
151 p
: UnsafeCell
<*mut T
>,
154 #[cfg(target_has_atomic = "ptr")]
155 #[stable(feature = "rust1", since = "1.0.0")]
156 impl<T
> Default
for AtomicPtr
<T
> {
157 /// Creates a null `AtomicPtr<T>`.
158 fn default() -> AtomicPtr
<T
> {
159 AtomicPtr
::new(::ptr
::null_mut())
163 #[cfg(target_has_atomic = "ptr")]
164 #[stable(feature = "rust1", since = "1.0.0")]
165 unsafe impl<T
> Send
for AtomicPtr
<T
> {}
166 #[cfg(target_has_atomic = "ptr")]
167 #[stable(feature = "rust1", since = "1.0.0")]
168 unsafe impl<T
> Sync
for AtomicPtr
<T
> {}
170 /// Atomic memory orderings
172 /// Memory orderings limit the ways that both the compiler and CPU may reorder
173 /// instructions around atomic operations. At its most restrictive,
174 /// "sequentially consistent" atomics allow neither reads nor writes
175 /// to be moved either before or after the atomic operation; on the other end
176 /// "relaxed" atomics allow all reorderings.
178 /// Rust's memory orderings are [the same as
179 /// LLVM's](https://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
181 /// For more information see the [nomicon].
183 /// [nomicon]: ../../../nomicon/atomics.html
184 #[stable(feature = "rust1", since = "1.0.0")]
185 #[derive(Copy, Clone, Debug)]
188 /// No ordering constraints, only atomic operations.
190 /// Corresponds to LLVM's [`Monotonic`] ordering.
192 /// [`Monotonic`]: https://llvm.org/docs/Atomics.html#monotonic
193 #[stable(feature = "rust1", since = "1.0.0")]
195 /// When coupled with a store, all previous operations become ordered
196 /// before any load of this value with [`Acquire`] (or stronger) ordering.
197 /// In particular, all previous writes become visible to all threads
198 /// that perform an [`Acquire`] (or stronger) load of this value.
200 /// Notice that using this ordering for an operation that combines loads
201 /// and stores leads to a [`Relaxed`] load operation!
203 /// This ordering is only applicable for operations that can perform a store.
205 /// Corresponds to LLVM's [`Release`] ordering.
207 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
208 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
209 /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic
210 #[stable(feature = "rust1", since = "1.0.0")]
212 /// When coupled with a load, if the loaded value was written by a store operation with
213 /// [`Release`] (or stronger) ordering, then all subsequent operations
214 /// become ordered after that store. In particular, all subsequent loads will see data
215 /// written before the store.
217 /// Notice that using this ordering for an operation that combines loads
218 /// and stores leads to a [`Relaxed`] store operation!
220 /// This ordering is only applicable for operations that can perform a load.
222 /// Corresponds to LLVM's [`Acquire`] ordering.
224 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
225 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
226 /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic
227 #[stable(feature = "rust1", since = "1.0.0")]
229 /// Has the effects of both [`Acquire`] and [`Release`] together:
230 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
232 /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
233 /// not performing any store and hence it has just `Acquire` ordering. However,
234 /// `AcqRel` will never perform [`Relaxed`] accesses.
236 /// This ordering is only applicable for operations that combine both loads and stores.
238 /// Corresponds to LLVM's [`AcquireRelease`] ordering.
240 /// [`AcquireRelease`]: https://llvm.org/docs/Atomics.html#acquirerelease
241 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
242 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
243 /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic
244 #[stable(feature = "rust1", since = "1.0.0")]
246 /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
247 /// operations, respectively) with the additional guarantee that all threads see all
248 /// sequentially consistent operations in the same order.
250 /// Corresponds to LLVM's [`SequentiallyConsistent`] ordering.
252 /// [`SequentiallyConsistent`]: https://llvm.org/docs/Atomics.html#sequentiallyconsistent
253 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
254 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
255 /// [`AcqRel`]: https://llvm.org/docs/Atomics.html#acquirerelease
256 #[stable(feature = "rust1", since = "1.0.0")]
260 /// An [`AtomicBool`] initialized to `false`.
262 /// [`AtomicBool`]: struct.AtomicBool.html
263 #[cfg(target_has_atomic = "8")]
264 #[stable(feature = "rust1", since = "1.0.0")]
265 pub const ATOMIC_BOOL_INIT
: AtomicBool
= AtomicBool
::new(false);
267 #[cfg(target_has_atomic = "8")]
269 /// Creates a new `AtomicBool`.
274 /// use std::sync::atomic::AtomicBool;
276 /// let atomic_true = AtomicBool::new(true);
277 /// let atomic_false = AtomicBool::new(false);
280 #[stable(feature = "rust1", since = "1.0.0")]
281 pub const fn new(v
: bool
) -> AtomicBool
{
282 AtomicBool { v: UnsafeCell::new(v as u8) }
285 /// Returns a mutable reference to the underlying [`bool`].
287 /// This is safe because the mutable reference guarantees that no other threads are
288 /// concurrently accessing the atomic data.
290 /// [`bool`]: ../../../std/primitive.bool.html
295 /// use std::sync::atomic::{AtomicBool, Ordering};
297 /// let mut some_bool = AtomicBool::new(true);
298 /// assert_eq!(*some_bool.get_mut(), true);
299 /// *some_bool.get_mut() = false;
300 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
303 #[stable(feature = "atomic_access", since = "1.15.0")]
304 pub fn get_mut(&mut self) -> &mut bool
{
305 unsafe { &mut *(self.v.get() as *mut bool) }
308 /// Consumes the atomic and returns the contained value.
310 /// This is safe because passing `self` by value guarantees that no other threads are
311 /// concurrently accessing the atomic data.
316 /// use std::sync::atomic::AtomicBool;
318 /// let some_bool = AtomicBool::new(true);
319 /// assert_eq!(some_bool.into_inner(), true);
322 #[stable(feature = "atomic_access", since = "1.15.0")]
323 pub fn into_inner(self) -> bool
{
324 self.v
.into_inner() != 0
327 /// Loads a value from the bool.
329 /// `load` takes an [`Ordering`] argument which describes the memory ordering
330 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
334 /// Panics if `order` is [`Release`] or [`AcqRel`].
336 /// [`Ordering`]: enum.Ordering.html
337 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
338 /// [`Release`]: enum.Ordering.html#variant.Release
339 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
340 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
341 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
346 /// use std::sync::atomic::{AtomicBool, Ordering};
348 /// let some_bool = AtomicBool::new(true);
350 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
353 #[stable(feature = "rust1", since = "1.0.0")]
354 pub fn load(&self, order
: Ordering
) -> bool
{
355 unsafe { atomic_load(self.v.get(), order) != 0 }
358 /// Stores a value into the bool.
360 /// `store` takes an [`Ordering`] argument which describes the memory ordering
361 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
365 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
367 /// [`Ordering`]: enum.Ordering.html
368 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
369 /// [`Release`]: enum.Ordering.html#variant.Release
370 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
371 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
372 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
377 /// use std::sync::atomic::{AtomicBool, Ordering};
379 /// let some_bool = AtomicBool::new(true);
381 /// some_bool.store(false, Ordering::Relaxed);
382 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
385 #[stable(feature = "rust1", since = "1.0.0")]
386 pub fn store(&self, val
: bool
, order
: Ordering
) {
388 atomic_store(self.v
.get(), val
as u8, order
);
392 /// Stores a value into the bool, returning the previous value.
394 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
395 /// of this operation. All ordering modes are possible. Note that using
396 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
397 /// using [`Release`] makes the load part [`Relaxed`].
399 /// [`Ordering`]: enum.Ordering.html
400 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
401 /// [`Release`]: enum.Ordering.html#variant.Release
402 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
407 /// use std::sync::atomic::{AtomicBool, Ordering};
409 /// let some_bool = AtomicBool::new(true);
411 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
412 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
415 #[stable(feature = "rust1", since = "1.0.0")]
416 #[cfg(target_has_atomic = "cas")]
417 pub fn swap(&self, val
: bool
, order
: Ordering
) -> bool
{
418 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
421 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
423 /// The return value is always the previous value. If it is equal to `current`, then the value
426 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
427 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
428 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
429 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
430 /// happens, and using [`Release`] makes the load part [`Relaxed`].
432 /// [`Ordering`]: enum.Ordering.html
433 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
434 /// [`Release`]: enum.Ordering.html#variant.Release
435 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
436 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
437 /// [`bool`]: ../../../std/primitive.bool.html
442 /// use std::sync::atomic::{AtomicBool, Ordering};
444 /// let some_bool = AtomicBool::new(true);
446 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
447 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
449 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
450 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
453 #[stable(feature = "rust1", since = "1.0.0")]
454 #[cfg(target_has_atomic = "cas")]
455 pub fn compare_and_swap(&self, current
: bool
, new
: bool
, order
: Ordering
) -> bool
{
456 match self.compare_exchange(current
, new
, order
, strongest_failure_ordering(order
)) {
462 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
464 /// The return value is a result indicating whether the new value was written and containing
465 /// the previous value. On success this value is guaranteed to be equal to `current`.
467 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
468 /// ordering of this operation. The first describes the required ordering if the
469 /// operation succeeds while the second describes the required ordering when the
470 /// operation fails. Using [`Acquire`] as success ordering makes the store part
471 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
472 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
473 /// and must be equivalent to or weaker than the success ordering.
476 /// [`bool`]: ../../../std/primitive.bool.html
477 /// [`Ordering`]: enum.Ordering.html
478 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
479 /// [`Release`]: enum.Ordering.html#variant.Release
480 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
481 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
486 /// use std::sync::atomic::{AtomicBool, Ordering};
488 /// let some_bool = AtomicBool::new(true);
490 /// assert_eq!(some_bool.compare_exchange(true,
492 /// Ordering::Acquire,
493 /// Ordering::Relaxed),
495 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
497 /// assert_eq!(some_bool.compare_exchange(true, true,
498 /// Ordering::SeqCst,
499 /// Ordering::Acquire),
501 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
504 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
505 #[cfg(target_has_atomic = "cas")]
506 pub fn compare_exchange(&self,
511 -> Result
<bool
, bool
> {
513 atomic_compare_exchange(self.v
.get(), current
as u8, new
as u8, success
, failure
)
516 Err(x
) => Err(x
!= 0),
520 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
522 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
523 /// comparison succeeds, which can result in more efficient code on some platforms. The
524 /// return value is a result indicating whether the new value was written and containing the
527 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
528 /// ordering of this operation. The first describes the required ordering if the
529 /// operation succeeds while the second describes the required ordering when the
530 /// operation fails. Using [`Acquire`] as success ordering makes the store part
531 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
532 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
533 /// and must be equivalent to or weaker than the success ordering.
535 /// [`bool`]: ../../../std/primitive.bool.html
536 /// [`compare_exchange`]: #method.compare_exchange
537 /// [`Ordering`]: enum.Ordering.html
538 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
539 /// [`Release`]: enum.Ordering.html#variant.Release
540 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
541 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
546 /// use std::sync::atomic::{AtomicBool, Ordering};
548 /// let val = AtomicBool::new(false);
551 /// let mut old = val.load(Ordering::Relaxed);
553 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
555 /// Err(x) => old = x,
560 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
561 #[cfg(target_has_atomic = "cas")]
562 pub fn compare_exchange_weak(&self,
567 -> Result
<bool
, bool
> {
569 atomic_compare_exchange_weak(self.v
.get(), current
as u8, new
as u8, success
, failure
)
572 Err(x
) => Err(x
!= 0),
576 /// Logical "and" with a boolean value.
578 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
579 /// the new value to the result.
581 /// Returns the previous value.
583 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
584 /// of this operation. All ordering modes are possible. Note that using
585 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
586 /// using [`Release`] makes the load part [`Relaxed`].
588 /// [`Ordering`]: enum.Ordering.html
589 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
590 /// [`Release`]: enum.Ordering.html#variant.Release
591 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
596 /// use std::sync::atomic::{AtomicBool, Ordering};
598 /// let foo = AtomicBool::new(true);
599 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
600 /// assert_eq!(foo.load(Ordering::SeqCst), false);
602 /// let foo = AtomicBool::new(true);
603 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
604 /// assert_eq!(foo.load(Ordering::SeqCst), true);
606 /// let foo = AtomicBool::new(false);
607 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
608 /// assert_eq!(foo.load(Ordering::SeqCst), false);
611 #[stable(feature = "rust1", since = "1.0.0")]
612 #[cfg(target_has_atomic = "cas")]
613 pub fn fetch_and(&self, val
: bool
, order
: Ordering
) -> bool
{
614 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
617 /// Logical "nand" with a boolean value.
619 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
620 /// the new value to the result.
622 /// Returns the previous value.
624 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
625 /// of this operation. All ordering modes are possible. Note that using
626 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
627 /// using [`Release`] makes the load part [`Relaxed`].
629 /// [`Ordering`]: enum.Ordering.html
630 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
631 /// [`Release`]: enum.Ordering.html#variant.Release
632 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
637 /// use std::sync::atomic::{AtomicBool, Ordering};
639 /// let foo = AtomicBool::new(true);
640 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
641 /// assert_eq!(foo.load(Ordering::SeqCst), true);
643 /// let foo = AtomicBool::new(true);
644 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
645 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
646 /// assert_eq!(foo.load(Ordering::SeqCst), false);
648 /// let foo = AtomicBool::new(false);
649 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
650 /// assert_eq!(foo.load(Ordering::SeqCst), true);
653 #[stable(feature = "rust1", since = "1.0.0")]
654 #[cfg(target_has_atomic = "cas")]
655 pub fn fetch_nand(&self, val
: bool
, order
: Ordering
) -> bool
{
656 // We can't use atomic_nand here because it can result in a bool with
657 // an invalid value. This happens because the atomic operation is done
658 // with an 8-bit integer internally, which would set the upper 7 bits.
659 // So we just use fetch_xor or swap instead.
662 // We must invert the bool.
663 self.fetch_xor(true, order
)
665 // !(x & false) == true
666 // We must set the bool to true.
667 self.swap(true, order
)
671 /// Logical "or" with a boolean value.
673 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
674 /// new value to the result.
676 /// Returns the previous value.
678 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
679 /// of this operation. All ordering modes are possible. Note that using
680 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
681 /// using [`Release`] makes the load part [`Relaxed`].
683 /// [`Ordering`]: enum.Ordering.html
684 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
685 /// [`Release`]: enum.Ordering.html#variant.Release
686 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
691 /// use std::sync::atomic::{AtomicBool, Ordering};
693 /// let foo = AtomicBool::new(true);
694 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
695 /// assert_eq!(foo.load(Ordering::SeqCst), true);
697 /// let foo = AtomicBool::new(true);
698 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
699 /// assert_eq!(foo.load(Ordering::SeqCst), true);
701 /// let foo = AtomicBool::new(false);
702 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
703 /// assert_eq!(foo.load(Ordering::SeqCst), false);
706 #[stable(feature = "rust1", since = "1.0.0")]
707 #[cfg(target_has_atomic = "cas")]
708 pub fn fetch_or(&self, val
: bool
, order
: Ordering
) -> bool
{
709 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
712 /// Logical "xor" with a boolean value.
714 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
715 /// the new value to the result.
717 /// Returns the previous value.
719 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
720 /// of this operation. All ordering modes are possible. Note that using
721 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
722 /// using [`Release`] makes the load part [`Relaxed`].
724 /// [`Ordering`]: enum.Ordering.html
725 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
726 /// [`Release`]: enum.Ordering.html#variant.Release
727 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
732 /// use std::sync::atomic::{AtomicBool, Ordering};
734 /// let foo = AtomicBool::new(true);
735 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
736 /// assert_eq!(foo.load(Ordering::SeqCst), true);
738 /// let foo = AtomicBool::new(true);
739 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
740 /// assert_eq!(foo.load(Ordering::SeqCst), false);
742 /// let foo = AtomicBool::new(false);
743 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
744 /// assert_eq!(foo.load(Ordering::SeqCst), false);
747 #[stable(feature = "rust1", since = "1.0.0")]
748 #[cfg(target_has_atomic = "cas")]
749 pub fn fetch_xor(&self, val
: bool
, order
: Ordering
) -> bool
{
750 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
754 #[cfg(target_has_atomic = "ptr")]
755 impl<T
> AtomicPtr
<T
> {
756 /// Creates a new `AtomicPtr`.
761 /// use std::sync::atomic::AtomicPtr;
763 /// let ptr = &mut 5;
764 /// let atomic_ptr = AtomicPtr::new(ptr);
767 #[stable(feature = "rust1", since = "1.0.0")]
768 pub const fn new(p
: *mut T
) -> AtomicPtr
<T
> {
769 AtomicPtr { p: UnsafeCell::new(p) }
772 /// Returns a mutable reference to the underlying pointer.
774 /// This is safe because the mutable reference guarantees that no other threads are
775 /// concurrently accessing the atomic data.
780 /// use std::sync::atomic::{AtomicPtr, Ordering};
782 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
783 /// *atomic_ptr.get_mut() = &mut 5;
784 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
787 #[stable(feature = "atomic_access", since = "1.15.0")]
788 pub fn get_mut(&mut self) -> &mut *mut T
{
789 unsafe { &mut *self.p.get() }
792 /// Consumes the atomic and returns the contained value.
794 /// This is safe because passing `self` by value guarantees that no other threads are
795 /// concurrently accessing the atomic data.
800 /// use std::sync::atomic::AtomicPtr;
802 /// let atomic_ptr = AtomicPtr::new(&mut 5);
803 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
806 #[stable(feature = "atomic_access", since = "1.15.0")]
807 pub fn into_inner(self) -> *mut T
{
811 /// Loads a value from the pointer.
813 /// `load` takes an [`Ordering`] argument which describes the memory ordering
814 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
818 /// Panics if `order` is [`Release`] or [`AcqRel`].
820 /// [`Ordering`]: enum.Ordering.html
821 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
822 /// [`Release`]: enum.Ordering.html#variant.Release
823 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
824 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
825 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
830 /// use std::sync::atomic::{AtomicPtr, Ordering};
832 /// let ptr = &mut 5;
833 /// let some_ptr = AtomicPtr::new(ptr);
835 /// let value = some_ptr.load(Ordering::Relaxed);
838 #[stable(feature = "rust1", since = "1.0.0")]
839 pub fn load(&self, order
: Ordering
) -> *mut T
{
840 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
843 /// Stores a value into the pointer.
845 /// `store` takes an [`Ordering`] argument which describes the memory ordering
846 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
850 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
852 /// [`Ordering`]: enum.Ordering.html
853 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
854 /// [`Release`]: enum.Ordering.html#variant.Release
855 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
856 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
857 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
862 /// use std::sync::atomic::{AtomicPtr, Ordering};
864 /// let ptr = &mut 5;
865 /// let some_ptr = AtomicPtr::new(ptr);
867 /// let other_ptr = &mut 10;
869 /// some_ptr.store(other_ptr, Ordering::Relaxed);
872 #[stable(feature = "rust1", since = "1.0.0")]
873 pub fn store(&self, ptr
: *mut T
, order
: Ordering
) {
875 atomic_store(self.p
.get() as *mut usize, ptr
as usize, order
);
879 /// Stores a value into the pointer, returning the previous value.
881 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
882 /// of this operation. All ordering modes are possible. Note that using
883 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
884 /// using [`Release`] makes the load part [`Relaxed`].
886 /// [`Ordering`]: enum.Ordering.html
887 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
888 /// [`Release`]: enum.Ordering.html#variant.Release
889 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
894 /// use std::sync::atomic::{AtomicPtr, Ordering};
896 /// let ptr = &mut 5;
897 /// let some_ptr = AtomicPtr::new(ptr);
899 /// let other_ptr = &mut 10;
901 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
904 #[stable(feature = "rust1", since = "1.0.0")]
905 #[cfg(target_has_atomic = "cas")]
906 pub fn swap(&self, ptr
: *mut T
, order
: Ordering
) -> *mut T
{
907 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
910 /// Stores a value into the pointer if the current value is the same as the `current` value.
912 /// The return value is always the previous value. If it is equal to `current`, then the value
915 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
916 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
917 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
918 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
919 /// happens, and using [`Release`] makes the load part [`Relaxed`].
921 /// [`Ordering`]: enum.Ordering.html
922 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
923 /// [`Release`]: enum.Ordering.html#variant.Release
924 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
925 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
930 /// use std::sync::atomic::{AtomicPtr, Ordering};
932 /// let ptr = &mut 5;
933 /// let some_ptr = AtomicPtr::new(ptr);
935 /// let other_ptr = &mut 10;
936 /// let another_ptr = &mut 10;
938 /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
941 #[stable(feature = "rust1", since = "1.0.0")]
942 #[cfg(target_has_atomic = "cas")]
943 pub fn compare_and_swap(&self, current
: *mut T
, new
: *mut T
, order
: Ordering
) -> *mut T
{
944 match self.compare_exchange(current
, new
, order
, strongest_failure_ordering(order
)) {
950 /// Stores a value into the pointer if the current value is the same as the `current` value.
952 /// The return value is a result indicating whether the new value was written and containing
953 /// the previous value. On success this value is guaranteed to be equal to `current`.
955 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
956 /// ordering of this operation. The first describes the required ordering if the
957 /// operation succeeds while the second describes the required ordering when the
958 /// operation fails. Using [`Acquire`] as success ordering makes the store part
959 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
960 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
961 /// and must be equivalent to or weaker than the success ordering.
963 /// [`Ordering`]: enum.Ordering.html
964 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
965 /// [`Release`]: enum.Ordering.html#variant.Release
966 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
967 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
972 /// use std::sync::atomic::{AtomicPtr, Ordering};
974 /// let ptr = &mut 5;
975 /// let some_ptr = AtomicPtr::new(ptr);
977 /// let other_ptr = &mut 10;
978 /// let another_ptr = &mut 10;
980 /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
981 /// Ordering::SeqCst, Ordering::Relaxed);
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_exchange(&self,
                        current: *mut T,
                        new: *mut T,
                        success: Ordering,
                        failure: Ordering)
                        -> Result<*mut T, *mut T> {
    // Perform the exchange on the address representation, then map the
    // raw `usize` payload back to `*mut T` on both the Ok and Err paths.
    unsafe {
        atomic_compare_exchange(self.p.get() as *mut usize,
                                current as usize,
                                new as usize,
                                success,
                                failure)
            .map(|prev| prev as *mut T)
            .map_err(|prev| prev as *mut T)
    }
}
1005 /// Stores a value into the pointer if the current value is the same as the `current` value.
1007 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
1008 /// comparison succeeds, which can result in more efficient code on some platforms. The
1009 /// return value is a result indicating whether the new value was written and containing the
1012 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1013 /// ordering of this operation. The first describes the required ordering if the
1014 /// operation succeeds while the second describes the required ordering when the
1015 /// operation fails. Using [`Acquire`] as success ordering makes the store part
1016 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1017 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1018 /// and must be equivalent to or weaker than the success ordering.
1020 /// [`compare_exchange`]: #method.compare_exchange
1021 /// [`Ordering`]: enum.Ordering.html
1022 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1023 /// [`Release`]: enum.Ordering.html#variant.Release
1024 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1025 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1030 /// use std::sync::atomic::{AtomicPtr, Ordering};
1032 /// let some_ptr = AtomicPtr::new(&mut 5);
1034 /// let new = &mut 10;
1035 /// let mut old = some_ptr.load(Ordering::Relaxed);
1037 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1039 /// Err(x) => old = x,
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_exchange_weak(&self,
                             current: *mut T,
                             new: *mut T,
                             success: Ordering,
                             failure: Ordering)
                             -> Result<*mut T, *mut T> {
    // Same shape as `compare_exchange`, but delegating to the weak
    // intrinsic wrapper, which is permitted to fail spuriously.
    unsafe {
        atomic_compare_exchange_weak(self.p.get() as *mut usize,
                                     current as usize,
                                     new as usize,
                                     success,
                                     failure)
            .map(|prev| prev as *mut T)
            .map_err(|prev| prev as *mut T)
    }
}
1066 #[cfg(target_has_atomic = "8")]
1067 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1068 impl From
<bool
> for AtomicBool
{
1070 fn from(b
: bool
) -> Self { Self::new(b) }
1073 #[cfg(target_has_atomic = "ptr")]
1074 #[stable(feature = "atomic_from", since = "1.23.0")]
1075 impl<T
> From
<*mut T
> for AtomicPtr
<T
> {
1077 fn from(p
: *mut T
) -> Self { Self::new(p) }
1080 #[cfg(target_has_atomic = "ptr")]
1081 macro_rules
! atomic_int
{
1085 $stable_access
:meta
,
1088 $s_int_type
:expr
, $int_ref
:expr
,
1089 $extra_feature
:expr
,
1090 $min_fn
:ident
, $max_fn
:ident
,
1091 $int_type
:ident $atomic_type
:ident $atomic_init
:ident
) => {
1092 /// An integer type which can be safely shared between threads.
1094 /// This type has the same in-memory representation as the underlying
1095 /// integer type, [`
1096 #[doc = $s_int_type]
1099 /// ). For more about the differences between atomic types and
1100 /// non-atomic types, please see the [module-level documentation].
1102 /// [module-level documentation]: index.html
1104 pub struct $atomic_type
{
1105 v
: UnsafeCell
<$int_type
>,
1108 /// An atomic integer initialized to `0`.
1110 pub const $atomic_init
: $atomic_type
= $atomic_type
::new(0);
1113 impl Default
for $atomic_type
{
1114 fn default() -> Self {
1115 Self::new(Default
::default())
1120 impl From
<$int_type
> for $atomic_type
{
1122 fn from(v
: $int_type
) -> Self { Self::new(v) }
1126 impl fmt
::Debug
for $atomic_type
{
1127 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
1128 fmt
::Debug
::fmt(&self.load(Ordering
::SeqCst
), f
)
1132 // Send is implicitly implemented.
1134 unsafe impl Sync
for $atomic_type {}
1138 concat
!("Creates a new atomic integer.
1143 ", $extra_feature
, "use std::sync::atomic::", stringify
!($atomic_type
), ";
1145 let atomic_forty_two = ", stringify
!($atomic_type
), "::new(42);
1149 pub const fn new(v
: $int_type
) -> Self {
1150 $atomic_type {v: UnsafeCell::new(v)}
1155 concat
!("Returns a mutable reference to the underlying integer.
1157 This is safe because the mutable reference guarantees that no other threads are
1158 concurrently accessing the atomic data.
1163 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1165 let mut some_var = ", stringify
!($atomic_type
), "::new(10);
1166 assert_eq!(*some_var.get_mut(), 10);
1167 *some_var.get_mut() = 5;
1168 assert_eq!(some_var.load(Ordering::SeqCst), 5);
1172 pub fn get_mut(&mut self) -> &mut $int_type
{
1173 unsafe { &mut *self.v.get() }
1178 concat
!("Consumes the atomic and returns the contained value.
1180 This is safe because passing `self` by value guarantees that no other threads are
1181 concurrently accessing the atomic data.
1186 ", $extra_feature
, "use std::sync::atomic::", stringify
!($atomic_type
), ";
1188 let some_var = ", stringify
!($atomic_type
), "::new(5);
1189 assert_eq!(some_var.into_inner(), 5);
1193 pub fn into_inner(self) -> $int_type
{
1199 concat
!("Loads a value from the atomic integer.
1201 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1202 Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1206 Panics if `order` is [`Release`] or [`AcqRel`].
1208 [`Ordering`]: enum.Ordering.html
1209 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1210 [`Release`]: enum.Ordering.html#variant.Release
1211 [`Acquire`]: enum.Ordering.html#variant.Acquire
1212 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1213 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1218 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1220 let some_var = ", stringify
!($atomic_type
), "::new(5);
1222 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1226 pub fn load(&self, order
: Ordering
) -> $int_type
{
1227 unsafe { atomic_load(self.v.get(), order) }
1232 concat
!("Stores a value into the atomic integer.
1234 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1235 Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1239 Panics if `order` is [`Acquire`] or [`AcqRel`].
1241 [`Ordering`]: enum.Ordering.html
1242 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1243 [`Release`]: enum.Ordering.html#variant.Release
1244 [`Acquire`]: enum.Ordering.html#variant.Acquire
1245 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1246 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1251 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1253 let some_var = ", stringify
!($atomic_type
), "::new(5);
1255 some_var.store(10, Ordering::Relaxed);
1256 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1260 pub fn store(&self, val
: $int_type
, order
: Ordering
) {
1261 unsafe { atomic_store(self.v.get(), val, order); }
1266 concat
!("Stores a value into the atomic integer, returning the previous value.
1268 `swap` takes an [`Ordering`] argument which describes the memory ordering
1269 of this operation. All ordering modes are possible. Note that using
1270 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1271 using [`Release`] makes the load part [`Relaxed`].
1273 [`Ordering`]: enum.Ordering.html
1274 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1275 [`Release`]: enum.Ordering.html#variant.Release
1276 [`Acquire`]: enum.Ordering.html#variant.Acquire
1281 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1283 let some_var = ", stringify
!($atomic_type
), "::new(5);
1285 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1289 #[cfg(target_has_atomic = "cas")]
1290 pub fn swap(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1291 unsafe { atomic_swap(self.v.get(), val, order) }
1296 concat
!("Stores a value into the atomic integer if the current value is the same as
1297 the `current` value.
1299 The return value is always the previous value. If it is equal to `current`, then the
1302 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1303 ordering of this operation. Notice that even when using [`AcqRel`], the operation
1304 might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1305 Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1306 happens, and using [`Release`] makes the load part [`Relaxed`].
1308 [`Ordering`]: enum.Ordering.html
1309 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1310 [`Release`]: enum.Ordering.html#variant.Release
1311 [`Acquire`]: enum.Ordering.html#variant.Acquire
1312 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1317 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1319 let some_var = ", stringify
!($atomic_type
), "::new(5);
1321 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1322 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1324 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1325 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1329 #[cfg(target_has_atomic = "cas")]
1330 pub fn compare_and_swap(&self,
1333 order
: Ordering
) -> $int_type
{
1334 match self.compare_exchange(current
,
1337 strongest_failure_ordering(order
)) {
1345 concat
!("Stores a value into the atomic integer if the current value is the same as
1346 the `current` value.
1348 The return value is a result indicating whether the new value was written and
1349 containing the previous value. On success this value is guaranteed to be equal to
1352 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1353 ordering of this operation. The first describes the required ordering if the
1354 operation succeeds while the second describes the required ordering when the
1355 operation fails. Using [`Acquire`] as success ordering makes the store part
1356 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1357 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1358 and must be equivalent to or weaker than the success ordering.
1360 [`Ordering`]: enum.Ordering.html
1361 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1362 [`Release`]: enum.Ordering.html#variant.Release
1363 [`Acquire`]: enum.Ordering.html#variant.Acquire
1364 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1369 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1371 let some_var = ", stringify
!($atomic_type
), "::new(5);
1373 assert_eq!(some_var.compare_exchange(5, 10,
1377 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1379 assert_eq!(some_var.compare_exchange(6, 12,
1383 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1387 #[cfg(target_has_atomic = "cas")]
1388 pub fn compare_exchange(&self,
1392 failure
: Ordering
) -> Result
<$int_type
, $int_type
> {
1393 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1398 concat
!("Stores a value into the atomic integer if the current value is the same as
1399 the `current` value.
1401 Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1402 when the comparison succeeds, which can result in more efficient code on some
1403 platforms. The return value is a result indicating whether the new value was
1404 written and containing the previous value.
1406 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1407 ordering of this operation. The first describes the required ordering if the
1408 operation succeeds while the second describes the required ordering when the
1409 operation fails. Using [`Acquire`] as success ordering makes the store part
1410 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1411 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1412 and must be equivalent to or weaker than the success ordering.
1414 [`compare_exchange`]: #method.compare_exchange
1415 [`Ordering`]: enum.Ordering.html
1416 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1417 [`Release`]: enum.Ordering.html#variant.Release
1418 [`Acquire`]: enum.Ordering.html#variant.Acquire
1419 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1424 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1426 let val = ", stringify
!($atomic_type
), "::new(4);
1428 let mut old = val.load(Ordering::Relaxed);
1431 match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1439 #[cfg(target_has_atomic = "cas")]
1440 pub fn compare_exchange_weak(&self,
1444 failure
: Ordering
) -> Result
<$int_type
, $int_type
> {
1446 atomic_compare_exchange_weak(self.v
.get(), current
, new
, success
, failure
)
1452 concat
!("Adds to the current value, returning the previous value.
1454 This operation wraps around on overflow.
1456 `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1457 of this operation. All ordering modes are possible. Note that using
1458 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1459 using [`Release`] makes the load part [`Relaxed`].
1461 [`Ordering`]: enum.Ordering.html
1462 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1463 [`Release`]: enum.Ordering.html#variant.Release
1464 [`Acquire`]: enum.Ordering.html#variant.Acquire
1469 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1471 let foo = ", stringify
!($atomic_type
), "::new(0);
1472 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1473 assert_eq!(foo.load(Ordering::SeqCst), 10);
1477 #[cfg(target_has_atomic = "cas")]
1478 pub fn fetch_add(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1479 unsafe { atomic_add(self.v.get(), val, order) }
1484 concat
!("Subtracts from the current value, returning the previous value.
1486 This operation wraps around on overflow.
1488 `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1489 of this operation. All ordering modes are possible. Note that using
1490 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1491 using [`Release`] makes the load part [`Relaxed`].
1493 [`Ordering`]: enum.Ordering.html
1494 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1495 [`Release`]: enum.Ordering.html#variant.Release
1496 [`Acquire`]: enum.Ordering.html#variant.Acquire
1501 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1503 let foo = ", stringify
!($atomic_type
), "::new(20);
1504 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1505 assert_eq!(foo.load(Ordering::SeqCst), 10);
1509 #[cfg(target_has_atomic = "cas")]
1510 pub fn fetch_sub(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1511 unsafe { atomic_sub(self.v.get(), val, order) }
1516 concat
!("Bitwise \"and\" with the current value.
1518 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1519 sets the new value to the result.
1521 Returns the previous value.
1523 `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1524 of this operation. All ordering modes are possible. Note that using
1525 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1526 using [`Release`] makes the load part [`Relaxed`].
1528 [`Ordering`]: enum.Ordering.html
1529 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1530 [`Release`]: enum.Ordering.html#variant.Release
1531 [`Acquire`]: enum.Ordering.html#variant.Acquire
1536 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1538 let foo = ", stringify
!($atomic_type
), "::new(0b101101);
1539 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1540 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1544 #[cfg(target_has_atomic = "cas")]
1545 pub fn fetch_and(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1546 unsafe { atomic_and(self.v.get(), val, order) }
1551 concat
!("Bitwise \"nand\" with the current value.
1553 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1554 sets the new value to the result.
1556 Returns the previous value.
1558 `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1559 of this operation. All ordering modes are possible. Note that using
1560 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1561 using [`Release`] makes the load part [`Relaxed`].
1563 [`Ordering`]: enum.Ordering.html
1564 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1565 [`Release`]: enum.Ordering.html#variant.Release
1566 [`Acquire`]: enum.Ordering.html#variant.Acquire
1571 ", $extra_feature
, "
1572 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1574 let foo = ", stringify
!($atomic_type
), "::new(0x13);
1575 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1576 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1580 #[cfg(target_has_atomic = "cas")]
1581 pub fn fetch_nand(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1582 unsafe { atomic_nand(self.v.get(), val, order) }
1587 concat
!("Bitwise \"or\" with the current value.
1589 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1590 sets the new value to the result.
1592 Returns the previous value.
1594 `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1595 of this operation. All ordering modes are possible. Note that using
1596 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1597 using [`Release`] makes the load part [`Relaxed`].
1599 [`Ordering`]: enum.Ordering.html
1600 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1601 [`Release`]: enum.Ordering.html#variant.Release
1602 [`Acquire`]: enum.Ordering.html#variant.Acquire
1607 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1609 let foo = ", stringify
!($atomic_type
), "::new(0b101101);
1610 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1611 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1615 #[cfg(target_has_atomic = "cas")]
1616 pub fn fetch_or(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1617 unsafe { atomic_or(self.v.get(), val, order) }
1622 concat
!("Bitwise \"xor\" with the current value.
1624 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1625 sets the new value to the result.
1627 Returns the previous value.
1629 `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1630 of this operation. All ordering modes are possible. Note that using
1631 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1632 using [`Release`] makes the load part [`Relaxed`].
1634 [`Ordering`]: enum.Ordering.html
1635 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1636 [`Release`]: enum.Ordering.html#variant.Release
1637 [`Acquire`]: enum.Ordering.html#variant.Acquire
1642 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1644 let foo = ", stringify
!($atomic_type
), "::new(0b101101);
1645 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1646 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1650 #[cfg(target_has_atomic = "cas")]
1651 pub fn fetch_xor(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1652 unsafe { atomic_xor(self.v.get(), val, order) }
1657 concat
!("Fetches the value, and applies a function to it that returns an optional
1658 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1659 `Err(previous_value)`.
1661 Note: This may call the function multiple times if the value has been changed from other threads in
1662 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1663 but once to the stored value.
1665 `fetch_update` takes two [`Ordering`] arguments to describe the memory
1666 ordering of this operation. The first describes the required ordering for loads
1667 and failed updates while the second describes the required ordering when the
1668 operation finally succeeds. Beware that this is different from the two
1669 modes in [`compare_exchange`]!
1671 Using [`Acquire`] as success ordering makes the store part
1672 of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1673 [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1674 and must be equivalent to or weaker than the success ordering.
1676 [`bool`]: ../../../std/primitive.bool.html
1677 [`compare_exchange`]: #method.compare_exchange
1678 [`Ordering`]: enum.Ordering.html
1679 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1680 [`Release`]: enum.Ordering.html#variant.Release
1681 [`Acquire`]: enum.Ordering.html#variant.Acquire
1682 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1687 #![feature(no_more_cas)]
1688 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1690 let x = ", stringify
!($atomic_type
), "::new(7);
1691 assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7));
1692 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7));
1693 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8));
1694 assert_eq!(x.load(Ordering::SeqCst), 9);
1697 #[unstable(feature = "no_more_cas",
1698 reason
= "no more CAS loops in user code",
1700 #[cfg(target_has_atomic = "cas")]
1701 pub fn fetch_update
<F
>(&self,
1703 fetch_order
: Ordering
,
1704 set_order
: Ordering
) -> Result
<$int_type
, $int_type
>
1705 where F
: FnMut($int_type
) -> Option
<$int_type
> {
1706 let mut prev
= self.load(fetch_order
);
1707 while let Some(next
) = f(prev
) {
1708 match self.compare_exchange_weak(prev
, next
, set_order
, fetch_order
) {
1709 x @
Ok(_
) => return x
,
1710 Err(next_prev
) => prev
= next_prev
1718 concat
!("Maximum with the current value.
1720 Finds the maximum of the current value and the argument `val`, and
1721 sets the new value to the result.
1723 Returns the previous value.
1725 `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1726 of this operation. All ordering modes are possible. Note that using
1727 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1728 using [`Release`] makes the load part [`Relaxed`].
1730 [`Ordering`]: enum.Ordering.html
1731 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1732 [`Release`]: enum.Ordering.html#variant.Release
1733 [`Acquire`]: enum.Ordering.html#variant.Acquire
1738 #![feature(atomic_min_max)]
1739 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1741 let foo = ", stringify
!($atomic_type
), "::new(23);
1742 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1743 assert_eq!(foo.load(Ordering::SeqCst), 42);
1746 If you want to obtain the maximum value in one step, you can use the following:
1749 #![feature(atomic_min_max)]
1750 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1752 let foo = ", stringify
!($atomic_type
), "::new(23);
1754 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1755 assert!(max_foo == 42);
1758 #[unstable(feature = "atomic_min_max",
1759 reason
= "easier and faster min/max than writing manual CAS loop",
1761 #[cfg(target_has_atomic = "cas")]
1762 pub fn fetch_max(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1763 unsafe { $max_fn(self.v.get(), val, order) }
1768 concat
!("Minimum with the current value.
1770 Finds the minimum of the current value and the argument `val`, and
1771 sets the new value to the result.
1773 Returns the previous value.
1775 `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
1776 of this operation. All ordering modes are possible. Note that using
1777 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1778 using [`Release`] makes the load part [`Relaxed`].
1780 [`Ordering`]: enum.Ordering.html
1781 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1782 [`Release`]: enum.Ordering.html#variant.Release
1783 [`Acquire`]: enum.Ordering.html#variant.Acquire
1788 #![feature(atomic_min_max)]
1789 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1791 let foo = ", stringify
!($atomic_type
), "::new(23);
1792 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1793 assert_eq!(foo.load(Ordering::Relaxed), 23);
1794 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1795 assert_eq!(foo.load(Ordering::Relaxed), 22);
1798 If you want to obtain the minimum value in one step, you can use the following:
1801 #![feature(atomic_min_max)]
1802 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1804 let foo = ", stringify
!($atomic_type
), "::new(23);
1806 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
1807 assert_eq!(min_foo, 12);
1810 #[unstable(feature = "atomic_min_max",
1811 reason
= "easier and faster min/max than writing manual CAS loop",
1813 #[cfg(target_has_atomic = "cas")]
1814 pub fn fetch_min(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1815 unsafe { $min_fn(self.v.get(), val, order) }
1823 #[cfg(target_has_atomic = "8")]
1825 unstable(feature
= "integer_atomics", issue
= "32976"),
1826 unstable(feature
= "integer_atomics", issue
= "32976"),
1827 unstable(feature
= "integer_atomics", issue
= "32976"),
1828 unstable(feature
= "integer_atomics", issue
= "32976"),
1829 unstable(feature
= "integer_atomics", issue
= "32976"),
1830 unstable(feature
= "integer_atomics", issue
= "32976"),
1831 "i8", "../../../std/primitive.i8.html",
1832 "#![feature(integer_atomics)]\n\n",
1833 atomic_min
, atomic_max
,
1834 i8 AtomicI8 ATOMIC_I8_INIT
1836 #[cfg(target_has_atomic = "8")]
1838 unstable(feature
= "integer_atomics", issue
= "32976"),
1839 unstable(feature
= "integer_atomics", issue
= "32976"),
1840 unstable(feature
= "integer_atomics", issue
= "32976"),
1841 unstable(feature
= "integer_atomics", issue
= "32976"),
1842 unstable(feature
= "integer_atomics", issue
= "32976"),
1843 unstable(feature
= "integer_atomics", issue
= "32976"),
1844 "u8", "../../../std/primitive.u8.html",
1845 "#![feature(integer_atomics)]\n\n",
1846 atomic_umin
, atomic_umax
,
1847 u8 AtomicU8 ATOMIC_U8_INIT
1849 #[cfg(target_has_atomic = "16")]
1851 unstable(feature
= "integer_atomics", issue
= "32976"),
1852 unstable(feature
= "integer_atomics", issue
= "32976"),
1853 unstable(feature
= "integer_atomics", issue
= "32976"),
1854 unstable(feature
= "integer_atomics", issue
= "32976"),
1855 unstable(feature
= "integer_atomics", issue
= "32976"),
1856 unstable(feature
= "integer_atomics", issue
= "32976"),
1857 "i16", "../../../std/primitive.i16.html",
1858 "#![feature(integer_atomics)]\n\n",
1859 atomic_min
, atomic_max
,
1860 i16 AtomicI16 ATOMIC_I16_INIT
1862 #[cfg(target_has_atomic = "16")]
1864 unstable(feature
= "integer_atomics", issue
= "32976"),
1865 unstable(feature
= "integer_atomics", issue
= "32976"),
1866 unstable(feature
= "integer_atomics", issue
= "32976"),
1867 unstable(feature
= "integer_atomics", issue
= "32976"),
1868 unstable(feature
= "integer_atomics", issue
= "32976"),
1869 unstable(feature
= "integer_atomics", issue
= "32976"),
1870 "u16", "../../../std/primitive.u16.html",
1871 "#![feature(integer_atomics)]\n\n",
1872 atomic_umin
, atomic_umax
,
1873 u16 AtomicU16 ATOMIC_U16_INIT
1875 #[cfg(target_has_atomic = "32")]
1877 unstable(feature
= "integer_atomics", issue
= "32976"),
1878 unstable(feature
= "integer_atomics", issue
= "32976"),
1879 unstable(feature
= "integer_atomics", issue
= "32976"),
1880 unstable(feature
= "integer_atomics", issue
= "32976"),
1881 unstable(feature
= "integer_atomics", issue
= "32976"),
1882 unstable(feature
= "integer_atomics", issue
= "32976"),
1883 "i32", "../../../std/primitive.i32.html",
1884 "#![feature(integer_atomics)]\n\n",
1885 atomic_min
, atomic_max
,
1886 i32 AtomicI32 ATOMIC_I32_INIT
1888 #[cfg(target_has_atomic = "32")]
1890 unstable(feature
= "integer_atomics", issue
= "32976"),
1891 unstable(feature
= "integer_atomics", issue
= "32976"),
1892 unstable(feature
= "integer_atomics", issue
= "32976"),
1893 unstable(feature
= "integer_atomics", issue
= "32976"),
1894 unstable(feature
= "integer_atomics", issue
= "32976"),
1895 unstable(feature
= "integer_atomics", issue
= "32976"),
1896 "u32", "../../../std/primitive.u32.html",
1897 "#![feature(integer_atomics)]\n\n",
1898 atomic_umin
, atomic_umax
,
1899 u32 AtomicU32 ATOMIC_U32_INIT
1901 #[cfg(target_has_atomic = "64")]
1903 unstable(feature
= "integer_atomics", issue
= "32976"),
1904 unstable(feature
= "integer_atomics", issue
= "32976"),
1905 unstable(feature
= "integer_atomics", issue
= "32976"),
1906 unstable(feature
= "integer_atomics", issue
= "32976"),
1907 unstable(feature
= "integer_atomics", issue
= "32976"),
1908 unstable(feature
= "integer_atomics", issue
= "32976"),
1909 "i64", "../../../std/primitive.i64.html",
1910 "#![feature(integer_atomics)]\n\n",
1911 atomic_min
, atomic_max
,
1912 i64 AtomicI64 ATOMIC_I64_INIT
1914 #[cfg(target_has_atomic = "64")]
1916 unstable(feature
= "integer_atomics", issue
= "32976"),
1917 unstable(feature
= "integer_atomics", issue
= "32976"),
1918 unstable(feature
= "integer_atomics", issue
= "32976"),
1919 unstable(feature
= "integer_atomics", issue
= "32976"),
1920 unstable(feature
= "integer_atomics", issue
= "32976"),
1921 unstable(feature
= "integer_atomics", issue
= "32976"),
1922 "u64", "../../../std/primitive.u64.html",
1923 "#![feature(integer_atomics)]\n\n",
1924 atomic_umin
, atomic_umax
,
1925 u64 AtomicU64 ATOMIC_U64_INIT
1927 #[cfg(target_has_atomic = "ptr")]
1929 stable(feature
= "rust1", since
= "1.0.0"),
1930 stable(feature
= "extended_compare_and_swap", since
= "1.10.0"),
1931 stable(feature
= "atomic_debug", since
= "1.3.0"),
1932 stable(feature
= "atomic_access", since
= "1.15.0"),
1933 stable(feature
= "atomic_from", since
= "1.23.0"),
1934 stable(feature
= "atomic_nand", since
= "1.27.0"),
1935 "isize", "../../../std/primitive.isize.html",
1937 atomic_min
, atomic_max
,
1938 isize AtomicIsize ATOMIC_ISIZE_INIT
// Generate the stable `AtomicUsize`; min/max use the unsigned intrinsics.
#[cfg(target_has_atomic = "ptr")]
atomic_int! {
    stable(feature = "rust1", since = "1.0.0"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    stable(feature = "atomic_access", since = "1.15.0"),
    stable(feature = "atomic_from", since = "1.23.0"),
    stable(feature = "atomic_nand", since = "1.27.0"),
    "usize", "../../../std/primitive.usize.html",
    "",
    atomic_umin, atomic_umax,
    usize AtomicUsize ATOMIC_USIZE_INIT
}
/// Maps a compare-exchange *success* ordering to the strongest *failure*
/// ordering the cmpxchg intrinsics accept for it: the write component
/// (`Release`) is dropped, since a failed exchange performs no store.
#[cfg(target_has_atomic = "cas")]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
    }
}
/// Atomic store: dispatches `order` to the matching LLVM store intrinsic.
///
/// `Acquire` and `AcqRel` are not legal store orderings and panic.
unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
    }
}
/// Atomic load: dispatches `order` to the matching LLVM load intrinsic.
///
/// `Release` and `AcqRel` are not legal load orderings and panic.
unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
    }
}
/// Atomic exchange: stores `val` and returns the previous value, dispatching
/// `order` to the matching `xchg` intrinsic. All five orderings are valid.
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val),
    }
}
/// Atomic fetch-and-add, dispatched to the `xadd` intrinsic for `order`.
///
/// Returns the previous value (like __sync_fetch_and_add).
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val),
    }
}
/// Atomic fetch-and-subtract, dispatched to the `xsub` intrinsic for `order`.
///
/// Returns the previous value (like __sync_fetch_and_sub).
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val),
    }
}
/// Strong compare-exchange: dispatches the `(success, failure)` ordering pair
/// to the matching `cxchg` intrinsic. Each intrinsic returns `(old_value, ok)`,
/// which is converted to `Ok(old)` on success and `Err(old)` on failure.
///
/// Invalid pairs panic: the failure ordering may not contain a `Release`
/// component (no store happens on failure) and may not be stronger than the
/// success ordering.
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     failure: Ordering)
                                     -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}
/// Weak compare-exchange: like `atomic_compare_exchange`, but maps to the
/// `cxchgweak` intrinsics, which are allowed to fail spuriously (returning
/// `Err` even when the values compared equal). Same ordering-pair validity
/// rules and the same `(old_value, ok)` -> `Result` conversion.
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
                                          old: T,
                                          new: T,
                                          success: Ordering,
                                          failure: Ordering)
                                          -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}
/// Atomic fetch-and-AND; returns the previous value.
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val),
    }
}
/// Atomic fetch-and-NAND; returns the previous value.
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_nand_acq(dst, val),
        Release => intrinsics::atomic_nand_rel(dst, val),
        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
        SeqCst => intrinsics::atomic_nand(dst, val),
    }
}
/// Atomic fetch-and-OR; returns the previous value.
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val),
    }
}
/// Atomic fetch-and-XOR; returns the previous value.
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val),
    }
}
/// Atomic fetch-max; stores the larger of `*dst` and `val`, and
/// returns the previous value (signed comparison).
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_max<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_max_acq(dst, val),
        Release => intrinsics::atomic_max_rel(dst, val),
        AcqRel => intrinsics::atomic_max_acqrel(dst, val),
        Relaxed => intrinsics::atomic_max_relaxed(dst, val),
        SeqCst => intrinsics::atomic_max(dst, val),
    }
}
/// Atomic fetch-min; stores the smaller of `*dst` and `val`, and
/// returns the previous value (signed comparison).
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_min<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_min_acq(dst, val),
        Release => intrinsics::atomic_min_rel(dst, val),
        AcqRel => intrinsics::atomic_min_acqrel(dst, val),
        Relaxed => intrinsics::atomic_min_relaxed(dst, val),
        SeqCst => intrinsics::atomic_min(dst, val),
    }
}
/// Atomic fetch-max using the unsigned `umax` intrinsics; returns the
/// previous value (unsigned comparison — the original comment said "signed",
/// which contradicts the `umax` intrinsic family used below).
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_umax<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_umax_acq(dst, val),
        Release => intrinsics::atomic_umax_rel(dst, val),
        AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
        Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
        SeqCst => intrinsics::atomic_umax(dst, val),
    }
}
/// Atomic fetch-min using the unsigned `umin` intrinsics; returns the
/// previous value (unsigned comparison — the original comment said "signed",
/// which contradicts the `umin` intrinsic family used below).
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_umin<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_umin_acq(dst, val),
        Release => intrinsics::atomic_umin_rel(dst, val),
        AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
        Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
        SeqCst => intrinsics::atomic_umin(dst, val),
    }
}
/// An atomic fence.
///
/// Depending on the specified order, a fence prevents the compiler and CPU from
/// reordering certain types of memory operations around it.
/// That creates synchronizes-with relationships between it and atomic operations
/// or fences in other threads.
///
/// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
/// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
/// exist operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is synchronized before B and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// ```text
///     Thread 1                            Thread 2
///
/// fence(Release);      A --------------
/// x.store(3, Relaxed); X ---------    |
///                                |    |
///                                -------------> Y  if x.load(Relaxed) == 3 {
///                                     |-------> B      fence(Acquire);
///                                                      ...
///                                                  }
/// ```
///
/// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
/// with a fence.
///
/// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
/// and [`Release`] semantics, participates in the global program order of the
/// other [`SeqCst`] operations and/or fences.
///
/// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::fence;
/// use std::sync::atomic::Ordering;
///
/// // A mutual exclusion primitive based on spinlock.
/// pub struct Mutex {
///     flag: AtomicBool,
/// }
///
/// impl Mutex {
///     pub fn new() -> Mutex {
///         Mutex {
///             flag: AtomicBool::new(false),
///         }
///     }
///
///     pub fn lock(&self) {
///         while !self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
///         // This fence synchronizes-with store in `unlock`.
///         fence(Ordering::Acquire);
///     }
///
///     pub fn unlock(&self) {
///         self.flag.store(false, Ordering::Release);
///     }
/// }
/// ```
///
/// [`Ordering`]: enum.Ordering.html
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
pub fn fence(order: Ordering) {
    // On wasm32 it looks like fences aren't implemented in LLVM yet in that
    // they will cause LLVM to abort. The wasm instruction set doesn't have
    // fences right now. There's discussion online about the best way for tools
    // to conventionally implement fences at
    // https://github.com/WebAssembly/tool-conventions/issues/59. We should
    // follow that discussion and implement a solution when one comes about!
    #[cfg(not(target_arch = "wasm32"))]
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
        }
    }
}
/// A compiler memory fence.
///
/// `compiler_fence` does not emit any machine code, but restricts the kinds
/// of memory re-ordering the compiler is allowed to do. Specifically, depending on
/// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
/// or writes from before or after the call to the other side of the call to
/// `compiler_fence`. Note that it does **not** prevent the *hardware*
/// from doing such re-ordering. This is not a problem in a single-threaded,
/// execution context, but when other threads may modify memory at the same
/// time, stronger synchronization primitives such as [`fence`] are required.
///
/// The re-ordering prevented by the different ordering semantics are:
///
/// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
/// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
/// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
/// - with [`AcqRel`], both of the above rules are enforced.
///
/// `compiler_fence` is generally only useful for preventing a thread from
/// racing *with itself*. That is, if a given thread is executing one piece
/// of code, and is then interrupted, and starts executing code elsewhere
/// (while still in the same thread, and conceptually still on the same
/// core). In traditional programs, this can only occur when a signal
/// handler is registered. In more low-level code, such situations can also
/// arise when handling interrupts, when implementing green threads with
/// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
/// discussion of [memory barriers].
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// Without `compiler_fence`, the `assert_eq!` in following code
/// is *not* guaranteed to succeed, despite everything happening in a single thread.
/// To see why, remember that the compiler is free to swap the stores to
/// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
/// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
/// after `IS_READY` is updated, then the signal handler will see
/// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
/// Using a `compiler_fence` remedies this situation.
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicUsize};
/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
/// use std::sync::atomic::Ordering;
/// use std::sync::atomic::compiler_fence;
///
/// static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
/// static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
///
/// fn main() {
///     IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
///     // prevent earlier writes from being moved beyond this point
///     compiler_fence(Ordering::Release);
///     IS_READY.store(true, Ordering::Relaxed);
/// }
///
/// fn signal_handler() {
///     if IS_READY.load(Ordering::Relaxed) {
///         assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
///     }
/// }
/// ```
///
/// [`fence`]: fn.fence.html
/// [`Ordering`]: enum.Ordering.html
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
#[stable(feature = "compiler_fences", since = "1.21.0")]
pub fn compiler_fence(order: Ordering) {
    // `singlethreadfence` intrinsics constrain only the compiler, not the CPU.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_singlethreadfence_acq(),
            Release => intrinsics::atomic_singlethreadfence_rel(),
            AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
            SeqCst => intrinsics::atomic_singlethreadfence(),
            Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
        }
    }
}
// Debug-formats the current value, observed with a `SeqCst` load.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl fmt::Debug for AtomicBool {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
    }
}
// Debug-formats the current pointer value, observed with a `SeqCst` load.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
    }
}
// Pointer-formats (as an address) the current value, observed with a `SeqCst` load.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "atomic_pointer", since = "1.24.0")]
impl<T> fmt::Pointer for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)
    }
}