1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
13 //! Atomic types provide primitive shared-memory communication between
14 //! threads, and are the building blocks of other concurrent
17 //! This module defines atomic versions of a select number of primitive
18 //! types, including [`AtomicBool`], [`AtomicIsize`], and [`AtomicUsize`].
19 //! Atomic types present operations that, when used correctly, synchronize
20 //! updates between threads.
22 //! [`AtomicBool`]: struct.AtomicBool.html
23 //! [`AtomicIsize`]: struct.AtomicIsize.html
24 //! [`AtomicUsize`]: struct.AtomicUsize.html
26 //! Each method takes an [`Ordering`] which represents the strength of
27 //! the memory barrier for that operation. These orderings are the
28 //! same as [LLVM atomic orderings][1]. For more information see the [nomicon][2].
30 //! [`Ordering`]: enum.Ordering.html
32 //! [1]: https://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
33 //! [2]: ../../../nomicon/atomics.html
35 //! Atomic variables are safe to share between threads (they implement [`Sync`])
36 //! but they do not themselves provide the mechanism for sharing and follow the
37 //! [threading model](../../../std/thread/index.html#the-threading-model) of rust.
38 //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
39 //! atomically-reference-counted shared pointer).
41 //! [`Sync`]: ../../marker/trait.Sync.html
42 //! [arc]: ../../../std/sync/struct.Arc.html
44 //! Most atomic types may be stored in static variables, initialized using
45 //! the provided static initializers like [`ATOMIC_BOOL_INIT`]. Atomic statics
46 //! are often used for lazy global initialization.
48 //! [`ATOMIC_BOOL_INIT`]: constant.ATOMIC_BOOL_INIT.html
52 //! A simple spinlock:
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
60 //! let spinlock = Arc::new(AtomicUsize::new(1));
62 //! let spinlock_clone = spinlock.clone();
63 //! let thread = thread::spawn(move|| {
64 //! spinlock_clone.store(0, Ordering::SeqCst);
67 //! // Wait for the other thread to release the lock
68 //! while spinlock.load(Ordering::SeqCst) != 0 {}
70 //! if let Err(panic) = thread.join() {
71 //! println!("Thread had an error: {:?}", panic);
76 //! Keep a global count of live threads:
79 //! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
81 //! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
83 //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
84 //! println!("live threads: {}", old_thread_count + 1);
87 #![stable(feature = "rust1", since = "1.0.0")]
88 #![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
89 #![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]
91 use self::Ordering
::*;
97 /// Save power or switch hyperthreads in a busy-wait spin-loop.
99 /// This function is deliberately more primitive than
100 /// [`std::thread::yield_now`](../../../std/thread/fn.yield_now.html) and
101 /// does not directly yield to the system's scheduler.
102 /// In some cases it might be useful to use a combination of both functions.
103 /// Careful benchmarking is advised.
105 /// On some platforms this function may not do anything at all.
107 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
108 pub fn spin_loop_hint() {
109 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
111 asm
!("pause" ::: "memory" : "volatile");
114 #[cfg(target_arch = "aarch64")]
116 asm
!("yield" ::: "memory" : "volatile");
120 /// A boolean type which can be safely shared between threads.
122 /// This type has the same in-memory representation as a [`bool`].
124 /// [`bool`]: ../../../std/primitive.bool.html
125 #[cfg(target_has_atomic = "8")]
126 #[stable(feature = "rust1", since = "1.0.0")]
127 pub struct AtomicBool
{
131 #[cfg(target_has_atomic = "8")]
132 #[stable(feature = "rust1", since = "1.0.0")]
133 impl Default
for AtomicBool
{
134 /// Creates an `AtomicBool` initialized to `false`.
135 fn default() -> Self {
140 // Send is implicitly implemented for AtomicBool.
141 #[cfg(target_has_atomic = "8")]
142 #[stable(feature = "rust1", since = "1.0.0")]
143 unsafe impl Sync
for AtomicBool {}
145 /// A raw pointer type which can be safely shared between threads.
147 /// This type has the same in-memory representation as a `*mut T`.
148 #[cfg(target_has_atomic = "ptr")]
149 #[stable(feature = "rust1", since = "1.0.0")]
150 pub struct AtomicPtr
<T
> {
151 p
: UnsafeCell
<*mut T
>,
154 #[cfg(target_has_atomic = "ptr")]
155 #[stable(feature = "rust1", since = "1.0.0")]
156 impl<T
> Default
for AtomicPtr
<T
> {
157 /// Creates a null `AtomicPtr<T>`.
158 fn default() -> AtomicPtr
<T
> {
159 AtomicPtr
::new(::ptr
::null_mut())
163 #[cfg(target_has_atomic = "ptr")]
164 #[stable(feature = "rust1", since = "1.0.0")]
165 unsafe impl<T
> Send
for AtomicPtr
<T
> {}
166 #[cfg(target_has_atomic = "ptr")]
167 #[stable(feature = "rust1", since = "1.0.0")]
168 unsafe impl<T
> Sync
for AtomicPtr
<T
> {}
170 /// Atomic memory orderings
172 /// Memory orderings limit the ways that both the compiler and CPU may reorder
173 /// instructions around atomic operations. At its most restrictive,
174 /// "sequentially consistent" atomics allow neither reads nor writes
175 /// to be moved either before or after the atomic operation; on the other end
176 /// "relaxed" atomics allow all reorderings.
178 /// Rust's memory orderings are [the same as
179 /// LLVM's](https://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
181 /// For more information see the [nomicon].
183 /// [nomicon]: ../../../nomicon/atomics.html
184 #[stable(feature = "rust1", since = "1.0.0")]
185 #[derive(Copy, Clone, Debug)]
188 /// No ordering constraints, only atomic operations.
190 /// Corresponds to LLVM's [`Monotonic`] ordering.
192 /// [`Monotonic`]: https://llvm.org/docs/Atomics.html#monotonic
193 #[stable(feature = "rust1", since = "1.0.0")]
195 /// When coupled with a store, all previous operations become ordered
196 /// before any load of this value with [`Acquire`] (or stronger) ordering.
197 /// In particular, all previous writes become visible to all threads
198 /// that perform an [`Acquire`] (or stronger) load of this value.
200 /// Notice that using this ordering for an operation that combines loads
201 /// and stores leads to a [`Relaxed`] load operation!
203 /// This ordering is only applicable for operations that can perform a store.
205 /// Corresponds to LLVM's [`Release`] ordering.
207 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
208 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
209 /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic
210 #[stable(feature = "rust1", since = "1.0.0")]
212 /// When coupled with a load, if the loaded value was written by a store operation with
213 /// [`Release`] (or stronger) ordering, then all subsequent operations
214 /// become ordered after that store. In particular, all subsequent loads will see data
215 /// written before the store.
217 /// Notice that using this ordering for an operation that combines loads
218 /// and stores leads to a [`Relaxed`] store operation!
220 /// This ordering is only applicable for operations that can perform a load.
222 /// Corresponds to LLVM's [`Acquire`] ordering.
224 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
225 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
226 /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic
227 #[stable(feature = "rust1", since = "1.0.0")]
229 /// Has the effects of both [`Acquire`] and [`Release`] together:
230 /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
232 /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
233 /// not performing any store and hence it has just `Acquire` ordering. However,
234 /// `AcqRel` will never perform [`Relaxed`] accesses.
236 /// This ordering is only applicable for operations that combine both loads and stores.
238 /// Corresponds to LLVM's [`AcquireRelease`] ordering.
240 /// [`AcquireRelease`]: https://llvm.org/docs/Atomics.html#acquirerelease
241 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
242 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
243 /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic
244 #[stable(feature = "rust1", since = "1.0.0")]
246 /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
247 /// operations, respectively) with the additional guarantee that all threads see all
248 /// sequentially consistent operations in the same order.
250 /// Corresponds to LLVM's [`SequentiallyConsistent`] ordering.
252 /// [`SequentiallyConsistent`]: https://llvm.org/docs/Atomics.html#sequentiallyconsistent
253 /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire
254 /// [`Release`]: https://llvm.org/docs/Atomics.html#release
255 /// [`AcqRel`]: https://llvm.org/docs/Atomics.html#acquirerelease
256 #[stable(feature = "rust1", since = "1.0.0")]
260 /// An [`AtomicBool`] initialized to `false`.
262 /// [`AtomicBool`]: struct.AtomicBool.html
263 #[cfg(target_has_atomic = "8")]
264 #[stable(feature = "rust1", since = "1.0.0")]
265 pub const ATOMIC_BOOL_INIT
: AtomicBool
= AtomicBool
::new(false);
267 #[cfg(target_has_atomic = "8")]
269 /// Creates a new `AtomicBool`.
274 /// use std::sync::atomic::AtomicBool;
276 /// let atomic_true = AtomicBool::new(true);
277 /// let atomic_false = AtomicBool::new(false);
280 #[stable(feature = "rust1", since = "1.0.0")]
281 pub const fn new(v
: bool
) -> AtomicBool
{
282 AtomicBool { v: UnsafeCell::new(v as u8) }
285 /// Returns a mutable reference to the underlying [`bool`].
287 /// This is safe because the mutable reference guarantees that no other threads are
288 /// concurrently accessing the atomic data.
290 /// [`bool`]: ../../../std/primitive.bool.html
295 /// use std::sync::atomic::{AtomicBool, Ordering};
297 /// let mut some_bool = AtomicBool::new(true);
298 /// assert_eq!(*some_bool.get_mut(), true);
299 /// *some_bool.get_mut() = false;
300 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
303 #[stable(feature = "atomic_access", since = "1.15.0")]
304 pub fn get_mut(&mut self) -> &mut bool
{
305 unsafe { &mut *(self.v.get() as *mut bool) }
308 /// Consumes the atomic and returns the contained value.
310 /// This is safe because passing `self` by value guarantees that no other threads are
311 /// concurrently accessing the atomic data.
316 /// use std::sync::atomic::AtomicBool;
318 /// let some_bool = AtomicBool::new(true);
319 /// assert_eq!(some_bool.into_inner(), true);
322 #[stable(feature = "atomic_access", since = "1.15.0")]
323 pub fn into_inner(self) -> bool
{
324 self.v
.into_inner() != 0
327 /// Loads a value from the bool.
329 /// `load` takes an [`Ordering`] argument which describes the memory ordering
330 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
334 /// Panics if `order` is [`Release`] or [`AcqRel`].
336 /// [`Ordering`]: enum.Ordering.html
337 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
338 /// [`Release`]: enum.Ordering.html#variant.Release
339 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
340 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
341 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
346 /// use std::sync::atomic::{AtomicBool, Ordering};
348 /// let some_bool = AtomicBool::new(true);
350 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
353 #[stable(feature = "rust1", since = "1.0.0")]
354 pub fn load(&self, order
: Ordering
) -> bool
{
355 unsafe { atomic_load(self.v.get(), order) != 0 }
358 /// Stores a value into the bool.
360 /// `store` takes an [`Ordering`] argument which describes the memory ordering
361 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
365 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
367 /// [`Ordering`]: enum.Ordering.html
368 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
369 /// [`Release`]: enum.Ordering.html#variant.Release
370 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
371 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
372 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
377 /// use std::sync::atomic::{AtomicBool, Ordering};
379 /// let some_bool = AtomicBool::new(true);
381 /// some_bool.store(false, Ordering::Relaxed);
382 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
385 #[stable(feature = "rust1", since = "1.0.0")]
386 pub fn store(&self, val
: bool
, order
: Ordering
) {
388 atomic_store(self.v
.get(), val
as u8, order
);
/// Stores a value into the bool, returning the previous value.
///
/// `swap` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn swap(&self, val: bool, order: Ordering) -> bool {
    unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
}
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
///
/// The return value is always the previous value. If it is equal to `current`, then the value
/// was updated.
///
/// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
/// ordering of this operation. Notice that even when using [`AcqRel`], the operation
/// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
/// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
/// happens, and using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`bool`]: ../../../std/primitive.bool.html
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
///
/// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
    // Delegates to compare_exchange, picking the strongest failure
    // ordering allowed for `order`; success and failure both just
    // report the previous value.
    match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
        Ok(x) => x,
        Err(x) => x,
    }
}
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
///
/// The return value is a result indicating whether the new value was written and containing
/// the previous value. On success this value is guaranteed to be equal to `current`.
///
/// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
/// ordering of this operation. The first describes the required ordering if the
/// operation succeeds while the second describes the required ordering when the
/// operation fails. Using [`Acquire`] as success ordering makes the store part
/// of this operation [`Relaxed`], and using [`Release`] makes the successful load
/// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
/// and must be equivalent to or weaker than the success ordering.
///
/// [`bool`]: ../../../std/primitive.bool.html
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let some_bool = AtomicBool::new(true);
///
/// assert_eq!(some_bool.compare_exchange(true,
///                                       false,
///                                       Ordering::Acquire,
///                                       Ordering::Relaxed),
///            Ok(true));
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
///
/// assert_eq!(some_bool.compare_exchange(true, true,
///                                       Ordering::SeqCst,
///                                       Ordering::Acquire),
///            Err(false));
/// assert_eq!(some_bool.load(Ordering::Relaxed), false);
/// ```
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_exchange(&self,
                        current: bool,
                        new: bool,
                        success: Ordering,
                        failure: Ordering)
                        -> Result<bool, bool> {
    // The intrinsic works on the raw u8; translate both sides of the
    // result back to bool (non-zero -> true).
    match unsafe {
        atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
    } {
        Ok(x) => Ok(x != 0),
        Err(x) => Err(x != 0),
    }
}
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
///
/// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
/// comparison succeeds, which can result in more efficient code on some platforms. The
/// return value is a result indicating whether the new value was written and containing the
/// previous value.
///
/// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
/// ordering of this operation. The first describes the required ordering if the
/// operation succeeds while the second describes the required ordering when the
/// operation fails. Using [`Acquire`] as success ordering makes the store part
/// of this operation [`Relaxed`], and using [`Release`] makes the successful load
/// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
/// and must be equivalent to or weaker than the success ordering.
///
/// [`bool`]: ../../../std/primitive.bool.html
/// [`compare_exchange`]: #method.compare_exchange
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let val = AtomicBool::new(false);
///
/// let new = true;
/// let mut old = val.load(Ordering::Relaxed);
/// loop {
///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
///         Ok(_) => break,
///         Err(x) => old = x,
///     }
/// }
/// ```
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_exchange_weak(&self,
                             current: bool,
                             new: bool,
                             success: Ordering,
                             failure: Ordering)
                             -> Result<bool, bool> {
    // Same shape as compare_exchange, but the weak intrinsic may fail
    // spuriously; callers are expected to retry in a loop.
    match unsafe {
        atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
    } {
        Ok(x) => Ok(x != 0),
        Err(x) => Err(x != 0),
    }
}
/// Logical "and" with a boolean value.
///
/// Performs a logical "and" operation on the current value and the argument `val`, and sets
/// the new value to the result.
///
/// Returns the previous value.
///
/// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
    unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
}
/// Logical "nand" with a boolean value.
///
/// Performs a logical "nand" operation on the current value and the argument `val`, and sets
/// the new value to the result.
///
/// Returns the previous value.
///
/// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
    // We can't use atomic_nand here because it can result in a bool with
    // an invalid value. This happens because the atomic operation is done
    // with an 8-bit integer internally, which would set the upper 7 bits.
    // So we just use fetch_xor or swap instead.
    if val {
        // !(x & true) == !x
        // We must invert the bool.
        self.fetch_xor(true, order)
    } else {
        // !(x & false) == true
        // We must set the bool to true.
        self.swap(true, order)
    }
}
/// Logical "or" with a boolean value.
///
/// Performs a logical "or" operation on the current value and the argument `val`, and sets the
/// new value to the result.
///
/// Returns the previous value.
///
/// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
    unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
}
/// Logical "xor" with a boolean value.
///
/// Performs a logical "xor" operation on the current value and the argument `val`, and sets
/// the new value to the result.
///
/// Returns the previous value.
///
/// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), true);
///
/// let foo = AtomicBool::new(true);
/// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
///
/// let foo = AtomicBool::new(false);
/// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
/// assert_eq!(foo.load(Ordering::SeqCst), false);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
    unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
}
753 #[cfg(target_has_atomic = "ptr")]
754 impl<T
> AtomicPtr
<T
> {
755 /// Creates a new `AtomicPtr`.
760 /// use std::sync::atomic::AtomicPtr;
762 /// let ptr = &mut 5;
763 /// let atomic_ptr = AtomicPtr::new(ptr);
766 #[stable(feature = "rust1", since = "1.0.0")]
767 pub const fn new(p
: *mut T
) -> AtomicPtr
<T
> {
768 AtomicPtr { p: UnsafeCell::new(p) }
771 /// Returns a mutable reference to the underlying pointer.
773 /// This is safe because the mutable reference guarantees that no other threads are
774 /// concurrently accessing the atomic data.
779 /// use std::sync::atomic::{AtomicPtr, Ordering};
781 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
782 /// *atomic_ptr.get_mut() = &mut 5;
783 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
786 #[stable(feature = "atomic_access", since = "1.15.0")]
787 pub fn get_mut(&mut self) -> &mut *mut T
{
788 unsafe { &mut *self.p.get() }
791 /// Consumes the atomic and returns the contained value.
793 /// This is safe because passing `self` by value guarantees that no other threads are
794 /// concurrently accessing the atomic data.
799 /// use std::sync::atomic::AtomicPtr;
801 /// let atomic_ptr = AtomicPtr::new(&mut 5);
802 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
805 #[stable(feature = "atomic_access", since = "1.15.0")]
806 pub fn into_inner(self) -> *mut T
{
810 /// Loads a value from the pointer.
812 /// `load` takes an [`Ordering`] argument which describes the memory ordering
813 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
817 /// Panics if `order` is [`Release`] or [`AcqRel`].
819 /// [`Ordering`]: enum.Ordering.html
820 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
821 /// [`Release`]: enum.Ordering.html#variant.Release
822 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
823 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
824 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
829 /// use std::sync::atomic::{AtomicPtr, Ordering};
831 /// let ptr = &mut 5;
832 /// let some_ptr = AtomicPtr::new(ptr);
834 /// let value = some_ptr.load(Ordering::Relaxed);
837 #[stable(feature = "rust1", since = "1.0.0")]
838 pub fn load(&self, order
: Ordering
) -> *mut T
{
839 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
842 /// Stores a value into the pointer.
844 /// `store` takes an [`Ordering`] argument which describes the memory ordering
845 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
849 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
851 /// [`Ordering`]: enum.Ordering.html
852 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
853 /// [`Release`]: enum.Ordering.html#variant.Release
854 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
855 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
856 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
861 /// use std::sync::atomic::{AtomicPtr, Ordering};
863 /// let ptr = &mut 5;
864 /// let some_ptr = AtomicPtr::new(ptr);
866 /// let other_ptr = &mut 10;
868 /// some_ptr.store(other_ptr, Ordering::Relaxed);
871 #[stable(feature = "rust1", since = "1.0.0")]
872 pub fn store(&self, ptr
: *mut T
, order
: Ordering
) {
874 atomic_store(self.p
.get() as *mut usize, ptr
as usize, order
);
/// Stores a value into the pointer, returning the previous value.
///
/// `swap` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
/// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
///
/// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
    unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
}
/// Stores a value into the pointer if the current value is the same as the `current` value.
///
/// The return value is always the previous value. If it is equal to `current`, then the value
/// was updated.
///
/// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
/// ordering of this operation. Notice that even when using [`AcqRel`], the operation
/// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
/// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
/// happens, and using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
/// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
/// let another_ptr = &mut 10;
///
/// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
    // Delegates to compare_exchange with the strongest failure ordering
    // permitted for `order`; the previous value is returned either way.
    match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
        Ok(x) => x,
        Err(x) => x,
    }
}
/// Stores a value into the pointer if the current value is the same as the `current` value.
///
/// The return value is a result indicating whether the new value was written and containing
/// the previous value. On success this value is guaranteed to be equal to `current`.
///
/// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
/// ordering of this operation. The first describes the required ordering if the
/// operation succeeds while the second describes the required ordering when the
/// operation fails. Using [`Acquire`] as success ordering makes the store part
/// of this operation [`Relaxed`], and using [`Release`] makes the successful load
/// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
/// and must be equivalent to or weaker than the success ordering.
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
/// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
/// let another_ptr = &mut 10;
///
/// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
///                                       Ordering::SeqCst, Ordering::Relaxed);
/// ```
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_exchange(&self,
                        current: *mut T,
                        new: *mut T,
                        success: Ordering,
                        failure: Ordering)
                        -> Result<*mut T, *mut T> {
    unsafe {
        // The intrinsic works on usize-sized memory; convert the pointer
        // operands in and the result (either branch) back out.
        let res = atomic_compare_exchange(self.p.get() as *mut usize,
                                          current as usize,
                                          new as usize,
                                          success,
                                          failure);
        match res {
            Ok(x) => Ok(x as *mut T),
            Err(x) => Err(x as *mut T),
        }
    }
}
/// Stores a value into the pointer if the current value is the same as the `current` value.
///
/// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
/// comparison succeeds, which can result in more efficient code on some platforms. The
/// return value is a result indicating whether the new value was written and containing the
/// previous value.
///
/// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
/// ordering of this operation. The first describes the required ordering if the
/// operation succeeds while the second describes the required ordering when the
/// operation fails. Using [`Acquire`] as success ordering makes the store part
/// of this operation [`Relaxed`], and using [`Release`] makes the successful load
/// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
/// and must be equivalent to or weaker than the success ordering.
///
/// [`compare_exchange`]: #method.compare_exchange
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
///
/// # Examples
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let some_ptr = AtomicPtr::new(&mut 5);
///
/// let new = &mut 10;
/// let mut old = some_ptr.load(Ordering::Relaxed);
/// loop {
///     match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
///         Ok(_) => break,
///         Err(x) => old = x,
///     }
/// }
/// ```
#[inline]
#[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
#[cfg(target_has_atomic = "cas")]
pub fn compare_exchange_weak(&self,
                             current: *mut T,
                             new: *mut T,
                             success: Ordering,
                             failure: Ordering)
                             -> Result<*mut T, *mut T> {
    unsafe {
        // Same `usize`-representation round trip as `compare_exchange`,
        // but using the weak intrinsic, which may fail spuriously.
        let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
                                               current as usize,
                                               new as usize,
                                               success,
                                               failure);
        match res {
            Ok(x) => Ok(x as *mut T),
            Err(x) => Err(x as *mut T),
        }
    }
}
1064 #[cfg(target_has_atomic = "8")]
1065 #[stable(feature = "atomic_bool_from", since = "1.24.0")]
1066 impl From
<bool
> for AtomicBool
{
1068 fn from(b
: bool
) -> Self { Self::new(b) }
1071 #[cfg(target_has_atomic = "ptr")]
1072 #[stable(feature = "atomic_from", since = "1.23.0")]
1073 impl<T
> From
<*mut T
> for AtomicPtr
<T
> {
1075 fn from(p
: *mut T
) -> Self { Self::new(p) }
1078 #[cfg(target_has_atomic = "ptr")]
1079 macro_rules
! atomic_int
{
1083 $stable_access
:meta
,
1086 $s_int_type
:expr
, $int_ref
:expr
,
1087 $extra_feature
:expr
,
1088 $min_fn
:ident
, $max_fn
:ident
,
1089 $int_type
:ident $atomic_type
:ident $atomic_init
:ident
) => {
1090 /// An integer type which can be safely shared between threads.
1092 /// This type has the same in-memory representation as the underlying
1093 /// integer type, [`
1094 #[doc = $s_int_type]
1097 /// ). For more about the differences between atomic types and
1098 /// non-atomic types, please see the [module-level documentation].
1100 /// [module-level documentation]: index.html
1102 pub struct $atomic_type
{
1103 v
: UnsafeCell
<$int_type
>,
1106 /// An atomic integer initialized to `0`.
1108 pub const $atomic_init
: $atomic_type
= $atomic_type
::new(0);
1111 impl Default
for $atomic_type
{
1112 fn default() -> Self {
1113 Self::new(Default
::default())
1118 impl From
<$int_type
> for $atomic_type
{
1120 fn from(v
: $int_type
) -> Self { Self::new(v) }
1124 impl fmt
::Debug
for $atomic_type
{
1125 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
1126 fmt
::Debug
::fmt(&self.load(Ordering
::SeqCst
), f
)
1130 // Send is implicitly implemented.
1132 unsafe impl Sync
for $atomic_type {}
1136 concat
!("Creates a new atomic integer.
1141 ", $extra_feature
, "use std::sync::atomic::", stringify
!($atomic_type
), ";
1143 let atomic_forty_two = ", stringify
!($atomic_type
), "::new(42);
1147 pub const fn new(v
: $int_type
) -> Self {
1148 $atomic_type {v: UnsafeCell::new(v)}
1153 concat
!("Returns a mutable reference to the underlying integer.
1155 This is safe because the mutable reference guarantees that no other threads are
1156 concurrently accessing the atomic data.
1161 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1163 let mut some_var = ", stringify
!($atomic_type
), "::new(10);
1164 assert_eq!(*some_var.get_mut(), 10);
1165 *some_var.get_mut() = 5;
1166 assert_eq!(some_var.load(Ordering::SeqCst), 5);
1170 pub fn get_mut(&mut self) -> &mut $int_type
{
1171 unsafe { &mut *self.v.get() }
1176 concat
!("Consumes the atomic and returns the contained value.
1178 This is safe because passing `self` by value guarantees that no other threads are
1179 concurrently accessing the atomic data.
1184 ", $extra_feature
, "use std::sync::atomic::", stringify
!($atomic_type
), ";
1186 let some_var = ", stringify
!($atomic_type
), "::new(5);
1187 assert_eq!(some_var.into_inner(), 5);
1191 pub fn into_inner(self) -> $int_type
{
1197 concat
!("Loads a value from the atomic integer.
1199 `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1200 Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1204 Panics if `order` is [`Release`] or [`AcqRel`].
1206 [`Ordering`]: enum.Ordering.html
1207 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1208 [`Release`]: enum.Ordering.html#variant.Release
1209 [`Acquire`]: enum.Ordering.html#variant.Acquire
1210 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1211 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1216 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1218 let some_var = ", stringify
!($atomic_type
), "::new(5);
1220 assert_eq!(some_var.load(Ordering::Relaxed), 5);
1224 pub fn load(&self, order
: Ordering
) -> $int_type
{
1225 unsafe { atomic_load(self.v.get(), order) }
1230 concat
!("Stores a value into the atomic integer.
1232 `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1233 Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1237 Panics if `order` is [`Acquire`] or [`AcqRel`].
1239 [`Ordering`]: enum.Ordering.html
1240 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1241 [`Release`]: enum.Ordering.html#variant.Release
1242 [`Acquire`]: enum.Ordering.html#variant.Acquire
1243 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1244 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1249 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1251 let some_var = ", stringify
!($atomic_type
), "::new(5);
1253 some_var.store(10, Ordering::Relaxed);
1254 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1258 pub fn store(&self, val
: $int_type
, order
: Ordering
) {
1259 unsafe { atomic_store(self.v.get(), val, order); }
1264 concat
!("Stores a value into the atomic integer, returning the previous value.
1266 `swap` takes an [`Ordering`] argument which describes the memory ordering
1267 of this operation. All ordering modes are possible. Note that using
1268 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1269 using [`Release`] makes the load part [`Relaxed`].
1271 [`Ordering`]: enum.Ordering.html
1272 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1273 [`Release`]: enum.Ordering.html#variant.Release
1274 [`Acquire`]: enum.Ordering.html#variant.Acquire
1279 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1281 let some_var = ", stringify
!($atomic_type
), "::new(5);
1283 assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1287 #[cfg(target_has_atomic = "cas")]
1288 pub fn swap(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1289 unsafe { atomic_swap(self.v.get(), val, order) }
1294 concat
!("Stores a value into the atomic integer if the current value is the same as
1295 the `current` value.
1297 The return value is always the previous value. If it is equal to `current`, then the
1300 `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1301 ordering of this operation. Notice that even when using [`AcqRel`], the operation
1302 might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1303 Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1304 happens, and using [`Release`] makes the load part [`Relaxed`].
1306 [`Ordering`]: enum.Ordering.html
1307 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1308 [`Release`]: enum.Ordering.html#variant.Release
1309 [`Acquire`]: enum.Ordering.html#variant.Acquire
1310 [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1315 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1317 let some_var = ", stringify
!($atomic_type
), "::new(5);
1319 assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1320 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1322 assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1323 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1327 #[cfg(target_has_atomic = "cas")]
1328 pub fn compare_and_swap(&self,
1331 order
: Ordering
) -> $int_type
{
1332 match self.compare_exchange(current
,
1335 strongest_failure_ordering(order
)) {
1343 concat
!("Stores a value into the atomic integer if the current value is the same as
1344 the `current` value.
1346 The return value is a result indicating whether the new value was written and
1347 containing the previous value. On success this value is guaranteed to be equal to
1350 `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1351 ordering of this operation. The first describes the required ordering if the
1352 operation succeeds while the second describes the required ordering when the
1353 operation fails. Using [`Acquire`] as success ordering makes the store part
1354 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1355 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1356 and must be equivalent to or weaker than the success ordering.
1358 [`Ordering`]: enum.Ordering.html
1359 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1360 [`Release`]: enum.Ordering.html#variant.Release
1361 [`Acquire`]: enum.Ordering.html#variant.Acquire
1362 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1367 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1369 let some_var = ", stringify
!($atomic_type
), "::new(5);
1371 assert_eq!(some_var.compare_exchange(5, 10,
1375 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1377 assert_eq!(some_var.compare_exchange(6, 12,
1381 assert_eq!(some_var.load(Ordering::Relaxed), 10);
1385 #[cfg(target_has_atomic = "cas")]
1386 pub fn compare_exchange(&self,
1390 failure
: Ordering
) -> Result
<$int_type
, $int_type
> {
1391 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
1396 concat
!("Stores a value into the atomic integer if the current value is the same as
1397 the `current` value.
1399 Unlike [`compare_exchange`], this function is allowed to spuriously fail even
1400 when the comparison succeeds, which can result in more efficient code on some
1401 platforms. The return value is a result indicating whether the new value was
1402 written and containing the previous value.
1404 `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1405 ordering of this operation. The first describes the required ordering if the
1406 operation succeeds while the second describes the required ordering when the
1407 operation fails. Using [`Acquire`] as success ordering makes the store part
1408 of this operation [`Relaxed`], and using [`Release`] makes the successful load
1409 [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1410 and must be equivalent to or weaker than the success ordering.
1412 [`compare_exchange`]: #method.compare_exchange
1413 [`Ordering`]: enum.Ordering.html
1414 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1415 [`Release`]: enum.Ordering.html#variant.Release
1416 [`Acquire`]: enum.Ordering.html#variant.Acquire
1417 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1422 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1424 let val = ", stringify
!($atomic_type
), "::new(4);
1426 let mut old = val.load(Ordering::Relaxed);
1429 match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1437 pub fn compare_exchange_weak(&self,
1441 failure
: Ordering
) -> Result
<$int_type
, $int_type
> {
1443 atomic_compare_exchange_weak(self.v
.get(), current
, new
, success
, failure
)
1449 concat
!("Adds to the current value, returning the previous value.
1451 This operation wraps around on overflow.
1453 `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1454 of this operation. All ordering modes are possible. Note that using
1455 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1456 using [`Release`] makes the load part [`Relaxed`].
1458 [`Ordering`]: enum.Ordering.html
1459 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1460 [`Release`]: enum.Ordering.html#variant.Release
1461 [`Acquire`]: enum.Ordering.html#variant.Acquire
1466 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1468 let foo = ", stringify
!($atomic_type
), "::new(0);
1469 assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1470 assert_eq!(foo.load(Ordering::SeqCst), 10);
1474 pub fn fetch_add(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1475 unsafe { atomic_add(self.v.get(), val, order) }
1480 concat
!("Subtracts from the current value, returning the previous value.
1482 This operation wraps around on overflow.
1484 `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1485 of this operation. All ordering modes are possible. Note that using
1486 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1487 using [`Release`] makes the load part [`Relaxed`].
1489 [`Ordering`]: enum.Ordering.html
1490 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1491 [`Release`]: enum.Ordering.html#variant.Release
1492 [`Acquire`]: enum.Ordering.html#variant.Acquire
1497 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1499 let foo = ", stringify
!($atomic_type
), "::new(20);
1500 assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1501 assert_eq!(foo.load(Ordering::SeqCst), 10);
1505 pub fn fetch_sub(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1506 unsafe { atomic_sub(self.v.get(), val, order) }
1511 concat
!("Bitwise \"and\" with the current value.
1513 Performs a bitwise \"and\" operation on the current value and the argument `val`, and
1514 sets the new value to the result.
1516 Returns the previous value.
1518 `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1519 of this operation. All ordering modes are possible. Note that using
1520 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1521 using [`Release`] makes the load part [`Relaxed`].
1523 [`Ordering`]: enum.Ordering.html
1524 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1525 [`Release`]: enum.Ordering.html#variant.Release
1526 [`Acquire`]: enum.Ordering.html#variant.Acquire
1531 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1533 let foo = ", stringify
!($atomic_type
), "::new(0b101101);
1534 assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1535 assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1539 pub fn fetch_and(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1540 unsafe { atomic_and(self.v.get(), val, order) }
1545 concat
!("Bitwise \"nand\" with the current value.
1547 Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
1548 sets the new value to the result.
1550 Returns the previous value.
1552 `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1553 of this operation. All ordering modes are possible. Note that using
1554 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1555 using [`Release`] makes the load part [`Relaxed`].
1557 [`Ordering`]: enum.Ordering.html
1558 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1559 [`Release`]: enum.Ordering.html#variant.Release
1560 [`Acquire`]: enum.Ordering.html#variant.Acquire
1565 ", $extra_feature
, "
1566 use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1568 let foo = ", stringify
!($atomic_type
), "::new(0x13);
1569 assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1570 assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1574 pub fn fetch_nand(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1575 unsafe { atomic_nand(self.v.get(), val, order) }
1580 concat
!("Bitwise \"or\" with the current value.
1582 Performs a bitwise \"or\" operation on the current value and the argument `val`, and
1583 sets the new value to the result.
1585 Returns the previous value.
1587 `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1588 of this operation. All ordering modes are possible. Note that using
1589 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1590 using [`Release`] makes the load part [`Relaxed`].
1592 [`Ordering`]: enum.Ordering.html
1593 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1594 [`Release`]: enum.Ordering.html#variant.Release
1595 [`Acquire`]: enum.Ordering.html#variant.Acquire
1600 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1602 let foo = ", stringify
!($atomic_type
), "::new(0b101101);
1603 assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1604 assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1608 pub fn fetch_or(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1609 unsafe { atomic_or(self.v.get(), val, order) }
1614 concat
!("Bitwise \"xor\" with the current value.
1616 Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
1617 sets the new value to the result.
1619 Returns the previous value.
1621 `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1622 of this operation. All ordering modes are possible. Note that using
1623 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1624 using [`Release`] makes the load part [`Relaxed`].
1626 [`Ordering`]: enum.Ordering.html
1627 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1628 [`Release`]: enum.Ordering.html#variant.Release
1629 [`Acquire`]: enum.Ordering.html#variant.Acquire
1634 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1636 let foo = ", stringify
!($atomic_type
), "::new(0b101101);
1637 assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1638 assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1642 pub fn fetch_xor(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1643 unsafe { atomic_xor(self.v.get(), val, order) }
1648 concat
!("Fetches the value, and applies a function to it that returns an optional
1649 new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1650 `Err(previous_value)`.
1652 Note: This may call the function multiple times if the value has been changed from other threads in
1653 the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1654 but once to the stored value.
1656 `fetch_update` takes two [`Ordering`] arguments to describe the memory
1657 ordering of this operation. The first describes the required ordering for loads
1658 and failed updates while the second describes the required ordering when the
1659 operation finally succeeds. Beware that this is different from the two
1660 modes in [`compare_exchange`]!
1662 Using [`Acquire`] as success ordering makes the store part
1663 of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1664 [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1665 and must be equivalent to or weaker than the success ordering.
1667 [`bool`]: ../../../std/primitive.bool.html
1668 [`compare_exchange`]: #method.compare_exchange
1669 [`Ordering`]: enum.Ordering.html
1670 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1671 [`Release`]: enum.Ordering.html#variant.Release
1672 [`Acquire`]: enum.Ordering.html#variant.Acquire
1673 [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1678 #![feature(no_more_cas)]
1679 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1681 let x = ", stringify
!($atomic_type
), "::new(7);
1682 assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7));
1683 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7));
1684 assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8));
1685 assert_eq!(x.load(Ordering::SeqCst), 9);
1688 #[unstable(feature = "no_more_cas",
1689 reason
= "no more CAS loops in user code",
1691 pub fn fetch_update
<F
>(&self,
1693 fetch_order
: Ordering
,
1694 set_order
: Ordering
) -> Result
<$int_type
, $int_type
>
1695 where F
: FnMut($int_type
) -> Option
<$int_type
> {
1696 let mut prev
= self.load(fetch_order
);
1697 while let Some(next
) = f(prev
) {
1698 match self.compare_exchange_weak(prev
, next
, set_order
, fetch_order
) {
1699 x @
Ok(_
) => return x
,
1700 Err(next_prev
) => prev
= next_prev
1708 concat
!("Maximum with the current value.
1710 Finds the maximum of the current value and the argument `val`, and
1711 sets the new value to the result.
1713 Returns the previous value.
1715 `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1716 of this operation. All ordering modes are possible. Note that using
1717 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1718 using [`Release`] makes the load part [`Relaxed`].
1720 [`Ordering`]: enum.Ordering.html
1721 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1722 [`Release`]: enum.Ordering.html#variant.Release
1723 [`Acquire`]: enum.Ordering.html#variant.Acquire
1728 #![feature(atomic_min_max)]
1729 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1731 let foo = ", stringify
!($atomic_type
), "::new(23);
1732 assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1733 assert_eq!(foo.load(Ordering::SeqCst), 42);
1736 If you want to obtain the maximum value in one step, you can use the following:
1739 #![feature(atomic_min_max)]
1740 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1742 let foo = ", stringify
!($atomic_type
), "::new(23);
1744 let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1745 assert!(max_foo == 42);
1748 #[unstable(feature = "atomic_min_max",
1749 reason
= "easier and faster min/max than writing manual CAS loop",
1751 pub fn fetch_max(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1752 unsafe { $max_fn(self.v.get(), val, order) }
1757 concat
!("Minimum with the current value.
1759 Finds the minimum of the current value and the argument `val`, and
1760 sets the new value to the result.
1762 Returns the previous value.
1764 `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
1765 of this operation. All ordering modes are possible. Note that using
1766 [`Acquire`] makes the store part of this operation [`Relaxed`], and
1767 using [`Release`] makes the load part [`Relaxed`].
1769 [`Ordering`]: enum.Ordering.html
1770 [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1771 [`Release`]: enum.Ordering.html#variant.Release
1772 [`Acquire`]: enum.Ordering.html#variant.Acquire
1777 #![feature(atomic_min_max)]
1778 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1780 let foo = ", stringify
!($atomic_type
), "::new(23);
1781 assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
1782 assert_eq!(foo.load(Ordering::Relaxed), 23);
1783 assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
1784 assert_eq!(foo.load(Ordering::Relaxed), 22);
1787 If you want to obtain the minimum value in one step, you can use the following:
1790 #![feature(atomic_min_max)]
1791 ", $extra_feature
, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
1793 let foo = ", stringify
!($atomic_type
), "::new(23);
1795 let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
1796 assert_eq!(min_foo, 12);
1799 #[unstable(feature = "atomic_min_max",
1800 reason
= "easier and faster min/max than writing manual CAS loop",
1802 pub fn fetch_min(&self, val
: $int_type
, order
: Ordering
) -> $int_type
{
1803 unsafe { $min_fn(self.v.get(), val, order) }
1811 #[cfg(target_has_atomic = "8")]
1813 unstable(feature
= "integer_atomics", issue
= "32976"),
1814 unstable(feature
= "integer_atomics", issue
= "32976"),
1815 unstable(feature
= "integer_atomics", issue
= "32976"),
1816 unstable(feature
= "integer_atomics", issue
= "32976"),
1817 unstable(feature
= "integer_atomics", issue
= "32976"),
1818 unstable(feature
= "integer_atomics", issue
= "32976"),
1819 "i8", "../../../std/primitive.i8.html",
1820 "#![feature(integer_atomics)]\n\n",
1821 atomic_min
, atomic_max
,
1822 i8 AtomicI8 ATOMIC_I8_INIT
1824 #[cfg(target_has_atomic = "8")]
1826 unstable(feature
= "integer_atomics", issue
= "32976"),
1827 unstable(feature
= "integer_atomics", issue
= "32976"),
1828 unstable(feature
= "integer_atomics", issue
= "32976"),
1829 unstable(feature
= "integer_atomics", issue
= "32976"),
1830 unstable(feature
= "integer_atomics", issue
= "32976"),
1831 unstable(feature
= "integer_atomics", issue
= "32976"),
1832 "u8", "../../../std/primitive.u8.html",
1833 "#![feature(integer_atomics)]\n\n",
1834 atomic_umin
, atomic_umax
,
1835 u8 AtomicU8 ATOMIC_U8_INIT
1837 #[cfg(target_has_atomic = "16")]
1839 unstable(feature
= "integer_atomics", issue
= "32976"),
1840 unstable(feature
= "integer_atomics", issue
= "32976"),
1841 unstable(feature
= "integer_atomics", issue
= "32976"),
1842 unstable(feature
= "integer_atomics", issue
= "32976"),
1843 unstable(feature
= "integer_atomics", issue
= "32976"),
1844 unstable(feature
= "integer_atomics", issue
= "32976"),
1845 "i16", "../../../std/primitive.i16.html",
1846 "#![feature(integer_atomics)]\n\n",
1847 atomic_min
, atomic_max
,
1848 i16 AtomicI16 ATOMIC_I16_INIT
1850 #[cfg(target_has_atomic = "16")]
1852 unstable(feature
= "integer_atomics", issue
= "32976"),
1853 unstable(feature
= "integer_atomics", issue
= "32976"),
1854 unstable(feature
= "integer_atomics", issue
= "32976"),
1855 unstable(feature
= "integer_atomics", issue
= "32976"),
1856 unstable(feature
= "integer_atomics", issue
= "32976"),
1857 unstable(feature
= "integer_atomics", issue
= "32976"),
1858 "u16", "../../../std/primitive.u16.html",
1859 "#![feature(integer_atomics)]\n\n",
1860 atomic_umin
, atomic_umax
,
1861 u16 AtomicU16 ATOMIC_U16_INIT
1863 #[cfg(target_has_atomic = "32")]
1865 unstable(feature
= "integer_atomics", issue
= "32976"),
1866 unstable(feature
= "integer_atomics", issue
= "32976"),
1867 unstable(feature
= "integer_atomics", issue
= "32976"),
1868 unstable(feature
= "integer_atomics", issue
= "32976"),
1869 unstable(feature
= "integer_atomics", issue
= "32976"),
1870 unstable(feature
= "integer_atomics", issue
= "32976"),
1871 "i32", "../../../std/primitive.i32.html",
1872 "#![feature(integer_atomics)]\n\n",
1873 atomic_min
, atomic_max
,
1874 i32 AtomicI32 ATOMIC_I32_INIT
1876 #[cfg(target_has_atomic = "32")]
1878 unstable(feature
= "integer_atomics", issue
= "32976"),
1879 unstable(feature
= "integer_atomics", issue
= "32976"),
1880 unstable(feature
= "integer_atomics", issue
= "32976"),
1881 unstable(feature
= "integer_atomics", issue
= "32976"),
1882 unstable(feature
= "integer_atomics", issue
= "32976"),
1883 unstable(feature
= "integer_atomics", issue
= "32976"),
1884 "u32", "../../../std/primitive.u32.html",
1885 "#![feature(integer_atomics)]\n\n",
1886 atomic_umin
, atomic_umax
,
1887 u32 AtomicU32 ATOMIC_U32_INIT
1889 #[cfg(target_has_atomic = "64")]
1891 unstable(feature
= "integer_atomics", issue
= "32976"),
1892 unstable(feature
= "integer_atomics", issue
= "32976"),
1893 unstable(feature
= "integer_atomics", issue
= "32976"),
1894 unstable(feature
= "integer_atomics", issue
= "32976"),
1895 unstable(feature
= "integer_atomics", issue
= "32976"),
1896 unstable(feature
= "integer_atomics", issue
= "32976"),
1897 "i64", "../../../std/primitive.i64.html",
1898 "#![feature(integer_atomics)]\n\n",
1899 atomic_min
, atomic_max
,
1900 i64 AtomicI64 ATOMIC_I64_INIT
1902 #[cfg(target_has_atomic = "64")]
1904 unstable(feature
= "integer_atomics", issue
= "32976"),
1905 unstable(feature
= "integer_atomics", issue
= "32976"),
1906 unstable(feature
= "integer_atomics", issue
= "32976"),
1907 unstable(feature
= "integer_atomics", issue
= "32976"),
1908 unstable(feature
= "integer_atomics", issue
= "32976"),
1909 unstable(feature
= "integer_atomics", issue
= "32976"),
1910 "u64", "../../../std/primitive.u64.html",
1911 "#![feature(integer_atomics)]\n\n",
1912 atomic_umin
, atomic_umax
,
1913 u64 AtomicU64 ATOMIC_U64_INIT
1915 #[cfg(target_has_atomic = "ptr")]
1917 stable(feature
= "rust1", since
= "1.0.0"),
1918 stable(feature
= "extended_compare_and_swap", since
= "1.10.0"),
1919 stable(feature
= "atomic_debug", since
= "1.3.0"),
1920 stable(feature
= "atomic_access", since
= "1.15.0"),
1921 stable(feature
= "atomic_from", since
= "1.23.0"),
1922 stable(feature
= "atomic_nand", since
= "1.27.0"),
1923 "isize", "../../../std/primitive.isize.html",
1925 atomic_min
, atomic_max
,
1926 isize AtomicIsize ATOMIC_ISIZE_INIT
1928 #[cfg(target_has_atomic = "ptr")]
1930 stable(feature
= "rust1", since
= "1.0.0"),
1931 stable(feature
= "extended_compare_and_swap", since
= "1.10.0"),
1932 stable(feature
= "atomic_debug", since
= "1.3.0"),
1933 stable(feature
= "atomic_access", since
= "1.15.0"),
1934 stable(feature
= "atomic_from", since
= "1.23.0"),
1935 stable(feature
= "atomic_nand", since
= "1.27.0"),
1936 "usize", "../../../std/primitive.usize.html",
1938 atomic_umin
, atomic_umax
,
1939 usize AtomicUsize ATOMIC_USIZE_INIT
#[inline]
#[cfg(target_has_atomic = "cas")]
/// Maps a success ordering to the strongest failure ordering that
/// `compare_exchange` permits for it (failure may not be `Release`/`AcqRel`
/// and must be no stronger than the success ordering).
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
    }
}
1955 unsafe fn atomic_store
<T
>(dst
: *mut T
, val
: T
, order
: Ordering
) {
1957 Release
=> intrinsics
::atomic_store_rel(dst
, val
),
1958 Relaxed
=> intrinsics
::atomic_store_relaxed(dst
, val
),
1959 SeqCst
=> intrinsics
::atomic_store(dst
, val
),
1960 Acquire
=> panic
!("there is no such thing as an acquire store"),
1961 AcqRel
=> panic
!("there is no such thing as an acquire/release store"),
1966 unsafe fn atomic_load
<T
>(dst
: *const T
, order
: Ordering
) -> T
{
1968 Acquire
=> intrinsics
::atomic_load_acq(dst
),
1969 Relaxed
=> intrinsics
::atomic_load_relaxed(dst
),
1970 SeqCst
=> intrinsics
::atomic_load(dst
),
1971 Release
=> panic
!("there is no such thing as a release load"),
1972 AcqRel
=> panic
!("there is no such thing as an acquire/release load"),
/// Atomically replaces `*dst` with `val` and returns the previous value,
/// using the `atomic_xchg*` intrinsic selected by `order`.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val),
    }
}
1988 /// Returns the previous value (like __sync_fetch_and_add).
1990 unsafe fn atomic_add
<T
>(dst
: *mut T
, val
: T
, order
: Ordering
) -> T
{
1992 Acquire
=> intrinsics
::atomic_xadd_acq(dst
, val
),
1993 Release
=> intrinsics
::atomic_xadd_rel(dst
, val
),
1994 AcqRel
=> intrinsics
::atomic_xadd_acqrel(dst
, val
),
1995 Relaxed
=> intrinsics
::atomic_xadd_relaxed(dst
, val
),
1996 SeqCst
=> intrinsics
::atomic_xadd(dst
, val
),
2000 /// Returns the previous value (like __sync_fetch_and_sub).
2002 unsafe fn atomic_sub
<T
>(dst
: *mut T
, val
: T
, order
: Ordering
) -> T
{
2004 Acquire
=> intrinsics
::atomic_xsub_acq(dst
, val
),
2005 Release
=> intrinsics
::atomic_xsub_rel(dst
, val
),
2006 AcqRel
=> intrinsics
::atomic_xsub_acqrel(dst
, val
),
2007 Relaxed
=> intrinsics
::atomic_xsub_relaxed(dst
, val
),
2008 SeqCst
=> intrinsics
::atomic_xsub(dst
, val
),
/// Strong compare-and-exchange on `*dst`: if the current value equals `old`,
/// writes `new`. Returns `Ok(previous)` on success, `Err(current)` on failure.
///
/// The `(success, failure)` ordering pair selects the intrinsic variant.
/// NOTE: the wildcard arms below are order-sensitive — the panics for invalid
/// failure orderings (`AcqRel`, `Release`, or anything stronger than the
/// success ordering) must stay after the concrete pairs.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     failure: Ordering)
                                     -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}
/// Weak compare-and-exchange on `*dst`: like `atomic_compare_exchange`, but
/// the `cxchgweak` intrinsics may fail spuriously even when the comparison
/// succeeds. Returns `Ok(previous)` on success, `Err(current)` on failure.
///
/// NOTE: the wildcard arms are order-sensitive and must remain last.
#[inline]
#[cfg(target_has_atomic = "cas")]
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
                                          old: T,
                                          new: T,
                                          success: Ordering,
                                          failure: Ordering)
                                          -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}
2062 unsafe fn atomic_and
<T
>(dst
: *mut T
, val
: T
, order
: Ordering
) -> T
{
2064 Acquire
=> intrinsics
::atomic_and_acq(dst
, val
),
2065 Release
=> intrinsics
::atomic_and_rel(dst
, val
),
2066 AcqRel
=> intrinsics
::atomic_and_acqrel(dst
, val
),
2067 Relaxed
=> intrinsics
::atomic_and_relaxed(dst
, val
),
2068 SeqCst
=> intrinsics
::atomic_and(dst
, val
),
2073 unsafe fn atomic_nand
<T
>(dst
: *mut T
, val
: T
, order
: Ordering
) -> T
{
2075 Acquire
=> intrinsics
::atomic_nand_acq(dst
, val
),
2076 Release
=> intrinsics
::atomic_nand_rel(dst
, val
),
2077 AcqRel
=> intrinsics
::atomic_nand_acqrel(dst
, val
),
2078 Relaxed
=> intrinsics
::atomic_nand_relaxed(dst
, val
),
2079 SeqCst
=> intrinsics
::atomic_nand(dst
, val
),
2084 unsafe fn atomic_or
<T
>(dst
: *mut T
, val
: T
, order
: Ordering
) -> T
{
2086 Acquire
=> intrinsics
::atomic_or_acq(dst
, val
),
2087 Release
=> intrinsics
::atomic_or_rel(dst
, val
),
2088 AcqRel
=> intrinsics
::atomic_or_acqrel(dst
, val
),
2089 Relaxed
=> intrinsics
::atomic_or_relaxed(dst
, val
),
2090 SeqCst
=> intrinsics
::atomic_or(dst
, val
),
2095 unsafe fn atomic_xor
<T
>(dst
: *mut T
, val
: T
, order
: Ordering
) -> T
{
2097 Acquire
=> intrinsics
::atomic_xor_acq(dst
, val
),
2098 Release
=> intrinsics
::atomic_xor_rel(dst
, val
),
2099 AcqRel
=> intrinsics
::atomic_xor_acqrel(dst
, val
),
2100 Relaxed
=> intrinsics
::atomic_xor_relaxed(dst
, val
),
2101 SeqCst
=> intrinsics
::atomic_xor(dst
, val
),
2105 /// returns the max value (signed comparison)
2107 unsafe fn atomic_max
<T
>(dst
: *mut T
, val
: T
, order
: Ordering
) -> T
{
2109 Acquire
=> intrinsics
::atomic_max_acq(dst
, val
),
2110 Release
=> intrinsics
::atomic_max_rel(dst
, val
),
2111 AcqRel
=> intrinsics
::atomic_max_acqrel(dst
, val
),
2112 Relaxed
=> intrinsics
::atomic_max_relaxed(dst
, val
),
2113 SeqCst
=> intrinsics
::atomic_max(dst
, val
),
2117 /// returns the min value (signed comparison)
2119 unsafe fn atomic_min
<T
>(dst
: *mut T
, val
: T
, order
: Ordering
) -> T
{
2121 Acquire
=> intrinsics
::atomic_min_acq(dst
, val
),
2122 Release
=> intrinsics
::atomic_min_rel(dst
, val
),
2123 AcqRel
=> intrinsics
::atomic_min_acqrel(dst
, val
),
2124 Relaxed
=> intrinsics
::atomic_min_relaxed(dst
, val
),
2125 SeqCst
=> intrinsics
::atomic_min(dst
, val
),
2129 /// returns the max value (signed comparison)
2131 unsafe fn atomic_umax
<T
>(dst
: *mut T
, val
: T
, order
: Ordering
) -> T
{
2133 Acquire
=> intrinsics
::atomic_umax_acq(dst
, val
),
2134 Release
=> intrinsics
::atomic_umax_rel(dst
, val
),
2135 AcqRel
=> intrinsics
::atomic_umax_acqrel(dst
, val
),
2136 Relaxed
=> intrinsics
::atomic_umax_relaxed(dst
, val
),
2137 SeqCst
=> intrinsics
::atomic_umax(dst
, val
),
2141 /// returns the min value (signed comparison)
2143 unsafe fn atomic_umin
<T
>(dst
: *mut T
, val
: T
, order
: Ordering
) -> T
{
2145 Acquire
=> intrinsics
::atomic_umin_acq(dst
, val
),
2146 Release
=> intrinsics
::atomic_umin_rel(dst
, val
),
2147 AcqRel
=> intrinsics
::atomic_umin_acqrel(dst
, val
),
2148 Relaxed
=> intrinsics
::atomic_umin_relaxed(dst
, val
),
2149 SeqCst
=> intrinsics
::atomic_umin(dst
, val
),
2153 /// An atomic fence.
2155 /// Depending on the specified order, a fence prevents the compiler and CPU from
2156 /// reordering certain types of memory operations around it.
2157 /// That creates synchronizes-with relationships between it and atomic operations
2158 /// or fences in other threads.
2160 /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
2161 /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
2162 /// exist operations X and Y, both operating on some atomic object 'M' such
2163 /// that A is sequenced before X, Y is sequenced before B and Y observes
2164 /// the change to M. This provides a happens-before dependence between A and B.
2167 /// Thread 1 Thread 2
2169 /// fence(Release); A --------------
2170 /// x.store(3, Relaxed); X --------- |
2173 /// -------------> Y if x.load(Relaxed) == 3 {
2174 /// |-------> B fence(Acquire);
2179 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
2182 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
2183 /// and [`Release`] semantics, participates in the global program order of the
2184 /// other [`SeqCst`] operations and/or fences.
2186 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
2190 /// Panics if `order` is [`Relaxed`].
2195 /// use std::sync::atomic::AtomicBool;
2196 /// use std::sync::atomic::fence;
2197 /// use std::sync::atomic::Ordering;
2199 /// // A mutual exclusion primitive based on spinlock.
2200 /// pub struct Mutex {
2201 /// flag: AtomicBool,
2205 /// pub fn new() -> Mutex {
2207 /// flag: AtomicBool::new(false),
2211 /// pub fn lock(&self) {
2212 /// while !self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
2213 /// // This fence synchronizes-with store in `unlock`.
2214 /// fence(Ordering::Acquire);
2217 /// pub fn unlock(&self) {
2218 /// self.flag.store(false, Ordering::Release);
2223 /// [`Ordering`]: enum.Ordering.html
2224 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2225 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2226 /// [`Release`]: enum.Ordering.html#variant.Release
2227 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2228 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2230 #[stable(feature = "rust1", since = "1.0.0")]
2231 pub fn fence(order
: Ordering
) {
2234 Acquire
=> intrinsics
::atomic_fence_acq(),
2235 Release
=> intrinsics
::atomic_fence_rel(),
2236 AcqRel
=> intrinsics
::atomic_fence_acqrel(),
2237 SeqCst
=> intrinsics
::atomic_fence(),
2238 Relaxed
=> panic
!("there is no such thing as a relaxed fence"),
2244 /// A compiler memory fence.
2246 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2247 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2248 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2249 /// or writes from before or after the call to the other side of the call to
2250 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2251 /// from doing such re-ordering. This is not a problem in a single-threaded
2252 /// execution context, but when other threads may modify memory at the same
2253 /// time, stronger synchronization primitives such as [`fence`] are required.
2255 /// The re-ordering prevented by the different ordering semantics are:
2257 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2258 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2259 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2260 /// - with [`AcqRel`], both of the above rules are enforced.
2262 /// `compiler_fence` is generally only useful for preventing a thread from
2263 /// racing *with itself*. That is, if a given thread is executing one piece
2264 /// of code, and is then interrupted, and starts executing code elsewhere
2265 /// (while still in the same thread, and conceptually still on the same
2266 /// core). In traditional programs, this can only occur when a signal
2267 /// handler is registered. In more low-level code, such situations can also
2268 /// arise when handling interrupts, when implementing green threads with
2269 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2270 /// discussion of [memory barriers].
2274 /// Panics if `order` is [`Relaxed`].
2278 /// Without `compiler_fence`, the `assert_eq!` in following code
2279 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2280 /// To see why, remember that the compiler is free to swap the stores to
2281 /// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
2282 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2283 /// after `IS_READY` is updated, then the signal handler will see
2284 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2285 /// Using a `compiler_fence` remedies this situation.
2288 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2289 /// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
2290 /// use std::sync::atomic::Ordering;
2291 /// use std::sync::atomic::compiler_fence;
2293 /// static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
2294 /// static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
2297 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2298 /// // prevent earlier writes from being moved beyond this point
2299 /// compiler_fence(Ordering::Release);
2300 /// IS_READY.store(true, Ordering::Relaxed);
2303 /// fn signal_handler() {
2304 /// if IS_READY.load(Ordering::Relaxed) {
2305 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2310 /// [`fence`]: fn.fence.html
2311 /// [`Ordering`]: enum.Ordering.html
2312 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2313 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2314 /// [`Release`]: enum.Ordering.html#variant.Release
2315 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2316 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2317 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2319 #[stable(feature = "compiler_fences", since = "1.21.0")]
2320 pub fn compiler_fence(order
: Ordering
) {
2323 Acquire
=> intrinsics
::atomic_singlethreadfence_acq(),
2324 Release
=> intrinsics
::atomic_singlethreadfence_rel(),
2325 AcqRel
=> intrinsics
::atomic_singlethreadfence_acqrel(),
2326 SeqCst
=> intrinsics
::atomic_singlethreadfence(),
2327 Relaxed
=> panic
!("there is no such thing as a relaxed compiler fence"),
2333 #[cfg(target_has_atomic = "8")]
2334 #[stable(feature = "atomic_debug", since = "1.3.0")]
2335 impl fmt
::Debug
for AtomicBool
{
2336 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
2337 fmt
::Debug
::fmt(&self.load(Ordering
::SeqCst
), f
)
2341 #[cfg(target_has_atomic = "ptr")]
2342 #[stable(feature = "atomic_debug", since = "1.3.0")]
2343 impl<T
> fmt
::Debug
for AtomicPtr
<T
> {
2344 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
2345 fmt
::Debug
::fmt(&self.load(Ordering
::SeqCst
), f
)
2349 #[cfg(target_has_atomic = "ptr")]
2350 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2351 impl<T
> fmt
::Pointer
for AtomicPtr
<T
> {
2352 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
2353 fmt
::Pointer
::fmt(&self.load(Ordering
::SeqCst
), f
)