]> git.proxmox.com Git - rustc.git/blame - library/core/src/sync/atomic.rs
New upstream version 1.55.0+dfsg1
[rustc.git] / library / core / src / sync / atomic.rs
CommitLineData
1a4d82fc
JJ
1//! Atomic types
2//!
3//! Atomic types provide primitive shared-memory communication between
4//! threads, and are the building blocks of other concurrent
5//! types.
6//!
7//! This module defines atomic versions of a select number of primitive
9fa01778
XL
8//! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
9//! [`AtomicI8`], [`AtomicU16`], etc.
1a4d82fc
JJ
10//! Atomic types present operations that, when used correctly, synchronize
11//! updates between threads.
12//!
cc61c64b 13//! Each method takes an [`Ordering`] which represents the strength of
1a4d82fc 14//! the memory barrier for that operation. These orderings are the
e74abb32 15//! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
1a4d82fc 16//!
e74abb32 17//! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
32a655c1 18//! [2]: ../../../nomicon/atomics.html
1a4d82fc 19//!
cc61c64b 20//! Atomic variables are safe to share between threads (they implement [`Sync`])
a7813a04 21//! but they do not themselves provide the mechanism for sharing and follow the
60c5eb7d 22//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
cc61c64b 23//! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
1a4d82fc
JJ
24//! atomically-reference-counted shared pointer).
25//!
cc61c64b
XL
26//! [arc]: ../../../std/sync/struct.Arc.html
27//!
9fa01778
XL
28//! Atomic types may be stored in static variables, initialized using
29//! the constant initializers like [`AtomicBool::new`]. Atomic statics
1a4d82fc
JJ
30//! are often used for lazy global initialization.
31//!
9fa01778
XL
32//! # Portability
33//!
34//! All atomic types in this module are guaranteed to be [lock-free] if they're
35//! available. This means they don't internally acquire a global mutex. Atomic
36//! types and operations are not guaranteed to be wait-free. This means that
37//! operations like `fetch_or` may be implemented with a compare-and-swap loop.
38//!
39//! Atomic operations may be implemented at the instruction layer with
40//! larger-size atomics. For example some platforms use 4-byte atomic
41//! instructions to implement `AtomicI8`. Note that this emulation should not
42//! have an impact on correctness of code, it's just something to be aware of.
43//!
44//! The atomic types in this module may not be available on all platforms. The
45//! atomic types here are all widely available, however, and can generally be
46//! relied upon existing. Some notable exceptions are:
47//!
48//! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
49//! `AtomicI64` types.
fc512014
XL
50//! * ARM platforms like `armv5te` that aren't for Linux only provide `load`
51//! and `store` operations, and do not support Compare and Swap (CAS)
52//! operations, such as `swap`, `fetch_add`, etc. Additionally on Linux,
53//! these CAS operations are implemented via [operating system support], which
54//! may come with a performance penalty.
55//! * ARM targets with `thumbv6m` only provide `load` and `store` operations,
56//! and do not support Compare and Swap (CAS) operations, such as `swap`,
57//! `fetch_add`, etc.
58//!
59//! [operating system support]: https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
9fa01778
XL
60//!
61//! Note that future platforms may be added that also do not have support for
62//! some atomic operations. Maximally portable code will want to be careful
63//! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
64//! generally the most portable, but even then they're not available everywhere.
65//! For reference, the `std` library requires pointer-sized atomics, although
66//! `core` does not.
67//!
68//! Currently you'll need to use `#[cfg(target_arch)]` primarily to
69//! conditionally compile in code with atomics. There is an unstable
70//! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
71//!
72//! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
1a4d82fc
JJ
73//!
74//! # Examples
75//!
76//! A simple spinlock:
77//!
78//! ```
79//! use std::sync::Arc;
85aaf69f 80//! use std::sync::atomic::{AtomicUsize, Ordering};
cdc7bbd5 81//! use std::{hint, thread};
1a4d82fc
JJ
82//!
83//! fn main() {
85aaf69f 84//! let spinlock = Arc::new(AtomicUsize::new(1));
1a4d82fc 85//!
1b1a35ee 86//! let spinlock_clone = Arc::clone(&spinlock);
a7813a04 87//! let thread = thread::spawn(move|| {
1a4d82fc
JJ
88//! spinlock_clone.store(0, Ordering::SeqCst);
89//! });
90//!
bd371182 91//! // Wait for the other thread to release the lock
cdc7bbd5
XL
92//! while spinlock.load(Ordering::SeqCst) != 0 {
93//! hint::spin_loop();
94//! }
a7813a04
XL
95//!
96//! if let Err(panic) = thread.join() {
97//! println!("Thread had an error: {:?}", panic);
98//! }
1a4d82fc
JJ
99//! }
100//! ```
101//!
bd371182 102//! Keep a global count of live threads:
1a4d82fc
JJ
103//!
104//! ```
9fa01778 105//! use std::sync::atomic::{AtomicUsize, Ordering};
1a4d82fc 106//!
9fa01778 107//! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
1a4d82fc 108//!
bd371182
AL
109//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
110//! println!("live threads: {}", old_thread_count + 1);
1a4d82fc
JJ
111//! ```
112
85aaf69f 113#![stable(feature = "rust1", since = "1.0.0")]
e74abb32
XL
114#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
115#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
1a4d82fc
JJ
116
117use self::Ordering::*;
118
48663c56
XL
119use crate::cell::UnsafeCell;
120use crate::fmt;
dfeec247 121use crate::intrinsics;
9346a6ac 122
48663c56 123use crate::hint::spin_loop;
9fa01778 124
/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a [`bool`].
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of `u8`.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[repr(C, align(1))]
pub struct AtomicBool {
    // The value is stored as a `u8` (0 = `false`, 1 = `true`) so that the
    // integer-based atomic intrinsics can operate on it directly; `load`,
    // `into_inner`, etc. map it back to `bool` with `!= 0`.
    v: UnsafeCell<u8>,
}
137
60c5eb7d 138#[cfg(target_has_atomic_load_store = "8")]
92a42be0 139#[stable(feature = "rust1", since = "1.0.0")]
9346a6ac 140impl Default for AtomicBool {
c30ab7b3 141 /// Creates an `AtomicBool` initialized to `false`.
29967ef6 142 #[inline]
62682a34 143 fn default() -> Self {
a7813a04 144 Self::new(false)
9346a6ac
AL
145 }
146}
147
// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: shared references to an `AtomicBool` only ever reach the inner
// value through the atomic intrinsics used by its methods, so concurrent
// access from multiple threads is data-race-free.
unsafe impl Sync for AtomicBool {}
152
/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers. Its size depends on the target pointer's size.
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
// Guarantee the alignment required by atomic operations for each target
// pointer width (2, 4, or 8 bytes respectively).
#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}
167
60c5eb7d 168#[cfg(target_has_atomic_load_store = "ptr")]
92a42be0 169#[stable(feature = "rust1", since = "1.0.0")]
d9579d0f 170impl<T> Default for AtomicPtr<T> {
9e0c209e 171 /// Creates a null `AtomicPtr<T>`.
d9579d0f 172 fn default() -> AtomicPtr<T> {
48663c56 173 AtomicPtr::new(crate::ptr::null_mut())
d9579d0f
AL
174 }
175}
176
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: sending an `AtomicPtr<T>` to another thread only moves the raw
// pointer value itself; this type never owns or dereferences a `T`.
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: shared references access the contained pointer only via atomic
// operations, so concurrent use from multiple threads is data-race-free.
unsafe impl<T> Sync for AtomicPtr<T> {}
183
/// Atomic memory orderings
///
/// Memory orderings specify the way atomic operations synchronize memory.
/// In its weakest [`Ordering::Relaxed`], only the memory directly touched by the
/// operation is synchronized. On the other hand, a store-load pair of [`Ordering::SeqCst`]
/// operations synchronize other memory while additionally preserving a total order of such
/// operations across all threads.
///
/// Rust's memory orderings are [the same as those of
/// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
///
/// For more information see the [nomicon].
///
/// [nomicon]: ../../../nomicon/atomics.html
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
// `#[non_exhaustive]` keeps the door open for additional orderings without
// breaking downstream `match` statements.
#[non_exhaustive]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    ///
    /// Corresponds to [`memory_order_relaxed`] in C++20.
    ///
    /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous operations become ordered
    /// before any load of this value with [`Acquire`] (or stronger) ordering.
    /// In particular, all previous writes become visible to all threads
    /// that perform an [`Acquire`] (or stronger) load of this value.
    ///
    /// Notice that using this ordering for an operation that combines loads
    /// and stores leads to a [`Relaxed`] load operation!
    ///
    /// This ordering is only applicable for operations that can perform a store.
    ///
    /// Corresponds to [`memory_order_release`] in C++20.
    ///
    /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, if the loaded value was written by a store operation with
    /// [`Release`] (or stronger) ordering, then all subsequent operations
    /// become ordered after that store. In particular, all subsequent loads will see data
    /// written before the store.
    ///
    /// Notice that using this ordering for an operation that combines loads
    /// and stores leads to a [`Relaxed`] store operation!
    ///
    /// This ordering is only applicable for operations that can perform a load.
    ///
    /// Corresponds to [`memory_order_acquire`] in C++20.
    ///
    /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// Has the effects of both [`Acquire`] and [`Release`] together:
    /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
    ///
    /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
    /// not performing any store and hence it has just [`Acquire`] ordering. However,
    /// `AcqRel` will never perform [`Relaxed`] accesses.
    ///
    /// This ordering is only applicable for operations that combine both loads and stores.
    ///
    /// Corresponds to [`memory_order_acq_rel`] in C++20.
    ///
    /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
    /// operations, respectively) with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    ///
    /// Corresponds to [`memory_order_seq_cst`] in C++20.
    ///
    /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}
263
/// An [`AtomicBool`] initialized to `false`.
// Retained only for backward compatibility; `AtomicBool::new` has been
// `const` since 1.24, making this constant redundant.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_deprecated(
    since = "1.34.0",
    reason = "the `new` function is now preferred",
    suggestion = "AtomicBool::new(false)"
)]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
1a4d82fc 273
60c5eb7d 274#[cfg(target_has_atomic_load_store = "8")]
1a4d82fc
JJ
275impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
    pub const fn new(v: bool) -> AtomicBool {
        // The `bool` is stored as a `u8`: `false` => 0, `true` => 1.
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }
292
    /// Returns a mutable reference to the underlying [`bool`].
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = AtomicBool::new(true);
    /// assert_eq!(*some_bool.get_mut(), true);
    /// *some_bool.get_mut() = false;
    /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut bool {
        // SAFETY: the mutable reference guarantees unique ownership.
        // The `u8` -> `bool` pointer cast is valid because the stored byte
        // is always 0 or 1 (see `new`), matching `bool`'s representation.
        unsafe { &mut *(self.v.get() as *mut bool) }
    }
314
1b1a35ee
XL
    /// Get atomic access to a `&mut bool`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = true;
    /// let a = AtomicBool::from_mut(&mut some_bool);
    /// a.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool, false);
    /// ```
    #[inline]
    #[cfg(target_has_atomic_equal_alignment = "8")]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn from_mut(v: &mut bool) -> &Self {
        // SAFETY: the mutable reference guarantees unique ownership, and
        // alignment of both `bool` and `Self` is 1.
        // The returned shared reference borrows `v` for its whole lifetime,
        // so the original `&mut bool` stays inaccessible meanwhile.
        unsafe { &*(v as *mut bool as *mut Self) }
    }
336
9e0c209e
SL
    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let some_bool = AtomicBool::new(true);
    /// assert_eq!(some_bool.into_inner(), true);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
    pub const fn into_inner(self) -> bool {
        // Any nonzero byte maps to `true`; `new` only ever stores 0 or 1.
        self.v.into_inner() != 0
    }
356
1a4d82fc
JJ
357 /// Loads a value from the bool.
358 ///
32a655c1 359 /// `load` takes an [`Ordering`] argument which describes the memory ordering
b7449926 360 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1a4d82fc
JJ
361 ///
362 /// # Panics
363 ///
32a655c1
SL
364 /// Panics if `order` is [`Release`] or [`AcqRel`].
365 ///
1a4d82fc
JJ
366 /// # Examples
367 ///
368 /// ```
369 /// use std::sync::atomic::{AtomicBool, Ordering};
370 ///
371 /// let some_bool = AtomicBool::new(true);
372 ///
62682a34 373 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
1a4d82fc
JJ
374 /// ```
375 #[inline]
85aaf69f 376 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 377 pub fn load(&self, order: Ordering) -> bool {
dfeec247
XL
378 // SAFETY: any data races are prevented by atomic intrinsics and the raw
379 // pointer passed in is valid because we got it from a reference.
a7813a04 380 unsafe { atomic_load(self.v.get(), order) != 0 }
1a4d82fc
JJ
381 }
382
383 /// Stores a value into the bool.
384 ///
32a655c1 385 /// `store` takes an [`Ordering`] argument which describes the memory ordering
b7449926
XL
386 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
387 ///
388 /// # Panics
389 ///
390 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
32a655c1 391 ///
1a4d82fc
JJ
392 /// # Examples
393 ///
394 /// ```
395 /// use std::sync::atomic::{AtomicBool, Ordering};
396 ///
397 /// let some_bool = AtomicBool::new(true);
398 ///
399 /// some_bool.store(false, Ordering::Relaxed);
62682a34 400 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
1a4d82fc 401 /// ```
1a4d82fc 402 #[inline]
85aaf69f 403 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 404 pub fn store(&self, val: bool, order: Ordering) {
dfeec247
XL
405 // SAFETY: any data races are prevented by atomic intrinsics and the raw
406 // pointer passed in is valid because we got it from a reference.
c30ab7b3
SL
407 unsafe {
408 atomic_store(self.v.get(), val as u8, order);
409 }
1a4d82fc
JJ
410 }
411
cc61c64b 412 /// Stores a value into the bool, returning the previous value.
1a4d82fc 413 ///
32a655c1 414 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
b7449926
XL
415 /// of this operation. All ordering modes are possible. Note that using
416 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
417 /// using [`Release`] makes the load part [`Relaxed`].
32a655c1 418 ///
f035d41b
XL
419 /// **Note:** This method is only available on platforms that support atomic
420 /// operations on `u8`.
421 ///
1a4d82fc
JJ
422 /// # Examples
423 ///
424 /// ```
425 /// use std::sync::atomic::{AtomicBool, Ordering};
426 ///
427 /// let some_bool = AtomicBool::new(true);
428 ///
62682a34
SL
429 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
430 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
1a4d82fc
JJ
431 /// ```
432 #[inline]
85aaf69f 433 #[stable(feature = "rust1", since = "1.0.0")]
e74abb32 434 #[cfg(target_has_atomic = "8")]
1a4d82fc 435 pub fn swap(&self, val: bool, order: Ordering) -> bool {
dfeec247 436 // SAFETY: data races are prevented by atomic intrinsics.
a7813a04 437 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
1a4d82fc
JJ
438 }
439
ea8adc8c 440 /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
1a4d82fc 441 ///
c1a9b12d
SL
442 /// The return value is always the previous value. If it is equal to `current`, then the value
443 /// was updated.
1a4d82fc 444 ///
32a655c1 445 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
b7449926
XL
446 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
447 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
448 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
449 /// happens, and using [`Release`] makes the load part [`Relaxed`].
32a655c1 450 ///
f035d41b
XL
451 /// **Note:** This method is only available on platforms that support atomic
452 /// operations on `u8`.
453 ///
fc512014
XL
454 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
455 ///
456 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
457 /// memory orderings:
458 ///
459 /// Original | Success | Failure
460 /// -------- | ------- | -------
461 /// Relaxed | Relaxed | Relaxed
462 /// Acquire | Acquire | Acquire
463 /// Release | Release | Relaxed
464 /// AcqRel | AcqRel | Acquire
465 /// SeqCst | SeqCst | SeqCst
466 ///
467 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
468 /// which allows the compiler to generate better assembly code when the compare and swap
469 /// is used in a loop.
470 ///
1a4d82fc
JJ
471 /// # Examples
472 ///
473 /// ```
474 /// use std::sync::atomic::{AtomicBool, Ordering};
475 ///
476 /// let some_bool = AtomicBool::new(true);
477 ///
62682a34
SL
478 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
479 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
480 ///
481 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
482 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
1a4d82fc
JJ
483 /// ```
484 #[inline]
85aaf69f 485 #[stable(feature = "rust1", since = "1.0.0")]
fc512014
XL
486 #[rustc_deprecated(
487 since = "1.50.0",
488 reason = "Use `compare_exchange` or `compare_exchange_weak` instead"
489 )]
e74abb32 490 #[cfg(target_has_atomic = "8")]
c1a9b12d 491 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
54a0048b
SL
492 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
493 Ok(x) => x,
494 Err(x) => x,
495 }
7453a54e
SL
496 }
497
    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[doc(alias = "compare_and_swap")]
    #[cfg(target_has_atomic = "8")]
    pub fn compare_exchange(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        // SAFETY: data races are prevented by atomic intrinsics.
        match unsafe {
            atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
        } {
            // Map the raw `u8` payloads back to `bool`; the byte is always 0 or 1.
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }
554
    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// Unlike [`AtomicBool::compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[doc(alias = "compare_and_swap")]
    #[cfg(target_has_atomic = "8")]
    pub fn compare_exchange_weak(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        // SAFETY: data races are prevented by atomic intrinsics.
        match unsafe {
            atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
        } {
            // Map the raw `u8` payloads back to `bool`; the byte is always 0 or 1.
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }
609
610 /// Logical "and" with a boolean value.
611 ///
612 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
613 /// the new value to the result.
614 ///
615 /// Returns the previous value.
616 ///
b7449926
XL
617 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
618 /// of this operation. All ordering modes are possible. Note that using
619 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
620 /// using [`Release`] makes the load part [`Relaxed`].
621 ///
f035d41b
XL
622 /// **Note:** This method is only available on platforms that support atomic
623 /// operations on `u8`.
624 ///
1a4d82fc
JJ
625 /// # Examples
626 ///
627 /// ```
628 /// use std::sync::atomic::{AtomicBool, Ordering};
629 ///
630 /// let foo = AtomicBool::new(true);
62682a34
SL
631 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
632 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1a4d82fc
JJ
633 ///
634 /// let foo = AtomicBool::new(true);
62682a34
SL
635 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
636 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1a4d82fc
JJ
637 ///
638 /// let foo = AtomicBool::new(false);
62682a34
SL
639 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
640 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1a4d82fc
JJ
641 /// ```
642 #[inline]
85aaf69f 643 #[stable(feature = "rust1", since = "1.0.0")]
e74abb32 644 #[cfg(target_has_atomic = "8")]
1a4d82fc 645 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
dfeec247 646 // SAFETY: data races are prevented by atomic intrinsics.
a7813a04 647 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
1a4d82fc
JJ
648 }
649
650 /// Logical "nand" with a boolean value.
651 ///
652 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
653 /// the new value to the result.
654 ///
655 /// Returns the previous value.
656 ///
b7449926
XL
657 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
658 /// of this operation. All ordering modes are possible. Note that using
659 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
660 /// using [`Release`] makes the load part [`Relaxed`].
661 ///
f035d41b
XL
662 /// **Note:** This method is only available on platforms that support atomic
663 /// operations on `u8`.
664 ///
1a4d82fc
JJ
665 /// # Examples
666 ///
667 /// ```
668 /// use std::sync::atomic::{AtomicBool, Ordering};
669 ///
670 /// let foo = AtomicBool::new(true);
62682a34
SL
671 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
672 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1a4d82fc
JJ
673 ///
674 /// let foo = AtomicBool::new(true);
62682a34
SL
675 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
676 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
677 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1a4d82fc
JJ
678 ///
679 /// let foo = AtomicBool::new(false);
62682a34
SL
680 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
681 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1a4d82fc
JJ
682 /// ```
683 #[inline]
85aaf69f 684 #[stable(feature = "rust1", since = "1.0.0")]
e74abb32 685 #[cfg(target_has_atomic = "8")]
1a4d82fc 686 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
a7813a04
XL
687 // We can't use atomic_nand here because it can result in a bool with
688 // an invalid value. This happens because the atomic operation is done
689 // with an 8-bit integer internally, which would set the upper 7 bits.
cc61c64b
XL
690 // So we just use fetch_xor or swap instead.
691 if val {
692 // !(x & true) == !x
693 // We must invert the bool.
694 self.fetch_xor(true, order)
695 } else {
696 // !(x & false) == true
697 // We must set the bool to true.
698 self.swap(true, order)
a7813a04 699 }
1a4d82fc
JJ
700 }
701
702 /// Logical "or" with a boolean value.
703 ///
704 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
705 /// new value to the result.
706 ///
707 /// Returns the previous value.
708 ///
b7449926
XL
709 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
710 /// of this operation. All ordering modes are possible. Note that using
711 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
712 /// using [`Release`] makes the load part [`Relaxed`].
713 ///
f035d41b
XL
714 /// **Note:** This method is only available on platforms that support atomic
715 /// operations on `u8`.
716 ///
1a4d82fc
JJ
717 /// # Examples
718 ///
719 /// ```
720 /// use std::sync::atomic::{AtomicBool, Ordering};
721 ///
722 /// let foo = AtomicBool::new(true);
62682a34
SL
723 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
724 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1a4d82fc
JJ
725 ///
726 /// let foo = AtomicBool::new(true);
62682a34
SL
727 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
728 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1a4d82fc
JJ
729 ///
730 /// let foo = AtomicBool::new(false);
62682a34
SL
731 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
732 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1a4d82fc
JJ
733 /// ```
734 #[inline]
85aaf69f 735 #[stable(feature = "rust1", since = "1.0.0")]
e74abb32 736 #[cfg(target_has_atomic = "8")]
1a4d82fc 737 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
dfeec247 738 // SAFETY: data races are prevented by atomic intrinsics.
a7813a04 739 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
1a4d82fc
JJ
740 }
741
742 /// Logical "xor" with a boolean value.
743 ///
744 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
745 /// the new value to the result.
746 ///
747 /// Returns the previous value.
748 ///
b7449926
XL
749 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
750 /// of this operation. All ordering modes are possible. Note that using
751 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
752 /// using [`Release`] makes the load part [`Relaxed`].
753 ///
f035d41b
XL
754 /// **Note:** This method is only available on platforms that support atomic
755 /// operations on `u8`.
756 ///
1a4d82fc
JJ
757 /// # Examples
758 ///
759 /// ```
760 /// use std::sync::atomic::{AtomicBool, Ordering};
761 ///
762 /// let foo = AtomicBool::new(true);
62682a34
SL
763 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
764 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1a4d82fc
JJ
765 ///
766 /// let foo = AtomicBool::new(true);
62682a34
SL
767 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
768 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1a4d82fc
JJ
769 ///
770 /// let foo = AtomicBool::new(false);
62682a34
SL
771 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
772 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1a4d82fc
JJ
773 /// ```
774 #[inline]
85aaf69f 775 #[stable(feature = "rust1", since = "1.0.0")]
e74abb32 776 #[cfg(target_has_atomic = "8")]
1a4d82fc 777 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
dfeec247 778 // SAFETY: data races are prevented by atomic intrinsics.
a7813a04 779 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
1a4d82fc 780 }
60c5eb7d
XL
781
782 /// Returns a mutable pointer to the underlying [`bool`].
783 ///
784 /// Doing non-atomic reads and writes on the resulting integer can be a data race.
785 /// This method is mostly useful for FFI, where the function signature may use
786 /// `*mut bool` instead of `&AtomicBool`.
787 ///
788 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
789 /// atomic types work with interior mutability. All modifications of an atomic change the value
790 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
791 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
792 /// restriction: operations on it must be atomic.
793 ///
60c5eb7d
XL
794 /// # Examples
795 ///
796 /// ```ignore (extern-declaration)
797 /// # fn main() {
798 /// use std::sync::atomic::AtomicBool;
5869c6ff 799 /// extern "C" {
60c5eb7d
XL
800 /// fn my_atomic_op(arg: *mut bool);
801 /// }
802 ///
803 /// let mut atomic = AtomicBool::new(true);
804 /// unsafe {
805 /// my_atomic_op(atomic.as_mut_ptr());
806 /// }
807 /// # }
808 /// ```
809 #[inline]
dfeec247 810 #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
60c5eb7d
XL
811 pub fn as_mut_ptr(&self) -> *mut bool {
812 self.v.get() as *mut bool
813 }
29967ef6
XL
814
815 /// Fetches the value, and applies a function to it that returns an optional
816 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
817 /// returned `Some(_)`, else `Err(previous_value)`.
818 ///
819 /// Note: This may call the function multiple times if the value has been
820 /// changed from other threads in the meantime, as long as the function
821 /// returns `Some(_)`, but the function will have been applied only once to
822 /// the stored value.
823 ///
824 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
825 /// ordering of this operation. The first describes the required ordering for
826 /// when the operation finally succeeds while the second describes the
827 /// required ordering for loads. These correspond to the success and failure
828 /// orderings of [`AtomicBool::compare_exchange`] respectively.
829 ///
830 /// Using [`Acquire`] as success ordering makes the store part of this
831 /// operation [`Relaxed`], and using [`Release`] makes the final successful
832 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
833 /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
834 /// success ordering.
835 ///
836 /// **Note:** This method is only available on platforms that support atomic
837 /// operations on `u8`.
838 ///
839 /// # Examples
840 ///
841 /// ```rust
29967ef6
XL
842 /// use std::sync::atomic::{AtomicBool, Ordering};
843 ///
844 /// let x = AtomicBool::new(false);
845 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
846 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
847 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
848 /// assert_eq!(x.load(Ordering::SeqCst), false);
849 /// ```
850 #[inline]
cdc7bbd5 851 #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
29967ef6
XL
852 #[cfg(target_has_atomic = "8")]
853 pub fn fetch_update<F>(
854 &self,
855 set_order: Ordering,
856 fetch_order: Ordering,
857 mut f: F,
858 ) -> Result<bool, bool>
859 where
860 F: FnMut(bool) -> Option<bool>,
861 {
862 let mut prev = self.load(fetch_order);
863 while let Some(next) = f(prev) {
864 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
865 x @ Ok(_) => return x,
866 Err(next_prev) => prev = next_prev,
867 }
868 }
869 Err(prev)
870 }
1a4d82fc
JJ
871}
872
60c5eb7d 873#[cfg(target_has_atomic_load_store = "ptr")]
1a4d82fc
JJ
874impl<T> AtomicPtr<T> {
875 /// Creates a new `AtomicPtr`.
876 ///
877 /// # Examples
878 ///
879 /// ```
880 /// use std::sync::atomic::AtomicPtr;
881 ///
85aaf69f 882 /// let ptr = &mut 5;
1a4d82fc
JJ
883 /// let atomic_ptr = AtomicPtr::new(ptr);
884 /// ```
885 #[inline]
85aaf69f 886 #[stable(feature = "rust1", since = "1.0.0")]
cdc7bbd5 887 #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
62682a34
SL
888 pub const fn new(p: *mut T) -> AtomicPtr<T> {
889 AtomicPtr { p: UnsafeCell::new(p) }
1a4d82fc
JJ
890 }
891
9e0c209e
SL
892 /// Returns a mutable reference to the underlying pointer.
893 ///
894 /// This is safe because the mutable reference guarantees that no other threads are
895 /// concurrently accessing the atomic data.
896 ///
897 /// # Examples
898 ///
899 /// ```
9e0c209e
SL
900 /// use std::sync::atomic::{AtomicPtr, Ordering};
901 ///
cdc7bbd5
XL
902 /// let mut data = 10;
903 /// let mut atomic_ptr = AtomicPtr::new(&mut data);
904 /// let mut other_data = 5;
905 /// *atomic_ptr.get_mut() = &mut other_data;
9e0c209e
SL
906 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
907 /// ```
908 #[inline]
476ff2be 909 #[stable(feature = "atomic_access", since = "1.15.0")]
9e0c209e 910 pub fn get_mut(&mut self) -> &mut *mut T {
1b1a35ee
XL
911 self.p.get_mut()
912 }
913
914 /// Get atomic access to a pointer.
915 ///
916 /// # Examples
917 ///
918 /// ```
919 /// #![feature(atomic_from_mut)]
920 /// use std::sync::atomic::{AtomicPtr, Ordering};
921 ///
cdc7bbd5
XL
922 /// let mut data = 123;
923 /// let mut some_ptr = &mut data as *mut i32;
1b1a35ee 924 /// let a = AtomicPtr::from_mut(&mut some_ptr);
cdc7bbd5
XL
925 /// let mut other_data = 456;
926 /// a.store(&mut other_data, Ordering::Relaxed);
1b1a35ee
XL
927 /// assert_eq!(unsafe { *some_ptr }, 456);
928 /// ```
929 #[inline]
930 #[cfg(target_has_atomic_equal_alignment = "ptr")]
931 #[unstable(feature = "atomic_from_mut", issue = "76314")]
932 pub fn from_mut(v: &mut *mut T) -> &Self {
933 use crate::mem::align_of;
934 let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()];
935 // SAFETY:
936 // - the mutable reference guarantees unique ownership.
937 // - the alignment of `*mut T` and `Self` is the same on all platforms
938 // supported by rust, as verified above.
939 unsafe { &*(v as *mut *mut T as *mut Self) }
9e0c209e
SL
940 }
941
942 /// Consumes the atomic and returns the contained value.
943 ///
944 /// This is safe because passing `self` by value guarantees that no other threads are
945 /// concurrently accessing the atomic data.
946 ///
947 /// # Examples
948 ///
949 /// ```
9e0c209e
SL
950 /// use std::sync::atomic::AtomicPtr;
951 ///
cdc7bbd5
XL
952 /// let mut data = 5;
953 /// let atomic_ptr = AtomicPtr::new(&mut data);
9e0c209e
SL
954 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
955 /// ```
956 #[inline]
476ff2be 957 #[stable(feature = "atomic_access", since = "1.15.0")]
29967ef6
XL
958 #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
959 pub const fn into_inner(self) -> *mut T {
2c00a5a8 960 self.p.into_inner()
9e0c209e
SL
961 }
962
1a4d82fc
JJ
963 /// Loads a value from the pointer.
964 ///
32a655c1 965 /// `load` takes an [`Ordering`] argument which describes the memory ordering
b7449926 966 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1a4d82fc
JJ
967 ///
968 /// # Panics
969 ///
32a655c1
SL
970 /// Panics if `order` is [`Release`] or [`AcqRel`].
971 ///
1a4d82fc
JJ
972 /// # Examples
973 ///
974 /// ```
975 /// use std::sync::atomic::{AtomicPtr, Ordering};
976 ///
85aaf69f 977 /// let ptr = &mut 5;
1a4d82fc
JJ
978 /// let some_ptr = AtomicPtr::new(ptr);
979 ///
980 /// let value = some_ptr.load(Ordering::Relaxed);
981 /// ```
982 #[inline]
85aaf69f 983 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 984 pub fn load(&self, order: Ordering) -> *mut T {
fc512014 985 // SAFETY: data races are prevented by atomic intrinsics.
5869c6ff 986 unsafe { atomic_load(self.p.get(), order) }
1a4d82fc
JJ
987 }
988
989 /// Stores a value into the pointer.
990 ///
32a655c1 991 /// `store` takes an [`Ordering`] argument which describes the memory ordering
b7449926
XL
992 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
993 ///
994 /// # Panics
995 ///
996 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
32a655c1 997 ///
1a4d82fc
JJ
998 /// # Examples
999 ///
1000 /// ```
1001 /// use std::sync::atomic::{AtomicPtr, Ordering};
1002 ///
85aaf69f 1003 /// let ptr = &mut 5;
1a4d82fc
JJ
1004 /// let some_ptr = AtomicPtr::new(ptr);
1005 ///
85aaf69f 1006 /// let other_ptr = &mut 10;
1a4d82fc
JJ
1007 ///
1008 /// some_ptr.store(other_ptr, Ordering::Relaxed);
1009 /// ```
1a4d82fc 1010 #[inline]
85aaf69f 1011 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 1012 pub fn store(&self, ptr: *mut T, order: Ordering) {
fc512014
XL
1013 // SAFETY: data races are prevented by atomic intrinsics.
1014 unsafe {
1015 atomic_store(self.p.get(), ptr, order);
1016 }
1a4d82fc
JJ
1017 }
1018
cc61c64b 1019 /// Stores a value into the pointer, returning the previous value.
1a4d82fc 1020 ///
32a655c1 1021 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
b7449926
XL
1022 /// of this operation. All ordering modes are possible. Note that using
1023 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1024 /// using [`Release`] makes the load part [`Relaxed`].
32a655c1 1025 ///
f035d41b
XL
1026 /// **Note:** This method is only available on platforms that support atomic
1027 /// operations on pointers.
1028 ///
1a4d82fc
JJ
1029 /// # Examples
1030 ///
1031 /// ```
1032 /// use std::sync::atomic::{AtomicPtr, Ordering};
1033 ///
85aaf69f 1034 /// let ptr = &mut 5;
1a4d82fc
JJ
1035 /// let some_ptr = AtomicPtr::new(ptr);
1036 ///
85aaf69f 1037 /// let other_ptr = &mut 10;
1a4d82fc
JJ
1038 ///
1039 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
1040 /// ```
1041 #[inline]
85aaf69f 1042 #[stable(feature = "rust1", since = "1.0.0")]
e74abb32 1043 #[cfg(target_has_atomic = "ptr")]
1a4d82fc 1044 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
fc512014 1045 // SAFETY: data races are prevented by atomic intrinsics.
5869c6ff 1046 unsafe { atomic_swap(self.p.get(), ptr, order) }
1a4d82fc
JJ
1047 }
1048
c1a9b12d 1049 /// Stores a value into the pointer if the current value is the same as the `current` value.
1a4d82fc 1050 ///
c1a9b12d
SL
1051 /// The return value is always the previous value. If it is equal to `current`, then the value
1052 /// was updated.
1a4d82fc 1053 ///
32a655c1 1054 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
b7449926
XL
1055 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1056 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1057 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1058 /// happens, and using [`Release`] makes the load part [`Relaxed`].
32a655c1 1059 ///
f035d41b
XL
1060 /// **Note:** This method is only available on platforms that support atomic
1061 /// operations on pointers.
1062 ///
fc512014
XL
1063 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
1064 ///
1065 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
1066 /// memory orderings:
1067 ///
1068 /// Original | Success | Failure
1069 /// -------- | ------- | -------
1070 /// Relaxed | Relaxed | Relaxed
1071 /// Acquire | Acquire | Acquire
1072 /// Release | Release | Relaxed
1073 /// AcqRel | AcqRel | Acquire
1074 /// SeqCst | SeqCst | SeqCst
1075 ///
1076 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
1077 /// which allows the compiler to generate better assembly code when the compare and swap
1078 /// is used in a loop.
1079 ///
1a4d82fc
JJ
1080 /// # Examples
1081 ///
1082 /// ```
1083 /// use std::sync::atomic::{AtomicPtr, Ordering};
1084 ///
85aaf69f 1085 /// let ptr = &mut 5;
1a4d82fc
JJ
1086 /// let some_ptr = AtomicPtr::new(ptr);
1087 ///
85aaf69f 1088 /// let other_ptr = &mut 10;
1a4d82fc 1089 ///
e1599b0c 1090 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1a4d82fc
JJ
1091 /// ```
1092 #[inline]
85aaf69f 1093 #[stable(feature = "rust1", since = "1.0.0")]
fc512014
XL
1094 #[rustc_deprecated(
1095 since = "1.50.0",
1096 reason = "Use `compare_exchange` or `compare_exchange_weak` instead"
1097 )]
e74abb32 1098 #[cfg(target_has_atomic = "ptr")]
c1a9b12d 1099 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
54a0048b
SL
1100 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1101 Ok(x) => x,
1102 Err(x) => x,
1103 }
7453a54e
SL
1104 }
1105
1106 /// Stores a value into the pointer if the current value is the same as the `current` value.
1107 ///
54a0048b 1108 /// The return value is a result indicating whether the new value was written and containing
3157f602 1109 /// the previous value. On success this value is guaranteed to be equal to `current`.
7453a54e 1110 ///
32a655c1 1111 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
fc512014
XL
1112 /// ordering of this operation. `success` describes the required ordering for the
1113 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1114 /// `failure` describes the required ordering for the load operation that takes place when
1115 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
b7449926
XL
1116 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1117 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1118 /// and must be equivalent to or weaker than the success ordering.
32a655c1 1119 ///
f035d41b
XL
1120 /// **Note:** This method is only available on platforms that support atomic
1121 /// operations on pointers.
1122 ///
7453a54e
SL
1123 /// # Examples
1124 ///
1125 /// ```
7453a54e
SL
1126 /// use std::sync::atomic::{AtomicPtr, Ordering};
1127 ///
1128 /// let ptr = &mut 5;
1129 /// let some_ptr = AtomicPtr::new(ptr);
1130 ///
1131 /// let other_ptr = &mut 10;
7453a54e 1132 ///
e1599b0c 1133 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
7453a54e
SL
1134 /// Ordering::SeqCst, Ordering::Relaxed);
1135 /// ```
1136 #[inline]
a7813a04 1137 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
e74abb32 1138 #[cfg(target_has_atomic = "ptr")]
dfeec247
XL
1139 pub fn compare_exchange(
1140 &self,
1141 current: *mut T,
1142 new: *mut T,
1143 success: Ordering,
1144 failure: Ordering,
1145 ) -> Result<*mut T, *mut T> {
fc512014 1146 // SAFETY: data races are prevented by atomic intrinsics.
5869c6ff 1147 unsafe { atomic_compare_exchange(self.p.get(), current, new, success, failure) }
1a4d82fc 1148 }
7453a54e
SL
1149
1150 /// Stores a value into the pointer if the current value is the same as the `current` value.
1151 ///
1b1a35ee 1152 /// Unlike [`AtomicPtr::compare_exchange`], this function is allowed to spuriously fail even when the
7453a54e 1153 /// comparison succeeds, which can result in more efficient code on some platforms. The
54a0048b
SL
1154 /// return value is a result indicating whether the new value was written and containing the
1155 /// previous value.
7453a54e 1156 ///
32a655c1 1157 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
fc512014
XL
1158 /// ordering of this operation. `success` describes the required ordering for the
1159 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1160 /// `failure` describes the required ordering for the load operation that takes place when
1161 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
b7449926
XL
1162 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1163 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1164 /// and must be equivalent to or weaker than the success ordering.
32a655c1 1165 ///
f035d41b
XL
1166 /// **Note:** This method is only available on platforms that support atomic
1167 /// operations on pointers.
1168 ///
7453a54e
SL
1169 /// # Examples
1170 ///
1171 /// ```
7453a54e
SL
1172 /// use std::sync::atomic::{AtomicPtr, Ordering};
1173 ///
1174 /// let some_ptr = AtomicPtr::new(&mut 5);
1175 ///
1176 /// let new = &mut 10;
1177 /// let mut old = some_ptr.load(Ordering::Relaxed);
1178 /// loop {
54a0048b
SL
1179 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1180 /// Ok(_) => break,
1181 /// Err(x) => old = x,
7453a54e
SL
1182 /// }
1183 /// }
1184 /// ```
1185 #[inline]
a7813a04 1186 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
e74abb32 1187 #[cfg(target_has_atomic = "ptr")]
dfeec247
XL
1188 pub fn compare_exchange_weak(
1189 &self,
1190 current: *mut T,
1191 new: *mut T,
1192 success: Ordering,
1193 failure: Ordering,
1194 ) -> Result<*mut T, *mut T> {
fc512014
XL
1195 // SAFETY: This intrinsic is unsafe because it operates on a raw pointer
1196 // but we know for sure that the pointer is valid (we just got it from
1197 // an `UnsafeCell` that we have by reference) and the atomic operation
1198 // itself allows us to safely mutate the `UnsafeCell` contents.
5869c6ff 1199 unsafe { atomic_compare_exchange_weak(self.p.get(), current, new, success, failure) }
7453a54e 1200 }
29967ef6
XL
1201
1202 /// Fetches the value, and applies a function to it that returns an optional
1203 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
1204 /// returned `Some(_)`, else `Err(previous_value)`.
1205 ///
1206 /// Note: This may call the function multiple times if the value has been
1207 /// changed from other threads in the meantime, as long as the function
1208 /// returns `Some(_)`, but the function will have been applied only once to
1209 /// the stored value.
1210 ///
1211 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
1212 /// ordering of this operation. The first describes the required ordering for
1213 /// when the operation finally succeeds while the second describes the
1214 /// required ordering for loads. These correspond to the success and failure
1215 /// orderings of [`AtomicPtr::compare_exchange`] respectively.
1216 ///
1217 /// Using [`Acquire`] as success ordering makes the store part of this
1218 /// operation [`Relaxed`], and using [`Release`] makes the final successful
1219 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
1220 /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
1221 /// success ordering.
1222 ///
1223 /// **Note:** This method is only available on platforms that support atomic
1224 /// operations on pointers.
1225 ///
1226 /// # Examples
1227 ///
1228 /// ```rust
29967ef6
XL
1229 /// use std::sync::atomic::{AtomicPtr, Ordering};
1230 ///
1231 /// let ptr: *mut _ = &mut 5;
1232 /// let some_ptr = AtomicPtr::new(ptr);
1233 ///
1234 /// let new: *mut _ = &mut 10;
1235 /// assert_eq!(some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
1236 /// let result = some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
1237 /// if x == ptr {
1238 /// Some(new)
1239 /// } else {
1240 /// None
1241 /// }
1242 /// });
1243 /// assert_eq!(result, Ok(ptr));
1244 /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
1245 /// ```
1246 #[inline]
cdc7bbd5 1247 #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
29967ef6
XL
1248 #[cfg(target_has_atomic = "ptr")]
1249 pub fn fetch_update<F>(
1250 &self,
1251 set_order: Ordering,
1252 fetch_order: Ordering,
1253 mut f: F,
1254 ) -> Result<*mut T, *mut T>
1255 where
1256 F: FnMut(*mut T) -> Option<*mut T>,
1257 {
1258 let mut prev = self.load(fetch_order);
1259 while let Some(next) = f(prev) {
1260 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1261 x @ Ok(_) => return x,
1262 Err(next_prev) => prev = next_prev,
1263 }
1264 }
1265 Err(prev)
1266 }
7453a54e
SL
1267}
1268
60c5eb7d 1269#[cfg(target_has_atomic_load_store = "8")]
ff7c6d11
XL
1270#[stable(feature = "atomic_bool_from", since = "1.24.0")]
1271impl From<bool> for AtomicBool {
0731742a
XL
1272 /// Converts a `bool` into an `AtomicBool`.
1273 ///
1274 /// # Examples
1275 ///
1276 /// ```
1277 /// use std::sync::atomic::AtomicBool;
1278 /// let atomic_bool = AtomicBool::from(true);
1279 /// assert_eq!(format!("{:?}", atomic_bool), "true")
1280 /// ```
ff7c6d11 1281 #[inline]
dfeec247
XL
1282 fn from(b: bool) -> Self {
1283 Self::new(b)
1284 }
ff7c6d11
XL
1285}
1286
60c5eb7d 1287#[cfg(target_has_atomic_load_store = "ptr")]
abe05a73
XL
1288#[stable(feature = "atomic_from", since = "1.23.0")]
1289impl<T> From<*mut T> for AtomicPtr<T> {
1290 #[inline]
dfeec247
XL
1291 fn from(p: *mut T) -> Self {
1292 Self::new(p)
1293 }
abe05a73
XL
1294}
1295
1b1a35ee
XL
#[allow(unused_macros)] // This macro ends up being unused on some architectures.
macro_rules! if_not_8_bit {
    // For the 8-bit integer types the expansion is suppressed entirely.
    (u8, $($tt:tt)*) => { "" };
    (i8, $($tt:tt)*) => { "" };
    // Every other type expands to the given tokens unchanged.
    ($other:ident, $($tt:tt)*) => { $($tt)* };
}
1302
60c5eb7d 1303#[cfg(target_has_atomic_load_store = "8")]
a7813a04 1304macro_rules! atomic_int {
e74abb32 1305 ($cfg_cas:meta,
1b1a35ee 1306 $cfg_align:meta,
e74abb32 1307 $stable:meta,
a7813a04
XL
1308 $stable_cxchg:meta,
1309 $stable_debug:meta,
9e0c209e 1310 $stable_access:meta,
2c00a5a8
XL
1311 $stable_from:meta,
1312 $stable_nand:meta,
60c5eb7d 1313 $const_stable:meta,
9fa01778 1314 $stable_init_const:meta,
6a06907d 1315 $s_int_type:literal,
0531ce1d 1316 $extra_feature:expr,
83c7162d 1317 $min_fn:ident, $max_fn:ident,
a1dfa0c6 1318 $align:expr,
9fa01778 1319 $atomic_new:expr,
a7813a04
XL
1320 $int_type:ident $atomic_type:ident $atomic_init:ident) => {
1321 /// An integer type which can be safely shared between threads.
9e0c209e 1322 ///
ea8adc8c
XL
1323 /// This type has the same in-memory representation as the underlying
1324 /// integer type, [`
1325 #[doc = $s_int_type]
6a06907d 1326 /// `]. For more about the differences between atomic types and
9fa01778
XL
1327 /// non-atomic types as well as information about the portability of
1328 /// this type, please see the [module-level documentation].
ea8adc8c 1329 ///
f035d41b
XL
1330 /// **Note:** This type is only available on platforms that support
1331 /// atomic loads and stores of [`
1332 #[doc = $s_int_type]
6a06907d 1333 /// `].
f035d41b 1334 ///
1b1a35ee 1335 /// [module-level documentation]: crate::sync::atomic
a7813a04 1336 #[$stable]
a1dfa0c6 1337 #[repr(C, align($align))]
a7813a04
XL
1338 pub struct $atomic_type {
1339 v: UnsafeCell<$int_type>,
1340 }
1341
1342 /// An atomic integer initialized to `0`.
9fa01778 1343 #[$stable_init_const]
532ac7d7 1344 #[rustc_deprecated(
9fa01778
XL
1345 since = "1.34.0",
1346 reason = "the `new` function is now preferred",
1347 suggestion = $atomic_new,
532ac7d7 1348 )]
a7813a04
XL
1349 pub const $atomic_init: $atomic_type = $atomic_type::new(0);
1350
1351 #[$stable]
1352 impl Default for $atomic_type {
29967ef6 1353 #[inline]
a7813a04
XL
1354 fn default() -> Self {
1355 Self::new(Default::default())
1356 }
1357 }
1358
2c00a5a8 1359 #[$stable_from]
abe05a73 1360 impl From<$int_type> for $atomic_type {
5869c6ff
XL
1361 #[doc = concat!("Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`.")]
1362 #[inline]
1363 fn from(v: $int_type) -> Self { Self::new(v) }
abe05a73
XL
1364 }
1365
a7813a04
XL
1366 #[$stable_debug]
1367 impl fmt::Debug for $atomic_type {
48663c56 1368 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
83c7162d 1369 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
a7813a04
XL
1370 }
1371 }
1372
1373 // Send is implicitly implemented.
1374 #[$stable]
1375 unsafe impl Sync for $atomic_type {}
1376
1377 impl $atomic_type {
5869c6ff
XL
1378 /// Creates a new atomic integer.
1379 ///
1380 /// # Examples
1381 ///
1382 /// ```
1383 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
1384 ///
1385 #[doc = concat!("let atomic_forty_two = ", stringify!($atomic_type), "::new(42);")]
1386 /// ```
1387 #[inline]
1388 #[$stable]
1389 #[$const_stable]
1390 pub const fn new(v: $int_type) -> Self {
1391 Self {v: UnsafeCell::new(v)}
a7813a04
XL
1392 }
1393
5869c6ff
XL
1394 /// Returns a mutable reference to the underlying integer.
1395 ///
1396 /// This is safe because the mutable reference guarantees that no other threads are
1397 /// concurrently accessing the atomic data.
1398 ///
1399 /// # Examples
1400 ///
1401 /// ```
1402 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1403 ///
1404 #[doc = concat!("let mut some_var = ", stringify!($atomic_type), "::new(10);")]
1405 /// assert_eq!(*some_var.get_mut(), 10);
1406 /// *some_var.get_mut() = 5;
1407 /// assert_eq!(some_var.load(Ordering::SeqCst), 5);
1408 /// ```
1409 #[inline]
1410 #[$stable_access]
1411 pub fn get_mut(&mut self) -> &mut $int_type {
1412 self.v.get_mut()
1b1a35ee
XL
1413 }
1414
5869c6ff
XL
1415 #[doc = concat!("Get atomic access to a `&mut ", stringify!($int_type), "`.")]
1416 ///
1417 #[doc = if_not_8_bit! {
1418 $int_type,
1419 concat!(
1420 "**Note:** This function is only available on targets where `",
1421 stringify!($int_type), "` has an alignment of ", $align, " bytes."
1422 )
1423 }]
1424 ///
1425 /// # Examples
1426 ///
1427 /// ```
1428 /// #![feature(atomic_from_mut)]
1429 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1430 ///
1431 /// let mut some_int = 123;
1432 #[doc = concat!("let a = ", stringify!($atomic_type), "::from_mut(&mut some_int);")]
1433 /// a.store(100, Ordering::Relaxed);
1434 /// assert_eq!(some_int, 100);
1435 /// ```
1436 ///
1437 #[inline]
1438 #[$cfg_align]
1439 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1440 pub fn from_mut(v: &mut $int_type) -> &Self {
1441 use crate::mem::align_of;
1442 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
1443 // SAFETY:
1444 // - the mutable reference guarantees unique ownership.
1445 // - the alignment of `$int_type` and `Self` is the
1446 // same, as promised by $cfg_align and verified above.
1447 unsafe { &*(v as *mut $int_type as *mut Self) }
9e0c209e
SL
1448 }
1449
5869c6ff
XL
1450 /// Consumes the atomic and returns the contained value.
1451 ///
1452 /// This is safe because passing `self` by value guarantees that no other threads are
1453 /// concurrently accessing the atomic data.
1454 ///
1455 /// # Examples
1456 ///
1457 /// ```
1458 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
1459 ///
1460 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1461 /// assert_eq!(some_var.into_inner(), 5);
1462 /// ```
1463 #[inline]
1464 #[$stable_access]
1465 #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
1466 pub const fn into_inner(self) -> $int_type {
1467 self.v.into_inner()
9e0c209e
SL
1468 }
1469
5869c6ff
XL
1470 /// Loads a value from the atomic integer.
1471 ///
1472 /// `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1473 /// Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1474 ///
1475 /// # Panics
1476 ///
1477 /// Panics if `order` is [`Release`] or [`AcqRel`].
1478 ///
1479 /// # Examples
1480 ///
1481 /// ```
1482 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1483 ///
1484 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1485 ///
1486 /// assert_eq!(some_var.load(Ordering::Relaxed), 5);
1487 /// ```
1488 #[inline]
1489 #[$stable]
1490 pub fn load(&self, order: Ordering) -> $int_type {
1491 // SAFETY: data races are prevented by atomic intrinsics.
1492 unsafe { atomic_load(self.v.get(), order) }
a7813a04
XL
1493 }
1494
5869c6ff
XL
1495 /// Stores a value into the atomic integer.
1496 ///
1497 /// `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
1498 /// Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1499 ///
1500 /// # Panics
1501 ///
1502 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
1503 ///
1504 /// # Examples
1505 ///
1506 /// ```
1507 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1508 ///
1509 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1510 ///
1511 /// some_var.store(10, Ordering::Relaxed);
1512 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1513 /// ```
1514 #[inline]
1515 #[$stable]
1516 pub fn store(&self, val: $int_type, order: Ordering) {
1517 // SAFETY: data races are prevented by atomic intrinsics.
1518 unsafe { atomic_store(self.v.get(), val, order); }
a7813a04
XL
1519 }
1520
5869c6ff
XL
1521 /// Stores a value into the atomic integer, returning the previous value.
1522 ///
1523 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
1524 /// of this operation. All ordering modes are possible. Note that using
1525 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1526 /// using [`Release`] makes the load part [`Relaxed`].
1527 ///
1528 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 1529 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
1530 ///
1531 /// # Examples
1532 ///
1533 /// ```
1534 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1535 ///
1536 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1537 ///
1538 /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
1539 /// ```
1540 #[inline]
1541 #[$stable]
1542 #[$cfg_cas]
1543 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
1544 // SAFETY: data races are prevented by atomic intrinsics.
1545 unsafe { atomic_swap(self.v.get(), val, order) }
a7813a04
XL
1546 }
1547
5869c6ff
XL
1548 /// Stores a value into the atomic integer if the current value is the same as
1549 /// the `current` value.
1550 ///
1551 /// The return value is always the previous value. If it is equal to `current`, then the
1552 /// value was updated.
1553 ///
1554 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1555 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1556 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1557 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1558 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1559 ///
1560 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 1561 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
1562 ///
1563 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
1564 ///
1565 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
1566 /// memory orderings:
1567 ///
1568 /// Original | Success | Failure
1569 /// -------- | ------- | -------
1570 /// Relaxed | Relaxed | Relaxed
1571 /// Acquire | Acquire | Acquire
1572 /// Release | Release | Relaxed
1573 /// AcqRel | AcqRel | Acquire
1574 /// SeqCst | SeqCst | SeqCst
1575 ///
1576 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
1577 /// which allows the compiler to generate better assembly code when the compare and swap
1578 /// is used in a loop.
1579 ///
1580 /// # Examples
1581 ///
1582 /// ```
1583 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1584 ///
1585 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1586 ///
1587 /// assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
1588 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1589 ///
1590 /// assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
1591 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1592 /// ```
1593 #[inline]
1594 #[$stable]
1595 #[rustc_deprecated(
1596 since = "1.50.0",
1597 reason = "Use `compare_exchange` or `compare_exchange_weak` instead")
1598 ]
1599 #[$cfg_cas]
1600 pub fn compare_and_swap(&self,
1601 current: $int_type,
1602 new: $int_type,
1603 order: Ordering) -> $int_type {
1604 match self.compare_exchange(current,
1605 new,
1606 order,
1607 strongest_failure_ordering(order)) {
1608 Ok(x) => x,
1609 Err(x) => x,
a7813a04
XL
1610 }
1611 }
1612
5869c6ff
XL
1613 /// Stores a value into the atomic integer if the current value is the same as
1614 /// the `current` value.
1615 ///
1616 /// The return value is a result indicating whether the new value was written and
1617 /// containing the previous value. On success this value is guaranteed to be equal to
1618 /// `current`.
1619 ///
1620 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1621 /// ordering of this operation. `success` describes the required ordering for the
1622 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1623 /// `failure` describes the required ordering for the load operation that takes place when
1624 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1625 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1626 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1627 /// and must be equivalent to or weaker than the success ordering.
1628 ///
1629 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 1630 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
1631 ///
1632 /// # Examples
1633 ///
1634 /// ```
1635 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1636 ///
1637 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
1638 ///
1639 /// assert_eq!(some_var.compare_exchange(5, 10,
1640 /// Ordering::Acquire,
1641 /// Ordering::Relaxed),
1642 /// Ok(5));
1643 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1644 ///
1645 /// assert_eq!(some_var.compare_exchange(6, 12,
1646 /// Ordering::SeqCst,
1647 /// Ordering::Acquire),
1648 /// Err(10));
1649 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
1650 /// ```
1651 #[inline]
1652 #[$stable_cxchg]
1653 #[$cfg_cas]
1654 pub fn compare_exchange(&self,
1655 current: $int_type,
1656 new: $int_type,
1657 success: Ordering,
1658 failure: Ordering) -> Result<$int_type, $int_type> {
1659 // SAFETY: data races are prevented by atomic intrinsics.
1660 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
a7813a04
XL
1661 }
1662
5869c6ff
XL
1663 /// Stores a value into the atomic integer if the current value is the same as
1664 /// the `current` value.
1665 ///
1666 #[doc = concat!("Unlike [`", stringify!($atomic_type), "::compare_exchange`],")]
1667 /// this function is allowed to spuriously fail even
1668 /// when the comparison succeeds, which can result in more efficient code on some
1669 /// platforms. The return value is a result indicating whether the new value was
1670 /// written and containing the previous value.
1671 ///
1672 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1673 /// ordering of this operation. `success` describes the required ordering for the
1674 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1675 /// `failure` describes the required ordering for the load operation that takes place when
1676 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1677 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1678 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1679 /// and must be equivalent to or weaker than the success ordering.
1680 ///
1681 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 1682 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
1683 ///
1684 /// # Examples
1685 ///
1686 /// ```
1687 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1688 ///
1689 #[doc = concat!("let val = ", stringify!($atomic_type), "::new(4);")]
1690 ///
1691 /// let mut old = val.load(Ordering::Relaxed);
1692 /// loop {
1693 /// let new = old * 2;
1694 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1695 /// Ok(_) => break,
1696 /// Err(x) => old = x,
1697 /// }
1698 /// }
1699 /// ```
1700 #[inline]
1701 #[$stable_cxchg]
1702 #[$cfg_cas]
1703 pub fn compare_exchange_weak(&self,
1704 current: $int_type,
1705 new: $int_type,
1706 success: Ordering,
1707 failure: Ordering) -> Result<$int_type, $int_type> {
1708 // SAFETY: data races are prevented by atomic intrinsics.
1709 unsafe {
1710 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
a7813a04
XL
1711 }
1712 }
1713
5869c6ff
XL
1714 /// Adds to the current value, returning the previous value.
1715 ///
1716 /// This operation wraps around on overflow.
1717 ///
1718 /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
1719 /// of this operation. All ordering modes are possible. Note that using
1720 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1721 /// using [`Release`] makes the load part [`Relaxed`].
1722 ///
1723 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 1724 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
1725 ///
1726 /// # Examples
1727 ///
1728 /// ```
1729 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1730 ///
1731 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0);")]
1732 /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
1733 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
1734 /// ```
1735 #[inline]
1736 #[$stable]
1737 #[$cfg_cas]
1738 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
1739 // SAFETY: data races are prevented by atomic intrinsics.
1740 unsafe { atomic_add(self.v.get(), val, order) }
a7813a04
XL
1741 }
1742
5869c6ff
XL
1743 /// Subtracts from the current value, returning the previous value.
1744 ///
1745 /// This operation wraps around on overflow.
1746 ///
1747 /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
1748 /// of this operation. All ordering modes are possible. Note that using
1749 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1750 /// using [`Release`] makes the load part [`Relaxed`].
1751 ///
1752 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 1753 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
1754 ///
1755 /// # Examples
1756 ///
1757 /// ```
1758 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1759 ///
1760 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(20);")]
1761 /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
1762 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
1763 /// ```
1764 #[inline]
1765 #[$stable]
1766 #[$cfg_cas]
1767 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
1768 // SAFETY: data races are prevented by atomic intrinsics.
1769 unsafe { atomic_sub(self.v.get(), val, order) }
a7813a04
XL
1770 }
1771
5869c6ff
XL
1772 /// Bitwise "and" with the current value.
1773 ///
1774 /// Performs a bitwise "and" operation on the current value and the argument `val`, and
1775 /// sets the new value to the result.
1776 ///
1777 /// Returns the previous value.
1778 ///
1779 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
1780 /// of this operation. All ordering modes are possible. Note that using
1781 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1782 /// using [`Release`] makes the load part [`Relaxed`].
1783 ///
1784 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 1785 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
1786 ///
1787 /// # Examples
1788 ///
1789 /// ```
1790 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1791 ///
1792 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
1793 /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
1794 /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
1795 /// ```
1796 #[inline]
1797 #[$stable]
1798 #[$cfg_cas]
1799 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
1800 // SAFETY: data races are prevented by atomic intrinsics.
1801 unsafe { atomic_and(self.v.get(), val, order) }
a7813a04
XL
1802 }
1803
5869c6ff
XL
1804 /// Bitwise "nand" with the current value.
1805 ///
1806 /// Performs a bitwise "nand" operation on the current value and the argument `val`, and
1807 /// sets the new value to the result.
1808 ///
1809 /// Returns the previous value.
1810 ///
1811 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
1812 /// of this operation. All ordering modes are possible. Note that using
1813 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1814 /// using [`Release`] makes the load part [`Relaxed`].
1815 ///
1816 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 1817 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
1818 ///
1819 /// # Examples
1820 ///
1821 /// ```
1822 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1823 ///
1824 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0x13);")]
1825 /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
1826 /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
1827 /// ```
1828 #[inline]
1829 #[$stable_nand]
1830 #[$cfg_cas]
1831 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
1832 // SAFETY: data races are prevented by atomic intrinsics.
1833 unsafe { atomic_nand(self.v.get(), val, order) }
2c00a5a8
XL
1834 }
1835
5869c6ff
XL
1836 /// Bitwise "or" with the current value.
1837 ///
1838 /// Performs a bitwise "or" operation on the current value and the argument `val`, and
1839 /// sets the new value to the result.
1840 ///
1841 /// Returns the previous value.
1842 ///
1843 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
1844 /// of this operation. All ordering modes are possible. Note that using
1845 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1846 /// using [`Release`] makes the load part [`Relaxed`].
1847 ///
1848 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 1849 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
1850 ///
1851 /// # Examples
1852 ///
1853 /// ```
1854 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1855 ///
1856 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
1857 /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
1858 /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
1859 /// ```
1860 #[inline]
1861 #[$stable]
1862 #[$cfg_cas]
1863 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
1864 // SAFETY: data races are prevented by atomic intrinsics.
1865 unsafe { atomic_or(self.v.get(), val, order) }
a7813a04
XL
1866 }
1867
5869c6ff
XL
1868 /// Bitwise "xor" with the current value.
1869 ///
1870 /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
1871 /// sets the new value to the result.
1872 ///
1873 /// Returns the previous value.
1874 ///
1875 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
1876 /// of this operation. All ordering modes are possible. Note that using
1877 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1878 /// using [`Release`] makes the load part [`Relaxed`].
1879 ///
1880 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 1881 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
1882 ///
1883 /// # Examples
1884 ///
1885 /// ```
1886 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1887 ///
1888 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
1889 /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
1890 /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
1891 /// ```
1892 #[inline]
1893 #[$stable]
1894 #[$cfg_cas]
1895 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
1896 // SAFETY: data races are prevented by atomic intrinsics.
1897 unsafe { atomic_xor(self.v.get(), val, order) }
a7813a04 1898 }
83c7162d 1899
5869c6ff
XL
1900 /// Fetches the value, and applies a function to it that returns an optional
1901 /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
1902 /// `Err(previous_value)`.
1903 ///
1904 /// Note: This may call the function multiple times if the value has been changed from other threads in
1905 /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
1906 /// only once to the stored value.
1907 ///
1908 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
1909 /// The first describes the required ordering for when the operation finally succeeds while the second
1910 /// describes the required ordering for loads. These correspond to the success and failure orderings of
1911 #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
1912 /// respectively.
1913 ///
1914 /// Using [`Acquire`] as success ordering makes the store part
1915 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1916 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
1917 /// and must be equivalent to or weaker than the success ordering.
1918 ///
1919 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 1920 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
1921 ///
1922 /// # Examples
1923 ///
1924 /// ```rust
1925 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1926 ///
1927 #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
1928 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
1929 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
1930 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
1931 /// assert_eq!(x.load(Ordering::SeqCst), 9);
1932 /// ```
1933 #[inline]
1934 #[stable(feature = "no_more_cas", since = "1.45.0")]
1935 #[$cfg_cas]
1936 pub fn fetch_update<F>(&self,
1937 set_order: Ordering,
1938 fetch_order: Ordering,
1939 mut f: F) -> Result<$int_type, $int_type>
1940 where F: FnMut($int_type) -> Option<$int_type> {
1941 let mut prev = self.load(fetch_order);
1942 while let Some(next) = f(prev) {
1943 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
1944 x @ Ok(_) => return x,
1945 Err(next_prev) => prev = next_prev
83c7162d 1946 }
83c7162d 1947 }
5869c6ff 1948 Err(prev)
83c7162d
XL
1949 }
1950
5869c6ff
XL
1951 /// Maximum with the current value.
1952 ///
1953 /// Finds the maximum of the current value and the argument `val`, and
1954 /// sets the new value to the result.
1955 ///
1956 /// Returns the previous value.
1957 ///
1958 /// `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
1959 /// of this operation. All ordering modes are possible. Note that using
1960 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1961 /// using [`Release`] makes the load part [`Relaxed`].
1962 ///
1963 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 1964 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
1965 ///
1966 /// # Examples
1967 ///
1968 /// ```
1969 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1970 ///
1971 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
1972 /// assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
1973 /// assert_eq!(foo.load(Ordering::SeqCst), 42);
1974 /// ```
1975 ///
1976 /// If you want to obtain the maximum value in one step, you can use the following:
1977 ///
1978 /// ```
1979 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
1980 ///
1981 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
1982 /// let bar = 42;
1983 /// let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
1984 /// assert!(max_foo == 42);
1985 /// ```
1986 #[inline]
1987 #[stable(feature = "atomic_min_max", since = "1.45.0")]
1988 #[$cfg_cas]
1989 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
1990 // SAFETY: data races are prevented by atomic intrinsics.
1991 unsafe { $max_fn(self.v.get(), val, order) }
83c7162d
XL
1992 }
1993
5869c6ff
XL
1994 /// Minimum with the current value.
1995 ///
1996 /// Finds the minimum of the current value and the argument `val`, and
1997 /// sets the new value to the result.
1998 ///
1999 /// Returns the previous value.
2000 ///
2001 /// `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
2002 /// of this operation. All ordering modes are possible. Note that using
2003 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
2004 /// using [`Release`] makes the load part [`Relaxed`].
2005 ///
2006 /// **Note**: This method is only available on platforms that support atomic operations on
6a06907d 2007 #[doc = concat!("[`", $s_int_type, "`].")]
5869c6ff
XL
2008 ///
2009 /// # Examples
2010 ///
2011 /// ```
2012 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2013 ///
2014 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
2015 /// assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
2016 /// assert_eq!(foo.load(Ordering::Relaxed), 23);
2017 /// assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
2018 /// assert_eq!(foo.load(Ordering::Relaxed), 22);
2019 /// ```
2020 ///
2021 /// If you want to obtain the minimum value in one step, you can use the following:
2022 ///
2023 /// ```
2024 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2025 ///
2026 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
2027 /// let bar = 12;
2028 /// let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
2029 /// assert_eq!(min_foo, 12);
2030 /// ```
2031 #[inline]
2032 #[stable(feature = "atomic_min_max", since = "1.45.0")]
2033 #[$cfg_cas]
2034 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
2035 // SAFETY: data races are prevented by atomic intrinsics.
2036 unsafe { $min_fn(self.v.get(), val, order) }
83c7162d
XL
2037 }
2038
5869c6ff
XL
2039 /// Returns a mutable pointer to the underlying integer.
2040 ///
2041 /// Doing non-atomic reads and writes on the resulting integer can be a data race.
2042 /// This method is mostly useful for FFI, where the function signature may use
2043 #[doc = concat!("`*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.")]
2044 ///
2045 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
2046 /// atomic types work with interior mutability. All modifications of an atomic change the value
2047 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
2048 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
2049 /// restriction: operations on it must be atomic.
2050 ///
2051 /// # Examples
2052 ///
2053 /// ```ignore (extern-declaration)
2054 /// # fn main() {
2055 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
2056 ///
2057 /// extern "C" {
2058 #[doc = concat!(" fn my_atomic_op(arg: *mut ", stringify!($int_type), ");")]
2059 /// }
2060 ///
2061 #[doc = concat!("let mut atomic = ", stringify!($atomic_type), "::new(1);")]
2062 ///
2063 // SAFETY: Safe as long as `my_atomic_op` is atomic.
2064 /// unsafe {
2065 /// my_atomic_op(atomic.as_mut_ptr());
2066 /// }
2067 /// # }
2068 /// ```
2069 #[inline]
2070 #[unstable(feature = "atomic_mut_ptr",
2071 reason = "recently added",
2072 issue = "66893")]
2073 pub fn as_mut_ptr(&self) -> *mut $int_type {
2074 self.v.get()
60c5eb7d 2075 }
a7813a04
XL
2076 }
2077 }
2078}
2079
60c5eb7d 2080#[cfg(target_has_atomic_load_store = "8")]
a7813a04 2081atomic_int! {
e74abb32 2082 cfg(target_has_atomic = "8"),
1b1a35ee 2083 cfg(target_has_atomic_equal_alignment = "8"),
9fa01778
XL
2084 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2085 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2086 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2087 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2088 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2089 stable(feature = "integer_atomics_stable", since = "1.34.0"),
60c5eb7d 2090 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
83c7162d 2091 unstable(feature = "integer_atomics", issue = "32976"),
6a06907d 2092 "i8",
dc9dc135 2093 "",
83c7162d 2094 atomic_min, atomic_max,
a1dfa0c6 2095 1,
9fa01778 2096 "AtomicI8::new(0)",
a7813a04
XL
2097 i8 AtomicI8 ATOMIC_I8_INIT
2098}
60c5eb7d 2099#[cfg(target_has_atomic_load_store = "8")]
a7813a04 2100atomic_int! {
e74abb32 2101 cfg(target_has_atomic = "8"),
1b1a35ee 2102 cfg(target_has_atomic_equal_alignment = "8"),
9fa01778
XL
2103 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2104 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2105 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2106 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2107 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2108 stable(feature = "integer_atomics_stable", since = "1.34.0"),
60c5eb7d 2109 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
83c7162d 2110 unstable(feature = "integer_atomics", issue = "32976"),
6a06907d 2111 "u8",
dc9dc135 2112 "",
83c7162d 2113 atomic_umin, atomic_umax,
a1dfa0c6 2114 1,
9fa01778 2115 "AtomicU8::new(0)",
a7813a04
XL
2116 u8 AtomicU8 ATOMIC_U8_INIT
2117}
60c5eb7d 2118#[cfg(target_has_atomic_load_store = "16")]
a7813a04 2119atomic_int! {
e74abb32 2120 cfg(target_has_atomic = "16"),
1b1a35ee 2121 cfg(target_has_atomic_equal_alignment = "16"),
9fa01778
XL
2122 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2123 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2124 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2125 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2126 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2127 stable(feature = "integer_atomics_stable", since = "1.34.0"),
60c5eb7d 2128 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
83c7162d 2129 unstable(feature = "integer_atomics", issue = "32976"),
6a06907d 2130 "i16",
dc9dc135 2131 "",
83c7162d 2132 atomic_min, atomic_max,
a1dfa0c6 2133 2,
9fa01778 2134 "AtomicI16::new(0)",
a7813a04
XL
2135 i16 AtomicI16 ATOMIC_I16_INIT
2136}
60c5eb7d 2137#[cfg(target_has_atomic_load_store = "16")]
a7813a04 2138atomic_int! {
e74abb32 2139 cfg(target_has_atomic = "16"),
1b1a35ee 2140 cfg(target_has_atomic_equal_alignment = "16"),
9fa01778
XL
2141 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2142 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2143 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2144 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2145 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2146 stable(feature = "integer_atomics_stable", since = "1.34.0"),
60c5eb7d 2147 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
83c7162d 2148 unstable(feature = "integer_atomics", issue = "32976"),
6a06907d 2149 "u16",
dc9dc135 2150 "",
83c7162d 2151 atomic_umin, atomic_umax,
a1dfa0c6 2152 2,
9fa01778 2153 "AtomicU16::new(0)",
a7813a04
XL
2154 u16 AtomicU16 ATOMIC_U16_INIT
2155}
60c5eb7d 2156#[cfg(target_has_atomic_load_store = "32")]
a7813a04 2157atomic_int! {
e74abb32 2158 cfg(target_has_atomic = "32"),
1b1a35ee 2159 cfg(target_has_atomic_equal_alignment = "32"),
9fa01778
XL
2160 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2161 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2162 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2163 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2164 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2165 stable(feature = "integer_atomics_stable", since = "1.34.0"),
60c5eb7d 2166 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
83c7162d 2167 unstable(feature = "integer_atomics", issue = "32976"),
6a06907d 2168 "i32",
dc9dc135 2169 "",
83c7162d 2170 atomic_min, atomic_max,
a1dfa0c6 2171 4,
9fa01778 2172 "AtomicI32::new(0)",
a7813a04
XL
2173 i32 AtomicI32 ATOMIC_I32_INIT
2174}
60c5eb7d 2175#[cfg(target_has_atomic_load_store = "32")]
a7813a04 2176atomic_int! {
e74abb32 2177 cfg(target_has_atomic = "32"),
1b1a35ee 2178 cfg(target_has_atomic_equal_alignment = "32"),
9fa01778
XL
2179 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2180 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2181 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2182 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2183 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2184 stable(feature = "integer_atomics_stable", since = "1.34.0"),
60c5eb7d 2185 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
83c7162d 2186 unstable(feature = "integer_atomics", issue = "32976"),
6a06907d 2187 "u32",
dc9dc135 2188 "",
83c7162d 2189 atomic_umin, atomic_umax,
a1dfa0c6 2190 4,
9fa01778 2191 "AtomicU32::new(0)",
a7813a04
XL
2192 u32 AtomicU32 ATOMIC_U32_INIT
2193}
60c5eb7d 2194#[cfg(target_has_atomic_load_store = "64")]
a7813a04 2195atomic_int! {
e74abb32 2196 cfg(target_has_atomic = "64"),
1b1a35ee 2197 cfg(target_has_atomic_equal_alignment = "64"),
9fa01778
XL
2198 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2199 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2200 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2201 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2202 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2203 stable(feature = "integer_atomics_stable", since = "1.34.0"),
60c5eb7d 2204 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
83c7162d 2205 unstable(feature = "integer_atomics", issue = "32976"),
6a06907d 2206 "i64",
dc9dc135 2207 "",
83c7162d 2208 atomic_min, atomic_max,
a1dfa0c6 2209 8,
9fa01778 2210 "AtomicI64::new(0)",
a7813a04
XL
2211 i64 AtomicI64 ATOMIC_I64_INIT
2212}
60c5eb7d 2213#[cfg(target_has_atomic_load_store = "64")]
a7813a04 2214atomic_int! {
e74abb32 2215 cfg(target_has_atomic = "64"),
1b1a35ee 2216 cfg(target_has_atomic_equal_alignment = "64"),
9fa01778
XL
2217 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2218 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2219 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2220 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2221 stable(feature = "integer_atomics_stable", since = "1.34.0"),
2222 stable(feature = "integer_atomics_stable", since = "1.34.0"),
60c5eb7d 2223 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
83c7162d 2224 unstable(feature = "integer_atomics", issue = "32976"),
6a06907d 2225 "u64",
dc9dc135 2226 "",
83c7162d 2227 atomic_umin, atomic_umax,
a1dfa0c6 2228 8,
9fa01778 2229 "AtomicU64::new(0)",
a7813a04
XL
2230 u64 AtomicU64 ATOMIC_U64_INIT
2231}
// atomic_int! expansion: defines `AtomicI128` (16-byte aligned). Still unstable —
// every stability slot is `unstable(feature = "integer_atomics", issue = "32976")`,
// and the doc examples require `#![feature(integer_atomics)]`.
e74abb32 2232#[cfg(target_has_atomic_load_store = "128")]
a1dfa0c6 2233atomic_int! {
e74abb32 2234 cfg(target_has_atomic = "128"),
1b1a35ee 2235 cfg(target_has_atomic_equal_alignment = "128"),
a1dfa0c6

XL

 2236 unstable(feature = "integer_atomics", issue = "32976"),
 2237 unstable(feature = "integer_atomics", issue = "32976"),
 2238 unstable(feature = "integer_atomics", issue = "32976"),
 2239 unstable(feature = "integer_atomics", issue = "32976"),
 2240 unstable(feature = "integer_atomics", issue = "32976"),
 2241 unstable(feature = "integer_atomics", issue = "32976"),
60c5eb7d 2242 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
9fa01778 2243 unstable(feature = "integer_atomics", issue = "32976"),
6a06907d 2244 "i128",
a1dfa0c6

XL

 2245 "#![feature(integer_atomics)]\n\n",
 2246 atomic_min, atomic_max,
 2247 16,
9fa01778 2248 "AtomicI128::new(0)",
a1dfa0c6

XL

 2249 i128 AtomicI128 ATOMIC_I128_INIT
 2250}
// atomic_int! expansion: defines `AtomicU128` (16-byte aligned), the unsigned
// counterpart of `AtomicI128` above; also gated on the unstable `integer_atomics` feature.
e74abb32 2251#[cfg(target_has_atomic_load_store = "128")]
a1dfa0c6 2252atomic_int! {
e74abb32 2253 cfg(target_has_atomic = "128"),
1b1a35ee 2254 cfg(target_has_atomic_equal_alignment = "128"),
a1dfa0c6

XL

 2255 unstable(feature = "integer_atomics", issue = "32976"),
 2256 unstable(feature = "integer_atomics", issue = "32976"),
 2257 unstable(feature = "integer_atomics", issue = "32976"),
 2258 unstable(feature = "integer_atomics", issue = "32976"),
 2259 unstable(feature = "integer_atomics", issue = "32976"),
 2260 unstable(feature = "integer_atomics", issue = "32976"),
60c5eb7d 2261 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
9fa01778 2262 unstable(feature = "integer_atomics", issue = "32976"),
6a06907d 2263 "u128",
a1dfa0c6

XL

 2264 "#![feature(integer_atomics)]\n\n",
 2265 atomic_umin, atomic_umax,
 2266 16,
9fa01778 2267 "AtomicU128::new(0)",
a1dfa0c6

XL

 2268 u128 AtomicU128 ATOMIC_U128_INIT
 2269}
29967ef6
XL
2270
// Generates `AtomicIsize` and `AtomicUsize` for each (target_pointer_width, alignment)
// pair supplied by the caller, so the pointer-sized atomics get the alignment matching
// the current target's pointer width.
2271macro_rules! atomic_int_ptr_sized {
2272 ( $($target_pointer_width:literal $align:literal)* ) => { $(
2273 #[cfg(target_has_atomic_load_store = "ptr")]
2274 #[cfg(target_pointer_width = $target_pointer_width)]
2275 atomic_int! {
2276 cfg(target_has_atomic = "ptr"),
2277 cfg(target_has_atomic_equal_alignment = "ptr"),
2278 stable(feature = "rust1", since = "1.0.0"),
2279 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2280 stable(feature = "atomic_debug", since = "1.3.0"),
2281 stable(feature = "atomic_access", since = "1.15.0"),
2282 stable(feature = "atomic_from", since = "1.23.0"),
2283 stable(feature = "atomic_nand", since = "1.27.0"),
cdc7bbd5 2284 rustc_const_stable(feature = "const_integer_atomics", since = "1.24.0"),
29967ef6 2285 stable(feature = "rust1", since = "1.0.0"),
6a06907d 2286 "isize",
29967ef6

XL

 2287 "",
2288 atomic_min, atomic_max,
2289 $align,
2290 "AtomicIsize::new(0)",
2291 isize AtomicIsize ATOMIC_ISIZE_INIT
2292 }
2293 #[cfg(target_has_atomic_load_store = "ptr")]
2294 #[cfg(target_pointer_width = $target_pointer_width)]
2295 atomic_int! {
2296 cfg(target_has_atomic = "ptr"),
2297 cfg(target_has_atomic_equal_alignment = "ptr"),
2298 stable(feature = "rust1", since = "1.0.0"),
2299 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
2300 stable(feature = "atomic_debug", since = "1.3.0"),
2301 stable(feature = "atomic_access", since = "1.15.0"),
2302 stable(feature = "atomic_from", since = "1.23.0"),
2303 stable(feature = "atomic_nand", since = "1.27.0"),
cdc7bbd5 2304 rustc_const_stable(feature = "const_integer_atomics", since = "1.24.0"),
29967ef6 2305 stable(feature = "rust1", since = "1.0.0"),
6a06907d 2306 "usize",
29967ef6

XL

 2307 "",
2308 atomic_umin, atomic_umax,
2309 $align,
2310 "AtomicUsize::new(0)",
2311 usize AtomicUsize ATOMIC_USIZE_INIT
2312 }
2313 )* };
a7813a04 2314}
29967ef6
XL
2315
// Pointer-sized atomics: 2-byte alignment on 16-bit targets, 4 on 32-bit, 8 on 64-bit.
2316atomic_int_ptr_sized! {
2317 "16" 2
2318 "32" 4
2319 "64" 8
a7813a04

XL

 2320}
2321
/// Returns the strongest failure ordering permitted for a compare-exchange whose
/// success ordering is `order`: release semantics are stripped
/// (`Release` -> `Relaxed`, `AcqRel` -> `Acquire`); the rest map to themselves.
7453a54e 2322#[inline]
e74abb32 2323#[cfg(target_has_atomic = "8")]
7453a54e

SL

 2324fn strongest_failure_ordering(order: Ordering) -> Ordering {
 2325 match order {
 2326 Release => Relaxed,
 2327 Relaxed => Relaxed,
c30ab7b3 2328 SeqCst => SeqCst,
7453a54e 2329 Acquire => Acquire,
c30ab7b3 2330 AcqRel => Acquire,
7453a54e 2331 }
1a4d82fc

JJ

 2332}
2333
/// Dispatches an atomic store of `val` to `dst` to the intrinsic matching `order`;
/// the load-only orderings `Acquire` and `AcqRel` panic (there is no acquire store).
2334#[inline]
ba9703b0 2335unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
f035d41b

XL

 2336 // SAFETY: the caller must uphold the safety contract for `atomic_store`.
 2337 unsafe {
 2338 match order {
 2339 Release => intrinsics::atomic_store_rel(dst, val),
 2340 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
 2341 SeqCst => intrinsics::atomic_store(dst, val),
 2342 Acquire => panic!("there is no such thing as an acquire store"),
 2343 AcqRel => panic!("there is no such thing as an acquire/release store"),
 2344 }
1a4d82fc

JJ

 2345 }
2346}
2347
/// Dispatches an atomic load from `dst` to the intrinsic matching `order`;
/// the store-only orderings `Release` and `AcqRel` panic (there is no release load).
2348#[inline]
ba9703b0 2349unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
f035d41b

XL

 2350 // SAFETY: the caller must uphold the safety contract for `atomic_load`.
 2351 unsafe {
 2352 match order {
 2353 Acquire => intrinsics::atomic_load_acq(dst),
 2354 Relaxed => intrinsics::atomic_load_relaxed(dst),
 2355 SeqCst => intrinsics::atomic_load(dst),
 2356 Release => panic!("there is no such thing as a release load"),
 2357 AcqRel => panic!("there is no such thing as an acquire/release load"),
 2358 }
1a4d82fc

JJ

 2359 }
2360}
2361
/// Atomic exchange: dispatches to the `atomic_xchg*` intrinsic matching `order`
/// and returns the intrinsic's result. All five orderings are valid for an RMW op,
/// so the match is exhaustive with no panicking arm.
2362#[inline]
e74abb32 2363#[cfg(target_has_atomic = "8")]
ba9703b0 2364unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
f035d41b

XL

 2365 // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
 2366 unsafe {
 2367 match order {
 2368 Acquire => intrinsics::atomic_xchg_acq(dst, val),
 2369 Release => intrinsics::atomic_xchg_rel(dst, val),
 2370 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
 2371 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
 2372 SeqCst => intrinsics::atomic_xchg(dst, val),
 2373 }
1a4d82fc

JJ

 2374 }
2375}
2376
cc61c64b 2377/// Returns the previous value (like __sync_fetch_and_add).
/// Dispatches to the `atomic_xadd*` intrinsic selected by `order` (all five
/// orderings are valid for this RMW operation).
1a4d82fc 2378#[inline]
e74abb32 2379#[cfg(target_has_atomic = "8")]
ba9703b0 2380unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
f035d41b

XL

 2381 // SAFETY: the caller must uphold the safety contract for `atomic_add`.
 2382 unsafe {
 2383 match order {
 2384 Acquire => intrinsics::atomic_xadd_acq(dst, val),
 2385 Release => intrinsics::atomic_xadd_rel(dst, val),
 2386 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
 2387 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
 2388 SeqCst => intrinsics::atomic_xadd(dst, val),
 2389 }
1a4d82fc

JJ

 2390 }
2391}
2392
cc61c64b 2393/// Returns the previous value (like __sync_fetch_and_sub).
/// Dispatches to the `atomic_xsub*` intrinsic selected by `order` (all five
/// orderings are valid for this RMW operation).
1a4d82fc 2394#[inline]
e74abb32 2395#[cfg(target_has_atomic = "8")]
ba9703b0 2396unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
f035d41b

XL

 2397 // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
 2398 unsafe {
 2399 match order {
 2400 Acquire => intrinsics::atomic_xsub_acq(dst, val),
 2401 Release => intrinsics::atomic_xsub_rel(dst, val),
 2402 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
 2403 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
 2404 SeqCst => intrinsics::atomic_xsub(dst, val),
 2405 }
1a4d82fc

JJ

 2406 }
2407}
2408
/// Strong compare-exchange: selects the intrinsic from the (success, failure)
/// ordering pair and wraps the observed value in `Ok`/`Err` according to the
/// success flag reported by the intrinsic. Invalid failure orderings
/// (`AcqRel`/`Release`, or stronger than the success ordering) panic.
2409#[inline]
e74abb32 2410#[cfg(target_has_atomic = "8")]
ba9703b0 2411unsafe fn atomic_compare_exchange<T: Copy>(
dfeec247

XL

 2412 dst: *mut T,
 2413 old: T,
 2414 new: T,
 2415 success: Ordering,
 2416 failure: Ordering,
 2417) -> Result<T, T> {
f035d41b

XL

 2418 // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
 2419 let (val, ok) = unsafe {
 2420 match (success, failure) {
 2421 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
 2422 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
 2423 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
 2424 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
 2425 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
 2426 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
 2427 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
 2428 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
 2429 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
 2430 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
 2431 (_, Release) => panic!("there is no such thing as a release failure ordering"),
 2432 _ => panic!("a failure ordering can't be stronger than a success ordering"),
 2433 }
54a0048b 2434 };
c30ab7b3 2435 if ok { Ok(val) } else { Err(val) }
7453a54e

SL

 2436}
2437
/// Weak compare-exchange: identical ordering-pair dispatch to
/// `atomic_compare_exchange` above, but using the `atomic_cxchgweak*` intrinsics
/// (the weak form, which callers typically use inside a retry loop).
7453a54e 2438#[inline]
e74abb32 2439#[cfg(target_has_atomic = "8")]
ba9703b0 2440unsafe fn atomic_compare_exchange_weak<T: Copy>(
dfeec247

XL

 2441 dst: *mut T,
 2442 old: T,
 2443 new: T,
 2444 success: Ordering,
 2445 failure: Ordering,
 2446) -> Result<T, T> {
f035d41b

XL

 2447 // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
 2448 let (val, ok) = unsafe {
 2449 match (success, failure) {
 2450 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
 2451 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
 2452 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
 2453 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
 2454 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
 2455 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
 2456 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
 2457 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
 2458 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
 2459 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
 2460 (_, Release) => panic!("there is no such thing as a release failure ordering"),
 2461 _ => panic!("a failure ordering can't be stronger than a success ordering"),
 2462 }
54a0048b 2463 };
c30ab7b3 2464 if ok { Ok(val) } else { Err(val) }
7453a54e

SL

 2465}
2466
/// Atomic bitwise AND: dispatches to the `atomic_and*` intrinsic selected by
/// `order` and returns the intrinsic's result.
1a4d82fc 2467#[inline]
e74abb32 2468#[cfg(target_has_atomic = "8")]
ba9703b0 2469unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
f035d41b

XL

 2470 // SAFETY: the caller must uphold the safety contract for `atomic_and`
 2471 unsafe {
 2472 match order {
 2473 Acquire => intrinsics::atomic_and_acq(dst, val),
 2474 Release => intrinsics::atomic_and_rel(dst, val),
 2475 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
 2476 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
 2477 SeqCst => intrinsics::atomic_and(dst, val),
 2478 }
1a4d82fc

JJ

 2479 }
2480}
2481
/// Atomic bitwise NAND: dispatches to the `atomic_nand*` intrinsic selected by
/// `order` and returns the intrinsic's result.
2c00a5a8 2482#[inline]
e74abb32 2483#[cfg(target_has_atomic = "8")]
ba9703b0 2484unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
f035d41b

XL

 2485 // SAFETY: the caller must uphold the safety contract for `atomic_nand`
 2486 unsafe {
 2487 match order {
 2488 Acquire => intrinsics::atomic_nand_acq(dst, val),
 2489 Release => intrinsics::atomic_nand_rel(dst, val),
 2490 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
 2491 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
 2492 SeqCst => intrinsics::atomic_nand(dst, val),
 2493 }
2c00a5a8

XL

 2494 }
2495}
2496
/// Atomic bitwise OR: dispatches to the `atomic_or*` intrinsic selected by
/// `order` and returns the intrinsic's result.
1a4d82fc 2497#[inline]
e74abb32 2498#[cfg(target_has_atomic = "8")]
ba9703b0 2499unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
f035d41b

XL

 2500 // SAFETY: the caller must uphold the safety contract for `atomic_or`
 2501 unsafe {
 2502 match order {
 2503 Acquire => intrinsics::atomic_or_acq(dst, val),
 2504 Release => intrinsics::atomic_or_rel(dst, val),
 2505 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
 2506 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
 2507 SeqCst => intrinsics::atomic_or(dst, val),
 2508 }
1a4d82fc

JJ

 2509 }
2510}
2511
/// Atomic bitwise XOR: dispatches to the `atomic_xor*` intrinsic selected by
/// `order` and returns the intrinsic's result.
1a4d82fc 2512#[inline]
e74abb32 2513#[cfg(target_has_atomic = "8")]
ba9703b0 2514unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
f035d41b

XL

 2515 // SAFETY: the caller must uphold the safety contract for `atomic_xor`
 2516 unsafe {
 2517 match order {
 2518 Acquire => intrinsics::atomic_xor_acq(dst, val),
 2519 Release => intrinsics::atomic_xor_rel(dst, val),
 2520 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
 2521 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
 2522 SeqCst => intrinsics::atomic_xor(dst, val),
 2523 }
1a4d82fc

JJ

 2524 }
2525}
2526
83c7162d

XL

 2527/// returns the max value (signed comparison)
/// Dispatches to the signed `atomic_max*` intrinsic selected by `order`.
 2528#[inline]
e74abb32 2529#[cfg(target_has_atomic = "8")]
ba9703b0 2530unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
f035d41b

XL

 2531 // SAFETY: the caller must uphold the safety contract for `atomic_max`
 2532 unsafe {
 2533 match order {
 2534 Acquire => intrinsics::atomic_max_acq(dst, val),
 2535 Release => intrinsics::atomic_max_rel(dst, val),
 2536 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
 2537 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
 2538 SeqCst => intrinsics::atomic_max(dst, val),
 2539 }
83c7162d

XL

 2540 }
2541}
2542
2543/// returns the min value (signed comparison)
/// Dispatches to the signed `atomic_min*` intrinsic selected by `order`.
2544#[inline]
e74abb32 2545#[cfg(target_has_atomic = "8")]
ba9703b0 2546unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
f035d41b

XL

 2547 // SAFETY: the caller must uphold the safety contract for `atomic_min`
 2548 unsafe {
 2549 match order {
 2550 Acquire => intrinsics::atomic_min_acq(dst, val),
 2551 Release => intrinsics::atomic_min_rel(dst, val),
 2552 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
 2553 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
 2554 SeqCst => intrinsics::atomic_min(dst, val),
 2555 }
83c7162d

XL

 2556 }
2557}
2558
74b04a01 2559/// returns the max value (unsigned comparison)
/// Dispatches to the unsigned `atomic_umax*` intrinsic selected by `order`.
83c7162d 2560#[inline]
e74abb32 2561#[cfg(target_has_atomic = "8")]
ba9703b0 2562unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
f035d41b

XL

 2563 // SAFETY: the caller must uphold the safety contract for `atomic_umax`
 2564 unsafe {
 2565 match order {
 2566 Acquire => intrinsics::atomic_umax_acq(dst, val),
 2567 Release => intrinsics::atomic_umax_rel(dst, val),
 2568 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
 2569 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
 2570 SeqCst => intrinsics::atomic_umax(dst, val),
 2571 }
83c7162d

XL

 2572 }
2573}
2574
74b04a01 2575/// returns the min value (unsigned comparison)
/// Dispatches to the unsigned `atomic_umin*` intrinsic selected by `order`.
83c7162d 2576#[inline]
e74abb32 2577#[cfg(target_has_atomic = "8")]
ba9703b0 2578unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
f035d41b

XL

 2579 // SAFETY: the caller must uphold the safety contract for `atomic_umin`
 2580 unsafe {
 2581 match order {
 2582 Acquire => intrinsics::atomic_umin_acq(dst, val),
 2583 Release => intrinsics::atomic_umin_rel(dst, val),
 2584 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
 2585 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
 2586 SeqCst => intrinsics::atomic_umin(dst, val),
 2587 }
83c7162d

XL

 2588 }
2589}
2590
1a4d82fc

JJ

 2591/// An atomic fence.
 2592///
7cac9316

XL

 2593/// Depending on the specified order, a fence prevents the compiler and CPU from
 2594/// reordering certain types of memory operations around it.
 2595/// That creates synchronizes-with relationships between it and atomic operations
 2596/// or fences in other threads.
 2597///
 2598/// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
 2599/// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
 2600/// exist operations X and Y, both operating on some atomic object 'M' such
1a4d82fc

JJ

 2601/// that A is sequenced before X, Y is synchronized before B and Y observes
 2602/// the change to M. This provides a happens-before dependence between A and B.
 2603///
7cac9316

XL

 2604/// ```text
 2605/// Thread 1 Thread 2
 2606///
 2607/// fence(Release); A --------------
 2608/// x.store(3, Relaxed); X --------- |
 2609/// | |
 2610/// | |
 2611/// -------------> Y if x.load(Relaxed) == 3 {
 2612/// |-------> B fence(Acquire);
 2613/// ...
 2614/// }
 2615/// ```
 2616///
32a655c1 2617/// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
1a4d82fc

JJ

 2618/// with a fence.
 2619///
32a655c1

SL

 2620/// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
 2621/// and [`Release`] semantics, participates in the global program order of the
 2622/// other [`SeqCst`] operations and/or fences.
1a4d82fc 2623///
32a655c1 2624/// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
1a4d82fc

JJ

 2625///
 2626/// # Panics
 2627///
32a655c1

SL

 2628/// Panics if `order` is [`Relaxed`].
 2629///
7cac9316

XL

 2630/// # Examples
 2631///
 2632/// ```
 2633/// use std::sync::atomic::AtomicBool;
 2634/// use std::sync::atomic::fence;
 2635/// use std::sync::atomic::Ordering;
 2636///
 2637/// // A mutual exclusion primitive based on spinlock.
 2638/// pub struct Mutex {
 2639/// flag: AtomicBool,
 2640/// }
 2641///
 2642/// impl Mutex {
 2643/// pub fn new() -> Mutex {
 2644/// Mutex {
 2645/// flag: AtomicBool::new(false),
 2646/// }
 2647/// }
 2648///
 2649/// pub fn lock(&self) {
3dfed10e 2650/// // Wait until the old value is `false`.
136023e0

XL

 2651/// while self
 2652/// .flag
 2653/// .compare_exchange_weak(false, true, Ordering::Relaxed, Ordering::Relaxed)
 2654/// .is_err()
 2655/// {}
3b2f2976 2656/// // This fence synchronizes-with store in `unlock`.
7cac9316

XL

 2657/// fence(Ordering::Acquire);
 2658/// }
 2659///
 2660/// pub fn unlock(&self) {
 2661/// self.flag.store(false, Ordering::Release);
 2662/// }
 2663/// }
 2664/// ```
1a4d82fc 2665#[inline]
85aaf69f 2666#[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 2667pub fn fence(order: Ordering) {
dfeec247 2668 // SAFETY: using an atomic fence is safe.
1a4d82fc

JJ

 2669 unsafe {
 2670 match order {
 // One fence intrinsic per ordering; `Relaxed` panics, as documented in
 // "# Panics" above.
 2671 Acquire => intrinsics::atomic_fence_acq(),
 2672 Release => intrinsics::atomic_fence_rel(),
c30ab7b3

SL

 2673 AcqRel => intrinsics::atomic_fence_acqrel(),
 2674 SeqCst => intrinsics::atomic_fence(),
 2675 Relaxed => panic!("there is no such thing as a relaxed fence"),
1a4d82fc

JJ

 2676 }
 2677 }
2678}
c1a9b12d 2679
cc61c64b

XL

 2680/// A compiler memory fence.
 2681///
3b2f2976

XL

 2682/// `compiler_fence` does not emit any machine code, but restricts the kinds
 2683/// of memory re-ordering the compiler is allowed to do. Specifically, depending on
 2684/// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
 2685/// or writes from before or after the call to the other side of the call to
 2686/// `compiler_fence`. Note that it does **not** prevent the *hardware*
 2687/// from doing such re-ordering. This is not a problem in a single-threaded,
 2688/// execution context, but when other threads may modify memory at the same
 2689/// time, stronger synchronization primitives such as [`fence`] are required.
cc61c64b

XL

 2690///
 2691/// The re-ordering prevented by the different ordering semantics are:
 2692///
 2693/// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
 2694/// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
 2695/// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
 2696/// - with [`AcqRel`], both of the above rules are enforced.
 2697///
3b2f2976

XL

 2698/// `compiler_fence` is generally only useful for preventing a thread from
 2699/// racing *with itself*. That is, if a given thread is executing one piece
 2700/// of code, and is then interrupted, and starts executing code elsewhere
 2701/// (while still in the same thread, and conceptually still on the same
 2702/// core). In traditional programs, this can only occur when a signal
 2703/// handler is registered. In more low-level code, such situations can also
 2704/// arise when handling interrupts, when implementing green threads with
 2705/// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
 2706/// discussion of [memory barriers].
 2707///
cc61c64b

XL

 2708/// # Panics
 2709///
 2710/// Panics if `order` is [`Relaxed`].
 2711///
3b2f2976

XL

 2712/// # Examples
 2713///
 2714/// Without `compiler_fence`, the `assert_eq!` in following code
 2715/// is *not* guaranteed to succeed, despite everything happening in a single thread.
 2716/// To see why, remember that the compiler is free to swap the stores to
136023e0 2717/// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
3b2f2976

XL

 2718/// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
 2719/// after `IS_READY` is updated, then the signal handler will see
 2720/// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
 2721/// Using a `compiler_fence` remedies this situation.
 2722///
 2723/// ```
 2724/// use std::sync::atomic::{AtomicBool, AtomicUsize};
3b2f2976

XL

 2725/// use std::sync::atomic::Ordering;
 2726/// use std::sync::atomic::compiler_fence;
 2727///
9fa01778

XL

 2728/// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
 2729/// static IS_READY: AtomicBool = AtomicBool::new(false);
3b2f2976

XL

 2730///
 2731/// fn main() {
 2732/// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
 2733/// // prevent earlier writes from being moved beyond this point
 2734/// compiler_fence(Ordering::Release);
 2735/// IS_READY.store(true, Ordering::Relaxed);
 2736/// }
 2737///
 2738/// fn signal_handler() {
 2739/// if IS_READY.load(Ordering::Relaxed) {
 2740/// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
 2741/// }
 2742/// }
 2743/// ```
 2744///
3b2f2976 2745/// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
cc61c64b 2746#[inline]
3b2f2976 2747#[stable(feature = "compiler_fences", since = "1.21.0")]
cc61c64b 2748pub fn compiler_fence(order: Ordering) {
dfeec247 2749 // SAFETY: using an atomic fence is safe.
cc61c64b

XL

 2750 unsafe {
 2751 match order {
 // `singlethreadfence` intrinsics constrain only compiler reordering;
 // no machine code is emitted (see the docs above). `Relaxed` panics.
 2752 Acquire => intrinsics::atomic_singlethreadfence_acq(),
 2753 Release => intrinsics::atomic_singlethreadfence_rel(),
 2754 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
 2755 SeqCst => intrinsics::atomic_singlethreadfence(),
 2756 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
cc61c64b

XL

 2757 }
 2758 }
2759}
2760
60c5eb7d 2761#[cfg(target_has_atomic_load_store = "8")]
a7813a04

XL

 2762#[stable(feature = "atomic_debug", since = "1.3.0")]
 2763impl fmt::Debug for AtomicBool {
48663c56 2764 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 // Debug-formats a `SeqCst` snapshot of the stored value.
83c7162d 2765 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
a7813a04

XL

 2766 }
 2767}
c1a9b12d 2768
60c5eb7d 2769#[cfg(target_has_atomic_load_store = "ptr")]
c1a9b12d

SL

 2770#[stable(feature = "atomic_debug", since = "1.3.0")]
 2771impl<T> fmt::Debug for AtomicPtr<T> {
48663c56 2772 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 // Debug-formats a `SeqCst` snapshot of the stored pointer.
83c7162d 2773 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
c1a9b12d

SL

 2774 }
 2775}
ff7c6d11 2776
60c5eb7d 2777#[cfg(target_has_atomic_load_store = "ptr")]
ff7c6d11

XL

 2778#[stable(feature = "atomic_pointer", since = "1.24.0")]
 2779impl<T> fmt::Pointer for AtomicPtr<T> {
48663c56 2780 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 // Pointer-formats a `SeqCst` snapshot of the stored pointer.
ff7c6d11

XL

 2781 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)
 2782 }
 2783}
5869c6ff
XL
2784
2785/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
2786///
2787/// This function is deprecated in favor of [`hint::spin_loop`].
2788///
2789/// [`hint::spin_loop`]: crate::hint::spin_loop
2790#[inline]
2791#[stable(feature = "spin_loop_hint", since = "1.24.0")]
2792#[rustc_deprecated(since = "1.51.0", reason = "use hint::spin_loop instead")]
2793pub fn spin_loop_hint() {
 // Deprecated shim kept for backwards compatibility; delegates to `spin_loop`.
2794 spin_loop()
2795}