]> git.proxmox.com Git - rustc.git/blame - src/libcore/sync/atomic.rs
New upstream version 1.23.0+dfsg1
[rustc.git] / src / libcore / sync / atomic.rs
CommitLineData
1a4d82fc
JJ
1// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2// file at the top-level directory of this distribution and at
3// http://rust-lang.org/COPYRIGHT.
4//
5// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8// option. This file may not be copied, modified, or distributed
9// except according to those terms.
10
11//! Atomic types
12//!
13//! Atomic types provide primitive shared-memory communication between
14//! threads, and are the building blocks of other concurrent
15//! types.
16//!
17//! This module defines atomic versions of a select number of primitive
cc61c64b 18//! types, including [`AtomicBool`], [`AtomicIsize`], and [`AtomicUsize`].
1a4d82fc
JJ
19//! Atomic types present operations that, when used correctly, synchronize
20//! updates between threads.
21//!
cc61c64b
XL
22//! [`AtomicBool`]: struct.AtomicBool.html
23//! [`AtomicIsize`]: struct.AtomicIsize.html
24//! [`AtomicUsize`]: struct.AtomicUsize.html
25//!
26//! Each method takes an [`Ordering`] which represents the strength of
1a4d82fc 27//! the memory barrier for that operation. These orderings are the
32a655c1 28//! same as [LLVM atomic orderings][1]. For more information see the [nomicon][2].
1a4d82fc 29//!
cc61c64b
XL
30//! [`Ordering`]: enum.Ordering.html
31//!
85aaf69f 32//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
32a655c1 33//! [2]: ../../../nomicon/atomics.html
1a4d82fc 34//!
cc61c64b 35//! Atomic variables are safe to share between threads (they implement [`Sync`])
a7813a04
XL
36//! but they do not themselves provide the mechanism for sharing and follow the
37//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
cc61c64b 38//! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
1a4d82fc
JJ
39//! atomically-reference-counted shared pointer).
40//!
cc61c64b
XL
41//! [`Sync`]: ../../marker/trait.Sync.html
42//! [arc]: ../../../std/sync/struct.Arc.html
43//!
1a4d82fc 44//! Most atomic types may be stored in static variables, initialized using
cc61c64b 45//! the provided static initializers like [`ATOMIC_BOOL_INIT`]. Atomic statics
1a4d82fc
JJ
46//! are often used for lazy global initialization.
47//!
cc61c64b 48//! [`ATOMIC_BOOL_INIT`]: constant.ATOMIC_BOOL_INIT.html
1a4d82fc
JJ
49//!
50//! # Examples
51//!
52//! A simple spinlock:
53//!
54//! ```
55//! use std::sync::Arc;
85aaf69f
SL
56//! use std::sync::atomic::{AtomicUsize, Ordering};
57//! use std::thread;
1a4d82fc
JJ
58//!
59//! fn main() {
85aaf69f 60//! let spinlock = Arc::new(AtomicUsize::new(1));
1a4d82fc
JJ
61//!
62//! let spinlock_clone = spinlock.clone();
a7813a04 63//! let thread = thread::spawn(move|| {
1a4d82fc
JJ
64//! spinlock_clone.store(0, Ordering::SeqCst);
65//! });
66//!
bd371182 67//! // Wait for the other thread to release the lock
1a4d82fc 68//! while spinlock.load(Ordering::SeqCst) != 0 {}
a7813a04
XL
69//!
70//! if let Err(panic) = thread.join() {
71//! println!("Thread had an error: {:?}", panic);
72//! }
1a4d82fc
JJ
73//! }
74//! ```
75//!
bd371182 76//! Keep a global count of live threads:
1a4d82fc
JJ
77//!
78//! ```
85aaf69f 79//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
1a4d82fc 80//!
bd371182 81//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
1a4d82fc 82//!
bd371182
AL
83//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
84//! println!("live threads: {}", old_thread_count + 1);
1a4d82fc
JJ
85//! ```
86
85aaf69f 87#![stable(feature = "rust1", since = "1.0.0")]
5bcae85e
SL
88#![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
89#![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]
1a4d82fc
JJ
90
91use self::Ordering::*;
92
1a4d82fc
JJ
93use intrinsics;
94use cell::UnsafeCell;
c1a9b12d 95use fmt;
9346a6ac 96
/// Save power or switch hyperthreads in a busy-wait spin-loop.
///
/// This function is deliberately more primitive than
/// `std::thread::yield_now` and does not directly yield to the
/// system's scheduler. In some cases it might be useful to use a
/// combination of both functions. Careful benchmarking is advised.
///
/// On some platforms this function may not do anything at all.
#[inline]
#[unstable(feature = "hint_core_should_pause", issue = "41196")]
pub fn hint_core_should_pause()
{
    // SAFETY: `pause` has no observable effect other than hinting the CPU;
    // the "memory" clobber keeps the compiler from reordering memory
    // accesses across the spin hint.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    unsafe {
        asm!("pause" ::: "memory" : "volatile");
    }

    // SAFETY: `yield` is the AArch64 equivalent spin-loop hint; same
    // reasoning as above.
    #[cfg(target_arch = "aarch64")]
    unsafe {
        asm!("yield" ::: "memory" : "volatile");
    }

    // On every other architecture this function compiles to nothing.
}
119
/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a [`bool`].
///
/// [`bool`]: ../../../std/primitive.bool.html
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    // The value is stored as a u8 (0 = false, 1 = true) inside an
    // `UnsafeCell` so it can be mutated through a shared reference by the
    // atomic intrinsics, which operate on integers rather than `bool`.
    v: UnsafeCell<u8>,
}
130
3157f602 131#[cfg(target_has_atomic = "8")]
92a42be0 132#[stable(feature = "rust1", since = "1.0.0")]
9346a6ac 133impl Default for AtomicBool {
c30ab7b3 134 /// Creates an `AtomicBool` initialized to `false`.
62682a34 135 fn default() -> Self {
a7813a04 136 Self::new(false)
9346a6ac
AL
137 }
138}
139
// Send is implicitly implemented for AtomicBool.
// SAFETY(review): sharing `AtomicBool` across threads is sound only because
// every mutation goes through an atomic intrinsic — presumably upheld by all
// methods of the impl below; the compiler cannot verify this itself.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}
144
/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    // The raw pointer lives in an `UnsafeCell` so it can be updated through
    // a shared reference; all access goes through the atomic intrinsics,
    // casting to/from `usize` (see the methods below).
    p: UnsafeCell<*mut T>,
}
153
3157f602 154#[cfg(target_has_atomic = "ptr")]
92a42be0 155#[stable(feature = "rust1", since = "1.0.0")]
d9579d0f 156impl<T> Default for AtomicPtr<T> {
9e0c209e 157 /// Creates a null `AtomicPtr<T>`.
d9579d0f
AL
158 fn default() -> AtomicPtr<T> {
159 AtomicPtr::new(::ptr::null_mut())
160 }
161}
162
// SAFETY(review): `AtomicPtr<T>` stores only the pointer value itself, never
// a `T`, and all access to it is atomic — so it is Send/Sync regardless of
// whether `T` is. The compiler cannot infer this, hence the unsafe impls.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}
169
/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
///
/// For more information see the [nomicon].
///
/// [nomicon]: ../../../nomicon/atomics.html
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    ///
    /// Corresponds to LLVM's [`Monotonic`] ordering.
    ///
    /// [`Monotonic`]: http://llvm.org/docs/Atomics.html#monotonic
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to the other threads that perform a load with [`Acquire`] ordering
    /// on the same value.
    ///
    /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with [`Release`] ordering on the same value
    /// in other threads.
    ///
    /// [`Release`]: http://llvm.org/docs/Atomics.html#release
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses [`Acquire`] ordering, and with a store
    /// [`Release`] ordering.
    ///
    /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
    /// [`Release`]: http://llvm.org/docs/Atomics.html#release
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
    // Hidden variant that prevents downstream code from matching
    // exhaustively on `Ordering`, so new orderings can be added without a
    // breaking change.
    #[doc(hidden)]
    #[unstable(feature = "future_atomic_orderings", issue = "0")]
    __Nonexhaustive,
}
224
/// An [`AtomicBool`] initialized to `false`.
///
/// Suitable for initializing `static` atomics, since `AtomicBool::new` is
/// not yet callable in constants on all compilers this file supports.
///
/// [`AtomicBool`]: struct.AtomicBool.html
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
1a4d82fc 231
#[cfg(target_has_atomic = "8")]
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_unstable(feature = "const_atomic_bool_new")]
    pub const fn new(v: bool) -> AtomicBool {
        // Stored as a u8 (0 or 1) because the atomic intrinsics operate on
        // integers, not `bool`.
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }

    /// Returns a mutable reference to the underlying [`bool`].
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// [`bool`]: ../../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = AtomicBool::new(true);
    /// assert_eq!(*some_bool.get_mut(), true);
    /// *some_bool.get_mut() = false;
    /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut bool {
        // The u8 always holds 0 or 1, so reinterpreting it as a bool is
        // valid; `&mut self` rules out concurrent access.
        unsafe { &mut *(self.v.get() as *mut bool) }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let some_bool = AtomicBool::new(true);
    /// assert_eq!(some_bool.into_inner(), true);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn into_inner(self) -> bool {
        unsafe { self.v.into_inner() != 0 }
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) != 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        unsafe {
            atomic_store(self.v.get(), val as u8, order);
        }
    }

    /// Stores a value into the bool, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`bool`]: ../../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        // Delegate to `compare_exchange` with the strongest failure ordering
        // allowed for `order`; either arm carries the previous value.
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the
    /// operation succeeds while the second describes the required ordering when the
    /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and must
    /// be equivalent or weaker than the success ordering.
    ///
    /// [`bool`]: ../../../std/primitive.bool.html
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: bool,
                            new: bool,
                            success: Ordering,
                            failure: Ordering)
                            -> Result<bool, bool> {
        // The intrinsic works on u8; map the integer result back to bool.
        match unsafe {
            atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or
    /// weaker than the success ordering.
    ///
    /// [`bool`]: ../../../std/primitive.bool.html
    /// [`compare_exchange`]: #method.compare_exchange
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange_weak(&self,
                                 current: bool,
                                 new: bool,
                                 success: Ordering,
                                 failure: Ordering)
                                 -> Result<bool, bool> {
        match unsafe {
            atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        // We can't use atomic_nand here because it can result in a bool with
        // an invalid value. This happens because the atomic operation is done
        // with an 8-bit integer internally, which would set the upper 7 bits.
        // So we just use fetch_xor or swap instead.
        if val {
            // !(x & true) == !x
            // We must invert the bool.
            self.fetch_xor(true, order)
        } else {
            // !(x & false) == true
            // We must set the bool to true.
            self.swap(true, order)
        }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
    }
}
645
3157f602 646#[cfg(target_has_atomic = "ptr")]
1a4d82fc
JJ
647impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_unstable(feature = "const_atomic_ptr_new")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }
664
    /// Returns a mutable reference to the underlying pointer.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
    /// *atomic_ptr.get_mut() = &mut 5;
    /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut *mut T {
        // `&mut self` guarantees exclusive access, so a plain (non-atomic)
        // reference into the cell is sound.
        unsafe { &mut *self.p.get() }
    }
684
    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let atomic_ptr = AtomicPtr::new(&mut 5);
    /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn into_inner(self) -> *mut T {
        unsafe { self.p.into_inner() }
    }
703
    /// Loads a value from the pointer.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        // The intrinsics work on integers, so round-trip the pointer
        // through usize (same size on all supported targets).
        unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
    }
732
    /// Stores a value into the pointer.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe {
            atomic_store(self.p.get() as *mut usize, ptr as usize, order);
        }
    }
766
    /// Stores a value into the pointer, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
    }
791
c1a9b12d 792 /// Stores a value into the pointer if the current value is the same as the `current` value.
1a4d82fc 793 ///
c1a9b12d
SL
794 /// The return value is always the previous value. If it is equal to `current`, then the value
795 /// was updated.
1a4d82fc 796 ///
32a655c1
SL
797 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
798 /// ordering of this operation.
799 ///
800 /// [`Ordering`]: enum.Ordering.html
1a4d82fc
JJ
801 ///
802 /// # Examples
803 ///
804 /// ```
805 /// use std::sync::atomic::{AtomicPtr, Ordering};
806 ///
85aaf69f 807 /// let ptr = &mut 5;
1a4d82fc
JJ
808 /// let some_ptr = AtomicPtr::new(ptr);
809 ///
85aaf69f
SL
810 /// let other_ptr = &mut 10;
811 /// let another_ptr = &mut 10;
1a4d82fc
JJ
812 ///
813 /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
814 /// ```
815 #[inline]
85aaf69f 816 #[stable(feature = "rust1", since = "1.0.0")]
c1a9b12d 817 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
54a0048b
SL
818 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
819 Ok(x) => x,
820 Err(x) => x,
821 }
7453a54e
SL
822 }
823
    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if
    /// the operation succeeds while the second describes the required ordering when
    /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`]
    /// and must be equivalent or weaker than the success ordering.
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
    ///                                       Ordering::SeqCst, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: *mut T,
                            new: *mut T,
                            success: Ordering,
                            failure: Ordering)
                            -> Result<*mut T, *mut T> {
        unsafe {
            // The intrinsic works on usize; cast the pointers in and the
            // result back out.
            let res = atomic_compare_exchange(self.p.get() as *mut usize,
                                              current as usize,
                                              new as usize,
                                              success,
                                              failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }
7453a54e
SL
873
874 /// Stores a value into the pointer if the current value is the same as the `current` value.
875 ///
32a655c1 876 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
7453a54e 877 /// comparison succeeds, which can result in more efficient code on some platforms. The
54a0048b
SL
878 /// return value is a result indicating whether the new value was written and containing the
879 /// previous value.
7453a54e 880 ///
32a655c1 881 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
7453a54e
SL
882 /// ordering of this operation. The first describes the required ordering if the operation
883 /// succeeds while the second describes the required ordering when the operation fails. The
32a655c1
SL
884 /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or
885 /// weaker than the success ordering.
886 ///
887 /// [`compare_exchange`]: #method.compare_exchange
888 /// [`Ordering`]: enum.Ordering.html
889 /// [`Release`]: enum.Ordering.html#variant.Release
890 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
7453a54e
SL
891 ///
892 /// # Examples
893 ///
894 /// ```
7453a54e
SL
895 /// use std::sync::atomic::{AtomicPtr, Ordering};
896 ///
897 /// let some_ptr = AtomicPtr::new(&mut 5);
898 ///
899 /// let new = &mut 10;
900 /// let mut old = some_ptr.load(Ordering::Relaxed);
901 /// loop {
54a0048b
SL
902 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
903 /// Ok(_) => break,
904 /// Err(x) => old = x,
7453a54e
SL
905 /// }
906 /// }
907 /// ```
908 #[inline]
a7813a04 909 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
7453a54e
SL
910 pub fn compare_exchange_weak(&self,
911 current: *mut T,
912 new: *mut T,
913 success: Ordering,
c30ab7b3
SL
914 failure: Ordering)
915 -> Result<*mut T, *mut T> {
54a0048b
SL
916 unsafe {
917 let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
918 current as usize,
919 new as usize,
920 success,
921 failure);
922 match res {
923 Ok(x) => Ok(x as *mut T),
924 Err(x) => Err(x as *mut T),
925 }
926 }
7453a54e
SL
927 }
928}
929
abe05a73
XL
930#[cfg(target_has_atomic = "ptr")]
931#[stable(feature = "atomic_from", since = "1.23.0")]
932impl<T> From<*mut T> for AtomicPtr<T> {
933 #[inline]
934 fn from(p: *mut T) -> Self { Self::new(p) }
935}
936
#[cfg(target_has_atomic = "ptr")]
// Generates one atomic integer type (struct, const initializer, trait impls
// and the full method surface) per invocation.
//
// FIX: the doc examples of `fetch_and`, `fetch_or` and `fetch_xor` previously
// never closed their ``` code fences, which broke rustdoc rendering and pulled
// the following attributes into the doctest. The closing fences are added.
macro_rules! atomic_int {
    ($stable:meta, $const_unstable:meta,
     $stable_cxchg:meta,
     $stable_debug:meta,
     $stable_access:meta,
     $s_int_type:expr, $int_ref:expr,
     $int_type:ident $atomic_type:ident $atomic_init:ident) => {
        /// An integer type which can be safely shared between threads.
        ///
        /// This type has the same in-memory representation as the underlying
        /// integer type, [`
        #[doc = $s_int_type]
        /// `](
        #[doc = $int_ref]
        /// ). For more about the differences between atomic types and
        /// non-atomic types, please see the [module-level documentation].
        ///
        /// Please note that examples are shared between atomic variants of
        /// primitive integer types, so it's normal that they are all
        /// demonstrating [`AtomicIsize`].
        ///
        /// [module-level documentation]: index.html
        /// [`AtomicIsize`]: struct.AtomicIsize.html
        #[$stable]
        pub struct $atomic_type {
            v: UnsafeCell<$int_type>,
        }

        /// An atomic integer initialized to `0`.
        #[$stable]
        pub const $atomic_init: $atomic_type = $atomic_type::new(0);

        #[$stable]
        impl Default for $atomic_type {
            fn default() -> Self {
                Self::new(Default::default())
            }
        }

        #[stable(feature = "atomic_from", since = "1.23.0")]
        impl From<$int_type> for $atomic_type {
            #[inline]
            fn from(v: $int_type) -> Self { Self::new(v) }
        }

        #[$stable_debug]
        impl fmt::Debug for $atomic_type {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($atomic_type))
                    .field(&self.load(Ordering::SeqCst))
                    .finish()
            }
        }

        // Send is implicitly implemented.
        #[$stable]
        unsafe impl Sync for $atomic_type {}

        impl $atomic_type {
            /// Creates a new atomic integer.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let atomic_forty_two = AtomicIsize::new(42);
            /// ```
            #[inline]
            #[$stable]
            #[$const_unstable]
            pub const fn new(v: $int_type) -> Self {
                $atomic_type {v: UnsafeCell::new(v)}
            }

            /// Returns a mutable reference to the underlying integer.
            ///
            /// This is safe because the mutable reference guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let mut some_isize = AtomicIsize::new(10);
            /// assert_eq!(*some_isize.get_mut(), 10);
            /// *some_isize.get_mut() = 5;
            /// assert_eq!(some_isize.load(Ordering::SeqCst), 5);
            /// ```
            #[inline]
            #[$stable_access]
            pub fn get_mut(&mut self) -> &mut $int_type {
                unsafe { &mut *self.v.get() }
            }

            /// Consumes the atomic and returns the contained value.
            ///
            /// This is safe because passing `self` by value guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let some_isize = AtomicIsize::new(5);
            /// assert_eq!(some_isize.into_inner(), 5);
            /// ```
            #[inline]
            #[$stable_access]
            pub fn into_inner(self) -> $int_type {
                unsafe { self.v.into_inner() }
            }

            /// Loads a value from the atomic integer.
            ///
            /// `load` takes an [`Ordering`] argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Panics
            ///
            /// Panics if `order` is [`Release`] or [`AcqRel`].
            ///
            /// [`Ordering`]: enum.Ordering.html
            /// [`Release`]: enum.Ordering.html#variant.Release
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
            /// ```
            #[inline]
            #[$stable]
            pub fn load(&self, order: Ordering) -> $int_type {
                unsafe { atomic_load(self.v.get(), order) }
            }

            /// Stores a value into the atomic integer.
            ///
            /// `store` takes an [`Ordering`] argument which describes the memory ordering of this
            /// operation.
            ///
            /// [`Ordering`]: enum.Ordering.html
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// some_isize.store(10, Ordering::Relaxed);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            ///
            /// # Panics
            ///
            /// Panics if `order` is [`Acquire`] or [`AcqRel`].
            ///
            /// [`Acquire`]: enum.Ordering.html#variant.Acquire
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            #[inline]
            #[$stable]
            pub fn store(&self, val: $int_type, order: Ordering) {
                unsafe { atomic_store(self.v.get(), val, order); }
            }

            /// Stores a value into the atomic integer, returning the previous value.
            ///
            /// `swap` takes an [`Ordering`] argument which describes the memory ordering of this
            /// operation.
            ///
            /// [`Ordering`]: enum.Ordering.html
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
            /// ```
            #[inline]
            #[$stable]
            pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_swap(self.v.get(), val, order) }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is always the previous value. If it is equal to `current`, then the
            /// value was updated.
            ///
            /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
            /// ordering of this operation.
            ///
            /// [`Ordering`]: enum.Ordering.html
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[inline]
            #[$stable]
            pub fn compare_and_swap(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    order: Ordering) -> $int_type {
                match self.compare_exchange(current,
                                            new,
                                            order,
                                            strongest_failure_ordering(order)) {
                    Ok(x) => x,
                    Err(x) => x,
                }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is a result indicating whether the new value was written and
            /// containing the previous value. On success this value is guaranteed to be equal to
            /// `current`.
            ///
            /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
            /// ordering of this operation. The first describes the required ordering if
            /// the operation succeeds while the second describes the required ordering when
            /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
            /// must be equivalent or weaker than the success ordering.
            ///
            /// [`Ordering`]: enum.Ordering.html
            /// [`Release`]: enum.Ordering.html#variant.Release
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_exchange(5, 10,
            ///                                        Ordering::Acquire,
            ///                                        Ordering::Relaxed),
            ///            Ok(5));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_exchange(6, 12,
            ///                                        Ordering::SeqCst,
            ///                                        Ordering::Acquire),
            ///            Err(10));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[inline]
            #[$stable_cxchg]
            pub fn compare_exchange(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    success: Ordering,
                                    failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even
            /// when the comparison succeeds, which can result in more efficient code on some
            /// platforms. The return value is a result indicating whether the new value was
            /// written and containing the previous value.
            ///
            /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
            /// ordering of this operation. The first describes the required ordering if the
            /// operation succeeds while the second describes the required ordering when the
            /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
            /// must be equivalent or weaker than the success ordering.
            ///
            /// [`compare_exchange`]: #method.compare_exchange
            /// [`Ordering`]: enum.Ordering.html
            /// [`Release`]: enum.Ordering.html#variant.Release
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let val = AtomicIsize::new(4);
            ///
            /// let mut old = val.load(Ordering::Relaxed);
            /// loop {
            ///     let new = old * 2;
            ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
            ///         Ok(_) => break,
            ///         Err(x) => old = x,
            ///     }
            /// }
            /// ```
            #[inline]
            #[$stable_cxchg]
            pub fn compare_exchange_weak(&self,
                                         current: $int_type,
                                         new: $int_type,
                                         success: Ordering,
                                         failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe {
                    atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
                }
            }

            /// Adds to the current value, returning the previous value.
            ///
            /// This operation wraps around on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), 10);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_add(self.v.get(), val, order) }
            }

            /// Subtracts from the current value, returning the previous value.
            ///
            /// This operation wraps around on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), -10);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_sub(self.v.get(), val, order) }
            }

            /// Bitwise "and" with the current value.
            ///
            /// Performs a bitwise "and" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// Returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_and(self.v.get(), val, order) }
            }

            /// Bitwise "or" with the current value.
            ///
            /// Performs a bitwise "or" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// Returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_or(self.v.get(), val, order) }
            }

            /// Bitwise "xor" with the current value.
            ///
            /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// Returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_xor(self.v.get(), val, order) }
            }
        }
    }
}
1368
1369#[cfg(target_has_atomic = "8")]
1370atomic_int! {
1371 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1372 rustc_const_unstable(feature = "const_atomic_i8_new"),
a7813a04
XL
1373 unstable(feature = "integer_atomics", issue = "32976"),
1374 unstable(feature = "integer_atomics", issue = "32976"),
9e0c209e 1375 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1376 "i8", "../../../std/primitive.i8.html",
a7813a04
XL
1377 i8 AtomicI8 ATOMIC_I8_INIT
1378}
1379#[cfg(target_has_atomic = "8")]
1380atomic_int! {
1381 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1382 rustc_const_unstable(feature = "const_atomic_u8_new"),
a7813a04
XL
1383 unstable(feature = "integer_atomics", issue = "32976"),
1384 unstable(feature = "integer_atomics", issue = "32976"),
9e0c209e 1385 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1386 "u8", "../../../std/primitive.u8.html",
a7813a04
XL
1387 u8 AtomicU8 ATOMIC_U8_INIT
1388}
1389#[cfg(target_has_atomic = "16")]
1390atomic_int! {
1391 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1392 rustc_const_unstable(feature = "const_atomic_i16_new"),
a7813a04
XL
1393 unstable(feature = "integer_atomics", issue = "32976"),
1394 unstable(feature = "integer_atomics", issue = "32976"),
9e0c209e 1395 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1396 "i16", "../../../std/primitive.i16.html",
a7813a04
XL
1397 i16 AtomicI16 ATOMIC_I16_INIT
1398}
1399#[cfg(target_has_atomic = "16")]
1400atomic_int! {
1401 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1402 rustc_const_unstable(feature = "const_atomic_u16_new"),
a7813a04
XL
1403 unstable(feature = "integer_atomics", issue = "32976"),
1404 unstable(feature = "integer_atomics", issue = "32976"),
9e0c209e 1405 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1406 "u16", "../../../std/primitive.u16.html",
a7813a04
XL
1407 u16 AtomicU16 ATOMIC_U16_INIT
1408}
1409#[cfg(target_has_atomic = "32")]
1410atomic_int! {
1411 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1412 rustc_const_unstable(feature = "const_atomic_i32_new"),
a7813a04
XL
1413 unstable(feature = "integer_atomics", issue = "32976"),
1414 unstable(feature = "integer_atomics", issue = "32976"),
9e0c209e 1415 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1416 "i32", "../../../std/primitive.i32.html",
a7813a04
XL
1417 i32 AtomicI32 ATOMIC_I32_INIT
1418}
1419#[cfg(target_has_atomic = "32")]
1420atomic_int! {
1421 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1422 rustc_const_unstable(feature = "const_atomic_u32_new"),
a7813a04
XL
1423 unstable(feature = "integer_atomics", issue = "32976"),
1424 unstable(feature = "integer_atomics", issue = "32976"),
9e0c209e 1425 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1426 "u32", "../../../std/primitive.u32.html",
a7813a04
XL
1427 u32 AtomicU32 ATOMIC_U32_INIT
1428}
1429#[cfg(target_has_atomic = "64")]
1430atomic_int! {
1431 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1432 rustc_const_unstable(feature = "const_atomic_i64_new"),
a7813a04
XL
1433 unstable(feature = "integer_atomics", issue = "32976"),
1434 unstable(feature = "integer_atomics", issue = "32976"),
9e0c209e 1435 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1436 "i64", "../../../std/primitive.i64.html",
a7813a04
XL
1437 i64 AtomicI64 ATOMIC_I64_INIT
1438}
1439#[cfg(target_has_atomic = "64")]
1440atomic_int! {
1441 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1442 rustc_const_unstable(feature = "const_atomic_u64_new"),
a7813a04
XL
1443 unstable(feature = "integer_atomics", issue = "32976"),
1444 unstable(feature = "integer_atomics", issue = "32976"),
9e0c209e 1445 unstable(feature = "integer_atomics", issue = "32976"),
ea8adc8c 1446 "u64", "../../../std/primitive.u64.html",
a7813a04
XL
1447 u64 AtomicU64 ATOMIC_U64_INIT
1448}
3157f602 1449#[cfg(target_has_atomic = "ptr")]
a7813a04
XL
1450atomic_int!{
1451 stable(feature = "rust1", since = "1.0.0"),
ea8adc8c 1452 rustc_const_unstable(feature = "const_atomic_isize_new"),
a7813a04
XL
1453 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
1454 stable(feature = "atomic_debug", since = "1.3.0"),
476ff2be 1455 stable(feature = "atomic_access", since = "1.15.0"),
ea8adc8c 1456 "isize", "../../../std/primitive.isize.html",
a7813a04
XL
1457 isize AtomicIsize ATOMIC_ISIZE_INIT
1458}
3157f602 1459#[cfg(target_has_atomic = "ptr")]
a7813a04
XL
1460atomic_int!{
1461 stable(feature = "rust1", since = "1.0.0"),
ea8adc8c 1462 rustc_const_unstable(feature = "const_atomic_usize_new"),
a7813a04
XL
1463 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
1464 stable(feature = "atomic_debug", since = "1.3.0"),
476ff2be 1465 stable(feature = "atomic_access", since = "1.15.0"),
ea8adc8c 1466 "usize", "../../../std/primitive.usize.html",
a7813a04
XL
1467 usize AtomicUsize ATOMIC_USIZE_INIT
1468}
1469
7453a54e
SL
1470#[inline]
1471fn strongest_failure_ordering(order: Ordering) -> Ordering {
1472 match order {
1473 Release => Relaxed,
1474 Relaxed => Relaxed,
c30ab7b3 1475 SeqCst => SeqCst,
7453a54e 1476 Acquire => Acquire,
c30ab7b3
SL
1477 AcqRel => Acquire,
1478 __Nonexhaustive => __Nonexhaustive,
7453a54e 1479 }
1a4d82fc
JJ
1480}
1481
1482#[inline]
7453a54e 1483unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
1a4d82fc
JJ
1484 match order {
1485 Release => intrinsics::atomic_store_rel(dst, val),
1486 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
c30ab7b3 1487 SeqCst => intrinsics::atomic_store(dst, val),
1a4d82fc 1488 Acquire => panic!("there is no such thing as an acquire store"),
c30ab7b3
SL
1489 AcqRel => panic!("there is no such thing as an acquire/release store"),
1490 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1491 }
1492}
1493
1494#[inline]
7453a54e 1495unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
1a4d82fc
JJ
1496 match order {
1497 Acquire => intrinsics::atomic_load_acq(dst),
1498 Relaxed => intrinsics::atomic_load_relaxed(dst),
c30ab7b3 1499 SeqCst => intrinsics::atomic_load(dst),
1a4d82fc 1500 Release => panic!("there is no such thing as a release load"),
c30ab7b3
SL
1501 AcqRel => panic!("there is no such thing as an acquire/release load"),
1502 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1503 }
1504}
1505
1506#[inline]
1a4d82fc
JJ
1507unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
1508 match order {
1509 Acquire => intrinsics::atomic_xchg_acq(dst, val),
1510 Release => intrinsics::atomic_xchg_rel(dst, val),
c30ab7b3 1511 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
1a4d82fc 1512 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
c30ab7b3
SL
1513 SeqCst => intrinsics::atomic_xchg(dst, val),
1514 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1515 }
1516}
1517
cc61c64b 1518/// Returns the previous value (like __sync_fetch_and_add).
1a4d82fc 1519#[inline]
1a4d82fc
JJ
1520unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
1521 match order {
1522 Acquire => intrinsics::atomic_xadd_acq(dst, val),
1523 Release => intrinsics::atomic_xadd_rel(dst, val),
c30ab7b3 1524 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
1a4d82fc 1525 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
c30ab7b3
SL
1526 SeqCst => intrinsics::atomic_xadd(dst, val),
1527 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1528 }
1529}
1530
cc61c64b 1531/// Returns the previous value (like __sync_fetch_and_sub).
1a4d82fc 1532#[inline]
1a4d82fc
JJ
1533unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
1534 match order {
1535 Acquire => intrinsics::atomic_xsub_acq(dst, val),
1536 Release => intrinsics::atomic_xsub_rel(dst, val),
c30ab7b3 1537 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
1a4d82fc 1538 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
c30ab7b3
SL
1539 SeqCst => intrinsics::atomic_xsub(dst, val),
1540 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1541 }
1542}
1543
1544#[inline]
7453a54e
SL
1545unsafe fn atomic_compare_exchange<T>(dst: *mut T,
1546 old: T,
1547 new: T,
1548 success: Ordering,
c30ab7b3
SL
1549 failure: Ordering)
1550 -> Result<T, T> {
54a0048b 1551 let (val, ok) = match (success, failure) {
7453a54e
SL
1552 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
1553 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
c30ab7b3 1554 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
7453a54e 1555 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
c30ab7b3 1556 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
7453a54e 1557 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
c30ab7b3
SL
1558 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
1559 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
1560 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
1561 (__Nonexhaustive, _) => panic!("invalid memory ordering"),
1562 (_, __Nonexhaustive) => panic!("invalid memory ordering"),
54a0048b
SL
1563 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
1564 (_, Release) => panic!("there is no such thing as a release failure ordering"),
7453a54e 1565 _ => panic!("a failure ordering can't be stronger than a success ordering"),
54a0048b 1566 };
c30ab7b3 1567 if ok { Ok(val) } else { Err(val) }
7453a54e
SL
1568}
1569
7453a54e 1570#[inline]
7453a54e
SL
1571unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
1572 old: T,
1573 new: T,
1574 success: Ordering,
c30ab7b3
SL
1575 failure: Ordering)
1576 -> Result<T, T> {
54a0048b 1577 let (val, ok) = match (success, failure) {
7453a54e
SL
1578 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
1579 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
c30ab7b3 1580 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
7453a54e 1581 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
c30ab7b3 1582 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
7453a54e 1583 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
c30ab7b3
SL
1584 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
1585 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
1586 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
1587 (__Nonexhaustive, _) => panic!("invalid memory ordering"),
1588 (_, __Nonexhaustive) => panic!("invalid memory ordering"),
54a0048b
SL
1589 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
1590 (_, Release) => panic!("there is no such thing as a release failure ordering"),
7453a54e 1591 _ => panic!("a failure ordering can't be stronger than a success ordering"),
54a0048b 1592 };
c30ab7b3 1593 if ok { Ok(val) } else { Err(val) }
7453a54e
SL
1594}
1595
1a4d82fc 1596#[inline]
1a4d82fc
JJ
1597unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
1598 match order {
1599 Acquire => intrinsics::atomic_and_acq(dst, val),
1600 Release => intrinsics::atomic_and_rel(dst, val),
c30ab7b3 1601 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
1a4d82fc 1602 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
c30ab7b3
SL
1603 SeqCst => intrinsics::atomic_and(dst, val),
1604 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1605 }
1606}
1607
1a4d82fc 1608#[inline]
1a4d82fc
JJ
1609unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
1610 match order {
1611 Acquire => intrinsics::atomic_or_acq(dst, val),
1612 Release => intrinsics::atomic_or_rel(dst, val),
c30ab7b3 1613 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
1a4d82fc 1614 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
c30ab7b3
SL
1615 SeqCst => intrinsics::atomic_or(dst, val),
1616 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1617 }
1618}
1619
1a4d82fc 1620#[inline]
1a4d82fc
JJ
1621unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
1622 match order {
1623 Acquire => intrinsics::atomic_xor_acq(dst, val),
1624 Release => intrinsics::atomic_xor_rel(dst, val),
c30ab7b3 1625 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
1a4d82fc 1626 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
c30ab7b3
SL
1627 SeqCst => intrinsics::atomic_xor(dst, val),
1628 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1629 }
1630}
1631
1a4d82fc
JJ
/// An atomic fence.
///
/// Depending on the specified order, a fence prevents the compiler and CPU from
/// reordering certain types of memory operations around it.
/// That creates synchronizes-with relationships between it and atomic operations
/// or fences in other threads.
///
/// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
/// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
/// exist operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is synchronized before B and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// ```text
///     Thread 1                                          Thread 2
///
/// fence(Release);      A --------------
/// x.store(3, Relaxed); X ---------    |
///                                |    |
///                                |    |
///                                -------------> Y  if x.load(Relaxed) == 3 {
///                                     |-------> B      fence(Acquire);
///                                                      ...
///                                                  }
/// ```
///
/// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
/// with a fence.
///
/// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
/// and [`Release`] semantics, participates in the global program order of the
/// other [`SeqCst`] operations and/or fences.
///
/// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::fence;
/// use std::sync::atomic::Ordering;
///
/// // A mutual exclusion primitive based on spinlock.
/// pub struct Mutex {
///     flag: AtomicBool,
/// }
///
/// impl Mutex {
///     pub fn new() -> Mutex {
///         Mutex {
///             flag: AtomicBool::new(false),
///         }
///     }
///
///     pub fn lock(&self) {
///         // `compare_and_swap` returns the *previous* value, so the lock is
///         // acquired exactly when it returns `false` (it was unlocked and we
///         // flipped it to locked). Keep spinning while it returns `true`.
///         while self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
///         // This fence synchronizes-with store in `unlock`.
///         fence(Ordering::Acquire);
///     }
///
///     pub fn unlock(&self) {
///         self.flag.store(false, Ordering::Release);
///     }
/// }
/// ```
///
/// [`Ordering`]: enum.Ordering.html
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            // A relaxed fence is a no-op by definition; reject it loudly.
            Relaxed => panic!("there is no such thing as a relaxed fence"),
            __Nonexhaustive => panic!("invalid memory ordering"),
        }
    }
}
c1a9b12d 1722
c1a9b12d 1723
cc61c64b
XL
/// A compiler memory fence.
///
/// `compiler_fence` does not emit any machine code, but restricts the kinds
/// of memory re-ordering the compiler is allowed to do. Specifically, depending on
/// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
/// or writes from before or after the call to the other side of the call to
/// `compiler_fence`. Note that it does **not** prevent the *hardware*
/// from doing such re-ordering. This is not a problem in a single-threaded
/// execution context, but when other threads may modify memory at the same
/// time, stronger synchronization primitives such as [`fence`] are required.
///
/// The re-ordering prevented by the different ordering semantics are:
///
/// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
/// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
/// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
/// - with [`AcqRel`], both of the above rules are enforced.
///
/// `compiler_fence` is generally only useful for preventing a thread from
/// racing *with itself*. That is, it applies when a given thread is executing one piece
/// of code, and is then interrupted, and starts executing code elsewhere
/// (while still in the same thread, and conceptually still on the same
/// core). In traditional programs, this can only occur when a signal
/// handler is registered. In more low-level code, such situations can also
/// arise when handling interrupts, when implementing green threads with
/// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
/// discussion of [memory barriers].
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// Without `compiler_fence`, the `assert_eq!` in the following code
/// is *not* guaranteed to succeed, despite everything happening in a single thread.
/// To see why, remember that the compiler is free to swap the stores to
/// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
/// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
/// after `IS_READY` is updated, then the signal handler will see
/// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
/// Using a `compiler_fence` remedies this situation.
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicUsize};
/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
/// use std::sync::atomic::Ordering;
/// use std::sync::atomic::compiler_fence;
///
/// static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
/// static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
///
/// fn main() {
///     IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
///     // prevent earlier writes from being moved beyond this point
///     compiler_fence(Ordering::Release);
///     IS_READY.store(true, Ordering::Relaxed);
/// }
///
/// fn signal_handler() {
///     if IS_READY.load(Ordering::Relaxed) {
///         assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
///     }
/// }
/// ```
///
/// [`fence`]: fn.fence.html
/// [`Ordering`]: enum.Ordering.html
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
#[inline]
#[stable(feature = "compiler_fences", since = "1.21.0")]
pub fn compiler_fence(order: Ordering) {
    unsafe {
        match order {
            // "singlethreadfence" intrinsics constrain only the compiler,
            // not the hardware.
            Acquire => intrinsics::atomic_singlethreadfence_acq(),
            Release => intrinsics::atomic_singlethreadfence_rel(),
            AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
            SeqCst => intrinsics::atomic_singlethreadfence(),
            Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
            __Nonexhaustive => panic!("invalid memory ordering"),
        }
    }
}
1812
1813
3157f602 1814#[cfg(target_has_atomic = "8")]
a7813a04
XL
1815#[stable(feature = "atomic_debug", since = "1.3.0")]
1816impl fmt::Debug for AtomicBool {
1817 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1818 f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish()
1819 }
1820}
c1a9b12d 1821
3157f602 1822#[cfg(target_has_atomic = "ptr")]
c1a9b12d
SL
1823#[stable(feature = "atomic_debug", since = "1.3.0")]
1824impl<T> fmt::Debug for AtomicPtr<T> {
1825 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1826 f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
1827 }
1828}