]> git.proxmox.com Git - rustc.git/blame - src/libcore/sync/atomic.rs
New upstream version 1.18.0+dfsg1
[rustc.git] / src / libcore / sync / atomic.rs
CommitLineData
1a4d82fc
JJ
1// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2// file at the top-level directory of this distribution and at
3// http://rust-lang.org/COPYRIGHT.
4//
5// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8// option. This file may not be copied, modified, or distributed
9// except according to those terms.
10
11//! Atomic types
12//!
13//! Atomic types provide primitive shared-memory communication between
14//! threads, and are the building blocks of other concurrent
15//! types.
16//!
17//! This module defines atomic versions of a select number of primitive
cc61c64b 18//! types, including [`AtomicBool`], [`AtomicIsize`], and [`AtomicUsize`].
1a4d82fc
JJ
19//! Atomic types present operations that, when used correctly, synchronize
20//! updates between threads.
21//!
cc61c64b
XL
22//! [`AtomicBool`]: struct.AtomicBool.html
23//! [`AtomicIsize`]: struct.AtomicIsize.html
24//! [`AtomicUsize`]: struct.AtomicUsize.html
25//!
26//! Each method takes an [`Ordering`] which represents the strength of
1a4d82fc 27//! the memory barrier for that operation. These orderings are the
32a655c1 28//! same as [LLVM atomic orderings][1]. For more information see the [nomicon][2].
1a4d82fc 29//!
cc61c64b
XL
30//! [`Ordering`]: enum.Ordering.html
31//!
85aaf69f 32//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
32a655c1 33//! [2]: ../../../nomicon/atomics.html
1a4d82fc 34//!
cc61c64b 35//! Atomic variables are safe to share between threads (they implement [`Sync`])
a7813a04
XL
36//! but they do not themselves provide the mechanism for sharing and follow the
37//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
cc61c64b 38//! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
1a4d82fc
JJ
39//! atomically-reference-counted shared pointer).
40//!
cc61c64b
XL
41//! [`Sync`]: ../../marker/trait.Sync.html
42//! [arc]: ../../../std/sync/struct.Arc.html
43//!
1a4d82fc 44//! Most atomic types may be stored in static variables, initialized using
cc61c64b 45//! the provided static initializers like [`ATOMIC_BOOL_INIT`]. Atomic statics
1a4d82fc
JJ
46//! are often used for lazy global initialization.
47//!
cc61c64b 48//! [`ATOMIC_BOOL_INIT`]: constant.ATOMIC_BOOL_INIT.html
1a4d82fc
JJ
49//!
50//! # Examples
51//!
52//! A simple spinlock:
53//!
54//! ```
55//! use std::sync::Arc;
85aaf69f
SL
56//! use std::sync::atomic::{AtomicUsize, Ordering};
57//! use std::thread;
1a4d82fc
JJ
58//!
59//! fn main() {
85aaf69f 60//! let spinlock = Arc::new(AtomicUsize::new(1));
1a4d82fc
JJ
61//!
62//! let spinlock_clone = spinlock.clone();
a7813a04 63//! let thread = thread::spawn(move|| {
1a4d82fc
JJ
64//! spinlock_clone.store(0, Ordering::SeqCst);
65//! });
66//!
bd371182 67//! // Wait for the other thread to release the lock
1a4d82fc 68//! while spinlock.load(Ordering::SeqCst) != 0 {}
a7813a04
XL
69//!
70//! if let Err(panic) = thread.join() {
71//! println!("Thread had an error: {:?}", panic);
72//! }
1a4d82fc
JJ
73//! }
74//! ```
75//!
bd371182 76//! Keep a global count of live threads:
1a4d82fc
JJ
77//!
78//! ```
85aaf69f 79//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
1a4d82fc 80//!
bd371182 81//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
1a4d82fc 82//!
bd371182
AL
83//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
84//! println!("live threads: {}", old_thread_count + 1);
1a4d82fc
JJ
85//! ```
86
85aaf69f 87#![stable(feature = "rust1", since = "1.0.0")]
5bcae85e
SL
88#![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
89#![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]
1a4d82fc
JJ
90
91use self::Ordering::*;
92
1a4d82fc
JJ
93use intrinsics;
94use cell::UnsafeCell;
c1a9b12d 95use fmt;
9346a6ac 96
/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `bool`.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    // Stored as a `u8` rather than a `bool` because the atomic intrinsics
    // operate on integer types; only the values 0 and 1 are ever written
    // (see `new`, `store` and the `fetch_*` methods below).
    v: UnsafeCell<u8>,
}
105
3157f602 106#[cfg(target_has_atomic = "8")]
92a42be0 107#[stable(feature = "rust1", since = "1.0.0")]
9346a6ac 108impl Default for AtomicBool {
c30ab7b3 109 /// Creates an `AtomicBool` initialized to `false`.
62682a34 110 fn default() -> Self {
a7813a04 111 Self::new(false)
9346a6ac
AL
112 }
113}
114
// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
// SAFETY: every access to the contained `u8` goes through an atomic
// operation (see the methods on `AtomicBool`), so sharing `&AtomicBool`
// across threads cannot cause a data race.
unsafe impl Sync for AtomicBool {}
119
/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    // The pointer value itself is the atomic datum; this type never
    // dereferences it.
    p: UnsafeCell<*mut T>,
}
128
3157f602 129#[cfg(target_has_atomic = "ptr")]
92a42be0 130#[stable(feature = "rust1", since = "1.0.0")]
d9579d0f 131impl<T> Default for AtomicPtr<T> {
9e0c209e 132 /// Creates a null `AtomicPtr<T>`.
d9579d0f
AL
133 fn default() -> AtomicPtr<T> {
134 AtomicPtr::new(::ptr::null_mut())
135 }
136}
137
// SAFETY: unlike a bare `*mut T`, an `AtomicPtr<T>` may be sent to and
// shared between threads for any `T`: only the pointer *value* is stored,
// it is accessed exclusively through atomic operations, and this type never
// dereferences it.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}
144
/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
///
/// For more information see the [nomicon].
///
/// [nomicon]: ../../../nomicon/atomics.html
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    ///
    /// Corresponds to LLVM's [`Monotonic`] ordering.
    ///
    /// [`Monotonic`]: http://llvm.org/docs/Atomics.html#monotonic
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to the other threads that perform a load with [`Acquire`] ordering
    /// on the same value.
    ///
    /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with [`Release`] ordering on the same value
    /// in other threads.
    ///
    /// [`Release`]: http://llvm.org/docs/Atomics.html#release
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses [`Acquire`] ordering, and with a store
    /// [`Release`] ordering.
    ///
    /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
    /// [`Release`]: http://llvm.org/docs/Atomics.html#release
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
    // Prevent exhaustive matching to allow for future extension.
    #[doc(hidden)]
    #[unstable(feature = "future_atomic_orderings", issue = "0")]
    __Nonexhaustive,
}
199
/// An [`AtomicBool`] initialized to `false`.
///
/// [`AtomicBool`]: struct.AtomicBool.html
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
1a4d82fc 206
3157f602 207#[cfg(target_has_atomic = "8")]
1a4d82fc
JJ
208impl AtomicBool {
209 /// Creates a new `AtomicBool`.
210 ///
211 /// # Examples
212 ///
213 /// ```
214 /// use std::sync::atomic::AtomicBool;
215 ///
216 /// let atomic_true = AtomicBool::new(true);
217 /// let atomic_false = AtomicBool::new(false);
218 /// ```
219 #[inline]
85aaf69f 220 #[stable(feature = "rust1", since = "1.0.0")]
62682a34 221 pub const fn new(v: bool) -> AtomicBool {
a7813a04 222 AtomicBool { v: UnsafeCell::new(v as u8) }
1a4d82fc
JJ
223 }
224
9e0c209e
SL
225 /// Returns a mutable reference to the underlying `bool`.
226 ///
227 /// This is safe because the mutable reference guarantees that no other threads are
228 /// concurrently accessing the atomic data.
229 ///
230 /// # Examples
231 ///
232 /// ```
9e0c209e
SL
233 /// use std::sync::atomic::{AtomicBool, Ordering};
234 ///
235 /// let mut some_bool = AtomicBool::new(true);
236 /// assert_eq!(*some_bool.get_mut(), true);
237 /// *some_bool.get_mut() = false;
238 /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
239 /// ```
240 #[inline]
476ff2be 241 #[stable(feature = "atomic_access", since = "1.15.0")]
9e0c209e
SL
242 pub fn get_mut(&mut self) -> &mut bool {
243 unsafe { &mut *(self.v.get() as *mut bool) }
244 }
245
246 /// Consumes the atomic and returns the contained value.
247 ///
248 /// This is safe because passing `self` by value guarantees that no other threads are
249 /// concurrently accessing the atomic data.
250 ///
251 /// # Examples
252 ///
253 /// ```
9e0c209e
SL
254 /// use std::sync::atomic::AtomicBool;
255 ///
256 /// let some_bool = AtomicBool::new(true);
257 /// assert_eq!(some_bool.into_inner(), true);
258 /// ```
259 #[inline]
476ff2be 260 #[stable(feature = "atomic_access", since = "1.15.0")]
9e0c209e
SL
261 pub fn into_inner(self) -> bool {
262 unsafe { self.v.into_inner() != 0 }
263 }
264
1a4d82fc
JJ
265 /// Loads a value from the bool.
266 ///
32a655c1
SL
267 /// `load` takes an [`Ordering`] argument which describes the memory ordering
268 /// of this operation.
1a4d82fc
JJ
269 ///
270 /// # Panics
271 ///
32a655c1
SL
272 /// Panics if `order` is [`Release`] or [`AcqRel`].
273 ///
274 /// [`Ordering`]: enum.Ordering.html
275 /// [`Release`]: enum.Ordering.html#variant.Release
cc61c64b 276 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1a4d82fc
JJ
277 ///
278 /// # Examples
279 ///
280 /// ```
281 /// use std::sync::atomic::{AtomicBool, Ordering};
282 ///
283 /// let some_bool = AtomicBool::new(true);
284 ///
62682a34 285 /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
1a4d82fc
JJ
286 /// ```
287 #[inline]
85aaf69f 288 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 289 pub fn load(&self, order: Ordering) -> bool {
a7813a04 290 unsafe { atomic_load(self.v.get(), order) != 0 }
1a4d82fc
JJ
291 }
292
293 /// Stores a value into the bool.
294 ///
32a655c1
SL
295 /// `store` takes an [`Ordering`] argument which describes the memory ordering
296 /// of this operation.
297 ///
298 /// [`Ordering`]: enum.Ordering.html
1a4d82fc
JJ
299 ///
300 /// # Examples
301 ///
302 /// ```
303 /// use std::sync::atomic::{AtomicBool, Ordering};
304 ///
305 /// let some_bool = AtomicBool::new(true);
306 ///
307 /// some_bool.store(false, Ordering::Relaxed);
62682a34 308 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
1a4d82fc
JJ
309 /// ```
310 ///
311 /// # Panics
312 ///
cc61c64b
XL
313 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
314 ///
315 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
316 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1a4d82fc 317 #[inline]
85aaf69f 318 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 319 pub fn store(&self, val: bool, order: Ordering) {
c30ab7b3
SL
320 unsafe {
321 atomic_store(self.v.get(), val as u8, order);
322 }
1a4d82fc
JJ
323 }
324
cc61c64b 325 /// Stores a value into the bool, returning the previous value.
1a4d82fc 326 ///
32a655c1
SL
327 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
328 /// of this operation.
329 ///
330 /// [`Ordering`]: enum.Ordering.html
1a4d82fc
JJ
331 ///
332 /// # Examples
333 ///
334 /// ```
335 /// use std::sync::atomic::{AtomicBool, Ordering};
336 ///
337 /// let some_bool = AtomicBool::new(true);
338 ///
62682a34
SL
339 /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
340 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
1a4d82fc
JJ
341 /// ```
342 #[inline]
85aaf69f 343 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 344 pub fn swap(&self, val: bool, order: Ordering) -> bool {
a7813a04 345 unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
1a4d82fc
JJ
346 }
347
c1a9b12d 348 /// Stores a value into the `bool` if the current value is the same as the `current` value.
1a4d82fc 349 ///
c1a9b12d
SL
350 /// The return value is always the previous value. If it is equal to `current`, then the value
351 /// was updated.
1a4d82fc 352 ///
32a655c1
SL
353 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
354 /// ordering of this operation.
355 ///
356 /// [`Ordering`]: enum.Ordering.html
1a4d82fc
JJ
357 ///
358 /// # Examples
359 ///
360 /// ```
361 /// use std::sync::atomic::{AtomicBool, Ordering};
362 ///
363 /// let some_bool = AtomicBool::new(true);
364 ///
62682a34
SL
365 /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
366 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
367 ///
368 /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
369 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
1a4d82fc
JJ
370 /// ```
371 #[inline]
85aaf69f 372 #[stable(feature = "rust1", since = "1.0.0")]
c1a9b12d 373 pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
54a0048b
SL
374 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
375 Ok(x) => x,
376 Err(x) => x,
377 }
7453a54e
SL
378 }
379
380 /// Stores a value into the `bool` if the current value is the same as the `current` value.
381 ///
54a0048b 382 /// The return value is a result indicating whether the new value was written and containing
3157f602 383 /// the previous value. On success this value is guaranteed to be equal to `current`.
7453a54e 384 ///
32a655c1
SL
385 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
386 /// ordering of this operation. The first describes the required ordering if the
387 /// operation succeeds while the second describes the required ordering when the
388 /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and must
389 /// be equivalent or weaker than the success ordering.
390 ///
391 /// [`Ordering`]: enum.Ordering.html
392 /// [`Release`]: enum.Ordering.html#variant.Release
393 /// [`AcqRel`]: enum.Ordering.html#variant.Release
7453a54e
SL
394 ///
395 /// # Examples
396 ///
397 /// ```
7453a54e
SL
398 /// use std::sync::atomic::{AtomicBool, Ordering};
399 ///
400 /// let some_bool = AtomicBool::new(true);
401 ///
402 /// assert_eq!(some_bool.compare_exchange(true,
403 /// false,
404 /// Ordering::Acquire,
405 /// Ordering::Relaxed),
54a0048b 406 /// Ok(true));
7453a54e
SL
407 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
408 ///
409 /// assert_eq!(some_bool.compare_exchange(true, true,
410 /// Ordering::SeqCst,
411 /// Ordering::Acquire),
54a0048b 412 /// Err(false));
7453a54e
SL
413 /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
414 /// ```
415 #[inline]
a7813a04 416 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
7453a54e
SL
417 pub fn compare_exchange(&self,
418 current: bool,
419 new: bool,
420 success: Ordering,
c30ab7b3
SL
421 failure: Ordering)
422 -> Result<bool, bool> {
423 match unsafe {
424 atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
425 } {
a7813a04
XL
426 Ok(x) => Ok(x != 0),
427 Err(x) => Err(x != 0),
54a0048b 428 }
7453a54e
SL
429 }
430
431 /// Stores a value into the `bool` if the current value is the same as the `current` value.
432 ///
cc61c64b 433 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
7453a54e 434 /// comparison succeeds, which can result in more efficient code on some platforms. The
54a0048b
SL
435 /// return value is a result indicating whether the new value was written and containing the
436 /// previous value.
7453a54e 437 ///
32a655c1 438 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
7453a54e
SL
439 /// ordering of this operation. The first describes the required ordering if the operation
440 /// succeeds while the second describes the required ordering when the operation fails. The
32a655c1
SL
441 /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or
442 /// weaker than the success ordering.
443 ///
cc61c64b 444 /// [`compare_exchange`]: #method.compare_exchange
32a655c1
SL
445 /// [`Ordering`]: enum.Ordering.html
446 /// [`Release`]: enum.Ordering.html#variant.Release
447 /// [`AcqRel`]: enum.Ordering.html#variant.Release
7453a54e
SL
448 ///
449 /// # Examples
450 ///
451 /// ```
7453a54e
SL
452 /// use std::sync::atomic::{AtomicBool, Ordering};
453 ///
454 /// let val = AtomicBool::new(false);
455 ///
456 /// let new = true;
457 /// let mut old = val.load(Ordering::Relaxed);
458 /// loop {
54a0048b
SL
459 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
460 /// Ok(_) => break,
461 /// Err(x) => old = x,
7453a54e
SL
462 /// }
463 /// }
464 /// ```
465 #[inline]
a7813a04 466 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
7453a54e
SL
467 pub fn compare_exchange_weak(&self,
468 current: bool,
469 new: bool,
470 success: Ordering,
c30ab7b3
SL
471 failure: Ordering)
472 -> Result<bool, bool> {
473 match unsafe {
474 atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
475 } {
a7813a04
XL
476 Ok(x) => Ok(x != 0),
477 Err(x) => Err(x != 0),
54a0048b 478 }
1a4d82fc
JJ
479 }
480
481 /// Logical "and" with a boolean value.
482 ///
483 /// Performs a logical "and" operation on the current value and the argument `val`, and sets
484 /// the new value to the result.
485 ///
486 /// Returns the previous value.
487 ///
488 /// # Examples
489 ///
490 /// ```
491 /// use std::sync::atomic::{AtomicBool, Ordering};
492 ///
493 /// let foo = AtomicBool::new(true);
62682a34
SL
494 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
495 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1a4d82fc
JJ
496 ///
497 /// let foo = AtomicBool::new(true);
62682a34
SL
498 /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
499 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1a4d82fc
JJ
500 ///
501 /// let foo = AtomicBool::new(false);
62682a34
SL
502 /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
503 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1a4d82fc
JJ
504 /// ```
505 #[inline]
85aaf69f 506 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 507 pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
a7813a04 508 unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
1a4d82fc
JJ
509 }
510
511 /// Logical "nand" with a boolean value.
512 ///
513 /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
514 /// the new value to the result.
515 ///
516 /// Returns the previous value.
517 ///
518 /// # Examples
519 ///
520 /// ```
521 /// use std::sync::atomic::{AtomicBool, Ordering};
522 ///
523 /// let foo = AtomicBool::new(true);
62682a34
SL
524 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
525 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1a4d82fc
JJ
526 ///
527 /// let foo = AtomicBool::new(true);
62682a34
SL
528 /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
529 /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
530 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1a4d82fc
JJ
531 ///
532 /// let foo = AtomicBool::new(false);
62682a34
SL
533 /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
534 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1a4d82fc
JJ
535 /// ```
536 #[inline]
85aaf69f 537 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 538 pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
a7813a04
XL
539 // We can't use atomic_nand here because it can result in a bool with
540 // an invalid value. This happens because the atomic operation is done
541 // with an 8-bit integer internally, which would set the upper 7 bits.
cc61c64b
XL
542 // So we just use fetch_xor or swap instead.
543 if val {
544 // !(x & true) == !x
545 // We must invert the bool.
546 self.fetch_xor(true, order)
547 } else {
548 // !(x & false) == true
549 // We must set the bool to true.
550 self.swap(true, order)
a7813a04 551 }
1a4d82fc
JJ
552 }
553
554 /// Logical "or" with a boolean value.
555 ///
556 /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
557 /// new value to the result.
558 ///
559 /// Returns the previous value.
560 ///
561 /// # Examples
562 ///
563 /// ```
564 /// use std::sync::atomic::{AtomicBool, Ordering};
565 ///
566 /// let foo = AtomicBool::new(true);
62682a34
SL
567 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
568 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1a4d82fc
JJ
569 ///
570 /// let foo = AtomicBool::new(true);
62682a34
SL
571 /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
572 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1a4d82fc
JJ
573 ///
574 /// let foo = AtomicBool::new(false);
62682a34
SL
575 /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
576 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1a4d82fc
JJ
577 /// ```
578 #[inline]
85aaf69f 579 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 580 pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
a7813a04 581 unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
1a4d82fc
JJ
582 }
583
584 /// Logical "xor" with a boolean value.
585 ///
586 /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
587 /// the new value to the result.
588 ///
589 /// Returns the previous value.
590 ///
591 /// # Examples
592 ///
593 /// ```
594 /// use std::sync::atomic::{AtomicBool, Ordering};
595 ///
596 /// let foo = AtomicBool::new(true);
62682a34
SL
597 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
598 /// assert_eq!(foo.load(Ordering::SeqCst), true);
1a4d82fc
JJ
599 ///
600 /// let foo = AtomicBool::new(true);
62682a34
SL
601 /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
602 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1a4d82fc
JJ
603 ///
604 /// let foo = AtomicBool::new(false);
62682a34
SL
605 /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
606 /// assert_eq!(foo.load(Ordering::SeqCst), false);
1a4d82fc
JJ
607 /// ```
608 #[inline]
85aaf69f 609 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 610 pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
a7813a04 611 unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
1a4d82fc
JJ
612 }
613}
614
3157f602 615#[cfg(target_has_atomic = "ptr")]
1a4d82fc
JJ
616impl<T> AtomicPtr<T> {
617 /// Creates a new `AtomicPtr`.
618 ///
619 /// # Examples
620 ///
621 /// ```
622 /// use std::sync::atomic::AtomicPtr;
623 ///
85aaf69f 624 /// let ptr = &mut 5;
1a4d82fc
JJ
625 /// let atomic_ptr = AtomicPtr::new(ptr);
626 /// ```
627 #[inline]
85aaf69f 628 #[stable(feature = "rust1", since = "1.0.0")]
62682a34
SL
629 pub const fn new(p: *mut T) -> AtomicPtr<T> {
630 AtomicPtr { p: UnsafeCell::new(p) }
1a4d82fc
JJ
631 }
632
9e0c209e
SL
633 /// Returns a mutable reference to the underlying pointer.
634 ///
635 /// This is safe because the mutable reference guarantees that no other threads are
636 /// concurrently accessing the atomic data.
637 ///
638 /// # Examples
639 ///
640 /// ```
9e0c209e
SL
641 /// use std::sync::atomic::{AtomicPtr, Ordering};
642 ///
643 /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
644 /// *atomic_ptr.get_mut() = &mut 5;
645 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
646 /// ```
647 #[inline]
476ff2be 648 #[stable(feature = "atomic_access", since = "1.15.0")]
9e0c209e
SL
649 pub fn get_mut(&mut self) -> &mut *mut T {
650 unsafe { &mut *self.p.get() }
651 }
652
653 /// Consumes the atomic and returns the contained value.
654 ///
655 /// This is safe because passing `self` by value guarantees that no other threads are
656 /// concurrently accessing the atomic data.
657 ///
658 /// # Examples
659 ///
660 /// ```
9e0c209e
SL
661 /// use std::sync::atomic::AtomicPtr;
662 ///
663 /// let atomic_ptr = AtomicPtr::new(&mut 5);
664 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
665 /// ```
666 #[inline]
476ff2be 667 #[stable(feature = "atomic_access", since = "1.15.0")]
9e0c209e
SL
668 pub fn into_inner(self) -> *mut T {
669 unsafe { self.p.into_inner() }
670 }
671
1a4d82fc
JJ
672 /// Loads a value from the pointer.
673 ///
32a655c1
SL
674 /// `load` takes an [`Ordering`] argument which describes the memory ordering
675 /// of this operation.
1a4d82fc
JJ
676 ///
677 /// # Panics
678 ///
32a655c1
SL
679 /// Panics if `order` is [`Release`] or [`AcqRel`].
680 ///
681 /// [`Ordering`]: enum.Ordering.html
682 /// [`Release`]: enum.Ordering.html#variant.Release
683 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1a4d82fc
JJ
684 ///
685 /// # Examples
686 ///
687 /// ```
688 /// use std::sync::atomic::{AtomicPtr, Ordering};
689 ///
85aaf69f 690 /// let ptr = &mut 5;
1a4d82fc
JJ
691 /// let some_ptr = AtomicPtr::new(ptr);
692 ///
693 /// let value = some_ptr.load(Ordering::Relaxed);
694 /// ```
695 #[inline]
85aaf69f 696 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 697 pub fn load(&self, order: Ordering) -> *mut T {
c30ab7b3 698 unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
1a4d82fc
JJ
699 }
700
701 /// Stores a value into the pointer.
702 ///
32a655c1
SL
703 /// `store` takes an [`Ordering`] argument which describes the memory ordering
704 /// of this operation.
705 ///
706 /// [`Ordering`]: enum.Ordering.html
1a4d82fc
JJ
707 ///
708 /// # Examples
709 ///
710 /// ```
711 /// use std::sync::atomic::{AtomicPtr, Ordering};
712 ///
85aaf69f 713 /// let ptr = &mut 5;
1a4d82fc
JJ
714 /// let some_ptr = AtomicPtr::new(ptr);
715 ///
85aaf69f 716 /// let other_ptr = &mut 10;
1a4d82fc
JJ
717 ///
718 /// some_ptr.store(other_ptr, Ordering::Relaxed);
719 /// ```
720 ///
721 /// # Panics
722 ///
cc61c64b
XL
723 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
724 ///
725 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
726 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1a4d82fc 727 #[inline]
85aaf69f 728 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 729 pub fn store(&self, ptr: *mut T, order: Ordering) {
c30ab7b3
SL
730 unsafe {
731 atomic_store(self.p.get() as *mut usize, ptr as usize, order);
732 }
1a4d82fc
JJ
733 }
734
cc61c64b 735 /// Stores a value into the pointer, returning the previous value.
1a4d82fc 736 ///
32a655c1
SL
737 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
738 /// of this operation.
739 ///
740 /// [`Ordering`]: enum.Ordering.html
1a4d82fc
JJ
741 ///
742 /// # Examples
743 ///
744 /// ```
745 /// use std::sync::atomic::{AtomicPtr, Ordering};
746 ///
85aaf69f 747 /// let ptr = &mut 5;
1a4d82fc
JJ
748 /// let some_ptr = AtomicPtr::new(ptr);
749 ///
85aaf69f 750 /// let other_ptr = &mut 10;
1a4d82fc
JJ
751 ///
752 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
753 /// ```
754 #[inline]
85aaf69f 755 #[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc 756 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
62682a34 757 unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
1a4d82fc
JJ
758 }
759
c1a9b12d 760 /// Stores a value into the pointer if the current value is the same as the `current` value.
1a4d82fc 761 ///
c1a9b12d
SL
762 /// The return value is always the previous value. If it is equal to `current`, then the value
763 /// was updated.
1a4d82fc 764 ///
32a655c1
SL
765 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
766 /// ordering of this operation.
767 ///
768 /// [`Ordering`]: enum.Ordering.html
1a4d82fc
JJ
769 ///
770 /// # Examples
771 ///
772 /// ```
773 /// use std::sync::atomic::{AtomicPtr, Ordering};
774 ///
85aaf69f 775 /// let ptr = &mut 5;
1a4d82fc
JJ
776 /// let some_ptr = AtomicPtr::new(ptr);
777 ///
85aaf69f
SL
778 /// let other_ptr = &mut 10;
779 /// let another_ptr = &mut 10;
1a4d82fc
JJ
780 ///
781 /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
782 /// ```
783 #[inline]
85aaf69f 784 #[stable(feature = "rust1", since = "1.0.0")]
c1a9b12d 785 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
54a0048b
SL
786 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
787 Ok(x) => x,
788 Err(x) => x,
789 }
7453a54e
SL
790 }
791
792 /// Stores a value into the pointer if the current value is the same as the `current` value.
793 ///
54a0048b 794 /// The return value is a result indicating whether the new value was written and containing
3157f602 795 /// the previous value. On success this value is guaranteed to be equal to `current`.
7453a54e 796 ///
32a655c1
SL
797 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
798 /// ordering of this operation. The first describes the required ordering if
799 /// the operation succeeds while the second describes the required ordering when
800 /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`]
801 /// and must be equivalent or weaker than the success ordering.
802 ///
803 /// [`Ordering`]: enum.Ordering.html
804 /// [`Release`]: enum.Ordering.html#variant.Release
805 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
7453a54e
SL
806 ///
807 /// # Examples
808 ///
809 /// ```
7453a54e
SL
810 /// use std::sync::atomic::{AtomicPtr, Ordering};
811 ///
812 /// let ptr = &mut 5;
813 /// let some_ptr = AtomicPtr::new(ptr);
814 ///
815 /// let other_ptr = &mut 10;
816 /// let another_ptr = &mut 10;
817 ///
818 /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
819 /// Ordering::SeqCst, Ordering::Relaxed);
820 /// ```
821 #[inline]
a7813a04 822 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
7453a54e
SL
823 pub fn compare_exchange(&self,
824 current: *mut T,
825 new: *mut T,
826 success: Ordering,
c30ab7b3
SL
827 failure: Ordering)
828 -> Result<*mut T, *mut T> {
1a4d82fc 829 unsafe {
54a0048b
SL
830 let res = atomic_compare_exchange(self.p.get() as *mut usize,
831 current as usize,
832 new as usize,
833 success,
834 failure);
835 match res {
836 Ok(x) => Ok(x as *mut T),
837 Err(x) => Err(x as *mut T),
838 }
1a4d82fc
JJ
839 }
840 }
7453a54e
SL
841
842 /// Stores a value into the pointer if the current value is the same as the `current` value.
843 ///
32a655c1 844 /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
7453a54e 845 /// comparison succeeds, which can result in more efficient code on some platforms. The
54a0048b
SL
846 /// return value is a result indicating whether the new value was written and containing the
847 /// previous value.
7453a54e 848 ///
32a655c1 849 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
7453a54e
SL
850 /// ordering of this operation. The first describes the required ordering if the operation
851 /// succeeds while the second describes the required ordering when the operation fails. The
32a655c1
SL
852 /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or
853 /// weaker than the success ordering.
854 ///
855 /// [`compare_exchange`]: #method.compare_exchange
856 /// [`Ordering`]: enum.Ordering.html
857 /// [`Release`]: enum.Ordering.html#variant.Release
858 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
7453a54e
SL
859 ///
860 /// # Examples
861 ///
862 /// ```
7453a54e
SL
863 /// use std::sync::atomic::{AtomicPtr, Ordering};
864 ///
865 /// let some_ptr = AtomicPtr::new(&mut 5);
866 ///
867 /// let new = &mut 10;
868 /// let mut old = some_ptr.load(Ordering::Relaxed);
869 /// loop {
54a0048b
SL
870 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
871 /// Ok(_) => break,
872 /// Err(x) => old = x,
7453a54e
SL
873 /// }
874 /// }
875 /// ```
876 #[inline]
a7813a04 877 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
7453a54e
SL
878 pub fn compare_exchange_weak(&self,
879 current: *mut T,
880 new: *mut T,
881 success: Ordering,
c30ab7b3
SL
882 failure: Ordering)
883 -> Result<*mut T, *mut T> {
54a0048b
SL
884 unsafe {
885 let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
886 current as usize,
887 new as usize,
888 success,
889 failure);
890 match res {
891 Ok(x) => Ok(x as *mut T),
892 Err(x) => Err(x as *mut T),
893 }
894 }
7453a54e
SL
895 }
896}
897
a7813a04
XL
// Generates one atomic integer type (`$atomic_type`) wrapping `$int_type`,
// together with its zero-initialized constant (`$atomic_init`) and its
// `Default`/`Debug`/`Sync` impls. The four leading meta arguments supply the
// stability attributes for, in order: the type and its core methods, the
// compare/exchange family, the `Debug` impl, and `get_mut`/`into_inner`.
//
// Fix: the doc examples for `fetch_and`, `fetch_or` and `fetch_xor` were
// missing their closing ``` fence, which broke rustdoc rendering and caused
// the following attributes to be swallowed into the example code block.
macro_rules! atomic_int {
    ($stable:meta,
     $stable_cxchg:meta,
     $stable_debug:meta,
     $stable_access:meta,
     $int_type:ident $atomic_type:ident $atomic_init:ident) => {
        /// An integer type which can be safely shared between threads.
        ///
        /// This type has the same in-memory representation as the underlying integer type.
        #[$stable]
        pub struct $atomic_type {
            v: UnsafeCell<$int_type>,
        }

        /// An atomic integer initialized to `0`.
        #[$stable]
        pub const $atomic_init: $atomic_type = $atomic_type::new(0);

        #[$stable]
        impl Default for $atomic_type {
            fn default() -> Self {
                Self::new(Default::default())
            }
        }

        #[$stable_debug]
        impl fmt::Debug for $atomic_type {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($atomic_type))
                    .field(&self.load(Ordering::SeqCst))
                    .finish()
            }
        }

        // Send is implicitly implemented.
        #[$stable]
        unsafe impl Sync for $atomic_type {}

        impl $atomic_type {
            /// Creates a new atomic integer.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let atomic_forty_two = AtomicIsize::new(42);
            /// ```
            #[inline]
            #[$stable]
            pub const fn new(v: $int_type) -> Self {
                $atomic_type {v: UnsafeCell::new(v)}
            }

            /// Returns a mutable reference to the underlying integer.
            ///
            /// This is safe because the mutable reference guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let mut some_isize = AtomicIsize::new(10);
            /// assert_eq!(*some_isize.get_mut(), 10);
            /// *some_isize.get_mut() = 5;
            /// assert_eq!(some_isize.load(Ordering::SeqCst), 5);
            /// ```
            #[inline]
            #[$stable_access]
            pub fn get_mut(&mut self) -> &mut $int_type {
                unsafe { &mut *self.v.get() }
            }

            /// Consumes the atomic and returns the contained value.
            ///
            /// This is safe because passing `self` by value guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let some_isize = AtomicIsize::new(5);
            /// assert_eq!(some_isize.into_inner(), 5);
            /// ```
            #[inline]
            #[$stable_access]
            pub fn into_inner(self) -> $int_type {
                unsafe { self.v.into_inner() }
            }

            /// Loads a value from the atomic integer.
            ///
            /// `load` takes an [`Ordering`] argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Panics
            ///
            /// Panics if `order` is [`Release`] or [`AcqRel`].
            ///
            /// [`Ordering`]: enum.Ordering.html
            /// [`Release`]: enum.Ordering.html#variant.Release
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
            /// ```
            #[inline]
            #[$stable]
            pub fn load(&self, order: Ordering) -> $int_type {
                unsafe { atomic_load(self.v.get(), order) }
            }

            /// Stores a value into the atomic integer.
            ///
            /// `store` takes an [`Ordering`] argument which describes the memory ordering of this
            /// operation.
            ///
            /// [`Ordering`]: enum.Ordering.html
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// some_isize.store(10, Ordering::Relaxed);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            ///
            /// # Panics
            ///
            /// Panics if `order` is [`Acquire`] or [`AcqRel`].
            ///
            /// [`Acquire`]: enum.Ordering.html#variant.Acquire
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            #[inline]
            #[$stable]
            pub fn store(&self, val: $int_type, order: Ordering) {
                unsafe { atomic_store(self.v.get(), val, order); }
            }

            /// Stores a value into the atomic integer, returning the previous value.
            ///
            /// `swap` takes an [`Ordering`] argument which describes the memory ordering of this
            /// operation.
            ///
            /// [`Ordering`]: enum.Ordering.html
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
            /// ```
            #[inline]
            #[$stable]
            pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_swap(self.v.get(), val, order) }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is always the previous value. If it is equal to `current`, then the
            /// value was updated.
            ///
            /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
            /// ordering of this operation.
            ///
            /// [`Ordering`]: enum.Ordering.html
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[inline]
            #[$stable]
            pub fn compare_and_swap(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    order: Ordering) -> $int_type {
                // Implemented on top of `compare_exchange`; the old-style API
                // collapses success and failure into a single returned value.
                match self.compare_exchange(current,
                                            new,
                                            order,
                                            strongest_failure_ordering(order)) {
                    Ok(x) => x,
                    Err(x) => x,
                }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is a result indicating whether the new value was written and
            /// containing the previous value. On success this value is guaranteed to be equal to
            /// `current`.
            ///
            /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
            /// ordering of this operation. The first describes the required ordering if
            /// the operation succeeds while the second describes the required ordering when
            /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
            /// must be equivalent or weaker than the success ordering.
            ///
            /// [`Ordering`]: enum.Ordering.html
            /// [`Release`]: enum.Ordering.html#variant.Release
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_exchange(5, 10,
            ///                                        Ordering::Acquire,
            ///                                        Ordering::Relaxed),
            ///            Ok(5));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_exchange(6, 12,
            ///                                        Ordering::SeqCst,
            ///                                        Ordering::Acquire),
            ///            Err(10));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[inline]
            #[$stable_cxchg]
            pub fn compare_exchange(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    success: Ordering,
                                    failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even
            /// when the comparison succeeds, which can result in more efficient code on some
            /// platforms. The return value is a result indicating whether the new value was
            /// written and containing the previous value.
            ///
            /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
            /// ordering of this operation. The first describes the required ordering if the
            /// operation succeeds while the second describes the required ordering when the
            /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
            /// must be equivalent or weaker than the success ordering.
            ///
            /// [`compare_exchange`]: #method.compare_exchange
            /// [`Ordering`]: enum.Ordering.html
            /// [`Release`]: enum.Ordering.html#variant.Release
            /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let val = AtomicIsize::new(4);
            ///
            /// let mut old = val.load(Ordering::Relaxed);
            /// loop {
            ///     let new = old * 2;
            ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
            ///         Ok(_) => break,
            ///         Err(x) => old = x,
            ///     }
            /// }
            /// ```
            #[inline]
            #[$stable_cxchg]
            pub fn compare_exchange_weak(&self,
                                         current: $int_type,
                                         new: $int_type,
                                         success: Ordering,
                                         failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe {
                    atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
                }
            }

            /// Adds to the current value, returning the previous value.
            ///
            /// This operation wraps around on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), 10);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_add(self.v.get(), val, order) }
            }

            /// Subtracts from the current value, returning the previous value.
            ///
            /// This operation wraps around on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), -10);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_sub(self.v.get(), val, order) }
            }

            /// Bitwise "and" with the current value.
            ///
            /// Performs a bitwise "and" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// Returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_and(self.v.get(), val, order) }
            }

            /// Bitwise "or" with the current value.
            ///
            /// Performs a bitwise "or" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// Returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_or(self.v.get(), val, order) }
            }

            /// Bitwise "xor" with the current value.
            ///
            /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
            /// sets the new value to the result.
            ///
            /// Returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_xor(self.v.get(), val, order) }
            }
        }
    }
}
1307
// Sized integer atomics: unstable, gated on `integer_atomics`, and only
// present when the target supports atomics of the matching width.
#[cfg(target_has_atomic = "8")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i8 AtomicI8 ATOMIC_I8_INIT
}
#[cfg(target_has_atomic = "8")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u8 AtomicU8 ATOMIC_U8_INIT
}
#[cfg(target_has_atomic = "16")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i16 AtomicI16 ATOMIC_I16_INIT
}
#[cfg(target_has_atomic = "16")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u16 AtomicU16 ATOMIC_U16_INIT
}
#[cfg(target_has_atomic = "32")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i32 AtomicI32 ATOMIC_I32_INIT
}
#[cfg(target_has_atomic = "32")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u32 AtomicU32 ATOMIC_U32_INIT
}
#[cfg(target_has_atomic = "64")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i64 AtomicI64 ATOMIC_I64_INIT
}
#[cfg(target_has_atomic = "64")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u64 AtomicU64 ATOMIC_U64_INIT
}
// Pointer-sized atomics: stable since 1.0, with the later-stabilized method
// families carrying their own (stable) feature attributes.
#[cfg(target_has_atomic = "ptr")]
atomic_int!{
    stable(feature = "rust1", since = "1.0.0"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    stable(feature = "atomic_access", since = "1.15.0"),
    isize AtomicIsize ATOMIC_ISIZE_INIT
}
#[cfg(target_has_atomic = "ptr")]
atomic_int!{
    stable(feature = "rust1", since = "1.0.0"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    stable(feature = "atomic_access", since = "1.15.0"),
    usize AtomicUsize ATOMIC_USIZE_INIT
}
1388
7453a54e
SL
1389#[inline]
1390fn strongest_failure_ordering(order: Ordering) -> Ordering {
1391 match order {
1392 Release => Relaxed,
1393 Relaxed => Relaxed,
c30ab7b3 1394 SeqCst => SeqCst,
7453a54e 1395 Acquire => Acquire,
c30ab7b3
SL
1396 AcqRel => Acquire,
1397 __Nonexhaustive => __Nonexhaustive,
7453a54e 1398 }
1a4d82fc
JJ
1399}
1400
1401#[inline]
7453a54e 1402unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
1a4d82fc
JJ
1403 match order {
1404 Release => intrinsics::atomic_store_rel(dst, val),
1405 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
c30ab7b3 1406 SeqCst => intrinsics::atomic_store(dst, val),
1a4d82fc 1407 Acquire => panic!("there is no such thing as an acquire store"),
c30ab7b3
SL
1408 AcqRel => panic!("there is no such thing as an acquire/release store"),
1409 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1410 }
1411}
1412
1413#[inline]
7453a54e 1414unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
1a4d82fc
JJ
1415 match order {
1416 Acquire => intrinsics::atomic_load_acq(dst),
1417 Relaxed => intrinsics::atomic_load_relaxed(dst),
c30ab7b3 1418 SeqCst => intrinsics::atomic_load(dst),
1a4d82fc 1419 Release => panic!("there is no such thing as a release load"),
c30ab7b3
SL
1420 AcqRel => panic!("there is no such thing as an acquire/release load"),
1421 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1422 }
1423}
1424
1425#[inline]
1a4d82fc
JJ
1426unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
1427 match order {
1428 Acquire => intrinsics::atomic_xchg_acq(dst, val),
1429 Release => intrinsics::atomic_xchg_rel(dst, val),
c30ab7b3 1430 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
1a4d82fc 1431 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
c30ab7b3
SL
1432 SeqCst => intrinsics::atomic_xchg(dst, val),
1433 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1434 }
1435}
1436
cc61c64b 1437/// Returns the previous value (like __sync_fetch_and_add).
1a4d82fc 1438#[inline]
1a4d82fc
JJ
1439unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
1440 match order {
1441 Acquire => intrinsics::atomic_xadd_acq(dst, val),
1442 Release => intrinsics::atomic_xadd_rel(dst, val),
c30ab7b3 1443 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
1a4d82fc 1444 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
c30ab7b3
SL
1445 SeqCst => intrinsics::atomic_xadd(dst, val),
1446 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1447 }
1448}
1449
cc61c64b 1450/// Returns the previous value (like __sync_fetch_and_sub).
1a4d82fc 1451#[inline]
1a4d82fc
JJ
1452unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
1453 match order {
1454 Acquire => intrinsics::atomic_xsub_acq(dst, val),
1455 Release => intrinsics::atomic_xsub_rel(dst, val),
c30ab7b3 1456 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
1a4d82fc 1457 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
c30ab7b3
SL
1458 SeqCst => intrinsics::atomic_xsub(dst, val),
1459 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1460 }
1461}
1462
/// Dispatches a strong compare-exchange to the intrinsic matching the
/// `(success, failure)` ordering pair, returning `Ok(previous)` when the
/// swap happened and `Err(previous)` when it did not.
#[inline]
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     failure: Ordering)
                                     -> Result<T, T> {
    // NOTE: arm order matters. The specific intrinsic arms must come before
    // the wildcard panic arms; the trailing `_` catches every remaining pair
    // where the failure ordering is stronger than the success ordering.
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
        (__Nonexhaustive, _) => panic!("invalid memory ordering"),
        (_, __Nonexhaustive) => panic!("invalid memory ordering"),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    // The intrinsics report `(previous_value, swap_succeeded)`.
    if ok { Ok(val) } else { Err(val) }
}
1488
/// Dispatches a weak compare-exchange (one that may fail spuriously) to the
/// intrinsic matching the `(success, failure)` ordering pair, returning
/// `Ok(previous)` when the swap happened and `Err(previous)` when it did not.
#[inline]
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
                                          old: T,
                                          new: T,
                                          success: Ordering,
                                          failure: Ordering)
                                          -> Result<T, T> {
    // NOTE: arm order matters. The specific intrinsic arms must come before
    // the wildcard panic arms; the trailing `_` catches every remaining pair
    // where the failure ordering is stronger than the success ordering.
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
        (__Nonexhaustive, _) => panic!("invalid memory ordering"),
        (_, __Nonexhaustive) => panic!("invalid memory ordering"),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    // The intrinsics report `(previous_value, swap_succeeded)`.
    if ok { Ok(val) } else { Err(val) }
}
1514
1a4d82fc 1515#[inline]
1a4d82fc
JJ
1516unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
1517 match order {
1518 Acquire => intrinsics::atomic_and_acq(dst, val),
1519 Release => intrinsics::atomic_and_rel(dst, val),
c30ab7b3 1520 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
1a4d82fc 1521 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
c30ab7b3
SL
1522 SeqCst => intrinsics::atomic_and(dst, val),
1523 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1524 }
1525}
1526
1a4d82fc 1527#[inline]
1a4d82fc
JJ
1528unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
1529 match order {
1530 Acquire => intrinsics::atomic_or_acq(dst, val),
1531 Release => intrinsics::atomic_or_rel(dst, val),
c30ab7b3 1532 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
1a4d82fc 1533 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
c30ab7b3
SL
1534 SeqCst => intrinsics::atomic_or(dst, val),
1535 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1536 }
1537}
1538
1a4d82fc 1539#[inline]
1a4d82fc
JJ
1540unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
1541 match order {
1542 Acquire => intrinsics::atomic_xor_acq(dst, val),
1543 Release => intrinsics::atomic_xor_rel(dst, val),
c30ab7b3 1544 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
1a4d82fc 1545 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
c30ab7b3
SL
1546 SeqCst => intrinsics::atomic_xor(dst, val),
1547 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1548 }
1549}
1550
1a4d82fc
JJ
1551/// An atomic fence.
1552///
32a655c1
SL
1553/// A fence 'A' which has [`Release`] ordering semantics, synchronizes with a
1554/// fence 'B' with (at least) [`Acquire`] semantics, if and only if there exists
1a4d82fc
JJ
1555/// atomic operations X and Y, both operating on some atomic object 'M' such
1556/// that A is sequenced before X, Y is synchronized before B and Y observes
1557/// the change to M. This provides a happens-before dependence between A and B.
1558///
32a655c1 1559/// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
1a4d82fc
JJ
1560/// with a fence.
1561///
32a655c1
SL
1562/// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
1563/// and [`Release`] semantics, participates in the global program order of the
1564/// other [`SeqCst`] operations and/or fences.
1a4d82fc 1565///
32a655c1 1566/// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
1a4d82fc
JJ
1567///
1568/// # Panics
1569///
32a655c1
SL
1570/// Panics if `order` is [`Relaxed`].
1571///
1572/// [`Ordering`]: enum.Ordering.html
1573/// [`Acquire`]: enum.Ordering.html#variant.Acquire
1574/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1575/// [`Release`]: enum.Ordering.html#variant.Release
1576/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1577/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1a4d82fc 1578#[inline]
85aaf69f 1579#[stable(feature = "rust1", since = "1.0.0")]
1a4d82fc
JJ
1580pub fn fence(order: Ordering) {
1581 unsafe {
1582 match order {
1583 Acquire => intrinsics::atomic_fence_acq(),
1584 Release => intrinsics::atomic_fence_rel(),
c30ab7b3
SL
1585 AcqRel => intrinsics::atomic_fence_acqrel(),
1586 SeqCst => intrinsics::atomic_fence(),
1587 Relaxed => panic!("there is no such thing as a relaxed fence"),
1588 __Nonexhaustive => panic!("invalid memory ordering"),
1a4d82fc
JJ
1589 }
1590 }
1591}
c1a9b12d 1592
c1a9b12d 1593
cc61c64b
XL
1594/// A compiler memory fence.
1595///
1596/// `compiler_fence` does not emit any machine code, but prevents the compiler from re-ordering
1597/// memory operations across this point. Which reorderings are disallowed is dictated by the given
1598/// [`Ordering`]. Note that `compiler_fence` does *not* introduce inter-thread memory
1599/// synchronization; for that, a [`fence`] is needed.
1600///
1601/// The re-ordering prevented by the different ordering semantics are:
1602///
1603/// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
1604/// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
1605/// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
1606/// - with [`AcqRel`], both of the above rules are enforced.
1607///
1608/// # Panics
1609///
1610/// Panics if `order` is [`Relaxed`].
1611///
1612/// [`fence`]: fn.fence.html
1613/// [`Ordering`]: enum.Ordering.html
1614/// [`Acquire`]: enum.Ordering.html#variant.Acquire
1615/// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1616/// [`Release`]: enum.Ordering.html#variant.Release
1617/// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1618/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1619#[inline]
1620#[unstable(feature = "compiler_fences", issue = "41091")]
1621pub fn compiler_fence(order: Ordering) {
1622 unsafe {
1623 match order {
1624 Acquire => intrinsics::atomic_singlethreadfence_acq(),
1625 Release => intrinsics::atomic_singlethreadfence_rel(),
1626 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
1627 SeqCst => intrinsics::atomic_singlethreadfence(),
1628 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
1629 __Nonexhaustive => panic!("invalid memory ordering"),
1630 }
1631 }
1632}
1633
1634
3157f602 1635#[cfg(target_has_atomic = "8")]
a7813a04
XL
1636#[stable(feature = "atomic_debug", since = "1.3.0")]
1637impl fmt::Debug for AtomicBool {
1638 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1639 f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish()
1640 }
1641}
c1a9b12d 1642
3157f602 1643#[cfg(target_has_atomic = "ptr")]
c1a9b12d
SL
1644#[stable(feature = "atomic_debug", since = "1.3.0")]
1645impl<T> fmt::Debug for AtomicPtr<T> {
1646 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1647 f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
1648 }
1649}