// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1].
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//!
//! Atomic variables are safe to share between threads (they implement `Sync`)
//! but they do not themselves provide the mechanism for sharing. The most
//! common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics
//! are often used for lazy global initialization.
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     thread::spawn(move|| {
//!         spinlock_clone.store(0, Ordering::SeqCst);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::SeqCst) != 0 {}
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```
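//!
//! Lazy one-time initialization guarded by an atomic state flag (a hedged
//! sketch, not a canonical pattern from this module; the one-time setup is
//! elided and losers of the race simply spin until the winner finishes):
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! const UNINITIALIZED: usize = 0;
//! const RUNNING: usize = 1;
//! const READY: usize = 2;
//!
//! static STATE: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! fn ensure_initialized() {
//!     if STATE.compare_and_swap(UNINITIALIZED, RUNNING, Ordering::SeqCst) == UNINITIALIZED {
//!         // ... perform the one-time setup here ...
//!         STATE.store(READY, Ordering::SeqCst);
//!     } else {
//!         // Another thread won the race; wait until it finishes.
//!         while STATE.load(Ordering::SeqCst) != READY {}
//!     }
//! }
//!
//! ensure_initialized();
//! ```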

#![stable(feature = "rust1", since = "1.0.0")]

use self::Ordering::*;

use marker::{Send, Sync};

use intrinsics;
use cell::UnsafeCell;
use default::Default;
use fmt;

use result::Result::{self, Ok, Err};

/// A boolean type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    v: UnsafeCell<usize>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicBool {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

// Send is implicitly implemented for AtomicBool.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}

/// A signed integer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicIsize {
    v: UnsafeCell<isize>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicIsize {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

// Send is implicitly implemented for AtomicIsize.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicIsize {}

/// An unsigned integer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicUsize {
    v: UnsafeCell<usize>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicUsize {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

// Send is implicitly implemented for AtomicUsize.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicUsize {}

/// A raw pointer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for AtomicPtr<T> {
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(::ptr::null_mut())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
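///
/// A hedged sketch of `Release`/`Acquire` pairing for publishing a value
/// (illustrative only; `DATA` and `READY` are made-up statics, not part of
/// this module):
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
/// use std::thread;
///
/// static DATA: AtomicUsize = ATOMIC_USIZE_INIT;
/// static READY: AtomicBool = ATOMIC_BOOL_INIT;
///
/// // Writer: publish DATA, then set READY with `Release` so that the write
/// // to DATA is visible to any thread that `Acquire`-loads READY as true.
/// thread::spawn(|| {
///     DATA.store(42, Ordering::Relaxed);
///     READY.store(true, Ordering::Release);
/// });
///
/// // Reader: once the `Acquire` load observes true, the relaxed load of
/// // DATA is guaranteed to see 42.
/// while !READY.load(Ordering::Acquire) {}
/// assert_eq!(DATA.load(Ordering::Relaxed), 42);
/// ```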
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations. Corresponds to LLVM's
    /// `Monotonic` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to another thread that performs a load with `Acquire` ordering
    /// on the same value.
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in another thread.
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}

/// An `AtomicBool` initialized to `false`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
/// An `AtomicIsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
/// An `AtomicUsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);

// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly.
const UINT_TRUE: usize = !0;

impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: bool) -> AtomicBool {
        AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) }
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) > 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the bool, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_swap(self.v.get(), val, order) > 0 }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `new`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
    /// operation. The first describes the required ordering if the operation succeeds while the
    /// second describes the required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent to or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange(&self,
                            current: bool,
                            new: bool,
                            success: Ordering,
                            failure: Ordering) -> Result<bool, bool> {
        let current = if current { UINT_TRUE } else { 0 };
        let new = if new { UINT_TRUE } else { 0 };

        match unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) } {
            Ok(x) => Ok(x > 0),
            Err(x) => Err(x > 0),
        }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker than
    /// the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange_weak(&self,
                                 current: bool,
                                 new: bool,
                                 success: Ordering,
                                 failure: Ordering) -> Result<bool, bool> {
        let current = if current { UINT_TRUE } else { 0 };
        let new = if new { UINT_TRUE } else { 0 };

        match unsafe { atomic_compare_exchange_weak(self.v.get(), current, new,
                                                    success, failure) } {
            Ok(x) => Ok(x > 0),
            Err(x) => Err(x > 0),
        }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_and(self.v.get(), val, order) > 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_nand(self.v.get(), val, order) > 0 }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_or(self.v.get(), val, order) > 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_xor(self.v.get(), val, order) > 0 }
    }
}

impl AtomicIsize {
    /// Creates a new `AtomicIsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicIsize;
    ///
    /// let atomic_forty_two = AtomicIsize::new(42);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: isize) -> AtomicIsize {
        AtomicIsize { v: UnsafeCell::new(v) }
    }

    /// Loads a value from the isize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> isize {
        unsafe { atomic_load(self.v.get(), order) }
    }

    /// Stores a value into the isize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// some_isize.store(10, Ordering::Relaxed);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: isize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the isize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// Stores a value into the `isize` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: isize, new: isize, order: Ordering) -> isize {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the `isize` if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `new`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
    /// operation. The first describes the required ordering if the operation succeeds while the
    /// second describes the required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent to or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.compare_exchange(5, 10,
    ///                                        Ordering::Acquire,
    ///                                        Ordering::Relaxed),
    ///            Ok(5));
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_isize.compare_exchange(6, 12,
    ///                                        Ordering::SeqCst,
    ///                                        Ordering::Acquire),
    ///            Err(10));
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange(&self,
                            current: isize,
                            new: isize,
                            success: Ordering,
                            failure: Ordering) -> Result<isize, isize> {
        unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
    }

    /// Stores a value into the `isize` if the current value is the same as the `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker than
    /// the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let val = AtomicIsize::new(4);
    ///
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     let new = old * 2;
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange_weak(&self,
                                 current: isize,
                                 new: isize,
                                 success: Ordering,
                                 failure: Ordering) -> Result<isize, isize> {
        unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
    }

    /// Add an isize to the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_add(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtract an isize from the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), -10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

impl AtomicUsize {
    /// Creates a new `AtomicUsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicUsize;
    ///
    /// let atomic_forty_two = AtomicUsize::new(42);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: usize) -> AtomicUsize {
        AtomicUsize { v: UnsafeCell::new(v) }
    }

    /// Loads a value from the usize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 5);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> usize {
        unsafe { atomic_load(self.v.get(), order) }
    }

    /// Stores a value into the usize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// some_usize.store(10, Ordering::Relaxed);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: usize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the usize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.swap(10, Ordering::Relaxed), 5);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// Stores a value into the `usize` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_usize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: usize, new: usize, order: Ordering) -> usize {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the `usize` if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `new`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
    /// operation. The first describes the required ordering if the operation succeeds while the
    /// second describes the required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent to or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.compare_exchange(5, 10,
    ///                                        Ordering::Acquire,
    ///                                        Ordering::Relaxed),
    ///            Ok(5));
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_usize.compare_exchange(6, 12,
    ///                                        Ordering::SeqCst,
    ///                                        Ordering::Acquire),
    ///            Err(10));
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange(&self,
                            current: usize,
                            new: usize,
                            success: Ordering,
                            failure: Ordering) -> Result<usize, usize> {
        unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
    }

    /// Stores a value into the `usize` if the current value is the same as the `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker than
    /// the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let val = AtomicUsize::new(4);
    ///
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     let new = old * 2;
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange_weak(&self,
                                 current: usize,
                                 new: usize,
                                 success: Ordering,
                                 failure: Ordering) -> Result<usize, usize> {
        unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
    }

    /// Add to the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtract from the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(10);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 10);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }

    /// Loads a value from the pointer.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe {
            atomic_load(self.p.get() as *mut usize, order) as *mut T
        }
    }

    /// Stores a value into the pointer.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); }
    }

    /// Stores a value into the pointer, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `new`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
    /// operation. The first describes the required ordering if the operation succeeds while the
    /// second describes the required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent to or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
    ///                                       Ordering::SeqCst, Ordering::Relaxed);
    /// ```
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange(&self,
                            current: *mut T,
                            new: *mut T,
                            success: Ordering,
                            failure: Ordering) -> Result<*mut T, *mut T> {
        unsafe {
            let res = atomic_compare_exchange(self.p.get() as *mut usize,
                                              current as usize,
                                              new as usize,
                                              success,
                                              failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker than
    /// the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let some_ptr = AtomicPtr::new(&mut 5);
    ///
    /// let new = &mut 10;
    /// let mut old = some_ptr.load(Ordering::Relaxed);
    /// loop {
    ///     match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange_weak(&self,
                                 current: *mut T,
                                 new: *mut T,
                                 success: Ordering,
                                 failure: Ordering) -> Result<*mut T, *mut T> {
        unsafe {
            let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
                                                   current as usize,
                                                   new as usize,
                                                   success,
                                                   failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }
}

// Picks the strongest failure ordering that is still legal for a given
// success ordering: never `Release` or `AcqRel`, and no stronger than the
// success ordering itself.
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release => Relaxed,
        Relaxed => Relaxed,
        SeqCst => SeqCst,
        Acquire => Acquire,
        AcqRel => Acquire,
    }
}
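
// Each helper below simply selects the monomorphized intrinsic matching the
// requested `Ordering`, and panics for orderings that are meaningless for
// the operation (for example, an `Acquire` store or a `Release` load).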

unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
    }
}

unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
    }
}

unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_add).
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_sub).
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val)
    }
}

#[cfg(any(not(stage0), cargobuild))]
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     failure: Ordering) -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}

#[cfg(all(stage0, not(cargobuild)))]
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     _: Ordering) -> Result<T, T>
    where T: ::cmp::Eq + ::marker::Copy
{
    let val = match success {
        Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
        Release => intrinsics::atomic_cxchg_rel(dst, old, new),
        AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        SeqCst => intrinsics::atomic_cxchg(dst, old, new),
    };
    if val == old { Ok(val) } else { Err(val) }
}

#[cfg(any(not(stage0), cargobuild))]
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
                                          old: T,
                                          new: T,
                                          success: Ordering,
                                          failure: Ordering) -> Result<T, T> {
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    if ok { Ok(val) } else { Err(val) }
}

unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val)
    }
}

unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_nand_acq(dst, val),
        Release => intrinsics::atomic_nand_rel(dst, val),
        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
        SeqCst => intrinsics::atomic_nand(dst, val)
    }
}

unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val)
    }
}

unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val)
    }
}

/// An atomic fence.
///
/// A fence 'A' which has `Release` ordering semantics, synchronizes with a
/// fence 'B' with (at least) `Acquire` semantics, if and only if there exist
/// atomic operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is sequenced before B and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
/// with a fence.
///
/// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
/// and `Release` semantics, participates in the global program order of the
/// other `SeqCst` operations and/or fences.
///
/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
///
/// # Panics
///
/// Panics if `order` is `Relaxed`.
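///
/// # Examples
///
/// A hedged sketch of a `Release` fence paired with an `Acquire` fence
/// through a relaxed store and load of a flag (illustrative only; the two
/// halves would run on different threads):
///
/// ```
/// use std::sync::atomic::{fence, AtomicBool, Ordering, ATOMIC_BOOL_INIT};
///
/// static FLAG: AtomicBool = ATOMIC_BOOL_INIT;
///
/// // Thread A: writes sequenced before the `Release` fence become visible
/// // to thread B once B's `Acquire` fence observes the flag.
/// fence(Ordering::Release);
/// FLAG.store(true, Ordering::Relaxed);
///
/// // Thread B:
/// if FLAG.load(Ordering::Relaxed) {
///     fence(Ordering::Acquire);
///     // Reads here see everything thread A wrote before its fence.
/// }
/// ```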
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence")
        }
    }
}

macro_rules! impl_Debug {
    ($($t:ident)*) => ($(
        #[stable(feature = "atomic_debug", since = "1.3.0")]
        impl fmt::Debug for $t {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($t)).field(&self.load(Ordering::SeqCst)).finish()
            }
        }
    )*);
}

impl_Debug!{ AtomicUsize AtomicIsize AtomicBool }

#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
    }
}