// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Atomic types
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1].
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//!
//! Atomic variables are safe to share between threads (they implement `Sync`)
//! but they do not themselves provide the mechanism for sharing. The most
//! common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics
//! are often used for lazy global initialization.
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     thread::spawn(move|| {
//!         spinlock_clone.store(0, Ordering::SeqCst);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::SeqCst) != 0 {}
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```
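//!
//! One-shot lazy global initialization (a sketch in the same style; the
//! `initialize` function and `STARTED` flag are illustrative names, not items
//! from this module):
//!
//! ```
//! use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
//!
//! static STARTED: AtomicBool = ATOMIC_BOOL_INIT;
//!
//! fn initialize() { /* run exactly once */ }
//!
//! // `swap` returns the previous value, so exactly one caller observes
//! // `false` and runs the initializer.
//! if !STARTED.swap(true, Ordering::SeqCst) {
//!     initialize();
//! }
//! ```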

#![stable(feature = "rust1", since = "1.0.0")]

use self::Ordering::*;

use marker::{Send, Sync};

use intrinsics;
use cell::UnsafeCell;

use default::Default;
use fmt;

/// A boolean type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    v: UnsafeCell<usize>,
}

impl Default for AtomicBool {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

unsafe impl Sync for AtomicBool {}

/// A signed integer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicIsize {
    v: UnsafeCell<isize>,
}

impl Default for AtomicIsize {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

unsafe impl Sync for AtomicIsize {}

/// An unsigned integer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicUsize {
    v: UnsafeCell<usize>,
}

impl Default for AtomicUsize {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

unsafe impl Sync for AtomicUsize {}

/// A raw pointer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

impl<T> Default for AtomicPtr<T> {
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(::ptr::null_mut())
    }
}

unsafe impl<T> Send for AtomicPtr<T> {}
unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to another thread that performs a load with `Acquire` ordering
    /// on the same value.
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in another thread.
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}
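
// A usage sketch (illustrative only, not part of this module): a `Release`
// store publishing data to an `Acquire` load in another thread. `DATA` and
// `READY` are hypothetical statics for this example.
//
// ```
// use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
// use std::thread;
//
// static DATA: AtomicUsize = ATOMIC_USIZE_INIT;
// static READY: AtomicUsize = ATOMIC_USIZE_INIT;
//
// thread::spawn(|| {
//     DATA.store(42, Ordering::Relaxed);
//     READY.store(1, Ordering::Release); // publishes the write to DATA
// });
//
// while READY.load(Ordering::Acquire) == 0 {} // synchronizes with the store
// assert_eq!(DATA.load(Ordering::Relaxed), 42);
// ```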

/// An `AtomicBool` initialized to `false`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
/// An `AtomicIsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
/// An `AtomicUsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);

// NB: This must be -1 (0b11111111...) so that the bitwise NAND behind
// `fetch_nand` stays canonical: `!(UINT_TRUE & UINT_TRUE)` is `0`, and
// `!(UINT_TRUE & 0)` is `UINT_TRUE` again.
const UINT_TRUE: usize = !0;

impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: bool) -> AtomicBool {
        // `-(v as isize) as usize` is `UINT_TRUE` (all ones) for `true` and
        // `0` for `false`; a `const fn` body cannot branch.
        AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) }
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) > 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the bool, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_swap(self.v.get(), val, order) > 0 }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        let current = if current { UINT_TRUE } else { 0 };
        let new = if new { UINT_TRUE } else { 0 };

        unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) > 0 }
    }
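
    // A usage sketch (illustrative, not part of this module): a try-lock
    // built on `compare_and_swap`, where `false` means unlocked. The names
    // are hypothetical.
    //
    // ```
    // use std::sync::atomic::{AtomicBool, Ordering};
    //
    // let locked = AtomicBool::new(false);
    //
    // // Attempt to acquire: only the caller that flips false -> true wins.
    // if !locked.compare_and_swap(false, true, Ordering::SeqCst) {
    //     // ... critical section ...
    //     locked.store(false, Ordering::SeqCst); // release
    // }
    // ```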

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_and(self.v.get(), val, order) > 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_nand(self.v.get(), val, order) > 0 }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_or(self.v.get(), val, order) > 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_xor(self.v.get(), val, order) > 0 }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl AtomicIsize {
    /// Creates a new `AtomicIsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicIsize;
    ///
    /// let atomic_forty_two = AtomicIsize::new(42);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: isize) -> AtomicIsize {
        AtomicIsize { v: UnsafeCell::new(v) }
    }

    /// Loads a value from the isize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> isize {
        unsafe { atomic_load(self.v.get(), order) }
    }

    /// Stores a value into the isize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// some_isize.store(10, Ordering::Relaxed);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: isize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the isize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// Stores a value into the `isize` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: isize, new: isize, order: Ordering) -> isize {
        unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
    }

    /// Adds an isize to the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_add(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtracts an isize from the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), -10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl AtomicUsize {
    /// Creates a new `AtomicUsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicUsize;
    ///
    /// let atomic_forty_two = AtomicUsize::new(42);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: usize) -> AtomicUsize {
        AtomicUsize { v: UnsafeCell::new(v) }
    }

    /// Loads a value from the usize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> usize {
        unsafe { atomic_load(self.v.get(), order) }
    }

    /// Stores a value into the usize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// some_usize.store(10, Ordering::Relaxed);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: usize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the usize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.swap(10, Ordering::Relaxed), 5);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// Stores a value into the `usize` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_usize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: usize, new: usize, order: Ordering) -> usize {
        unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
    }
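
    // A usage sketch (illustrative, not part of this module): the classic
    // compare-and-swap retry loop, here doubling a counter atomically. The
    // variable names are hypothetical.
    //
    // ```
    // use std::sync::atomic::{AtomicUsize, Ordering};
    //
    // let counter = AtomicUsize::new(3);
    // let mut old = counter.load(Ordering::Relaxed);
    // loop {
    //     let new = old * 2;
    //     let prev = counter.compare_and_swap(old, new, Ordering::SeqCst);
    //     if prev == old { break }  // swap succeeded
    //     old = prev;               // raced with another thread; retry
    // }
    // assert_eq!(counter.load(Ordering::Relaxed), 6);
    // ```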

    /// Adds to the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtracts from the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(10);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 10);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }

    /// Loads a value from the pointer.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe {
            atomic_load(self.p.get() as *mut usize, order) as *mut T
        }
    }

    /// Stores a value into the pointer.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); }
    }

    /// Stores a value into the pointer, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        unsafe {
            atomic_compare_and_swap(self.p.get() as *mut usize, current as usize,
                                    new as usize, order) as *mut T
        }
    }
}
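
// A usage sketch (illustrative, not part of this module): installing a new
// pointer only if the slot still holds the expected one, in the style of the
// doc examples above. The names `expected`, `replacement`, and `shared` are
// hypothetical.
//
// ```
// use std::sync::atomic::{AtomicPtr, Ordering};
//
// let value = &mut 5;
// let expected: *mut i32 = value;       // keep the raw pointer around
// let shared = AtomicPtr::new(expected);
//
// let replacement = &mut 10;
// // Succeeds because `shared` still holds `expected`.
// let old = shared.compare_and_swap(expected, replacement, Ordering::SeqCst);
// assert_eq!(old, expected);
// assert_eq!(unsafe { *shared.load(Ordering::SeqCst) }, 10);
// ```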

#[inline]
unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
    }
}

#[inline]
unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
    }
}

#[inline]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_add).
#[inline]
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_sub).
#[inline]
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val)
    }
}

#[inline]
unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old: T, new: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
        Release => intrinsics::atomic_cxchg_rel(dst, old, new),
        AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        SeqCst => intrinsics::atomic_cxchg(dst, old, new),
    }
}

#[inline]
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val)
    }
}

#[inline]
unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_nand_acq(dst, val),
        Release => intrinsics::atomic_nand_rel(dst, val),
        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
        SeqCst => intrinsics::atomic_nand(dst, val)
    }
}

#[inline]
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val)
    }
}

#[inline]
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val)
    }
}

/// An atomic fence.
///
/// A fence 'A' which has `Release` ordering semantics, synchronizes with a
/// fence 'B' with (at least) `Acquire` semantics, if and only if there exist
/// atomic operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is synchronized before B and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
/// with a fence.
///
/// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
/// and `Release` semantics, participates in the global program order of the
/// other `SeqCst` operations and/or fences.
///
/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
///
/// # Panics
///
/// Panics if `order` is `Relaxed`.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence")
        }
    }
}

macro_rules! impl_Debug {
    ($($t:ident)*) => ($(
        #[stable(feature = "atomic_debug", since = "1.3.0")]
        impl fmt::Debug for $t {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($t)).field(&self.load(Ordering::SeqCst)).finish()
            }
        }
    )*);
}

impl_Debug!{ AtomicUsize AtomicIsize AtomicBool }

#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
    }
}
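
// A usage sketch (illustrative, not part of this module): the tuple-style
// output these `Debug` impls produce.
//
// ```
// use std::sync::atomic::AtomicUsize;
//
// let v = AtomicUsize::new(5);
// assert_eq!(format!("{:?}", v), "AtomicUsize(5)");
// ```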