// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Atomic types
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including [`AtomicBool`], [`AtomicIsize`], and [`AtomicUsize`].
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! [`AtomicBool`]: struct.AtomicBool.html
//! [`AtomicIsize`]: struct.AtomicIsize.html
//! [`AtomicUsize`]: struct.AtomicUsize.html
//!
//! Each method takes an [`Ordering`] which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1]. For more information see the [nomicon][2].
//!
//! [`Ordering`]: enum.Ordering.html
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//! [2]: ../../../nomicon/atomics.html
//!
//! Atomic variables are safe to share between threads (they implement [`Sync`])
//! but they do not themselves provide the mechanism for sharing and follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of rust.
//! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
//! atomically-reference-counted shared pointer).
//!
//! [`Sync`]: ../../marker/trait.Sync.html
//! [arc]: ../../../std/sync/struct.Arc.html
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like [`ATOMIC_BOOL_INIT`]. Atomic statics
//! are often used for lazy global initialization.
//!
//! [`ATOMIC_BOOL_INIT`]: constant.ATOMIC_BOOL_INIT.html
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     let thread = thread::spawn(move|| {
//!         spinlock_clone.store(0, Ordering::SeqCst);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::SeqCst) != 0 {}
//!
//!     if let Err(panic) = thread.join() {
//!         println!("Thread had an error: {:?}", panic);
//!     }
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```
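//!
//! A sketch of the "lazy global initialization" use mentioned above: lazily
//! compute and cache a value in an atomic static (the arithmetic stands in
//! for any expensive, idempotent computation whose result is never `0`):
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static CACHED: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! let mut value = CACHED.load(Ordering::Relaxed);
//! if value == 0 {
//!     // Racing threads may each compute the value, but they all store the
//!     // same result, so whichever store wins is fine.
//!     value = 2 * 3 * 7; // stand-in for an expensive, idempotent computation
//!     CACHED.store(value, Ordering::Relaxed);
//! }
//! assert_eq!(value, 42);
//! assert_eq!(CACHED.load(Ordering::Relaxed), 42);
//! ```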

#![stable(feature = "rust1", since = "1.0.0")]
#![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))]
#![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))]

use self::Ordering::*;

use intrinsics;
use cell::UnsafeCell;
use fmt;

/// Save power or switch hyperthreads in a busy-wait spin-loop.
///
/// This function is deliberately more primitive than
/// [`std::thread::yield_now`](../../../std/thread/fn.yield_now.html) and
/// does not directly yield to the system's scheduler.
/// In some cases it might be useful to use a combination of both functions.
/// Careful benchmarking is advised.
///
/// On some platforms this function may not do anything at all.
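///
/// # Examples
///
/// A sketch of a busy-wait loop that hints to the processor while it waits
/// for another thread to set a flag (the flag itself is illustrative):
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{spin_loop_hint, AtomicBool, Ordering};
/// use std::thread;
///
/// let ready = Arc::new(AtomicBool::new(false));
/// let ready_clone = ready.clone();
/// let worker = thread::spawn(move || {
///     ready_clone.store(true, Ordering::Release);
/// });
///
/// // Spin until the worker signals readiness, telling the CPU that we are
/// // in a spin loop so it can save power or yield the hyperthread.
/// while !ready.load(Ordering::Acquire) {
///     spin_loop_hint();
/// }
/// # worker.join().unwrap();
/// ```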
#[inline]
#[stable(feature = "spin_loop_hint", since = "1.24.0")]
pub fn spin_loop_hint() {
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    unsafe {
        asm!("pause" ::: "memory" : "volatile");
    }

    #[cfg(target_arch = "aarch64")]
    unsafe {
        asm!("yield" ::: "memory" : "volatile");
    }
}

/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a [`bool`].
///
/// [`bool`]: ../../../std/primitive.bool.html
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    v: UnsafeCell<u8>,
}

#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicBool {
    /// Creates an `AtomicBool` initialized to `false`.
    fn default() -> Self {
        Self::new(false)
    }
}

// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}

/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
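///
/// # Examples
///
/// A sketch of publishing a heap allocation through an `AtomicPtr`
/// (`Box::into_raw` hands the allocation to the atomic, and the hidden line
/// at the end reclaims it):
///
/// ```
/// use std::ptr;
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let shared = AtomicPtr::new(ptr::null_mut());
///
/// // Publish a boxed value by storing its raw pointer.
/// let boxed = Box::into_raw(Box::new(42));
/// shared.store(boxed, Ordering::Release);
///
/// // A reader can now load the pointer; dereferencing it is unsafe because
/// // the type system no longer tracks ownership of the allocation.
/// let loaded = shared.load(Ordering::Acquire);
/// assert!(!loaded.is_null());
/// assert_eq!(unsafe { *loaded }, 42);
/// # unsafe { Box::from_raw(loaded) };
/// ```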
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for AtomicPtr<T> {
    /// Creates a null `AtomicPtr<T>`.
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(::ptr::null_mut())
    }
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
///
/// For more information see the [nomicon].
///
/// [nomicon]: ../../../nomicon/atomics.html
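///
/// # Examples
///
/// A sketch of the release/acquire "message passing" pattern: a writer
/// publishes data with a `Release` store and a reader waits for it with
/// `Acquire` loads (the two variables here are illustrative):
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// use std::thread;
///
/// let data = Arc::new(AtomicUsize::new(0));
/// let ready = Arc::new(AtomicBool::new(false));
///
/// let (data_w, ready_w) = (data.clone(), ready.clone());
/// let writer = thread::spawn(move || {
///     data_w.store(42, Ordering::Relaxed);
///     // The `Release` store publishes the write above.
///     ready_w.store(true, Ordering::Release);
/// });
///
/// // Once an `Acquire` load observes `true`, the write to `data` made before
/// // the matching `Release` store is guaranteed to be visible.
/// while !ready.load(Ordering::Acquire) {}
/// assert_eq!(data.load(Ordering::Relaxed), 42);
/// # writer.join().unwrap();
/// ```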
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    ///
    /// Corresponds to LLVM's [`Monotonic`] ordering.
    ///
    /// [`Monotonic`]: http://llvm.org/docs/Atomics.html#monotonic
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to the other threads that perform a load with [`Acquire`] ordering
    /// on the same value.
    ///
    /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with [`Release`] ordering on the same value
    /// in other threads.
    ///
    /// [`Release`]: http://llvm.org/docs/Atomics.html#release
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// Has the effects of both [`Acquire`] and [`Release`] together.
    ///
    /// This ordering is only applicable for operations that combine both loads and stores.
    ///
    /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
    ///
    /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire
    /// [`Release`]: http://llvm.org/docs/Atomics.html#release
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
    // Prevent exhaustive matching to allow for future extension
    #[doc(hidden)]
    #[unstable(feature = "future_atomic_orderings", issue = "0")]
    __Nonexhaustive,
}

/// An [`AtomicBool`] initialized to `false`.
///
/// [`AtomicBool`]: struct.AtomicBool.html
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);

#[cfg(target_has_atomic = "8")]
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: bool) -> AtomicBool {
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }

    /// Returns a mutable reference to the underlying [`bool`].
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// [`bool`]: ../../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = AtomicBool::new(true);
    /// assert_eq!(*some_bool.get_mut(), true);
    /// *some_bool.get_mut() = false;
    /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut bool {
        unsafe { &mut *(self.v.get() as *mut bool) }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let some_bool = AtomicBool::new(true);
    /// assert_eq!(some_bool.into_inner(), true);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn into_inner(self) -> bool {
        self.v.into_inner() != 0
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) != 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        unsafe {
            atomic_store(self.v.get(), val as u8, order);
        }
    }

    /// Stores a value into the bool, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`bool`]: ../../../std/primitive.bool.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the
    /// operation succeeds while the second describes the required ordering when the
    /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and must
    /// be equivalent or weaker than the success ordering.
    ///
    /// [`bool`]: ../../../std/primitive.bool.html
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: bool,
                            new: bool,
                            success: Ordering,
                            failure: Ordering)
                            -> Result<bool, bool> {
        match unsafe {
            atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or
    /// weaker than the success ordering.
    ///
    /// [`bool`]: ../../../std/primitive.bool.html
    /// [`compare_exchange`]: #method.compare_exchange
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange_weak(&self,
                                 current: bool,
                                 new: bool,
                                 success: Ordering,
                                 failure: Ordering)
                                 -> Result<bool, bool> {
        match unsafe {
            atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        // We can't use atomic_nand here because it can result in a bool with
        // an invalid value. This happens because the atomic operation is done
        // with an 8-bit integer internally, which would set the upper 7 bits.
        // So we just use fetch_xor or swap instead.
        if val {
            // !(x & true) == !x
            // We must invert the bool.
            self.fetch_xor(true, order)
        } else {
            // !(x & false) == true
            // We must set the bool to true.
            self.swap(true, order)
        }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
    }
}

#[cfg(target_has_atomic = "ptr")]
impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }

    /// Returns a mutable reference to the underlying pointer.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
    /// *atomic_ptr.get_mut() = &mut 5;
    /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut *mut T {
        unsafe { &mut *self.p.get() }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let atomic_ptr = AtomicPtr::new(&mut 5);
    /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn into_inner(self) -> *mut T {
        self.p.into_inner()
    }

    /// Loads a value from the pointer.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
    }

    /// Stores a value into the pointer.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// [`Acquire`]: enum.Ordering.html#variant.Acquire
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe {
            atomic_store(self.p.get() as *mut usize, ptr as usize, order);
        }
    }

    /// Stores a value into the pointer, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`Ordering`]: enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if
    /// the operation succeeds while the second describes the required ordering when
    /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`]
    /// and must be equivalent or weaker than the success ordering.
    ///
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
    ///                                       Ordering::SeqCst, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: *mut T,
                            new: *mut T,
                            success: Ordering,
                            failure: Ordering)
                            -> Result<*mut T, *mut T> {
        unsafe {
            let res = atomic_compare_exchange(self.p.get() as *mut usize,
                                              current as usize,
                                              new as usize,
                                              success,
                                              failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or
    /// weaker than the success ordering.
    ///
    /// [`compare_exchange`]: #method.compare_exchange
    /// [`Ordering`]: enum.Ordering.html
    /// [`Release`]: enum.Ordering.html#variant.Release
    /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let some_ptr = AtomicPtr::new(&mut 5);
    ///
    /// let new = &mut 10;
    /// let mut old = some_ptr.load(Ordering::Relaxed);
    /// loop {
    ///     match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange_weak(&self,
                                 current: *mut T,
                                 new: *mut T,
                                 success: Ordering,
                                 failure: Ordering)
                                 -> Result<*mut T, *mut T> {
        unsafe {
            let res = atomic_compare_exchange_weak(self.p.get() as *mut usize,
                                                   current as usize,
                                                   new as usize,
                                                   success,
                                                   failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }
}

#[cfg(target_has_atomic = "8")]
#[stable(feature = "atomic_bool_from", since = "1.24.0")]
impl From<bool> for AtomicBool {
    #[inline]
    fn from(b: bool) -> Self { Self::new(b) }
}

#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "atomic_from", since = "1.23.0")]
impl<T> From<*mut T> for AtomicPtr<T> {
    #[inline]
    fn from(p: *mut T) -> Self { Self::new(p) }
}

#[cfg(target_has_atomic = "ptr")]
macro_rules! atomic_int {
    ($stable:meta,
     $stable_cxchg:meta,
     $stable_debug:meta,
     $stable_access:meta,
     $stable_from:meta,
     $stable_nand:meta,
     $s_int_type:expr, $int_ref:expr,
     $extra_feature:expr,
     $min_fn:ident, $max_fn:ident,
     $int_type:ident $atomic_type:ident $atomic_init:ident) => {
        /// An integer type which can be safely shared between threads.
        ///
        /// This type has the same in-memory representation as the underlying
        /// integer type, [`
        #[doc = $s_int_type]
        /// `](
        #[doc = $int_ref]
        /// ). For more about the differences between atomic types and
        /// non-atomic types, please see the [module-level documentation].
        ///
        /// [module-level documentation]: index.html
        #[$stable]
        pub struct $atomic_type {
            v: UnsafeCell<$int_type>,
        }

        /// An atomic integer initialized to `0`.
        #[$stable]
        pub const $atomic_init: $atomic_type = $atomic_type::new(0);

        #[$stable]
        impl Default for $atomic_type {
            fn default() -> Self {
                Self::new(Default::default())
            }
        }

        #[$stable_from]
        impl From<$int_type> for $atomic_type {
            #[inline]
            fn from(v: $int_type) -> Self { Self::new(v) }
        }

        #[$stable_debug]
        impl fmt::Debug for $atomic_type {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
            }
        }

        // Send is implicitly implemented.
        #[$stable]
        unsafe impl Sync for $atomic_type {}

        impl $atomic_type {
            doc_comment! {
                concat!("Creates a new atomic integer.

# Examples

```
", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";

let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
```"),
                #[inline]
                #[$stable]
                pub const fn new(v: $int_type) -> Self {
                    $atomic_type {v: UnsafeCell::new(v)}
                }
            }

            doc_comment! {
                concat!("Returns a mutable reference to the underlying integer.

This is safe because the mutable reference guarantees that no other threads are
concurrently accessing the atomic data.

# Examples

```
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let mut some_var = ", stringify!($atomic_type), "::new(10);
assert_eq!(*some_var.get_mut(), 10);
*some_var.get_mut() = 5;
assert_eq!(some_var.load(Ordering::SeqCst), 5);
```"),
                #[inline]
                #[$stable_access]
                pub fn get_mut(&mut self) -> &mut $int_type {
                    unsafe { &mut *self.v.get() }
                }
            }

            doc_comment! {
                concat!("Consumes the atomic and returns the contained value.

This is safe because passing `self` by value guarantees that no other threads are
concurrently accessing the atomic data.

# Examples

```
", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";

let some_var = ", stringify!($atomic_type), "::new(5);
assert_eq!(some_var.into_inner(), 5);
```"),
                #[inline]
                #[$stable_access]
                pub fn into_inner(self) -> $int_type {
                    self.v.into_inner()
                }
            }

            doc_comment! {
                concat!("Loads a value from the atomic integer.

`load` takes an [`Ordering`] argument which describes the memory ordering of this operation.

# Panics

Panics if `order` is [`Release`] or [`AcqRel`].

[`Ordering`]: enum.Ordering.html
[`Release`]: enum.Ordering.html#variant.Release
[`AcqRel`]: enum.Ordering.html#variant.AcqRel

# Examples

```
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let some_var = ", stringify!($atomic_type), "::new(5);

assert_eq!(some_var.load(Ordering::Relaxed), 5);
```"),
                #[inline]
                #[$stable]
                pub fn load(&self, order: Ordering) -> $int_type {
                    unsafe { atomic_load(self.v.get(), order) }
                }
            }

            doc_comment! {
                concat!("Stores a value into the atomic integer.

`store` takes an [`Ordering`] argument which describes the memory ordering of this operation.

[`Ordering`]: enum.Ordering.html

# Examples

```
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let some_var = ", stringify!($atomic_type), "::new(5);

some_var.store(10, Ordering::Relaxed);
assert_eq!(some_var.load(Ordering::Relaxed), 10);
```

# Panics

Panics if `order` is [`Acquire`] or [`AcqRel`].

[`Acquire`]: enum.Ordering.html#variant.Acquire
[`AcqRel`]: enum.Ordering.html#variant.AcqRel"),
                #[inline]
                #[$stable]
                pub fn store(&self, val: $int_type, order: Ordering) {
                    unsafe { atomic_store(self.v.get(), val, order); }
                }
            }

            doc_comment! {
                concat!("Stores a value into the atomic integer, returning the previous value.

`swap` takes an [`Ordering`] argument which describes the memory ordering of this operation.

[`Ordering`]: enum.Ordering.html

# Examples

```
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let some_var = ", stringify!($atomic_type), "::new(5);

assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
```"),
                #[inline]
                #[$stable]
                pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
                    unsafe { atomic_swap(self.v.get(), val, order) }
                }
            }

            doc_comment! {
                concat!("Stores a value into the atomic integer if the current value is the same as
the `current` value.

The return value is always the previous value. If it is equal to `current`, then the
value was updated.

`compare_and_swap` also takes an [`Ordering`] argument which describes the memory
ordering of this operation.

[`Ordering`]: enum.Ordering.html

# Examples

```
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let some_var = ", stringify!($atomic_type), "::new(5);

assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
assert_eq!(some_var.load(Ordering::Relaxed), 10);

assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
assert_eq!(some_var.load(Ordering::Relaxed), 10);
```"),
                #[inline]
                #[$stable]
                pub fn compare_and_swap(&self,
                                        current: $int_type,
                                        new: $int_type,
                                        order: Ordering) -> $int_type {
                    match self.compare_exchange(current,
                                                new,
                                                order,
                                                strongest_failure_ordering(order)) {
                        Ok(x) => x,
                        Err(x) => x,
                    }
                }
            }

            doc_comment! {
                concat!("Stores a value into the atomic integer if the current value is the same as
the `current` value.

The return value is a result indicating whether the new value was written and
containing the previous value. On success this value is guaranteed to be equal to
`current`.

`compare_exchange` takes two [`Ordering`] arguments to describe the memory
ordering of this operation. The first describes the required ordering if
the operation succeeds while the second describes the required ordering when
the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
must be equivalent or weaker than the success ordering.

[`Ordering`]: enum.Ordering.html
[`Release`]: enum.Ordering.html#variant.Release
[`AcqRel`]: enum.Ordering.html#variant.AcqRel

# Examples

```
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let some_var = ", stringify!($atomic_type), "::new(5);

assert_eq!(some_var.compare_exchange(5, 10,
                                     Ordering::Acquire,
                                     Ordering::Relaxed),
           Ok(5));
assert_eq!(some_var.load(Ordering::Relaxed), 10);

assert_eq!(some_var.compare_exchange(6, 12,
                                     Ordering::SeqCst,
                                     Ordering::Acquire),
           Err(10));
assert_eq!(some_var.load(Ordering::Relaxed), 10);
```"),
                #[inline]
                #[$stable_cxchg]
                pub fn compare_exchange(&self,
                                        current: $int_type,
                                        new: $int_type,
                                        success: Ordering,
                                        failure: Ordering) -> Result<$int_type, $int_type> {
                    unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
                }
            }

            doc_comment! {
                concat!("Stores a value into the atomic integer if the current value is the same as
the `current` value.

Unlike [`compare_exchange`], this function is allowed to spuriously fail even
when the comparison succeeds, which can result in more efficient code on some
platforms. The return value is a result indicating whether the new value was
written and containing the previous value.

`compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
ordering of this operation. The first describes the required ordering if the
operation succeeds while the second describes the required ordering when the
operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and
must be equivalent or weaker than the success ordering.

[`compare_exchange`]: #method.compare_exchange
[`Ordering`]: enum.Ordering.html
[`Release`]: enum.Ordering.html#variant.Release
[`AcqRel`]: enum.Ordering.html#variant.AcqRel

# Examples

```
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let val = ", stringify!($atomic_type), "::new(4);

let mut old = val.load(Ordering::Relaxed);
loop {
    let new = old * 2;
    match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
        Ok(_) => break,
        Err(x) => old = x,
    }
}
```"),
                #[inline]
                #[$stable_cxchg]
                pub fn compare_exchange_weak(&self,
                                             current: $int_type,
                                             new: $int_type,
                                             success: Ordering,
                                             failure: Ordering) -> Result<$int_type, $int_type> {
                    unsafe {
                        atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
                    }
                }
            }

            doc_comment! {
                concat!("Adds to the current value, returning the previous value.

This operation wraps around on overflow.

# Examples

```
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let foo = ", stringify!($atomic_type), "::new(0);
assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
assert_eq!(foo.load(Ordering::SeqCst), 10);
```"),
                #[inline]
                #[$stable]
                pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
                    unsafe { atomic_add(self.v.get(), val, order) }
                }
            }

            doc_comment! {
                concat!("Subtracts from the current value, returning the previous value.

This operation wraps around on overflow.

# Examples

```
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let foo = ", stringify!($atomic_type), "::new(20);
assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
assert_eq!(foo.load(Ordering::SeqCst), 10);
```"),
                #[inline]
                #[$stable]
                pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
                    unsafe { atomic_sub(self.v.get(), val, order) }
                }
            }

            doc_comment! {
                concat!("Bitwise \"and\" with the current value.

Performs a bitwise \"and\" operation on the current value and the argument `val`, and
sets the new value to the result.

Returns the previous value.

# Examples

```
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let foo = ", stringify!($atomic_type), "::new(0b101101);
assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
```"),
                #[inline]
                #[$stable]
                pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                    unsafe { atomic_and(self.v.get(), val, order) }
                }
            }

            doc_comment! {
                concat!("Bitwise \"nand\" with the current value.

Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
sets the new value to the result.

Returns the previous value.

# Examples

```
", $extra_feature, "
use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let foo = ", stringify!($atomic_type), "::new(0x13);
assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
```"),
                #[inline]
                #[$stable_nand]
                pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
                    unsafe { atomic_nand(self.v.get(), val, order) }
                }
            }

            doc_comment! {
                concat!("Bitwise \"or\" with the current value.

Performs a bitwise \"or\" operation on the current value and the argument `val`, and
sets the new value to the result.

Returns the previous value.

# Examples

```
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let foo = ", stringify!($atomic_type), "::new(0b101101);
assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
```"),
                #[inline]
                #[$stable]
                pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                    unsafe { atomic_or(self.v.get(), val, order) }
                }
            }

            doc_comment! {
                concat!("Bitwise \"xor\" with the current value.

Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
sets the new value to the result.

Returns the previous value.

# Examples

```
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let foo = ", stringify!($atomic_type), "::new(0b101101);
assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
```"),
                #[inline]
                #[$stable]
                pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                    unsafe { atomic_xor(self.v.get(), val, order) }
                }
            }

            doc_comment! {
                concat!("Fetches the value, and applies a function to it that returns an optional
new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
`Err(previous_value)`.

Note: This may call the function multiple times if the value has been changed from other threads in
the meantime, as long as the function returns `Some(_)`, but the function will have been applied
only once to the stored value.

# Examples

```rust
#![feature(no_more_cas)]
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let x = ", stringify!($atomic_type), "::new(7);
assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7));
assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7));
assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8));
assert_eq!(x.load(Ordering::SeqCst), 9);
```"),
                #[inline]
                #[unstable(feature = "no_more_cas",
                           reason = "no more CAS loops in user code",
                           issue = "48655")]
                pub fn fetch_update<F>(&self,
                                       mut f: F,
                                       fetch_order: Ordering,
                                       set_order: Ordering) -> Result<$int_type, $int_type>
                where F: FnMut($int_type) -> Option<$int_type> {
                    let mut prev = self.load(fetch_order);
                    while let Some(next) = f(prev) {
                        match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
                            x @ Ok(_) => return x,
                            Err(next_prev) => prev = next_prev
                        }
                    }
                    Err(prev)
                }
            }

            doc_comment! {
                concat!("Maximum with the current value.

Finds the maximum of the current value and the argument `val`, and
sets the new value to the result.

Returns the previous value.

# Examples

```
#![feature(atomic_min_max)]
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let foo = ", stringify!($atomic_type), "::new(23);
assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
assert_eq!(foo.load(Ordering::SeqCst), 42);
```

If you want to obtain the maximum value in one step, you can use the following:

```
#![feature(atomic_min_max)]
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let foo = ", stringify!($atomic_type), "::new(23);
let bar = 42;
let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
assert!(max_foo == 42);
```"),
                #[inline]
                #[unstable(feature = "atomic_min_max",
                           reason = "easier and faster min/max than writing manual CAS loop",
                           issue = "48655")]
                pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
                    unsafe { $max_fn(self.v.get(), val, order) }
                }
            }

            doc_comment! {
                concat!("Minimum with the current value.

Finds the minimum of the current value and the argument `val`, and
sets the new value to the result.

Returns the previous value.

# Examples

```
#![feature(atomic_min_max)]
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let foo = ", stringify!($atomic_type), "::new(23);
assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
assert_eq!(foo.load(Ordering::Relaxed), 23);
assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
assert_eq!(foo.load(Ordering::Relaxed), 22);
```

If you want to obtain the minimum value in one step, you can use the following:

```
#![feature(atomic_min_max)]
", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};

let foo = ", stringify!($atomic_type), "::new(23);
let bar = 12;
let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
assert_eq!(min_foo, 12);
```"),
                #[inline]
                #[unstable(feature = "atomic_min_max",
                           reason = "easier and faster min/max than writing manual CAS loop",
                           issue = "48655")]
                pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
                    unsafe { $min_fn(self.v.get(), val, order) }
                }
            }

        }
    }
}
1547
1548 #[cfg(target_has_atomic = "8")]
1549 atomic_int! {
1550 unstable(feature = "integer_atomics", issue = "32976"),
1551 unstable(feature = "integer_atomics", issue = "32976"),
1552 unstable(feature = "integer_atomics", issue = "32976"),
1553 unstable(feature = "integer_atomics", issue = "32976"),
1554 unstable(feature = "integer_atomics", issue = "32976"),
1555 unstable(feature = "integer_atomics", issue = "32976"),
1556 "i8", "../../../std/primitive.i8.html",
1557 "#![feature(integer_atomics)]\n\n",
1558 atomic_min, atomic_max,
1559 i8 AtomicI8 ATOMIC_I8_INIT
1560 }
1561 #[cfg(target_has_atomic = "8")]
1562 atomic_int! {
1563 unstable(feature = "integer_atomics", issue = "32976"),
1564 unstable(feature = "integer_atomics", issue = "32976"),
1565 unstable(feature = "integer_atomics", issue = "32976"),
1566 unstable(feature = "integer_atomics", issue = "32976"),
1567 unstable(feature = "integer_atomics", issue = "32976"),
1568 unstable(feature = "integer_atomics", issue = "32976"),
1569 "u8", "../../../std/primitive.u8.html",
1570 "#![feature(integer_atomics)]\n\n",
1571 atomic_umin, atomic_umax,
1572 u8 AtomicU8 ATOMIC_U8_INIT
1573 }
1574 #[cfg(target_has_atomic = "16")]
1575 atomic_int! {
1576 unstable(feature = "integer_atomics", issue = "32976"),
1577 unstable(feature = "integer_atomics", issue = "32976"),
1578 unstable(feature = "integer_atomics", issue = "32976"),
1579 unstable(feature = "integer_atomics", issue = "32976"),
1580 unstable(feature = "integer_atomics", issue = "32976"),
1581 unstable(feature = "integer_atomics", issue = "32976"),
1582 "i16", "../../../std/primitive.i16.html",
1583 "#![feature(integer_atomics)]\n\n",
1584 atomic_min, atomic_max,
1585 i16 AtomicI16 ATOMIC_I16_INIT
1586 }
1587 #[cfg(target_has_atomic = "16")]
1588 atomic_int! {
1589 unstable(feature = "integer_atomics", issue = "32976"),
1590 unstable(feature = "integer_atomics", issue = "32976"),
1591 unstable(feature = "integer_atomics", issue = "32976"),
1592 unstable(feature = "integer_atomics", issue = "32976"),
1593 unstable(feature = "integer_atomics", issue = "32976"),
1594 unstable(feature = "integer_atomics", issue = "32976"),
1595 "u16", "../../../std/primitive.u16.html",
1596 "#![feature(integer_atomics)]\n\n",
1597 atomic_umin, atomic_umax,
1598 u16 AtomicU16 ATOMIC_U16_INIT
1599 }
1600 #[cfg(target_has_atomic = "32")]
1601 atomic_int! {
1602 unstable(feature = "integer_atomics", issue = "32976"),
1603 unstable(feature = "integer_atomics", issue = "32976"),
1604 unstable(feature = "integer_atomics", issue = "32976"),
1605 unstable(feature = "integer_atomics", issue = "32976"),
1606 unstable(feature = "integer_atomics", issue = "32976"),
1607 unstable(feature = "integer_atomics", issue = "32976"),
1608 "i32", "../../../std/primitive.i32.html",
1609 "#![feature(integer_atomics)]\n\n",
1610 atomic_min, atomic_max,
1611 i32 AtomicI32 ATOMIC_I32_INIT
1612 }
1613 #[cfg(target_has_atomic = "32")]
1614 atomic_int! {
1615 unstable(feature = "integer_atomics", issue = "32976"),
1616 unstable(feature = "integer_atomics", issue = "32976"),
1617 unstable(feature = "integer_atomics", issue = "32976"),
1618 unstable(feature = "integer_atomics", issue = "32976"),
1619 unstable(feature = "integer_atomics", issue = "32976"),
1620 unstable(feature = "integer_atomics", issue = "32976"),
1621 "u32", "../../../std/primitive.u32.html",
1622 "#![feature(integer_atomics)]\n\n",
1623 atomic_umin, atomic_umax,
1624 u32 AtomicU32 ATOMIC_U32_INIT
1625 }
1626 #[cfg(target_has_atomic = "64")]
1627 atomic_int! {
1628 unstable(feature = "integer_atomics", issue = "32976"),
1629 unstable(feature = "integer_atomics", issue = "32976"),
1630 unstable(feature = "integer_atomics", issue = "32976"),
1631 unstable(feature = "integer_atomics", issue = "32976"),
1632 unstable(feature = "integer_atomics", issue = "32976"),
1633 unstable(feature = "integer_atomics", issue = "32976"),
1634 "i64", "../../../std/primitive.i64.html",
1635 "#![feature(integer_atomics)]\n\n",
1636 atomic_min, atomic_max,
1637 i64 AtomicI64 ATOMIC_I64_INIT
1638 }
1639 #[cfg(target_has_atomic = "64")]
1640 atomic_int! {
1641 unstable(feature = "integer_atomics", issue = "32976"),
1642 unstable(feature = "integer_atomics", issue = "32976"),
1643 unstable(feature = "integer_atomics", issue = "32976"),
1644 unstable(feature = "integer_atomics", issue = "32976"),
1645 unstable(feature = "integer_atomics", issue = "32976"),
1646 unstable(feature = "integer_atomics", issue = "32976"),
1647 "u64", "../../../std/primitive.u64.html",
1648 "#![feature(integer_atomics)]\n\n",
1649 atomic_umin, atomic_umax,
1650 u64 AtomicU64 ATOMIC_U64_INIT
1651 }
1652 #[cfg(target_has_atomic = "ptr")]
1653 atomic_int!{
1654 stable(feature = "rust1", since = "1.0.0"),
1655 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
1656 stable(feature = "atomic_debug", since = "1.3.0"),
1657 stable(feature = "atomic_access", since = "1.15.0"),
1658 stable(feature = "atomic_from", since = "1.23.0"),
1659 stable(feature = "atomic_nand", since = "1.27.0"),
1660 "isize", "../../../std/primitive.isize.html",
1661 "",
1662 atomic_min, atomic_max,
1663 isize AtomicIsize ATOMIC_ISIZE_INIT
1664 }
1665 #[cfg(target_has_atomic = "ptr")]
1666 atomic_int!{
1667 stable(feature = "rust1", since = "1.0.0"),
1668 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
1669 stable(feature = "atomic_debug", since = "1.3.0"),
1670 stable(feature = "atomic_access", since = "1.15.0"),
1671 stable(feature = "atomic_from", since = "1.23.0"),
1672 stable(feature = "atomic_nand", since = "1.27.0"),
1673 "usize", "../../../std/primitive.usize.html",
1674 "",
1675 atomic_umin, atomic_umax,
1676 usize AtomicUsize ATOMIC_USIZE_INIT
1677 }
1678
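/// Returns the strongest failure ordering that is allowed to accompany the
/// given success ordering in a compare-exchange (used by `compare_and_swap`
/// to derive its failure ordering).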
1679 #[inline]
1680 fn strongest_failure_ordering(order: Ordering) -> Ordering {
1681 match order {
1682 Release => Relaxed,
1683 Relaxed => Relaxed,
1684 SeqCst => SeqCst,
1685 Acquire => Acquire,
1686 AcqRel => Acquire,
1687 __Nonexhaustive => __Nonexhaustive,
1688 }
1689 }
1690
1691 #[inline]
1692 unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
1693 match order {
1694 Release => intrinsics::atomic_store_rel(dst, val),
1695 Relaxed => intrinsics::atomic_store_relaxed(dst, val),
1696 SeqCst => intrinsics::atomic_store(dst, val),
1697 Acquire => panic!("there is no such thing as an acquire store"),
1698 AcqRel => panic!("there is no such thing as an acquire/release store"),
1699 __Nonexhaustive => panic!("invalid memory ordering"),
1700 }
1701 }
1702
1703 #[inline]
1704 unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
1705 match order {
1706 Acquire => intrinsics::atomic_load_acq(dst),
1707 Relaxed => intrinsics::atomic_load_relaxed(dst),
1708 SeqCst => intrinsics::atomic_load(dst),
1709 Release => panic!("there is no such thing as a release load"),
1710 AcqRel => panic!("there is no such thing as an acquire/release load"),
1711 __Nonexhaustive => panic!("invalid memory ordering"),
1712 }
1713 }
1714
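/// Returns the previous value, unconditionally storing `val` (an atomic exchange).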
1715 #[inline]
1716 unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
1717 match order {
1718 Acquire => intrinsics::atomic_xchg_acq(dst, val),
1719 Release => intrinsics::atomic_xchg_rel(dst, val),
1720 AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
1721 Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
1722 SeqCst => intrinsics::atomic_xchg(dst, val),
1723 __Nonexhaustive => panic!("invalid memory ordering"),
1724 }
1725 }
1726
1727 /// Returns the previous value (like __sync_fetch_and_add).
1728 #[inline]
1729 unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
1730 match order {
1731 Acquire => intrinsics::atomic_xadd_acq(dst, val),
1732 Release => intrinsics::atomic_xadd_rel(dst, val),
1733 AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
1734 Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
1735 SeqCst => intrinsics::atomic_xadd(dst, val),
1736 __Nonexhaustive => panic!("invalid memory ordering"),
1737 }
1738 }
1739
1740 /// Returns the previous value (like __sync_fetch_and_sub).
1741 #[inline]
1742 unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
1743 match order {
1744 Acquire => intrinsics::atomic_xsub_acq(dst, val),
1745 Release => intrinsics::atomic_xsub_rel(dst, val),
1746 AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
1747 Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
1748 SeqCst => intrinsics::atomic_xsub(dst, val),
1749 __Nonexhaustive => panic!("invalid memory ordering"),
1750 }
1751 }
1752
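/// Compares `*dst` with `old` and, if they are equal, stores `new`.
/// Returns `Ok(previous value)` on success and `Err(previous value)` on failure.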
1753 #[inline]
1754 unsafe fn atomic_compare_exchange<T>(dst: *mut T,
1755 old: T,
1756 new: T,
1757 success: Ordering,
1758 failure: Ordering)
1759 -> Result<T, T> {
1760 let (val, ok) = match (success, failure) {
1761 (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
1762 (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
1763 (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
1764 (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
1765 (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
1766 (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
1767 (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
1768 (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
1769 (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
1770 (__Nonexhaustive, _) => panic!("invalid memory ordering"),
1771 (_, __Nonexhaustive) => panic!("invalid memory ordering"),
1772 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
1773 (_, Release) => panic!("there is no such thing as a release failure ordering"),
1774 _ => panic!("a failure ordering can't be stronger than a success ordering"),
1775 };
1776 if ok { Ok(val) } else { Err(val) }
1777 }
1778
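/// Like `atomic_compare_exchange`, but may fail spuriously even when the
/// comparison succeeds (the weak form of compare-exchange).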
1779 #[inline]
1780 unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T,
1781 old: T,
1782 new: T,
1783 success: Ordering,
1784 failure: Ordering)
1785 -> Result<T, T> {
1786 let (val, ok) = match (success, failure) {
1787 (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
1788 (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
1789 (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
1790 (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
1791 (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
1792 (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
1793 (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
1794 (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
1795 (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
1796 (__Nonexhaustive, _) => panic!("invalid memory ordering"),
1797 (_, __Nonexhaustive) => panic!("invalid memory ordering"),
1798 (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
1799 (_, Release) => panic!("there is no such thing as a release failure ordering"),
1800 _ => panic!("a failure ordering can't be stronger than a success ordering"),
1801 };
1802 if ok { Ok(val) } else { Err(val) }
1803 }
1804
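/// Returns the previous value (like __sync_fetch_and_and).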
1805 #[inline]
1806 unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
1807 match order {
1808 Acquire => intrinsics::atomic_and_acq(dst, val),
1809 Release => intrinsics::atomic_and_rel(dst, val),
1810 AcqRel => intrinsics::atomic_and_acqrel(dst, val),
1811 Relaxed => intrinsics::atomic_and_relaxed(dst, val),
1812 SeqCst => intrinsics::atomic_and(dst, val),
1813 __Nonexhaustive => panic!("invalid memory ordering"),
1814 }
1815 }
1816
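/// Returns the previous value (like __sync_fetch_and_nand).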
1817 #[inline]
1818 unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
1819 match order {
1820 Acquire => intrinsics::atomic_nand_acq(dst, val),
1821 Release => intrinsics::atomic_nand_rel(dst, val),
1822 AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
1823 Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
1824 SeqCst => intrinsics::atomic_nand(dst, val),
1825 __Nonexhaustive => panic!("invalid memory ordering"),
1826 }
1827 }
1828
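/// Returns the previous value (like __sync_fetch_and_or).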
1829 #[inline]
1830 unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
1831 match order {
1832 Acquire => intrinsics::atomic_or_acq(dst, val),
1833 Release => intrinsics::atomic_or_rel(dst, val),
1834 AcqRel => intrinsics::atomic_or_acqrel(dst, val),
1835 Relaxed => intrinsics::atomic_or_relaxed(dst, val),
1836 SeqCst => intrinsics::atomic_or(dst, val),
1837 __Nonexhaustive => panic!("invalid memory ordering"),
1838 }
1839 }
1840
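/// Returns the previous value (like __sync_fetch_and_xor).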
1841 #[inline]
1842 unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
1843 match order {
1844 Acquire => intrinsics::atomic_xor_acq(dst, val),
1845 Release => intrinsics::atomic_xor_rel(dst, val),
1846 AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
1847 Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
1848 SeqCst => intrinsics::atomic_xor(dst, val),
1849 __Nonexhaustive => panic!("invalid memory ordering"),
1850 }
1851 }
1852
1853 /// Stores the signed maximum of the current and new value, returning the previous value.
1854 #[inline]
1855 unsafe fn atomic_max<T>(dst: *mut T, val: T, order: Ordering) -> T {
1856 match order {
1857 Acquire => intrinsics::atomic_max_acq(dst, val),
1858 Release => intrinsics::atomic_max_rel(dst, val),
1859 AcqRel => intrinsics::atomic_max_acqrel(dst, val),
1860 Relaxed => intrinsics::atomic_max_relaxed(dst, val),
1861 SeqCst => intrinsics::atomic_max(dst, val),
1862 __Nonexhaustive => panic!("invalid memory ordering"),
1863 }
1864 }
1865
1866 /// Stores the signed minimum of the current and new value, returning the previous value.
1867 #[inline]
1868 unsafe fn atomic_min<T>(dst: *mut T, val: T, order: Ordering) -> T {
1869 match order {
1870 Acquire => intrinsics::atomic_min_acq(dst, val),
1871 Release => intrinsics::atomic_min_rel(dst, val),
1872 AcqRel => intrinsics::atomic_min_acqrel(dst, val),
1873 Relaxed => intrinsics::atomic_min_relaxed(dst, val),
1874 SeqCst => intrinsics::atomic_min(dst, val),
1875 __Nonexhaustive => panic!("invalid memory ordering"),
1876 }
1877 }
1878
1879 /// Stores the unsigned maximum of the current and new value, returning the previous value.
1880 #[inline]
1881 unsafe fn atomic_umax<T>(dst: *mut T, val: T, order: Ordering) -> T {
1882 match order {
1883 Acquire => intrinsics::atomic_umax_acq(dst, val),
1884 Release => intrinsics::atomic_umax_rel(dst, val),
1885 AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
1886 Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
1887 SeqCst => intrinsics::atomic_umax(dst, val),
1888 __Nonexhaustive => panic!("invalid memory ordering"),
1889 }
1890 }
1891
1892 /// Stores the unsigned minimum of the current and new value, returning the previous value.
1893 #[inline]
1894 unsafe fn atomic_umin<T>(dst: *mut T, val: T, order: Ordering) -> T {
1895 match order {
1896 Acquire => intrinsics::atomic_umin_acq(dst, val),
1897 Release => intrinsics::atomic_umin_rel(dst, val),
1898 AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
1899 Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
1900 SeqCst => intrinsics::atomic_umin(dst, val),
1901 __Nonexhaustive => panic!("invalid memory ordering"),
1902 }
1903 }
1904
1905 /// An atomic fence.
1906 ///
1907 /// Depending on the specified order, a fence prevents the compiler and CPU from
1908 /// reordering certain types of memory operations around it.
1909 /// That creates synchronizes-with relationships between it and atomic operations
1910 /// or fences in other threads.
1911 ///
1912 /// A fence 'A' which has (at least) [`Release`] ordering semantics synchronizes
1913 /// with a fence 'B' with (at least) [`Acquire`] semantics if and only if there
1914 /// exist operations X and Y, both operating on some atomic object 'M', such
1915 /// that A is sequenced before X, Y is sequenced before B, and Y observes
1916 /// the change to M. This provides a happens-before dependence between A and B.
1917 ///
1918 /// ```text
1919 /// Thread 1 Thread 2
1920 ///
1921 /// fence(Release); A --------------
1922 /// x.store(3, Relaxed); X --------- |
1923 /// | |
1924 /// | |
1925 /// -------------> Y if x.load(Relaxed) == 3 {
1926 /// |-------> B fence(Acquire);
1927 /// ...
1928 /// }
1929 /// ```
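///
/// For illustration, here is a minimal sketch of the pattern in the diagram
/// above (the variable names, the use of `Arc` to share the atomic, and the
/// thread setup are illustrative only, not part of the guarantee itself):
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{fence, AtomicUsize, Ordering};
/// use std::thread;
///
/// let x = Arc::new(AtomicUsize::new(0));
/// let x2 = x.clone();
///
/// let t = thread::spawn(move || {
///     fence(Ordering::Release);       // A
///     x2.store(3, Ordering::Relaxed); // X
/// });
///
/// if x.load(Ordering::Relaxed) == 3 { // Y
///     fence(Ordering::Acquire);       // B
///     // Everything the spawned thread wrote before A is now visible here.
/// }
/// # t.join().unwrap();
/// ```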
1930 ///
1931 /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
1932 /// with a fence.
1933 ///
1934 /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
1935 /// and [`Release`] semantics, participates in the global program order of the
1936 /// other [`SeqCst`] operations and/or fences.
1937 ///
1938 /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
1939 ///
1940 /// # Panics
1941 ///
1942 /// Panics if `order` is [`Relaxed`].
1943 ///
1944 /// # Examples
1945 ///
1946 /// ```
1947 /// use std::sync::atomic::AtomicBool;
1948 /// use std::sync::atomic::fence;
1949 /// use std::sync::atomic::Ordering;
1950 ///
1951 /// // A mutual exclusion primitive based on a spinlock.
1952 /// pub struct Mutex {
1953 /// flag: AtomicBool,
1954 /// }
1955 ///
1956 /// impl Mutex {
1957 /// pub fn new() -> Mutex {
1958 /// Mutex {
1959 /// flag: AtomicBool::new(false),
1960 /// }
1961 /// }
1962 ///
1963 /// pub fn lock(&self) {
1964 /// while self.flag.compare_and_swap(false, true, Ordering::Relaxed) {}
1965 /// // This fence synchronizes-with the store in `unlock`.
1966 /// fence(Ordering::Acquire);
1967 /// }
1968 ///
1969 /// pub fn unlock(&self) {
1970 /// self.flag.store(false, Ordering::Release);
1971 /// }
1972 /// }
1973 /// ```
1974 ///
1975 /// [`Ordering`]: enum.Ordering.html
1976 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
1977 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
1978 /// [`Release`]: enum.Ordering.html#variant.Release
1979 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
1980 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
1981 #[inline]
1982 #[stable(feature = "rust1", since = "1.0.0")]
1983 pub fn fence(order: Ordering) {
1984 unsafe {
1985 match order {
1986 Acquire => intrinsics::atomic_fence_acq(),
1987 Release => intrinsics::atomic_fence_rel(),
1988 AcqRel => intrinsics::atomic_fence_acqrel(),
1989 SeqCst => intrinsics::atomic_fence(),
1990 Relaxed => panic!("there is no such thing as a relaxed fence"),
1991 __Nonexhaustive => panic!("invalid memory ordering"),
1992 }
1993 }
1994 }
1995
1996
1997 /// A compiler memory fence.
1998 ///
1999 /// `compiler_fence` does not emit any machine code, but restricts the kinds
2000 /// of memory re-ordering the compiler is allowed to do. Specifically, depending on
2001 /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
2002 /// or writes from before or after the call to the other side of the call to
2003 /// `compiler_fence`. Note that it does **not** prevent the *hardware*
2004 /// from doing such re-ordering. This is not a problem in a single-threaded
2005 /// execution context, but when other threads may modify memory at the same
2006 /// time, stronger synchronization primitives such as [`fence`] are required.
2007 ///
2008 /// The re-orderings prevented by the different ordering semantics are:
2009 ///
2010 /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
2011 /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
2012 /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
2013 /// - with [`AcqRel`], both of the above rules are enforced.
2014 ///
2015 /// `compiler_fence` is generally only useful for preventing a thread from
2016 /// racing *with itself*. That is, it matters when a given thread is executing one piece
2017 /// of code, is then interrupted, and starts executing code elsewhere
2018 /// (while still in the same thread, and conceptually still on the same
2019 /// core). In traditional programs, this can only occur when a signal
2020 /// handler is registered. In more low-level code, such situations can also
2021 /// arise when handling interrupts, when implementing green threads with
2022 /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
2023 /// discussion of [memory barriers].
2024 ///
2025 /// # Panics
2026 ///
2027 /// Panics if `order` is [`Relaxed`].
2028 ///
2029 /// # Examples
2030 ///
2031 /// Without `compiler_fence`, the `assert_eq!` in the following code
2032 /// is *not* guaranteed to succeed, despite everything happening in a single thread.
2033 /// To see why, remember that the compiler is free to swap the stores to
2034 /// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
2035 /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
2036 /// after `IS_READY` is updated, then the signal handler will see
2037 /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
2038 /// Using a `compiler_fence` remedies this situation.
2039 ///
2040 /// ```
2041 /// use std::sync::atomic::{AtomicBool, AtomicUsize};
2042 /// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT};
2043 /// use std::sync::atomic::Ordering;
2044 /// use std::sync::atomic::compiler_fence;
2045 ///
2046 /// static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT;
2047 /// static IS_READY: AtomicBool = ATOMIC_BOOL_INIT;
2048 ///
2049 /// fn main() {
2050 /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
2051 /// // prevent earlier writes from being moved beyond this point
2052 /// compiler_fence(Ordering::Release);
2053 /// IS_READY.store(true, Ordering::Relaxed);
2054 /// }
2055 ///
2056 /// fn signal_handler() {
2057 /// if IS_READY.load(Ordering::Relaxed) {
2058 /// assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
2059 /// }
2060 /// }
2061 /// ```
2062 ///
2063 /// [`fence`]: fn.fence.html
2064 /// [`Ordering`]: enum.Ordering.html
2065 /// [`Acquire`]: enum.Ordering.html#variant.Acquire
2066 /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst
2067 /// [`Release`]: enum.Ordering.html#variant.Release
2068 /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel
2069 /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
2070 /// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
2071 #[inline]
2072 #[stable(feature = "compiler_fences", since = "1.21.0")]
2073 pub fn compiler_fence(order: Ordering) {
2074 unsafe {
2075 match order {
2076 Acquire => intrinsics::atomic_singlethreadfence_acq(),
2077 Release => intrinsics::atomic_singlethreadfence_rel(),
2078 AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
2079 SeqCst => intrinsics::atomic_singlethreadfence(),
2080 Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
2081 __Nonexhaustive => panic!("invalid memory ordering"),
2082 }
2083 }
2084 }
2085
2086
2087 #[cfg(target_has_atomic = "8")]
2088 #[stable(feature = "atomic_debug", since = "1.3.0")]
2089 impl fmt::Debug for AtomicBool {
2090 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2091 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2092 }
2093 }
2094
2095 #[cfg(target_has_atomic = "ptr")]
2096 #[stable(feature = "atomic_debug", since = "1.3.0")]
2097 impl<T> fmt::Debug for AtomicPtr<T> {
2098 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2099 fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
2100 }
2101 }
2102
2103 #[cfg(target_has_atomic = "ptr")]
2104 #[stable(feature = "atomic_pointer", since = "1.24.0")]
2105 impl<T> fmt::Pointer for AtomicPtr<T> {
2106 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2107 fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)
2108 }
2109 }