]>
Commit | Line | Data |
---|---|---|
1a4d82fc JJ |
1 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT |
2 | // file at the top-level directory of this distribution and at | |
3 | // http://rust-lang.org/COPYRIGHT. | |
4 | // | |
5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | |
6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | |
7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | |
8 | // option. This file may not be copied, modified, or distributed | |
9 | // except according to those terms. | |
10 | ||
11 | //! Atomic types | |
12 | //! | |
13 | //! Atomic types provide primitive shared-memory communication between | |
14 | //! threads, and are the building blocks of other concurrent | |
15 | //! types. | |
16 | //! | |
17 | //! This module defines atomic versions of a select number of primitive | |
c34b1796 | 18 | //! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`. |
1a4d82fc JJ |
19 | //! Atomic types present operations that, when used correctly, synchronize |
20 | //! updates between threads. | |
21 | //! | |
22 | //! Each method takes an `Ordering` which represents the strength of | |
23 | //! the memory barrier for that operation. These orderings are the | |
85aaf69f | 24 | //! same as [LLVM atomic orderings][1]. |
1a4d82fc | 25 | //! |
85aaf69f | 26 | //! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations |
1a4d82fc JJ |
27 | //! |
28 | //! Atomic variables are safe to share between threads (they implement `Sync`) | |
a7813a04 XL |
29 | //! but they do not themselves provide the mechanism for sharing and follow the |
30 | //! [threading model](../../../std/thread/index.html#the-threading-model) of rust. | |
31 | //! The most common way to share an atomic variable is to put it into an `Arc` (an | |
1a4d82fc JJ |
32 | //! atomically-reference-counted shared pointer). |
33 | //! | |
34 | //! Most atomic types may be stored in static variables, initialized using | |
5bcae85e | 35 | //! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics |
1a4d82fc JJ |
36 | //! are often used for lazy global initialization. |
37 | //! | |
38 | //! | |
39 | //! # Examples | |
40 | //! | |
41 | //! A simple spinlock: | |
42 | //! | |
43 | //! ``` | |
44 | //! use std::sync::Arc; | |
85aaf69f SL |
45 | //! use std::sync::atomic::{AtomicUsize, Ordering}; |
46 | //! use std::thread; | |
1a4d82fc JJ |
47 | //! |
48 | //! fn main() { | |
85aaf69f | 49 | //! let spinlock = Arc::new(AtomicUsize::new(1)); |
1a4d82fc JJ |
50 | //! |
51 | //! let spinlock_clone = spinlock.clone(); | |
a7813a04 | 52 | //! let thread = thread::spawn(move|| { |
1a4d82fc JJ |
53 | //! spinlock_clone.store(0, Ordering::SeqCst); |
54 | //! }); | |
55 | //! | |
bd371182 | 56 | //! // Wait for the other thread to release the lock |
1a4d82fc | 57 | //! while spinlock.load(Ordering::SeqCst) != 0 {} |
a7813a04 XL |
58 | //! |
59 | //! if let Err(panic) = thread.join() { | |
60 | //! println!("Thread had an error: {:?}", panic); | |
61 | //! } | |
1a4d82fc JJ |
62 | //! } |
63 | //! ``` | |
64 | //! | |
bd371182 | 65 | //! Keep a global count of live threads: |
1a4d82fc JJ |
66 | //! |
67 | //! ``` | |
85aaf69f | 68 | //! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; |
1a4d82fc | 69 | //! |
bd371182 | 70 | //! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT; |
1a4d82fc | 71 | //! |
bd371182 AL |
72 | //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst); |
73 | //! println!("live threads: {}", old_thread_count + 1); | |
1a4d82fc JJ |
74 | //! ``` |
75 | ||
85aaf69f | 76 | #![stable(feature = "rust1", since = "1.0.0")] |
5bcae85e SL |
77 | #![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))] |
78 | #![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))] | |
1a4d82fc JJ |
79 | |
80 | use self::Ordering::*; | |
81 | ||
1a4d82fc JJ |
82 | use intrinsics; |
83 | use cell::UnsafeCell; | |
c1a9b12d | 84 | use fmt; |
9346a6ac | 85 | |
/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `bool`.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    // Stored as a `u8` (always 0 or 1, see `new`/`store`) rather than a
    // `bool` so the byte-sized atomic intrinsics can operate on it directly.
    v: UnsafeCell<u8>,
}
94 | ||
3157f602 | 95 | #[cfg(target_has_atomic = "8")] |
92a42be0 | 96 | #[stable(feature = "rust1", since = "1.0.0")] |
9346a6ac | 97 | impl Default for AtomicBool { |
c30ab7b3 | 98 | /// Creates an `AtomicBool` initialized to `false`. |
62682a34 | 99 | fn default() -> Self { |
a7813a04 | 100 | Self::new(false) |
9346a6ac AL |
101 | } |
102 | } | |
103 | ||
// Send is implicitly implemented for AtomicBool.
// SAFETY(review): sharing is sound because shared (`&self`) access goes
// through the atomic intrinsics (see `load`/`store`/`swap` below); the only
// non-atomic accesses (`get_mut`, `into_inner`) require exclusive ownership.
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}
108 | ||
/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    // The pointer itself; atomic ops act on it cast to `usize`
    // (see `load`/`store`/`swap` in the impl below).
    p: UnsafeCell<*mut T>,
}
117 | ||
3157f602 | 118 | #[cfg(target_has_atomic = "ptr")] |
92a42be0 | 119 | #[stable(feature = "rust1", since = "1.0.0")] |
d9579d0f | 120 | impl<T> Default for AtomicPtr<T> { |
9e0c209e | 121 | /// Creates a null `AtomicPtr<T>`. |
d9579d0f AL |
122 | fn default() -> AtomicPtr<T> { |
123 | AtomicPtr::new(::ptr::null_mut()) | |
124 | } | |
125 | } | |
126 | ||
// SAFETY(review): the contained `*mut T` is only a value to the atomic ops;
// no `T` is dereferenced by this type, so moving it across threads is sound.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
// SAFETY(review): shared (`&self`) access goes through the atomic intrinsics
// (see the `impl<T> AtomicPtr<T>` methods), so concurrent use is data-race free.
#[cfg(target_has_atomic = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}
133 | ||
/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations. Corresponds to LLVM's
    /// `Monotonic` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to the other threads that perform a load with `Acquire` ordering
    /// on the same value.
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in other threads.
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
    // Prevent exhaustive matching to allow for future extension
    #[doc(hidden)]
    #[unstable(feature = "future_atomic_orderings", issue = "0")]
    __Nonexhaustive,
}
174 | ||
/// An `AtomicBool` initialized to `false`.
///
/// Suitable for initializing `static` atomics (see the module docs on
/// static initializers).
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
1a4d82fc | 179 | |
#[cfg(target_has_atomic = "8")]
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: bool) -> AtomicBool {
        // The bool is stored as 0 or 1 in a `u8` so byte-sized atomic
        // intrinsics can operate on it.
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }

    /// Returns a mutable reference to the underlying `bool`.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_access)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = AtomicBool::new(true);
    /// assert_eq!(*some_bool.get_mut(), true);
    /// *some_bool.get_mut() = false;
    /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[unstable(feature = "atomic_access", issue = "35603")]
    pub fn get_mut(&mut self) -> &mut bool {
        // SAFETY(review): the stored `u8` is only ever written as `v as u8`
        // (0 or 1) by this impl, which are valid `bool` bit patterns, so the
        // pointer reinterpretation looks sound — confirm no other writer.
        unsafe { &mut *(self.v.get() as *mut bool) }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_access)]
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let some_bool = AtomicBool::new(true);
    /// assert_eq!(some_bool.into_inner(), true);
    /// ```
    #[inline]
    #[unstable(feature = "atomic_access", issue = "35603")]
    pub fn into_inner(self) -> bool {
        unsafe { self.v.into_inner() != 0 }
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) != 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        unsafe {
            atomic_store(self.v.get(), val as u8, order);
        }
    }

    /// Stores a value into the bool, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        // Delegates to `compare_exchange`; both arms collapse to the previous
        // value since this older API does not distinguish success/failure.
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
    /// operation. The first describes the required ordering if the operation succeeds while the
    /// second describes the required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: bool,
                            new: bool,
                            success: Ordering,
                            failure: Ordering)
                            -> Result<bool, bool> {
        // Convert the `u8`-level result back into bools on both arms.
        match unsafe {
            atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
    /// success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange_weak(&self,
                                 current: bool,
                                 new: bool,
                                 success: Ordering,
                                 failure: Ordering)
                                 -> Result<bool, bool> {
        match unsafe {
            atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        // Bitwise AND on 0/1 values is exactly logical AND.
        unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        // We can't use atomic_nand here because it can result in a bool with
        // an invalid value. This happens because the atomic operation is done
        // with an 8-bit integer internally, which would set the upper 7 bits.
        // So we just use a compare-exchange loop instead, which is what the
        // intrinsic actually expands to anyways on many platforms.
        let mut old = self.load(Relaxed);
        loop {
            let new = !(old && val);
            match self.compare_exchange_weak(old, new, order, Relaxed) {
                Ok(_) => break,
                Err(x) => old = x,
            }
        }
        // The value observed by the successful exchange is the "previous" one.
        old
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
    }
}
564 | ||
3157f602 | 565 | #[cfg(target_has_atomic = "ptr")] |
1a4d82fc JJ |
566 | impl<T> AtomicPtr<T> { |
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }
582 | ||
    /// Returns a mutable reference to the underlying pointer.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_access)]
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
    /// *atomic_ptr.get_mut() = &mut 5;
    /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
    /// ```
    #[inline]
    #[unstable(feature = "atomic_access", issue = "35603")]
    pub fn get_mut(&mut self) -> &mut *mut T {
        unsafe { &mut *self.p.get() }
    }
603 | ||
    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_access)]
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let atomic_ptr = AtomicPtr::new(&mut 5);
    /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
    /// ```
    #[inline]
    #[unstable(feature = "atomic_access", issue = "35603")]
    pub fn into_inner(self) -> *mut T {
        unsafe { self.p.into_inner() }
    }
623 | ||
    /// Loads a value from the pointer.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        // The pointer is round-tripped through `usize` so the integer atomic
        // intrinsic can be reused for pointer-sized loads.
        unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
    }
647 | ||
    /// Stores a value into the pointer.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        // Pointer stored via the pointer-sized integer atomic (see `load`).
        unsafe {
            atomic_store(self.p.get() as *mut usize, ptr as usize, order);
        }
    }
675 | ||
    /// Stores a value into the pointer, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
    }
697 | ||
c1a9b12d | 698 | /// Stores a value into the pointer if the current value is the same as the `current` value. |
1a4d82fc | 699 | /// |
c1a9b12d SL |
700 | /// The return value is always the previous value. If it is equal to `current`, then the value |
701 | /// was updated. | |
1a4d82fc JJ |
702 | /// |
703 | /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of | |
704 | /// this operation. | |
705 | /// | |
706 | /// # Examples | |
707 | /// | |
708 | /// ``` | |
709 | /// use std::sync::atomic::{AtomicPtr, Ordering}; | |
710 | /// | |
85aaf69f | 711 | /// let ptr = &mut 5; |
1a4d82fc JJ |
712 | /// let some_ptr = AtomicPtr::new(ptr); |
713 | /// | |
85aaf69f SL |
714 | /// let other_ptr = &mut 10; |
715 | /// let another_ptr = &mut 10; | |
1a4d82fc JJ |
716 | /// |
717 | /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed); | |
718 | /// ``` | |
719 | #[inline] | |
85aaf69f | 720 | #[stable(feature = "rust1", since = "1.0.0")] |
c1a9b12d | 721 | pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T { |
54a0048b SL |
722 | match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) { |
723 | Ok(x) => x, | |
724 | Err(x) => x, | |
725 | } | |
7453a54e SL |
726 | } |
727 | ||
    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
    /// operation. The first describes the required ordering if the operation succeeds while the
    /// second describes the required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_exchange(other_ptr, another_ptr,
    ///                                       Ordering::SeqCst, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    pub fn compare_exchange(&self,
                            current: *mut T,
                            new: *mut T,
                            success: Ordering,
                            failure: Ordering)
                            -> Result<*mut T, *mut T> {
        unsafe {
            // Pointers round-trip through `usize` for the integer CAS
            // intrinsic; the result is cast back on both arms.
            let res = atomic_compare_exchange(self.p.get() as *mut usize,
                                              current as usize,
                                              new as usize,
                                              success,
                                              failure);
            match res {
                Ok(x) => Ok(x as *mut T),
                Err(x) => Err(x as *mut T),
            }
        }
    }
7453a54e SL |
772 | |
773 | /// Stores a value into the pointer if the current value is the same as the `current` value. | |
774 | /// | |
775 | /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the | |
776 | /// comparison succeeds, which can result in more efficient code on some platforms. The | |
54a0048b SL |
777 | /// return value is a result indicating whether the new value was written and containing the |
778 | /// previous value. | |
7453a54e SL |
779 | /// |
780 | /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory | |
781 | /// ordering of this operation. The first describes the required ordering if the operation | |
782 | /// succeeds while the second describes the required ordering when the operation fails. The | |
54a0048b | 783 | /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the |
7453a54e SL |
784 | /// success ordering. |
785 | /// | |
786 | /// # Examples | |
787 | /// | |
788 | /// ``` | |
7453a54e SL |
789 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
790 | /// | |
791 | /// let some_ptr = AtomicPtr::new(&mut 5); | |
792 | /// | |
793 | /// let new = &mut 10; | |
794 | /// let mut old = some_ptr.load(Ordering::Relaxed); | |
795 | /// loop { | |
54a0048b SL |
796 | /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { |
797 | /// Ok(_) => break, | |
798 | /// Err(x) => old = x, | |
7453a54e SL |
799 | /// } |
800 | /// } | |
801 | /// ``` | |
802 | #[inline] | |
a7813a04 | 803 | #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] |
7453a54e SL |
804 | pub fn compare_exchange_weak(&self, |
805 | current: *mut T, | |
806 | new: *mut T, | |
807 | success: Ordering, | |
c30ab7b3 SL |
808 | failure: Ordering) |
809 | -> Result<*mut T, *mut T> { | |
54a0048b SL |
810 | unsafe { |
811 | let res = atomic_compare_exchange_weak(self.p.get() as *mut usize, | |
812 | current as usize, | |
813 | new as usize, | |
814 | success, | |
815 | failure); | |
816 | match res { | |
817 | Ok(x) => Ok(x as *mut T), | |
818 | Err(x) => Err(x as *mut T), | |
819 | } | |
820 | } | |
7453a54e SL |
821 | } |
822 | } | |
823 | ||
a7813a04 XL |
// Generates one atomic integer type (`$atomic_type`) wrapping `$int_type`,
// its zero-initialized constant (`$atomic_init`), and the full method set.
// The four `$stable*` metas let callers pick per-feature stability attributes.
//
// Fix: the doc examples for `fetch_and`, `fetch_or` and `fetch_xor` were
// missing their closing ``` fence, which breaks rustdoc rendering and swallows
// the attributes that follow into the code block.
macro_rules! atomic_int {
    ($stable:meta,
     $stable_cxchg:meta,
     $stable_debug:meta,
     $stable_access:meta,
     $int_type:ident $atomic_type:ident $atomic_init:ident) => {
        /// An integer type which can be safely shared between threads.
        ///
        /// This type has the same in-memory representation as the underlying integer type.
        #[$stable]
        pub struct $atomic_type {
            v: UnsafeCell<$int_type>,
        }

        /// An atomic integer initialized to `0`.
        #[$stable]
        pub const $atomic_init: $atomic_type = $atomic_type::new(0);

        #[$stable]
        impl Default for $atomic_type {
            fn default() -> Self {
                Self::new(Default::default())
            }
        }

        #[$stable_debug]
        impl fmt::Debug for $atomic_type {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($atomic_type))
                    .field(&self.load(Ordering::SeqCst))
                    .finish()
            }
        }

        // Send is implicitly implemented.
        #[$stable]
        unsafe impl Sync for $atomic_type {}

        impl $atomic_type {
            /// Creates a new atomic integer.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let atomic_forty_two = AtomicIsize::new(42);
            /// ```
            #[inline]
            #[$stable]
            pub const fn new(v: $int_type) -> Self {
                $atomic_type {v: UnsafeCell::new(v)}
            }

            /// Returns a mutable reference to the underlying integer.
            ///
            /// This is safe because the mutable reference guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// #![feature(atomic_access)]
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let mut some_isize = AtomicIsize::new(10);
            /// assert_eq!(*some_isize.get_mut(), 10);
            /// *some_isize.get_mut() = 5;
            /// assert_eq!(some_isize.load(Ordering::SeqCst), 5);
            /// ```
            #[inline]
            #[$stable_access]
            pub fn get_mut(&mut self) -> &mut $int_type {
                unsafe { &mut *self.v.get() }
            }

            /// Consumes the atomic and returns the contained value.
            ///
            /// This is safe because passing `self` by value guarantees that no other threads are
            /// concurrently accessing the atomic data.
            ///
            /// # Examples
            ///
            /// ```
            /// #![feature(atomic_access)]
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let some_isize = AtomicIsize::new(5);
            /// assert_eq!(some_isize.into_inner(), 5);
            /// ```
            #[inline]
            #[$stable_access]
            pub fn into_inner(self) -> $int_type {
                unsafe { self.v.into_inner() }
            }

            /// Loads a value from the atomic integer.
            ///
            /// `load` takes an `Ordering` argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Panics
            ///
            /// Panics if `order` is `Release` or `AcqRel`.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
            /// ```
            #[inline]
            #[$stable]
            pub fn load(&self, order: Ordering) -> $int_type {
                unsafe { atomic_load(self.v.get(), order) }
            }

            /// Stores a value into the atomic integer.
            ///
            /// `store` takes an `Ordering` argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// some_isize.store(10, Ordering::Relaxed);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            ///
            /// # Panics
            ///
            /// Panics if `order` is `Acquire` or `AcqRel`.
            #[inline]
            #[$stable]
            pub fn store(&self, val: $int_type, order: Ordering) {
                unsafe { atomic_store(self.v.get(), val, order); }
            }

            /// Stores a value into the atomic integer, returning the old value.
            ///
            /// `swap` takes an `Ordering` argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
            /// ```
            #[inline]
            #[$stable]
            pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_swap(self.v.get(), val, order) }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is always the previous value. If it is equal to `current`, then the
            /// value was updated.
            ///
            /// `compare_and_swap` also takes an `Ordering` argument which describes the memory
            /// ordering of this operation.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[inline]
            #[$stable]
            pub fn compare_and_swap(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    order: Ordering) -> $int_type {
                // Both outcomes of the exchange report the previous value.
                match self.compare_exchange(current,
                                            new,
                                            order,
                                            strongest_failure_ordering(order)) {
                    Ok(x) => x,
                    Err(x) => x,
                }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is a result indicating whether the new value was written and
            /// containing the previous value. On success this value is guaranteed to be equal to
            /// `current`.
            ///
            /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of
            /// this operation. The first describes the required ordering if the operation succeeds
            /// while the second describes the required ordering when the operation fails. The
            /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker
            /// than the success ordering.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_exchange(5, 10,
            ///                                        Ordering::Acquire,
            ///                                        Ordering::Relaxed),
            ///            Ok(5));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_exchange(6, 12,
            ///                                        Ordering::SeqCst,
            ///                                        Ordering::Acquire),
            ///            Err(10));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[inline]
            #[$stable_cxchg]
            pub fn compare_exchange(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    success: Ordering,
                                    failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
            /// comparison succeeds, which can result in more efficient code on some platforms. The
            /// return value is a result indicating whether the new value was written and containing
            /// the previous value.
            ///
            /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
            /// ordering of this operation. The first describes the required ordering if the
            /// operation succeeds while the second describes the required ordering when the
            /// operation fails. The failure ordering can't be `Release` or `AcqRel` and must be
            /// equivalent or weaker than the success ordering.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let val = AtomicIsize::new(4);
            ///
            /// let mut old = val.load(Ordering::Relaxed);
            /// loop {
            ///     let new = old * 2;
            ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
            ///         Ok(_) => break,
            ///         Err(x) => old = x,
            ///     }
            /// }
            /// ```
            #[inline]
            #[$stable_cxchg]
            pub fn compare_exchange_weak(&self,
                                         current: $int_type,
                                         new: $int_type,
                                         success: Ordering,
                                         failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe {
                    atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
                }
            }

            /// Add to the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), 10);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_add(self.v.get(), val, order) }
            }

            /// Subtract from the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), -10);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_sub(self.v.get(), val, order) }
            }

            /// Bitwise and with the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_and(self.v.get(), val, order) }
            }

            /// Bitwise or with the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_or(self.v.get(), val, order) }
            }

            /// Bitwise xor with the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_xor(self.v.get(), val, order) }
            }
        }
    }
}
1194 | ||
1195 | #[cfg(target_has_atomic = "8")] | |
1196 | atomic_int! { | |
1197 | unstable(feature = "integer_atomics", issue = "32976"), | |
1198 | unstable(feature = "integer_atomics", issue = "32976"), | |
1199 | unstable(feature = "integer_atomics", issue = "32976"), | |
9e0c209e | 1200 | unstable(feature = "integer_atomics", issue = "32976"), |
a7813a04 XL |
1201 | i8 AtomicI8 ATOMIC_I8_INIT |
1202 | } | |
1203 | #[cfg(target_has_atomic = "8")] | |
1204 | atomic_int! { | |
1205 | unstable(feature = "integer_atomics", issue = "32976"), | |
1206 | unstable(feature = "integer_atomics", issue = "32976"), | |
1207 | unstable(feature = "integer_atomics", issue = "32976"), | |
9e0c209e | 1208 | unstable(feature = "integer_atomics", issue = "32976"), |
a7813a04 XL |
1209 | u8 AtomicU8 ATOMIC_U8_INIT |
1210 | } | |
1211 | #[cfg(target_has_atomic = "16")] | |
1212 | atomic_int! { | |
1213 | unstable(feature = "integer_atomics", issue = "32976"), | |
1214 | unstable(feature = "integer_atomics", issue = "32976"), | |
1215 | unstable(feature = "integer_atomics", issue = "32976"), | |
9e0c209e | 1216 | unstable(feature = "integer_atomics", issue = "32976"), |
a7813a04 XL |
1217 | i16 AtomicI16 ATOMIC_I16_INIT |
1218 | } | |
1219 | #[cfg(target_has_atomic = "16")] | |
1220 | atomic_int! { | |
1221 | unstable(feature = "integer_atomics", issue = "32976"), | |
1222 | unstable(feature = "integer_atomics", issue = "32976"), | |
1223 | unstable(feature = "integer_atomics", issue = "32976"), | |
9e0c209e | 1224 | unstable(feature = "integer_atomics", issue = "32976"), |
a7813a04 XL |
1225 | u16 AtomicU16 ATOMIC_U16_INIT |
1226 | } | |
1227 | #[cfg(target_has_atomic = "32")] | |
1228 | atomic_int! { | |
1229 | unstable(feature = "integer_atomics", issue = "32976"), | |
1230 | unstable(feature = "integer_atomics", issue = "32976"), | |
1231 | unstable(feature = "integer_atomics", issue = "32976"), | |
9e0c209e | 1232 | unstable(feature = "integer_atomics", issue = "32976"), |
a7813a04 XL |
1233 | i32 AtomicI32 ATOMIC_I32_INIT |
1234 | } | |
1235 | #[cfg(target_has_atomic = "32")] | |
1236 | atomic_int! { | |
1237 | unstable(feature = "integer_atomics", issue = "32976"), | |
1238 | unstable(feature = "integer_atomics", issue = "32976"), | |
1239 | unstable(feature = "integer_atomics", issue = "32976"), | |
9e0c209e | 1240 | unstable(feature = "integer_atomics", issue = "32976"), |
a7813a04 XL |
1241 | u32 AtomicU32 ATOMIC_U32_INIT |
1242 | } | |
1243 | #[cfg(target_has_atomic = "64")] | |
1244 | atomic_int! { | |
1245 | unstable(feature = "integer_atomics", issue = "32976"), | |
1246 | unstable(feature = "integer_atomics", issue = "32976"), | |
1247 | unstable(feature = "integer_atomics", issue = "32976"), | |
9e0c209e | 1248 | unstable(feature = "integer_atomics", issue = "32976"), |
a7813a04 XL |
1249 | i64 AtomicI64 ATOMIC_I64_INIT |
1250 | } | |
1251 | #[cfg(target_has_atomic = "64")] | |
1252 | atomic_int! { | |
1253 | unstable(feature = "integer_atomics", issue = "32976"), | |
1254 | unstable(feature = "integer_atomics", issue = "32976"), | |
1255 | unstable(feature = "integer_atomics", issue = "32976"), | |
9e0c209e | 1256 | unstable(feature = "integer_atomics", issue = "32976"), |
a7813a04 XL |
1257 | u64 AtomicU64 ATOMIC_U64_INIT |
1258 | } | |
3157f602 | 1259 | #[cfg(target_has_atomic = "ptr")] |
a7813a04 XL |
1260 | atomic_int!{ |
1261 | stable(feature = "rust1", since = "1.0.0"), | |
1262 | stable(feature = "extended_compare_and_swap", since = "1.10.0"), | |
1263 | stable(feature = "atomic_debug", since = "1.3.0"), | |
9e0c209e | 1264 | unstable(feature = "atomic_access", issue = "35603"), |
a7813a04 XL |
1265 | isize AtomicIsize ATOMIC_ISIZE_INIT |
1266 | } | |
3157f602 | 1267 | #[cfg(target_has_atomic = "ptr")] |
a7813a04 XL |
1268 | atomic_int!{ |
1269 | stable(feature = "rust1", since = "1.0.0"), | |
1270 | stable(feature = "extended_compare_and_swap", since = "1.10.0"), | |
1271 | stable(feature = "atomic_debug", since = "1.3.0"), | |
9e0c209e | 1272 | unstable(feature = "atomic_access", issue = "35603"), |
a7813a04 XL |
1273 | usize AtomicUsize ATOMIC_USIZE_INIT |
1274 | } | |
1275 | ||
7453a54e SL |
1276 | #[inline] |
1277 | fn strongest_failure_ordering(order: Ordering) -> Ordering { | |
1278 | match order { | |
1279 | Release => Relaxed, | |
1280 | Relaxed => Relaxed, | |
c30ab7b3 | 1281 | SeqCst => SeqCst, |
7453a54e | 1282 | Acquire => Acquire, |
c30ab7b3 SL |
1283 | AcqRel => Acquire, |
1284 | __Nonexhaustive => __Nonexhaustive, | |
7453a54e | 1285 | } |
1a4d82fc JJ |
1286 | } |
1287 | ||
1288 | #[inline] | |
7453a54e | 1289 | unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) { |
1a4d82fc JJ |
1290 | match order { |
1291 | Release => intrinsics::atomic_store_rel(dst, val), | |
1292 | Relaxed => intrinsics::atomic_store_relaxed(dst, val), | |
c30ab7b3 | 1293 | SeqCst => intrinsics::atomic_store(dst, val), |
1a4d82fc | 1294 | Acquire => panic!("there is no such thing as an acquire store"), |
c30ab7b3 SL |
1295 | AcqRel => panic!("there is no such thing as an acquire/release store"), |
1296 | __Nonexhaustive => panic!("invalid memory ordering"), | |
1a4d82fc JJ |
1297 | } |
1298 | } | |
1299 | ||
1300 | #[inline] | |
7453a54e | 1301 | unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T { |
1a4d82fc JJ |
1302 | match order { |
1303 | Acquire => intrinsics::atomic_load_acq(dst), | |
1304 | Relaxed => intrinsics::atomic_load_relaxed(dst), | |
c30ab7b3 | 1305 | SeqCst => intrinsics::atomic_load(dst), |
1a4d82fc | 1306 | Release => panic!("there is no such thing as a release load"), |
c30ab7b3 SL |
1307 | AcqRel => panic!("there is no such thing as an acquire/release load"), |
1308 | __Nonexhaustive => panic!("invalid memory ordering"), | |
1a4d82fc JJ |
1309 | } |
1310 | } | |
1311 | ||
1312 | #[inline] | |
1a4d82fc JJ |
1313 | unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1314 | match order { | |
1315 | Acquire => intrinsics::atomic_xchg_acq(dst, val), | |
1316 | Release => intrinsics::atomic_xchg_rel(dst, val), | |
c30ab7b3 | 1317 | AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), |
1a4d82fc | 1318 | Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), |
c30ab7b3 SL |
1319 | SeqCst => intrinsics::atomic_xchg(dst, val), |
1320 | __Nonexhaustive => panic!("invalid memory ordering"), | |
1a4d82fc JJ |
1321 | } |
1322 | } | |
1323 | ||
1324 | /// Returns the old value (like __sync_fetch_and_add). | |
1325 | #[inline] | |
1a4d82fc JJ |
1326 | unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1327 | match order { | |
1328 | Acquire => intrinsics::atomic_xadd_acq(dst, val), | |
1329 | Release => intrinsics::atomic_xadd_rel(dst, val), | |
c30ab7b3 | 1330 | AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), |
1a4d82fc | 1331 | Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), |
c30ab7b3 SL |
1332 | SeqCst => intrinsics::atomic_xadd(dst, val), |
1333 | __Nonexhaustive => panic!("invalid memory ordering"), | |
1a4d82fc JJ |
1334 | } |
1335 | } | |
1336 | ||
1337 | /// Returns the old value (like __sync_fetch_and_sub). | |
1338 | #[inline] | |
1a4d82fc JJ |
1339 | unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1340 | match order { | |
1341 | Acquire => intrinsics::atomic_xsub_acq(dst, val), | |
1342 | Release => intrinsics::atomic_xsub_rel(dst, val), | |
c30ab7b3 | 1343 | AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), |
1a4d82fc | 1344 | Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), |
c30ab7b3 SL |
1345 | SeqCst => intrinsics::atomic_xsub(dst, val), |
1346 | __Nonexhaustive => panic!("invalid memory ordering"), | |
1a4d82fc JJ |
1347 | } |
1348 | } | |
1349 | ||
1350 | #[inline] | |
7453a54e SL |
1351 | unsafe fn atomic_compare_exchange<T>(dst: *mut T, |
1352 | old: T, | |
1353 | new: T, | |
1354 | success: Ordering, | |
c30ab7b3 SL |
1355 | failure: Ordering) |
1356 | -> Result<T, T> { | |
54a0048b | 1357 | let (val, ok) = match (success, failure) { |
7453a54e SL |
1358 | (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new), |
1359 | (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new), | |
c30ab7b3 | 1360 | (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new), |
7453a54e | 1361 | (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new), |
c30ab7b3 | 1362 | (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new), |
7453a54e | 1363 | (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new), |
c30ab7b3 SL |
1364 | (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new), |
1365 | (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new), | |
1366 | (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new), | |
1367 | (__Nonexhaustive, _) => panic!("invalid memory ordering"), | |
1368 | (_, __Nonexhaustive) => panic!("invalid memory ordering"), | |
54a0048b SL |
1369 | (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"), |
1370 | (_, Release) => panic!("there is no such thing as a release failure ordering"), | |
7453a54e | 1371 | _ => panic!("a failure ordering can't be stronger than a success ordering"), |
54a0048b | 1372 | }; |
c30ab7b3 | 1373 | if ok { Ok(val) } else { Err(val) } |
7453a54e SL |
1374 | } |
1375 | ||
7453a54e | 1376 | #[inline] |
7453a54e SL |
1377 | unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T, |
1378 | old: T, | |
1379 | new: T, | |
1380 | success: Ordering, | |
c30ab7b3 SL |
1381 | failure: Ordering) |
1382 | -> Result<T, T> { | |
54a0048b | 1383 | let (val, ok) = match (success, failure) { |
7453a54e SL |
1384 | (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new), |
1385 | (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new), | |
c30ab7b3 | 1386 | (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new), |
7453a54e | 1387 | (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new), |
c30ab7b3 | 1388 | (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new), |
7453a54e | 1389 | (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new), |
c30ab7b3 SL |
1390 | (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new), |
1391 | (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new), | |
1392 | (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new), | |
1393 | (__Nonexhaustive, _) => panic!("invalid memory ordering"), | |
1394 | (_, __Nonexhaustive) => panic!("invalid memory ordering"), | |
54a0048b SL |
1395 | (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"), |
1396 | (_, Release) => panic!("there is no such thing as a release failure ordering"), | |
7453a54e | 1397 | _ => panic!("a failure ordering can't be stronger than a success ordering"), |
54a0048b | 1398 | }; |
c30ab7b3 | 1399 | if ok { Ok(val) } else { Err(val) } |
7453a54e SL |
1400 | } |
1401 | ||
1a4d82fc | 1402 | #[inline] |
1a4d82fc JJ |
1403 | unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1404 | match order { | |
1405 | Acquire => intrinsics::atomic_and_acq(dst, val), | |
1406 | Release => intrinsics::atomic_and_rel(dst, val), | |
c30ab7b3 | 1407 | AcqRel => intrinsics::atomic_and_acqrel(dst, val), |
1a4d82fc | 1408 | Relaxed => intrinsics::atomic_and_relaxed(dst, val), |
c30ab7b3 SL |
1409 | SeqCst => intrinsics::atomic_and(dst, val), |
1410 | __Nonexhaustive => panic!("invalid memory ordering"), | |
1a4d82fc JJ |
1411 | } |
1412 | } | |
1413 | ||
1a4d82fc | 1414 | #[inline] |
1a4d82fc JJ |
1415 | unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1416 | match order { | |
1417 | Acquire => intrinsics::atomic_or_acq(dst, val), | |
1418 | Release => intrinsics::atomic_or_rel(dst, val), | |
c30ab7b3 | 1419 | AcqRel => intrinsics::atomic_or_acqrel(dst, val), |
1a4d82fc | 1420 | Relaxed => intrinsics::atomic_or_relaxed(dst, val), |
c30ab7b3 SL |
1421 | SeqCst => intrinsics::atomic_or(dst, val), |
1422 | __Nonexhaustive => panic!("invalid memory ordering"), | |
1a4d82fc JJ |
1423 | } |
1424 | } | |
1425 | ||
1a4d82fc | 1426 | #[inline] |
1a4d82fc JJ |
1427 | unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1428 | match order { | |
1429 | Acquire => intrinsics::atomic_xor_acq(dst, val), | |
1430 | Release => intrinsics::atomic_xor_rel(dst, val), | |
c30ab7b3 | 1431 | AcqRel => intrinsics::atomic_xor_acqrel(dst, val), |
1a4d82fc | 1432 | Relaxed => intrinsics::atomic_xor_relaxed(dst, val), |
c30ab7b3 SL |
1433 | SeqCst => intrinsics::atomic_xor(dst, val), |
1434 | __Nonexhaustive => panic!("invalid memory ordering"), | |
1a4d82fc JJ |
1435 | } |
1436 | } | |
1437 | ||
1a4d82fc JJ |
1438 | /// An atomic fence. |
1439 | /// | |
1440 | /// A fence 'A' which has `Release` ordering semantics, synchronizes with a | |
1441 | /// fence 'B' with (at least) `Acquire` semantics, if and only if there exists | |
1442 | /// atomic operations X and Y, both operating on some atomic object 'M' such | |
1443 | /// that A is sequenced before X, Y is synchronized before B and Y observes | |
1444 | /// the change to M. This provides a happens-before dependence between A and B. | |
1445 | /// | |
1446 | /// Atomic operations with `Release` or `Acquire` semantics can also synchronize | |
1447 | /// with a fence. | |
1448 | /// | |
1449 | /// A fence which has `SeqCst` ordering, in addition to having both `Acquire` | |
1450 | /// and `Release` semantics, participates in the global program order of the | |
1451 | /// other `SeqCst` operations and/or fences. | |
1452 | /// | |
1453 | /// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings. | |
1454 | /// | |
1455 | /// # Panics | |
1456 | /// | |
1457 | /// Panics if `order` is `Relaxed`. | |
1458 | #[inline] | |
85aaf69f | 1459 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
1460 | pub fn fence(order: Ordering) { |
1461 | unsafe { | |
1462 | match order { | |
1463 | Acquire => intrinsics::atomic_fence_acq(), | |
1464 | Release => intrinsics::atomic_fence_rel(), | |
c30ab7b3 SL |
1465 | AcqRel => intrinsics::atomic_fence_acqrel(), |
1466 | SeqCst => intrinsics::atomic_fence(), | |
1467 | Relaxed => panic!("there is no such thing as a relaxed fence"), | |
1468 | __Nonexhaustive => panic!("invalid memory ordering"), | |
1a4d82fc JJ |
1469 | } |
1470 | } | |
1471 | } | |
c1a9b12d | 1472 | |
c1a9b12d | 1473 | |
3157f602 | 1474 | #[cfg(target_has_atomic = "8")] |
a7813a04 XL |
1475 | #[stable(feature = "atomic_debug", since = "1.3.0")] |
1476 | impl fmt::Debug for AtomicBool { | |
1477 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | |
1478 | f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish() | |
1479 | } | |
1480 | } | |
c1a9b12d | 1481 | |
3157f602 | 1482 | #[cfg(target_has_atomic = "ptr")] |
c1a9b12d SL |
1483 | #[stable(feature = "atomic_debug", since = "1.3.0")] |
1484 | impl<T> fmt::Debug for AtomicPtr<T> { | |
1485 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | |
1486 | f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish() | |
1487 | } | |
1488 | } |