// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Atomic types
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1].
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//!
//! Atomic variables are safe to share between threads (they implement `Sync`)
//! but they do not themselves provide the mechanism for sharing. The most
//! common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics
//! are often used for lazy global initialization.
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     thread::spawn(move|| {
//!         spinlock_clone.store(0, Ordering::SeqCst);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::SeqCst) != 0 {}
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```
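//!
//! A sketch of how an atomic static can gate one-time initialization; the
//! `INITIALIZED` flag and the `SeqCst` ordering here are illustrative, and a
//! complete implementation would also need to wait for the setup to finish:
//!
//! ```
//! use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
//!
//! static INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
//!
//! // Only the first caller that flips the flag performs the setup.
//! if !INITIALIZED.compare_and_swap(false, true, Ordering::SeqCst) {
//!     // ... run one-time setup here ...
//! }
//! ```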

#![stable(feature = "rust1", since = "1.0.0")]

use self::Ordering::*;

use marker::{Send, Sync};

use intrinsics;
use cell::UnsafeCell;

use result::Result::{self, Ok, Err};

use default::Default;
use fmt;

/// A boolean type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    v: UnsafeCell<usize>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicBool {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

// Send is implicitly implemented for AtomicBool.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}

/// A signed integer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicIsize {
    v: UnsafeCell<isize>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicIsize {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

// Send is implicitly implemented for AtomicIsize.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicIsize {}

/// An unsigned integer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicUsize {
    v: UnsafeCell<usize>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicUsize {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

// Send is implicitly implemented for AtomicUsize.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicUsize {}

/// A raw pointer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for AtomicPtr<T> {
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(::ptr::null_mut())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
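///
/// A minimal sketch of the `Release`/`Acquire` pairing described below (the
/// `data`/`ready` names and the exact orderings are illustrative only):
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// use std::thread;
///
/// let data = Arc::new(AtomicUsize::new(0));
/// let ready = Arc::new(AtomicBool::new(false));
///
/// let (data2, ready2) = (data.clone(), ready.clone());
/// thread::spawn(move || {
///     data2.store(42, Ordering::Relaxed);
///     // The `Release` store pairs with the `Acquire` load below: once `ready`
///     // is seen as `true`, the store to `data2` is also visible.
///     ready2.store(true, Ordering::Release);
/// });
///
/// while !ready.load(Ordering::Acquire) {}
/// assert_eq!(data.load(Ordering::Relaxed), 42);
/// ```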
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations. Corresponds to LLVM's
    /// `Monotonic` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to another thread that performs a load with `Acquire` ordering
    /// on the same value.
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in another thread.
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}

/// An `AtomicBool` initialized to `false`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
/// An `AtomicIsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
/// An `AtomicUsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);

// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
const UINT_TRUE: usize = !0;

impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: bool) -> AtomicBool {
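        // `true` is stored as all ones (`UINT_TRUE`) rather than `1` so that the
        // integer intrinsics, in particular `fetch_nand`, behave correctly; loads
        // map any non-zero representation back to `true`.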
        AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) }
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) > 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the bool, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_swap(self.v.get(), val, order) > 0 }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
    /// operation. The first describes the required ordering if the operation succeeds while the
    /// second describes the required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange(&self,
                            current: bool,
                            new: bool,
                            success: Ordering,
                            failure: Ordering) -> Result<bool, bool> {
        let current = if current { UINT_TRUE } else { 0 };
        let new = if new { UINT_TRUE } else { 0 };

        match unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) } {
            Ok(x) => Ok(x > 0),
            Err(x) => Err(x > 0),
        }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
    /// success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange_weak(&self,
                                 current: bool,
                                 new: bool,
                                 success: Ordering,
                                 failure: Ordering) -> Result<bool, bool> {
        let current = if current { UINT_TRUE } else { 0 };
        let new = if new { UINT_TRUE } else { 0 };

        match unsafe { atomic_compare_exchange_weak(self.v.get(), current, new,
                                                    success, failure) } {
            Ok(x) => Ok(x > 0),
            Err(x) => Err(x > 0),
        }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_and(self.v.get(), val, order) > 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_nand(self.v.get(), val, order) > 0 }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_or(self.v.get(), val, order) > 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_xor(self.v.get(), val, order) > 0 }
    }
}

impl AtomicIsize {
    /// Creates a new `AtomicIsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicIsize;
    ///
    /// let atomic_forty_two = AtomicIsize::new(42);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: isize) -> AtomicIsize {
        AtomicIsize { v: UnsafeCell::new(v) }
    }

    /// Loads a value from the isize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> isize {
        unsafe { atomic_load(self.v.get(), order) }
    }

    /// Stores a value into the isize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// some_isize.store(10, Ordering::Relaxed);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: isize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the isize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// Stores a value into the `isize` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: isize, new: isize, order: Ordering) -> isize {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the `isize` if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
    /// operation. The first describes the required ordering if the operation succeeds while the
    /// second describes the required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.compare_exchange(5, 10,
    ///                                        Ordering::Acquire,
    ///                                        Ordering::Relaxed),
    ///            Ok(5));
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_isize.compare_exchange(6, 12,
    ///                                        Ordering::SeqCst,
    ///                                        Ordering::Acquire),
    ///            Err(10));
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange(&self,
                            current: isize,
                            new: isize,
                            success: Ordering,
                            failure: Ordering) -> Result<isize, isize> {
        unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
    }

    /// Stores a value into the `isize` if the current value is the same as the `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
    /// success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let val = AtomicIsize::new(4);
    ///
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     let new = old * 2;
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange_weak(&self,
                                 current: isize,
                                 new: isize,
                                 success: Ordering,
                                 failure: Ordering) -> Result<isize, isize> {
        unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
    }

    /// Add an isize to the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_add(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtract an isize from the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), -10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

impl AtomicUsize {
    /// Creates a new `AtomicUsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicUsize;
    ///
    /// let atomic_forty_two = AtomicUsize::new(42);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: usize) -> AtomicUsize {
        AtomicUsize { v: UnsafeCell::new(v) }
    }

    /// Loads a value from the usize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> usize {
        unsafe { atomic_load(self.v.get(), order) }
    }

    /// Stores a value into the usize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// some_usize.store(10, Ordering::Relaxed);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: usize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the usize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.swap(10, Ordering::Relaxed), 5);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// Stores a value into the `usize` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_usize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: usize, new: usize, order: Ordering) -> usize {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the `usize` if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this
    /// operation. The first describes the required ordering if the operation succeeds while the
    /// second describes the required ordering when the operation fails. The failure ordering can't
    /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.compare_exchange(5, 10,
    ///                                        Ordering::Acquire,
    ///                                        Ordering::Relaxed),
    ///            Ok(5));
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_usize.compare_exchange(6, 12,
    ///                                        Ordering::SeqCst,
    ///                                        Ordering::Acquire),
    ///            Err(10));
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange(&self,
                            current: usize,
                            new: usize,
                            success: Ordering,
                            failure: Ordering) -> Result<usize, usize> {
        unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
    }

    /// Stores a value into the `usize` if the current value is the same as the `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
    /// success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(extended_compare_and_swap)]
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let val = AtomicUsize::new(4);
    ///
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     let new = old * 2;
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")]
    pub fn compare_exchange_weak(&self,
                                 current: usize,
                                 new: usize,
                                 success: Ordering,
                                 failure: Ordering) -> Result<usize, usize> {
        unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
    }

    /// Add to the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtract from the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(10);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 10);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

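// The pointer operations below are implemented on top of the same integer
// intrinsics used above, by reinterpreting the pointer as a `usize`
// (`self.p.get() as *mut usize`, `ptr as usize`) and casting results back to
// `*mut T`.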
1097 | impl<T> AtomicPtr<T> { | |
1098 | /// Creates a new `AtomicPtr`. | |
1099 | /// | |
1100 | /// # Examples | |
1101 | /// | |
1102 | /// ``` | |
1103 | /// use std::sync::atomic::AtomicPtr; | |
1104 | /// | |
85aaf69f | 1105 | /// let ptr = &mut 5; |
1a4d82fc JJ |
1106 | /// let atomic_ptr = AtomicPtr::new(ptr); |
1107 | /// ``` | |
1108 | #[inline] | |
85aaf69f | 1109 | #[stable(feature = "rust1", since = "1.0.0")] |
62682a34 SL |
1110 | pub const fn new(p: *mut T) -> AtomicPtr<T> { |
1111 | AtomicPtr { p: UnsafeCell::new(p) } | |
1a4d82fc JJ |
1112 | } |
1113 | ||
1114 | /// Loads a value from the pointer. | |
1115 | /// | |
1116 | /// `load` takes an `Ordering` argument which describes the memory ordering of this operation. | |
1117 | /// | |
1118 | /// # Panics | |
1119 | /// | |
1120 | /// Panics if `order` is `Release` or `AcqRel`. | |
1121 | /// | |
1122 | /// # Examples | |
1123 | /// | |
1124 | /// ``` | |
1125 | /// use std::sync::atomic::{AtomicPtr, Ordering}; | |
1126 | /// | |
85aaf69f | 1127 | /// let ptr = &mut 5; |
1a4d82fc JJ |
1128 | /// let some_ptr = AtomicPtr::new(ptr); |
1129 | /// | |
1130 | /// let value = some_ptr.load(Ordering::Relaxed); | |
1131 | /// ``` | |
1132 | #[inline] | |
85aaf69f | 1133 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
1134 | pub fn load(&self, order: Ordering) -> *mut T { |
1135 | unsafe { | |
62682a34 | 1136 | atomic_load(self.p.get() as *mut usize, order) as *mut T |
1a4d82fc JJ |
1137 | } |
1138 | } | |
1139 | ||
1140 | /// Stores a value into the pointer. | |
1141 | /// | |
1142 | /// `store` takes an `Ordering` argument which describes the memory ordering of this operation. | |
1143 | /// | |
1144 | /// # Examples | |
1145 | /// | |
1146 | /// ``` | |
1147 | /// use std::sync::atomic::{AtomicPtr, Ordering}; | |
1148 | /// | |
85aaf69f | 1149 | /// let ptr = &mut 5; |
1a4d82fc JJ |
1150 | /// let some_ptr = AtomicPtr::new(ptr); |
1151 | /// | |
85aaf69f | 1152 | /// let other_ptr = &mut 10; |
1a4d82fc JJ |
1153 | /// |
1154 | /// some_ptr.store(other_ptr, Ordering::Relaxed); | |
1155 | /// ``` | |
1156 | /// | |
1157 | /// # Panics | |
1158 | /// | |
1159 | /// Panics if `order` is `Acquire` or `AcqRel`. | |
1160 | #[inline] | |
85aaf69f | 1161 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 1162 | pub fn store(&self, ptr: *mut T, order: Ordering) { |
62682a34 | 1163 | unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); } |
1a4d82fc JJ |
1164 | } |
1165 | ||
1166 | /// Stores a value into the pointer, returning the old value. | |
1167 | /// | |
1168 | /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation. | |
1169 | /// | |
1170 | /// # Examples | |
1171 | /// | |
1172 | /// ``` | |
1173 | /// use std::sync::atomic::{AtomicPtr, Ordering}; | |
1174 | /// | |
85aaf69f | 1175 | /// let ptr = &mut 5; |
1a4d82fc JJ |
1176 | /// let some_ptr = AtomicPtr::new(ptr); |
1177 | /// | |
85aaf69f | 1178 | /// let other_ptr = &mut 10; |
1a4d82fc JJ |
1179 | /// |
1180 | /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed); | |
1181 | /// ``` | |
1182 | #[inline] | |
85aaf69f | 1183 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 1184 | pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { |
62682a34 | 1185 | unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T } |
1a4d82fc JJ |
1186 | } |
1187 | ||
c1a9b12d | 1188 | /// Stores a value into the pointer if the current value is the same as the `current` value. |
1a4d82fc | 1189 | /// |
c1a9b12d SL |
1190 | /// The return value is always the previous value. If it is equal to `current`, then the value |
1191 | /// was updated. | |
1a4d82fc JJ |
1192 | /// |
1193 | /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of | |
1194 | /// this operation. | |
1195 | /// | |
1196 | /// # Examples | |
1197 | /// | |
1198 | /// ``` | |
1199 | /// use std::sync::atomic::{AtomicPtr, Ordering}; | |
1200 | /// | |
85aaf69f | 1201 | /// let ptr = &mut 5; |
1a4d82fc JJ |
1202 | /// let some_ptr = AtomicPtr::new(ptr); |
1203 | /// | |
85aaf69f SL |
1204 | /// let other_ptr = &mut 10; |
1205 | /// let another_ptr = &mut 10; | |
1a4d82fc JJ |
1206 | /// |
1207 | /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed); | |
1208 | /// ``` | |
1209 | #[inline] | |
85aaf69f | 1210 | #[stable(feature = "rust1", since = "1.0.0")] |
c1a9b12d | 1211 | pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T { |
54a0048b SL |
1212 | match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) { |
1213 | Ok(x) => x, | |
1214 | Err(x) => x, | |
1215 | } | |
7453a54e SL |
1216 | } |
1217 | ||
1218 | /// Stores a value into the pointer if the current value is the same as the `current` value. | |
1219 | /// | |
54a0048b SL |
1220 | /// The return value is a result indicating whether the new value was written and containing |
1221 | /// the previous value. On success this value is guaranteed to be equal to `new`. | |
7453a54e SL |
1222 | /// |
1223 | /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this | |
1224 | /// operation. The first describes the required ordering if the operation succeeds while the | |
1225 | /// second describes the required ordering when the operation fails. The failure ordering can't | |
54a0048b | 1226 | /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering. |
7453a54e SL |
1227 | /// |
1228 | /// # Examples | |
1229 | /// | |
1230 | /// ``` | |
1231 | /// # #![feature(extended_compare_and_swap)] | |
1232 | /// use std::sync::atomic::{AtomicPtr, Ordering}; | |
1233 | /// | |
1234 | /// let ptr = &mut 5; | |
1235 | /// let some_ptr = AtomicPtr::new(ptr); | |
1236 | /// | |
1237 | /// let other_ptr = &mut 10; | |
1238 | /// let another_ptr = &mut 10; | |
1239 | /// | |
1240 | /// let value = some_ptr.compare_exchange(other_ptr, another_ptr, | |
1241 | /// Ordering::SeqCst, Ordering::Relaxed); | |
1242 | /// ``` | |
1243 | #[inline] | |
1244 | #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")] | |
1245 | pub fn compare_exchange(&self, | |
1246 | current: *mut T, | |
1247 | new: *mut T, | |
1248 | success: Ordering, | |
54a0048b | 1249 | failure: Ordering) -> Result<*mut T, *mut T> { |
1a4d82fc | 1250 | unsafe { |
54a0048b SL |
1251 | let res = atomic_compare_exchange(self.p.get() as *mut usize, |
1252 | current as usize, | |
1253 | new as usize, | |
1254 | success, | |
1255 | failure); | |
1256 | match res { | |
1257 | Ok(x) => Ok(x as *mut T), | |
1258 | Err(x) => Err(x as *mut T), | |
1259 | } | |
1a4d82fc JJ |
1260 | } |
1261 | } | |
7453a54e SL |
1262 | |
1263 | /// Stores a value into the pointer if the current value is the same as the `current` value. | |
1264 | /// | |
1265 | /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the | |
1266 | /// comparison succeeds, which can result in more efficient code on some platforms. The | |
54a0048b SL |
1267 | /// return value is a result indicating whether the new value was written and containing the |
1268 | /// previous value. | |
7453a54e SL |
1269 | /// |
1270 | /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory | |
1271 | /// ordering of this operation. The first describes the required ordering if the operation | |
1272 | /// succeeds while the second describes the required ordering when the operation fails. The | |
54a0048b | 1273 | /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the |
7453a54e SL |
1274 | /// success ordering. |
1275 | /// | |
1276 | /// # Examples | |
1277 | /// | |
1278 | /// ``` | |
1279 | /// # #![feature(extended_compare_and_swap)] | |
1280 | /// use std::sync::atomic::{AtomicPtr, Ordering}; | |
1281 | /// | |
1282 | /// let some_ptr = AtomicPtr::new(&mut 5); | |
1283 | /// | |
1284 | /// let new = &mut 10; | |
1285 | /// let mut old = some_ptr.load(Ordering::Relaxed); | |
1286 | /// loop { | |
54a0048b SL |
1287 | /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { |
1288 | /// Ok(_) => break, | |
1289 | /// Err(x) => old = x, | |
7453a54e SL |
1290 | /// } |
1291 | /// } | |
1292 | /// ``` | |
1293 | #[inline] | |
1294 | #[unstable(feature = "extended_compare_and_swap", reason = "recently added", issue = "31767")] | |
1295 | pub fn compare_exchange_weak(&self, | |
1296 | current: *mut T, | |
1297 | new: *mut T, | |
1298 | success: Ordering, | |
54a0048b SL |
1299 | failure: Ordering) -> Result<*mut T, *mut T> { |
1300 | unsafe { | |
1301 | let res = atomic_compare_exchange_weak(self.p.get() as *mut usize, | |
1302 | current as usize, | |
1303 | new as usize, | |
1304 | success, | |
1305 | failure); | |
1306 | match res { | |
1307 | Ok(x) => Ok(x as *mut T), | |
1308 | Err(x) => Err(x as *mut T), | |
1309 | } | |
1310 | } | |
7453a54e SL |
1311 | } |
1312 | } | |
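The spurious-failure caveat above is why `compare_exchange_weak` is almost always wrapped in a retry loop: a false failure just costs one extra iteration. A hedged sketch of such a read-modify-write loop on `AtomicUsize` (the helper name `saturating_increment` is hypothetical, and the unstable `extended_compare_and_swap` feature is again assumed):

```rust
#![feature(extended_compare_and_swap)] // assumed, matching the doc examples

use std::sync::atomic::{AtomicUsize, Ordering};

/// Hypothetical helper: increments `counter` but never past `max`,
/// returning the previous value.
fn saturating_increment(counter: &AtomicUsize, max: usize) -> usize {
    let mut old = counter.load(Ordering::Relaxed);
    loop {
        let new = if old < max { old + 1 } else { old };
        match counter.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
            Ok(prev) => return prev,
            // A real conflict or a spurious failure: retry with the value we observed.
            Err(prev) => old = prev,
        }
    }
}

fn main() {
    let c = AtomicUsize::new(9);
    assert_eq!(saturating_increment(&c, 10), 9);
    assert_eq!(saturating_increment(&c, 10), 10); // already at max, unchanged
    assert_eq!(c.load(Ordering::SeqCst), 10);
}
```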
1313 | ||
1314 | #[inline] | |
1315 | fn strongest_failure_ordering(order: Ordering) -> Ordering { | |
1316 | match order { | |
1317 | Release => Relaxed, | |
1318 | Relaxed => Relaxed, | |
1319 | SeqCst => SeqCst, | |
1320 | Acquire => Acquire, | |
1321 | AcqRel => Acquire, | |
1322 | } | |
1a4d82fc JJ |
1323 | } |
1324 | ||
1325 | #[inline] | |
7453a54e | 1326 | unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) { |
1a4d82fc JJ |
1327 | match order { |
1328 | Release => intrinsics::atomic_store_rel(dst, val), | |
1329 | Relaxed => intrinsics::atomic_store_relaxed(dst, val), | |
1330 | SeqCst => intrinsics::atomic_store(dst, val), | |
1331 | Acquire => panic!("there is no such thing as an acquire store"), | |
1332 | AcqRel => panic!("there is no such thing as an acquire/release store"), | |
1333 | } | |
1334 | } | |
1335 | ||
1336 | #[inline] | |
7453a54e | 1337 | unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T { |
1a4d82fc JJ |
1338 | match order { |
1339 | Acquire => intrinsics::atomic_load_acq(dst), | |
1340 | Relaxed => intrinsics::atomic_load_relaxed(dst), | |
1341 | SeqCst => intrinsics::atomic_load(dst), | |
1342 | Release => panic!("there is no such thing as a release load"), | |
1343 | AcqRel => panic!("there is no such thing as an acquire/release load"), | |
1344 | } | |
1345 | } | |
1346 | ||
1347 | #[inline] | |
1a4d82fc JJ |
1348 | unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1349 | match order { | |
1350 | Acquire => intrinsics::atomic_xchg_acq(dst, val), | |
1351 | Release => intrinsics::atomic_xchg_rel(dst, val), | |
1352 | AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), | |
1353 | Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), | |
1354 | SeqCst => intrinsics::atomic_xchg(dst, val) | |
1355 | } | |
1356 | } | |
1357 | ||
1358 | /// Returns the old value (like __sync_fetch_and_add). | |
1359 | #[inline] | |
1a4d82fc JJ |
1360 | unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1361 | match order { | |
1362 | Acquire => intrinsics::atomic_xadd_acq(dst, val), | |
1363 | Release => intrinsics::atomic_xadd_rel(dst, val), | |
1364 | AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), | |
1365 | Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), | |
1366 | SeqCst => intrinsics::atomic_xadd(dst, val) | |
1367 | } | |
1368 | } | |
1369 | ||
1370 | /// Returns the old value (like __sync_fetch_and_sub). | |
1371 | #[inline] | |
1a4d82fc JJ |
1372 | unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1373 | match order { | |
1374 | Acquire => intrinsics::atomic_xsub_acq(dst, val), | |
1375 | Release => intrinsics::atomic_xsub_rel(dst, val), | |
1376 | AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), | |
1377 | Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), | |
1378 | SeqCst => intrinsics::atomic_xsub(dst, val) | |
1379 | } | |
1380 | } | |
1381 | ||
1382 | #[inline] | |
54a0048b | 1383 | #[cfg(any(not(stage0), cargobuild))] |
7453a54e SL |
1384 | unsafe fn atomic_compare_exchange<T>(dst: *mut T, |
1385 | old: T, | |
1386 | new: T, | |
1387 | success: Ordering, | |
54a0048b SL |
1388 | failure: Ordering) -> Result<T, T> { |
1389 | let (val, ok) = match (success, failure) { | |
7453a54e SL |
1390 | (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new), |
1391 | (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new), | |
1392 | (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new), | |
1393 | (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new), | |
1394 | (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new), | |
1395 | (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new), | |
1396 | (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new), | |
1397 | (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new), | |
1398 | (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new), | |
54a0048b SL |
1399 | (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"), |
1400 | (_, Release) => panic!("there is no such thing as a release failure ordering"), | |
7453a54e | 1401 | _ => panic!("a failure ordering can't be stronger than a success ordering"), |
54a0048b SL |
1402 | }; |
1403 | if ok { | |
1404 | Ok(val) | |
1405 | } else { | |
1406 | Err(val) | |
7453a54e SL |
1407 | } |
1408 | } | |
1409 | ||
1410 | #[inline] | |
54a0048b | 1411 | #[cfg(all(stage0, not(cargobuild)))] |
7453a54e SL |
1412 | unsafe fn atomic_compare_exchange<T>(dst: *mut T, |
1413 | old: T, | |
1414 | new: T, | |
1415 | success: Ordering, | |
54a0048b SL |
1416 | _: Ordering) -> Result<T, T> |
1417 | where T: ::cmp::Eq + ::marker::Copy | |
1418 | { | |
1419 | let val = match success { | |
1a4d82fc JJ |
1420 | Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), |
1421 | Release => intrinsics::atomic_cxchg_rel(dst, old, new), | |
1422 | AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new), | |
1423 | Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new), | |
1424 | SeqCst => intrinsics::atomic_cxchg(dst, old, new), | |
54a0048b SL |
1425 | }; |
1426 | if val == old { | |
1427 | Ok(val) | |
1428 | } else { | |
1429 | Err(val) | |
1a4d82fc JJ |
1430 | } |
1431 | } | |
1432 | ||
7453a54e | 1433 | #[inline] |
7453a54e SL |
1434 | unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T, |
1435 | old: T, | |
1436 | new: T, | |
1437 | success: Ordering, | |
54a0048b SL |
1438 | failure: Ordering) -> Result<T, T> { |
1439 | let (val, ok) = match (success, failure) { | |
7453a54e SL |
1440 | (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new), |
1441 | (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new), | |
1442 | (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new), | |
1443 | (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new), | |
1444 | (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new), | |
1445 | (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new), | |
1446 | (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new), | |
1447 | (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new), | |
1448 | (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new), | |
54a0048b SL |
1449 | (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"), |
1450 | (_, Release) => panic!("there is no such thing as a release failure ordering"), | |
7453a54e | 1451 | _ => panic!("a failure ordering can't be stronger than a success ordering"), |
54a0048b SL |
1452 | }; |
1453 | if ok { | |
1454 | Ok(val) | |
1455 | } else { | |
1456 | Err(val) | |
7453a54e SL |
1457 | } |
1458 | } | |
1459 | ||
1a4d82fc | 1460 | #[inline] |
1a4d82fc JJ |
1461 | unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1462 | match order { | |
1463 | Acquire => intrinsics::atomic_and_acq(dst, val), | |
1464 | Release => intrinsics::atomic_and_rel(dst, val), | |
1465 | AcqRel => intrinsics::atomic_and_acqrel(dst, val), | |
1466 | Relaxed => intrinsics::atomic_and_relaxed(dst, val), | |
1467 | SeqCst => intrinsics::atomic_and(dst, val) | |
1468 | } | |
1469 | } | |
1470 | ||
1471 | #[inline] | |
1a4d82fc JJ |
1472 | unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1473 | match order { | |
1474 | Acquire => intrinsics::atomic_nand_acq(dst, val), | |
1475 | Release => intrinsics::atomic_nand_rel(dst, val), | |
1476 | AcqRel => intrinsics::atomic_nand_acqrel(dst, val), | |
1477 | Relaxed => intrinsics::atomic_nand_relaxed(dst, val), | |
1478 | SeqCst => intrinsics::atomic_nand(dst, val) | |
1479 | } | |
1480 | } | |
1481 | ||
1482 | ||
1483 | #[inline] | |
1a4d82fc JJ |
1484 | unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1485 | match order { | |
1486 | Acquire => intrinsics::atomic_or_acq(dst, val), | |
1487 | Release => intrinsics::atomic_or_rel(dst, val), | |
1488 | AcqRel => intrinsics::atomic_or_acqrel(dst, val), | |
1489 | Relaxed => intrinsics::atomic_or_relaxed(dst, val), | |
1490 | SeqCst => intrinsics::atomic_or(dst, val) | |
1491 | } | |
1492 | } | |
1493 | ||
1494 | ||
1495 | #[inline] | |
1a4d82fc JJ |
1496 | unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1497 | match order { | |
1498 | Acquire => intrinsics::atomic_xor_acq(dst, val), | |
1499 | Release => intrinsics::atomic_xor_rel(dst, val), | |
1500 | AcqRel => intrinsics::atomic_xor_acqrel(dst, val), | |
1501 | Relaxed => intrinsics::atomic_xor_relaxed(dst, val), | |
1502 | SeqCst => intrinsics::atomic_xor(dst, val) | |
1503 | } | |
1504 | } | |
1505 | ||
1506 | ||
1507 | /// An atomic fence. | |
1508 | /// | |
1509 | /// A fence 'A' which has `Release` ordering semantics synchronizes with a |
1510 | /// fence 'B' with (at least) `Acquire` semantics, if and only if there exist |
1511 | /// atomic operations X and Y, both operating on some atomic object 'M', such |
1512 | /// that A is sequenced before X, Y is sequenced before B, and Y observes |
1513 | /// the change to M. This establishes a happens-before relation between A and B. |
1514 | /// | |
1515 | /// Atomic operations with `Release` or `Acquire` semantics can also synchronize | |
1516 | /// with a fence. | |
1517 | /// | |
1518 | /// A fence which has `SeqCst` ordering, in addition to having both `Acquire` | |
1519 | /// and `Release` semantics, participates in the global program order of the | |
1520 | /// other `SeqCst` operations and/or fences. | |
1521 | /// | |
1522 | /// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings. | |
1523 | /// | |
1524 | /// # Panics | |
1525 | /// | |
1526 | /// Panics if `order` is `Relaxed`. | |
1527 | #[inline] | |
85aaf69f | 1528 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
1529 | pub fn fence(order: Ordering) { |
1530 | unsafe { | |
1531 | match order { | |
1532 | Acquire => intrinsics::atomic_fence_acq(), | |
1533 | Release => intrinsics::atomic_fence_rel(), | |
1534 | AcqRel => intrinsics::atomic_fence_acqrel(), | |
1535 | SeqCst => intrinsics::atomic_fence(), | |
1536 | Relaxed => panic!("there is no such thing as a relaxed fence") | |
1537 | } | |
1538 | } | |
1539 | } | |
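A hedged sketch of the synchronizes-with rule spelled out in the documentation above: the `Release` fence plays the role of A, the `Acquire` fence the role of B, and the two `Relaxed` accesses to the flag are X and Y. (The payload is kept in an atomic here only so the example stays in safe code.)

```rust
use std::sync::Arc;
use std::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};
use std::thread;

fn main() {
    let data = Arc::new(AtomicUsize::new(0));
    let ready = Arc::new(AtomicBool::new(false));

    let (d, r) = (data.clone(), ready.clone());
    let producer = thread::spawn(move || {
        d.store(42, Ordering::Relaxed);   // write published by the fence below
        fence(Ordering::Release);         // fence A, sequenced after the write
        r.store(true, Ordering::Relaxed); // store X
    });

    while !ready.load(Ordering::Relaxed) {} // load Y, spins until it observes X
    fence(Ordering::Acquire);               // fence B, sequenced after load Y
    assert_eq!(data.load(Ordering::Relaxed), 42); // guaranteed to see the write

    producer.join().unwrap();
}
```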
c1a9b12d SL |
1540 | |
1541 | macro_rules! impl_Debug { | |
1542 | ($($t:ident)*) => ($( | |
1543 | #[stable(feature = "atomic_debug", since = "1.3.0")] | |
1544 | impl fmt::Debug for $t { | |
1545 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | |
1546 | f.debug_tuple(stringify!($t)).field(&self.load(Ordering::SeqCst)).finish() | |
1547 | } | |
1548 | } | |
1549 | )*); | |
1550 | } | |
1551 | ||
1552 | impl_Debug!{ AtomicUsize AtomicIsize AtomicBool } | |
1553 | ||
1554 | #[stable(feature = "atomic_debug", since = "1.3.0")] | |
1555 | impl<T> fmt::Debug for AtomicPtr<T> { | |
1556 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | |
1557 | f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish() | |
1558 | } | |
1559 | } |
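For completeness, a hedged sketch of what these `Debug` implementations produce: a tuple-struct style rendering around the value loaded with `SeqCst` at formatting time.

```rust
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

fn main() {
    let flag = AtomicBool::new(true);
    let count = AtomicUsize::new(7);

    println!("{:?}", flag);  // expected output along the lines of: AtomicBool(true)
    println!("{:?}", count); // expected output along the lines of: AtomicUsize(7)

    count.store(8, Ordering::SeqCst);
    println!("{:?}", count); // reflects the value at formatting time: AtomicUsize(8)
}
```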