]>
Commit | Line | Data |
---|---|---|
1a4d82fc JJ |
1 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT |
2 | // file at the top-level directory of this distribution and at | |
3 | // http://rust-lang.org/COPYRIGHT. | |
4 | // | |
5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | |
6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | |
7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | |
8 | // option. This file may not be copied, modified, or distributed | |
9 | // except according to those terms. | |
10 | ||
11 | //! Atomic types | |
12 | //! | |
13 | //! Atomic types provide primitive shared-memory communication between | |
14 | //! threads, and are the building blocks of other concurrent | |
15 | //! types. | |
16 | //! | |
17 | //! This module defines atomic versions of a select number of primitive | |
c34b1796 | 18 | //! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`. |
1a4d82fc JJ |
19 | //! Atomic types present operations that, when used correctly, synchronize |
20 | //! updates between threads. | |
21 | //! | |
22 | //! Each method takes an `Ordering` which represents the strength of | |
23 | //! the memory barrier for that operation. These orderings are the | |
85aaf69f | 24 | //! same as [LLVM atomic orderings][1]. |
1a4d82fc | 25 | //! |
85aaf69f | 26 | //! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations |
1a4d82fc JJ |
27 | //! |
28 | //! Atomic variables are safe to share between threads (they implement `Sync`) | |
a7813a04 XL |
29 | //! but they do not themselves provide the mechanism for sharing and follow the |
30 | //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust. | |
31 | //! The most common way to share an atomic variable is to put it into an `Arc` (an | |
1a4d82fc JJ |
32 | //! atomically-reference-counted shared pointer). |
33 | //! | |
34 | //! Most atomic types may be stored in static variables, initialized using | |
5bcae85e | 35 | //! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics |
1a4d82fc JJ |
36 | //! are often used for lazy global initialization. |
37 | //! | |
38 | //! | |
39 | //! # Examples | |
40 | //! | |
41 | //! A simple spinlock: | |
42 | //! | |
43 | //! ``` | |
44 | //! use std::sync::Arc; | |
85aaf69f SL |
45 | //! use std::sync::atomic::{AtomicUsize, Ordering}; |
46 | //! use std::thread; | |
1a4d82fc JJ |
47 | //! |
48 | //! fn main() { | |
85aaf69f | 49 | //! let spinlock = Arc::new(AtomicUsize::new(1)); |
1a4d82fc JJ |
50 | //! |
51 | //! let spinlock_clone = spinlock.clone(); | |
a7813a04 | 52 | //! let thread = thread::spawn(move|| { |
1a4d82fc JJ |
53 | //! spinlock_clone.store(0, Ordering::SeqCst); |
54 | //! }); | |
55 | //! | |
bd371182 | 56 | //! // Wait for the other thread to release the lock |
1a4d82fc | 57 | //! while spinlock.load(Ordering::SeqCst) != 0 {} |
a7813a04 XL |
58 | //! |
59 | //! if let Err(panic) = thread.join() { | |
60 | //! println!("Thread had an error: {:?}", panic); | |
61 | //! } | |
1a4d82fc JJ |
62 | //! } |
63 | //! ``` | |
64 | //! | |
bd371182 | 65 | //! Keep a global count of live threads: |
1a4d82fc JJ |
66 | //! |
67 | //! ``` | |
85aaf69f | 68 | //! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; |
1a4d82fc | 69 | //! |
bd371182 | 70 | //! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT; |
1a4d82fc | 71 | //! |
bd371182 AL |
72 | //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst); |
73 | //! println!("live threads: {}", old_thread_count + 1); | |
1a4d82fc JJ |
74 | //! ``` |
75 | ||
85aaf69f | 76 | #![stable(feature = "rust1", since = "1.0.0")] |
5bcae85e SL |
77 | #![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))] |
78 | #![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))] | |
1a4d82fc JJ |
79 | |
80 | use self::Ordering::*; | |
81 | ||
c1a9b12d | 82 | use marker::{Send, Sync}; |
1a4d82fc JJ |
83 | |
84 | use intrinsics; | |
85 | use cell::UnsafeCell; | |
86 | ||
54a0048b SL |
87 | use result::Result::{self, Ok, Err}; |
88 | ||
9346a6ac | 89 | use default::Default; |
c1a9b12d | 90 | use fmt; |
9346a6ac | 91 | |
1a4d82fc | 92 | /// A boolean type which can be safely shared between threads. |
3157f602 | 93 | #[cfg(target_has_atomic = "8")] |
85aaf69f | 94 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 95 | pub struct AtomicBool { |
a7813a04 | 96 | v: UnsafeCell<u8>, |
1a4d82fc JJ |
97 | } |
98 | ||
3157f602 | 99 | #[cfg(target_has_atomic = "8")] |
92a42be0 | 100 | #[stable(feature = "rust1", since = "1.0.0")] |
9346a6ac | 101 | impl Default for AtomicBool { |
62682a34 | 102 | fn default() -> Self { |
a7813a04 | 103 | Self::new(false) |
9346a6ac AL |
104 | } |
105 | } | |
106 | ||
b039eaaf | 107 | // Send is implicitly implemented for AtomicBool. |
3157f602 | 108 | #[cfg(target_has_atomic = "8")] |
92a42be0 | 109 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
110 | unsafe impl Sync for AtomicBool {} |
111 | ||
1a4d82fc | 112 | /// A raw pointer type which can be safely shared between threads. |
3157f602 | 113 | #[cfg(target_has_atomic = "ptr")] |
85aaf69f | 114 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 115 | pub struct AtomicPtr<T> { |
62682a34 | 116 | p: UnsafeCell<*mut T>, |
1a4d82fc JJ |
117 | } |
118 | ||
3157f602 | 119 | #[cfg(target_has_atomic = "ptr")] |
92a42be0 | 120 | #[stable(feature = "rust1", since = "1.0.0")] |
d9579d0f AL |
121 | impl<T> Default for AtomicPtr<T> { |
122 | fn default() -> AtomicPtr<T> { | |
123 | AtomicPtr::new(::ptr::null_mut()) | |
124 | } | |
125 | } | |
126 | ||
3157f602 | 127 | #[cfg(target_has_atomic = "ptr")] |
92a42be0 | 128 | #[stable(feature = "rust1", since = "1.0.0")] |
c1a9b12d | 129 | unsafe impl<T> Send for AtomicPtr<T> {} |
3157f602 | 130 | #[cfg(target_has_atomic = "ptr")] |
92a42be0 | 131 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
132 | unsafe impl<T> Sync for AtomicPtr<T> {} |
133 | ||
134 | /// Atomic memory orderings | |
135 | /// | |
136 | /// Memory orderings limit the ways that both the compiler and CPU may reorder | |
137 | /// instructions around atomic operations. At its most restrictive, | |
138 | /// "sequentially consistent" atomics allow neither reads nor writes | |
139 | /// to be moved either before or after the atomic operation; on the other end | |
140 | /// "relaxed" atomics allow all reorderings. | |
141 | /// | |
142 | /// Rust's memory orderings are [the same as | |
c1a9b12d | 143 | /// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations). |
85aaf69f | 144 | #[stable(feature = "rust1", since = "1.0.0")] |
54a0048b | 145 | #[derive(Copy, Clone, Debug)] |
1a4d82fc | 146 | pub enum Ordering { |
b039eaaf SL |
147 | /// No ordering constraints, only atomic operations. Corresponds to LLVM's |
148 | /// `Monotonic` ordering. | |
85aaf69f | 149 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
150 | Relaxed, |
151 | /// When coupled with a store, all previous writes become visible | |
a7813a04 | 152 | /// to the other threads that perform a load with `Acquire` ordering |
1a4d82fc | 153 | /// on the same value. |
85aaf69f | 154 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
155 | Release, |
156 | /// When coupled with a load, all subsequent loads will see data | |
157 | /// written before a store with `Release` ordering on the same value | |
a7813a04 | 158 | /// in other threads. |
85aaf69f | 159 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
160 | Acquire, |
161 | /// When coupled with a load, uses `Acquire` ordering, and with a store | |
162 | /// `Release` ordering. | |
85aaf69f | 163 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
164 | AcqRel, |
165 | /// Like `AcqRel` with the additional guarantee that all threads see all | |
166 | /// sequentially consistent operations in the same order. | |
85aaf69f | 167 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
168 | SeqCst, |
169 | } | |
170 | ||
171 | /// An `AtomicBool` initialized to `false`. | |
3157f602 | 172 | #[cfg(target_has_atomic = "8")] |
85aaf69f | 173 | #[stable(feature = "rust1", since = "1.0.0")] |
62682a34 | 174 | pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false); |
1a4d82fc | 175 | |
3157f602 | 176 | #[cfg(target_has_atomic = "8")] |
1a4d82fc JJ |
177 | impl AtomicBool { |
178 | /// Creates a new `AtomicBool`. | |
179 | /// | |
180 | /// # Examples | |
181 | /// | |
182 | /// ``` | |
183 | /// use std::sync::atomic::AtomicBool; | |
184 | /// | |
185 | /// let atomic_true = AtomicBool::new(true); | |
186 | /// let atomic_false = AtomicBool::new(false); | |
187 | /// ``` | |
188 | #[inline] | |
85aaf69f | 189 | #[stable(feature = "rust1", since = "1.0.0")] |
62682a34 | 190 | pub const fn new(v: bool) -> AtomicBool { |
a7813a04 | 191 | AtomicBool { v: UnsafeCell::new(v as u8) } |
1a4d82fc JJ |
192 | } |
193 | ||
194 | /// Loads a value from the bool. | |
195 | /// | |
196 | /// `load` takes an `Ordering` argument which describes the memory ordering of this operation. | |
197 | /// | |
198 | /// # Panics | |
199 | /// | |
200 | /// Panics if `order` is `Release` or `AcqRel`. | |
201 | /// | |
202 | /// # Examples | |
203 | /// | |
204 | /// ``` | |
205 | /// use std::sync::atomic::{AtomicBool, Ordering}; | |
206 | /// | |
207 | /// let some_bool = AtomicBool::new(true); | |
208 | /// | |
62682a34 | 209 | /// assert_eq!(some_bool.load(Ordering::Relaxed), true); |
1a4d82fc JJ |
210 | /// ``` |
211 | #[inline] | |
85aaf69f | 212 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 213 | pub fn load(&self, order: Ordering) -> bool { |
a7813a04 | 214 | unsafe { atomic_load(self.v.get(), order) != 0 } |
1a4d82fc JJ |
215 | } |
216 | ||
217 | /// Stores a value into the bool. | |
218 | /// | |
219 | /// `store` takes an `Ordering` argument which describes the memory ordering of this operation. | |
220 | /// | |
221 | /// # Examples | |
222 | /// | |
223 | /// ``` | |
224 | /// use std::sync::atomic::{AtomicBool, Ordering}; | |
225 | /// | |
226 | /// let some_bool = AtomicBool::new(true); | |
227 | /// | |
228 | /// some_bool.store(false, Ordering::Relaxed); | |
62682a34 | 229 | /// assert_eq!(some_bool.load(Ordering::Relaxed), false); |
1a4d82fc JJ |
230 | /// ``` |
231 | /// | |
232 | /// # Panics | |
233 | /// | |
234 | /// Panics if `order` is `Acquire` or `AcqRel`. | |
235 | #[inline] | |
85aaf69f | 236 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 237 | pub fn store(&self, val: bool, order: Ordering) { |
a7813a04 | 238 | unsafe { atomic_store(self.v.get(), val as u8, order); } |
1a4d82fc JJ |
239 | } |
240 | ||
241 | /// Stores a value into the bool, returning the old value. | |
242 | /// | |
243 | /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation. | |
244 | /// | |
245 | /// # Examples | |
246 | /// | |
247 | /// ``` | |
248 | /// use std::sync::atomic::{AtomicBool, Ordering}; | |
249 | /// | |
250 | /// let some_bool = AtomicBool::new(true); | |
251 | /// | |
62682a34 SL |
252 | /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true); |
253 | /// assert_eq!(some_bool.load(Ordering::Relaxed), false); | |
1a4d82fc JJ |
254 | /// ``` |
255 | #[inline] | |
85aaf69f | 256 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 257 | pub fn swap(&self, val: bool, order: Ordering) -> bool { |
a7813a04 | 258 | unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 } |
1a4d82fc JJ |
259 | } |
260 | ||
c1a9b12d | 261 | /// Stores a value into the `bool` if the current value is the same as the `current` value. |
1a4d82fc | 262 | /// |
c1a9b12d SL |
263 | /// The return value is always the previous value. If it is equal to `current`, then the value |
264 | /// was updated. | |
1a4d82fc | 265 | /// |
c1a9b12d SL |
266 | /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of |
267 | /// this operation. | |
1a4d82fc JJ |
268 | /// |
269 | /// # Examples | |
270 | /// | |
271 | /// ``` | |
272 | /// use std::sync::atomic::{AtomicBool, Ordering}; | |
273 | /// | |
274 | /// let some_bool = AtomicBool::new(true); | |
275 | /// | |
62682a34 SL |
276 | /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true); |
277 | /// assert_eq!(some_bool.load(Ordering::Relaxed), false); | |
278 | /// | |
279 | /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false); | |
280 | /// assert_eq!(some_bool.load(Ordering::Relaxed), false); | |
1a4d82fc JJ |
281 | /// ``` |
282 | #[inline] | |
85aaf69f | 283 | #[stable(feature = "rust1", since = "1.0.0")] |
c1a9b12d | 284 | pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool { |
54a0048b SL |
285 | match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) { |
286 | Ok(x) => x, | |
287 | Err(x) => x, | |
288 | } | |
7453a54e SL |
289 | } |
290 | ||
291 | /// Stores a value into the `bool` if the current value is the same as the `current` value. | |
292 | /// | |
54a0048b | 293 | /// The return value is a result indicating whether the new value was written and containing |
3157f602 | 294 | /// the previous value. On success this value is guaranteed to be equal to `current`. |
7453a54e SL |
295 | /// |
296 | /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this | |
297 | /// operation. The first describes the required ordering if the operation succeeds while the | |
298 | /// second describes the required ordering when the operation fails. The failure ordering can't | |
54a0048b | 299 | /// be `Release` or `AcqRel` and must be equivalent to or weaker than the success ordering.
7453a54e SL |
300 | /// |
301 | /// # Examples | |
302 | /// | |
303 | /// ``` | |
7453a54e SL |
304 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
305 | /// | |
306 | /// let some_bool = AtomicBool::new(true); | |
307 | /// | |
308 | /// assert_eq!(some_bool.compare_exchange(true, | |
309 | /// false, | |
310 | /// Ordering::Acquire, | |
311 | /// Ordering::Relaxed), | |
54a0048b | 312 | /// Ok(true)); |
7453a54e SL |
313 | /// assert_eq!(some_bool.load(Ordering::Relaxed), false); |
314 | /// | |
315 | /// assert_eq!(some_bool.compare_exchange(true, true, | |
316 | /// Ordering::SeqCst, | |
317 | /// Ordering::Acquire), | |
54a0048b | 318 | /// Err(false)); |
7453a54e SL |
319 | /// assert_eq!(some_bool.load(Ordering::Relaxed), false); |
320 | /// ``` | |
321 | #[inline] | |
a7813a04 | 322 | #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] |
7453a54e SL |
323 | pub fn compare_exchange(&self, |
324 | current: bool, | |
325 | new: bool, | |
326 | success: Ordering, | |
54a0048b | 327 | failure: Ordering) -> Result<bool, bool> { |
a7813a04 XL |
328 | match unsafe { atomic_compare_exchange(self.v.get(), current as u8, new as u8, |
329 | success, failure) } { | |
330 | Ok(x) => Ok(x != 0), | |
331 | Err(x) => Err(x != 0), | |
54a0048b | 332 | } |
7453a54e SL |
333 | } |
334 | ||
335 | /// Stores a value into the `bool` if the current value is the same as the `current` value. | |
336 | /// | |
337 | /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the | |
338 | /// comparison succeeds, which can result in more efficient code on some platforms. The | |
54a0048b SL |
339 | /// return value is a result indicating whether the new value was written and containing the |
340 | /// previous value. | |
7453a54e SL |
341 | /// |
342 | /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory | |
343 | /// ordering of this operation. The first describes the required ordering if the operation | |
344 | /// succeeds while the second describes the required ordering when the operation fails. The | |
54a0048b | 345 | /// failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker than the
7453a54e SL |
346 | /// success ordering. |
347 | /// | |
348 | /// # Examples | |
349 | /// | |
350 | /// ``` | |
7453a54e SL |
351 | /// use std::sync::atomic::{AtomicBool, Ordering}; |
352 | /// | |
353 | /// let val = AtomicBool::new(false); | |
354 | /// | |
355 | /// let new = true; | |
356 | /// let mut old = val.load(Ordering::Relaxed); | |
357 | /// loop { | |
54a0048b SL |
358 | /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { |
359 | /// Ok(_) => break, | |
360 | /// Err(x) => old = x, | |
7453a54e SL |
361 | /// } |
362 | /// } | |
363 | /// ``` | |
364 | #[inline] | |
a7813a04 | 365 | #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] |
7453a54e SL |
366 | pub fn compare_exchange_weak(&self, |
367 | current: bool, | |
368 | new: bool, | |
369 | success: Ordering, | |
54a0048b | 370 | failure: Ordering) -> Result<bool, bool> { |
a7813a04 | 371 | match unsafe { atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, |
54a0048b | 372 | success, failure) } { |
a7813a04 XL |
373 | Ok(x) => Ok(x != 0), |
374 | Err(x) => Err(x != 0), | |
54a0048b | 375 | } |
1a4d82fc JJ |
376 | } |
377 | ||
378 | /// Logical "and" with a boolean value. | |
379 | /// | |
380 | /// Performs a logical "and" operation on the current value and the argument `val`, and sets | |
381 | /// the new value to the result. | |
382 | /// | |
383 | /// Returns the previous value. | |
384 | /// | |
385 | /// # Examples | |
386 | /// | |
387 | /// ``` | |
388 | /// use std::sync::atomic::{AtomicBool, Ordering}; | |
389 | /// | |
390 | /// let foo = AtomicBool::new(true); | |
62682a34 SL |
391 | /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true); |
392 | /// assert_eq!(foo.load(Ordering::SeqCst), false); | |
1a4d82fc JJ |
393 | /// |
394 | /// let foo = AtomicBool::new(true); | |
62682a34 SL |
395 | /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true); |
396 | /// assert_eq!(foo.load(Ordering::SeqCst), true); | |
1a4d82fc JJ |
397 | /// |
398 | /// let foo = AtomicBool::new(false); | |
62682a34 SL |
399 | /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false); |
400 | /// assert_eq!(foo.load(Ordering::SeqCst), false); | |
1a4d82fc JJ |
401 | /// ``` |
402 | #[inline] | |
85aaf69f | 403 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 404 | pub fn fetch_and(&self, val: bool, order: Ordering) -> bool { |
a7813a04 | 405 | unsafe { atomic_and(self.v.get(), val as u8, order) != 0 } |
1a4d82fc JJ |
406 | } |
407 | ||
408 | /// Logical "nand" with a boolean value. | |
409 | /// | |
410 | /// Performs a logical "nand" operation on the current value and the argument `val`, and sets | |
411 | /// the new value to the result. | |
412 | /// | |
413 | /// Returns the previous value. | |
414 | /// | |
415 | /// # Examples | |
416 | /// | |
417 | /// ``` | |
418 | /// use std::sync::atomic::{AtomicBool, Ordering}; | |
419 | /// | |
420 | /// let foo = AtomicBool::new(true); | |
62682a34 SL |
421 | /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true); |
422 | /// assert_eq!(foo.load(Ordering::SeqCst), true); | |
1a4d82fc JJ |
423 | /// |
424 | /// let foo = AtomicBool::new(true); | |
62682a34 SL |
425 | /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true); |
426 | /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0); | |
427 | /// assert_eq!(foo.load(Ordering::SeqCst), false); | |
1a4d82fc JJ |
428 | /// |
429 | /// let foo = AtomicBool::new(false); | |
62682a34 SL |
430 | /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false); |
431 | /// assert_eq!(foo.load(Ordering::SeqCst), true); | |
1a4d82fc JJ |
432 | /// ``` |
433 | #[inline] | |
85aaf69f | 434 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 435 | pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool { |
a7813a04 XL |
436 | // We can't use atomic_nand here because it can result in a bool with |
437 | // an invalid value. This happens because the atomic operation is done | |
438 | // with an 8-bit integer internally, which would set the upper 7 bits. | |
439 | // So we just use a compare-exchange loop instead, which is what the | |
440 | // intrinsic actually expands to anyways on many platforms. | |
441 | let mut old = self.load(Relaxed); | |
442 | loop { | |
443 | let new = !(old && val); | |
444 | match self.compare_exchange_weak(old, new, order, Relaxed) { | |
445 | Ok(_) => break, | |
446 | Err(x) => old = x, | |
447 | } | |
448 | } | |
449 | old | |
1a4d82fc JJ |
450 | } |
451 | ||
452 | /// Logical "or" with a boolean value. | |
453 | /// | |
454 | /// Performs a logical "or" operation on the current value and the argument `val`, and sets the | |
455 | /// new value to the result. | |
456 | /// | |
457 | /// Returns the previous value. | |
458 | /// | |
459 | /// # Examples | |
460 | /// | |
461 | /// ``` | |
462 | /// use std::sync::atomic::{AtomicBool, Ordering}; | |
463 | /// | |
464 | /// let foo = AtomicBool::new(true); | |
62682a34 SL |
465 | /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true); |
466 | /// assert_eq!(foo.load(Ordering::SeqCst), true); | |
1a4d82fc JJ |
467 | /// |
468 | /// let foo = AtomicBool::new(true); | |
62682a34 SL |
469 | /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true); |
470 | /// assert_eq!(foo.load(Ordering::SeqCst), true); | |
1a4d82fc JJ |
471 | /// |
472 | /// let foo = AtomicBool::new(false); | |
62682a34 SL |
473 | /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false); |
474 | /// assert_eq!(foo.load(Ordering::SeqCst), false); | |
1a4d82fc JJ |
475 | /// ``` |
476 | #[inline] | |
85aaf69f | 477 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 478 | pub fn fetch_or(&self, val: bool, order: Ordering) -> bool { |
a7813a04 | 479 | unsafe { atomic_or(self.v.get(), val as u8, order) != 0 } |
1a4d82fc JJ |
480 | } |
481 | ||
482 | /// Logical "xor" with a boolean value. | |
483 | /// | |
484 | /// Performs a logical "xor" operation on the current value and the argument `val`, and sets | |
485 | /// the new value to the result. | |
486 | /// | |
487 | /// Returns the previous value. | |
488 | /// | |
489 | /// # Examples | |
490 | /// | |
491 | /// ``` | |
492 | /// use std::sync::atomic::{AtomicBool, Ordering}; | |
493 | /// | |
494 | /// let foo = AtomicBool::new(true); | |
62682a34 SL |
495 | /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true); |
496 | /// assert_eq!(foo.load(Ordering::SeqCst), true); | |
1a4d82fc JJ |
497 | /// |
498 | /// let foo = AtomicBool::new(true); | |
62682a34 SL |
499 | /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true); |
500 | /// assert_eq!(foo.load(Ordering::SeqCst), false); | |
1a4d82fc JJ |
501 | /// |
502 | /// let foo = AtomicBool::new(false); | |
62682a34 SL |
503 | /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false); |
504 | /// assert_eq!(foo.load(Ordering::SeqCst), false); | |
1a4d82fc JJ |
505 | /// ``` |
506 | #[inline] | |
85aaf69f | 507 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 508 | pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool { |
a7813a04 | 509 | unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 } |
1a4d82fc JJ |
510 | } |
511 | } | |
512 | ||
3157f602 | 513 | #[cfg(target_has_atomic = "ptr")] |
1a4d82fc JJ |
514 | impl<T> AtomicPtr<T> { |
515 | /// Creates a new `AtomicPtr`. | |
516 | /// | |
517 | /// # Examples | |
518 | /// | |
519 | /// ``` | |
520 | /// use std::sync::atomic::AtomicPtr; | |
521 | /// | |
85aaf69f | 522 | /// let ptr = &mut 5; |
1a4d82fc JJ |
523 | /// let atomic_ptr = AtomicPtr::new(ptr); |
524 | /// ``` | |
525 | #[inline] | |
85aaf69f | 526 | #[stable(feature = "rust1", since = "1.0.0")] |
62682a34 SL |
527 | pub const fn new(p: *mut T) -> AtomicPtr<T> { |
528 | AtomicPtr { p: UnsafeCell::new(p) } | |
1a4d82fc JJ |
529 | } |
530 | ||
531 | /// Loads a value from the pointer. | |
532 | /// | |
533 | /// `load` takes an `Ordering` argument which describes the memory ordering of this operation. | |
534 | /// | |
535 | /// # Panics | |
536 | /// | |
537 | /// Panics if `order` is `Release` or `AcqRel`. | |
538 | /// | |
539 | /// # Examples | |
540 | /// | |
541 | /// ``` | |
542 | /// use std::sync::atomic::{AtomicPtr, Ordering}; | |
543 | /// | |
85aaf69f | 544 | /// let ptr = &mut 5; |
1a4d82fc JJ |
545 | /// let some_ptr = AtomicPtr::new(ptr); |
546 | /// | |
547 | /// let value = some_ptr.load(Ordering::Relaxed); | |
548 | /// ``` | |
549 | #[inline] | |
85aaf69f | 550 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
551 | pub fn load(&self, order: Ordering) -> *mut T { |
552 | unsafe { | |
62682a34 | 553 | atomic_load(self.p.get() as *mut usize, order) as *mut T |
1a4d82fc JJ |
554 | } |
555 | } | |
556 | ||
557 | /// Stores a value into the pointer. | |
558 | /// | |
559 | /// `store` takes an `Ordering` argument which describes the memory ordering of this operation. | |
560 | /// | |
561 | /// # Examples | |
562 | /// | |
563 | /// ``` | |
564 | /// use std::sync::atomic::{AtomicPtr, Ordering}; | |
565 | /// | |
85aaf69f | 566 | /// let ptr = &mut 5; |
1a4d82fc JJ |
567 | /// let some_ptr = AtomicPtr::new(ptr); |
568 | /// | |
85aaf69f | 569 | /// let other_ptr = &mut 10; |
1a4d82fc JJ |
570 | /// |
571 | /// some_ptr.store(other_ptr, Ordering::Relaxed); | |
572 | /// ``` | |
573 | /// | |
574 | /// # Panics | |
575 | /// | |
576 | /// Panics if `order` is `Acquire` or `AcqRel`. | |
577 | #[inline] | |
85aaf69f | 578 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 579 | pub fn store(&self, ptr: *mut T, order: Ordering) { |
62682a34 | 580 | unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); } |
1a4d82fc JJ |
581 | } |
582 | ||
583 | /// Stores a value into the pointer, returning the old value. | |
584 | /// | |
585 | /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation. | |
586 | /// | |
587 | /// # Examples | |
588 | /// | |
589 | /// ``` | |
590 | /// use std::sync::atomic::{AtomicPtr, Ordering}; | |
591 | /// | |
85aaf69f | 592 | /// let ptr = &mut 5; |
1a4d82fc JJ |
593 | /// let some_ptr = AtomicPtr::new(ptr); |
594 | /// | |
85aaf69f | 595 | /// let other_ptr = &mut 10; |
1a4d82fc JJ |
596 | /// |
597 | /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed); | |
598 | /// ``` | |
599 | #[inline] | |
85aaf69f | 600 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc | 601 | pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { |
62682a34 | 602 | unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T } |
1a4d82fc JJ |
603 | } |
604 | ||
c1a9b12d | 605 | /// Stores a value into the pointer if the current value is the same as the `current` value. |
1a4d82fc | 606 | /// |
c1a9b12d SL |
607 | /// The return value is always the previous value. If it is equal to `current`, then the value |
608 | /// was updated. | |
1a4d82fc JJ |
609 | /// |
610 | /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of | |
611 | /// this operation. | |
612 | /// | |
613 | /// # Examples | |
614 | /// | |
615 | /// ``` | |
616 | /// use std::sync::atomic::{AtomicPtr, Ordering}; | |
617 | /// | |
85aaf69f | 618 | /// let ptr = &mut 5; |
1a4d82fc JJ |
619 | /// let some_ptr = AtomicPtr::new(ptr); |
620 | /// | |
85aaf69f SL |
621 | /// let other_ptr = &mut 10; |
622 | /// let another_ptr = &mut 10; | |
1a4d82fc JJ |
623 | /// |
624 | /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed); | |
625 | /// ``` | |
626 | #[inline] | |
85aaf69f | 627 | #[stable(feature = "rust1", since = "1.0.0")] |
c1a9b12d | 628 | pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T { |
54a0048b SL |
629 | match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) { |
630 | Ok(x) => x, | |
631 | Err(x) => x, | |
632 | } | |
7453a54e SL |
633 | } |
634 | ||
635 | /// Stores a value into the pointer if the current value is the same as the `current` value. | |
636 | /// | |
54a0048b | 637 | /// The return value is a result indicating whether the new value was written and containing |
3157f602 | 638 | /// the previous value. On success this value is guaranteed to be equal to `current`. |
7453a54e SL |
639 | /// |
640 | /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this | |
641 | /// operation. The first describes the required ordering if the operation succeeds while the | |
642 | /// second describes the required ordering when the operation fails. The failure ordering can't | |
54a0048b | 643 | /// be `Release` or `AcqRel` and must be equivalent to or weaker than the success ordering.
7453a54e SL |
644 | /// |
645 | /// # Examples | |
646 | /// | |
647 | /// ``` | |
7453a54e SL |
648 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
649 | /// | |
650 | /// let ptr = &mut 5; | |
651 | /// let some_ptr = AtomicPtr::new(ptr); | |
652 | /// | |
653 | /// let other_ptr = &mut 10; | |
654 | /// let another_ptr = &mut 10; | |
655 | /// | |
656 | /// let value = some_ptr.compare_exchange(other_ptr, another_ptr, | |
657 | /// Ordering::SeqCst, Ordering::Relaxed); | |
658 | /// ``` | |
659 | #[inline] | |
a7813a04 | 660 | #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] |
7453a54e SL |
661 | pub fn compare_exchange(&self, |
662 | current: *mut T, | |
663 | new: *mut T, | |
664 | success: Ordering, | |
54a0048b | 665 | failure: Ordering) -> Result<*mut T, *mut T> { |
1a4d82fc | 666 | unsafe { |
54a0048b SL |
667 | let res = atomic_compare_exchange(self.p.get() as *mut usize, |
668 | current as usize, | |
669 | new as usize, | |
670 | success, | |
671 | failure); | |
672 | match res { | |
673 | Ok(x) => Ok(x as *mut T), | |
674 | Err(x) => Err(x as *mut T), | |
675 | } | |
1a4d82fc JJ |
676 | } |
677 | } | |
7453a54e SL |
678 | |
679 | /// Stores a value into the pointer if the current value is the same as the `current` value. | |
680 | /// | |
681 | /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the | |
682 | /// comparison succeeds, which can result in more efficient code on some platforms. The | |
54a0048b SL |
683 | /// return value is a result indicating whether the new value was written and containing the |
684 | /// previous value. | |
7453a54e SL |
685 | /// |
686 | /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory | |
687 | /// ordering of this operation. The first describes the required ordering if the operation | |
688 | /// succeeds while the second describes the required ordering when the operation fails. The | |
54a0048b | 689 | /// failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker than the
7453a54e SL |
690 | /// success ordering. |
691 | /// | |
692 | /// # Examples | |
693 | /// | |
694 | /// ``` | |
7453a54e SL |
695 | /// use std::sync::atomic::{AtomicPtr, Ordering}; |
696 | /// | |
697 | /// let some_ptr = AtomicPtr::new(&mut 5); | |
698 | /// | |
699 | /// let new = &mut 10; | |
700 | /// let mut old = some_ptr.load(Ordering::Relaxed); | |
701 | /// loop { | |
54a0048b SL |
702 | /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { |
703 | /// Ok(_) => break, | |
704 | /// Err(x) => old = x, | |
7453a54e SL |
705 | /// } |
706 | /// } | |
707 | /// ``` | |
708 | #[inline] | |
a7813a04 | 709 | #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] |
7453a54e SL |
710 | pub fn compare_exchange_weak(&self, |
711 | current: *mut T, | |
712 | new: *mut T, | |
713 | success: Ordering, | |
54a0048b SL |
714 | failure: Ordering) -> Result<*mut T, *mut T> { |
715 | unsafe { | |
716 | let res = atomic_compare_exchange_weak(self.p.get() as *mut usize, | |
717 | current as usize, | |
718 | new as usize, | |
719 | success, | |
720 | failure); | |
721 | match res { | |
722 | Ok(x) => Ok(x as *mut T), | |
723 | Err(x) => Err(x as *mut T), | |
724 | } | |
725 | } | |
7453a54e SL |
726 | } |
727 | } | |
728 | ||
a7813a04 XL |
/// Expands to an atomic integer type wrapping `$int_type`, its zero-valued
/// initializer constant `$atomic_init`, and the full set of atomic
/// operations.
///
/// The three leading meta items are the stability attributes applied to,
/// respectively: the type and most methods, the `compare_exchange*`
/// methods, and the `Debug` impl.
macro_rules! atomic_int {
    ($stable:meta,
     $stable_cxchg:meta,
     $stable_debug:meta,
     $int_type:ident $atomic_type:ident $atomic_init:ident) => {
        /// An integer type which can be safely shared between threads.
        #[$stable]
        pub struct $atomic_type {
            v: UnsafeCell<$int_type>,
        }

        /// An atomic integer initialized to `0`.
        #[$stable]
        pub const $atomic_init: $atomic_type = $atomic_type::new(0);

        #[$stable]
        impl Default for $atomic_type {
            fn default() -> Self {
                Self::new(Default::default())
            }
        }

        #[$stable_debug]
        impl fmt::Debug for $atomic_type {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($atomic_type))
                    .field(&self.load(Ordering::SeqCst))
                    .finish()
            }
        }

        // Send is implicitly implemented.
        #[$stable]
        unsafe impl Sync for $atomic_type {}

        impl $atomic_type {
            /// Creates a new atomic integer.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::AtomicIsize;
            ///
            /// let atomic_forty_two = AtomicIsize::new(42);
            /// ```
            #[inline]
            #[$stable]
            pub const fn new(v: $int_type) -> Self {
                $atomic_type {v: UnsafeCell::new(v)}
            }

            /// Loads a value from the atomic integer.
            ///
            /// `load` takes an `Ordering` argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Panics
            ///
            /// Panics if `order` is `Release` or `AcqRel`.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
            /// ```
            #[inline]
            #[$stable]
            pub fn load(&self, order: Ordering) -> $int_type {
                unsafe { atomic_load(self.v.get(), order) }
            }

            /// Stores a value into the atomic integer.
            ///
            /// `store` takes an `Ordering` argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// some_isize.store(10, Ordering::Relaxed);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            ///
            /// # Panics
            ///
            /// Panics if `order` is `Acquire` or `AcqRel`.
            #[inline]
            #[$stable]
            pub fn store(&self, val: $int_type, order: Ordering) {
                unsafe { atomic_store(self.v.get(), val, order); }
            }

            /// Stores a value into the atomic integer, returning the old value.
            ///
            /// `swap` takes an `Ordering` argument which describes the memory ordering of this
            /// operation.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
            /// ```
            #[inline]
            #[$stable]
            pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_swap(self.v.get(), val, order) }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is always the previous value. If it is equal to `current`, then the
            /// value was updated.
            ///
            /// `compare_and_swap` also takes an `Ordering` argument which describes the memory
            /// ordering of this operation.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[inline]
            #[$stable]
            pub fn compare_and_swap(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    order: Ordering) -> $int_type {
                // Implemented on top of `compare_exchange`, which returns a
                // Result; this legacy API flattens both outcomes into the
                // previous value.
                match self.compare_exchange(current,
                                            new,
                                            order,
                                            strongest_failure_ordering(order)) {
                    Ok(x) => x,
                    Err(x) => x,
                }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// The return value is a result indicating whether the new value was written and
            /// containing the previous value. On success this value is guaranteed to be equal to
            /// `current`.
            ///
            /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of
            /// this operation. The first describes the required ordering if the operation succeeds
            /// while the second describes the required ordering when the operation fails. The
            /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker
            /// than the success ordering.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let some_isize = AtomicIsize::new(5);
            ///
            /// assert_eq!(some_isize.compare_exchange(5, 10,
            ///                                        Ordering::Acquire,
            ///                                        Ordering::Relaxed),
            ///            Ok(5));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            ///
            /// assert_eq!(some_isize.compare_exchange(6, 12,
            ///                                        Ordering::SeqCst,
            ///                                        Ordering::Acquire),
            ///            Err(10));
            /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
            /// ```
            #[inline]
            #[$stable_cxchg]
            pub fn compare_exchange(&self,
                                    current: $int_type,
                                    new: $int_type,
                                    success: Ordering,
                                    failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
            }

            /// Stores a value into the atomic integer if the current value is the same as the
            /// `current` value.
            ///
            /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
            /// comparison succeeds, which can result in more efficient code on some platforms. The
            /// return value is a result indicating whether the new value was written and containing
            /// the previous value.
            ///
            /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
            /// ordering of this operation. The first describes the required ordering if the
            /// operation succeeds while the second describes the required ordering when the
            /// operation fails. The failure ordering can't be `Release` or `AcqRel` and must be
            /// equivalent or weaker than the success ordering.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let val = AtomicIsize::new(4);
            ///
            /// let mut old = val.load(Ordering::Relaxed);
            /// loop {
            ///     let new = old * 2;
            ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
            ///         Ok(_) => break,
            ///         Err(x) => old = x,
            ///     }
            /// }
            /// ```
            #[inline]
            #[$stable_cxchg]
            pub fn compare_exchange_weak(&self,
                                         current: $int_type,
                                         new: $int_type,
                                         success: Ordering,
                                         failure: Ordering) -> Result<$int_type, $int_type> {
                unsafe {
                    atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
                }
            }

            /// Add to the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), 10);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_add(self.v.get(), val, order) }
            }

            /// Subtract from the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0);
            /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
            /// assert_eq!(foo.load(Ordering::SeqCst), -10);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_sub(self.v.get(), val, order) }
            }

            /// Bitwise and with the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_and(self.v.get(), val, order) }
            }

            /// Bitwise or with the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_or(self.v.get(), val, order) }
            }

            /// Bitwise xor with the current value, returning the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use std::sync::atomic::{AtomicIsize, Ordering};
            ///
            /// let foo = AtomicIsize::new(0b101101);
            /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
            /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
            /// ```
            #[inline]
            #[$stable]
            pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                unsafe { atomic_xor(self.v.get(), val, order) }
            }
        }
    }
}
1054 | ||
// Unstable atomic variants of the fixed-width integer types. Each is gated
// on the target actually providing atomic operations of the matching width;
// the integer atomics are tracked by issue #32976.
#[cfg(target_has_atomic = "8")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i8 AtomicI8 ATOMIC_I8_INIT
}
#[cfg(target_has_atomic = "8")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u8 AtomicU8 ATOMIC_U8_INIT
}
#[cfg(target_has_atomic = "16")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i16 AtomicI16 ATOMIC_I16_INIT
}
#[cfg(target_has_atomic = "16")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u16 AtomicU16 ATOMIC_U16_INIT
}
#[cfg(target_has_atomic = "32")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i32 AtomicI32 ATOMIC_I32_INIT
}
#[cfg(target_has_atomic = "32")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u32 AtomicU32 ATOMIC_U32_INIT
}
#[cfg(target_has_atomic = "64")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    i64 AtomicI64 ATOMIC_I64_INIT
}
#[cfg(target_has_atomic = "64")]
atomic_int! {
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    unstable(feature = "integer_atomics", issue = "32976"),
    u64 AtomicU64 ATOMIC_U64_INIT
}
// Pointer-sized atomics. These predate the fixed-width variants and carry
// the original stable attributes (type since 1.0, compare_exchange* since
// 1.10, Debug since 1.3).
#[cfg(target_has_atomic = "ptr")]
atomic_int!{
    stable(feature = "rust1", since = "1.0.0"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    isize AtomicIsize ATOMIC_ISIZE_INIT
}
#[cfg(target_has_atomic = "ptr")]
atomic_int!{
    stable(feature = "rust1", since = "1.0.0"),
    stable(feature = "extended_compare_and_swap", since = "1.10.0"),
    stable(feature = "atomic_debug", since = "1.3.0"),
    usize AtomicUsize ATOMIC_USIZE_INIT
}
1125 | ||
7453a54e SL |
/// Maps a `compare_and_swap` success ordering to the strongest failure
/// ordering that `compare_exchange` permits for it: the failure ordering
/// may not be `Release`/`AcqRel` and may not be stronger than the success
/// ordering.
#[inline]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Release | Relaxed => Relaxed,
        Acquire | AcqRel => Acquire,
        _ => SeqCst,
    }
}
1136 | ||
1137 | #[inline] | |
7453a54e | 1138 | unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) { |
1a4d82fc JJ |
1139 | match order { |
1140 | Release => intrinsics::atomic_store_rel(dst, val), | |
1141 | Relaxed => intrinsics::atomic_store_relaxed(dst, val), | |
1142 | SeqCst => intrinsics::atomic_store(dst, val), | |
1143 | Acquire => panic!("there is no such thing as an acquire store"), | |
1144 | AcqRel => panic!("there is no such thing as an acquire/release store"), | |
1145 | } | |
1146 | } | |
1147 | ||
1148 | #[inline] | |
7453a54e | 1149 | unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T { |
1a4d82fc JJ |
1150 | match order { |
1151 | Acquire => intrinsics::atomic_load_acq(dst), | |
1152 | Relaxed => intrinsics::atomic_load_relaxed(dst), | |
1153 | SeqCst => intrinsics::atomic_load(dst), | |
1154 | Release => panic!("there is no such thing as a release load"), | |
1155 | AcqRel => panic!("there is no such thing as an acquire/release load"), | |
1156 | } | |
1157 | } | |
1158 | ||
1159 | #[inline] | |
1a4d82fc JJ |
1160 | unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1161 | match order { | |
1162 | Acquire => intrinsics::atomic_xchg_acq(dst, val), | |
1163 | Release => intrinsics::atomic_xchg_rel(dst, val), | |
1164 | AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), | |
1165 | Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), | |
1166 | SeqCst => intrinsics::atomic_xchg(dst, val) | |
1167 | } | |
1168 | } | |
1169 | ||
1170 | /// Returns the old value (like __sync_fetch_and_add). | |
1171 | #[inline] | |
1a4d82fc JJ |
1172 | unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1173 | match order { | |
1174 | Acquire => intrinsics::atomic_xadd_acq(dst, val), | |
1175 | Release => intrinsics::atomic_xadd_rel(dst, val), | |
1176 | AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), | |
1177 | Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), | |
1178 | SeqCst => intrinsics::atomic_xadd(dst, val) | |
1179 | } | |
1180 | } | |
1181 | ||
1182 | /// Returns the old value (like __sync_fetch_and_sub). | |
1183 | #[inline] | |
1a4d82fc JJ |
1184 | unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1185 | match order { | |
1186 | Acquire => intrinsics::atomic_xsub_acq(dst, val), | |
1187 | Release => intrinsics::atomic_xsub_rel(dst, val), | |
1188 | AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), | |
1189 | Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), | |
1190 | SeqCst => intrinsics::atomic_xsub(dst, val) | |
1191 | } | |
1192 | } | |
1193 | ||
// Strong compare-exchange: on success the value at `dst` is replaced by
// `new`; returns Ok(previous) on success and Err(previous) on failure.
#[inline]
unsafe fn atomic_compare_exchange<T>(dst: *mut T,
                                     old: T,
                                     new: T,
                                     success: Ordering,
                                     failure: Ordering) -> Result<T, T> {
    // Select the intrinsic from the (success, failure) ordering pair. Only
    // pairs where the failure ordering is not Release/AcqRel and no
    // stronger than the success ordering have a corresponding intrinsic;
    // every other combination is a caller error and panics.
    let (val, ok) = match (success, failure) {
        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
        (_, Release) => panic!("there is no such thing as a release failure ordering"),
        _ => panic!("a failure ordering can't be stronger than a success ordering"),
    };
    // The intrinsic yields the previously stored value plus a success flag;
    // fold them into the Result shape the public API exposes.
    if ok {
        Ok(val)
    } else {
        Err(val)
    }
}
1220 | ||
7453a54e | 1221 | #[inline] |
7453a54e SL |
1222 | unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T, |
1223 | old: T, | |
1224 | new: T, | |
1225 | success: Ordering, | |
54a0048b SL |
1226 | failure: Ordering) -> Result<T, T> { |
1227 | let (val, ok) = match (success, failure) { | |
7453a54e SL |
1228 | (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new), |
1229 | (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new), | |
1230 | (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new), | |
1231 | (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new), | |
1232 | (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new), | |
1233 | (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new), | |
1234 | (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new), | |
1235 | (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new), | |
1236 | (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new), | |
54a0048b SL |
1237 | (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"), |
1238 | (_, Release) => panic!("there is no such thing as a release failure ordering"), | |
7453a54e | 1239 | _ => panic!("a failure ordering can't be stronger than a success ordering"), |
54a0048b SL |
1240 | }; |
1241 | if ok { | |
1242 | Ok(val) | |
1243 | } else { | |
1244 | Err(val) | |
7453a54e SL |
1245 | } |
1246 | } | |
1247 | ||
1a4d82fc | 1248 | #[inline] |
1a4d82fc JJ |
1249 | unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1250 | match order { | |
1251 | Acquire => intrinsics::atomic_and_acq(dst, val), | |
1252 | Release => intrinsics::atomic_and_rel(dst, val), | |
1253 | AcqRel => intrinsics::atomic_and_acqrel(dst, val), | |
1254 | Relaxed => intrinsics::atomic_and_relaxed(dst, val), | |
1255 | SeqCst => intrinsics::atomic_and(dst, val) | |
1256 | } | |
1257 | } | |
1258 | ||
1a4d82fc | 1259 | #[inline] |
1a4d82fc JJ |
1260 | unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1261 | match order { | |
1262 | Acquire => intrinsics::atomic_or_acq(dst, val), | |
1263 | Release => intrinsics::atomic_or_rel(dst, val), | |
1264 | AcqRel => intrinsics::atomic_or_acqrel(dst, val), | |
1265 | Relaxed => intrinsics::atomic_or_relaxed(dst, val), | |
1266 | SeqCst => intrinsics::atomic_or(dst, val) | |
1267 | } | |
1268 | } | |
1269 | ||
1a4d82fc | 1270 | #[inline] |
1a4d82fc JJ |
1271 | unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T { |
1272 | match order { | |
1273 | Acquire => intrinsics::atomic_xor_acq(dst, val), | |
1274 | Release => intrinsics::atomic_xor_rel(dst, val), | |
1275 | AcqRel => intrinsics::atomic_xor_acqrel(dst, val), | |
1276 | Relaxed => intrinsics::atomic_xor_relaxed(dst, val), | |
1277 | SeqCst => intrinsics::atomic_xor(dst, val) | |
1278 | } | |
1279 | } | |
1280 | ||
1a4d82fc JJ |
1281 | /// An atomic fence. |
1282 | /// | |
1283 | /// A fence 'A' which has `Release` ordering semantics, synchronizes with a | |
1284 | /// fence 'B' with (at least) `Acquire` semantics, if and only if there exists | |
1285 | /// atomic operations X and Y, both operating on some atomic object 'M' such | |
1286 | /// that A is sequenced before X, Y is synchronized before B and Y observes | |
1287 | /// the change to M. This provides a happens-before dependence between A and B. | |
1288 | /// | |
1289 | /// Atomic operations with `Release` or `Acquire` semantics can also synchronize | |
1290 | /// with a fence. | |
1291 | /// | |
1292 | /// A fence which has `SeqCst` ordering, in addition to having both `Acquire` | |
1293 | /// and `Release` semantics, participates in the global program order of the | |
1294 | /// other `SeqCst` operations and/or fences. | |
1295 | /// | |
1296 | /// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings. | |
1297 | /// | |
1298 | /// # Panics | |
1299 | /// | |
1300 | /// Panics if `order` is `Relaxed`. | |
1301 | #[inline] | |
85aaf69f | 1302 | #[stable(feature = "rust1", since = "1.0.0")] |
1a4d82fc JJ |
1303 | pub fn fence(order: Ordering) { |
1304 | unsafe { | |
1305 | match order { | |
1306 | Acquire => intrinsics::atomic_fence_acq(), | |
1307 | Release => intrinsics::atomic_fence_rel(), | |
1308 | AcqRel => intrinsics::atomic_fence_acqrel(), | |
1309 | SeqCst => intrinsics::atomic_fence(), | |
1310 | Relaxed => panic!("there is no such thing as a relaxed fence") | |
1311 | } | |
1312 | } | |
1313 | } | |
c1a9b12d | 1314 | |
c1a9b12d | 1315 | |
3157f602 | 1316 | #[cfg(target_has_atomic = "8")] |
a7813a04 XL |
1317 | #[stable(feature = "atomic_debug", since = "1.3.0")] |
1318 | impl fmt::Debug for AtomicBool { | |
1319 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | |
1320 | f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish() | |
1321 | } | |
1322 | } | |
c1a9b12d | 1323 | |
3157f602 | 1324 | #[cfg(target_has_atomic = "ptr")] |
c1a9b12d SL |
1325 | #[stable(feature = "atomic_debug", since = "1.3.0")] |
1326 | impl<T> fmt::Debug for AtomicPtr<T> { | |
1327 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | |
1328 | f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish() | |
1329 | } | |
1330 | } |