// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Atomic types
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1].
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//!
//! Atomic variables are safe to share between threads (they implement `Sync`)
//! but they do not themselves provide the mechanism for sharing. The most
//! common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics
//! are often used for lazy global initialization.
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     thread::spawn(move || {
//!         spinlock_clone.store(0, Ordering::SeqCst);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::SeqCst) != 0 {}
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```
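//!
//! A minimal sketch of the lazy global initialization mentioned above,
//! using an atomic flag so that setup code runs at most once (the setup
//! itself is left as a placeholder):
//!
//! ```
//! use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
//!
//! static INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT;
//!
//! // `compare_and_swap` returns the previous value, so exactly one
//! // caller observes `false` and wins the right to run the setup.
//! if !INITIALIZED.compare_and_swap(false, true, Ordering::SeqCst) {
//!     // ... perform one-time setup here ...
//! }
//! ```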

#![stable(feature = "rust1", since = "1.0.0")]

use self::Ordering::*;

use marker::{Send, Sync};

use intrinsics;
use cell::UnsafeCell;

use default::Default;
use fmt;

/// A boolean type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    v: UnsafeCell<usize>,
}

impl Default for AtomicBool {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

unsafe impl Sync for AtomicBool {}

/// A signed integer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicIsize {
    v: UnsafeCell<isize>,
}

impl Default for AtomicIsize {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

unsafe impl Sync for AtomicIsize {}

/// An unsigned integer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicUsize {
    v: UnsafeCell<usize>,
}

impl Default for AtomicUsize {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

unsafe impl Sync for AtomicUsize {}

/// A raw pointer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

impl<T> Default for AtomicPtr<T> {
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(::ptr::null_mut())
    }
}

// Raw pointers are neither `Send` nor `Sync` by default, so both must be
// asserted explicitly for the atomic wrapper.
unsafe impl<T> Send for AtomicPtr<T> {}
unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations).
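///
/// As an illustrative sketch (the names here are examples, not part of the
/// API), a `Release` store pairs with an `Acquire` load to publish data
/// from one thread to another:
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// use std::thread;
///
/// let data = Arc::new(AtomicUsize::new(0));
/// let ready = Arc::new(AtomicBool::new(false));
///
/// let (data2, ready2) = (data.clone(), ready.clone());
/// thread::spawn(move || {
///     data2.store(42, Ordering::Relaxed);
///     // The `Release` store publishes the write to `data2` to any
///     // thread that observes `ready == true` with an `Acquire` load.
///     ready2.store(true, Ordering::Release);
/// });
///
/// while !ready.load(Ordering::Acquire) {}
/// assert_eq!(data.load(Ordering::Relaxed), 42);
/// ```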
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to another thread that performs a load with `Acquire` ordering
    /// on the same value.
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in another thread.
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}

/// An `AtomicBool` initialized to `false`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
/// An `AtomicIsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
/// An `AtomicUsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);

// NB: Needs to be -1 (0b11111111...) to make `fetch_nand` work correctly:
// NAND of values drawn from {0, !0} stays in {0, !0}, which a plain `1`
// for "true" would not guarantee.
const UINT_TRUE: usize = !0;

impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: bool) -> AtomicBool {
        // `-(v as isize) as usize` maps `true` to `UINT_TRUE` (!0) and
        // `false` to 0 without branching, which is allowed in a `const fn`.
        AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) }
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) > 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the bool, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_swap(self.v.get(), val, order) > 0 }
    }

    /// Stores a value into the `bool` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        let current = if current { UINT_TRUE } else { 0 };
        let new = if new { UINT_TRUE } else { 0 };

        unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) > 0 }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_and(self.v.get(), val, order) > 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_nand(self.v.get(), val, order) > 0 }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_or(self.v.get(), val, order) > 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_xor(self.v.get(), val, order) > 0 }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl AtomicIsize {
    /// Creates a new `AtomicIsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicIsize;
    ///
    /// let atomic_forty_two = AtomicIsize::new(42);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: isize) -> AtomicIsize {
        AtomicIsize { v: UnsafeCell::new(v) }
    }

    /// Loads a value from the isize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> isize {
        unsafe { atomic_load(self.v.get(), order) }
    }

    /// Stores a value into the isize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// some_isize.store(10, Ordering::Relaxed);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: isize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the isize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// Stores a value into the `isize` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: isize, new: isize, order: Ordering) -> isize {
        unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
    }

    /// Adds an isize to the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_add(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtracts an isize from the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), -10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl AtomicUsize {
    /// Creates a new `AtomicUsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicUsize;
    ///
    /// let atomic_forty_two = AtomicUsize::new(42);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: usize) -> AtomicUsize {
        AtomicUsize { v: UnsafeCell::new(v) }
    }

    /// Loads a value from the usize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> usize {
        unsafe { atomic_load(self.v.get(), order) }
    }

    /// Stores a value into the usize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// some_usize.store(10, Ordering::Relaxed);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: usize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the usize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.swap(10, Ordering::Relaxed), 5);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// Stores a value into the `usize` if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_usize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: usize, new: usize, order: Ordering) -> usize {
        unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) }
    }

    /// Adds to the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtracts from the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(10);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 10);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }

    /// Loads a value from the pointer.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe {
            atomic_load(self.p.get() as *mut usize, order) as *mut T
        }
    }

    /// Stores a value into the pointer.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); }
    }

    /// Stores a value into the pointer, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        unsafe {
            atomic_compare_and_swap(self.p.get() as *mut usize, current as usize,
                                    new as usize, order) as *mut T
        }
    }
}

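// The private helpers below map each `Ordering` onto the corresponding
// monomorphic intrinsic. All of them are `unsafe`: callers must supply a
// valid, properly aligned pointer to a type the target can operate on
// atomically.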
#[inline]
unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
    }
}

#[inline]
unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
    }
}

#[inline]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_add).
#[inline]
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_sub).
#[inline]
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val)
    }
}

#[inline]
unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old: T, new: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
        Release => intrinsics::atomic_cxchg_rel(dst, old, new),
        AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        SeqCst => intrinsics::atomic_cxchg(dst, old, new),
    }
}

#[inline]
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val)
    }
}

#[inline]
unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_nand_acq(dst, val),
        Release => intrinsics::atomic_nand_rel(dst, val),
        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
        SeqCst => intrinsics::atomic_nand(dst, val)
    }
}

#[inline]
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val)
    }
}

#[inline]
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val)
    }
}

/// An atomic fence.
///
/// A fence 'A' which has `Release` ordering semantics, synchronizes with a
/// fence 'B' with (at least) `Acquire` semantics, if and only if there exist
/// atomic operations X and Y, both operating on some atomic object 'M', such
/// that A is sequenced before X, Y is sequenced before B, and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
/// with a fence.
///
/// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
/// and `Release` semantics, participates in the global program order of the
/// other `SeqCst` operations and/or fences.
///
/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
///
/// # Panics
///
/// Panics if `order` is `Relaxed`.
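///
/// # Examples
///
/// A minimal sketch (with illustrative names) pairing a `Release` fence with
/// an `Acquire` fence around otherwise relaxed operations, matching the
/// A/X/Y/B description above:
///
/// ```
/// use std::sync::Arc;
/// use std::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};
/// use std::thread;
///
/// let data = Arc::new(AtomicUsize::new(0));
/// let ready = Arc::new(AtomicBool::new(false));
///
/// let (data2, ready2) = (data.clone(), ready.clone());
/// thread::spawn(move || {
///     data2.store(42, Ordering::Relaxed);
///     fence(Ordering::Release);               // fence A
///     ready2.store(true, Ordering::Relaxed);  // operation X
/// });
///
/// while !ready.load(Ordering::Relaxed) {}     // operation Y observes X
/// fence(Ordering::Acquire);                   // fence B
/// assert_eq!(data.load(Ordering::Relaxed), 42);
/// ```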
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence")
        }
    }
}

macro_rules! impl_Debug {
    ($($t:ident)*) => ($(
        #[stable(feature = "atomic_debug", since = "1.3.0")]
        impl fmt::Debug for $t {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.debug_tuple(stringify!($t)).field(&self.load(Ordering::SeqCst)).finish()
            }
        }
    )*);
}

impl_Debug!{ AtomicUsize AtomicIsize AtomicBool }
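
// With these impls, `format!("{:?}", AtomicUsize::new(5))` renders as
// `AtomicUsize(5)`; the stored value is read with `SeqCst` ordering.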

#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish()
    }
}