// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Atomic types
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including `AtomicBool`, `AtomicIsize`, and `AtomicUsize`.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [LLVM atomic orderings][1].
//!
//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
//!
//! Atomic variables are safe to share between threads (they implement `Sync`)
//! but they do not themselves provide the mechanism for sharing. The most
//! common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics
//! are often used for lazy global initialization.
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::thread;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     thread::spawn(move || {
//!         spinlock_clone.store(0, Ordering::SeqCst);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::SeqCst) != 0 {}
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
//!
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
//! println!("live threads: {}", old_thread_count + 1);
//! ```

#![stable(feature = "rust1", since = "1.0.0")]

use self::Ordering::*;

use marker::Sync;

use intrinsics;
use cell::UnsafeCell;

use default::Default;

/// A boolean type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
    v: UnsafeCell<usize>,
}

impl Default for AtomicBool {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

unsafe impl Sync for AtomicBool {}

/// A signed integer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicIsize {
    v: UnsafeCell<isize>,
}

impl Default for AtomicIsize {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

unsafe impl Sync for AtomicIsize {}

/// An unsigned integer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicUsize {
    v: UnsafeCell<usize>,
}

impl Default for AtomicUsize {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

unsafe impl Sync for AtomicUsize {}

/// A raw pointer type which can be safely shared between threads.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

impl<T> Default for AtomicPtr<T> {
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(::ptr::null_mut())
    }
}

unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are [the same as
/// C++'s](http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync).
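///
/// # Examples
///
/// An illustrative sketch (added here for clarity, not part of the original
/// docs) of the common `Release`/`Acquire` pairing: a reader that observes a
/// `Release` store via an `Acquire` load is also guaranteed to see every
/// write made before that store.
///
/// ```
/// use std::sync::atomic::{AtomicUsize, Ordering};
///
/// let data = AtomicUsize::new(0);
/// let ready = AtomicUsize::new(0);
///
/// // Writer side: publish `data`, then signal readiness with `Release`.
/// data.store(42, Ordering::Relaxed);
/// ready.store(1, Ordering::Release);
///
/// // Reader side: an `Acquire` load that sees the flag also sees `data`.
/// if ready.load(Ordering::Acquire) == 1 {
///     assert_eq!(data.load(Ordering::Relaxed), 42);
/// }
/// ```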
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone)]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to another thread that performs a load with `Acquire` ordering
    /// on the same value.
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in another thread.
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering.
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}

/// An `AtomicBool` initialized to `false`.
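///
/// For instance (an illustrative sketch mirroring the module-level
/// `ATOMIC_USIZE_INIT` example, not part of the original docs):
///
/// ```
/// use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
///
/// static FLAG: AtomicBool = ATOMIC_BOOL_INIT;
///
/// assert_eq!(FLAG.load(Ordering::Relaxed), false);
/// ```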
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
/// An `AtomicIsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
/// An `AtomicUsize` initialized to `0`.
#[stable(feature = "rust1", since = "1.0.0")]
pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);

// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
const UINT_TRUE: usize = !0;
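// To illustrate (a note added here, not in the original source): with `true`
// encoded as `UINT_TRUE == !0`, `fetch_nand` on two true values computes
// `!(!0 & !0) == 0`, which correctly reads back as `false`. Had `true` been
// encoded as `1`, `!(1 & 1)` would be a nonzero pattern matching neither
// encoding.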

impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: bool) -> AtomicBool {
        AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) }
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get(), order) > 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: bool, order: Ordering) {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the bool, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_swap(self.v.get(), val, order) > 0 }
    }

    /// Stores a value into the bool if the current value is the same as the expected value.
    ///
    /// The return value is always the previous value. If it is equal to `old`, then the value was
    /// updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory
    /// ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, old: bool, new: bool, order: Ordering) -> bool {
        let old = if old { UINT_TRUE } else { 0 };
        let new = if new { UINT_TRUE } else { 0 };

        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) > 0 }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_and(self.v.get(), val, order) > 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_nand(self.v.get(), val, order) > 0 }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_or(self.v.get(), val, order) > 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_xor(self.v.get(), val, order) > 0 }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl AtomicIsize {
    /// Creates a new `AtomicIsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicIsize;
    ///
    /// let atomic_forty_two = AtomicIsize::new(42);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: isize) -> AtomicIsize {
        AtomicIsize { v: UnsafeCell::new(v) }
    }

    /// Loads a value from the isize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> isize {
        unsafe { atomic_load(self.v.get(), order) }
    }

    /// Stores a value into the isize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// some_isize.store(10, Ordering::Relaxed);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: isize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the isize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// Stores a value into the isize if the current value is the same as the expected value.
    ///
    /// The return value is always the previous value. If it is equal to `old`, then the value was
    /// updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let some_isize = AtomicIsize::new(5);
    ///
    /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_isize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, old: isize, new: isize, order: Ordering) -> isize {
        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
    }

    /// Add an isize to the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_add(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtract an isize from the current value, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), -10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current isize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicIsize, Ordering};
    ///
    /// let foo = AtomicIsize::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl AtomicUsize {
    /// Creates a new `AtomicUsize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicUsize;
    ///
    /// let atomic_forty_two = AtomicUsize::new(42);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(v: usize) -> AtomicUsize {
        AtomicUsize { v: UnsafeCell::new(v) }
    }

    /// Loads a value from the usize.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 5);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> usize {
        unsafe { atomic_load(self.v.get(), order) }
    }

    /// Stores a value into the usize.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// some_usize.store(10, Ordering::Relaxed);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, val: usize, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Stores a value into the usize, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.swap(10, Ordering::Relaxed), 5);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// Stores a value into the usize if the current value is the same as the expected value.
    ///
    /// The return value is always the previous value. If it is equal to `old`, then the value was
    /// updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let some_usize = AtomicUsize::new(5);
    ///
    /// assert_eq!(some_usize.compare_and_swap(5, 10, Ordering::Relaxed), 5);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    ///
    /// assert_eq!(some_usize.compare_and_swap(6, 12, Ordering::Relaxed), 10);
    /// assert_eq!(some_usize.load(Ordering::Relaxed), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, old: usize, new: usize, order: Ordering) -> usize {
        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
    }

    /// Add to the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0);
    /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), 10);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_add(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtract from the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(10);
    /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 10);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_and(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_or(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current usize, returning the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicUsize, Ordering};
    ///
    /// let foo = AtomicUsize::new(0b101101);
    /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
    /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }

    /// Loads a value from the pointer.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe {
            atomic_load(self.p.get() as *mut usize, order) as *mut T
        }
    }

    /// Stores a value into the pointer.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); }
    }

    /// Stores a value into the pointer, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
    }

    /// Stores a value into the pointer if the current value is the same as the expected value.
    ///
    /// The return value is always the previous value. If it is equal to `old`, then the value was
    /// updated.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of
    /// this operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    /// let another_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T {
        unsafe {
            atomic_compare_and_swap(self.p.get() as *mut usize, old as usize,
                                    new as usize, order) as *mut T
        }
    }
}

#[inline]
unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        SeqCst => intrinsics::atomic_store(dst, val),
        Acquire => panic!("there is no such thing as an acquire store"),
        AcqRel => panic!("there is no such thing as an acquire/release store"),
    }
}

#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_load<T>(dst: *const T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        SeqCst => intrinsics::atomic_load(dst),
        Release => panic!("there is no such thing as a release load"),
        AcqRel => panic!("there is no such thing as an acquire/release load"),
    }
}

#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xchg(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_add).
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xadd(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_sub).
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xsub(dst, val)
    }
}

#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old: T, new: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
        Release => intrinsics::atomic_cxchg_rel(dst, old, new),
        AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        SeqCst => intrinsics::atomic_cxchg(dst, old, new),
    }
}

#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        SeqCst => intrinsics::atomic_and(dst, val)
    }
}

#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_nand_acq(dst, val),
        Release => intrinsics::atomic_nand_rel(dst, val),
        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
        SeqCst => intrinsics::atomic_nand(dst, val)
    }
}

#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        SeqCst => intrinsics::atomic_or(dst, val)
    }
}

#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        SeqCst => intrinsics::atomic_xor(dst, val)
    }
}

/// An atomic fence.
///
/// A fence 'A' which has `Release` ordering semantics, synchronizes with a
/// fence 'B' with (at least) `Acquire` semantics, if and only if there exist
/// atomic operations X and Y, both operating on some atomic object 'M', such
/// that A is sequenced before X, Y is sequenced before B, and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
/// with a fence.
///
/// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
/// and `Release` semantics, participates in the global program order of the
/// other `SeqCst` operations and/or fences.
///
/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
///
/// # Panics
///
/// Panics if `order` is `Relaxed`.
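///
/// # Examples
///
/// An illustrative sketch (added here for clarity, not part of the original
/// docs) of a `Release` fence paired with an `Acquire` fence around relaxed
/// atomics; in the notation above, the stores are X and the loads are Y:
///
/// ```
/// use std::sync::atomic::{fence, AtomicUsize, Ordering};
///
/// let data = AtomicUsize::new(0);
/// let ready = AtomicUsize::new(0);
///
/// // Writer: fence A gives the relaxed store below release semantics.
/// data.store(42, Ordering::Relaxed);
/// fence(Ordering::Release);           // A
/// ready.store(1, Ordering::Relaxed);  // X
///
/// // Reader: if Y observes X, fence B synchronizes with fence A.
/// if ready.load(Ordering::Relaxed) == 1 {  // Y
///     fence(Ordering::Acquire);            // B
///     assert_eq!(data.load(Ordering::Relaxed), 42);
/// }
/// ```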
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => panic!("there is no such thing as a relaxed fence")
        }
    }
}