// vendor/crossbeam-utils/src/atomic/atomic_cell.rs (crossbeam-utils, as vendored in rustc 1.40.0)
use core::cell::UnsafeCell;
use core::fmt;
use core::mem;
use core::ptr;
use core::slice;
use core::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering};

use Backoff;

/// A thread-safe mutable memory location.
///
/// This type is equivalent to [`Cell`], except it can also be shared among multiple threads.
///
/// Operations on `AtomicCell`s use atomic instructions whenever possible, and synchronize using
/// global locks otherwise. You can call [`AtomicCell::<T>::is_lock_free()`] to check whether
/// atomic instructions or locks will be used.
///
/// [`Cell`]: https://doc.rust-lang.org/std/cell/struct.Cell.html
/// [`AtomicCell::<T>::is_lock_free()`]: struct.AtomicCell.html#method.is_lock_free
pub struct AtomicCell<T> {
    /// The inner value.
    ///
    /// If this value can be transmuted into a primitive atomic type, it will be treated as such.
    /// Otherwise, all potentially concurrent operations on this data will be protected by a global
    /// lock.
    value: UnsafeCell<T>,
}

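// Editor's note: `AtomicCell<T>` never hands out shared references to the inner value
// (values only move in and out by copy or by ownership, and `get_mut` requires `&mut self`),
// so `T: Send` alone is enough for the cell to be both `Send` and `Sync` — the same
// reasoning that applies to `std::sync::Mutex<T>`.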
unsafe impl<T: Send> Send for AtomicCell<T> {}
unsafe impl<T: Send> Sync for AtomicCell<T> {}

impl<T> AtomicCell<T> {
    /// Creates a new atomic cell initialized with `val`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// ```
    pub fn new(val: T) -> AtomicCell<T> {
        AtomicCell {
            value: UnsafeCell::new(val),
        }
    }

    /// Returns a mutable reference to the inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let mut a = AtomicCell::new(7);
    /// *a.get_mut() += 1;
    ///
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.value.get() }
    }

    /// Unwraps the atomic cell and returns its inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    /// let v = a.into_inner();
    ///
    /// assert_eq!(v, 7);
    /// ```
    pub fn into_inner(self) -> T {
        self.value.into_inner()
    }

    /// Returns `true` if operations on values of this type are lock-free.
    ///
    /// If the compiler or the platform doesn't support the necessary atomic instructions,
    /// `AtomicCell<T>` will use global locks for every potentially concurrent atomic operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// // This type is internally represented as `AtomicUsize` so we can just use atomic
    /// // operations provided by it.
    /// assert_eq!(AtomicCell::<usize>::is_lock_free(), true);
    ///
    /// // A wrapper struct around `isize`.
    /// struct Foo {
    ///     bar: isize,
    /// }
    /// // `AtomicCell<Foo>` will be internally represented as `AtomicIsize`.
    /// assert_eq!(AtomicCell::<Foo>::is_lock_free(), true);
    ///
    /// // Operations on zero-sized types are always lock-free.
    /// assert_eq!(AtomicCell::<()>::is_lock_free(), true);
    ///
    /// // Very large types cannot be represented as any of the standard atomic types, so atomic
    /// // operations on them will have to use global locks for synchronization.
    /// assert_eq!(AtomicCell::<[u8; 1000]>::is_lock_free(), false);
    /// ```
    pub fn is_lock_free() -> bool {
        atomic_is_lock_free::<T>()
    }

    /// Stores `val` into the atomic cell.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// a.store(8);
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn store(&self, val: T) {
        if mem::needs_drop::<T>() {
            drop(self.swap(val));
        } else {
            unsafe {
                atomic_store(self.value.get(), val);
            }
        }
    }

    /// Stores `val` into the atomic cell and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// assert_eq!(a.swap(8), 7);
    /// assert_eq!(a.load(), 8);
    /// ```
    pub fn swap(&self, val: T) -> T {
        unsafe { atomic_swap(self.value.get(), val) }
    }
}

impl<T: Copy> AtomicCell<T> {
    /// Loads a value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(7);
    ///
    /// assert_eq!(a.load(), 7);
    /// ```
    pub fn load(&self) -> T {
        unsafe { atomic_load(self.value.get()) }
    }
}

impl<T: Copy + Eq> AtomicCell<T> {
    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_and_swap(2, 3), 1);
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_and_swap(1, 2), 1);
    /// assert_eq!(a.load(), 2);
    /// ```
    pub fn compare_and_swap(&self, current: T, new: T) -> T {
        match self.compare_exchange(current, new) {
            Ok(v) => v,
            Err(v) => v,
        }
    }

    /// If the current value equals `current`, stores `new` into the atomic cell.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(1);
    ///
    /// assert_eq!(a.compare_exchange(2, 3), Err(1));
    /// assert_eq!(a.load(), 1);
    ///
    /// assert_eq!(a.compare_exchange(1, 2), Ok(1));
    /// assert_eq!(a.load(), 2);
    /// ```
    pub fn compare_exchange(&self, mut current: T, new: T) -> Result<T, T> {
        loop {
            match unsafe { atomic_compare_exchange_weak(self.value.get(), current, new) } {
                Ok(_) => return Ok(current),
                Err(previous) => {
                    if previous != current {
                        return Err(previous);
                    }

                    // The compare-exchange operation has failed and didn't store `new`. The
                    // failure is either spurious, or `previous` was semantically equal to
                    // `current` but not byte-equal. Let's retry with `previous` as the new
                    // `current`.
                    current = previous;
                }
            }
        }
    }
}
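
// A quick editorial illustration (not part of the upstream crate) of how `compare_exchange`
// is typically used: retry a read-modify-write until no other thread has raced in between.
// The helper name `fetch_update_with` is made up for this sketch.
#[cfg(test)]
mod compare_exchange_example {
    use super::AtomicCell;

    /// Applies `f` to the current value until the compare-exchange succeeds and returns the
    /// previous value, mirroring the usual CAS-loop pattern.
    fn fetch_update_with<T, F>(cell: &AtomicCell<T>, mut f: F) -> T
    where
        T: Copy + Eq,
        F: FnMut(T) -> T,
    {
        let mut current = cell.load();
        loop {
            match cell.compare_exchange(current, f(current)) {
                Ok(previous) => return previous,
                // Another thread changed the value; retry with what is actually stored.
                Err(actual) => current = actual,
            }
        }
    }

    #[test]
    fn cas_loop_increments() {
        let a = AtomicCell::new(7);
        assert_eq!(fetch_update_with(&a, |v| v + 1), 7);
        assert_eq!(a.load(), 8);
    }
}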

macro_rules! impl_arithmetic {
    ($t:ty, $example:tt) => {
        impl AtomicCell<$t> {
            /// Increments the current value by `val` and returns the previous value.
            ///
            /// The addition wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_add(3), 7);
            /// assert_eq!(a.load(), 10);
            /// ```
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
                if can_transmute::<$t, atomic::AtomicUsize>() {
                    let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) };
                    a.fetch_add(val as usize, Ordering::SeqCst) as $t
                } else {
                    let _guard = lock(self.value.get() as usize).write();
                    let value = unsafe { &mut *(self.value.get()) };
                    let old = *value;
                    *value = value.wrapping_add(val);
                    old
                }
            }

            /// Decrements the current value by `val` and returns the previous value.
            ///
            /// The subtraction wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_sub(3), 7);
            /// assert_eq!(a.load(), 4);
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                if can_transmute::<$t, atomic::AtomicUsize>() {
                    let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) };
                    a.fetch_sub(val as usize, Ordering::SeqCst) as $t
                } else {
                    let _guard = lock(self.value.get() as usize).write();
                    let value = unsafe { &mut *(self.value.get()) };
                    let old = *value;
                    *value = value.wrapping_sub(val);
                    old
                }
            }

            /// Applies bitwise "and" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_and(3), 7);
            /// assert_eq!(a.load(), 3);
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                if can_transmute::<$t, atomic::AtomicUsize>() {
                    let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) };
                    a.fetch_and(val as usize, Ordering::SeqCst) as $t
                } else {
                    let _guard = lock(self.value.get() as usize).write();
                    let value = unsafe { &mut *(self.value.get()) };
                    let old = *value;
                    *value &= val;
                    old
                }
            }

            /// Applies bitwise "or" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_or(16), 7);
            /// assert_eq!(a.load(), 23);
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                if can_transmute::<$t, atomic::AtomicUsize>() {
                    let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) };
                    a.fetch_or(val as usize, Ordering::SeqCst) as $t
                } else {
                    let _guard = lock(self.value.get() as usize).write();
                    let value = unsafe { &mut *(self.value.get()) };
                    let old = *value;
                    *value |= val;
                    old
                }
            }

            /// Applies bitwise "xor" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_xor(2), 7);
            /// assert_eq!(a.load(), 5);
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                if can_transmute::<$t, atomic::AtomicUsize>() {
                    let a = unsafe { &*(self.value.get() as *const atomic::AtomicUsize) };
                    a.fetch_xor(val as usize, Ordering::SeqCst) as $t
                } else {
                    let _guard = lock(self.value.get() as usize).write();
                    let value = unsafe { &mut *(self.value.get()) };
                    let old = *value;
                    *value ^= val;
                    old
                }
            }
        }
    };
    ($t:ty, $atomic:ty, $example:tt) => {
        impl AtomicCell<$t> {
            /// Increments the current value by `val` and returns the previous value.
            ///
            /// The addition wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_add(3), 7);
            /// assert_eq!(a.load(), 10);
            /// ```
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
                let a = unsafe { &*(self.value.get() as *const $atomic) };
                a.fetch_add(val, Ordering::SeqCst)
            }

            /// Decrements the current value by `val` and returns the previous value.
            ///
            /// The subtraction wraps on overflow.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_sub(3), 7);
            /// assert_eq!(a.load(), 4);
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                let a = unsafe { &*(self.value.get() as *const $atomic) };
                a.fetch_sub(val, Ordering::SeqCst)
            }

            /// Applies bitwise "and" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_and(3), 7);
            /// assert_eq!(a.load(), 3);
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                let a = unsafe { &*(self.value.get() as *const $atomic) };
                a.fetch_and(val, Ordering::SeqCst)
            }

            /// Applies bitwise "or" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_or(16), 7);
            /// assert_eq!(a.load(), 23);
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                let a = unsafe { &*(self.value.get() as *const $atomic) };
                a.fetch_or(val, Ordering::SeqCst)
            }

            /// Applies bitwise "xor" to the current value and returns the previous value.
            ///
            /// # Examples
            ///
            /// ```
            /// use crossbeam_utils::atomic::AtomicCell;
            ///
            #[doc = $example]
            ///
            /// assert_eq!(a.fetch_xor(2), 7);
            /// assert_eq!(a.load(), 5);
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                let a = unsafe { &*(self.value.get() as *const $atomic) };
                a.fetch_xor(val, Ordering::SeqCst)
            }
        }
    };
    ($t:ty, $size:tt, $atomic:ty, $example:tt) => {
        #[cfg(target_has_atomic = $size)]
        impl_arithmetic!($t, $atomic, $example);
    };
}

cfg_if! {
    if #[cfg(feature = "nightly")] {
        impl_arithmetic!(u8, "8", atomic::AtomicU8, "let a = AtomicCell::new(7u8);");
        impl_arithmetic!(i8, "8", atomic::AtomicI8, "let a = AtomicCell::new(7i8);");
        impl_arithmetic!(u16, "16", atomic::AtomicU16, "let a = AtomicCell::new(7u16);");
        impl_arithmetic!(i16, "16", atomic::AtomicI16, "let a = AtomicCell::new(7i16);");
        impl_arithmetic!(u32, "32", atomic::AtomicU32, "let a = AtomicCell::new(7u32);");
        impl_arithmetic!(i32, "32", atomic::AtomicI32, "let a = AtomicCell::new(7i32);");
        impl_arithmetic!(u64, "64", atomic::AtomicU64, "let a = AtomicCell::new(7u64);");
        impl_arithmetic!(i64, "64", atomic::AtomicI64, "let a = AtomicCell::new(7i64);");
        impl_arithmetic!(u128, "let a = AtomicCell::new(7u128);");
        impl_arithmetic!(i128, "let a = AtomicCell::new(7i128);");
    } else {
        impl_arithmetic!(u8, "let a = AtomicCell::new(7u8);");
        impl_arithmetic!(i8, "let a = AtomicCell::new(7i8);");
        impl_arithmetic!(u16, "let a = AtomicCell::new(7u16);");
        impl_arithmetic!(i16, "let a = AtomicCell::new(7i16);");
        impl_arithmetic!(u32, "let a = AtomicCell::new(7u32);");
        impl_arithmetic!(i32, "let a = AtomicCell::new(7i32);");
        impl_arithmetic!(u64, "let a = AtomicCell::new(7u64);");
        impl_arithmetic!(i64, "let a = AtomicCell::new(7i64);");
        impl_arithmetic!(u128, "let a = AtomicCell::new(7u128);");
        impl_arithmetic!(i128, "let a = AtomicCell::new(7i128);");
    }
}

impl_arithmetic!(
    usize,
    atomic::AtomicUsize,
    "let a = AtomicCell::new(7usize);"
);
impl_arithmetic!(
    isize,
    atomic::AtomicIsize,
    "let a = AtomicCell::new(7isize);"
);

impl AtomicCell<bool> {
    /// Applies logical "and" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_and(true), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_and(false), true);
    /// assert_eq!(a.load(), false);
    /// ```
    #[inline]
    pub fn fetch_and(&self, val: bool) -> bool {
        let a = unsafe { &*(self.value.get() as *const AtomicBool) };
        a.fetch_and(val, Ordering::SeqCst)
    }

    /// Applies logical "or" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(false);
    ///
    /// assert_eq!(a.fetch_or(false), false);
    /// assert_eq!(a.load(), false);
    ///
    /// assert_eq!(a.fetch_or(true), false);
    /// assert_eq!(a.load(), true);
    /// ```
    #[inline]
    pub fn fetch_or(&self, val: bool) -> bool {
        let a = unsafe { &*(self.value.get() as *const AtomicBool) };
        a.fetch_or(val, Ordering::SeqCst)
    }

    /// Applies logical "xor" to the current value and returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::atomic::AtomicCell;
    ///
    /// let a = AtomicCell::new(true);
    ///
    /// assert_eq!(a.fetch_xor(false), true);
    /// assert_eq!(a.load(), true);
    ///
    /// assert_eq!(a.fetch_xor(true), true);
    /// assert_eq!(a.load(), false);
    /// ```
    #[inline]
    pub fn fetch_xor(&self, val: bool) -> bool {
        let a = unsafe { &*(self.value.get() as *const AtomicBool) };
        a.fetch_xor(val, Ordering::SeqCst)
    }
}

impl<T: Default> Default for AtomicCell<T> {
    fn default() -> AtomicCell<T> {
        AtomicCell::new(T::default())
    }
}

impl<T: Copy + fmt::Debug> fmt::Debug for AtomicCell<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("AtomicCell")
            .field("value", &self.load())
            .finish()
    }
}

/// Returns `true` if the two values are equal byte-for-byte.
fn byte_eq<T>(a: &T, b: &T) -> bool {
    unsafe {
        let a = slice::from_raw_parts(a as *const _ as *const u8, mem::size_of::<T>());
        let b = slice::from_raw_parts(b as *const _ as *const u8, mem::size_of::<T>());
        a == b
    }
}

/// Returns `true` if values of type `A` can be transmuted into values of type `B`.
fn can_transmute<A, B>() -> bool {
    // Sizes must be equal, and the alignment of `A` must be greater than or equal to that of `B`.
    mem::size_of::<A>() == mem::size_of::<B>() && mem::align_of::<A>() >= mem::align_of::<B>()
}
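
// A small editorial sketch (not in the upstream source) showing what `can_transmute` decides
// in practice, assuming a typical 32/64-bit target where `AtomicUsize` has the same size and
// alignment as `usize`.
#[cfg(test)]
mod can_transmute_example {
    use super::can_transmute;
    use core::sync::atomic::AtomicUsize;

    #[test]
    fn size_and_alignment_decide_the_representation() {
        // `usize` matches `AtomicUsize` exactly, so `AtomicCell<usize>` can be backed by
        // real atomic instructions.
        assert!(can_transmute::<usize, AtomicUsize>());
        // A 3-byte array can't fill an `AtomicUsize`, so operations on `AtomicCell<[u8; 3]>`
        // fall back to the global locks below.
        assert!(!can_transmute::<[u8; 3], AtomicUsize>());
    }
}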

/// A simple stamped lock.
struct Lock {
    /// The current state of the lock.
    ///
    /// All bits except the least significant one hold the current stamp. When locked, the state
    /// equals 1 and doesn't contain a valid stamp.
    state: AtomicUsize,
}

impl Lock {
    /// If not locked, returns the current stamp.
    ///
    /// This method should be called before optimistic reads.
    #[inline]
    fn optimistic_read(&self) -> Option<usize> {
        let state = self.state.load(Ordering::Acquire);
        if state == 1 {
            None
        } else {
            Some(state)
        }
    }

    /// Returns `true` if the current stamp is equal to `stamp`.
    ///
    /// This method should be called after optimistic reads to check whether they are valid. The
    /// argument `stamp` should correspond to the one returned by method `optimistic_read`.
    #[inline]
    fn validate_read(&self, stamp: usize) -> bool {
        atomic::fence(Ordering::Acquire);
        self.state.load(Ordering::Relaxed) == stamp
    }

    /// Grabs the lock for writing.
    #[inline]
    fn write(&'static self) -> WriteGuard {
        let backoff = Backoff::new();
        loop {
            let previous = self.state.swap(1, Ordering::Acquire);

            if previous != 1 {
                atomic::fence(Ordering::Release);

                return WriteGuard {
                    lock: self,
                    state: previous,
                };
            }

            backoff.snooze();
        }
    }
}

/// A RAII guard that releases the lock and increments the stamp when dropped.
struct WriteGuard {
    /// The parent lock.
    lock: &'static Lock,

    /// The stamp before locking.
    state: usize,
}

impl WriteGuard {
    /// Releases the lock without incrementing the stamp.
    #[inline]
    fn abort(self) {
        self.lock.state.store(self.state, Ordering::Release);
        // Forget the guard so that `drop` doesn't run and bump the stamp, which would
        // needlessly invalidate concurrent optimistic reads.
        mem::forget(self);
    }
}

impl Drop for WriteGuard {
    #[inline]
    fn drop(&mut self) {
        // Release the lock and increment the stamp.
        self.lock
            .state
            .store(self.state.wrapping_add(2), Ordering::Release);
    }
}

/// Returns a reference to the global lock associated with the `AtomicCell` at address `addr`.
///
/// This function is used to protect atomic data which doesn't fit into any of the primitive atomic
/// types in `std::sync::atomic`. Operations on such atomics must therefore use a global lock.
///
/// However, there is not only one global lock but an array of many locks, and one of them is
/// picked based on the given address. Having many locks reduces contention and improves
/// scalability.
#[inline]
#[must_use]
fn lock(addr: usize) -> &'static Lock {
    // The number of locks is a prime number because we want to make sure `addr % LEN` gets
    // dispersed across all locks.
    //
    // Note that addresses are always aligned to some power of 2, depending on type `T` in
    // `AtomicCell<T>`. If `LEN` was an even number, then `addr % LEN` would be an even number,
    // too, which means only half of the locks would get utilized!
    //
    // It is also possible for addresses to accidentally get aligned to a number that is not a
    // power of 2. Consider this example:
    //
    // ```
    // #[repr(C)]
    // struct Foo {
    //     a: AtomicCell<u8>,
    //     b: u8,
    //     c: u8,
    // }
    // ```
    //
    // Now, if we have a slice of type `&[Foo]`, it is possible that field `a` in all items gets
    // stored at addresses that are multiples of 3. It'd be too bad if `LEN` was divisible by 3.
    // In order to protect against such cases, we simply choose a large prime number for `LEN`.
    const LEN: usize = 97;

    const L: Lock = Lock {
        state: AtomicUsize::new(0),
    };
    static LOCKS: [Lock; LEN] = [
        L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L,
        L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L,
        L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L, L,
        L, L, L, L, L, L, L,
    ];

    // Because the modulus is a constant, the compiler can strength-reduce `% LEN` into a
    // sequence of cheap arithmetic operations instead of emitting a slow division instruction.
    &LOCKS[addr % LEN]
}
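
// An editorial sketch (not part of the upstream crate) of the stamped-lock protocol that
// `atomic_load` below relies on: optimistic reads are refused while a writer holds the lock,
// and a committed write bumps the stamp, invalidating any optimistic read that overlapped it.
// The address `0x1000` is just an arbitrary key into the lock table, and the test assumes no
// concurrent writer happens to hash to the same lock slot.
#[cfg(test)]
mod stamped_lock_example {
    use super::lock;

    #[test]
    fn committed_writes_invalidate_optimistic_reads() {
        let l = lock(0x1000);
        let stamp = l.optimistic_read().expect("lock should not be held");

        // While a writer holds the lock, optimistic reads are refused outright.
        {
            let guard = l.write();
            assert!(l.optimistic_read().is_none());
            drop(guard);
        }

        // The committed write bumped the stamp, so the earlier read can't validate anymore.
        assert!(!l.validate_read(stamp));
        assert!(l.optimistic_read().is_some());
    }
}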

/// An atomic `()`.
///
/// All operations are no-ops.
struct AtomicUnit;

impl AtomicUnit {
    #[inline]
    fn load(&self, _order: Ordering) {}

    #[inline]
    fn store(&self, _val: (), _order: Ordering) {}

    #[inline]
    fn swap(&self, _val: (), _order: Ordering) {}

    #[inline]
    fn compare_exchange_weak(
        &self,
        _current: (),
        _new: (),
        _success: Ordering,
        _failure: Ordering,
    ) -> Result<(), ()> {
        Ok(())
    }
}

macro_rules! atomic {
    // If values of type `$t` can be transmuted into values of the primitive atomic type `$atomic`,
    // declares variable `$a` of type `$atomic` and executes `$atomic_op`, breaking out of the loop.
    (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => {
        if can_transmute::<$t, $atomic>() {
            let $a: &$atomic;
            break $atomic_op;
        }
    };

    // If values of type `$t` can be transmuted into values of a primitive atomic type, declares
    // variable `$a` of that type and executes `$atomic_op`. Otherwise, just executes
    // `$fallback_op`.
    ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => {
        loop {
            atomic!(@check, $t, AtomicUnit, $a, $atomic_op);
            atomic!(@check, $t, atomic::AtomicUsize, $a, $atomic_op);

            #[cfg(feature = "nightly")]
            {
                #[cfg(target_has_atomic = "8")]
                atomic!(@check, $t, atomic::AtomicU8, $a, $atomic_op);
                #[cfg(target_has_atomic = "16")]
                atomic!(@check, $t, atomic::AtomicU16, $a, $atomic_op);
                #[cfg(target_has_atomic = "32")]
                atomic!(@check, $t, atomic::AtomicU32, $a, $atomic_op);
                #[cfg(target_has_atomic = "64")]
                atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op);
            }

            break $fallback_op;
        }
    };
}
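
// Editor's note on the `loop`/`break` construction above: a `loop` block lets each `@check`
// arm both produce a value and exit early via `break`, so whichever branch matches first
// decides the result of the whole expansion. Since `can_transmute` only compares
// `size_of`/`align_of` constants, in practice the optimizer collapses the chain down to the
// single branch that applies for a given `T`.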

/// Returns `true` if operations on `AtomicCell<T>` are lock-free.
fn atomic_is_lock_free<T>() -> bool {
    atomic! { T, _a, true, false }
}

/// Atomically reads data from `src`.
///
/// This operation uses the `SeqCst` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_load<T>(src: *mut T) -> T
where
    T: Copy,
{
    atomic! {
        T, a,
        {
            a = &*(src as *const _ as *const _);
            mem::transmute_copy(&a.load(Ordering::SeqCst))
        },
        {
            let lock = lock(src as usize);

            // Try doing an optimistic read first.
            if let Some(stamp) = lock.optimistic_read() {
                // We need a volatile read here because other threads might concurrently modify the
                // value. In theory, data races are *always* UB, even if we use volatile reads and
                // discard the data when a data race is detected. The proper solution would be to
                // do atomic reads and atomic writes, but we can't atomically read and write all
                // kinds of data since `AtomicU8` is not available on stable Rust yet.
                let val = ptr::read_volatile(src);

                if lock.validate_read(stamp) {
                    return val;
                }
            }

            // Grab a regular write lock so that writers don't starve this load.
            let guard = lock.write();
            let val = ptr::read(src);
            // The value hasn't been changed. Drop the guard without incrementing the stamp.
            guard.abort();
            val
        }
    }
}

/// Atomically writes `val` to `dst`.
///
/// This operation uses the `SeqCst` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_store<T>(dst: *mut T, val: T) {
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let res = a.store(mem::transmute_copy(&val), Ordering::SeqCst);
            mem::forget(val);
            res
        },
        {
            let _guard = lock(dst as usize).write();
            ptr::write(dst, val)
        }
    }
}

/// Atomically swaps data at `dst` with `val`.
///
/// This operation uses the `SeqCst` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let res = mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::SeqCst));
            mem::forget(val);
            res
        },
        {
            let _guard = lock(dst as usize).write();
            ptr::replace(dst, val)
        }
    }
}

/// Atomically compares data at `dst` to `current` and, if equal byte-for-byte, exchanges data at
/// `dst` with `new`.
///
/// Returns the old value on success, or the current value at `dst` on failure.
///
/// This operation uses the `SeqCst` ordering. If possible, an atomic instruction is used, and a
/// global lock otherwise.
unsafe fn atomic_compare_exchange_weak<T>(dst: *mut T, current: T, new: T) -> Result<T, T>
where
    T: Copy,
{
    atomic! {
        T, a,
        {
            a = &*(dst as *const _ as *const _);
            let res = a.compare_exchange_weak(
                mem::transmute_copy(&current),
                mem::transmute_copy(&new),
                Ordering::SeqCst,
                Ordering::SeqCst,
            );
            match res {
                Ok(v) => Ok(mem::transmute_copy(&v)),
                Err(v) => Err(mem::transmute_copy(&v)),
            }
        },
        {
            let guard = lock(dst as usize).write();

            if byte_eq(&*dst, &current) {
                Ok(ptr::replace(dst, new))
            } else {
                let val = ptr::read(dst);
                // The value hasn't been changed. Drop the guard without incrementing the stamp.
                guard.abort();
                Err(val)
            }
        }
    }
}
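
// An editorial end-to-end sketch (not part of the upstream crate): a value too large for any
// primitive atomic type exercises the global-lock fallback implemented by the helpers above,
// while the public `AtomicCell` API stays exactly the same.
#[cfg(test)]
mod fallback_path_example {
    use super::AtomicCell;

    #[test]
    fn oversized_values_go_through_the_lock_table() {
        // No standard atomic type is 1000 bytes wide, so this cell is not lock-free.
        assert!(!AtomicCell::<[u8; 1000]>::is_lock_free());

        let a = AtomicCell::new([7u8; 1000]);
        assert_eq!(a.load()[0], 7);

        a.store([8u8; 1000]);
        assert_eq!(a.swap([9u8; 1000])[0], 8);
        assert_eq!(a.load()[999], 9);
    }
}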