]> git.proxmox.com Git - rustc.git/blob - vendor/crossbeam-utils-0.6.5/src/cache_padded.rs
New upstream version 1.41.1+dfsg1
[rustc.git] / vendor / crossbeam-utils-0.6.5 / src / cache_padded.rs
1 use core::fmt;
2 use core::ops::{Deref, DerefMut};
3
/// Pads and aligns a value to the length of a cache line.
///
/// In concurrent programming, sometimes it is desirable to make sure commonly accessed pieces of
/// data are not placed into the same cache line. Updating an atomic value invalidates the whole
/// cache line it belongs to, which makes the next access to the same cache line slower for other
/// CPU cores. Use `CachePadded` to ensure updating one piece of data doesn't invalidate other
/// cached data.
///
/// Cache lines are assumed to be 64 bytes on all architectures.
///
/// # Size and alignment
///
/// The size of `CachePadded<T>` is the smallest multiple of 64 bytes large enough to accommodate
/// a value of type `T`.
///
/// The alignment of `CachePadded<T>` is the maximum of 64 bytes and the alignment of `T`.
///
/// # Examples
///
/// Alignment and padding:
///
/// ```
/// use crossbeam_utils::CachePadded;
///
/// let array = [CachePadded::new(1i32), CachePadded::new(2i32)];
/// let addr1 = &*array[0] as *const i32 as usize;
/// let addr2 = &*array[1] as *const i32 as usize;
///
/// assert_eq!(addr2 - addr1, 64);
/// assert_eq!(addr1 % 64, 0);
/// assert_eq!(addr2 % 64, 0);
/// ```
///
/// When building a concurrent queue with a head and a tail index, it is wise to place them in
/// different cache lines so that concurrent threads pushing and popping elements don't invalidate
/// each other's cache lines:
///
/// ```
/// use crossbeam_utils::CachePadded;
/// use std::sync::atomic::AtomicUsize;
///
/// struct Queue<T> {
///     head: CachePadded<AtomicUsize>,
///     tail: CachePadded<AtomicUsize>,
///     buffer: *mut T,
/// }
/// ```
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
#[repr(align(64))]
pub struct CachePadded<T> {
    // The wrapped value; `repr(align(64))` on the struct provides the cache-line
    // alignment and trailing padding.
    value: T,
}
56
// SAFETY: `CachePadded<T>` is a plain wrapper around a single `T` field; padding and
// alignment do not affect thread-safety, so it can be sent/shared across threads exactly
// when `T` can. These impls restate what the compiler would derive automatically.
unsafe impl<T: Send> Send for CachePadded<T> {}
unsafe impl<T: Sync> Sync for CachePadded<T> {}
59
60 impl<T> CachePadded<T> {
61 /// Pads and aligns a value to the length of a cache line.
62 ///
63 /// # Examples
64 ///
65 /// ```
66 /// use crossbeam_utils::CachePadded;
67 ///
68 /// let padded_value = CachePadded::new(1);
69 /// ```
70 pub fn new(t: T) -> CachePadded<T> {
71 CachePadded::<T> { value: t }
72 }
73
74 /// Returns the value value.
75 ///
76 /// # Examples
77 ///
78 /// ```
79 /// use crossbeam_utils::CachePadded;
80 ///
81 /// let padded_value = CachePadded::new(7);
82 /// let value = padded_value.into_inner();
83 /// assert_eq!(value, 7);
84 /// ```
85 pub fn into_inner(self) -> T {
86 self.value
87 }
88 }
89
90 impl<T> Deref for CachePadded<T> {
91 type Target = T;
92
93 fn deref(&self) -> &T {
94 &self.value
95 }
96 }
97
98 impl<T> DerefMut for CachePadded<T> {
99 fn deref_mut(&mut self) -> &mut T {
100 &mut self.value
101 }
102 }
103
104 impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
105 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
106 f.debug_struct("CachePadded")
107 .field("value", &self.value)
108 .finish()
109 }
110 }
111
112 impl<T> From<T> for CachePadded<T> {
113 fn from(t: T) -> Self {
114 CachePadded::new(t)
115 }
116 }