use core::fmt;
use core::ops::{Deref, DerefMut};

/// Pads and aligns a value to the length of a cache line.
///
/// In concurrent programming, it is sometimes desirable to make sure commonly accessed pieces of
/// data are not placed into the same cache line. Updating an atomic value invalidates the whole
/// cache line it belongs to, which makes the next access to the same cache line slower for other
/// CPU cores. Use `CachePadded` to ensure updating one piece of data doesn't invalidate other
/// cached data.
///
/// # Size and alignment
///
/// Cache lines are assumed to be N bytes long, depending on the architecture:
///
/// * On x86-64, N = 128.
/// * On all others, N = 64.
///
/// Note that N is just a reasonable guess and is not guaranteed to match the actual cache line
/// length of the machine the program is running on. On modern Intel architectures, the spatial
/// prefetcher pulls in pairs of 64-byte cache lines at a time, so we pessimistically assume that
/// cache lines are 128 bytes long.
///
/// The size of `CachePadded<T>` is the smallest multiple of N bytes large enough to accommodate
/// a value of type `T`.
///
/// The alignment of `CachePadded<T>` is the maximum of N bytes and the alignment of `T`.
///
/// # Examples
///
/// Alignment and padding:
///
/// ```
/// use crossbeam_utils::CachePadded;
///
/// let array = [CachePadded::new(1i8), CachePadded::new(2i8)];
/// let addr1 = &*array[0] as *const i8 as usize;
/// let addr2 = &*array[1] as *const i8 as usize;
///
/// assert!(addr2 - addr1 >= 64);
/// assert_eq!(addr1 % 64, 0);
/// assert_eq!(addr2 % 64, 0);
/// ```
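///
/// The size and alignment rules above can also be checked directly. This is a small sketch; the
/// value of N here is only the per-architecture assumption described above, not the actual cache
/// line length of the machine the code runs on:
///
/// ```
/// use std::mem::{align_of, size_of};
/// use crossbeam_utils::CachePadded;
///
/// // N as assumed by this crate: 128 on x86_64, 64 on all other architectures.
/// let n = if cfg!(target_arch = "x86_64") { 128 } else { 64 };
///
/// // A `u64` fits into a single N-byte block, so the padded value occupies exactly N bytes
/// // and is aligned to N.
/// assert_eq!(size_of::<CachePadded<u64>>(), n);
/// assert_eq!(align_of::<CachePadded<u64>>(), n);
/// ```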
///
/// When building a concurrent queue with a head and a tail index, it is wise to place them in
/// different cache lines so that concurrent threads pushing and popping elements don't invalidate
/// each other's cache lines:
///
/// ```
/// use crossbeam_utils::CachePadded;
/// use std::sync::atomic::AtomicUsize;
///
/// struct Queue<T> {
///     head: CachePadded<AtomicUsize>,
///     tail: CachePadded<AtomicUsize>,
///     buffer: *mut T,
/// }
/// ```
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
// Starting from Intel's Sandy Bridge, the spatial prefetcher pulls in pairs of 64-byte cache
// lines at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
pub struct CachePadded<T> {
    value: T,
}

unsafe impl<T: Send> Send for CachePadded<T> {}
unsafe impl<T: Sync> Sync for CachePadded<T> {}

impl<T> CachePadded<T> {
    /// Pads and aligns a value to the length of a cache line.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::CachePadded;
    ///
    /// let padded_value = CachePadded::new(1);
    /// ```
    pub fn new(t: T) -> CachePadded<T> {
        CachePadded::<T> { value: t }
    }

    /// Returns the inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::CachePadded;
    ///
    /// let padded_value = CachePadded::new(7);
    /// let value = padded_value.into_inner();
    /// assert_eq!(value, 7);
    /// ```
    pub fn into_inner(self) -> T {
        self.value
    }
}
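
/// Since `CachePadded<T>` implements `Deref` and `DerefMut`, the wrapped value can be used as if
/// it were not padded at all. A small usage sketch:
///
/// ```
/// use crossbeam_utils::CachePadded;
///
/// let mut padded = CachePadded::new(vec![1, 2]);
/// padded.push(3);              // `DerefMut` forwards the call to the inner `Vec`.
/// assert_eq!(padded.len(), 3); // `Deref` gives shared access to the inner `Vec`.
/// assert_eq!(*padded, vec![1, 2, 3]);
/// ```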
impl<T> Deref for CachePadded<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.value
    }
}

impl<T> DerefMut for CachePadded<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("CachePadded")
            .field("value", &self.value)
            .finish()
    }
}
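
/// Converting with `From`/`Into` is equivalent to wrapping with `CachePadded::new`; a small
/// sketch:
///
/// ```
/// use crossbeam_utils::CachePadded;
///
/// let padded: CachePadded<i32> = 7.into();
/// assert_eq!(*padded, 7);
/// ```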
impl<T> From<T> for CachePadded<T> {
    fn from(t: T) -> Self {
        CachePadded::new(t)
    }
}