use core::fmt;
use core::ops::{Deref, DerefMut};

/// Pads and aligns a value to the length of a cache line.
///
/// In concurrent programming, sometimes it is desirable to make sure commonly accessed pieces of
/// data are not placed into the same cache line. Updating an atomic value invalidates the whole
/// cache line it belongs to, which makes the next access to the same cache line slower for other
/// CPU cores. Use `CachePadded` to ensure updating one piece of data doesn't invalidate other
/// cached data.
///
/// # Size and alignment
///
/// Cache lines are assumed to be N bytes long, depending on the architecture:
///
/// * On x86-64, N = 128.
/// * On all others, N = 64.
///
/// Note that N is just a reasonable guess and is not guaranteed to match the actual cache line
/// length of the machine the program is running on. On modern Intel architectures, the spatial
/// prefetcher pulls pairs of 64-byte cache lines at a time, so we pessimistically assume that
/// cache lines are 128 bytes long.
///
/// The size of `CachePadded<T>` is the smallest multiple of N bytes large enough to accommodate
/// a value of type `T`.
///
/// The alignment of `CachePadded<T>` is the maximum of N bytes and the alignment of `T`.
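///
/// For example, a minimal sketch of these rules (the concrete numbers assume an
/// x86-64 target, where N = 128):
///
/// ```
/// use std::mem;
/// use crossbeam_utils::CachePadded;
///
/// // `u8` is 1 byte, so the padded size rounds up to one full cache line.
/// if cfg!(target_arch = "x86_64") {
///     assert_eq!(mem::size_of::<CachePadded<u8>>(), 128);
///     assert_eq!(mem::align_of::<CachePadded<u8>>(), 128);
/// }
/// ```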
///
/// # Examples
///
/// Alignment and padding:
///
/// ```
/// use crossbeam_utils::CachePadded;
///
/// let array = [CachePadded::new(1i8), CachePadded::new(2i8)];
/// let addr1 = &*array[0] as *const i8 as usize;
/// let addr2 = &*array[1] as *const i8 as usize;
///
/// assert!(addr2 - addr1 >= 64);
/// assert_eq!(addr1 % 64, 0);
/// assert_eq!(addr2 % 64, 0);
/// ```
///
/// When building a concurrent queue with a head and a tail index, it is wise to place them in
/// different cache lines so that concurrent threads pushing and popping elements don't invalidate
/// each other's cache lines:
///
/// ```
/// use crossbeam_utils::CachePadded;
/// use std::sync::atomic::AtomicUsize;
///
/// struct Queue<T> {
///     head: CachePadded<AtomicUsize>,
///     tail: CachePadded<AtomicUsize>,
///     buffer: *mut T,
/// }
/// ```
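///
/// With this layout, a thread updating `head` and a thread updating `tail` touch
/// different cache lines, so neither update invalidates the other's line. A
/// minimal sketch of that usage:
///
/// ```
/// use crossbeam_utils::CachePadded;
/// use std::sync::atomic::{AtomicUsize, Ordering};
///
/// let head = CachePadded::new(AtomicUsize::new(0));
/// let tail = CachePadded::new(AtomicUsize::new(0));
///
/// // Each counter occupies its own cache line, so these updates don't
/// // invalidate each other's lines.
/// head.fetch_add(1, Ordering::SeqCst);
/// tail.fetch_add(1, Ordering::SeqCst);
/// assert_eq!(head.load(Ordering::SeqCst), 1);
/// ```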
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
// Starting from Intel's Sandy Bridge, the spatial prefetcher pulls pairs of 64-byte cache
// lines at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
pub struct CachePadded<T> {
    value: T,
}

// Safety: `CachePadded<T>` is just `T` plus padding, so it can be sent or shared
// across threads exactly when `T` can.
unsafe impl<T: Send> Send for CachePadded<T> {}
unsafe impl<T: Sync> Sync for CachePadded<T> {}

impl<T> CachePadded<T> {
    /// Pads and aligns a value to the length of a cache line.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::CachePadded;
    ///
    /// let padded_value = CachePadded::new(1);
    /// ```
    pub fn new(t: T) -> CachePadded<T> {
        CachePadded::<T> { value: t }
    }

    /// Returns the inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::CachePadded;
    ///
    /// let padded_value = CachePadded::new(7);
    /// let value = padded_value.into_inner();
    /// assert_eq!(value, 7);
    /// ```
    pub fn into_inner(self) -> T {
        self.value
    }
}
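
/// A `CachePadded<T>` dereferences to its inner value, so methods of `T` can be
/// called on the wrapper directly. A minimal sketch:
///
/// ```
/// use crossbeam_utils::CachePadded;
///
/// let mut padded = CachePadded::new(vec![1, 2]);
/// padded.push(3); // resolves through `DerefMut` to `Vec::push`
/// assert_eq!(padded.len(), 3); // resolves through `Deref` to `Vec::len`
/// ```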
impl<T> Deref for CachePadded<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.value
    }
}

impl<T> DerefMut for CachePadded<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}
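
/// The `Debug` output mirrors a plain struct, so a padded `7` prints as
/// `CachePadded { value: 7 }`. A minimal sketch:
///
/// ```
/// use crossbeam_utils::CachePadded;
///
/// assert_eq!(format!("{:?}", CachePadded::new(7)), "CachePadded { value: 7 }");
/// ```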
impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("CachePadded")
            .field("value", &self.value)
            .finish()
    }
}
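
/// Converting with `From`/`Into` is equivalent to calling `CachePadded::new`.
/// A minimal sketch:
///
/// ```
/// use crossbeam_utils::CachePadded;
///
/// let padded: CachePadded<u64> = 42u64.into();
/// assert_eq!(*padded, 42);
/// ```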
impl<T> From<T> for CachePadded<T> {
    fn from(t: T) -> Self {
        CachePadded::new(t)
    }
}