]> git.proxmox.com Git - rustc.git/blame - src/librustc_data_structures/sharded.rs
New upstream version 1.47.0+dfsg1
[rustc.git] / src / librustc_data_structures / sharded.rs
CommitLineData
dfeec247
XL
1use crate::fx::{FxHashMap, FxHasher};
2use crate::sync::{Lock, LockGuard};
3use smallvec::SmallVec;
416331ca
XL
4use std::borrow::Borrow;
5use std::collections::hash_map::RawEntryMut;
dfeec247
XL
6use std::hash::{Hash, Hasher};
7use std::mem;
416331ca
XL
8
/// Wrapper that, under the parallel compiler, aligns its contents to a
/// 64-byte boundary so that adjacent shards land on different cache lines
/// and don't false-share. In the non-parallel build it is a plain newtype.
#[derive(Clone, Default)]
#[cfg_attr(parallel_compiler, repr(align(64)))]
struct CacheAligned<T>(T);
12
#[cfg(parallel_compiler)]
// 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
// but this should be tested on higher core count CPUs. How the `Sharded` type gets used
// may also affect the ideal number of shards.
const SHARD_BITS: usize = 5;

// Without the parallel compiler there is no contention, so a single shard
// (2^0 == 1) lets `get_shard_by_value` skip hashing entirely.
#[cfg(not(parallel_compiler))]
const SHARD_BITS: usize = 0;

/// Number of shards in every `Sharded<T>`; always a power of two.
pub const SHARDS: usize = 1 << SHARD_BITS;
416331ca
XL
23
/// An array of cache-line aligned inner locked structures with convenience methods.
#[derive(Clone)]
pub struct Sharded<T> {
    // One lock-protected `T` per shard; `CacheAligned` keeps each entry on
    // its own cache line under the parallel compiler.
    shards: [CacheAligned<Lock<T>>; SHARDS],
}
29
30impl<T: Default> Default for Sharded<T> {
31 #[inline]
32 fn default() -> Self {
ba9703b0 33 Self::new(T::default)
e74abb32
XL
34 }
35}
36
impl<T> Sharded<T> {
    /// Creates a new `Sharded`, invoking `value` once per shard to build that
    /// shard's initial contents.
    #[inline]
    pub fn new(mut value: impl FnMut() -> T) -> Self {
        // Create a vector of the values we want
        let mut values: SmallVec<[_; SHARDS]> =
            (0..SHARDS).map(|_| CacheAligned(Lock::new(value()))).collect();

        // Create an uninitialized array
        let mut shards: mem::MaybeUninit<[CacheAligned<Lock<T>>; SHARDS]> =
            mem::MaybeUninit::uninit();

        // SAFETY: `values` holds exactly `SHARDS` initialized elements. They
        // are copied into the uninitialized array, then `set_len(0)` makes the
        // SmallVec forget (and therefore not drop) the moved-out elements.
        // After the copy every slot of `shards` is initialized, so
        // `assume_init` is sound.
        unsafe {
            // Copy the values into our array
            let first = shards.as_mut_ptr() as *mut CacheAligned<Lock<T>>;
            values.as_ptr().copy_to_nonoverlapping(first, SHARDS);

            // Ignore the content of the vector
            values.set_len(0);

            Sharded { shards: shards.assume_init() }
        }
    }

    /// The shard is selected by hashing `val` with `FxHasher`.
    #[inline]
    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
        // With a single shard (non-parallel compiler) skip hashing entirely.
        if SHARDS == 1 { &self.shards[0].0 } else { self.get_shard_by_hash(make_hash(val)) }
    }

    /// Get a shard with a pre-computed hash value. If `get_shard_by_value` is
    /// ever used in combination with `get_shard_by_hash` on a single `Sharded`
    /// instance, then `hash` must be computed with `FxHasher`. Otherwise,
    /// `hash` can be computed with any hasher, so long as that hasher is used
    /// consistently for each `Sharded` instance.
    #[inline]
    pub fn get_shard_index_by_hash(&self, hash: u64) -> usize {
        let hash_len = mem::size_of::<usize>();
        // Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
        // hashbrown also uses the lowest bits, so we can't use those
        // NOTE(review): `hash_len` is the size of `usize`, so on 32-bit
        // targets the shift amount is derived from 32 bits even though `hash`
        // is a `u64`; the `as usize` truncation then keeps the result small.
        // Confirm this bit selection is intended for 32-bit hosts.
        let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
        bits % SHARDS
    }

    /// Returns the lock of the shard selected by the pre-computed `hash`.
    #[inline]
    pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
        &self.shards[self.get_shard_index_by_hash(hash)].0
    }

    /// Returns the lock of shard `i`. Panics if `i >= SHARDS`.
    #[inline]
    pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
        &self.shards[i].0
    }

    /// Acquires every shard's lock in index order and returns all guards.
    pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
        (0..SHARDS).map(|i| self.shards[i].0.lock()).collect()
    }

    /// Like `lock_shards`, but returns `None` (releasing any guards already
    /// taken) as soon as one shard's `try_lock` fails.
    pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
        (0..SHARDS).map(|i| self.shards[i].0.try_lock()).collect()
    }
}
98
99pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
100
e74abb32 101impl<K: Eq, V> ShardedHashMap<K, V> {
416331ca
XL
102 pub fn len(&self) -> usize {
103 self.lock_shards().iter().map(|shard| shard.len()).sum()
104 }
105}
106
impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
    /// Interns by reference: returns the existing key equal to `value`, or
    /// inserts `make()` and returns that.
    ///
    /// The hash is computed once with `make_hash` (`FxHasher`) and reused for
    /// both shard selection and the raw-entry lookup, so the `_nocheck`
    /// calls below must stay consistent with that hasher.
    #[inline]
    pub fn intern_ref<Q: ?Sized>(&self, value: &Q, make: impl FnOnce() -> K) -> K
    where
        K: Borrow<Q>,
        Q: Hash + Eq,
    {
        let hash = make_hash(value);
        let mut shard = self.get_shard_by_hash(hash).lock();
        // `from_key_hashed_nocheck` trusts `hash` to be the hash of `value`.
        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);

        match entry {
            RawEntryMut::Occupied(e) => *e.key(),
            RawEntryMut::Vacant(e) => {
                let v = make();
                // NOTE(review): assumes `make()` yields a key that hashes and
                // compares equal to `value` — callers must uphold this,
                // since the hash is inserted unchecked.
                e.insert_hashed_nocheck(hash, v, ());
                v
            }
        }
    }

    /// Interns by value: like `intern_ref`, but `make` consumes `value` to
    /// construct the key on a miss.
    #[inline]
    pub fn intern<Q>(&self, value: Q, make: impl FnOnce(Q) -> K) -> K
    where
        K: Borrow<Q>,
        Q: Hash + Eq,
    {
        let hash = make_hash(&value);
        let mut shard = self.get_shard_by_hash(hash).lock();
        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);

        match entry {
            RawEntryMut::Occupied(e) => *e.key(),
            RawEntryMut::Vacant(e) => {
                let v = make(value);
                // Same unchecked-hash invariant as in `intern_ref` above.
                e.insert_hashed_nocheck(hash, v, ());
                v
            }
        }
    }
}
148
dfeec247
XL
/// Types whose identity can be reduced to a raw pointer, enabling
/// pointer-equality lookups via `contains_pointer_to` below.
pub trait IntoPointer {
    /// Returns a pointer which outlives `self`.
    fn into_pointer(&self) -> *const ();
}
153
154impl<K: Eq + Hash + Copy + IntoPointer> ShardedHashMap<K, ()> {
155 pub fn contains_pointer_to<T: Hash + IntoPointer>(&self, value: &T) -> bool {
156 let hash = make_hash(&value);
157 let shard = self.get_shard_by_hash(hash).lock();
158 let value = value.into_pointer();
159 shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some()
160 }
161}
162
416331ca
XL
163#[inline]
164fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
165 let mut state = FxHasher::default();
166 val.hash(&mut state);
167 state.finish()
168}