1 use crate::dep_graph
::DepNodeIndex
;
3 use rustc_arena
::TypedArena
;
4 use rustc_data_structures
::fx
::FxHashMap
;
5 use rustc_data_structures
::sharded
;
6 #[cfg(parallel_compiler)]
7 use rustc_data_structures
::sharded
::Sharded
;
8 #[cfg(not(parallel_compiler))]
9 use rustc_data_structures
::sync
::Lock
;
10 use rustc_data_structures
::sync
::WorkerLocal
;
11 use std
::default::Default
;
14 use std
::marker
::PhantomData
;
/// Selects the concrete cache implementation to use for a query,
/// based on its key and value types.
pub trait CacheSelector<K, V> {
    /// The chosen cache type (e.g. `DefaultCache<K, V>` or `ArenaCache<'tcx, K, V>`).
    type Cache;
}
/// Storage for query results, independent of the memoization map itself.
pub trait QueryStorage {
    /// The type of value the query computes.
    type Value: Debug;
    /// The form in which values are handed back to callers — may be the
    /// value itself (`DefaultCache`) or a reference into arena storage
    /// (`ArenaCache`).
    type Stored: Clone;

    /// Store a value without putting it in the cache.
    /// This is meant to be used with cycle errors.
    fn store_nocache(&self, value: Self::Value) -> Self::Stored;
}
29 pub trait QueryCache
: QueryStorage
+ Sized
{
30 type Key
: Hash
+ Eq
+ Clone
+ Debug
;
32 /// Checks if the query is already computed and in the cache.
33 /// It returns the shard index and a lock guard to the shard,
34 /// which will be used if the query is not in the cache and we need
39 // `on_hit` can be called while holding a lock to the query state shard.
43 OnHit
: FnOnce(&Self::Stored
, DepNodeIndex
) -> R
;
45 fn complete(&self, key
: Self::Key
, value
: Self::Value
, index
: DepNodeIndex
) -> Self::Stored
;
47 fn iter(&self, f
: &mut dyn FnMut(&Self::Key
, &Self::Value
, DepNodeIndex
));
/// Selects `DefaultCache`: a plain hash-map cache whose values are
/// cloned out on every hit.
pub struct DefaultCacheSelector;
52 impl<K
: Eq
+ Hash
, V
: Clone
> CacheSelector
<K
, V
> for DefaultCacheSelector
{
53 type Cache
= DefaultCache
<K
, V
>;
56 pub struct DefaultCache
<K
, V
> {
57 #[cfg(parallel_compiler)]
58 cache
: Sharded
<FxHashMap
<K
, (V
, DepNodeIndex
)>>,
59 #[cfg(not(parallel_compiler))]
60 cache
: Lock
<FxHashMap
<K
, (V
, DepNodeIndex
)>>,
63 impl<K
, V
> Default
for DefaultCache
<K
, V
> {
64 fn default() -> Self {
65 DefaultCache { cache: Default::default() }
69 impl<K
: Eq
+ Hash
, V
: Clone
+ Debug
> QueryStorage
for DefaultCache
<K
, V
> {
74 fn store_nocache(&self, value
: Self::Value
) -> Self::Stored
{
75 // We have no dedicated storage
80 impl<K
, V
> QueryCache
for DefaultCache
<K
, V
>
82 K
: Eq
+ Hash
+ Clone
+ Debug
,
88 fn lookup
<R
, OnHit
>(&self, key
: &K
, on_hit
: OnHit
) -> Result
<R
, ()>
90 OnHit
: FnOnce(&V
, DepNodeIndex
) -> R
,
92 let key_hash
= sharded
::make_hash(key
);
93 #[cfg(parallel_compiler)]
94 let lock
= self.cache
.get_shard_by_hash(key_hash
).lock();
95 #[cfg(not(parallel_compiler))]
96 let lock
= self.cache
.lock();
97 let result
= lock
.raw_entry().from_key_hashed_nocheck(key_hash
, key
);
99 if let Some((_
, value
)) = result
{
100 let hit_result
= on_hit(&value
.0, value
.1);
108 fn complete(&self, key
: K
, value
: V
, index
: DepNodeIndex
) -> Self::Stored
{
109 #[cfg(parallel_compiler)]
110 let mut lock
= self.cache
.get_shard_by_value(&key
).lock();
111 #[cfg(not(parallel_compiler))]
112 let mut lock
= self.cache
.lock();
113 lock
.insert(key
, (value
.clone(), index
));
117 fn iter(&self, f
: &mut dyn FnMut(&Self::Key
, &Self::Value
, DepNodeIndex
)) {
118 #[cfg(parallel_compiler)]
120 let shards
= self.cache
.lock_shards();
121 for shard
in shards
.iter() {
122 for (k
, v
) in shard
.iter() {
127 #[cfg(not(parallel_compiler))]
129 let map
= self.cache
.lock();
130 for (k
, v
) in map
.iter() {
/// Selects `ArenaCache`: results live in an arena for the duration of
/// `'tcx` and are handed out by reference instead of by clone.
pub struct ArenaCacheSelector<'tcx>(PhantomData<&'tcx ()>);
139 impl<'tcx
, K
: Eq
+ Hash
, V
: 'tcx
> CacheSelector
<K
, V
> for ArenaCacheSelector
<'tcx
> {
140 type Cache
= ArenaCache
<'tcx
, K
, V
>;
143 pub struct ArenaCache
<'tcx
, K
, V
> {
144 arena
: WorkerLocal
<TypedArena
<(V
, DepNodeIndex
)>>,
145 #[cfg(parallel_compiler)]
146 cache
: Sharded
<FxHashMap
<K
, &'
tcx (V
, DepNodeIndex
)>>,
147 #[cfg(not(parallel_compiler))]
148 cache
: Lock
<FxHashMap
<K
, &'
tcx (V
, DepNodeIndex
)>>,
151 impl<'tcx
, K
, V
> Default
for ArenaCache
<'tcx
, K
, V
> {
152 fn default() -> Self {
153 ArenaCache { arena: WorkerLocal::new(|_| TypedArena::default()), cache: Default::default() }
157 impl<'tcx
, K
: Eq
+ Hash
, V
: Debug
+ 'tcx
> QueryStorage
for ArenaCache
<'tcx
, K
, V
> {
159 type Stored
= &'tcx V
;
162 fn store_nocache(&self, value
: Self::Value
) -> Self::Stored
{
163 let value
= self.arena
.alloc((value
, DepNodeIndex
::INVALID
));
164 let value
= unsafe { &*(&value.0 as *const _) }
;
169 impl<'tcx
, K
, V
: 'tcx
> QueryCache
for ArenaCache
<'tcx
, K
, V
>
171 K
: Eq
+ Hash
+ Clone
+ Debug
,
177 fn lookup
<R
, OnHit
>(&self, key
: &K
, on_hit
: OnHit
) -> Result
<R
, ()>
179 OnHit
: FnOnce(&&'tcx V
, DepNodeIndex
) -> R
,
181 let key_hash
= sharded
::make_hash(key
);
182 #[cfg(parallel_compiler)]
183 let lock
= self.cache
.get_shard_by_hash(key_hash
).lock();
184 #[cfg(not(parallel_compiler))]
185 let lock
= self.cache
.lock();
186 let result
= lock
.raw_entry().from_key_hashed_nocheck(key_hash
, key
);
188 if let Some((_
, value
)) = result
{
189 let hit_result
= on_hit(&&value
.0, value
.1);
197 fn complete(&self, key
: K
, value
: V
, index
: DepNodeIndex
) -> Self::Stored
{
198 let value
= self.arena
.alloc((value
, index
));
199 let value
= unsafe { &*(value as *const _) }
;
200 #[cfg(parallel_compiler)]
201 let mut lock
= self.cache
.get_shard_by_value(&key
).lock();
202 #[cfg(not(parallel_compiler))]
203 let mut lock
= self.cache
.lock();
204 lock
.insert(key
, value
);
208 fn iter(&self, f
: &mut dyn FnMut(&Self::Key
, &Self::Value
, DepNodeIndex
)) {
209 #[cfg(parallel_compiler)]
211 let shards
= self.cache
.lock_shards();
212 for shard
in shards
.iter() {
213 for (k
, v
) in shard
.iter() {
218 #[cfg(not(parallel_compiler))]
220 let map
= self.cache
.lock();
221 for (k
, v
) in map
.iter() {