]> git.proxmox.com Git - rustc.git/blame - compiler/rustc_query_system/src/query/plumbing.rs
New upstream version 1.56.0~beta.4+dfsg1
[rustc.git] / compiler / rustc_query_system / src / query / plumbing.rs
CommitLineData
ba9703b0
XL
1//! The implementation of the query system itself. This defines the macros that
2//! generate the actual methods on tcx which find and execute the provider,
3//! manage the caches, and so forth.
4
17df50a5 5use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeParams};
ba9703b0
XL
6use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
7use crate::query::caches::QueryCache;
f9f354fc 8use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
6a06907d
XL
9use crate::query::job::{
10 report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId,
11};
94222f64 12use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
ba9703b0 13
ba9703b0
XL
14use rustc_data_structures::fingerprint::Fingerprint;
15use rustc_data_structures::fx::{FxHashMap, FxHasher};
6a06907d 16use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
ba9703b0
XL
17use rustc_data_structures::sync::{Lock, LockGuard};
18use rustc_data_structures::thin_vec::ThinVec;
17df50a5
XL
19#[cfg(not(parallel_compiler))]
20use rustc_errors::DiagnosticBuilder;
ba9703b0 21use rustc_errors::{Diagnostic, FatalError};
17df50a5 22use rustc_span::{Span, DUMMY_SP};
94222f64 23use std::cell::Cell;
ba9703b0 24use std::collections::hash_map::Entry;
5869c6ff 25use std::fmt::Debug;
ba9703b0
XL
26use std::hash::{Hash, Hasher};
27use std::mem;
28use std::num::NonZeroU32;
29use std::ptr;
30#[cfg(debug_assertions)]
31use std::sync::atomic::{AtomicUsize, Ordering};
32
6a06907d
XL
/// Storage for the results of a single query: the memoization cache itself
/// plus its sharded per-key backing storage.
pub struct QueryCacheStore<C: QueryCache> {
    cache: C,
    shards: Sharded<C::Sharded>,
    /// Number of cache hits, counted only in debug builds (see the
    /// `fetch_add` calls at the lookup sites).
    #[cfg(debug_assertions)]
    pub cache_hits: AtomicUsize,
}
39
17df50a5 40impl<C: QueryCache + Default> Default for QueryCacheStore<C> {
6a06907d
XL
41 fn default() -> Self {
42 Self {
43 cache: C::default(),
44 shards: Default::default(),
45 #[cfg(debug_assertions)]
46 cache_hits: AtomicUsize::new(0),
47 }
ba9703b0
XL
48 }
49}
50
6a06907d
XL
/// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
pub struct QueryLookup {
    /// Hash of the query key, computed once with `FxHasher` (see `hash_for_shard`).
    pub(super) key_hash: u64,
    /// Index of the shard this key maps to.
    shard: usize,
}
56
57// We compute the key's hash once and then use it for both the
58// shard lookup and the hashmap lookup. This relies on the fact
59// that both of them use `FxHasher`.
60fn hash_for_shard<K: Hash>(key: &K) -> u64 {
61 let mut hasher = FxHasher::default();
62 key.hash(&mut hasher);
63 hasher.finish()
ba9703b0
XL
64}
65
6a06907d 66impl<C: QueryCache> QueryCacheStore<C> {
ba9703b0
XL
67 pub(super) fn get_lookup<'tcx>(
68 &'tcx self,
69 key: &C::Key,
6a06907d
XL
70 ) -> (QueryLookup, LockGuard<'tcx, C::Sharded>) {
71 let key_hash = hash_for_shard(key);
72 let shard = get_shard_index_by_hash(key_hash);
ba9703b0 73 let lock = self.shards.get_shard_by_index(shard).lock();
6a06907d 74 (QueryLookup { key_hash, shard }, lock)
ba9703b0 75 }
6a06907d 76
cdc7bbd5 77 pub fn iter_results(&self, f: &mut dyn FnMut(&C::Key, &C::Value, DepNodeIndex)) {
6a06907d
XL
78 self.cache.iter(&self.shards, f)
79 }
80}
81
/// Per-shard bookkeeping for in-flight queries.
struct QueryStateShard<D, K> {
    /// Maps each key currently being computed to the state of its job.
    active: FxHashMap<K, QueryResult<D>>,

    /// Used to generate unique ids for active jobs.
    jobs: u32,
}
88
89impl<D, K> Default for QueryStateShard<D, K> {
90 fn default() -> QueryStateShard<D, K> {
91 QueryStateShard { active: Default::default(), jobs: 0 }
92 }
93}
94
/// Tracks all currently-executing invocations of a single query, sharded to
/// reduce lock contention.
pub struct QueryState<D, K> {
    shards: Sharded<QueryStateShard<D, K>>,
}
98
/// Indicates the state of a query for a given key in a query map.
enum QueryResult<D> {
    /// An already executing query. The query job can be used to await for its completion.
    Started(QueryJob<D>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error which will
    /// silently panic.
    Poisoned,
}
108
impl<D, K> QueryState<D, K>
where
    D: Copy + Clone + Eq + Hash,
    K: Eq + Hash + Clone + Debug,
{
    /// Returns `true` if no query tracked by this state is currently executing.
    /// Locks every shard, so this is intended for assertions, not hot paths.
    pub fn all_inactive(&self) -> bool {
        let shards = self.shards.lock_shards();
        shards.iter().all(|shard| shard.active.is_empty())
    }

    /// Collects info about every currently-active job into `jobs`.
    /// Returns `None` if any shard lock could not be acquired without
    /// blocking — see the comment below about the deadlock handler.
    pub fn try_collect_active_jobs<CTX: Copy>(
        &self,
        tcx: CTX,
        kind: D,
        make_query: fn(CTX, K) -> QueryStackFrame,
        jobs: &mut QueryMap<D>,
    ) -> Option<()> {
        // We use try_lock_shards here since we are called from the
        // deadlock handler, and this shouldn't be locked.
        let shards = self.shards.try_lock_shards()?;
        for (shard_id, shard) in shards.iter().enumerate() {
            for (k, v) in shard.active.iter() {
                if let QueryResult::Started(ref job) = *v {
                    // Reconstruct the global job id from the per-shard id
                    // plus the shard index and query kind.
                    let id = QueryJobId::new(job.id, shard_id, kind);
                    let query = make_query(tcx, k.clone());
                    jobs.insert(id, QueryJobInfo { query, job: job.clone() });
                }
            }
        }

        Some(())
    }
}
142
6a06907d
XL
143impl<D, K> Default for QueryState<D, K> {
144 fn default() -> QueryState<D, K> {
145 QueryState { shards: Default::default() }
ba9703b0
XL
146 }
147}
148
ba9703b0
XL
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, D, C>
where
    D: Copy + Clone + Eq + Hash,
    C: QueryCache,
{
    /// The active-job map this job is registered in; used to deregister on completion/drop.
    state: &'tcx QueryState<D, C::Key>,
    /// The result cache the finished value is stored into.
    cache: &'tcx QueryCacheStore<C>,
    key: C::Key,
    id: QueryJobId<D>,
}
161
17df50a5
XL
162#[cold]
163#[inline(never)]
164#[cfg(not(parallel_compiler))]
165fn mk_cycle<CTX, V, R>(
166 tcx: CTX,
167 root: QueryJobId<CTX::DepKind>,
168 span: Span,
169 handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
170 cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
171) -> R
172where
173 CTX: QueryContext,
174 V: std::fmt::Debug,
175 R: Clone,
176{
177 let error: CycleError = root.find_cycle_in_stack(
178 tcx.try_collect_active_jobs().unwrap(),
179 &tcx.current_query_job(),
180 span,
181 );
182 let error = report_cycle(tcx.dep_context().sess(), error);
183 let value = handle_cycle_error(tcx, error);
184 cache.store_nocache(value)
185}
186
impl<'tcx, D, C> JobOwner<'tcx, D, C>
where
    D: Copy + Clone + Eq + Hash,
    C: QueryCache,
{
    /// Either gets a `JobOwner` corresponding the query, allowing us to
    /// start executing the query, or returns with the result of the query.
    /// This function assumes that `try_get_cached` is already called and returned `lookup`.
    /// If the query is executing elsewhere, this will wait for it and return the result.
    /// If the query panicked, this will silently panic.
    ///
    /// This function is inlined because that results in a noticeable speed-up
    /// for some compile-time benchmarks.
    #[inline(always)]
    fn try_start<'b, CTX>(
        tcx: CTX,
        state: &'b QueryState<CTX::DepKind, C::Key>,
        cache: &'b QueryCacheStore<C>,
        span: Span,
        key: C::Key,
        lookup: QueryLookup,
        query: &QueryVtable<CTX, C::Key, C::Value>,
    ) -> TryGetJob<'b, CTX::DepKind, C>
    where
        CTX: QueryContext,
    {
        // `lookup.shard` was computed by `try_get_cached`; reuse it so the
        // key is hashed only once.
        let shard = lookup.shard;
        let mut state_lock = state.shards.get_shard_by_index(shard).lock();
        let lock = &mut *state_lock;

        match lock.active.entry(key) {
            // No one is computing this key yet: register a new job and hand
            // the caller the responsibility to execute it.
            Entry::Vacant(entry) => {
                // Generate an id unique within this shard.
                let id = lock.jobs.checked_add(1).unwrap();
                lock.jobs = id;
                let id = QueryShardJobId(NonZeroU32::new(id).unwrap());

                let job = tcx.current_query_job();
                let job = QueryJob::new(id, span, job);

                let key = entry.key().clone();
                entry.insert(QueryResult::Started(job));

                let global_id = QueryJobId::new(id, shard, query.dep_kind);
                let owner = JobOwner { state, cache, id: global_id, key };
                return TryGetJob::NotYetStarted(owner);
            }
            // Someone is already computing this key.
            Entry::Occupied(mut entry) => {
                match entry.get_mut() {
                    #[cfg(not(parallel_compiler))]
                    QueryResult::Started(job) => {
                        let id = QueryJobId::new(job.id, shard, query.dep_kind);

                        // Release the shard lock before reporting: cycle
                        // reporting walks the active-job maps itself.
                        drop(state_lock);

                        // If we are single-threaded we know that we have cycle error,
                        // so we just return the error.
                        return TryGetJob::Cycle(mk_cycle(
                            tcx,
                            id,
                            span,
                            query.handle_cycle_error,
                            &cache.cache,
                        ));
                    }
                    #[cfg(parallel_compiler)]
                    QueryResult::Started(job) => {
                        // For parallel queries, we'll block and wait until the query running
                        // in another thread has completed. Record how long we wait in the
                        // self-profiler.
                        let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();

                        // Get the latch out
                        let latch = job.latch();
                        let key = entry.key().clone();

                        drop(state_lock);

                        // With parallel queries we might just have to wait on some other
                        // thread.
                        let result = latch.wait_on(tcx.current_query_job(), span);

                        // Waiting detected a cycle across threads.
                        if let Err(cycle) = result {
                            let cycle = report_cycle(tcx.dep_context().sess(), cycle);
                            let value = (query.handle_cycle_error)(tcx, cycle);
                            let value = cache.cache.store_nocache(value);
                            return TryGetJob::Cycle(value);
                        }

                        // The other thread finished; its `complete` call put
                        // the value in the cache before signalling the latch.
                        let cached = cache
                            .cache
                            .lookup(cache, &key, |value, index| {
                                if unlikely!(tcx.dep_context().profiler().enabled()) {
                                    tcx.dep_context().profiler().query_cache_hit(index.into());
                                }
                                #[cfg(debug_assertions)]
                                {
                                    cache.cache_hits.fetch_add(1, Ordering::Relaxed);
                                }
                                (value.clone(), index)
                            })
                            .unwrap_or_else(|_| panic!("value must be in cache after waiting"));

                        query_blocked_prof_timer.finish_with_query_invocation_id(cached.1.into());

                        return TryGetJob::JobCompleted(cached);
                    }
                    QueryResult::Poisoned => FatalError.raise(),
                }
            }
        }
    }

    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiter and forgets the JobOwner, so it won't poison the query
    fn complete(self, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored {
        // We can move out of `self` here because we `mem::forget` it below
        // (so `self.key` is never dropped twice).
        let key = unsafe { ptr::read(&self.key) };
        let state = self.state;
        let cache = self.cache;

        // Forget ourself so our destructor won't poison the query
        mem::forget(self);

        let (job, result) = {
            let key_hash = hash_for_shard(&key);
            let shard = get_shard_index_by_hash(key_hash);
            // Deregister the job from the active map...
            let job = {
                let mut lock = state.shards.get_shard_by_index(shard).lock();
                match lock.active.remove(&key).unwrap() {
                    QueryResult::Started(job) => job,
                    QueryResult::Poisoned => panic!(),
                }
            };
            // ...and store the value in the result cache. This happens
            // before `signal_complete` below, so waiters will find the value.
            let result = {
                let mut lock = cache.shards.get_shard_by_index(shard).lock();
                cache.cache.complete(&mut lock, key, result, dep_node_index)
            };
            (job, result)
        };

        job.signal_complete();
        result
    }
}
332
ba9703b0
XL
333fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
334where
335 F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
336{
337 let diagnostics = Lock::new(ThinVec::new());
338 let result = f(Some(&diagnostics));
339 (result, diagnostics.into_inner())
340}
341
// This destructor only runs when the owner was *not* `complete`d (which
// `mem::forget`s `self`) — e.g. when the query provider panicked.
impl<'tcx, D, C> Drop for JobOwner<'tcx, D, C>
where
    D: Copy + Clone + Eq + Hash,
    C: QueryCache,
{
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let shard = state.shards.get_shard_by_value(&self.key);
        let job = {
            let mut shard = shard.lock();
            let job = match shard.active.remove(&self.key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            // Replace the entry (rather than leaving it empty) so future
            // waiters see `Poisoned` instead of starting the query again.
            shard.active.insert(self.key.clone(), QueryResult::Poisoned);
            job
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}
367
/// A query cycle detected while executing queries, before it is rendered
/// into a diagnostic (see `report_cycle`).
#[derive(Clone)]
pub(crate) struct CycleError {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, QueryStackFrame)>,
    /// The stack of queries forming the cycle, in order.
    pub cycle: Vec<QueryInfo>,
}
374
/// The result of `try_start`.
enum TryGetJob<'tcx, D, C>
where
    D: Copy + Clone + Eq + Hash,
    C: QueryCache,
{
    /// The query is not yet started. Contains a guard to the cache eventually used to start it.
    NotYetStarted(JobOwner<'tcx, D, C>),

    /// The query was already completed.
    /// Returns the result of the query and its dep-node index
    /// if it succeeded or a cycle error if it failed.
    #[cfg(parallel_compiler)]
    JobCompleted((C::Stored, DepNodeIndex)),

    /// Trying to execute the query resulted in a cycle.
    Cycle(C::Stored),
}
393
/// Checks if the query is already computed and in the cache.
/// On a hit, this registers the dep-graph read and invokes `on_hit` (while
/// still holding the cache lock), returning `Ok` with its result. On a miss
/// it returns `Err` with the `QueryLookup` (key hash + shard index), which
/// will be reused if the query then needs to be computed.
#[inline]
pub fn try_get_cached<'a, CTX, C, R, OnHit>(
    tcx: CTX,
    cache: &'a QueryCacheStore<C>,
    key: &C::Key,
    // `on_hit` can be called while holding a lock to the query cache
    on_hit: OnHit,
) -> Result<R, QueryLookup>
where
    C: QueryCache,
    CTX: DepContext,
    OnHit: FnOnce(&C::Stored) -> R,
{
    cache.cache.lookup(cache, &key, |value, index| {
        if unlikely!(tcx.profiler().enabled()) {
            tcx.profiler().query_cache_hit(index.into());
        }
        #[cfg(debug_assertions)]
        {
            cache.cache_hits.fetch_add(1, Ordering::Relaxed);
        }
        tcx.dep_graph().read_index(index);
        on_hit(value)
    })
}
423
/// Executes a query after a cache miss: registers (or waits on) the job,
/// then computes the value via the fast path (incr. comp. off), the
/// anonymous-query path, green-marking + disk load, or by forcing it.
fn try_execute_query<CTX, C>(
    tcx: CTX,
    state: &QueryState<CTX::DepKind, C::Key>,
    cache: &QueryCacheStore<C>,
    span: Span,
    key: C::Key,
    lookup: QueryLookup,
    query: &QueryVtable<CTX, C::Key, C::Value>,
    compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> C::Stored
where
    C: QueryCache,
    C::Key: DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
        tcx,
        state,
        cache,
        span,
        key.clone(),
        lookup,
        query,
    ) {
        TryGetJob::NotYetStarted(job) => job,
        TryGetJob::Cycle(result) => return result,
        #[cfg(parallel_compiler)]
        TryGetJob::JobCompleted((v, index)) => {
            tcx.dep_context().dep_graph().read_index(index);
            return v;
        }
    };

    let dep_graph = tcx.dep_context().dep_graph();

    // Fast path for when incr. comp. is off.
    if !dep_graph.is_fully_enabled() {
        let prof_timer = tcx.dep_context().profiler().query_provider();
        let result = tcx.start_query(job.id, None, || compute(*tcx.dep_context(), key));
        let dep_node_index = dep_graph.next_virtual_depnode_index();
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
        return job.complete(result, dep_node_index);
    }

    // Anonymous queries have no fixed `DepNode`; their dependencies are
    // tracked via an anonymous task and their diagnostics stored separately.
    if query.anon {
        let prof_timer = tcx.dep_context().profiler().query_provider();

        let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
            tcx.start_query(job.id, diagnostics, || {
                dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
                    compute(*tcx.dep_context(), key)
                })
            })
        });

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        dep_graph.read_index(dep_node_index);

        let side_effects = QuerySideEffects { diagnostics };

        if unlikely!(!side_effects.is_empty()) {
            tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
        }

        return job.complete(result, dep_node_index);
    }

    let dep_node = query.to_dep_node(*tcx.dep_context(), &key);

    // `eval_always` queries can never be green, so skip the marking attempt.
    if !query.eval_always {
        // The diagnostics for this query will be
        // promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        let loaded = tcx.start_query(job.id, None, || {
            let marked = dep_graph.try_mark_green_and_read(tcx, &dep_node);
            marked.map(|(prev_dep_node_index, dep_node_index)| {
                (
                    load_from_disk_and_cache_in_memory(
                        tcx,
                        key.clone(),
                        prev_dep_node_index,
                        dep_node_index,
                        &dep_node,
                        query,
                        compute,
                    ),
                    dep_node_index,
                )
            })
        });
        if let Some((result, dep_node_index)) = loaded {
            return job.complete(result, dep_node_index);
        }
    }

    // Could not reuse a previous result: actually run the provider.
    let (result, dep_node_index) = force_query_with_job(tcx, key, job, dep_node, query, compute);
    dep_graph.read_index(dep_node_index);
    result
}
524
/// For a query whose dep-node was just marked green, obtains the value
/// either from the on-disk incremental cache or (if it was not cached on
/// disk) by re-running the provider with dependency tracking disabled.
fn load_from_disk_and_cache_in_memory<CTX, K, V: Debug>(
    tcx: CTX,
    key: K,
    prev_dep_node_index: SerializedDepNodeIndex,
    dep_node_index: DepNodeIndex,
    dep_node: &DepNode<CTX::DepKind>,
    query: &QueryVtable<CTX, K, V>,
    compute: fn(CTX::DepContext, K) -> V,
) -> V
where
    CTX: QueryContext,
{
    // Note this function can be called concurrently from the same query
    // We must ensure that this is handled correctly.

    debug_assert!(tcx.dep_context().dep_graph().is_green(dep_node));

    // First we try to load the result from the on-disk cache.
    let result = if query.cache_on_disk(tcx, &key, None) {
        let prof_timer = tcx.dep_context().profiler().incr_cache_loading();
        let result = query.try_load_from_disk(tcx, prev_dep_node_index);
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        // We always expect to find a cached result for things that
        // can be forced from `DepNode`.
        debug_assert!(
            !dep_node.kind.can_reconstruct_query_key() || result.is_some(),
            "missing on-disk cache entry for {:?}",
            dep_node
        );
        result
    } else {
        // Some things are never cached on disk.
        None
    };

    if let Some(result) = result {
        // If `-Zincremental-verify-ich` is specified, re-hash results from
        // the cache and make sure that they have the expected fingerprint.
        if unlikely!(tcx.dep_context().sess().opts.debugging_opts.incremental_verify_ich) {
            incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
        }

        result
    } else {
        // We could not load a result from the on-disk cache, so
        // recompute.
        let prof_timer = tcx.dep_context().profiler().query_provider();

        // The dep-graph for this computation is already in-place.
        let result = tcx.dep_context().dep_graph().with_ignore(|| compute(*tcx.dep_context(), key));

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        // Verify that re-running the query produced a result with the expected hash
        // This catches bugs in query implementations, turning them into ICEs.
        // For example, a query might sort its result by `DefId` - since `DefId`s are
        // not stable across compilation sessions, the result could end up getting sorted
        // in a different order when the query is re-run, even though all of the inputs
        // (e.g. `DefPathHash` values) were green.
        //
        // See issue #82920 for an example of a miscompilation that would get turned into
        // an ICE by this check
        incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);

        result
    }
}
593
/// Re-hashes `result` with the stable hashing context and compares it to
/// the fingerprint recorded for `dep_node` in the previous session,
/// reporting an ICE (with re-entrancy protection) on mismatch.
fn incremental_verify_ich<CTX, K, V: Debug>(
    tcx: CTX::DepContext,
    result: &V,
    dep_node: &DepNode<CTX::DepKind>,
    query: &QueryVtable<CTX, K, V>,
) where
    CTX: QueryContext,
{
    assert!(
        tcx.dep_graph().is_green(dep_node),
        "fingerprint for green query instance not loaded from cache: {:?}",
        dep_node,
    );

    debug!("BEGIN verify_ich({:?})", dep_node);
    let mut hcx = tcx.create_stable_hashing_context();

    // Queries without a stable-hashable result report `None`; treat that as
    // the zero fingerprint so it can still be compared.
    let new_hash = query.hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
    debug!("END verify_ich({:?})", dep_node);

    let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);

    if Some(new_hash) != old_hash {
        let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
            format!("`cargo clean -p {}` or `cargo clean`", crate_name)
        } else {
            "`cargo clean`".to_string()
        };

        // When we emit an error message and panic, we try to debug-print the `DepNode`
        // and query result. Unfortunately, this can cause us to run additional queries,
        // which may result in another fingerprint mismatch while we're in the middle
        // of processing this one. To avoid a double-panic (which kills the process
        // before we can print out the query static), we print out a terse
        // but 'safe' message if we detect a re-entrant call to this method.
        thread_local! {
            static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
        };

        let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));

        if old_in_panic {
            tcx.sess().struct_err("internal compiler error: re-entrant incremental verify failure, suppressing message")
                .emit();
        } else {
            tcx.sess().struct_err(&format!("internal compiler error: encountered incremental compilation error with {:?}", dep_node))
                .help(&format!("This is a known issue with the compiler. Run {} to allow your project to compile", run_cmd))
                .note(&"Please follow the instructions below to create a bug report with the provided information")
                .note(&"See <https://github.com/rust-lang/rust/issues/84970> for more information")
                .emit();
            panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
        }

        INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
    }
}
650
/// Runs the provider for a query whose job has already been started,
/// creating the dep-graph node for it, recording side effects, and storing
/// the result in the cache via `job.complete`.
fn force_query_with_job<C, CTX>(
    tcx: CTX,
    key: C::Key,
    job: JobOwner<'_, CTX::DepKind, C>,
    dep_node: DepNode<CTX::DepKind>,
    query: &QueryVtable<CTX, C::Key, C::Value>,
    compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> (C::Stored, DepNodeIndex)
where
    C: QueryCache,
    CTX: QueryContext,
{
    // If the following assertion triggers, it can have two reasons:
    // 1. Something is wrong with DepNode creation, either here or
    //    in `DepGraph::try_mark_green()`.
    // 2. Two distinct query keys get mapped to the same `DepNode`
    //    (see for example #48923).
    assert!(
        !tcx.dep_context().dep_graph().dep_node_exists(&dep_node),
        "forcing query with already existing `DepNode`\n\
         - query-key: {:?}\n\
         - dep-node: {:?}",
        key,
        dep_node
    );

    let prof_timer = tcx.dep_context().profiler().query_provider();

    let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
        tcx.start_query(job.id, diagnostics, || {
            // `eval_always` tasks are recorded differently so they are
            // unconditionally re-executed on the next compilation.
            if query.eval_always {
                tcx.dep_context().dep_graph().with_eval_always_task(
                    dep_node,
                    *tcx.dep_context(),
                    key,
                    compute,
                    query.hash_result,
                )
            } else {
                tcx.dep_context().dep_graph().with_task(
                    dep_node,
                    *tcx.dep_context(),
                    key,
                    compute,
                    query.hash_result,
                )
            }
        })
    });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    let side_effects = QuerySideEffects { diagnostics };

    if unlikely!(!side_effects.is_empty()) && dep_node.kind != DepKind::NULL {
        tcx.store_side_effects(dep_node_index, side_effects);
    }

    let result = job.complete(result, dep_node_index);

    (result, dep_node_index)
}
713
714#[inline(never)]
f9f354fc
XL
715fn get_query_impl<CTX, C>(
716 tcx: CTX,
6a06907d
XL
717 state: &QueryState<CTX::DepKind, C::Key>,
718 cache: &QueryCacheStore<C>,
f9f354fc
XL
719 span: Span,
720 key: C::Key,
6a06907d 721 lookup: QueryLookup,
f9f354fc 722 query: &QueryVtable<CTX, C::Key, C::Value>,
136023e0 723 compute: fn(CTX::DepContext, C::Key) -> C::Value,
f9f354fc 724) -> C::Stored
ba9703b0 725where
ba9703b0 726 CTX: QueryContext,
f9f354fc 727 C: QueryCache,
17df50a5 728 C::Key: DepNodeParams<CTX::DepContext>,
ba9703b0 729{
136023e0 730 try_execute_query(tcx, state, cache, span, key, lookup, query, compute)
ba9703b0
XL
731}
732
733/// Ensure that either this query has all green inputs or been executed.
734/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
6a06907d 735/// Returns true if the query should still run.
ba9703b0
XL
736///
737/// This function is particularly useful when executing passes for their
738/// side-effects -- e.g., in order to report errors for erroneous programs.
739///
740/// Note: The optimization is only available during incr. comp.
f9f354fc 741#[inline(never)]
6a06907d
XL
742fn ensure_must_run<CTX, K, V>(tcx: CTX, key: &K, query: &QueryVtable<CTX, K, V>) -> bool
743where
744 K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
ba9703b0
XL
745 CTX: QueryContext,
746{
f9f354fc 747 if query.eval_always {
6a06907d 748 return true;
ba9703b0
XL
749 }
750
751 // Ensuring an anonymous query makes no sense
f9f354fc 752 assert!(!query.anon);
ba9703b0 753
6a06907d 754 let dep_node = query.to_dep_node(*tcx.dep_context(), key);
ba9703b0 755
6a06907d 756 match tcx.dep_context().dep_graph().try_mark_green_and_read(tcx, &dep_node) {
ba9703b0
XL
757 None => {
758 // A None return from `try_mark_green_and_read` means that this is either
759 // a new dep node or that the dep node has already been marked red.
760 // Either way, we can't call `dep_graph.read()` as we don't have the
761 // DepNodeIndex. We must invoke the query itself. The performance cost
762 // this introduces should be negligible as we'll immediately hit the
763 // in-memory cache, or another query down the line will.
6a06907d 764 true
ba9703b0
XL
765 }
766 Some((_, dep_node_index)) => {
6a06907d
XL
767 tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
768 false
ba9703b0
XL
769 }
770 }
771}
772
f9f354fc
XL
/// Forces a (non-anonymous) query from its dep-node: runs it unless its
/// value is already cached or another thread is already computing it.
/// Returns `true` when the value is (or will be) available.
#[inline(never)]
fn force_query_impl<CTX, C>(
    tcx: CTX,
    state: &QueryState<CTX::DepKind, C::Key>,
    cache: &QueryCacheStore<C>,
    key: C::Key,
    dep_node: DepNode<CTX::DepKind>,
    query: &QueryVtable<CTX, C::Key, C::Value>,
    compute: fn(CTX::DepContext, C::Key) -> C::Value,
) -> bool
where
    C: QueryCache,
    C::Key: DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    debug_assert!(!query.anon);

    // We may be concurrently trying both execute and force a query.
    // Ensure that only one of them runs the query.
    let cached = cache.cache.lookup(cache, &key, |_, index| {
        if unlikely!(tcx.dep_context().profiler().enabled()) {
            tcx.dep_context().profiler().query_cache_hit(index.into());
        }
        #[cfg(debug_assertions)]
        {
            cache.cache_hits.fetch_add(1, Ordering::Relaxed);
        }
    });

    let lookup = match cached {
        Ok(()) => return true,
        Err(lookup) => lookup,
    };

    // Forcing has no meaningful span of its own, hence DUMMY_SP.
    let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
        tcx,
        state,
        cache,
        DUMMY_SP,
        key.clone(),
        lookup,
        query,
    ) {
        TryGetJob::NotYetStarted(job) => job,
        TryGetJob::Cycle(_) => return true,
        #[cfg(parallel_compiler)]
        TryGetJob::JobCompleted(_) => return true,
    };

    force_query_with_job(tcx, key, job, dep_node, query, compute);

    true
}
f9f354fc 826
6a06907d
XL
/// How `get_query` should behave.
pub enum QueryMode {
    /// Compute (or look up) the query's value and return it.
    Get,
    /// Only make sure the query has run; its value is not needed.
    Ensure,
}
831
6a06907d
XL
832pub fn get_query<Q, CTX>(
833 tcx: CTX,
834 span: Span,
835 key: Q::Key,
836 lookup: QueryLookup,
837 mode: QueryMode,
838) -> Option<Q::Stored>
f9f354fc
XL
839where
840 Q: QueryDescription<CTX>,
17df50a5 841 Q::Key: DepNodeParams<CTX::DepContext>,
f9f354fc
XL
842 CTX: QueryContext,
843{
6a06907d
XL
844 let query = &Q::VTABLE;
845 if let QueryMode::Ensure = mode {
846 if !ensure_must_run(tcx, &key, query) {
847 return None;
848 }
849 }
850
851 debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
136023e0
XL
852 let compute = Q::compute_fn(tcx, &key);
853 let value = get_query_impl(
854 tcx,
855 Q::query_state(tcx),
856 Q::query_cache(tcx),
857 span,
858 key,
859 lookup,
860 query,
861 compute,
862 );
6a06907d 863 Some(value)
f9f354fc
XL
864}
865
17df50a5 866pub fn force_query<Q, CTX>(tcx: CTX, dep_node: &DepNode<CTX::DepKind>) -> bool
f9f354fc
XL
867where
868 Q: QueryDescription<CTX>,
17df50a5 869 Q::Key: DepNodeParams<CTX::DepContext>,
f9f354fc
XL
870 CTX: QueryContext,
871{
17df50a5
XL
872 if Q::ANON {
873 return false;
874 }
875
136023e0
XL
876 if !<Q::Key as DepNodeParams<CTX::DepContext>>::can_reconstruct_query_key() {
877 return false;
878 }
879
880 let key = if let Some(key) =
881 <Q::Key as DepNodeParams<CTX::DepContext>>::recover(*tcx.dep_context(), &dep_node)
882 {
883 key
884 } else {
885 return false;
886 };
887
888 let compute = Q::compute_fn(tcx, &key);
889 force_query_impl(
890 tcx,
891 Q::query_state(tcx),
892 Q::query_cache(tcx),
893 key,
894 *dep_node,
895 &Q::VTABLE,
896 compute,
897 )
f9f354fc 898}