//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.

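// At a high level, execution funnels through `try_execute_query` below: probe
// the cache, claim a job in `QueryState`, run the provider, store the result,
// and wake any waiters. A minimal single-threaded sketch of that shape, using
// hypothetical simplified types in place of the real `QueryConfig`/`QueryContext`
// machinery:
//
//     use std::collections::HashMap;
//
//     fn get_or_compute(
//         cache: &mut HashMap<u32, String>,
//         compute: fn(u32) -> String,
//         key: u32,
//     ) -> String {
//         if let Some(value) = cache.get(&key) {
//             return value.clone(); // cache hit: never recompute
//         }
//         let value = compute(key); // cache miss: run the provider
//         cache.insert(key, value.clone());
//         value
//     }
//
// On top of this shape, the code below adds cycle detection, poisoning on
// panic, cross-thread blocking, and the incremental dep-graph paths.
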
use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
use crate::dep_graph::{DepGraphData, HasDepContext};
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
#[cfg(parallel_compiler)]
use crate::query::job::QueryLatch;
use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
use crate::query::SerializedDepNodeIndex;
use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
use crate::HandleCycleError;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::Lock;
#[cfg(parallel_compiler)]
use rustc_data_structures::{cold_path, sharded::Sharded};
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
use rustc_span::{Span, DUMMY_SP};
use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::mem;
use thin_vec::ThinVec;

use super::QueryConfig;

pub struct QueryState<K, D: DepKind> {
    #[cfg(parallel_compiler)]
    active: Sharded<FxHashMap<K, QueryResult<D>>>,
    #[cfg(not(parallel_compiler))]
    active: Lock<FxHashMap<K, QueryResult<D>>>,
}

/// Indicates the state of a query for a given key in a query map.
enum QueryResult<D: DepKind> {
    /// An already executing query. The query job can be used to await its completion.
    Started(QueryJob<D>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error,
    /// which will silently panic.
    Poisoned,
}

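// The lifecycle of an entry in `active` is: absent -> `Started` once a job
// claims the key in `try_execute_query`, then either removed again by
// `JobOwner::complete` on success, or flipped to `Poisoned` by `JobOwner`'s
// `Drop` impl if the provider panicked. Waiters that observe `Poisoned`
// raise a fatal error.
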
impl<K, D> QueryState<K, D>
where
    K: Eq + Hash + Copy + Debug,
    D: DepKind,
{
    pub fn all_inactive(&self) -> bool {
        #[cfg(parallel_compiler)]
        {
            let shards = self.active.lock_shards();
            shards.iter().all(|shard| shard.is_empty())
        }
        #[cfg(not(parallel_compiler))]
        {
            self.active.lock().is_empty()
        }
    }

    pub fn try_collect_active_jobs<Qcx: Copy>(
        &self,
        qcx: Qcx,
        make_query: fn(Qcx, K) -> QueryStackFrame<D>,
        jobs: &mut QueryMap<D>,
    ) -> Option<()> {
        #[cfg(parallel_compiler)]
        {
            // We use `try_lock_shards` here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            let shards = self.active.try_lock_shards()?;
            for shard in shards.iter() {
                for (k, v) in shard.iter() {
                    if let QueryResult::Started(ref job) = *v {
                        let query = make_query(qcx, *k);
                        jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                    }
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            // We use `try_lock` here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
            // really hurt much.)
            for (k, v) in self.active.try_lock()?.iter() {
                if let QueryResult::Started(ref job) = *v {
                    let query = make_query(qcx, *k);
                    jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                }
            }
        }

        Some(())
    }
}

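// `try_collect_active_jobs` is deliberately best-effort: the deadlock handler
// invokes it while other threads may still hold the state lock, so it must not
// block on that lock itself. A minimal sketch of the non-blocking pattern it
// relies on (`collect_for_diagnostics` is a hypothetical stand-in, not the
// real handler):
//
//     use std::sync::Mutex;
//
//     fn collect_for_diagnostics(active: &Mutex<Vec<u32>>) -> Option<Vec<u32>> {
//         // `try_lock` returns immediately instead of blocking; on failure we
//         // give up rather than risk deadlocking inside the handler itself.
//         let guard = active.try_lock().ok()?;
//         Some(guard.clone())
//     }
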
impl<K, D: DepKind> Default for QueryState<K, D> {
    fn default() -> QueryState<K, D> {
        QueryState { active: Default::default() }
    }
}

/// A type representing the responsibility to execute the query job for `key`.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, K, D: DepKind>
where
    K: Eq + Hash + Copy,
{
    state: &'tcx QueryState<K, D>,
    key: K,
}

#[cold]
#[inline(never)]
fn mk_cycle<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    cycle_error: CycleError<Qcx::DepKind>,
    handler: HandleCycleError,
) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
    handle_cycle_error(query, qcx, &cycle_error, error, handler)
}

fn handle_cycle_error<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    cycle_error: &CycleError<Qcx::DepKind>,
    mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
    handler: HandleCycleError,
) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    use HandleCycleError::*;
    match handler {
        Error => {
            error.emit();
            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
        }
        Fatal => {
            error.emit();
            qcx.dep_context().sess().abort_if_errors();
            unreachable!()
        }
        DelayBug => {
            error.delay_as_bug();
            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
        }
    }
}

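// Of the three strategies: `Error` and `DelayBug` synthesize a placeholder
// value from the cycle so compilation can continue and surface further
// diagnostics, while `Fatal` aborts once the diagnostic is emitted (the
// `unreachable!()` records that `abort_if_errors` cannot return here, since
// a cycle error was just emitted).
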
impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
where
    K: Eq + Hash + Copy,
{
    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiters, and forgets the `JobOwner` so it won't poison the query.
    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
    where
        C: QueryCache<Key = K>,
    {
        let key = self.key;
        let state = self.state;

        // Forget ourself so our destructor won't poison the query
        mem::forget(self);

        // Mark as complete before we remove the job from the active state
        // so no other thread can re-execute this query.
        cache.complete(key, result, dep_node_index);

        let job = {
            #[cfg(parallel_compiler)]
            let mut lock = state.active.get_shard_by_value(&key).lock();
            #[cfg(not(parallel_compiler))]
            let mut lock = state.active.lock();
            match lock.remove(&key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            }
        };

        job.signal_complete();
    }
}

impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D>
where
    K: Eq + Hash + Copy,
    D: DepKind,
{
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let job = {
            #[cfg(parallel_compiler)]
            let mut shard = state.active.get_shard_by_value(&self.key).lock();
            #[cfg(not(parallel_compiler))]
            let mut shard = state.active.lock();
            let job = match shard.remove(&self.key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            shard.insert(self.key, QueryResult::Poisoned);
            job
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}

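// This `Drop` impl is a classic RAII guard: while the `JobOwner` is alive, an
// unwind out of the provider reliably marks the key `Poisoned` and wakes any
// waiters, and `complete` defuses the guard with `mem::forget` on the success
// path. The same pattern in isolation (names hypothetical):
//
//     struct PoisonGuard<'a> {
//         poisoned: &'a mut bool,
//         armed: bool,
//     }
//
//     impl Drop for PoisonGuard<'_> {
//         fn drop(&mut self) {
//             if self.armed {
//                 *self.poisoned = true; // runs only if we did not defuse
//             }
//         }
//     }
//
//     fn run(poisoned: &mut bool, work: impl FnOnce()) {
//         let mut guard = PoisonGuard { poisoned, armed: true };
//         work(); // if this panics, `guard` drops while still armed
//         guard.armed = false; // success: defuse the guard
//     }
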
#[derive(Clone)]
pub(crate) struct CycleError<D: DepKind> {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, QueryStackFrame<D>)>,
    pub cycle: Vec<QueryInfo<D>>,
}

/// Checks if the query is already computed and in the cache.
/// On a hit, this registers a read of the corresponding dep node
/// and returns the cached value; on a miss it returns `None` and
/// the caller is expected to compute the query.
#[inline(always)]
pub fn try_get_cached<Tcx, C>(tcx: Tcx, cache: &C, key: &C::Key) -> Option<C::Value>
where
    C: QueryCache,
    Tcx: DepContext,
{
    match cache.lookup(&key) {
        Some((value, index)) => {
            tcx.profiler().query_cache_hit(index.into());
            tcx.dep_graph().read_index(index);
            Some(value)
        }
        None => None,
    }
}

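// Note that a cache hit still calls `dep_graph().read_index(index)`: consuming
// a cached result counts as a use of that query, so the dependency has to be
// recorded for the currently executing task exactly as if the query had run.
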
#[cold]
#[inline(never)]
#[cfg(not(parallel_compiler))]
fn cycle_error<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    try_execute: QueryJobId,
    span: Span,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let error = try_execute.find_cycle_in_stack(
        qcx.try_collect_active_jobs().unwrap(),
        &qcx.current_query_job(),
        span,
    );
    (mk_cycle(query, qcx, error, query.handle_cycle_error()), None)
}

#[inline(always)]
#[cfg(parallel_compiler)]
fn wait_for_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    latch: QueryLatch<Qcx::DepKind>,
    current: Option<QueryJobId>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // For parallel queries, we'll block and wait until the query running
    // in another thread has completed. Record how long we wait in the
    // self-profiler.
    let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();

    // With parallel queries we might just have to wait on some other
    // thread.
    let result = latch.wait_on(current, span);

    match result {
        Ok(()) => {
            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
                cold_path(|| panic!("value must be in cache after waiting"))
            };

            qcx.dep_context().profiler().query_cache_hit(index.into());
            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());

            (v, Some(index))
        }
        Err(cycle) => (mk_cycle(query, qcx, cycle, query.handle_cycle_error()), None),
    }
}

#[inline(never)]
fn try_execute_query<Q, Qcx, const INCR: bool>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    dep_node: Option<DepNode<Qcx::DepKind>>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let state = query.query_state(qcx);
    #[cfg(parallel_compiler)]
    let mut state_lock = state.active.get_shard_by_value(&key).lock();
    #[cfg(not(parallel_compiler))]
    let mut state_lock = state.active.lock();

    // For the parallel compiler we need to check both the query cache and query state structures
    // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
    // query is not still executing. Without checking the query cache here, we can end up
    // re-executing the query since `try_start` only checks that the query is not currently
    // executing, but another thread may have already completed the query and stored its result
    // in the query cache.
    if cfg!(parallel_compiler) && qcx.dep_context().sess().threads() > 1 {
        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
            qcx.dep_context().profiler().query_cache_hit(index.into());
            return (value, Some(index));
        }
    }

    let current_job_id = qcx.current_query_job();

    match state_lock.entry(key) {
        Entry::Vacant(entry) => {
            // Nothing has computed or is computing the query, so we start a new job and insert
            // it in the state map.
            let id = qcx.next_job_id();
            let job = QueryJob::new(id, span, current_job_id);
            entry.insert(QueryResult::Started(job));

            // Drop the lock before we start executing the query
            drop(state_lock);

            execute_job::<_, _, INCR>(query, qcx, state, key, id, dep_node)
        }
        Entry::Occupied(mut entry) => {
            match entry.get_mut() {
                #[cfg(not(parallel_compiler))]
                QueryResult::Started(job) => {
                    let id = job.id;
                    drop(state_lock);

                    // If we are single-threaded we know that we have a cycle error,
                    // so we just return the error.
                    cycle_error(query, qcx, id, span)
                }
                #[cfg(parallel_compiler)]
                QueryResult::Started(job) => {
                    // Get the latch out
                    let latch = job.latch();
                    drop(state_lock);

                    wait_for_query(query, qcx, span, key, latch, current_job_id)
                }
                QueryResult::Poisoned => FatalError.raise(),
            }
        }
    }
}

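// The cache re-check under the state lock above is double-checked locking: an
// earlier lock-free probe happens in the caller, and the second probe closes
// the window in which another thread completes the query between that probe
// and this thread acquiring the state lock. The shape of the pattern in
// isolation (hypothetical types, a plain `Mutex` in place of the shards):
//
//     use std::collections::HashMap;
//     use std::sync::Mutex;
//
//     fn get_or_insert(cache: &Mutex<HashMap<u32, String>>, key: u32) -> String {
//         // First probe: may race with a writer finishing right after it.
//         if let Some(v) = cache.lock().unwrap().get(&key) {
//             return v.clone();
//         }
//         // Second probe after re-acquiring the lock, before computing.
//         let mut guard = cache.lock().unwrap();
//         if let Some(v) = guard.get(&key) {
//             return v.clone();
//         }
//         let v = format!("computed-{key}");
//         guard.insert(key, v.clone());
//         v
//     }
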
#[inline(always)]
fn execute_job<Q, Qcx, const INCR: bool>(
    query: Q,
    qcx: Qcx,
    state: &QueryState<Q::Key, Qcx::DepKind>,
    key: Q::Key,
    id: QueryJobId,
    dep_node: Option<DepNode<Qcx::DepKind>>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Use `JobOwner` so the query will be poisoned if executing it panics.
    let job_owner = JobOwner { state, key };

    debug_assert_eq!(qcx.dep_context().dep_graph().is_fully_enabled(), INCR);

    let (result, dep_node_index) = if INCR {
        execute_job_incr(
            query,
            qcx,
            qcx.dep_context().dep_graph().data().unwrap(),
            key,
            dep_node,
            id,
        )
    } else {
        execute_job_non_incr(query, qcx, key, id)
    };

    let cache = query.query_cache(qcx);
    if query.feedable() {
        // We should not compute queries that also got a value via feeding.
        // This can't happen, as query feeding adds the very same dependencies to the fed query
        // as its feeding query had. So if the fed query is red, so is its feeder, which will
        // get evaluated first, and re-feed the query.
        if let Some((cached_result, _)) = cache.lookup(&key) {
            let Some(hasher) = query.hash_result() else {
                panic!(
                    "no_hash fed query later has its value computed.\n\
                    Remove `no_hash` modifier to allow recomputation.\n\
                    The already cached value: {}",
                    (query.format_value())(&cached_result)
                );
            };

            let (old_hash, new_hash) = qcx.dep_context().with_stable_hashing_context(|mut hcx| {
                (hasher(&mut hcx, &cached_result), hasher(&mut hcx, &result))
            });
            let formatter = query.format_value();
            if old_hash != new_hash {
                // We have an inconsistency. This can happen if one of the two
                // results is tainted by errors. In this case, delay a bug to
                // ensure compilation is doomed.
                qcx.dep_context().sess().delay_span_bug(
                    DUMMY_SP,
                    format!(
                        "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
                        computed={:#?}\nfed={:#?}",
                        query.dep_kind(),
                        key,
                        formatter(&result),
                        formatter(&cached_result),
                    ),
                );
            }
        }
    }
    job_owner.complete(cache, result, dep_node_index);

    (result, Some(dep_node_index))
}

// Fast path for when incr. comp. is off.
#[inline(always)]
fn execute_job_non_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: Q::Key,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());

    // Fingerprint the key, just to assert that it doesn't
    // have anything we don't consider hashable
    if cfg!(debug_assertions) {
        let _ = key.to_fingerprint(*qcx.dep_context());
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();
    let result = qcx.start_query(job_id, query.depth_limit(), None, || query.compute(qcx, key));
    let dep_node_index = qcx.dep_context().dep_graph().next_virtual_depnode_index();
    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Similarly, fingerprint the result to assert that
    // it doesn't have anything not considered hashable.
    if cfg!(debug_assertions) && let Some(hash_result) = query.hash_result() {
        qcx.dep_context().with_stable_hashing_context(|mut hcx| {
            hash_result(&mut hcx, &result);
        });
    }

    (result, dep_node_index)
}

#[inline(always)]
fn execute_job_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    dep_graph_data: &DepGraphData<Qcx::DepKind>,
    key: Q::Key,
    mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    if !query.anon() && !query.eval_always() {
        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node =
            dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key));

        // The diagnostics for this query will be promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        if let Some(ret) = qcx.start_query(job_id, false, None, || {
            try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, &dep_node)
        }) {
            return ret;
        }
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();
    let diagnostics = Lock::new(ThinVec::new());

    let (result, dep_node_index) =
        qcx.start_query(job_id, query.depth_limit(), Some(&diagnostics), || {
            if query.anon() {
                return dep_graph_data.with_anon_task(*qcx.dep_context(), query.dep_kind(), || {
                    query.compute(qcx, key)
                });
            }

            // `to_dep_node` is expensive for some `DepKind`s.
            let dep_node =
                dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));

            dep_graph_data.with_task(
                dep_node,
                (qcx, query),
                key,
                |(qcx, query), key| query.compute(qcx, key),
                query.hash_result(),
            )
        });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    let diagnostics = diagnostics.into_inner();
    let side_effects = QuerySideEffects { diagnostics };

    if std::intrinsics::unlikely(!side_effects.is_empty()) {
        if query.anon() {
            qcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
        } else {
            qcx.store_side_effects(dep_node_index, side_effects);
        }
    }

    (result, dep_node_index)
}

#[inline(always)]
fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
    query: Q,
    dep_graph_data: &DepGraphData<Qcx::DepKind>,
    qcx: Qcx,
    key: &Q::Key,
    dep_node: &DepNode<Qcx::DepKind>,
) -> Option<(Q::Value, DepNodeIndex)>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Note this function can be called concurrently from the same query.
    // We must ensure that this is handled correctly.

    let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, &dep_node)?;

    debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));

    // First we try to load the result from the on-disk cache.
    // Some things are never cached on disk.
    if let Some(result) = query.try_load_from_disk(qcx, key, prev_dep_node_index, dep_node_index) {
        if std::intrinsics::unlikely(qcx.dep_context().sess().opts.unstable_opts.query_dep_graph) {
            dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
        }

        let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
        // If `-Zincremental-verify-ich` is specified, re-hash results from
        // the cache and make sure that they have the expected fingerprint.
        //
        // If not, we still seek to verify a subset of fingerprints loaded
        // from disk. Re-hashing results is fairly expensive, so we can't
        // currently afford to verify every hash. This subset should still
        // give us some coverage of potential bugs though.
        let try_verify = prev_fingerprint.split().1.as_u64() % 32 == 0;
        if std::intrinsics::unlikely(
            try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
        ) {
            incremental_verify_ich(
                *qcx.dep_context(),
                dep_graph_data,
                &result,
                prev_dep_node_index,
                query.hash_result(),
                query.format_value(),
            );
        }

        return Some((result, dep_node_index));
    }

    // We always expect to find a cached result for things that
    // can be forced from `DepNode`.
    debug_assert!(
        !query.cache_on_disk(*qcx.dep_context(), key)
            || !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
        "missing on-disk cache entry for {dep_node:?}"
    );

    // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
    // we should actually be able to load it.
    debug_assert!(
        !query.loadable_from_disk(qcx, &key, prev_dep_node_index),
        "missing on-disk cache entry for loadable {dep_node:?}"
    );

    // We could not load a result from the on-disk cache, so
    // recompute.
    let prof_timer = qcx.dep_context().profiler().query_provider();

    // The dep-graph for this computation is already in-place.
    let result = qcx.dep_context().dep_graph().with_ignore(|| query.compute(qcx, *key));

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Verify that re-running the query produced a result with the expected hash.
    // This catches bugs in query implementations, turning them into ICEs.
    // For example, a query might sort its result by `DefId` - since `DefId`s are
    // not stable across compilation sessions, the result could end up getting sorted
    // in a different order when the query is re-run, even though all of the inputs
    // (e.g. `DefPathHash` values) were green.
    //
    // See issue #82920 for an example of a miscompilation that would get turned into
    // an ICE by this check.
    incremental_verify_ich(
        *qcx.dep_context(),
        dep_graph_data,
        &result,
        prev_dep_node_index,
        query.hash_result(),
        query.format_value(),
    );

    Some((result, dep_node_index))
}

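// In short, the load path above makes a three-way decision: if `try_mark_green`
// fails, return `None` and let the caller execute the query normally; if the
// node is green and a result is on disk, decode it (spot-checking fingerprints,
// or always checking under `-Zincremental-verify-ich`); if the node is green
// but nothing was cached on disk, recompute with dependency tracking ignored
// (the dep-graph edges already exist) and always verify the fingerprint.
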
#[inline]
#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
pub(crate) fn incremental_verify_ich<Tcx, V>(
    tcx: Tcx,
    dep_graph_data: &DepGraphData<Tcx::DepKind>,
    result: &V,
    prev_index: SerializedDepNodeIndex,
    hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
    format_value: fn(&V) -> String,
) where
    Tcx: DepContext,
{
    if !dep_graph_data.is_index_green(prev_index) {
        incremental_verify_ich_not_green(tcx, prev_index)
    }

    let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
    });

    let old_hash = dep_graph_data.prev_fingerprint_of(prev_index);

    if new_hash != old_hash {
        incremental_verify_ich_failed(tcx, prev_index, &|| format_value(&result));
    }
}

#[cold]
#[inline(never)]
fn incremental_verify_ich_not_green<Tcx>(tcx: Tcx, prev_index: SerializedDepNodeIndex)
where
    Tcx: DepContext,
{
    panic!(
        "fingerprint for green query instance not loaded from cache: {:?}",
        tcx.dep_graph().data().unwrap().prev_node_of(prev_index)
    )
}

// Note that this is marked #[cold] and intentionally takes a `&dyn Fn() -> String`
// for `result`, as we want to avoid generating a bunch of different implementations
// for LLVM to chew on (and filling up the final binary, too).
#[cold]
#[inline(never)]
fn incremental_verify_ich_failed<Tcx>(
    tcx: Tcx,
    prev_index: SerializedDepNodeIndex,
    result: &dyn Fn() -> String,
) where
    Tcx: DepContext,
{
    // When we emit an error message and panic, we try to debug-print the `DepNode`
    // and query result. Unfortunately, this can cause us to run additional queries,
    // which may result in another fingerprint mismatch while we're in the middle
    // of processing this one. To avoid a double-panic (which kills the process
    // before we can print out the query static), we print out a terse
    // but 'safe' message if we detect a reentrant call to this method.
    thread_local! {
        static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
    };

    let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));

    if old_in_panic {
        tcx.sess().emit_err(crate::error::Reentrant);
    } else {
        let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
            format!("`cargo clean -p {crate_name}` or `cargo clean`")
        } else {
            "`cargo clean`".to_string()
        };

        let dep_node = tcx.dep_graph().data().unwrap().prev_node_of(prev_index);
        tcx.sess().emit_err(crate::error::IncrementCompilation {
            run_cmd,
            dep_node: format!("{dep_node:?}"),
        });
        panic!("Found unstable fingerprints for {dep_node:?}: {}", result());
    }

    INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
}

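// `INSIDE_VERIFY_PANIC` above is a plain thread-local reentrancy guard. The
// same pattern in isolation (names hypothetical):
//
//     use std::cell::Cell;
//
//     thread_local! {
//         static IN_HANDLER: Cell<bool> = const { Cell::new(false) };
//     }
//
//     fn handle_failure(report: impl FnOnce()) {
//         let was_inside = IN_HANDLER.with(|flag| flag.replace(true));
//         if was_inside {
//             eprintln!("reentrant failure; emitting terse report");
//         } else {
//             report(); // may itself fail and re-enter `handle_failure`
//         }
//         IN_HANDLER.with(|flag| flag.set(was_inside));
//     }
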
/// Ensure that either this query has all green inputs or it has been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
/// Returns `true` if the query should still run.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
fn ensure_must_run<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: &Q::Key,
    check_cache: bool,
) -> (bool, Option<DepNode<Qcx::DepKind>>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    if query.eval_always() {
        return (true, None);
    }

    // Ensuring an anonymous query makes no sense
    assert!(!query.anon());

    let dep_node = query.construct_dep_node(*qcx.dep_context(), key);

    let dep_graph = qcx.dep_context().dep_graph();
    let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
        None => {
            // A None return from `try_mark_green` means that this is either
            // a new dep node or that the dep node has already been marked red.
            // Either way, we can't call `dep_graph.read()` as we don't have the
            // DepNodeIndex. We must invoke the query itself. The performance cost
            // this introduces should be negligible as we'll immediately hit the
            // in-memory cache, or another query down the line will.
            return (true, Some(dep_node));
        }
        Some((serialized_dep_node_index, dep_node_index)) => {
            dep_graph.read_index(dep_node_index);
            qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            serialized_dep_node_index
        }
    };

    // We do not need the value at all, so do not check the cache.
    if !check_cache {
        return (false, None);
    }

    let loadable = query.loadable_from_disk(qcx, key, serialized_dep_node_index);
    (!loadable, Some(dep_node))
}

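// Put differently: `ensure` can skip running the provider only when the dep
// node was marked green *and* (if `check_cache` is set) the result is actually
// loadable from disk. `get_query_incr` below consumes this `(must_run, dep_node)`
// pair and short-circuits to `None` when nothing needs to run.
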
#[derive(Debug)]
pub enum QueryMode {
    Get,
    Ensure { check_cache: bool },
}

#[inline(always)]
pub fn get_query_non_incr<Q, Qcx>(query: Q, qcx: Qcx, span: Span, key: Q::Key) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());

    ensure_sufficient_stack(|| try_execute_query::<Q, Qcx, false>(query, qcx, span, key, None).0)
}

#[inline(always)]
pub fn get_query_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    mode: QueryMode,
) -> Option<Q::Value>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(qcx.dep_context().dep_graph().is_fully_enabled());

    let dep_node = if let QueryMode::Ensure { check_cache } = mode {
        let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
        if !must_run {
            return None;
        }
        dep_node
    } else {
        None
    };

    let (result, dep_node_index) = ensure_sufficient_stack(|| {
        try_execute_query::<_, _, true>(query, qcx, span, key, dep_node)
    });
    if let Some(dep_node_index) = dep_node_index {
        qcx.dep_context().dep_graph().read_index(dep_node_index)
    }
    Some(result)
}

pub fn force_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: Q::Key,
    dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
) where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // We may be concurrently trying to both execute and force a query.
    // Ensure that only one of them runs the query.
    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
        qcx.dep_context().profiler().query_cache_hit(index.into());
        return;
    }

    debug_assert!(!query.anon());

    ensure_sufficient_stack(|| {
        try_execute_query::<_, _, true>(query, qcx, DUMMY_SP, key, Some(dep_node))
    });
}