1 //! The implementation of the query system itself. This defines the macros that
2 //! generate the actual methods on tcx which find and execute the provider,
3 //! manage the caches, and so forth.
5 use crate::dep_graph
::{DepContext, DepNode, DepNodeIndex, DepNodeParams}
;
6 use crate::query
::caches
::QueryCache
;
7 use crate::query
::config
::{QueryDescription, QueryVtable}
;
8 use crate::query
::job
::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo}
;
9 use crate::query
::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame}
;
10 use rustc_data_structures
::fingerprint
::Fingerprint
;
11 use rustc_data_structures
::fx
::FxHashMap
;
12 #[cfg(parallel_compiler)]
13 use rustc_data_structures
::profiling
::TimingGuard
;
14 #[cfg(parallel_compiler)]
15 use rustc_data_structures
::sharded
::Sharded
;
16 use rustc_data_structures
::sync
::Lock
;
17 use rustc_data_structures
::thin_vec
::ThinVec
;
18 use rustc_errors
::{DiagnosticBuilder, ErrorGuaranteed, FatalError}
;
19 use rustc_session
::Session
;
20 use rustc_span
::{Span, DUMMY_SP}
;
22 use std
::collections
::hash_map
::Entry
;
// Per-query-kind bookkeeping of in-flight ("active") jobs, keyed by query key `K`.
// NOTE(review): interior lines of this struct are elided in this view — TODO confirm
// against the full file before assuming `active` is the only field.
28 pub struct QueryState
<K
> {
// Parallel build: shard the key -> job-state map to reduce lock contention.
29 #[cfg(parallel_compiler)]
30 active
: Sharded
<FxHashMap
<K
, QueryResult
>>,
// Single-threaded build: one `Lock` around the whole map suffices.
31 #[cfg(not(parallel_compiler))]
32 active
: Lock
<FxHashMap
<K
, QueryResult
>>,
35 /// Indicates the state of a query for a given key in a query map.
37 /// An already executing query. The query job can be used to await its completion.
40 /// The query panicked. Queries trying to wait on this will raise a fatal error which will
// Bound fragment from the enclosing `impl<K> QueryState<K>`: keys must be
// hashable, clonable, comparable, and debug-printable.
47 K
: Eq
+ Hash
+ Clone
+ Debug
,
// Returns true iff no job for this query kind is currently recorded in `active`.
49 pub fn all_inactive(&self) -> bool
{
// Parallel build: every shard of the active map must be empty.
50 #[cfg(parallel_compiler)]
52 let shards
= self.active
.lock_shards();
53 shards
.iter().all(|shard
| shard
.is_empty())
// Non-parallel build: a single map, so a single emptiness check.
55 #[cfg(not(parallel_compiler))]
57 self.active
.lock().is_empty()
// Snapshots every `Started` job into `jobs` (job id -> `QueryJobInfo`), building a
// `QueryStackFrame` for each via `make_query`. Uses non-blocking lock acquisition
// (`try_lock` / `try_lock_shards`), so it bails out via `?` if a lock is contended —
// this is called from the deadlock handler, where blocking would be unsafe.
61 pub fn try_collect_active_jobs
<CTX
: Copy
>(
64 make_query
: fn(CTX
, K
) -> QueryStackFrame
,
67 #[cfg(parallel_compiler)]
69 // We use try_lock_shards here since we are called from the
70 // deadlock handler, and this shouldn't be locked.
71 let shards
= self.active
.try_lock_shards()?
;
72 for shard
in shards
.iter() {
73 for (k
, v
) in shard
.iter() {
74 if let QueryResult
::Started(ref job
) = *v
{
// Record one entry per running job; keys are cloned because we only borrow the map.
75 let query
= make_query(tcx
, k
.clone());
76 jobs
.insert(job
.id
, QueryJobInfo { query, job: job.clone() }
);
81 #[cfg(not(parallel_compiler))]
83 // We use try_lock here since we are called from the
84 // deadlock handler, and this shouldn't be locked.
85 // (FIXME: Is this relevant for non-parallel compilers? It doesn't
// Same collection logic as above, over the single unsharded map.
87 for (k
, v
) in self.active
.try_lock()?
.iter() {
88 if let QueryResult
::Started(ref job
) = *v
{
89 let query
= make_query(tcx
, k
.clone());
90 jobs
.insert(job
.id
, QueryJobInfo { query, job: job.clone() }
);
// A fresh `QueryState` starts with an empty `active` map: no jobs running.
99 impl<K
> Default
for QueryState
<K
> {
100 fn default() -> QueryState
<K
> {
101 QueryState { active: Default::default() }
105 /// A type representing the responsibility to execute the job in the `job` field.
106 /// This will poison the relevant query if dropped.
107 struct JobOwner
<'tcx
, K
>
109 K
: Eq
+ Hash
+ Clone
,
// Back-reference to the owning `QueryState`, used by `complete` and `Drop` to
// remove/poison this job's entry. (Other fields, e.g. `id`/`key` constructed in
// `try_start`, are elided from this view — TODO confirm against the full file.)
111 state
: &'tcx QueryState
<K
>,
// Turns a query cycle into a result: reports the cycle diagnostic, converts the
// emitted error into a value via the query's `handle_cycle_error` callback, and
// stores that value through `store_nocache` (i.e. without memoizing it).
118 fn mk_cycle
<CTX
, V
, R
>(
121 handle_cycle_error
: fn(CTX
, DiagnosticBuilder
<'_
, ErrorGuaranteed
>) -> V
,
122 cache
: &dyn crate::query
::QueryStorage
<Value
= V
, Stored
= R
>,
// Emit the cycle diagnostic against the session, then recover a value from it.
129 let error
= report_cycle(tcx
.dep_context().sess(), error
);
130 let value
= handle_cycle_error(tcx
, error
);
131 cache
.store_nocache(value
)
134 impl<'tcx
, K
> JobOwner
<'tcx
, K
>
136 K
: Eq
+ Hash
+ Clone
,
138 /// Either gets a `JobOwner` corresponding to the query, allowing us to
139 /// start executing the query, or returns with the result of the query.
140 /// This function assumes that `try_get_cached` is already called and returned `lookup`.
141 /// If the query is executing elsewhere, this will wait for it and return the result.
142 /// If the query panicked, this will silently panic.
144 /// This function is inlined because that results in a noticeable speed-up
145 /// for some compile-time benchmarks.
147 fn try_start
<'b
, CTX
>(
149 state
: &'b QueryState
<K
>,
152 ) -> TryGetJob
<'b
, K
>
// Lock the (possibly sharded) active-job map for this key before inspecting it.
156 #[cfg(parallel_compiler)]
157 let mut state_lock
= state
.active
.get_shard_by_value(&key
).lock();
158 #[cfg(not(parallel_compiler))]
159 let mut state_lock
= state
.active
.lock();
160 let lock
= &mut *state_lock
;
162 match lock
.entry(key
) {
// No job for this key yet: register a fresh `QueryJob` as Started and hand
// ownership to the caller via `TryGetJob::NotYetStarted`.
163 Entry
::Vacant(entry
) => {
164 let id
= tcx
.next_job_id();
165 let job
= tcx
.current_query_job();
166 let job
= QueryJob
::new(id
, span
, job
);
168 let key
= entry
.key().clone();
169 entry
.insert(QueryResult
::Started(job
));
171 let owner
= JobOwner { state, id, key }
;
172 return TryGetJob
::NotYetStarted(owner
);
174 Entry
::Occupied(mut entry
) => {
175 match entry
.get_mut() {
176 #[cfg(not(parallel_compiler))]
177 QueryResult
::Started(job
) => {
181 // If we are single-threaded we know that we have cycle error,
182 // so we just return the error.
183 return TryGetJob
::Cycle(id
.find_cycle_in_stack(
184 tcx
.try_collect_active_jobs().unwrap(),
185 &tcx
.current_query_job(),
189 #[cfg(parallel_compiler)]
190 QueryResult
::Started(job
) => {
191 // For parallel queries, we'll block and wait until the query running
192 // in another thread has completed. Record how long we wait in the
194 let query_blocked_prof_timer
= tcx
.dep_context().profiler().query_blocked();
197 let latch
= job
.latch();
201 // With parallel queries we might just have to wait on some other
// Block on the other thread's latch; `Err` here means a cycle was detected
// while waiting.
203 let result
= latch
.wait_on(tcx
.current_query_job(), span
);
206 Ok(()) => TryGetJob
::JobCompleted(query_blocked_prof_timer
),
207 Err(cycle
) => TryGetJob
::Cycle(cycle
),
// The previous owner of this job panicked: propagate as a fatal error.
210 QueryResult
::Poisoned
=> FatalError
.raise(),
216 /// Completes the query by updating the query cache with the `result`,
217 /// signals the waiter and forgets the JobOwner, so it won't poison the query
218 fn complete
<C
>(self, cache
: &C
, result
: C
::Value
, dep_node_index
: DepNodeIndex
) -> C
::Stored
220 C
: QueryCache
<Key
= K
>,
222 // We can move out of `self` here because we `mem::forget` it below
// SAFETY relies on the `mem::forget` noted above: `self.key` is read out raw and
// `self`'s destructor (which would poison the query) never runs.
223 let key
= unsafe { ptr::read(&self.key) }
;
224 let state
= self.state
;
226 // Forget ourself so our destructor won't poison the query
// Remove our Started entry from the active map; anything other than `Started`
// (or a missing entry) is an invariant violation, hence `unwrap`/`panic!`.
229 let (job
, result
) = {
231 #[cfg(parallel_compiler)]
232 let mut lock
= state
.active
.get_shard_by_value(&key
).lock();
233 #[cfg(not(parallel_compiler))]
234 let mut lock
= state
.active
.lock();
235 match lock
.remove(&key
).unwrap() {
236 QueryResult
::Started(job
) => job
,
237 QueryResult
::Poisoned
=> panic
!(),
// Memoize the result in the query cache, then wake any waiters on this job.
240 let result
= cache
.complete(key
, result
, dep_node_index
);
244 job
.signal_complete();
// Dropping a `JobOwner` without calling `complete` (e.g. on panic) poisons the
// query: the Started entry is replaced with `Poisoned` and waiters are woken so
// they can observe the poisoning instead of hanging.
249 impl<'tcx
, K
> Drop
for JobOwner
<'tcx
, K
>
251 K
: Eq
+ Hash
+ Clone
,
256 // Poison the query so jobs waiting on it panic.
257 let state
= self.state
;
259 #[cfg(parallel_compiler)]
260 let mut shard
= state
.active
.get_shard_by_value(&self.key
).lock();
261 #[cfg(not(parallel_compiler))]
262 let mut shard
= state
.active
.lock();
// Our entry must still be `Started`; finding `Poisoned` here is a bug.
263 let job
= match shard
.remove(&self.key
).unwrap() {
264 QueryResult
::Started(job
) => job
,
265 QueryResult
::Poisoned
=> panic
!(),
267 shard
.insert(self.key
.clone(), QueryResult
::Poisoned
);
270 // Also signal the completion of the job, so waiters
271 // will continue execution.
272 job
.signal_complete();
// Describes a detected query cycle: where it was used from, and the stack of
// queries that form the cycle itself.
277 pub(crate) struct CycleError
{
278 /// The query and related span that uses the cycle.
279 pub usage
: Option
<(Span
, QueryStackFrame
)>,
// The chain of query invocations that forms the cycle.
280 pub cycle
: Vec
<QueryInfo
>,
283 /// The result of `try_start`.
284 enum TryGetJob
<'tcx
, K
>
286 K
: Eq
+ Hash
+ Clone
,
288 /// The query is not yet started. Contains a guard to the cache eventually used to start it.
289 NotYetStarted(JobOwner
<'tcx
, K
>),
291 /// The query was already completed.
292 /// Returns the result of the query and its dep-node index
293 /// if it succeeded or a cycle error if it failed.
// Only reachable with the parallel compiler: carries the profiler timing guard
// started while this thread was blocked on the other thread's job.
294 #[cfg(parallel_compiler)]
295 JobCompleted(TimingGuard
<'tcx
>),
297 /// Trying to execute the query resulted in a cycle.
301 /// Checks if the query is already computed and in the cache.
302 /// It returns the shard index and a lock guard to the shard,
303 /// which will be used if the query is not in the cache and we need
306 pub fn try_get_cached
<'a
, CTX
, C
, R
, OnHit
>(
310 // `on_hit` can be called while holding a lock to the query cache
316 OnHit
: FnOnce(&C
::Stored
) -> R
,
// On a cache hit: record the hit with the self-profiler (when enabled) and mark
// the dep-node as read so the dependency graph sees this access.
318 cache
.lookup(&key
, |value
, index
| {
319 if unlikely
!(tcx
.profiler().enabled()) {
320 tcx
.profiler().query_cache_hit(index
.into());
322 tcx
.dep_graph().read_index(index
);
// Drives one query execution through `JobOwner::try_start`: either we start the
// job ourselves, recover from a cycle, or (parallel only) pick up a result another
// thread just finished computing.
327 fn try_execute_query
<CTX
, C
>(
329 state
: &QueryState
<C
::Key
>,
333 dep_node
: Option
<DepNode
<CTX
::DepKind
>>,
334 query
: &QueryVtable
<CTX
, C
::Key
, C
::Value
>,
335 ) -> (C
::Stored
, Option
<DepNodeIndex
>)
338 C
::Key
: Clone
+ DepNodeParams
<CTX
::DepContext
>,
341 match JobOwner
::<'_
, C
::Key
>::try_start(&tcx
, state
, span
, key
.clone()) {
// We own the job: compute it, then publish the result to the cache and waiters.
342 TryGetJob
::NotYetStarted(job
) => {
343 let (result
, dep_node_index
) = execute_job(tcx
, key
, dep_node
, query
, job
.id
);
344 let result
= job
.complete(cache
, result
, dep_node_index
);
345 (result
, Some(dep_node_index
))
// A cycle was detected: synthesize a result via the query's cycle handler.
347 TryGetJob
::Cycle(error
) => {
348 let result
= mk_cycle(tcx
, error
, query
.handle_cycle_error
, cache
);
351 #[cfg(parallel_compiler)]
// Another thread finished the job while we waited; its result must now be in
// the cache, so a lookup miss here is a bug.
352 TryGetJob
::JobCompleted(query_blocked_prof_timer
) => {
353 let (v
, index
) = cache
354 .lookup(&key
, |value
, index
| (value
.clone(), index
))
355 .unwrap_or_else(|_
| panic
!("value must be in cache after waiting"));
357 if unlikely
!(tcx
.dep_context().profiler().enabled()) {
358 tcx
.dep_context().profiler().query_cache_hit(index
.into());
360 query_blocked_prof_timer
.finish_with_query_invocation_id(index
.into());
// Actually computes a query result, recording it in the dependency graph.
// Three paths: (1) incremental compilation disabled — just compute under a virtual
// dep-node; (2) a non-anon, non-eval_always query — first try to reuse a green
// result from disk; (3) otherwise compute inside an (anon or named) dep-graph task
// and store any captured diagnostics as side effects.
367 fn execute_job
<CTX
, K
, V
>(
370 mut dep_node_opt
: Option
<DepNode
<CTX
::DepKind
>>,
371 query
: &QueryVtable
<CTX
, K
, V
>,
373 ) -> (V
, DepNodeIndex
)
375 K
: Clone
+ DepNodeParams
<CTX
::DepContext
>,
379 let dep_graph
= tcx
.dep_context().dep_graph();
381 // Fast path for when incr. comp. is off.
382 if !dep_graph
.is_fully_enabled() {
383 let prof_timer
= tcx
.dep_context().profiler().query_provider();
384 let result
= tcx
.start_query(job_id
, None
, || query
.compute(*tcx
.dep_context(), key
));
385 let dep_node_index
= dep_graph
.next_virtual_depnode_index();
386 prof_timer
.finish_with_query_invocation_id(dep_node_index
.into());
387 return (result
, dep_node_index
);
390 if !query
.anon
&& !query
.eval_always
{
391 // `to_dep_node` is expensive for some `DepKind`s.
// Compute the dep-node lazily: only if the caller didn't already supply one.
393 dep_node_opt
.get_or_insert_with(|| query
.to_dep_node(*tcx
.dep_context(), &key
));
395 // The diagnostics for this query will be promoted to the current session during
396 // `try_mark_green()`, so we can ignore them here.
397 if let Some(ret
) = tcx
.start_query(job_id
, None
, || {
398 try_load_from_disk_and_cache_in_memory(tcx
, &key
, &dep_node
, query
)
404 let prof_timer
= tcx
.dep_context().profiler().query_provider();
// Collect diagnostics emitted during the computation so they can be replayed
// on future incremental builds.
405 let diagnostics
= Lock
::new(ThinVec
::new());
407 let (result
, dep_node_index
) = tcx
.start_query(job_id
, Some(&diagnostics
), || {
409 return dep_graph
.with_anon_task(*tcx
.dep_context(), query
.dep_kind
, || {
410 query
.compute(*tcx
.dep_context(), key
)
414 // `to_dep_node` is expensive for some `DepKind`s.
415 let dep_node
= dep_node_opt
.unwrap_or_else(|| query
.to_dep_node(*tcx
.dep_context(), &key
));
417 dep_graph
.with_task(dep_node
, *tcx
.dep_context(), key
, query
.compute
, query
.hash_result
)
420 prof_timer
.finish_with_query_invocation_id(dep_node_index
.into());
// Persist captured diagnostics against the dep-node (anon vs named storage).
422 let diagnostics
= diagnostics
.into_inner();
423 let side_effects
= QuerySideEffects { diagnostics }
;
425 if unlikely
!(!side_effects
.is_empty()) {
427 tcx
.store_side_effects_for_anon_node(dep_node_index
, side_effects
);
429 tcx
.store_side_effects(dep_node_index
, side_effects
);
433 (result
, dep_node_index
)
// Attempts to avoid recomputation: if the dep-node can be marked green, either
// deserialize the previous result from the on-disk cache or (failing that)
// recompute it with dependency tracking ignored — the graph edges are already
// in place from the previous session. Returns `None` if green-marking fails.
436 fn try_load_from_disk_and_cache_in_memory
<CTX
, K
, V
>(
439 dep_node
: &DepNode
<CTX
::DepKind
>,
440 query
: &QueryVtable
<CTX
, K
, V
>,
441 ) -> Option
<(V
, DepNodeIndex
)>
447 // Note this function can be called concurrently from the same query
448 // We must ensure that this is handled correctly.
450 let dep_graph
= tcx
.dep_context().dep_graph();
// `?` bails out when the node is new or red — the caller must recompute.
451 let (prev_dep_node_index
, dep_node_index
) = dep_graph
.try_mark_green(tcx
, &dep_node
)?
;
453 debug_assert
!(dep_graph
.is_green(dep_node
));
455 // First we try to load the result from the on-disk cache.
456 // Some things are never cached on disk.
457 if query
.cache_on_disk
{
458 let prof_timer
= tcx
.dep_context().profiler().incr_cache_loading();
460 // The call to `with_query_deserialization` enforces that no new `DepNodes`
461 // are created during deserialization. See the docs of that method for more
463 let result
= dep_graph
464 .with_query_deserialization(|| query
.try_load_from_disk(tcx
, prev_dep_node_index
));
466 prof_timer
.finish_with_query_invocation_id(dep_node_index
.into());
468 if let Some(result
) = result
{
// With `-Zquery-dep-graph`, tag this node as loaded-from-disk for debugging.
469 if unlikely
!(tcx
.dep_context().sess().opts
.debugging_opts
.query_dep_graph
) {
470 dep_graph
.mark_debug_loaded_from_disk(*dep_node
)
473 let prev_fingerprint
= tcx
476 .prev_fingerprint_of(dep_node
)
477 .unwrap_or(Fingerprint
::ZERO
);
478 // If `-Zincremental-verify-ich` is specified, re-hash results from
479 // the cache and make sure that they have the expected fingerprint.
481 // If not, we still seek to verify a subset of fingerprints loaded
482 // from disk. Re-hashing results is fairly expensive, so we can't
483 // currently afford to verify every hash. This subset should still
484 // give us some coverage of potential bugs though.
485 let try_verify
= prev_fingerprint
.as_value().1 % 32 == 0;
487 try_verify
|| tcx
.dep_context().sess().opts
.debugging_opts
.incremental_verify_ich
489 incremental_verify_ich(*tcx
.dep_context(), &result
, dep_node
, query
);
492 return Some((result
, dep_node_index
));
495 // We always expect to find a cached result for things that
496 // can be forced from `DepNode`.
498 !tcx
.dep_context().fingerprint_style(dep_node
.kind
).reconstructible(),
499 "missing on-disk cache entry for {:?}",
504 // We could not load a result from the on-disk cache, so
506 let prof_timer
= tcx
.dep_context().profiler().query_provider();
508 // The dep-graph for this computation is already in-place.
509 let result
= dep_graph
.with_ignore(|| query
.compute(*tcx
.dep_context(), key
.clone()));
511 prof_timer
.finish_with_query_invocation_id(dep_node_index
.into());
513 // Verify that re-running the query produced a result with the expected hash
514 // This catches bugs in query implementations, turning them into ICEs.
515 // For example, a query might sort its result by `DefId` - since `DefId`s are
516 // not stable across compilation sessions, the result could end up getting sorted
517 // in a different order when the query is re-run, even though all of the inputs
518 // (e.g. `DefPathHash` values) were green.
520 // See issue #82920 for an example of a miscompilation that would get turned into
521 // an ICE by this check
522 incremental_verify_ich(*tcx
.dep_context(), &result
, dep_node
, query
);
524 Some((result
, dep_node_index
))
// Re-hashes a (green) query result with stable hashing and compares it against the
// fingerprint recorded in the previous session's dep graph; a mismatch means the
// query is not deterministic across sessions and is reported via the cold path.
527 fn incremental_verify_ich
<CTX
, K
, V
: Debug
>(
528 tcx
: CTX
::DepContext
,
530 dep_node
: &DepNode
<CTX
::DepKind
>,
531 query
: &QueryVtable
<CTX
, K
, V
>,
536 tcx
.dep_graph().is_green(dep_node
),
537 "fingerprint for green query instance not loaded from cache: {:?}",
541 debug
!("BEGIN verify_ich({:?})", dep_node
);
// Queries without a `hash_result` function default to `Fingerprint::ZERO`.
542 let new_hash
= query
.hash_result
.map_or(Fingerprint
::ZERO
, |f
| {
543 let mut hcx
= tcx
.create_stable_hashing_context();
546 let old_hash
= tcx
.dep_graph().prev_fingerprint_of(dep_node
);
547 debug
!("END verify_ich({:?})", dep_node
);
// Mismatch => unstable fingerprint: delegate to the #[cold] reporting path.
549 if Some(new_hash
) != old_hash
{
550 incremental_verify_ich_cold(tcx
.sess(), DebugArg
::from(&dep_node
), DebugArg
::from(&result
));
554 // This DebugArg business is largely a mirror of std::fmt::ArgumentV1, which is
555 // currently not exposed publicly.
557 // The PR which added this attempted to use `&dyn Debug` instead, but that
558 // showed statistically significant worse compiler performance. It's not
559 // actually clear what the cause there was -- the code should be cold. If this
560 // can be replaced with `&dyn Debug` with no perf impact, then it probably
// A type-erased debug-printable reference: an opaque pointer plus the matching
// `Debug::fmt` function pointer to render it.
566 struct DebugArg
<'a
> {
568 fmt
: fn(&Opaque
, &mut std
::fmt
::Formatter
<'_
>) -> std
::fmt
::Result
,
571 impl<'a
, T
> From
<&'a T
> for DebugArg
<'a
>
575 fn from(value
: &'a T
) -> DebugArg
<'a
> {
// SAFETY: erases `&'a T` behind an opaque reference; it is only ever passed back
// to the `<T as Debug>::fmt` pointer stored alongside it, which expects the same
// `T`. NOTE(review): soundness also depends on the `Opaque` definition, which is
// not visible in this view — confirm against the full file.
577 value
: unsafe { std::mem::transmute(value) }
,
// Erase the concrete `fmt` signature to the opaque-argument signature; must stay
// paired with the matching erased `value` above.
579 std
::mem
::transmute(<T
as std
::fmt
::Debug
>::fmt
as fn(_
, _
) -> std
::fmt
::Result
)
// Debug for DebugArg just re-invokes the stored fn pointer on the stored value.
585 impl std
::fmt
::Debug
for DebugArg
<'_
> {
586 fn fmt(&self, f
: &mut std
::fmt
::Formatter
<'_
>) -> std
::fmt
::Result
{
587 (self.fmt
)(self.value
, f
)
591 // Note that this is marked #[cold] and intentionally takes the equivalent of
592 // `dyn Debug` for its arguments, as we want to avoid generating a bunch of
593 // different implementations for LLVM to chew on (and filling up the final
// Reports an unstable-fingerprint ICE with a user-facing workaround hint, guarding
// against re-entrant invocation (debug-printing the arguments can itself run
// queries and trigger another mismatch).
596 fn incremental_verify_ich_cold(sess
: &Session
, dep_node
: DebugArg
<'_
>, result
: DebugArg
<'_
>) {
// Suggest the narrowest cache-clearing command available.
597 let run_cmd
= if let Some(crate_name
) = &sess
.opts
.crate_name
{
598 format
!("`cargo clean -p {}` or `cargo clean`", crate_name
)
600 "`cargo clean`".to_string()
603 // When we emit an error message and panic, we try to debug-print the `DepNode`
604 // and query result. Unfortunately, this can cause us to run additional queries,
605 // which may result in another fingerprint mismatch while we're in the middle
606 // of processing this one. To avoid a double-panic (which kills the process
607 // before we can print out the query static), we print out a terse
608 // but 'safe' message if we detect a re-entrant call to this method.
// Re-entrancy flag; the `.with(..)` access pattern below suggests this lives in a
// `thread_local!` wrapper (declaration elided here) — TODO confirm.
610 static INSIDE_VERIFY_PANIC
: Cell
<bool
> = const { Cell::new(false) }
;
613 let old_in_panic
= INSIDE_VERIFY_PANIC
.with(|in_panic
| in_panic
.replace(true));
617 "internal compiler error: re-entrant incremental verify failure, suppressing message",
621 sess
.struct_err(&format
!("internal compiler error: encountered incremental compilation error with {:?}", dep_node
))
622 .help(&format
!("This is a known issue with the compiler. Run {} to allow your project to compile", run_cmd
))
623 .note(&"Please follow the instructions below to create a bug report with the provided information")
624 .note(&"See <https://github.com/rust-lang/rust/issues/84970> for more information")
626 panic
!("Found unstable fingerprints for {:?}: {:?}", dep_node
, result
);
// Restore the previous re-entrancy state before returning/unwinding past here.
629 INSIDE_VERIFY_PANIC
.with(|in_panic
| in_panic
.set(old_in_panic
));
632 /// Ensure that either this query has all green inputs or has been executed.
633 /// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
634 /// Returns true if the query should still run.
636 /// This function is particularly useful when executing passes for their
637 /// side-effects -- e.g., in order to report errors for erroneous programs.
639 /// Note: The optimization is only available during incr. comp.
641 fn ensure_must_run
<CTX
, K
, V
>(
644 query
: &QueryVtable
<CTX
, K
, V
>,
645 ) -> (bool
, Option
<DepNode
<CTX
::DepKind
>>)
647 K
: crate::dep_graph
::DepNodeParams
<CTX
::DepContext
>,
// eval_always queries can never be green-marked, so they must always run.
650 if query
.eval_always
{
654 // Ensuring an anonymous query makes no sense
655 assert
!(!query
.anon
);
657 let dep_node
= query
.to_dep_node(*tcx
.dep_context(), key
);
659 let dep_graph
= tcx
.dep_context().dep_graph();
660 match dep_graph
.try_mark_green(tcx
, &dep_node
) {
662 // A None return from `try_mark_green` means that this is either
663 // a new dep node or that the dep node has already been marked red.
664 // Either way, we can't call `dep_graph.read()` as we don't have the
665 // DepNodeIndex. We must invoke the query itself. The performance cost
666 // this introduces should be negligible as we'll immediately hit the
667 // in-memory cache, or another query down the line will.
668 (true, Some(dep_node
))
// Green: record the read and count it as a cache hit; no need to run the query.
670 Some((_
, dep_node_index
)) => {
671 dep_graph
.read_index(dep_node_index
);
672 tcx
.dep_context().profiler().query_cache_hit(dep_node_index
.into());
// Public entry point for executing a query `Q` for `key`. In `QueryMode::Ensure`
// mode it may skip execution entirely when `ensure_must_run` proves the cached
// result is still green; otherwise it funnels into `try_execute_query` and records
// the dep-graph read of the resulting node.
683 pub fn get_query
<Q
, CTX
>(tcx
: CTX
, span
: Span
, key
: Q
::Key
, mode
: QueryMode
) -> Option
<Q
::Stored
>
685 Q
: QueryDescription
<CTX
>,
686 Q
::Key
: DepNodeParams
<CTX
::DepContext
>,
689 let query
= Q
::make_vtable(tcx
, &key
);
// Ensure mode: consult the dep graph first; this may yield a dep-node to reuse.
690 let dep_node
= if let QueryMode
::Ensure
= mode
{
691 let (must_run
, dep_node
) = ensure_must_run(tcx
, &key
, &query
);
700 debug
!("ty::query::get_query<{}>(key={:?}, span={:?})", Q
::NAME
, key
, span
);
701 let (result
, dep_node_index
) = try_execute_query(
// Record that the caller read this query's dep-node, when one was produced.
710 if let Some(dep_node_index
) = dep_node_index
{
711 tcx
.dep_context().dep_graph().read_index(dep_node_index
)
// Forces query `Q` for `key` against a known `dep_node` (e.g. when replaying the
// dep graph), racing gracefully with a concurrent normal execution of the same
// query: if the cache already holds a result, this is just a profiler-counted hit.
716 pub fn force_query
<Q
, CTX
>(tcx
: CTX
, key
: Q
::Key
, dep_node
: DepNode
<CTX
::DepKind
>)
718 Q
: QueryDescription
<CTX
>,
719 Q
::Key
: DepNodeParams
<CTX
::DepContext
>,
722 // We may be concurrently trying both execute and force a query.
723 // Ensure that only one of them runs the query.
724 let cache
= Q
::query_cache(tcx
);
725 let cached
= cache
.lookup(&key
, |_
, index
| {
726 if unlikely
!(tcx
.dep_context().profiler().enabled()) {
727 tcx
.dep_context().profiler().query_cache_hit(index
.into());
736 let query
= Q
::make_vtable(tcx
, &key
);
737 let state
= Q
::query_state(tcx
);
// Forcing an anonymous query is meaningless: anon queries have no stable dep-node.
738 debug_assert
!(!query
.anon
);
// DUMMY_SP: forcing has no originating user span to attribute the execution to.
740 try_execute_query(tcx
, state
, cache
, DUMMY_SP
, key
, Some(dep_node
), &query
);