1 //! The implementation of the query system itself. This defines the macros that
2 //! generate the actual methods on tcx which find and execute the provider,
3 //! manage the caches, and so forth.
5 use crate::dep_graph
::{DepNode, DepNodeIndex, SerializedDepNodeIndex}
;
6 use crate::ty
::query
::caches
::QueryCache
;
7 use crate::ty
::query
::config
::{QueryAccessors, QueryDescription}
;
8 use crate::ty
::query
::job
::{QueryInfo, QueryJob, QueryJobId, QueryShardJobId}
;
9 use crate::ty
::query
::Query
;
11 use crate::ty
::{self, TyCtxt}
;
13 #[cfg(not(parallel_compiler))]
14 use rustc_data_structures
::cold_path
;
15 use rustc_data_structures
::fx
::{FxHashMap, FxHasher}
;
16 use rustc_data_structures
::sharded
::Sharded
;
17 use rustc_data_structures
::sync
::{Lock, LockGuard}
;
18 use rustc_data_structures
::thin_vec
::ThinVec
;
19 use rustc_errors
::{struct_span_err, Diagnostic, DiagnosticBuilder, FatalError, Handler, Level}
;
20 use rustc_span
::source_map
::DUMMY_SP
;
22 use std
::collections
::hash_map
::Entry
;
23 use std
::hash
::{Hash, Hasher}
;
25 use std
::num
::NonZeroU32
;
27 #[cfg(debug_assertions)]
28 use std
::sync
::atomic
::{AtomicUsize, Ordering}
;
/// Per-shard state for one query kind: this shard's slice of the result
/// cache plus the map of currently running (or poisoned) jobs, keyed by
/// query key.
pub(crate) struct QueryStateShard<'tcx, D: QueryAccessors<'tcx> + ?Sized> {
    /// This shard's portion of the query's sharded result cache.
    pub(super) cache: <<D as QueryAccessors<'tcx>>::Cache as QueryCache<D::Key, D::Value>>::Sharded,
    /// Jobs currently executing (or poisoned) in this shard.
    pub(super) active: FxHashMap<D::Key, QueryResult<'tcx>>,
    /// Used to generate unique ids for active jobs.
    // NOTE(review): the counter field this doc comment belongs to
    // (referenced elsewhere as `lock.jobs`) and the struct's closing brace
    // are elided in this excerpt — confirm against the full file.
impl<'tcx, Q: QueryAccessors<'tcx>> QueryStateShard<'tcx, Q> {
    // Accessor returning a mutable borrow of this shard's cache slice.
    // NOTE(review): the method header (presumably `fn get_cache(&mut self`)
    // and its body are elided in this excerpt; only the return type is
    // visible below.
    ) -> &mut <<Q as QueryAccessors<'tcx>>::Cache as QueryCache<Q::Key, Q::Value>>::Sharded {
/// A fresh shard: empty cache, no active jobs, job-id counter at zero.
impl<'tcx, Q: QueryAccessors<'tcx>> Default for QueryStateShard<'tcx, Q> {
    fn default() -> QueryStateShard<'tcx, Q> {
        QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
/// Whole-query state: the query's result cache plus the sharded per-key
/// job bookkeeping. `cache_hits` counts hits in debug builds only.
pub(crate) struct QueryState<'tcx, D: QueryAccessors<'tcx> + ?Sized> {
    /// The query's (possibly sharded) result cache.
    pub(super) cache: D::Cache,
    /// Per-shard active-job maps and cache slices.
    pub(super) shards: Sharded<QueryStateShard<'tcx, D>>,
    #[cfg(debug_assertions)]
    /// Debug-only counter of cache hits (see `try_get_cached`).
    pub(super) cache_hits: AtomicUsize,
impl<'tcx, Q: QueryAccessors<'tcx>> QueryState<'tcx, Q> {
    /// Hashes `key` once, selects the shard for that hash, locks it, and
    /// returns the hash, shard index, and lock guard bundled as a
    /// `QueryLookup` for later reuse on a cache miss.
    pub(super) fn get_lookup<K: Hash>(&'tcx self, key: &K) -> QueryLookup<'tcx, Q> {
        // We compute the key's hash once and then use it for both the
        // shard lookup and the hashmap lookup. This relies on the fact
        // that both of them use `FxHasher`.
        let mut hasher = FxHasher::default();
        key.hash(&mut hasher);
        let key_hash = hasher.finish();

        let shard = self.shards.get_shard_index_by_hash(key_hash);
        let lock = self.shards.get_shard_by_index(shard).lock();
        QueryLookup { key_hash, shard, lock }
/// Indicates the state of a query for a given key in a query map.
pub(super) enum QueryResult<'tcx> {
    /// An already executing query. The query job can be used to await for its completion.
    Started(QueryJob<'tcx>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error which will
    // NOTE(review): the rest of this doc comment and the `Poisoned` variant
    // declaration are elided in this excerpt (the variant is matched
    // elsewhere in this file).
impl<'tcx, M: QueryAccessors<'tcx>> QueryState<'tcx, M> {
    /// Hands `f` an iterator over every cached
    /// `(key, value, dep-node-index)` triple across all shards.
    // NOTE(review): the receiver and any parameters before `f`, plus the
    // closure's return type, are elided in this excerpt.
    pub fn iter_results<R>(
        f: impl for<'a> FnOnce(
            Box<dyn Iterator<Item = (&'a M::Key, &'a M::Value, DepNodeIndex)> + 'a>,
        self.cache.iter(&self.shards, |shard| &mut shard.cache, f)

    /// Returns `true` if no query of this kind is currently executing:
    /// locks every shard and checks each `active` map is empty.
    pub fn all_inactive(&self) -> bool {
        let shards = self.shards.lock_shards();
        shards.iter().all(|shard| shard.active.is_empty())
/// A fresh `QueryState`: empty cache, default shards, zeroed hit counter.
impl<'tcx, M: QueryAccessors<'tcx>> Default for QueryState<'tcx, M> {
    fn default() -> QueryState<'tcx, M> {
        // NOTE(review): the struct-literal opener (`QueryState {`) is elided
        // in this excerpt; the field initializers below belong to it.
        cache: M::Cache::default(),
        shards: Default::default(),
        #[cfg(debug_assertions)]
        cache_hits: AtomicUsize::new(0),
/// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
pub(crate) struct QueryLookup<'tcx, Q: QueryAccessors<'tcx>> {
    /// Precomputed `FxHasher` hash of the query key (see `get_lookup`).
    pub(super) key_hash: u64,
    /// Index of the shard `key_hash` maps to.
    pub(super) shard: usize,
    /// Held lock on that shard, reused to start the query on a miss.
    pub(super) lock: LockGuard<'tcx, QueryStateShard<'tcx, Q>>,
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
pub(super) struct JobOwner<'tcx, Q: QueryDescription<'tcx>> {
// NOTE(review): the struct's fields are elided in this excerpt; usage
// elsewhere in this file shows at least `tcx`, `key`, and `id`.
impl<'tcx, Q: QueryDescription<'tcx>> JobOwner<'tcx, Q> {
    /// Either gets a `JobOwner` corresponding to the query, allowing us to
    /// start executing the query, or returns with the result of the query.
    /// This function assumes that `try_get_cached` is already called and returned `lookup`.
    /// If the query is executing elsewhere, this will wait for it and return the result.
    /// If the query panicked, this will silently panic.
    ///
    /// This function is inlined because that results in a noticeable speed-up
    /// for some compile-time benchmarks.
    // NOTE(review): the inline attribute and the leading parameters
    // (`tcx`, `span`, `key` — used below) are elided in this excerpt.
    pub(super) fn try_start(
        mut lookup: QueryLookup<'tcx, Q>,
    ) -> TryGetJob<'tcx, Q> {
        let lock = &mut *lookup.lock;

        // Look up (or create) the active-job entry for this key.
        let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
            Entry::Occupied(mut entry) => {
                match entry.get_mut() {
                    QueryResult::Started(job) => {
                        // For parallel queries, we'll block and wait until the query running
                        // in another thread has completed. Record how long we wait in the
                        let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
                            Some(tcx.prof.query_blocked())

                        // Create the id of the job we're waiting for
                        let id = QueryJobId::new(job.id, lookup.shard, Q::dep_kind());

                        (job.latch(id), _query_blocked_prof_timer)

                    // A previous execution panicked; abort compilation.
                    QueryResult::Poisoned => FatalError.raise(),

            Entry::Vacant(entry) => {
                // No job entry for this query. Return a new one to be started later.

                // Generate an id unique within this shard.
                let id = lock.jobs.checked_add(1).unwrap();
                let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
                let global_id = QueryJobId::new(id, lookup.shard, Q::dep_kind());

                // Record the current (parent) query as this job's creator.
                let job = tls::with_related_context(tcx, |icx| QueryJob::new(id, span, icx.query));

                entry.insert(QueryResult::Started(job));

                let owner = JobOwner { tcx, id: global_id, key: (*key).clone() };
                return TryGetJob::NotYetStarted(owner);

        // Release the shard lock before blocking on the other job.
        mem::drop(lookup.lock);

        // If we are single-threaded we know that we have cycle error,
        // so we just return the error.
        #[cfg(not(parallel_compiler))]
        return TryGetJob::Cycle(cold_path(|| {
            Q::handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span))

        // With parallel queries we might just have to wait on some other
        #[cfg(parallel_compiler)]
        let result = latch.wait_on(tcx, span);

        if let Err(cycle) = result {
            return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));

        // The other job finished; its result must now be in the cache.
        let cached = tcx.try_get_cached::<Q, _, _, _>(
            |value, index| (value.clone(), index),
            |_, _| panic!("value must be in cache after waiting"),

        if let Some(prof_timer) = _query_blocked_prof_timer.take() {
            prof_timer.finish_with_query_invocation_id(cached.1.into());

        return TryGetJob::JobCompleted(cached);

    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiter and forgets the JobOwner, so it won't poison the query
    pub(super) fn complete(self, result: &Q::Value, dep_node_index: DepNodeIndex) {
        // We can move out of `self` here because we `mem::forget` it below
        let key = unsafe { ptr::read(&self.key) };

        // Forget ourself so our destructor won't poison the query
        // NOTE(review): the `mem::forget(self)` call and the binding of
        // `tcx` appear to be elided in this excerpt.
        let state = Q::query_state(tcx);
        let result = result.clone();
        let mut lock = state.shards.get_shard_by_value(&key).lock();
        let job = match lock.active.remove(&key).unwrap() {
            QueryResult::Started(job) => job,
            QueryResult::Poisoned => panic!(),
        state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);

        job.signal_complete();
/// Runs `f` with a fresh diagnostics buffer, returning `f`'s result
/// together with the diagnostics captured while it ran.
// NOTE(review): the `where` keyword introducing the bound is elided in
// this excerpt.
fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
    F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
    let diagnostics = Lock::new(ThinVec::new());
    let result = f(Some(&diagnostics));
    (result, diagnostics.into_inner())
/// Dropping a `JobOwner` without calling `complete` poisons the query so
/// that other jobs waiting on it raise an error instead of hanging.
impl<'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'tcx, Q> {
    // NOTE(review): the `fn drop(&mut self)` header is elided in this
    // excerpt; the statements below are its body.
        // Poison the query so jobs waiting on it panic.
        let state = Q::query_state(self.tcx);
        let shard = state.shards.get_shard_by_value(&self.key);
        let mut shard = shard.lock();
        let job = match shard.active.remove(&self.key).unwrap() {
            QueryResult::Started(job) => job,
            QueryResult::Poisoned => panic!(),
        shard.active.insert(self.key.clone(), QueryResult::Poisoned);
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
/// Information about a detected query cycle, used to build the diagnostic.
pub struct CycleError<'tcx> {
    /// The query and related span that uses the cycle.
    pub(super) usage: Option<(Span, Query<'tcx>)>,
    /// The chain of queries forming the cycle.
    pub(super) cycle: Vec<QueryInfo<'tcx>>,
/// The result of `try_start`.
pub(super) enum TryGetJob<'tcx, D: QueryDescription<'tcx>> {
    /// The query is not yet started. Contains a guard to the cache eventually used to start it.
    NotYetStarted(JobOwner<'tcx, D>),

    /// The query was already completed.
    /// Returns the result of the query and its dep-node index
    /// if it succeeded or a cycle error if it failed.
    #[cfg(parallel_compiler)]
    JobCompleted((D::Value, DepNodeIndex)),

    /// Trying to execute the query resulted in a cycle.
    // NOTE(review): the `Cycle(...)` variant declaration is elided in this
    // excerpt (it is constructed elsewhere in this file).
impl<'tcx> TyCtxt<'tcx> {
    /// Executes a job by changing the `ImplicitCtxt` to point to the
    /// new query job while it executes. It returns the diagnostics
    /// captured during execution and the actual result.
    // NOTE(review): several parameters (the job token and the `compute`
    // closure) and parts of the `ImplicitCtxt` construction are elided in
    // this excerpt.
    pub(super) fn start_query<F, R>(
        diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
        F: FnOnce(TyCtxt<'tcx>) -> R,
        // The `TyCtxt` stored in TLS has the same global interner lifetime
        // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
        // when accessing the `ImplicitCtxt`.
        tls::with_related_context(self, move |current_icx| {
            // Update the `ImplicitCtxt` to point to our new query job.
            let new_icx = tls::ImplicitCtxt {
                layout_depth: current_icx.layout_depth,
                task_deps: current_icx.task_deps,

            // Use the `ImplicitCtxt` while we execute the query.
            tls::enter_context(&new_icx, |_| compute(self))
    /// Builds (without emitting) the "cycle detected" diagnostic for a
    /// query cycle, noting each query in the stack in turn.
    pub(super) fn report_cycle(
        CycleError { usage, cycle: stack }: CycleError<'tcx>,
    ) -> DiagnosticBuilder<'tcx> {
        assert!(!stack.is_empty());

        // Map a span to the def-span of the query that uses it.
        let fix_span = |span: Span, query: &Query<'tcx>| {
            self.sess.source_map().def_span(query.default_span(self, span))

        // Disable naming impls with types in this path, since that
        // sometimes cycles itself, leading to extra cycle errors.
        // (And cycle errors around impls tend to occur during the
        // collect/coherence phases anyhow.)
        ty::print::with_forced_impl_filename_line(|| {
            let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
            let mut err = struct_span_err!(
                "cycle detected when {}",
                stack[0].query.describe(self)

            // Note each intermediate query in the cycle.
            for i in 1..stack.len() {
                let query = &stack[i].query;
                let span = fix_span(stack[(i + 1) % stack.len()].span, query);
                err.span_note(span, &format!("...which requires {}...", query.describe(self)));

                "...which again requires {}, completing the cycle",
                stack[0].query.describe(self)

            // If a (span, query) usage site was recorded, note it too.
            if let Some((span, query)) = usage {
                fix_span(span, &query),
                &format!("cycle used when {}", query.describe(self)),
    /// Best-effort dump of the active query stack to stderr; called from
    /// the panic hook.
    pub fn try_print_query_stack(handler: &Handler) {
        eprintln!("query stack during panic:");

        // Be careful relying on global state here: this code is called from
        // a panic hook, which means that the global `Handler` may be in a weird
        // state if it was responsible for triggering the panic.
        tls::with_context_opt(|icx| {
            if let Some(icx) = icx {
                let query_map = icx.tcx.queries.try_collect_active_jobs();

                let mut current_query = icx.query;

                // Walk parent links from the innermost query outwards.
                while let Some(query) = current_query {
                    if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
                        let mut diag = Diagnostic::new(
                            query_info.info.query.name(),
                            query_info.info.query.describe(icx.tcx)
                        diag.span = icx.tcx.sess.source_map().def_span(query_info.info.span).into();
                        handler.force_print_diagnostic(diag);

                        current_query = query_info.job.parent;

        eprintln!("end of query stack");
    /// Checks if the query is already computed and in the cache.
    /// It returns the shard index and a lock guard to the shard,
    /// which will be used if the query is not in the cache and we need
    // NOTE(review): the rest of this doc comment, the function's value
    // parameters (`key`, `on_hit`, `on_miss`), and the cache-iteration
    // call these fragments belong to are elided in this excerpt.
    fn try_get_cached<Q, R, OnHit, OnMiss>(
        // `on_hit` can be called while holding a lock to the query cache
        Q: QueryDescription<'tcx> + 'tcx,
        OnHit: FnOnce(&Q::Value, DepNodeIndex) -> R,
        OnMiss: FnOnce(Q::Key, QueryLookup<'tcx, Q>) -> R,
        let state = Q::query_state(self);
            QueryStateShard::<Q>::get_cache,
                // Record the cache hit for self-profiling / debug counters.
                if unlikely!(self.prof.enabled()) {
                    self.prof.query_cache_hit(index.into());
                #[cfg(debug_assertions)]
                    state.cache_hits.fetch_add(1, Ordering::Relaxed);
    /// Main entry point: returns the query's value, consulting the cache
    /// first and executing the query on a miss.
    pub(super) fn get_query<Q: QueryDescription<'tcx> + 'tcx>(
        debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);

        self.try_get_cached::<Q, _, _, _>(
            // On a cache hit, just record the dep-graph read of the node.
            self.dep_graph.read_index(index);
            |key, lookup| self.try_execute_query::<Q>(span, key, lookup),
    /// Runs the query after a cache miss: claims the job, then either
    /// executes it (anonymous / incr-comp-off fast paths) or tries to
    /// reuse a green dep-node result before forcing execution.
    pub(super) fn try_execute_query<Q: QueryDescription<'tcx>>(
        lookup: QueryLookup<'tcx, Q>,
        // Claim the job, or return early if it ran (or cycled) elsewhere.
        let job = match JobOwner::try_start(self, span, &key, lookup) {
            TryGetJob::NotYetStarted(job) => job,
            TryGetJob::Cycle(result) => return result,
            #[cfg(parallel_compiler)]
            TryGetJob::JobCompleted((v, index)) => {
                self.dep_graph.read_index(index);

        // Fast path for when incr. comp. is off. `to_dep_node` is
        // expensive for some `DepKind`s.
        if !self.dep_graph.is_fully_enabled() {
            let null_dep_node = DepNode::new_no_params(crate::dep_graph::DepKind::Null);
            return self.force_query_with_job::<Q>(key, job, null_dep_node).0;

        let prof_timer = self.prof.query_provider();

        // Anonymous-query path: run the provider in an anonymous dep task.
        let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
            self.start_query(job.id, diagnostics, |tcx| {
                tcx.dep_graph.with_anon_task(Q::dep_kind(), || Q::compute(tcx, key))

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        self.dep_graph.read_index(dep_node_index);

        if unlikely!(!diagnostics.is_empty()) {
            // NOTE(review): the receiver (presumably the on-disk cache) is
            // elided in this excerpt.
                .store_diagnostics_for_anon_node(dep_node_index, diagnostics);

        job.complete(&result, dep_node_index);

        let dep_node = Q::to_dep_node(self, &key);

        // Try to mark the dep-node green and reuse a previous result.
        // The diagnostics for this query will be
        // promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        let loaded = self.start_query(job.id, None, |tcx| {
            let marked = tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node);
            marked.map(|(prev_dep_node_index, dep_node_index)| {
                    tcx.load_from_disk_and_cache_in_memory::<Q>(

        if let Some((result, dep_node_index)) = loaded {
            job.complete(&result, dep_node_index);

        // Red or new dep-node: execute the query for real.
        let (result, dep_node_index) = self.force_query_with_job::<Q>(key, job, dep_node);
        self.dep_graph.read_index(dep_node_index);
    /// For a green dep-node, obtains the query result either from the
    /// on-disk incremental cache or by recomputing it with dep-tracking
    /// disabled, optionally verifying its fingerprint.
    fn load_from_disk_and_cache_in_memory<Q: QueryDescription<'tcx>>(
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node_index: DepNodeIndex,
        // Note this function can be called concurrently from the same query
        // We must ensure that this is handled correctly.

        debug_assert!(self.dep_graph.is_green(dep_node));

        // First we try to load the result from the on-disk cache.
        let result = if Q::cache_on_disk(self, key.clone(), None)
            && self.sess.opts.debugging_opts.incremental_queries
            let prof_timer = self.prof.incr_cache_loading();
            let result = Q::try_load_from_disk(self, prev_dep_node_index);
            prof_timer.finish_with_query_invocation_id(dep_node_index.into());

            // We always expect to find a cached result for things that
            // can be forced from `DepNode`.
                !dep_node.kind.can_reconstruct_query_key() || result.is_some(),
                "missing on-disk cache entry for {:?}",

            // Some things are never cached on disk.

        let result = if let Some(result) = result {
            // We could not load a result from the on-disk cache, so
            let prof_timer = self.prof.query_provider();

            // The dep-graph for this computation is already in-place.
            let result = self.dep_graph.with_ignore(|| Q::compute(self, key));

            prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        // If `-Zincremental-verify-ich` is specified, re-hash results from
        // the cache and make sure that they have the expected fingerprint.
        if unlikely!(self.sess.opts.debugging_opts.incremental_verify_ich) {
            self.incremental_verify_ich::<Q>(&result, dep_node, dep_node_index);
    /// Re-hashes a green query result and asserts it matches the
    /// fingerprint recorded for the dep-node in the previous session.
    fn incremental_verify_ich<Q: QueryDescription<'tcx>>(
        dep_node_index: DepNodeIndex,
        use crate::ich::Fingerprint;

        // Sanity check: the current fingerprint must equal the previous one.
            Some(self.dep_graph.fingerprint_of(dep_node_index))
                == self.dep_graph.prev_fingerprint_of(dep_node),
            "fingerprint for green query instance not loaded from cache: {:?}",

        debug!("BEGIN verify_ich({:?})", dep_node);
        let mut hcx = self.create_stable_hashing_context();

        let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
        debug!("END verify_ich({:?})", dep_node);

        let old_hash = self.dep_graph.fingerprint_of(dep_node_index);

        assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,);
    /// Unconditionally executes the query inside a dep-graph task for
    /// `dep_node`, stores diagnostics, and completes the claimed `job`.
    fn force_query_with_job<Q: QueryDescription<'tcx>>(
        job: JobOwner<'tcx, Q>,
    ) -> (Q::Value, DepNodeIndex) {
        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
            !self.dep_graph.dep_node_exists(&dep_node),
            "forcing query with already existing `DepNode`\n\

        let prof_timer = self.prof.query_provider();

        let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
            self.start_query(job.id, diagnostics, |tcx| {
                // eval_always queries get a dedicated task kind; others run
                // as a regular dep-graph task.
                    tcx.dep_graph.with_eval_always_task(
                    tcx.dep_graph.with_task(dep_node, tcx, key, Q::compute, Q::hash_result)

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        if unlikely!(!diagnostics.is_empty()) {
            if dep_node.kind != crate::dep_graph::DepKind::Null {
                self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics);

        job.complete(&result, dep_node_index);

        (result, dep_node_index)
    /// Ensure that either this query has all green inputs or been executed.
    /// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
    ///
    /// This function is particularly useful when executing passes for their
    /// side-effects -- e.g., in order to report errors for erroneous programs.
    ///
    /// Note: The optimization is only available during incr. comp.
    pub(super) fn ensure_query<Q: QueryDescription<'tcx> + 'tcx>(self, key: Q::Key) -> () {
            let _ = self.get_query::<Q>(DUMMY_SP, key);

        // Ensuring an anonymous query makes no sense

        let dep_node = Q::to_dep_node(self, &key);

        match self.dep_graph.try_mark_green_and_read(self, &dep_node) {
            // A None return from `try_mark_green_and_read` means that this is either
            // a new dep node or that the dep node has already been marked red.
            // Either way, we can't call `dep_graph.read()` as we don't have the
            // DepNodeIndex. We must invoke the query itself. The performance cost
            // this introduces should be negligible as we'll immediately hit the
            // in-memory cache, or another query down the line will.
                let _ = self.get_query::<Q>(DUMMY_SP, key);
            Some((_, dep_node_index)) => {
                self.prof.query_cache_hit(dep_node_index.into());
    /// Forces the query to run for `key` (unless already cached), racing
    /// at most one concurrent execution per key.
    pub(super) fn force_query<Q: QueryDescription<'tcx> + 'tcx>(
        // We may be concurrently trying both execute and force a query.
        // Ensure that only one of them runs the query.

        self.try_get_cached::<Q, _, _, _>(
            // Cache hit, do nothing

            let job = match JobOwner::try_start(self, span, &key, lookup) {
                TryGetJob::NotYetStarted(job) => job,
                TryGetJob::Cycle(_) => return,
                #[cfg(parallel_compiler)]
                TryGetJob::JobCompleted(_) => return,
            self.force_query_with_job::<Q>(key, job, dep_node);
/// Expands to the cycle-error handling strategy selected by the query's
/// modifiers: report-and-recover (default), `fatal_cycle` (abort), or
/// `cycle_delay_bug` (delayed bug).
macro_rules! handle_cycle_error {
    ([][$tcx: expr, $error:expr]) => {{
        $tcx.report_cycle($error).emit();
        Value::from_cycle_error($tcx)
    ([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{
        $tcx.report_cycle($error).emit();
        $tcx.sess.abort_if_errors();
    ([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{
        $tcx.report_cycle($error).delay_as_bug();
        Value::from_cycle_error($tcx)
    // Any other modifier: strip it and recurse on the rest.
    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
        handle_cycle_error!([$($($modifiers)*)*][$($args)*])
/// Expands to a boolean constant: whether the query's modifier list
/// contains `anon`. Unrelated modifiers are stripped recursively.
// NOTE(review): the default (`false`) arm and the `true` literal body are
// elided in this excerpt.
macro_rules! is_anon {
    ([anon $($rest:tt)*]) => {{
    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
        is_anon!([$($($modifiers)*)*])
/// Expands to a boolean constant: whether the query's modifier list
/// contains `eval_always`. Unrelated modifiers are stripped recursively.
// NOTE(review): the default (`false`) arm and the `true` literal body are
// elided in this excerpt.
macro_rules! is_eval_always {
    ([eval_always $($rest:tt)*]) => {{
    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
        is_eval_always!([$($($modifiers)*)*])
/// Selects the cache type for a query: the key's default `CacheSelector`
/// choice unless an explicit `storage(...)` modifier overrides it.
macro_rules! query_storage {
    ([][$K:ty, $V:ty]) => {
        <<$K as Key>::CacheSelector as CacheSelector<$K, $V>>::Cache
    // Explicit storage override; the expansion body is elided in this excerpt.
    ([storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
        query_storage!([$($($modifiers)*)*][$($args)*])
/// Expands to the fingerprint-hashing expression for a query result, or
/// (with the `no_hash` modifier) a body whose expansion is elided here.
macro_rules! hash_result {
    ([][$hcx:expr, $result:expr]) => {{
        dep_graph::hash_result($hcx, &$result)
    ([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{
    ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
        hash_result!([$($($modifiers)*)*][$($args)*])
/// Front-end macro: flattens the per-category query lists and forwards
/// everything to `define_queries_inner`.
macro_rules! define_queries {
    (<$tcx:tt> $($category:tt {
        $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*
        define_queries_inner! { <$tcx>
            $($( $(#[$attr])* category<$category> [$($modifiers)*] fn $name: $node($K) -> $V,)*)*
/// The main code-generation macro: from the flattened query list it
/// defines the `Queries` struct, the `Query` enum, per-query marker types
/// with their `QueryConfig`/`QueryAccessors` impls, the
/// `TyCtxtEnsure`/`TyCtxtAt` wrappers, and the `Providers` struct.
// NOTE(review): many interior lines (match-arm openers, closing braces,
// some statements) are elided in this excerpt; elisions are flagged below.
macro_rules! define_queries_inner {
    ($($(#[$attr:meta])* category<$category:tt>
        [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => {

            rustc_data_structures::stable_hasher::HashStable,
            rustc_data_structures::stable_hasher::StableHasher,
            ich::StableHashingContext
        use rustc_data_structures::profiling::ProfileCategory;

        define_queries_struct! {
            input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)

        impl<$tcx> Queries<$tcx> {
            // Constructor: stores the providers and on-disk cache and
            // default-initializes every query's state.
                providers: IndexVec<CrateNum, Providers<$tcx>>,
                fallback_extern_providers: Providers<$tcx>,
                on_disk_cache: OnDiskCache<'tcx>,
                    fallback_extern_providers: Box::new(fallback_extern_providers),
                    $($name: Default::default()),*

            /// Snapshot of all currently active jobs, keyed by job id.
            /// Returns `None` if a shard lock cannot be acquired.
            pub fn try_collect_active_jobs(
            ) -> Option<FxHashMap<QueryJobId, QueryJobInfo<'tcx>>> {
                let mut jobs = FxHashMap::default();

                    // We use try_lock_shards here since we are called from the
                    // deadlock handler, and this shouldn't be locked.
                    let shards = self.$name.shards.try_lock_shards()?;
                    let shards = shards.iter().enumerate();
                    jobs.extend(shards.flat_map(|(shard_id, shard)| {
                        shard.active.iter().filter_map(move |(k, v)| {
                            if let QueryResult::Started(ref job) = *v {
                                let id = QueryJobId {
                                    shard: u16::try_from(shard_id).unwrap(),
                                        <queries::$name<'tcx> as QueryAccessors<'tcx>>::dep_kind(),
                                let info = QueryInfo {
                                    query: queries::$name::query(k.clone())
                                Some((id, QueryJobInfo { info, job: job.clone() }))

        #[allow(nonstandard_style)]
        #[derive(Clone, Debug)]
        pub enum Query<$tcx> {
            $($(#[$attr])* $name($K)),*

        impl<$tcx> Query<$tcx> {
            /// The query's name as a static string.
            pub fn name(&self) -> &'static str {
                    $(Query::$name(_) => stringify!($name),)*

            /// Human-readable description of this query instance; appends
            /// the query name in `--verbose` mode.
            pub fn describe(&self, tcx: TyCtxt<'_>) -> Cow<'static, str> {
                let (r, name) = match *self {
                    $(Query::$name(key) => {
                        (queries::$name::describe(tcx, key), stringify!($name))
                if tcx.sess.verbose() {
                    format!("{} [{}]", r, name).into()

            // FIXME(eddyb) Get more valid `Span`s on queries.
            pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span {
                if !span.is_dummy() {
                // The `def_span` query is used to calculate `default_span`,
                // so exit to avoid infinite recursion.
                if let Query::def_span(..) = *self {
                    $(Query::$name(key) => key.default_span(tcx),)*

        impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
            fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
                mem::discriminant(self).hash_stable(hcx, hasher);
                    $(Query::$name(key) => key.hash_stable(hcx, hasher),)*

            // NOTE(review): the enclosing module header (presumably
            // `pub mod queries`) is elided in this excerpt.
            use std::marker::PhantomData;

            // Zero-sized marker type per query, carrying only the lifetime.
            $(#[allow(nonstandard_style)]
            pub struct $name<$tcx> {
                data: PhantomData<&$tcx ()>

        // This module and the functions in it exist only to provide a
        // predictable symbol name prefix for query providers. This is helpful
        // for analyzing queries in profilers.
        pub(super) mod __query_compute {
            pub fn $name<F: FnOnce() -> R, R>(f: F) -> R {

        $(impl<$tcx> QueryConfig<$tcx> for queries::$name<$tcx> {
            const NAME: &'static str = stringify!($name);
            const CATEGORY: ProfileCategory = $category;

        impl<$tcx> QueryAccessors<$tcx> for queries::$name<$tcx> {
            const ANON: bool = is_anon!([$($modifiers)*]);
            const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);

            type Cache = query_storage!([$($modifiers)*][$K, $V]);

            fn query(key: Self::Key) -> Query<'tcx> {

            fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<$tcx, Self> {

            fn to_dep_node(tcx: TyCtxt<$tcx>, key: &Self::Key) -> DepNode {
                DepConstructor::$node(tcx, *key)

            fn dep_kind() -> dep_graph::DepKind {
                dep_graph::DepKind::$node

            fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value {
                __query_compute::$name(move || {
                    let provider = tcx.queries.providers.get(key.query_crate())
                        // HACK(eddyb) it's possible crates may be loaded after
                        // the query engine is created, and because crate loading
                        // is not yet integrated with the query engine, such crates
                        // would be missing appropriate entries in `providers`.
                        .unwrap_or(&tcx.queries.fallback_extern_providers)

                _hcx: &mut StableHashingContext<'_>,
                _result: &Self::Value
            ) -> Option<Fingerprint> {
                hash_result!([$($modifiers)*][_hcx, _result])

            fn handle_cycle_error(
                error: CycleError<'tcx>
                handle_cycle_error!([$($modifiers)*][tcx, error])

        /// Wrapper over `TyCtxt`: queries invoked through it are only
        /// "ensured" (see `ensure_query`), not necessarily executed.
        #[derive(Copy, Clone)]
        pub struct TyCtxtEnsure<'tcx> {
            pub tcx: TyCtxt<'tcx>,

        impl TyCtxtEnsure<$tcx> {
            pub fn $name(self, key: $K) {
                self.tcx.ensure_query::<queries::$name<'_>>(key)

        /// Wrapper over `TyCtxt`: queries invoked through it use `span` as
        /// the query location.
        #[derive(Copy, Clone)]
        pub struct TyCtxtAt<'tcx> {
            pub tcx: TyCtxt<'tcx>,

        impl Deref for TyCtxtAt<'tcx> {
            type Target = TyCtxt<'tcx>;

            fn deref(&self) -> &Self::Target {

            /// Returns a transparent wrapper for `TyCtxt`, which ensures queries
            /// are executed instead of just returning their results.
            pub fn ensure(self) -> TyCtxtEnsure<$tcx> {

            /// Returns a transparent wrapper for `TyCtxt` which uses
            /// `span` as the location of queries performed through it.
            pub fn at(self, span: Span) -> TyCtxtAt<$tcx> {

            // Plain `tcx.$name(key)` delegates to the DUMMY_SP variant.
            pub fn $name(self, key: $K) -> $V {
                self.at(DUMMY_SP).$name(key)

            /// All self-profiling events generated by the query engine use
            /// virtual `StringId`s for their `event_id`. This method makes all
            /// those virtual `StringId`s point to actual strings.
            ///
            /// If we are recording only summary data, the ids will point to
            /// just the query names. If we are recording query keys too, we
            /// allocate the corresponding strings here.
            pub fn alloc_self_profile_query_strings(self) {
                use crate::ty::query::profiling_support::{
                    alloc_self_profile_query_strings_for_query_cache,
                    QueryKeyStringCache,

                if !self.prof.enabled() {

                let mut string_cache = QueryKeyStringCache::new();

                    alloc_self_profile_query_strings_for_query_cache(
                        &self.queries.$name,

        impl TyCtxtAt<$tcx> {
            pub fn $name(self, key: $K) -> $V {
                self.tcx.get_query::<queries::$name<'_>>(self.span, key)

        define_provider_struct! {
            input: ($(([$($modifiers)*] [$name] [$K] [$V]))*)

        impl<$tcx> Copy for Providers<$tcx> {}
        impl<$tcx> Clone for Providers<$tcx> {
            fn clone(&self) -> Self { *self }
/// Generates the `Queries` struct: the on-disk cache, the provider
/// tables, and one `QueryState` field per query.
macro_rules! define_queries_struct {
    // NOTE(review): the start of this macro arm (presumably a leading
    // `(tcx: $tcx:tt,` matcher) is elided in this excerpt.
    input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
        pub struct Queries<$tcx> {
            /// This provides access to the incremental compilation on-disk cache for query results.
            /// Do not access this directly. It is only meant to be used by
            /// `DepGraph::try_mark_green()` and the query infrastructure.
            pub(crate) on_disk_cache: OnDiskCache<'tcx>,

            providers: IndexVec<CrateNum, Providers<$tcx>>,
            fallback_extern_providers: Box<Providers<$tcx>>,

            $($(#[$attr])* $name: QueryState<$tcx, queries::$name<$tcx>>,)*
/// Generates the `Providers` struct (one provider fn pointer per query)
/// plus a `Default` impl whose entries report a bug when a query is
/// invoked for a crate that does not supply it.
macro_rules! define_provider_struct {
    // NOTE(review): the start of this macro arm (presumably a leading
    // `(tcx: $tcx:tt,` matcher) is elided in this excerpt.
    input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => {
        pub struct Providers<$tcx> {
            $(pub $name: fn(TyCtxt<$tcx>, $K) -> $R,)*

        impl<$tcx> Default for Providers<$tcx> {
            fn default() -> Self {
                // Each default provider bugs out: the crate that can answer
                // the query must override the entry.
                $(fn $name<$tcx>(_: TyCtxt<$tcx>, key: $K) -> $R {
                    bug!("`tcx.{}({:?})` unsupported by its crate",
                        stringify!($name), key);
                Providers { $($name),* }