src/librustc/ty/query/plumbing.rs (rustc 1.43.0)
1 //! The implementation of the query system itself. This defines the macros that
2 //! generate the actual methods on tcx which find and execute the provider,
3 //! manage the caches, and so forth.
4
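// For a query declared elsewhere in the compiler (ultimately expanding through the
// `define_queries!` macro at the bottom of this file) roughly as
// `fn type_of(DefId) -> Ty<'tcx>`, the generated surface looks roughly like this
// (a hedged sketch; `def_id` and `span` are placeholders):
//
//     let ty = tcx.type_of(def_id);              // run or reuse the query at `DUMMY_SP`
//     let ty = tcx.at(span).type_of(def_id);     // same query, attributed to `span`
//     tcx.ensure().type_of(def_id);              // execute only for its side effects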
5 use crate::dep_graph::{DepNode, DepNodeIndex, SerializedDepNodeIndex};
6 use crate::ty::query::caches::QueryCache;
7 use crate::ty::query::config::{QueryAccessors, QueryDescription};
8 use crate::ty::query::job::{QueryInfo, QueryJob, QueryJobId, QueryShardJobId};
9 use crate::ty::query::Query;
10 use crate::ty::tls;
11 use crate::ty::{self, TyCtxt};
12
13 #[cfg(not(parallel_compiler))]
14 use rustc_data_structures::cold_path;
15 use rustc_data_structures::fx::{FxHashMap, FxHasher};
16 use rustc_data_structures::sharded::Sharded;
17 use rustc_data_structures::sync::{Lock, LockGuard};
18 use rustc_data_structures::thin_vec::ThinVec;
19 use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, FatalError, Handler, Level};
20 use rustc_span::source_map::DUMMY_SP;
21 use rustc_span::Span;
22 use std::collections::hash_map::Entry;
23 use std::hash::{Hash, Hasher};
24 use std::mem;
25 use std::num::NonZeroU32;
26 use std::ptr;
27 #[cfg(debug_assertions)]
28 use std::sync::atomic::{AtomicUsize, Ordering};
29
30 pub(crate) struct QueryStateShard<'tcx, D: QueryAccessors<'tcx> + ?Sized> {
31 pub(super) cache: <<D as QueryAccessors<'tcx>>::Cache as QueryCache<D::Key, D::Value>>::Sharded,
32 pub(super) active: FxHashMap<D::Key, QueryResult<'tcx>>,
33
34 /// Used to generate unique ids for active jobs.
35 pub(super) jobs: u32,
36 }
37
38 impl<'tcx, Q: QueryAccessors<'tcx>> QueryStateShard<'tcx, Q> {
39 fn get_cache(
40 &mut self,
41 ) -> &mut <<Q as QueryAccessors<'tcx>>::Cache as QueryCache<Q::Key, Q::Value>>::Sharded {
42 &mut self.cache
43 }
44 }
45
46 impl<'tcx, Q: QueryAccessors<'tcx>> Default for QueryStateShard<'tcx, Q> {
47 fn default() -> QueryStateShard<'tcx, Q> {
48 QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
49 }
50 }
51
52 pub(crate) struct QueryState<'tcx, D: QueryAccessors<'tcx> + ?Sized> {
53 pub(super) cache: D::Cache,
54 pub(super) shards: Sharded<QueryStateShard<'tcx, D>>,
55 #[cfg(debug_assertions)]
56 pub(super) cache_hits: AtomicUsize,
57 }
58
59 impl<'tcx, Q: QueryAccessors<'tcx>> QueryState<'tcx, Q> {
60 pub(super) fn get_lookup<K: Hash>(&'tcx self, key: &K) -> QueryLookup<'tcx, Q> {
61 // We compute the key's hash once and then use it for both the
62 // shard lookup and the hashmap lookup. This relies on the fact
63 // that both of them use `FxHasher`.
64 let mut hasher = FxHasher::default();
65 key.hash(&mut hasher);
66 let key_hash = hasher.finish();
67
68 let shard = self.shards.get_shard_index_by_hash(key_hash);
69 let lock = self.shards.get_shard_by_index(shard).lock();
70 QueryLookup { key_hash, shard, lock }
71 }
72 }
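// A minimal standalone sketch of the trick `get_lookup` relies on: hash the key once,
// then reuse that hash both to pick a shard and, inside the shard, for the map lookup.
// `ShardedMap` below is illustrative, not rustc's `Sharded` type, and its shard-index
// formula is an assumption.

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

const SHARD_BITS: u32 = 5;
const SHARDS: usize = 1 << SHARD_BITS;

struct ShardedMap<K, V> {
    shards: Vec<Mutex<HashMap<K, V>>>,
}

impl<K: Hash + Eq, V> ShardedMap<K, V> {
    fn new() -> Self {
        ShardedMap { shards: (0..SHARDS).map(|_| Mutex::new(HashMap::new())).collect() }
    }

    // Use the hash's top bits for shard selection so they stay independent of the
    // low bits the inner `HashMap` uses for bucketing.
    fn shard_index(hash: u64) -> usize {
        (hash >> (64 - SHARD_BITS)) as usize
    }

    fn insert(&self, key: K, value: V) {
        // Hash once...
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        let hash = hasher.finish();
        // ...then reuse the hash to choose the shard before the inner lookup.
        let shard = Self::shard_index(hash);
        self.shards[shard].lock().unwrap().insert(key, value);
    }
}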
73
74 /// Indicates the state of a query for a given key in a query map.
75 pub(super) enum QueryResult<'tcx> {
76 /// An already executing query. The query job can be used to await for its completion.
77 Started(QueryJob<'tcx>),
78
79 /// The query panicked. Queries trying to wait on it will raise a fatal error,
80 /// which unwinds via a silent panic.
81 Poisoned,
82 }
83
84 impl<'tcx, M: QueryAccessors<'tcx>> QueryState<'tcx, M> {
85 pub fn iter_results<R>(
86 &self,
87 f: impl for<'a> FnOnce(
88 Box<dyn Iterator<Item = (&'a M::Key, &'a M::Value, DepNodeIndex)> + 'a>,
89 ) -> R,
90 ) -> R {
91 self.cache.iter(&self.shards, |shard| &mut shard.cache, f)
92 }
93 pub fn all_inactive(&self) -> bool {
94 let shards = self.shards.lock_shards();
95 shards.iter().all(|shard| shard.active.is_empty())
96 }
97 }
98
99 impl<'tcx, M: QueryAccessors<'tcx>> Default for QueryState<'tcx, M> {
100 fn default() -> QueryState<'tcx, M> {
101 QueryState {
102 cache: M::Cache::default(),
103 shards: Default::default(),
104 #[cfg(debug_assertions)]
105 cache_hits: AtomicUsize::new(0),
106 }
107 }
108 }
109
110 /// Values used when checking a query cache; they can be reused on a cache miss to execute the query.
111 pub(crate) struct QueryLookup<'tcx, Q: QueryAccessors<'tcx>> {
112 pub(super) key_hash: u64,
113 pub(super) shard: usize,
114 pub(super) lock: LockGuard<'tcx, QueryStateShard<'tcx, Q>>,
115 }
116
117 /// A type representing the responsibility to execute a query job.
118 /// Dropping it without calling `complete` poisons the relevant query.
119 pub(super) struct JobOwner<'tcx, Q: QueryDescription<'tcx>> {
120 tcx: TyCtxt<'tcx>,
121 key: Q::Key,
122 id: QueryJobId,
123 }
124
125 impl<'tcx, Q: QueryDescription<'tcx>> JobOwner<'tcx, Q> {
126 /// Either gets a `JobOwner` corresponding to the query, allowing us to
127 /// start executing the query, or returns with the result of the query.
128 /// This function assumes that `try_get_cached` has already been called and returned `lookup`.
129 /// If the query is executing elsewhere, this will wait for it and return the result.
130 /// If the query panicked, this will silently panic.
131 ///
132 /// This function is inlined because that results in a noticeable speed-up
133 /// for some compile-time benchmarks.
134 #[inline(always)]
135 pub(super) fn try_start(
136 tcx: TyCtxt<'tcx>,
137 span: Span,
138 key: &Q::Key,
139 mut lookup: QueryLookup<'tcx, Q>,
140 ) -> TryGetJob<'tcx, Q> {
141 let lock = &mut *lookup.lock;
142
143 let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
144 Entry::Occupied(mut entry) => {
145 match entry.get_mut() {
146 QueryResult::Started(job) => {
147 // For parallel queries, we'll block and wait until the query running
148 // in another thread has completed. Record how long we wait in the
149 // self-profiler.
150 let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
151 Some(tcx.prof.query_blocked())
152 } else {
153 None
154 };
155
156 // Create the id of the job we're waiting for
157 let id = QueryJobId::new(job.id, lookup.shard, Q::dep_kind());
158
159 (job.latch(id), _query_blocked_prof_timer)
160 }
161 QueryResult::Poisoned => FatalError.raise(),
162 }
163 }
164 Entry::Vacant(entry) => {
165 // No job entry for this query. Return a new one to be started later.
166
167 // Generate an id unique within this shard.
168 let id = lock.jobs.checked_add(1).unwrap();
169 lock.jobs = id;
170 let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
171
172 let global_id = QueryJobId::new(id, lookup.shard, Q::dep_kind());
173
174 let job = tls::with_related_context(tcx, |icx| QueryJob::new(id, span, icx.query));
175
176 entry.insert(QueryResult::Started(job));
177
178 let owner = JobOwner { tcx, id: global_id, key: (*key).clone() };
179 return TryGetJob::NotYetStarted(owner);
180 }
181 };
182 mem::drop(lookup.lock);
183
184 // If we are single-threaded we know that we have a cycle error,
185 // so we just return the error.
186 #[cfg(not(parallel_compiler))]
187 return TryGetJob::Cycle(cold_path(|| {
188 Q::handle_cycle_error(tcx, latch.find_cycle_in_stack(tcx, span))
189 }));
190
191 // With parallel queries we might just have to wait on some other
192 // thread.
193 #[cfg(parallel_compiler)]
194 {
195 let result = latch.wait_on(tcx, span);
196
197 if let Err(cycle) = result {
198 return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
199 }
200
201 let cached = tcx.try_get_cached::<Q, _, _, _>(
202 (*key).clone(),
203 |value, index| (value.clone(), index),
204 |_, _| panic!("value must be in cache after waiting"),
205 );
206
207 if let Some(prof_timer) = _query_blocked_prof_timer.take() {
208 prof_timer.finish_with_query_invocation_id(cached.1.into());
209 }
210
211 return TryGetJob::JobCompleted(cached);
212 }
213 }
214
215 /// Completes the query by updating the query cache with the `result`,
216 /// signals the waiters, and forgets the `JobOwner` so it won't poison the query.
217 #[inline(always)]
218 pub(super) fn complete(self, result: &Q::Value, dep_node_index: DepNodeIndex) {
219 // We can move out of `self` here because we `mem::forget` it below
220 let key = unsafe { ptr::read(&self.key) };
221 let tcx = self.tcx;
222
223 // Forget ourself so our destructor won't poison the query
224 mem::forget(self);
225
226 let job = {
227 let state = Q::query_state(tcx);
228 let result = result.clone();
229 let mut lock = state.shards.get_shard_by_value(&key).lock();
230 let job = match lock.active.remove(&key).unwrap() {
231 QueryResult::Started(job) => job,
232 QueryResult::Poisoned => panic!(),
233 };
234 state.cache.complete(tcx, &mut lock.cache, key, result, dep_node_index);
235 job
236 };
237
238 job.signal_complete();
239 }
240 }
241
242 #[inline(always)]
243 fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
244 where
245 F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R,
246 {
247 let diagnostics = Lock::new(ThinVec::new());
248 let result = f(Some(&diagnostics));
249 (result, diagnostics.into_inner())
250 }
251
252 impl<'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'tcx, Q> {
253 #[inline(never)]
254 #[cold]
255 fn drop(&mut self) {
256 // Poison the query so jobs waiting on it panic.
257 let state = Q::query_state(self.tcx);
258 let shard = state.shards.get_shard_by_value(&self.key);
259 let job = {
260 let mut shard = shard.lock();
261 let job = match shard.active.remove(&self.key).unwrap() {
262 QueryResult::Started(job) => job,
263 QueryResult::Poisoned => panic!(),
264 };
265 shard.active.insert(self.key.clone(), QueryResult::Poisoned);
266 job
267 };
268 // Also signal the completion of the job, so waiters
269 // will continue execution.
270 job.signal_complete();
271 }
272 }
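// Standalone sketch of the "poison unless explicitly completed" guard pattern used by
// `JobOwner`, `complete`, and the `Drop` impl above. The types below are illustrative,
// not rustc's; only the shape of the technique is the point.

use std::collections::HashMap;
use std::sync::Mutex;

#[allow(dead_code)]
enum Slot {
    Done(i32),
    Poisoned,
}

struct Guard<'a> {
    map: &'a Mutex<HashMap<&'static str, Slot>>,
    key: &'static str,
}

impl<'a> Guard<'a> {
    fn complete(self, value: i32) {
        // Record the result, then skip our destructor so the entry is not poisoned.
        self.map.lock().unwrap().insert(self.key, Slot::Done(value));
        std::mem::forget(self);
    }
}

impl<'a> Drop for Guard<'a> {
    fn drop(&mut self) {
        // Reached only if `complete` was never called, e.g. because the provider panicked.
        self.map.lock().unwrap().insert(self.key, Slot::Poisoned);
    }
}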
273
274 #[derive(Clone)]
275 pub struct CycleError<'tcx> {
276 /// The query and related span that uses the cycle.
277 pub(super) usage: Option<(Span, Query<'tcx>)>,
278 pub(super) cycle: Vec<QueryInfo<'tcx>>,
279 }
280
281 /// The result of `try_start`.
282 pub(super) enum TryGetJob<'tcx, D: QueryDescription<'tcx>> {
283 /// The query is not yet started. Contains the job owner that will be used to start (and complete) it.
284 NotYetStarted(JobOwner<'tcx, D>),
285
286 /// The query was already completed elsewhere.
287 /// Contains the result of the query and its dep-node index;
288 /// cycle errors are reported through the `Cycle` variant instead.
289 #[cfg(parallel_compiler)]
290 JobCompleted((D::Value, DepNodeIndex)),
291
292 /// Trying to execute the query resulted in a cycle.
293 Cycle(D::Value),
294 }
295
296 impl<'tcx> TyCtxt<'tcx> {
297 /// Executes a job by changing the `ImplicitCtxt` to point to the
298 /// new query job while it executes. It returns the diagnostics
299 /// captured during execution and the actual result.
300 #[inline(always)]
301 pub(super) fn start_query<F, R>(
302 self,
303 token: QueryJobId,
304 diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
305 compute: F,
306 ) -> R
307 where
308 F: FnOnce(TyCtxt<'tcx>) -> R,
309 {
310 // The `TyCtxt` stored in TLS has the same global interner lifetime
311 // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
312 // when accessing the `ImplicitCtxt`.
313 tls::with_related_context(self, move |current_icx| {
314 // Update the `ImplicitCtxt` to point to our new query job.
315 let new_icx = tls::ImplicitCtxt {
316 tcx: self,
317 query: Some(token),
318 diagnostics,
319 layout_depth: current_icx.layout_depth,
320 task_deps: current_icx.task_deps,
321 };
322
323 // Use the `ImplicitCtxt` while we execute the query.
324 tls::enter_context(&new_icx, |_| compute(self))
325 })
326 }
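// The pattern used by `start_query` (swap a new implicit context into TLS for the
// duration of a closure) can be sketched standalone roughly as follows; the names
// `CURRENT_QUERY` and `with_query` are illustrative assumptions, not rustc's.

use std::cell::Cell;

thread_local! {
    static CURRENT_QUERY: Cell<Option<&'static str>> = Cell::new(None);
}

fn with_query<R>(name: &'static str, f: impl FnOnce() -> R) -> R {
    CURRENT_QUERY.with(|current| {
        // Install the new context, run the closure, then restore the old context.
        let prev = current.replace(Some(name));
        let result = f();
        // A real implementation restores the previous value in a drop guard so it also
        // runs on unwinding; this sketch assumes `f` does not panic.
        current.set(prev);
        result
    })
}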
327
328 #[inline(never)]
329 #[cold]
330 pub(super) fn report_cycle(
331 self,
332 CycleError { usage, cycle: stack }: CycleError<'tcx>,
333 ) -> DiagnosticBuilder<'tcx> {
334 assert!(!stack.is_empty());
335
336 let fix_span = |span: Span, query: &Query<'tcx>| {
337 self.sess.source_map().def_span(query.default_span(self, span))
338 };
339
340 // Disable naming impls with types in this path, since that
341 // sometimes cycles itself, leading to extra cycle errors.
342 // (And cycle errors around impls tend to occur during the
343 // collect/coherence phases anyhow.)
344 ty::print::with_forced_impl_filename_line(|| {
345 let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
346 let mut err = struct_span_err!(
347 self.sess,
348 span,
349 E0391,
350 "cycle detected when {}",
351 stack[0].query.describe(self)
352 );
353
354 for i in 1..stack.len() {
355 let query = &stack[i].query;
356 let span = fix_span(stack[(i + 1) % stack.len()].span, query);
357 err.span_note(span, &format!("...which requires {}...", query.describe(self)));
358 }
359
360 err.note(&format!(
361 "...which again requires {}, completing the cycle",
362 stack[0].query.describe(self)
363 ));
364
365 if let Some((span, query)) = usage {
366 err.span_note(
367 fix_span(span, &query),
368 &format!("cycle used when {}", query.describe(self)),
369 );
370 }
371
372 err
373 })
374 }
375
376 pub fn try_print_query_stack(handler: &Handler) {
377 eprintln!("query stack during panic:");
378
379 // Be careful relying on global state here: this code is called from
380 // a panic hook, which means that the global `Handler` may be in a weird
381 // state if it was responsible for triggering the panic.
382 tls::with_context_opt(|icx| {
383 if let Some(icx) = icx {
384 let query_map = icx.tcx.queries.try_collect_active_jobs();
385
386 let mut current_query = icx.query;
387 let mut i = 0;
388
389 while let Some(query) = current_query {
390 let query_info =
391 if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
392 info
393 } else {
394 break;
395 };
396 let mut diag = Diagnostic::new(
397 Level::FailureNote,
398 &format!(
399 "#{} [{}] {}",
400 i,
401 query_info.info.query.name(),
402 query_info.info.query.describe(icx.tcx)
403 ),
404 );
405 diag.span = icx.tcx.sess.source_map().def_span(query_info.info.span).into();
406 handler.force_print_diagnostic(diag);
407
408 current_query = query_info.job.parent;
409 i += 1;
410 }
411 }
412 });
413
414 eprintln!("end of query stack");
415 }
416
417 /// Checks if the query has already been computed and is in the cache.
418 /// On a hit, `on_hit` is called with the cached value and its dep-node index.
419 /// On a miss, the key and a `QueryLookup` (key hash, shard index, and a lock
420 /// guard to the shard) are passed to `on_miss`, which can use them to compute the query.
421 #[inline(always)]
422 fn try_get_cached<Q, R, OnHit, OnMiss>(
423 self,
424 key: Q::Key,
425 // `on_hit` can be called while holding a lock to the query cache
426 on_hit: OnHit,
427 on_miss: OnMiss,
428 ) -> R
429 where
430 Q: QueryDescription<'tcx> + 'tcx,
431 OnHit: FnOnce(&Q::Value, DepNodeIndex) -> R,
432 OnMiss: FnOnce(Q::Key, QueryLookup<'tcx, Q>) -> R,
433 {
434 let state = Q::query_state(self);
435
436 state.cache.lookup(
437 state,
438 QueryStateShard::<Q>::get_cache,
439 key,
440 |value, index| {
441 if unlikely!(self.prof.enabled()) {
442 self.prof.query_cache_hit(index.into());
443 }
444 #[cfg(debug_assertions)]
445 {
446 state.cache_hits.fetch_add(1, Ordering::Relaxed);
447 }
448 on_hit(value, index)
449 },
450 on_miss,
451 )
452 }
453
454 #[inline(never)]
455 pub(super) fn get_query<Q: QueryDescription<'tcx> + 'tcx>(
456 self,
457 span: Span,
458 key: Q::Key,
459 ) -> Q::Value {
460 debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
461
462 self.try_get_cached::<Q, _, _, _>(
463 key,
464 |value, index| {
465 self.dep_graph.read_index(index);
466 value.clone()
467 },
468 |key, lookup| self.try_execute_query::<Q>(span, key, lookup),
469 )
470 }
471
472 #[inline(always)]
473 pub(super) fn try_execute_query<Q: QueryDescription<'tcx>>(
474 self,
475 span: Span,
476 key: Q::Key,
477 lookup: QueryLookup<'tcx, Q>,
478 ) -> Q::Value {
479 let job = match JobOwner::try_start(self, span, &key, lookup) {
480 TryGetJob::NotYetStarted(job) => job,
481 TryGetJob::Cycle(result) => return result,
482 #[cfg(parallel_compiler)]
483 TryGetJob::JobCompleted((v, index)) => {
484 self.dep_graph.read_index(index);
485 return v;
486 }
487 };
488
489 // Fast path for when incr. comp. is off. `to_dep_node` is
490 // expensive for some `DepKind`s.
491 if !self.dep_graph.is_fully_enabled() {
492 let null_dep_node = DepNode::new_no_params(crate::dep_graph::DepKind::Null);
493 return self.force_query_with_job::<Q>(key, job, null_dep_node).0;
494 }
495
496 if Q::ANON {
497 let prof_timer = self.prof.query_provider();
498
499 let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
500 self.start_query(job.id, diagnostics, |tcx| {
501 tcx.dep_graph.with_anon_task(Q::dep_kind(), || Q::compute(tcx, key))
502 })
503 });
504
505 prof_timer.finish_with_query_invocation_id(dep_node_index.into());
506
507 self.dep_graph.read_index(dep_node_index);
508
509 if unlikely!(!diagnostics.is_empty()) {
510 self.queries
511 .on_disk_cache
512 .store_diagnostics_for_anon_node(dep_node_index, diagnostics);
513 }
514
515 job.complete(&result, dep_node_index);
516
517 return result;
518 }
519
520 let dep_node = Q::to_dep_node(self, &key);
521
522 if !Q::EVAL_ALWAYS {
523 // The diagnostics for this query will be
524 // promoted to the current session during
525 // `try_mark_green()`, so we can ignore them here.
526 let loaded = self.start_query(job.id, None, |tcx| {
527 let marked = tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node);
528 marked.map(|(prev_dep_node_index, dep_node_index)| {
529 (
530 tcx.load_from_disk_and_cache_in_memory::<Q>(
531 key.clone(),
532 prev_dep_node_index,
533 dep_node_index,
534 &dep_node,
535 ),
536 dep_node_index,
537 )
538 })
539 });
540 if let Some((result, dep_node_index)) = loaded {
541 job.complete(&result, dep_node_index);
542 return result;
543 }
544 }
545
546 let (result, dep_node_index) = self.force_query_with_job::<Q>(key, job, dep_node);
547 self.dep_graph.read_index(dep_node_index);
548 result
549 }
550
551 fn load_from_disk_and_cache_in_memory<Q: QueryDescription<'tcx>>(
552 self,
553 key: Q::Key,
554 prev_dep_node_index: SerializedDepNodeIndex,
555 dep_node_index: DepNodeIndex,
556 dep_node: &DepNode,
557 ) -> Q::Value {
558 // Note: this function can be called concurrently for the same query.
559 // We must ensure that this is handled correctly.
560
561 debug_assert!(self.dep_graph.is_green(dep_node));
562
563 // First we try to load the result from the on-disk cache.
564 let result = if Q::cache_on_disk(self, key.clone(), None)
565 && self.sess.opts.debugging_opts.incremental_queries
566 {
567 let prof_timer = self.prof.incr_cache_loading();
568 let result = Q::try_load_from_disk(self, prev_dep_node_index);
569 prof_timer.finish_with_query_invocation_id(dep_node_index.into());
570
571 // We always expect to find a cached result for things that
572 // can be forced from `DepNode`.
573 debug_assert!(
574 !dep_node.kind.can_reconstruct_query_key() || result.is_some(),
575 "missing on-disk cache entry for {:?}",
576 dep_node
577 );
578 result
579 } else {
580 // Some things are never cached on disk.
581 None
582 };
583
584 let result = if let Some(result) = result {
585 result
586 } else {
587 // We could not load a result from the on-disk cache, so
588 // recompute.
589 let prof_timer = self.prof.query_provider();
590
591 // The dep-graph for this computation is already in-place.
592 let result = self.dep_graph.with_ignore(|| Q::compute(self, key));
593
594 prof_timer.finish_with_query_invocation_id(dep_node_index.into());
595
596 result
597 };
598
599 // If `-Zincremental-verify-ich` is specified, re-hash results from
600 // the cache and make sure that they have the expected fingerprint.
601 if unlikely!(self.sess.opts.debugging_opts.incremental_verify_ich) {
602 self.incremental_verify_ich::<Q>(&result, dep_node, dep_node_index);
603 }
604
605 result
606 }
607
608 #[inline(never)]
609 #[cold]
610 fn incremental_verify_ich<Q: QueryDescription<'tcx>>(
611 self,
612 result: &Q::Value,
613 dep_node: &DepNode,
614 dep_node_index: DepNodeIndex,
615 ) {
616 use crate::ich::Fingerprint;
617
618 assert!(
619 Some(self.dep_graph.fingerprint_of(dep_node_index))
620 == self.dep_graph.prev_fingerprint_of(dep_node),
621 "fingerprint for green query instance not loaded from cache: {:?}",
622 dep_node,
623 );
624
625 debug!("BEGIN verify_ich({:?})", dep_node);
626 let mut hcx = self.create_stable_hashing_context();
627
628 let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO);
629 debug!("END verify_ich({:?})", dep_node);
630
631 let old_hash = self.dep_graph.fingerprint_of(dep_node_index);
632
633 assert!(new_hash == old_hash, "found unstable fingerprints for {:?}", dep_node,);
634 }
635
636 #[inline(always)]
637 fn force_query_with_job<Q: QueryDescription<'tcx>>(
638 self,
639 key: Q::Key,
640 job: JobOwner<'tcx, Q>,
641 dep_node: DepNode,
642 ) -> (Q::Value, DepNodeIndex) {
643 // If the following assertion triggers, there are two possible causes:
644 // 1. Something is wrong with DepNode creation, either here or
645 // in `DepGraph::try_mark_green()`.
646 // 2. Two distinct query keys get mapped to the same `DepNode`
647 // (see for example #48923).
648 assert!(
649 !self.dep_graph.dep_node_exists(&dep_node),
650 "forcing query with already existing `DepNode`\n\
651 - query-key: {:?}\n\
652 - dep-node: {:?}",
653 key,
654 dep_node
655 );
656
657 let prof_timer = self.prof.query_provider();
658
659 let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
660 self.start_query(job.id, diagnostics, |tcx| {
661 if Q::EVAL_ALWAYS {
662 tcx.dep_graph.with_eval_always_task(
663 dep_node,
664 tcx,
665 key,
666 Q::compute,
667 Q::hash_result,
668 )
669 } else {
670 tcx.dep_graph.with_task(dep_node, tcx, key, Q::compute, Q::hash_result)
671 }
672 })
673 });
674
675 prof_timer.finish_with_query_invocation_id(dep_node_index.into());
676
677 if unlikely!(!diagnostics.is_empty()) {
678 if dep_node.kind != crate::dep_graph::DepKind::Null {
679 self.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics);
680 }
681 }
682
683 job.complete(&result, dep_node_index);
684
685 (result, dep_node_index)
686 }
687
688 /// Ensure that either this query has all green inputs or has been executed.
689 /// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
690 ///
691 /// This function is particularly useful when executing passes for their
692 /// side-effects -- e.g., in order to report errors for erroneous programs.
693 ///
694 /// Note: The optimization is only available during incr. comp.
695 pub(super) fn ensure_query<Q: QueryDescription<'tcx> + 'tcx>(self, key: Q::Key) -> () {
696 if Q::EVAL_ALWAYS {
697 let _ = self.get_query::<Q>(DUMMY_SP, key);
698 return;
699 }
700
701 // Ensuring an anonymous query makes no sense
702 assert!(!Q::ANON);
703
704 let dep_node = Q::to_dep_node(self, &key);
705
706 match self.dep_graph.try_mark_green_and_read(self, &dep_node) {
707 None => {
708 // A None return from `try_mark_green_and_read` means that this is either
709 // a new dep node or that the dep node has already been marked red.
710 // Either way, we can't call `dep_graph.read()` as we don't have the
711 // DepNodeIndex. We must invoke the query itself. The performance cost
712 // this introduces should be negligible as we'll immediately hit the
713 // in-memory cache, or another query down the line will.
714 let _ = self.get_query::<Q>(DUMMY_SP, key);
715 }
716 Some((_, dep_node_index)) => {
717 self.prof.query_cache_hit(dep_node_index.into());
718 }
719 }
720 }
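// Distilled shape of the decision `ensure_query` makes, as a standalone sketch; the real
// code calls `try_mark_green_and_read` and the self-profiler rather than these closures,
// whose names are illustrative.

fn ensure_sketch(
    eval_always: bool,
    try_mark_green: impl FnOnce() -> Option<u32>, // Some(dep_node_index) if all inputs are green
    run_query: impl FnOnce(),
    record_cache_hit: impl FnOnce(u32),
) {
    if eval_always {
        // `eval_always` queries cannot be green-marked; always run them.
        run_query();
        return;
    }
    match try_mark_green() {
        // New or red dep node: actually run the query (it will hit the in-memory
        // cache if something else already computed it).
        None => run_query(),
        // All inputs green: the cached result is reusable, just record the "hit".
        Some(dep_node_index) => record_cache_hit(dep_node_index),
    }
}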
721
722 #[allow(dead_code)]
723 pub(super) fn force_query<Q: QueryDescription<'tcx> + 'tcx>(
724 self,
725 key: Q::Key,
726 span: Span,
727 dep_node: DepNode,
728 ) {
729 // We may be concurrently trying to both execute and force a query.
730 // Ensure that only one of them runs the query.
731
732 self.try_get_cached::<Q, _, _, _>(
733 key,
734 |_, _| {
735 // Cache hit, do nothing
736 },
737 |key, lookup| {
738 let job = match JobOwner::try_start(self, span, &key, lookup) {
739 TryGetJob::NotYetStarted(job) => job,
740 TryGetJob::Cycle(_) => return,
741 #[cfg(parallel_compiler)]
742 TryGetJob::JobCompleted(_) => return,
743 };
744 self.force_query_with_job::<Q>(key, job, dep_node);
745 },
746 );
747 }
748 }
749
750 macro_rules! handle_cycle_error {
751 ([][$tcx: expr, $error:expr]) => {{
752 $tcx.report_cycle($error).emit();
753 Value::from_cycle_error($tcx)
754 }};
755 ([fatal_cycle $($rest:tt)*][$tcx:expr, $error:expr]) => {{
756 $tcx.report_cycle($error).emit();
757 $tcx.sess.abort_if_errors();
758 unreachable!()
759 }};
760 ([cycle_delay_bug $($rest:tt)*][$tcx:expr, $error:expr]) => {{
761 $tcx.report_cycle($error).delay_as_bug();
762 Value::from_cycle_error($tcx)
763 }};
764 ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
765 handle_cycle_error!([$($($modifiers)*)*][$($args)*])
766 };
767 }
768
769 macro_rules! is_anon {
770 ([]) => {{
771 false
772 }};
773 ([anon $($rest:tt)*]) => {{
774 true
775 }};
776 ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
777 is_anon!([$($($modifiers)*)*])
778 };
779 }
780
781 macro_rules! is_eval_always {
782 ([]) => {{
783 false
784 }};
785 ([eval_always $($rest:tt)*]) => {{
786 true
787 }};
788 ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*]) => {
789 is_eval_always!([$($($modifiers)*)*])
790 };
791 }
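// The modifier-scanning macros above (`is_anon`, `is_eval_always`, and the dispatching
// arms of `handle_cycle_error`) all share the same "tt-muncher" shape: check the first
// modifier, answer if it is the interesting one, otherwise drop it and recurse. A
// standalone sketch with illustrative macro and modifier names:

macro_rules! has_anon {
    // No modifiers left: the one we care about was not present.
    ([]) => { false };
    // Found it at the front of the list.
    ([anon $($rest:tt)*]) => { true };
    // Something else at the front: drop it and keep scanning.
    ([$other:tt $($rest:tt)*]) => { has_anon!([$($rest)*]) };
}

fn main() {
    assert!(has_anon!([fatal_cycle anon]));
    assert!(!has_anon!([eval_always no_hash]));
}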
792
793 macro_rules! query_storage {
794 ([][$K:ty, $V:ty]) => {
795 <<$K as Key>::CacheSelector as CacheSelector<$K, $V>>::Cache
796 };
797 ([storage($ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
798 $ty
799 };
800 ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
801 query_storage!([$($($modifiers)*)*][$($args)*])
802 };
803 }
804
805 macro_rules! hash_result {
806 ([][$hcx:expr, $result:expr]) => {{
807 dep_graph::hash_result($hcx, &$result)
808 }};
809 ([no_hash $($rest:tt)*][$hcx:expr, $result:expr]) => {{
810 None
811 }};
812 ([$other:ident $(($($other_args:tt)*))* $(, $($modifiers:tt)*)*][$($args:tt)*]) => {
813 hash_result!([$($($modifiers)*)*][$($args)*])
814 };
815 }
816
817 macro_rules! define_queries {
818 (<$tcx:tt> $($category:tt {
819 $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*
820 },)*) => {
821 define_queries_inner! { <$tcx>
822 $($( $(#[$attr])* category<$category> [$($modifiers)*] fn $name: $node($K) -> $V,)*)*
823 }
824 }
825 }
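// A hedged illustration of the input shape `define_queries!` accepts; the real invocation
// is generated elsewhere in the compiler, and the category, query names, dep-node
// variants, and key/value types below are placeholders rather than real queries:
//
//     define_queries! { <'tcx>
//         TypeChecking {
//             [] fn my_query: MyQuery(DefId) -> bool,
//             [anon, fatal_cycle] fn my_other_query: MyOtherQuery(DefId) -> bool,
//         },
//     }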
826
827 macro_rules! define_queries_inner {
828 (<$tcx:tt>
829 $($(#[$attr:meta])* category<$category:tt>
830 [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => {
831
832 use std::mem;
833 use crate::{
834 rustc_data_structures::stable_hasher::HashStable,
835 rustc_data_structures::stable_hasher::StableHasher,
836 ich::StableHashingContext
837 };
838 use rustc_data_structures::profiling::ProfileCategory;
839
840 define_queries_struct! {
841 tcx: $tcx,
842 input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
843 }
844
845 impl<$tcx> Queries<$tcx> {
846 pub fn new(
847 providers: IndexVec<CrateNum, Providers<$tcx>>,
848 fallback_extern_providers: Providers<$tcx>,
849 on_disk_cache: OnDiskCache<'tcx>,
850 ) -> Self {
851 Queries {
852 providers,
853 fallback_extern_providers: Box::new(fallback_extern_providers),
854 on_disk_cache,
855 $($name: Default::default()),*
856 }
857 }
858
859 pub fn try_collect_active_jobs(
860 &self
861 ) -> Option<FxHashMap<QueryJobId, QueryJobInfo<'tcx>>> {
862 let mut jobs = FxHashMap::default();
863
864 $(
865 // We use try_lock_shards here since we are called from the
866 // deadlock handler, and we must not block if a shard lock is already held.
867 let shards = self.$name.shards.try_lock_shards()?;
868 let shards = shards.iter().enumerate();
869 jobs.extend(shards.flat_map(|(shard_id, shard)| {
870 shard.active.iter().filter_map(move |(k, v)| {
871 if let QueryResult::Started(ref job) = *v {
872 let id = QueryJobId {
873 job: job.id,
874 shard: u16::try_from(shard_id).unwrap(),
875 kind:
876 <queries::$name<'tcx> as QueryAccessors<'tcx>>::dep_kind(),
877 };
878 let info = QueryInfo {
879 span: job.span,
880 query: queries::$name::query(k.clone())
881 };
882 Some((id, QueryJobInfo { info, job: job.clone() }))
883 } else {
884 None
885 }
886 })
887 }));
888 )*
889
890 Some(jobs)
891 }
892 }
893
894 #[allow(nonstandard_style)]
895 #[derive(Clone, Debug)]
896 pub enum Query<$tcx> {
897 $($(#[$attr])* $name($K)),*
898 }
899
900 impl<$tcx> Query<$tcx> {
901 pub fn name(&self) -> &'static str {
902 match *self {
903 $(Query::$name(_) => stringify!($name),)*
904 }
905 }
906
907 pub fn describe(&self, tcx: TyCtxt<'_>) -> Cow<'static, str> {
908 let (r, name) = match *self {
909 $(Query::$name(key) => {
910 (queries::$name::describe(tcx, key), stringify!($name))
911 })*
912 };
913 if tcx.sess.verbose() {
914 format!("{} [{}]", r, name).into()
915 } else {
916 r
917 }
918 }
919
920 // FIXME(eddyb) Get more valid `Span`s on queries.
921 pub fn default_span(&self, tcx: TyCtxt<$tcx>, span: Span) -> Span {
922 if !span.is_dummy() {
923 return span;
924 }
925 // The `def_span` query is used to calculate `default_span`,
926 // so exit to avoid infinite recursion.
927 if let Query::def_span(..) = *self {
928 return span
929 }
930 match *self {
931 $(Query::$name(key) => key.default_span(tcx),)*
932 }
933 }
934 }
935
936 impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
937 fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
938 mem::discriminant(self).hash_stable(hcx, hasher);
939 match *self {
940 $(Query::$name(key) => key.hash_stable(hcx, hasher),)*
941 }
942 }
943 }
944
945 pub mod queries {
946 use std::marker::PhantomData;
947
948 $(#[allow(nonstandard_style)]
949 pub struct $name<$tcx> {
950 data: PhantomData<&$tcx ()>
951 })*
952 }
953
954 // This module and the functions in it exist only to provide a
955 // predictable symbol name prefix for query providers. This is helpful
956 // for analyzing queries in profilers.
957 pub(super) mod __query_compute {
958 $(#[inline(never)]
959 pub fn $name<F: FnOnce() -> R, R>(f: F) -> R {
960 f()
961 })*
962 }
963
964 $(impl<$tcx> QueryConfig<$tcx> for queries::$name<$tcx> {
965 type Key = $K;
966 type Value = $V;
967 const NAME: &'static str = stringify!($name);
968 const CATEGORY: ProfileCategory = $category;
969 }
970
971 impl<$tcx> QueryAccessors<$tcx> for queries::$name<$tcx> {
972 const ANON: bool = is_anon!([$($modifiers)*]);
973 const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
974
975 type Cache = query_storage!([$($modifiers)*][$K, $V]);
976
977 #[inline(always)]
978 fn query(key: Self::Key) -> Query<'tcx> {
979 Query::$name(key)
980 }
981
982 #[inline(always)]
983 fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<$tcx, Self> {
984 &tcx.queries.$name
985 }
986
987 #[allow(unused)]
988 #[inline(always)]
989 fn to_dep_node(tcx: TyCtxt<$tcx>, key: &Self::Key) -> DepNode {
990 DepConstructor::$node(tcx, *key)
991 }
992
993 #[inline(always)]
994 fn dep_kind() -> dep_graph::DepKind {
995 dep_graph::DepKind::$node
996 }
997
998 #[inline]
999 fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value {
1000 __query_compute::$name(move || {
1001 let provider = tcx.queries.providers.get(key.query_crate())
1002 // HACK(eddyb) it's possible crates may be loaded after
1003 // the query engine is created, and because crate loading
1004 // is not yet integrated with the query engine, such crates
1005 // would be missing appropriate entries in `providers`.
1006 .unwrap_or(&tcx.queries.fallback_extern_providers)
1007 .$name;
1008 provider(tcx, key)
1009 })
1010 }
1011
1012 fn hash_result(
1013 _hcx: &mut StableHashingContext<'_>,
1014 _result: &Self::Value
1015 ) -> Option<Fingerprint> {
1016 hash_result!([$($modifiers)*][_hcx, _result])
1017 }
1018
1019 fn handle_cycle_error(
1020 tcx: TyCtxt<'tcx>,
1021 error: CycleError<'tcx>
1022 ) -> Self::Value {
1023 handle_cycle_error!([$($modifiers)*][tcx, error])
1024 }
1025 })*
1026
1027 #[derive(Copy, Clone)]
1028 pub struct TyCtxtEnsure<'tcx> {
1029 pub tcx: TyCtxt<'tcx>,
1030 }
1031
1032 impl TyCtxtEnsure<$tcx> {
1033 $($(#[$attr])*
1034 #[inline(always)]
1035 pub fn $name(self, key: $K) {
1036 self.tcx.ensure_query::<queries::$name<'_>>(key)
1037 })*
1038 }
1039
1040 #[derive(Copy, Clone)]
1041 pub struct TyCtxtAt<'tcx> {
1042 pub tcx: TyCtxt<'tcx>,
1043 pub span: Span,
1044 }
1045
1046 impl Deref for TyCtxtAt<'tcx> {
1047 type Target = TyCtxt<'tcx>;
1048 #[inline(always)]
1049 fn deref(&self) -> &Self::Target {
1050 &self.tcx
1051 }
1052 }
1053
1054 impl TyCtxt<$tcx> {
1055 /// Returns a transparent wrapper for `TyCtxt`, which ensures queries
1056 /// are executed instead of just returning their results.
1057 #[inline(always)]
1058 pub fn ensure(self) -> TyCtxtEnsure<$tcx> {
1059 TyCtxtEnsure {
1060 tcx: self,
1061 }
1062 }
1063
1064 /// Returns a transparent wrapper for `TyCtxt` which uses
1065 /// `span` as the location of queries performed through it.
1066 #[inline(always)]
1067 pub fn at(self, span: Span) -> TyCtxtAt<$tcx> {
1068 TyCtxtAt {
1069 tcx: self,
1070 span
1071 }
1072 }
1073
1074 $($(#[$attr])*
1075 #[inline(always)]
1076 pub fn $name(self, key: $K) -> $V {
1077 self.at(DUMMY_SP).$name(key)
1078 })*
1079
1080 /// All self-profiling events generated by the query engine use
1081 /// virtual `StringId`s for their `event_id`. This method makes all
1082 /// those virtual `StringId`s point to actual strings.
1083 ///
1084 /// If we are recording only summary data, the ids will point to
1085 /// just the query names. If we are recording query keys too, we
1086 /// allocate the corresponding strings here.
1087 pub fn alloc_self_profile_query_strings(self) {
1088 use crate::ty::query::profiling_support::{
1089 alloc_self_profile_query_strings_for_query_cache,
1090 QueryKeyStringCache,
1091 };
1092
1093 if !self.prof.enabled() {
1094 return;
1095 }
1096
1097 let mut string_cache = QueryKeyStringCache::new();
1098
1099 $({
1100 alloc_self_profile_query_strings_for_query_cache(
1101 self,
1102 stringify!($name),
1103 &self.queries.$name,
1104 &mut string_cache,
1105 );
1106 })*
1107 }
1108 }
1109
1110 impl TyCtxtAt<$tcx> {
1111 $($(#[$attr])*
1112 #[inline(always)]
1113 pub fn $name(self, key: $K) -> $V {
1114 self.tcx.get_query::<queries::$name<'_>>(self.span, key)
1115 })*
1116 }
1117
1118 define_provider_struct! {
1119 tcx: $tcx,
1120 input: ($(([$($modifiers)*] [$name] [$K] [$V]))*)
1121 }
1122
1123 impl<$tcx> Copy for Providers<$tcx> {}
1124 impl<$tcx> Clone for Providers<$tcx> {
1125 fn clone(&self) -> Self { *self }
1126 }
1127 }
1128 }
1129
1130 macro_rules! define_queries_struct {
1131 (tcx: $tcx:tt,
1132 input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
1133 pub struct Queries<$tcx> {
1134 /// This provides access to the incremental compilation on-disk cache for query results.
1135 /// Do not access this directly. It is only meant to be used by
1136 /// `DepGraph::try_mark_green()` and the query infrastructure.
1137 pub(crate) on_disk_cache: OnDiskCache<'tcx>,
1138
1139 providers: IndexVec<CrateNum, Providers<$tcx>>,
1140 fallback_extern_providers: Box<Providers<$tcx>>,
1141
1142 $($(#[$attr])* $name: QueryState<$tcx, queries::$name<$tcx>>,)*
1143 }
1144 };
1145 }
1146
1147 macro_rules! define_provider_struct {
1148 (tcx: $tcx:tt,
1149 input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => {
1150 pub struct Providers<$tcx> {
1151 $(pub $name: fn(TyCtxt<$tcx>, $K) -> $R,)*
1152 }
1153
1154 impl<$tcx> Default for Providers<$tcx> {
1155 fn default() -> Self {
1156 $(fn $name<$tcx>(_: TyCtxt<$tcx>, key: $K) -> $R {
1157 bug!("`tcx.{}({:?})` unsupported by its crate",
1158 stringify!($name), key);
1159 })*
1160 Providers { $($name),* }
1161 }
1162 }
1163 };
1164 }
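// Standalone sketch of the pattern `Providers` and its `Default` impl encode above: a
// table of function pointers whose defaults `bug!` (here: panic), with providing crates
// overwriting the entries they actually implement. The types and the `provide` function
// below are illustrative, not rustc's.

struct ProviderTable {
    type_of: fn(u32) -> String,
}

impl Default for ProviderTable {
    fn default() -> Self {
        // The default entry panics, mirroring the `bug!`-ing defaults above.
        fn type_of(_key: u32) -> String {
            panic!("`type_of` has no provider registered in this crate")
        }
        ProviderTable { type_of }
    }
}

// A providing crate overrides just the entries it implements.
fn provide(providers: &mut ProviderTable) {
    providers.type_of = |key| format!("type of item #{}", key);
}

fn main() {
    let mut providers = ProviderTable::default();
    provide(&mut providers);
    assert_eq!((providers.type_of)(42), "type of item #42");
}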