]>
Commit | Line | Data |
---|---|---|
1 | //! The implementation of the query system itself. This defines the macros that | |
2 | //! generate the actual methods on tcx which find and execute the provider, | |
3 | //! manage the caches, and so forth. | |
4 | ||
5 | use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams}; | |
6 | use crate::dep_graph::{DepGraphData, HasDepContext}; | |
7 | use crate::ich::StableHashingContext; | |
8 | use crate::query::caches::QueryCache; | |
9 | #[cfg(parallel_compiler)] | |
10 | use crate::query::job::QueryLatch; | |
11 | use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo}; | |
12 | use crate::query::SerializedDepNodeIndex; | |
13 | use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame}; | |
14 | use crate::HandleCycleError; | |
15 | use rustc_data_structures::fingerprint::Fingerprint; | |
16 | use rustc_data_structures::fx::FxHashMap; | |
17 | use rustc_data_structures::stack::ensure_sufficient_stack; | |
18 | use rustc_data_structures::sync::Lock; | |
19 | #[cfg(parallel_compiler)] | |
20 | use rustc_data_structures::{cold_path, sharded::Sharded}; | |
21 | use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError}; | |
22 | use rustc_span::{Span, DUMMY_SP}; | |
23 | use std::cell::Cell; | |
24 | use std::collections::hash_map::Entry; | |
25 | use std::fmt::Debug; | |
26 | use std::hash::Hash; | |
27 | use std::mem; | |
28 | use thin_vec::ThinVec; | |
29 | ||
30 | use super::QueryConfig; | |
31 | ||
/// Tracks the set of currently-active (executing or poisoned) jobs for one
/// query, keyed by the query key. The map's concrete shape depends on
/// whether the compiler is built with parallel-compiler support.
pub struct QueryState<K, D: DepKind> {
    // Parallel build: shard the map to reduce lock contention between threads.
    #[cfg(parallel_compiler)]
    active: Sharded<FxHashMap<K, QueryResult<D>>>,
    // Non-parallel build: a single lock suffices.
    #[cfg(not(parallel_compiler))]
    active: Lock<FxHashMap<K, QueryResult<D>>>,
}
38 | ||
/// Indicates the state of a query for a given key in a query map.
enum QueryResult<D: DepKind> {
    /// An already executing query. The query job can be used to await for its completion.
    Started(QueryJob<D>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error
    /// (see the `Poisoned` arm in `try_execute_query`, which calls `FatalError.raise()`).
    Poisoned,
}
48 | ||
impl<K, D> QueryState<K, D>
where
    K: Eq + Hash + Copy + Debug,
    D: DepKind,
{
    /// Returns `true` if no job for this query is currently active
    /// (i.e. the `active` map is empty in every shard).
    pub fn all_inactive(&self) -> bool {
        #[cfg(parallel_compiler)]
        {
            let shards = self.active.lock_shards();
            shards.iter().all(|shard| shard.is_empty())
        }
        #[cfg(not(parallel_compiler))]
        {
            self.active.lock().is_empty()
        }
    }

    /// Collects every currently running job for this query into `jobs`,
    /// constructing a `QueryStackFrame` for each via `make_query`.
    ///
    /// Returns `None` if a lock could not be acquired without blocking —
    /// this is called from the deadlock handler, which must never block.
    pub fn try_collect_active_jobs<Qcx: Copy>(
        &self,
        qcx: Qcx,
        make_query: fn(Qcx, K) -> QueryStackFrame<D>,
        jobs: &mut QueryMap<D>,
    ) -> Option<()> {
        #[cfg(parallel_compiler)]
        {
            // We use try_lock_shards here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            let shards = self.active.try_lock_shards()?;
            for shard in shards.iter() {
                for (k, v) in shard.iter() {
                    if let QueryResult::Started(ref job) = *v {
                        let query = make_query(qcx, *k);
                        jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                    }
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            // We use try_lock here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
            // really hurt much.)
            for (k, v) in self.active.try_lock()?.iter() {
                if let QueryResult::Started(ref job) = *v {
                    let query = make_query(qcx, *k);
                    jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                }
            }
        }

        Some(())
    }
}
103 | ||
104 | impl<K, D: DepKind> Default for QueryState<K, D> { | |
105 | fn default() -> QueryState<K, D> { | |
106 | QueryState { active: Default::default() } | |
107 | } | |
108 | } | |
109 | ||
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, K, D: DepKind>
where
    K: Eq + Hash + Copy,
{
    /// The state table whose `active` map holds the entry for this job.
    state: &'tcx QueryState<K, D>,
    /// The key of the query this job is responsible for computing.
    key: K,
}
119 | ||
120 | #[cold] | |
121 | #[inline(never)] | |
122 | fn mk_cycle<Q, Qcx>( | |
123 | query: Q, | |
124 | qcx: Qcx, | |
125 | cycle_error: CycleError<Qcx::DepKind>, | |
126 | handler: HandleCycleError, | |
127 | ) -> Q::Value | |
128 | where | |
129 | Q: QueryConfig<Qcx>, | |
130 | Qcx: QueryContext, | |
131 | { | |
132 | let error = report_cycle(qcx.dep_context().sess(), &cycle_error); | |
133 | handle_cycle_error(query, qcx, &cycle_error, error, handler) | |
134 | } | |
135 | ||
/// Recovers from a reported query cycle according to `handler`: emit the
/// error and synthesize a placeholder value, abort compilation, or record
/// the error as a delayed bug.
fn handle_cycle_error<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    cycle_error: &CycleError<Qcx::DepKind>,
    mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
    handler: HandleCycleError,
) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    use HandleCycleError::*;
    match handler {
        Error => {
            // Emit the diagnostic, then produce a value derived from the cycle.
            error.emit();
            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
        }
        Fatal => {
            error.emit();
            qcx.dep_context().sess().abort_if_errors();
            // `abort_if_errors` aborts after the emit above, so this is unreachable.
            unreachable!()
        }
        DelayBug => {
            // Defer the error instead of emitting it immediately.
            error.delay_as_bug();
            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
        }
    }
}
164 | ||
impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
where
    K: Eq + Hash + Copy,
{
    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiter and forgets the JobOwner, so it won't poison the query
    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
    where
        C: QueryCache<Key = K>,
    {
        // Copy the fields we still need, since `mem::forget(self)` below
        // prevents the destructor from running.
        let key = self.key;
        let state = self.state;

        // Forget ourself so our destructor won't poison the query
        mem::forget(self);

        // Mark as complete before we remove the job from the active state
        // so no other thread can re-execute this query.
        cache.complete(key, result, dep_node_index);

        let job = {
            #[cfg(parallel_compiler)]
            let mut lock = state.active.get_shard_by_value(&key).lock();
            #[cfg(not(parallel_compiler))]
            let mut lock = state.active.lock();
            // The entry must exist and be `Started`: it was inserted when the
            // job began, and only `complete` or the `Drop` impl removes it.
            match lock.remove(&key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            }
        };

        // Wake any threads blocked waiting on this query.
        job.signal_complete();
    }
}
199 | ||
impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D>
where
    K: Eq + Hash + Copy,
    D: DepKind,
{
    /// Runs only when the query panicked (the success path calls
    /// `mem::forget` in `complete`). Replaces the active entry with
    /// `Poisoned` so waiters observe the failure.
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let job = {
            #[cfg(parallel_compiler)]
            let mut shard = state.active.get_shard_by_value(&self.key).lock();
            #[cfg(not(parallel_compiler))]
            let mut shard = state.active.lock();
            // Swap the `Started` entry for `Poisoned` while holding the lock.
            let job = match shard.remove(&self.key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            shard.insert(self.key, QueryResult::Poisoned);
            job
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}
227 | ||
/// Information about a detected query cycle, used to build the diagnostic.
#[derive(Clone)]
pub(crate) struct CycleError<D: DepKind> {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, QueryStackFrame<D>)>,
    /// The stack of queries participating in the cycle.
    pub cycle: Vec<QueryInfo<D>>,
}
234 | ||
235 | /// Checks if the query is already computed and in the cache. | |
236 | /// It returns the shard index and a lock guard to the shard, | |
237 | /// which will be used if the query is not in the cache and we need | |
238 | /// to compute it. | |
239 | #[inline] | |
240 | pub fn try_get_cached<Tcx, C>(tcx: Tcx, cache: &C, key: &C::Key) -> Option<C::Value> | |
241 | where | |
242 | C: QueryCache, | |
243 | Tcx: DepContext, | |
244 | { | |
245 | match cache.lookup(&key) { | |
246 | Some((value, index)) => { | |
247 | tcx.profiler().query_cache_hit(index.into()); | |
248 | tcx.dep_graph().read_index(index); | |
249 | Some(value) | |
250 | } | |
251 | None => None, | |
252 | } | |
253 | } | |
254 | ||
/// Handles a detected query cycle in the non-parallel compiler: locates the
/// cycle in the current query stack and reports it via `mk_cycle`.
#[cold]
#[inline(never)]
#[cfg(not(parallel_compiler))]
fn cycle_error<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    try_execute: QueryJobId,
    span: Span,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Walk the active-job map to reconstruct the cycle ending at `try_execute`.
    let error = try_execute.find_cycle_in_stack(
        qcx.try_collect_active_jobs().unwrap(),
        &qcx.current_query_job(),
        span,
    );
    // No dep-node index: the value is a cycle-recovery placeholder.
    (mk_cycle(query, qcx, error, query.handle_cycle_error()), None)
}
275 | ||
/// Blocks until the query for `key`, currently running on another thread,
/// has completed, then returns its now-cached value. If the wait detects a
/// query cycle, the cycle is reported and a recovery value is returned.
#[inline(always)]
#[cfg(parallel_compiler)]
fn wait_for_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    latch: QueryLatch<Qcx::DepKind>,
    current: Option<QueryJobId>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // For parallel queries, we'll block and wait until the query running
    // in another thread has completed. Record how long we wait in the
    // self-profiler.
    let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();

    // With parallel queries we might just have to wait on some other
    // thread.
    let result = latch.wait_on(current, span);

    match result {
        Ok(()) => {
            // The other thread stores the value before signaling the latch,
            // so a miss here indicates a bug in the completion protocol.
            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
                cold_path(|| panic!("value must be in cache after waiting"))
            };

            qcx.dep_context().profiler().query_cache_hit(index.into());
            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());

            (v, Some(index))
        }
        Err(cycle) => (mk_cycle(query, qcx, cycle, query.handle_cycle_error()), None),
    }
}
313 | ||
/// Starts executing the query for `key`, or — if another job already owns
/// it — either waits for that job (parallel compiler) or reports a cycle
/// (non-parallel compiler, where a second attempt can only mean recursion).
#[inline(never)]
fn try_execute_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    dep_node: Option<DepNode<Qcx::DepKind>>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let state = query.query_state(qcx);
    #[cfg(parallel_compiler)]
    let mut state_lock = state.active.get_shard_by_value(&key).lock();
    #[cfg(not(parallel_compiler))]
    let mut state_lock = state.active.lock();

    // For the parallel compiler we need to check both the query cache and query state structures
    // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
    // query is not still executing. Without checking the query cache here, we can end up
    // re-executing the query since `try_start` only checks that the query is not currently
    // executing, but another thread may have already completed the query and stores it result
    // in the query cache.
    if cfg!(parallel_compiler) && qcx.dep_context().sess().threads() > 1 {
        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
            qcx.dep_context().profiler().query_cache_hit(index.into());
            return (value, Some(index));
        }
    }

    let current_job_id = qcx.current_query_job();

    match state_lock.entry(key) {
        Entry::Vacant(entry) => {
            // Nothing has computed or is computing the query, so we start a new job and insert it in the
            // state map.
            let id = qcx.next_job_id();
            let job = QueryJob::new(id, span, current_job_id);
            entry.insert(QueryResult::Started(job));

            // Drop the lock before we start executing the query
            drop(state_lock);

            execute_job(query, qcx, state, key, id, dep_node)
        }
        Entry::Occupied(mut entry) => {
            match entry.get_mut() {
                #[cfg(not(parallel_compiler))]
                QueryResult::Started(job) => {
                    let id = job.id;
                    drop(state_lock);

                    // If we are single-threaded we know that we have cycle error,
                    // so we just return the error.
                    cycle_error(query, qcx, id, span)
                }
                #[cfg(parallel_compiler)]
                QueryResult::Started(job) => {
                    // Get the latch out
                    let latch = job.latch();
                    drop(state_lock);

                    wait_for_query(query, qcx, span, key, latch, current_job_id)
                }
                // The earlier job panicked; propagate as a fatal error.
                QueryResult::Poisoned => FatalError.raise(),
            }
        }
    }
}
384 | ||
/// Runs the query provider for `key` (incrementally or not, depending on
/// whether dep-graph data exists), stores the result in the query cache,
/// and signals any waiters via `JobOwner::complete`.
#[inline(always)]
fn execute_job<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    state: &QueryState<Q::Key, Qcx::DepKind>,
    key: Q::Key,
    id: QueryJobId,
    dep_node: Option<DepNode<Qcx::DepKind>>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Use `JobOwner` so the query will be poisoned if executing it panics.
    let job_owner = JobOwner { state, key };

    // Incremental path only when the dep graph has backing data.
    let (result, dep_node_index) = match qcx.dep_context().dep_graph().data() {
        None => execute_job_non_incr(query, qcx, key, id),
        Some(data) => execute_job_incr(query, qcx, data, key, dep_node, id),
    };

    let cache = query.query_cache(qcx);
    if query.feedable() {
        // We should not compute queries that also got a value via feeding.
        // This can't happen, as query feeding adds the very dependencies to the fed query
        // as its feeding query had. So if the fed query is red, so is its feeder, which will
        // get evaluated first, and re-feed the query.
        if let Some((cached_result, _)) = cache.lookup(&key) {
            panic!(
                "fed query later has its value computed. The already cached value: {}",
                (query.format_value())(&cached_result)
            );
        }
    }
    job_owner.complete(cache, result, dep_node_index);

    (result, Some(dep_node_index))
}
423 | ||
// Fast path for when incr. comp. is off.
/// Computes the query directly, assigning a virtual dep-node index since no
/// real dependency tracking is active.
#[inline(always)]
fn execute_job_non_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: Q::Key,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());

    // Fingerprint the key, just to assert that it doesn't
    // have anything we don't consider hashable
    if cfg!(debug_assertions) {
        let _ = key.to_fingerprint(*qcx.dep_context());
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();
    let result = qcx.start_query(job_id, query.depth_limit(), None, || query.compute(qcx, key));
    let dep_node_index = qcx.dep_context().dep_graph().next_virtual_depnode_index();
    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Similarly, fingerprint the result to assert that
    // it doesn't have anything not considered hashable.
    if cfg!(debug_assertions) && let Some(hash_result) = query.hash_result() {
        qcx.dep_context().with_stable_hashing_context(|mut hcx| {
            hash_result(&mut hcx, &result);
        });
    }

    (result, dep_node_index)
}
459 | ||
/// Computes the query under incremental compilation: first tries to mark the
/// dep node green and reuse the cached result; otherwise executes the
/// provider inside a dep-graph task and records any emitted side effects.
#[inline(always)]
fn execute_job_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    dep_graph_data: &DepGraphData<Qcx::DepKind>,
    key: Q::Key,
    mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Anon and eval_always queries can never be green, so skip the attempt.
    if !query.anon() && !query.eval_always() {
        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node =
            dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key));

        // The diagnostics for this query will be promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        if let Some(ret) = qcx.start_query(job_id, false, None, || {
            try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, &dep_node)
        }) {
            return ret;
        }
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();
    // Diagnostics emitted during execution are collected here so they can
    // be stored as side effects for replay.
    let diagnostics = Lock::new(ThinVec::new());

    let (result, dep_node_index) =
        qcx.start_query(job_id, query.depth_limit(), Some(&diagnostics), || {
            if query.anon() {
                return dep_graph_data.with_anon_task(*qcx.dep_context(), query.dep_kind(), || {
                    query.compute(qcx, key)
                });
            }

            // `to_dep_node` is expensive for some `DepKind`s.
            let dep_node =
                dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));

            dep_graph_data.with_task(
                dep_node,
                (qcx, query),
                key,
                |(qcx, query), key| query.compute(qcx, key),
                query.hash_result(),
            )
        });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    let diagnostics = diagnostics.into_inner();
    let side_effects = QuerySideEffects { diagnostics };

    if std::intrinsics::unlikely(!side_effects.is_empty()) {
        if query.anon() {
            qcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
        } else {
            qcx.store_side_effects(dep_node_index, side_effects);
        }
    }

    (result, dep_node_index)
}
526 | ||
/// If `dep_node` can be marked green, loads the query result from the
/// on-disk cache (or, failing that, recomputes it with dependency tracking
/// disabled) and returns it with the green `DepNodeIndex`. Returns `None`
/// if the node could not be marked green, meaning the query must execute
/// normally.
#[inline(always)]
fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
    query: Q,
    dep_graph_data: &DepGraphData<Qcx::DepKind>,
    qcx: Qcx,
    key: &Q::Key,
    dep_node: &DepNode<Qcx::DepKind>,
) -> Option<(Q::Value, DepNodeIndex)>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Note this function can be called concurrently from the same query
    // We must ensure that this is handled correctly.

    let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, &dep_node)?;

    debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));

    // First we try to load the result from the on-disk cache.
    // Some things are never cached on disk.
    if let Some(try_load_from_disk) = query.try_load_from_disk(qcx, &key) {
        let prof_timer = qcx.dep_context().profiler().incr_cache_loading();

        // The call to `with_query_deserialization` enforces that no new `DepNodes`
        // are created during deserialization. See the docs of that method for more
        // details.
        let result = qcx
            .dep_context()
            .dep_graph()
            .with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index));

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some(result) = result {
            if std::intrinsics::unlikely(
                qcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
            ) {
                dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
            }

            let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
            // If `-Zincremental-verify-ich` is specified, re-hash results from
            // the cache and make sure that they have the expected fingerprint.
            //
            // If not, we still seek to verify a subset of fingerprints loaded
            // from disk. Re-hashing results is fairly expensive, so we can't
            // currently afford to verify every hash. This subset should still
            // give us some coverage of potential bugs though.
            let try_verify = prev_fingerprint.as_value().1 % 32 == 0;
            if std::intrinsics::unlikely(
                try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
            ) {
                incremental_verify_ich(
                    *qcx.dep_context(),
                    dep_graph_data,
                    &result,
                    prev_dep_node_index,
                    query.hash_result(),
                    query.format_value(),
                );
            }

            return Some((result, dep_node_index));
        }

        // We always expect to find a cached result for things that
        // can be forced from `DepNode`.
        debug_assert!(
            !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
            "missing on-disk cache entry for reconstructible {dep_node:?}"
        );
    }

    // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
    // we should actually be able to load it.
    debug_assert!(
        !query.loadable_from_disk(qcx, &key, prev_dep_node_index),
        "missing on-disk cache entry for loadable {dep_node:?}"
    );

    // We could not load a result from the on-disk cache, so
    // recompute.
    let prof_timer = qcx.dep_context().profiler().query_provider();

    // The dep-graph for this computation is already in-place.
    let result = qcx.dep_context().dep_graph().with_ignore(|| query.compute(qcx, *key));

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Verify that re-running the query produced a result with the expected hash
    // This catches bugs in query implementations, turning them into ICEs.
    // For example, a query might sort its result by `DefId` - since `DefId`s are
    // not stable across compilation sessions, the result could get up getting sorted
    // in a different order when the query is re-run, even though all of the inputs
    // (e.g. `DefPathHash` values) were green.
    //
    // See issue #82920 for an example of a miscompilation that would get turned into
    // an ICE by this check
    incremental_verify_ich(
        *qcx.dep_context(),
        dep_graph_data,
        &result,
        prev_dep_node_index,
        query.hash_result(),
        query.format_value(),
    );

    Some((result, dep_node_index))
}
637 | ||
/// Re-hashes `result` and compares it against the fingerprint stored for the
/// previous session's node at `prev_index`, panicking (via the cold helpers)
/// on mismatch or if the node is unexpectedly not green.
#[inline]
#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
pub(crate) fn incremental_verify_ich<Tcx, V>(
    tcx: Tcx,
    dep_graph_data: &DepGraphData<Tcx::DepKind>,
    result: &V,
    prev_index: SerializedDepNodeIndex,
    hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
    format_value: fn(&V) -> String,
) where
    Tcx: DepContext,
{
    if !dep_graph_data.is_index_green(prev_index) {
        incremental_verify_ich_not_green(tcx, prev_index)
    }

    // Queries with no hash function verify against a zero fingerprint.
    let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
    });

    let old_hash = dep_graph_data.prev_fingerprint_of(prev_index);

    if new_hash != old_hash {
        incremental_verify_ich_failed(tcx, prev_index, &|| format_value(&result));
    }
}
664 | ||
665 | #[cold] | |
666 | #[inline(never)] | |
667 | fn incremental_verify_ich_not_green<Tcx>(tcx: Tcx, prev_index: SerializedDepNodeIndex) | |
668 | where | |
669 | Tcx: DepContext, | |
670 | { | |
671 | panic!( | |
672 | "fingerprint for green query instance not loaded from cache: {:?}", | |
673 | tcx.dep_graph().data().unwrap().prev_node_of(prev_index) | |
674 | ) | |
675 | } | |
676 | ||
// Note that this is marked #[cold] and intentionally takes `dyn Debug` for `result`,
// as we want to avoid generating a bunch of different implementations for LLVM to
// chew on (and filling up the final binary, too).
/// Emits the "unstable fingerprints" error for the node at `prev_index`,
/// then panics with the formatted query result. Guards against re-entrant
/// invocation (formatting the result may itself run queries and fail again).
#[cold]
#[inline(never)]
fn incremental_verify_ich_failed<Tcx>(
    tcx: Tcx,
    prev_index: SerializedDepNodeIndex,
    result: &dyn Fn() -> String,
) where
    Tcx: DepContext,
{
    // When we emit an error message and panic, we try to debug-print the `DepNode`
    // and query result. Unfortunately, this can cause us to run additional queries,
    // which may result in another fingerprint mismatch while we're in the middle
    // of processing this one. To avoid a double-panic (which kills the process
    // before we can print out the query static), we print out a terse
    // but 'safe' message if we detect a re-entrant call to this method.
    thread_local! {
        static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
    };

    let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));

    if old_in_panic {
        // Re-entrant call: emit only the terse message, no formatting.
        tcx.sess().emit_err(crate::error::Reentrant);
    } else {
        let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
            format!("`cargo clean -p {crate_name}` or `cargo clean`")
        } else {
            "`cargo clean`".to_string()
        };

        let dep_node = tcx.dep_graph().data().unwrap().prev_node_of(prev_index);
        tcx.sess().emit_err(crate::error::IncrementCompilation {
            run_cmd,
            dep_node: format!("{dep_node:?}"),
        });
        panic!("Found unstable fingerprints for {dep_node:?}: {}", result());
    }

    INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
}
720 | ||
/// Ensure that either this query has all green inputs or been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
/// Returns true if the query should still run.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
fn ensure_must_run<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: &Q::Key,
    check_cache: bool,
) -> (bool, Option<DepNode<Qcx::DepKind>>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // `eval_always` queries can never be green; they must always run.
    if query.eval_always() {
        return (true, None);
    }

    // Ensuring an anonymous query makes no sense
    assert!(!query.anon());

    let dep_node = query.construct_dep_node(*qcx.dep_context(), key);

    let dep_graph = qcx.dep_context().dep_graph();
    let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
        None => {
            // A None return from `try_mark_green` means that this is either
            // a new dep node or that the dep node has already been marked red.
            // Either way, we can't call `dep_graph.read()` as we don't have the
            // DepNodeIndex. We must invoke the query itself. The performance cost
            // this introduces should be negligible as we'll immediately hit the
            // in-memory cache, or another query down the line will.
            return (true, Some(dep_node));
        }
        Some((serialized_dep_node_index, dep_node_index)) => {
            // Green: register the read and treat it as a cache hit.
            dep_graph.read_index(dep_node_index);
            qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            serialized_dep_node_index
        }
    };

    // We do not need the value at all, so do not check the cache.
    if !check_cache {
        return (false, None);
    }

    // Green but not loadable from disk: the query must still run.
    let loadable = query.loadable_from_disk(qcx, key, serialized_dep_node_index);
    (!loadable, Some(dep_node))
}
775 | ||
/// How `get_query` should treat the requested query.
#[derive(Debug)]
pub enum QueryMode {
    /// Compute (or fetch) the query's value.
    Get,
    /// Only ensure the query has been executed or its inputs are green;
    /// `check_cache` additionally requires the result to be loadable.
    Ensure { check_cache: bool },
}
781 | ||
/// Entry point for query execution. In `Ensure` mode this may skip running
/// the query entirely (returning `None`) when its inputs are green; in `Get`
/// mode it always produces the query's value.
#[inline(always)]
pub fn get_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    mode: QueryMode,
) -> Option<Q::Value>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let dep_node = if let QueryMode::Ensure { check_cache } = mode {
        let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
        if !must_run {
            return None;
        }
        dep_node
    } else {
        None
    };

    // Grow the stack if necessary: query evaluation can recurse deeply.
    let (result, dep_node_index) =
        ensure_sufficient_stack(|| try_execute_query(query, qcx, span, key, dep_node));
    if let Some(dep_node_index) = dep_node_index {
        qcx.dep_context().dep_graph().read_index(dep_node_index)
    }
    Some(result)
}
811 | ||
/// Forces execution of the query for `key` with a pre-constructed
/// `dep_node`, unless its value is already cached.
pub fn force_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: Q::Key,
    dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
) where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // We may be concurrently trying both execute and force a query.
    // Ensure that only one of them runs the query.
    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
        qcx.dep_context().profiler().query_cache_hit(index.into());
        return;
    }

    // Anonymous queries have no `DepNode` and thus cannot be forced.
    debug_assert!(!query.anon());

    ensure_sufficient_stack(|| try_execute_query(query, qcx, DUMMY_SP, key, Some(dep_node)));
}