]>
Commit | Line | Data |
---|---|---|
ba9703b0 XL |
1 | //! The implementation of the query system itself. This defines the macros that |
2 | //! generate the actual methods on tcx which find and execute the provider, | |
3 | //! manage the caches, and so forth. | |
4 | ||
9ffffee4 | 5 | use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams}; |
353b0b11 | 6 | use crate::dep_graph::{DepGraphData, HasDepContext}; |
487cf647 | 7 | use crate::ich::StableHashingContext; |
ba9703b0 | 8 | use crate::query::caches::QueryCache; |
353b0b11 FG |
9 | #[cfg(parallel_compiler)] |
10 | use crate::query::job::QueryLatch; | |
5099ac24 | 11 | use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo}; |
353b0b11 | 12 | use crate::query::SerializedDepNodeIndex; |
94222f64 | 13 | use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame}; |
f2b60f7d | 14 | use crate::HandleCycleError; |
ba9703b0 | 15 | use rustc_data_structures::fingerprint::Fingerprint; |
5e7ed085 | 16 | use rustc_data_structures::fx::FxHashMap; |
9ffffee4 | 17 | use rustc_data_structures::stack::ensure_sufficient_stack; |
5e7ed085 | 18 | use rustc_data_structures::sync::Lock; |
353b0b11 FG |
19 | #[cfg(parallel_compiler)] |
20 | use rustc_data_structures::{cold_path, sharded::Sharded}; | |
5e7ed085 | 21 | use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError}; |
17df50a5 | 22 | use rustc_span::{Span, DUMMY_SP}; |
94222f64 | 23 | use std::cell::Cell; |
ba9703b0 | 24 | use std::collections::hash_map::Entry; |
5869c6ff | 25 | use std::fmt::Debug; |
5e7ed085 | 26 | use std::hash::Hash; |
ba9703b0 | 27 | use std::mem; |
f2b60f7d | 28 | use thin_vec::ThinVec; |
ba9703b0 | 29 | |
487cf647 FG |
30 | use super::QueryConfig; |
31 | ||
/// Tracks which queries are currently executing (or have panicked).
/// Keyed by the query key; sharded under the parallel compiler to
/// reduce lock contention.
pub struct QueryState<K, D: DepKind> {
    #[cfg(parallel_compiler)]
    active: Sharded<FxHashMap<K, QueryResult<D>>>,
    #[cfg(not(parallel_compiler))]
    active: Lock<FxHashMap<K, QueryResult<D>>>,
}
38 | ||
/// Indicates the state of a query for a given key in a query map.
enum QueryResult<D: DepKind> {
    /// An already executing query. The query job can be used to await for its completion.
    Started(QueryJob<D>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error which will
    /// silently panic.
    Poisoned,
}
48 | ||
impl<K, D> QueryState<K, D>
where
    K: Eq + Hash + Copy + Debug,
    D: DepKind,
{
    /// Returns `true` if no query with this state is currently executing.
    pub fn all_inactive(&self) -> bool {
        #[cfg(parallel_compiler)]
        {
            let shards = self.active.lock_shards();
            shards.iter().all(|shard| shard.is_empty())
        }
        #[cfg(not(parallel_compiler))]
        {
            self.active.lock().is_empty()
        }
    }

    /// Collects all currently-running jobs for this query into `jobs`.
    /// Returns `None` if any lock could not be acquired without blocking
    /// (expected when called from the deadlock handler).
    pub fn try_collect_active_jobs<Qcx: Copy>(
        &self,
        qcx: Qcx,
        make_query: fn(Qcx, K) -> QueryStackFrame<D>,
        jobs: &mut QueryMap<D>,
    ) -> Option<()> {
        #[cfg(parallel_compiler)]
        {
            // We use try_lock_shards here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            let shards = self.active.try_lock_shards()?;
            for shard in shards.iter() {
                for (k, v) in shard.iter() {
                    if let QueryResult::Started(ref job) = *v {
                        let query = make_query(qcx, *k);
                        jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                    }
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            // We use try_lock here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
            // really hurt much.)
            for (k, v) in self.active.try_lock()?.iter() {
                if let QueryResult::Started(ref job) = *v {
                    let query = make_query(qcx, *k);
                    jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                }
            }
        }

        Some(())
    }
}
103 | ||
impl<K, D: DepKind> Default for QueryState<K, D> {
    fn default() -> QueryState<K, D> {
        // An empty `active` map: no queries running, none poisoned.
        QueryState { active: Default::default() }
    }
}
109 | ||
ba9703b0 XL |
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, K, D: DepKind>
where
    K: Eq + Hash + Copy,
{
    state: &'tcx QueryState<K, D>,
    key: K,
}
119 | ||
/// Reports a query cycle to the user and produces the query's cycle-error
/// value according to its `HandleCycleError` strategy.
#[cold]
#[inline(never)]
fn mk_cycle<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    cycle_error: CycleError<Qcx::DepKind>,
    handler: HandleCycleError,
) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
    handle_cycle_error(query, qcx, &cycle_error, error, handler)
}
135 | ||
/// Emits (or delays) the prepared cycle diagnostic and produces a value for
/// the query, depending on the query's declared cycle-handling strategy.
fn handle_cycle_error<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    cycle_error: &CycleError<Qcx::DepKind>,
    mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
    handler: HandleCycleError,
) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    use HandleCycleError::*;
    match handler {
        Error => {
            // Emit the diagnostic and synthesize a recovery value.
            error.emit();
            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
        }
        Fatal => {
            // Emitting the error guarantees `abort_if_errors` aborts,
            // so the tail of this arm is unreachable.
            error.emit();
            qcx.dep_context().sess().abort_if_errors();
            unreachable!()
        }
        DelayBug => {
            // Record as a delayed bug; still produce a recovery value so
            // compilation can proceed.
            error.delay_as_bug();
            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
        }
    }
}
164 | ||
impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
where
    K: Eq + Hash + Copy,
{
    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiter and forgets the JobOwner, so it won't poison the query
    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
    where
        C: QueryCache<Key = K>,
    {
        let key = self.key;
        let state = self.state;

        // Forget ourself so our destructor won't poison the query
        mem::forget(self);

        // Mark as complete before we remove the job from the active state
        // so no other thread can re-execute this query.
        cache.complete(key, result, dep_node_index);

        let job = {
            #[cfg(parallel_compiler)]
            let mut lock = state.active.get_shard_by_value(&key).lock();
            #[cfg(not(parallel_compiler))]
            let mut lock = state.active.lock();
            // The entry must exist and be `Started`: we own the job, and
            // poisoning only happens in our (forgotten) destructor.
            match lock.remove(&key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            }
        };

        job.signal_complete();
    }
}
199 | ||
impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D>
where
    K: Eq + Hash + Copy,
    D: DepKind,
{
    // Only runs if the query panicked: the success path calls
    // `mem::forget(self)` in `complete`.
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let job = {
            #[cfg(parallel_compiler)]
            let mut shard = state.active.get_shard_by_value(&self.key).lock();
            #[cfg(not(parallel_compiler))]
            let mut shard = state.active.lock();
            let job = match shard.remove(&self.key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            shard.insert(self.key, QueryResult::Poisoned);
            job
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}
227 | ||
#[derive(Clone)]
pub(crate) struct CycleError<D: DepKind> {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, QueryStackFrame<D>)>,
    /// The stack of queries forming the cycle.
    pub cycle: Vec<QueryInfo<D>>,
}
234 | ||
/// Checks if the query is already computed and in the cache.
/// On a hit, records the cache hit with the profiler and registers the
/// dependency edge via `read_index` before returning the cached value.
/// Returns `None` if the query still needs to be computed.
#[inline]
pub fn try_get_cached<Tcx, C>(tcx: Tcx, cache: &C, key: &C::Key) -> Option<C::Value>
where
    C: QueryCache,
    Tcx: DepContext,
{
    match cache.lookup(&key) {
        Some((value, index)) => {
            tcx.profiler().query_cache_hit(index.into());
            tcx.dep_graph().read_index(index);
            Some(value)
        }
        None => None,
    }
}
254 | ||
/// Single-threaded cycle handling: finding an already-started job for our
/// key on the current thread's stack is necessarily a cycle, so build and
/// report the cycle error directly.
#[cold]
#[inline(never)]
#[cfg(not(parallel_compiler))]
fn cycle_error<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    try_execute: QueryJobId,
    span: Span,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let error = try_execute.find_cycle_in_stack(
        qcx.try_collect_active_jobs().unwrap(),
        &qcx.current_query_job(),
        span,
    );
    (mk_cycle(query, qcx, error, query.handle_cycle_error()), None)
}
275 | ||
/// Parallel-compiler path: block on `latch` until the thread already running
/// this query finishes (or a cycle is detected), then read the result out of
/// the query cache.
#[inline(always)]
#[cfg(parallel_compiler)]
fn wait_for_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    latch: QueryLatch<Qcx::DepKind>,
    current: Option<QueryJobId>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // For parallel queries, we'll block and wait until the query running
    // in another thread has completed. Record how long we wait in the
    // self-profiler.
    let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();

    // With parallel queries we might just have to wait on some other
    // thread.
    let result = latch.wait_on(current, span);

    match result {
        Ok(()) => {
            // The other thread completed the query; its result must be in
            // the cache by now.
            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
                cold_path(|| panic!("value must be in cache after waiting"))
            };

            qcx.dep_context().profiler().query_cache_hit(index.into());
            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());

            (v, Some(index))
        }
        Err(cycle) => (mk_cycle(query, qcx, cycle, query.handle_cycle_error()), None),
    }
}
313 | ||
/// Entry point for running a query that missed the in-memory cache: either
/// starts a fresh job, waits on another thread's in-flight job (parallel),
/// or reports a cycle (non-parallel).
#[inline(never)]
fn try_execute_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    dep_node: Option<DepNode<Qcx::DepKind>>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let state = query.query_state(qcx);
    #[cfg(parallel_compiler)]
    let mut state_lock = state.active.get_shard_by_value(&key).lock();
    #[cfg(not(parallel_compiler))]
    let mut state_lock = state.active.lock();

    // For the parallel compiler we need to check both the query cache and query state structures
    // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
    // query is not still executing. Without checking the query cache here, we can end up
    // re-executing the query since `try_start` only checks that the query is not currently
    // executing, but another thread may have already completed the query and stores it result
    // in the query cache.
    if cfg!(parallel_compiler) && qcx.dep_context().sess().threads() > 1 {
        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
            qcx.dep_context().profiler().query_cache_hit(index.into());
            return (value, Some(index));
        }
    }

    let current_job_id = qcx.current_query_job();

    match state_lock.entry(key) {
        Entry::Vacant(entry) => {
            // Nothing has computed or is computing the query, so we start a new job and insert it in the
            // state map.
            let id = qcx.next_job_id();
            let job = QueryJob::new(id, span, current_job_id);
            entry.insert(QueryResult::Started(job));

            // Drop the lock before we start executing the query
            drop(state_lock);

            execute_job(query, qcx, state, key, id, dep_node)
        }
        Entry::Occupied(mut entry) => {
            match entry.get_mut() {
                #[cfg(not(parallel_compiler))]
                QueryResult::Started(job) => {
                    let id = job.id;
                    drop(state_lock);

                    // If we are single-threaded we know that we have cycle error,
                    // so we just return the error.
                    cycle_error(query, qcx, id, span)
                }
                #[cfg(parallel_compiler)]
                QueryResult::Started(job) => {
                    // Get the latch out
                    let latch = job.latch();
                    drop(state_lock);

                    wait_for_query(query, qcx, span, key, latch, current_job_id)
                }
                QueryResult::Poisoned => FatalError.raise(),
            }
        }
    }
}
ba9703b0 | 384 | |
/// Runs the query's provider under a `JobOwner` (so a panic poisons the
/// query), dispatching to the incremental or non-incremental path, then
/// stores the result in the query cache.
#[inline(always)]
fn execute_job<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    state: &QueryState<Q::Key, Qcx::DepKind>,
    key: Q::Key,
    id: QueryJobId,
    dep_node: Option<DepNode<Qcx::DepKind>>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Use `JobOwner` so the query will be poisoned if executing it panics.
    let job_owner = JobOwner { state, key };

    let (result, dep_node_index) = match qcx.dep_context().dep_graph().data() {
        None => execute_job_non_incr(query, qcx, key, id),
        Some(data) => execute_job_incr(query, qcx, data, key, dep_node, id),
    };

    let cache = query.query_cache(qcx);
    if query.feedable() {
        // We should not compute queries that also got a value via feeding.
        // This can't happen, as query feeding adds the very dependencies to the fed query
        // as its feeding query had. So if the fed query is red, so is its feeder, which will
        // get evaluated first, and re-feed the query.
        if let Some((cached_result, _)) = cache.lookup(&key) {
            panic!(
                "fed query later has its value computed. The already cached value: {}",
                (query.format_value())(&cached_result)
            );
        }
    }
    job_owner.complete(cache, result, dep_node_index);

    (result, Some(dep_node_index))
}
9ffffee4 | 423 | |
// Fast path for when incr. comp. is off.
#[inline(always)]
fn execute_job_non_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: Q::Key,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());

    // Fingerprint the key, just to assert that it doesn't
    // have anything we don't consider hashable
    if cfg!(debug_assertions) {
        let _ = key.to_fingerprint(*qcx.dep_context());
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();
    let result = qcx.start_query(job_id, query.depth_limit(), None, || query.compute(qcx, key));
    let dep_node_index = qcx.dep_context().dep_graph().next_virtual_depnode_index();
    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Similarly, fingerprint the result to assert that
    // it doesn't have anything not considered hashable.
    if cfg!(debug_assertions) && let Some(hash_result) = query.hash_result() {
        qcx.dep_context().with_stable_hashing_context(|mut hcx| {
            hash_result(&mut hcx, &result);
        });
    }

    (result, dep_node_index)
}
459 | ||
/// Incremental-compilation path: first tries to mark the dep node green and
/// reuse a previous result; otherwise recomputes the query under dep-graph
/// tracking and stores any diagnostics it produced as side effects.
#[inline(always)]
fn execute_job_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    dep_graph_data: &DepGraphData<Qcx::DepKind>,
    key: Q::Key,
    mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    if !query.anon() && !query.eval_always() {
        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node =
            dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key));

        // The diagnostics for this query will be promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        if let Some(ret) = qcx.start_query(job_id, false, None, || {
            try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, &dep_node)
        }) {
            return ret;
        }
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();
    let diagnostics = Lock::new(ThinVec::new());

    let (result, dep_node_index) =
        qcx.start_query(job_id, query.depth_limit(), Some(&diagnostics), || {
            if query.anon() {
                return dep_graph_data.with_anon_task(*qcx.dep_context(), query.dep_kind(), || {
                    query.compute(qcx, key)
                });
            }

            // `to_dep_node` is expensive for some `DepKind`s.
            let dep_node =
                dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));

            dep_graph_data.with_task(
                dep_node,
                (qcx, query),
                key,
                |(qcx, query), key| query.compute(qcx, key),
                query.hash_result(),
            )
        });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    let diagnostics = diagnostics.into_inner();
    let side_effects = QuerySideEffects { diagnostics };

    if std::intrinsics::unlikely(!side_effects.is_empty()) {
        if query.anon() {
            qcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
        } else {
            qcx.store_side_effects(dep_node_index, side_effects);
        }
    }

    (result, dep_node_index)
}
526 | ||
/// If the dep node can be marked green, attempts to load the previous
/// session's result from the on-disk cache; failing that, recomputes the
/// query with dependency tracking disabled (the edges are already in place).
/// Returns `None` if the node could not be marked green.
#[inline(always)]
fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
    query: Q,
    dep_graph_data: &DepGraphData<Qcx::DepKind>,
    qcx: Qcx,
    key: &Q::Key,
    dep_node: &DepNode<Qcx::DepKind>,
) -> Option<(Q::Value, DepNodeIndex)>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Note this function can be called concurrently from the same query
    // We must ensure that this is handled correctly.

    let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, &dep_node)?;

    debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));

    // First we try to load the result from the on-disk cache.
    // Some things are never cached on disk.
    if let Some(try_load_from_disk) = query.try_load_from_disk(qcx, &key) {
        let prof_timer = qcx.dep_context().profiler().incr_cache_loading();

        // The call to `with_query_deserialization` enforces that no new `DepNodes`
        // are created during deserialization. See the docs of that method for more
        // details.
        let result = qcx
            .dep_context()
            .dep_graph()
            .with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index));

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some(result) = result {
            if std::intrinsics::unlikely(
                qcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
            ) {
                dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
            }

            let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
            // If `-Zincremental-verify-ich` is specified, re-hash results from
            // the cache and make sure that they have the expected fingerprint.
            //
            // If not, we still seek to verify a subset of fingerprints loaded
            // from disk. Re-hashing results is fairly expensive, so we can't
            // currently afford to verify every hash. This subset should still
            // give us some coverage of potential bugs though.
            let try_verify = prev_fingerprint.as_value().1 % 32 == 0;
            if std::intrinsics::unlikely(
                try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
            ) {
                incremental_verify_ich(
                    *qcx.dep_context(),
                    dep_graph_data,
                    &result,
                    prev_dep_node_index,
                    query.hash_result(),
                    query.format_value(),
                );
            }

            return Some((result, dep_node_index));
        }

        // We always expect to find a cached result for things that
        // can be forced from `DepNode`.
        debug_assert!(
            !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
            "missing on-disk cache entry for reconstructible {dep_node:?}"
        );
    }

    // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
    // we should actually be able to load it.
    debug_assert!(
        !query.loadable_from_disk(qcx, &key, prev_dep_node_index),
        "missing on-disk cache entry for loadable {dep_node:?}"
    );

    // We could not load a result from the on-disk cache, so
    // recompute.
    let prof_timer = qcx.dep_context().profiler().query_provider();

    // The dep-graph for this computation is already in-place.
    let result = qcx.dep_context().dep_graph().with_ignore(|| query.compute(qcx, *key));

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Verify that re-running the query produced a result with the expected hash
    // This catches bugs in query implementations, turning them into ICEs.
    // For example, a query might sort its result by `DefId` - since `DefId`s are
    // not stable across compilation sessions, the result could get up getting sorted
    // in a different order when the query is re-run, even though all of the inputs
    // (e.g. `DefPathHash` values) were green.
    //
    // See issue #82920 for an example of a miscompilation that would get turned into
    // an ICE by this check
    incremental_verify_ich(
        *qcx.dep_context(),
        dep_graph_data,
        &result,
        prev_dep_node_index,
        query.hash_result(),
        query.format_value(),
    );

    Some((result, dep_node_index))
}
637 | ||
/// Re-hashes `result` and compares it against the fingerprint recorded in the
/// previous session; panics (via the cold helpers) on a mismatch or if the
/// node was not actually green.
#[inline]
#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
pub(crate) fn incremental_verify_ich<Tcx, V>(
    tcx: Tcx,
    dep_graph_data: &DepGraphData<Tcx::DepKind>,
    result: &V,
    prev_index: SerializedDepNodeIndex,
    hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
    format_value: fn(&V) -> String,
) where
    Tcx: DepContext,
{
    if !dep_graph_data.is_index_green(prev_index) {
        incremental_verify_ich_not_green(tcx, prev_index)
    }

    // Queries without a `hash_result` are treated as having a zero fingerprint.
    let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
    });

    let old_hash = dep_graph_data.prev_fingerprint_of(prev_index);

    if new_hash != old_hash {
        incremental_verify_ich_failed(tcx, prev_index, &|| format_value(&result));
    }
}
94222f64 | 664 | |
/// Cold path for `incremental_verify_ich`: the node we were asked to verify
/// was never marked green, which indicates a bug in the dep graph.
#[cold]
#[inline(never)]
fn incremental_verify_ich_not_green<Tcx>(tcx: Tcx, prev_index: SerializedDepNodeIndex)
where
    Tcx: DepContext,
{
    panic!(
        "fingerprint for green query instance not loaded from cache: {:?}",
        tcx.dep_graph().data().unwrap().prev_node_of(prev_index)
    )
}
676 | ||
// Note that this is marked #[cold] and takes the result lazily as a
// `&dyn Fn() -> String`, so the (expensive) formatting only happens on the
// failure path and we avoid generating a bunch of different monomorphized
// implementations for LLVM to chew on (and filling up the final binary, too).
#[cold]
#[inline(never)]
fn incremental_verify_ich_failed<Tcx>(
    tcx: Tcx,
    prev_index: SerializedDepNodeIndex,
    result: &dyn Fn() -> String,
) where
    Tcx: DepContext,
{
    // When we emit an error message and panic, we try to debug-print the `DepNode`
    // and query result. Unfortunately, this can cause us to run additional queries,
    // which may result in another fingerprint mismatch while we're in the middle
    // of processing this one. To avoid a double-panic (which kills the process
    // before we can print out the query static), we print out a terse
    // but 'safe' message if we detect a re-entrant call to this method.
    thread_local! {
        static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
    };

    let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));

    if old_in_panic {
        tcx.sess().emit_err(crate::error::Reentrant);
    } else {
        let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
            format!("`cargo clean -p {crate_name}` or `cargo clean`")
        } else {
            "`cargo clean`".to_string()
        };

        let dep_node = tcx.dep_graph().data().unwrap().prev_node_of(prev_index);
        tcx.sess().emit_err(crate::error::IncrementCompilation {
            run_cmd,
            dep_node: format!("{dep_node:?}"),
        });
        panic!("Found unstable fingerprints for {dep_node:?}: {}", result());
    }

    INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
}
720 | ||
/// Ensure that either this query has all green inputs or been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
/// Returns true if the query should still run.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
fn ensure_must_run<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: &Q::Key,
    check_cache: bool,
) -> (bool, Option<DepNode<Qcx::DepKind>>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    if query.eval_always() {
        return (true, None);
    }

    // Ensuring an anonymous query makes no sense
    assert!(!query.anon());

    let dep_node = query.construct_dep_node(*qcx.dep_context(), key);

    let dep_graph = qcx.dep_context().dep_graph();
    let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
        None => {
            // A None return from `try_mark_green` means that this is either
            // a new dep node or that the dep node has already been marked red.
            // Either way, we can't call `dep_graph.read()` as we don't have the
            // DepNodeIndex. We must invoke the query itself. The performance cost
            // this introduces should be negligible as we'll immediately hit the
            // in-memory cache, or another query down the line will.
            return (true, Some(dep_node));
        }
        Some((serialized_dep_node_index, dep_node_index)) => {
            dep_graph.read_index(dep_node_index);
            qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            serialized_dep_node_index
        }
    };

    // We do not need the value at all, so do not check the cache.
    if !check_cache {
        return (false, None);
    }

    // The node is green, but the caller also wants the value available:
    // only skip execution if the result can be loaded from disk.
    let loadable = query.loadable_from_disk(qcx, key, serialized_dep_node_index);
    (!loadable, Some(dep_node))
}
775 | ||
/// How a query invocation should behave: fetch the value, or merely ensure
/// it has been executed / its inputs are green.
#[derive(Debug)]
pub enum QueryMode {
    Get,
    Ensure { check_cache: bool },
}
781 | ||
/// Top-level query entry point (cache miss path). In `Ensure` mode, may skip
/// execution entirely when `ensure_must_run` says the result is reusable;
/// otherwise executes the query and records the dep-graph read.
#[inline(always)]
pub fn get_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    mode: QueryMode,
) -> Option<Q::Value>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let dep_node = if let QueryMode::Ensure { check_cache } = mode {
        let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
        if !must_run {
            return None;
        }
        dep_node
    } else {
        None
    };

    let (result, dep_node_index) =
        ensure_sufficient_stack(|| try_execute_query(query, qcx, span, key, dep_node));
    if let Some(dep_node_index) = dep_node_index {
        qcx.dep_context().dep_graph().read_index(dep_node_index)
    }
    Some(result)
}
811 | ||
/// Forces execution of a query for a known `dep_node` (used when replaying
/// the dep graph), unless its value is already in the in-memory cache.
pub fn force_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: Q::Key,
    dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
) where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // We may be concurrently trying both execute and force a query.
    // Ensure that only one of them runs the query.
    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
        qcx.dep_context().profiler().query_cache_hit(index.into());
        return;
    }

    // Forcing an anonymous query makes no sense; anon nodes are not replayable.
    debug_assert!(!query.anon());

    ensure_sufficient_stack(|| try_execute_query(query, qcx, DUMMY_SP, key, Some(dep_node)));
}
f9f354fc | 831 | } |