//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.

use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex};
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
use crate::values::Value;
use crate::HandleCycleError;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
#[cfg(parallel_compiler)]
use rustc_data_structures::profiling::TimingGuard;
#[cfg(parallel_compiler)]
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::Lock;
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
use rustc_session::Session;
use rustc_span::{Span, DUMMY_SP};
use std::borrow::Borrow;
use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::mem;
use std::ptr;
use thin_vec::ThinVec;

use super::QueryConfig;

pub struct QueryState<K, D: DepKind> {
    #[cfg(parallel_compiler)]
    active: Sharded<FxHashMap<K, QueryResult<D>>>,
    #[cfg(not(parallel_compiler))]
    active: Lock<FxHashMap<K, QueryResult<D>>>,
}

/// Indicates the state of a query for a given key in a query map.
enum QueryResult<D: DepKind> {
    /// An already executing query. The query job can be used to await its completion.
    Started(QueryJob<D>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error,
    /// which will silently panic.
    Poisoned,
}

impl<K, D> QueryState<K, D>
where
    K: Eq + Hash + Clone + Debug,
    D: DepKind,
{
    pub fn all_inactive(&self) -> bool {
        #[cfg(parallel_compiler)]
        {
            let shards = self.active.lock_shards();
            shards.iter().all(|shard| shard.is_empty())
        }
        #[cfg(not(parallel_compiler))]
        {
            self.active.lock().is_empty()
        }
    }

    pub fn try_collect_active_jobs<Qcx: Copy>(
        &self,
        qcx: Qcx,
        make_query: fn(Qcx, K) -> QueryStackFrame<D>,
        jobs: &mut QueryMap<D>,
    ) -> Option<()> {
        #[cfg(parallel_compiler)]
        {
            // We use try_lock_shards here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            let shards = self.active.try_lock_shards()?;
            for shard in shards.iter() {
                for (k, v) in shard.iter() {
                    if let QueryResult::Started(ref job) = *v {
                        let query = make_query(qcx, k.clone());
                        jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                    }
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            // We use try_lock here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
            // really hurt much.)
            for (k, v) in self.active.try_lock()?.iter() {
                if let QueryResult::Started(ref job) = *v {
                    let query = make_query(qcx, k.clone());
                    jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                }
            }
        }

        Some(())
    }
}

impl<K, D: DepKind> Default for QueryState<K, D> {
    fn default() -> QueryState<K, D> {
        QueryState { active: Default::default() }
    }
}

/// A type representing the responsibility to execute the query job for `key`.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, K, D: DepKind>
where
    K: Eq + Hash + Clone,
{
    state: &'tcx QueryState<K, D>,
    key: K,
    id: QueryJobId,
}

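/// Reports a query cycle error and recovers a placeholder value according to
/// the query's `HandleCycleError` strategy; the value is converted to the
/// stored form without being written to the query cache.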
#[cold]
#[inline(never)]
fn mk_cycle<Qcx, V, R, D: DepKind>(
    qcx: Qcx,
    cycle_error: CycleError<D>,
    handler: HandleCycleError,
    cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
) -> R
where
    Qcx: QueryContext + crate::query::HasDepContext<DepKind = D>,
    V: std::fmt::Debug + Value<Qcx::DepContext, Qcx::DepKind>,
    R: Clone,
{
    let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
    let value = handle_cycle_error(*qcx.dep_context(), &cycle_error, error, handler);
    cache.store_nocache(value)
}

fn handle_cycle_error<Tcx, V>(
    tcx: Tcx,
    cycle_error: &CycleError<Tcx::DepKind>,
    mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
    handler: HandleCycleError,
) -> V
where
    Tcx: DepContext,
    V: Value<Tcx, Tcx::DepKind>,
{
    use HandleCycleError::*;
    match handler {
        Error => {
            error.emit();
            Value::from_cycle_error(tcx, &cycle_error.cycle)
        }
        Fatal => {
            error.emit();
            tcx.sess().abort_if_errors();
            unreachable!()
        }
        DelayBug => {
            error.delay_as_bug();
            Value::from_cycle_error(tcx, &cycle_error.cycle)
        }
    }
}

impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
where
    K: Eq + Hash + Clone,
{
    /// Either gets a `JobOwner` corresponding to the query, allowing us to
    /// start executing the query, or returns with the result of the query.
    /// This function assumes that `try_get_cached` has already been called
    /// and did not find the result in the cache.
    /// If the query is executing elsewhere, this will wait for it and return the result.
    /// If the query panicked, this will silently panic.
    ///
    /// This function is inlined because that results in a noticeable speed-up
    /// for some compile-time benchmarks.
    #[inline(always)]
    fn try_start<'b, Qcx>(
        qcx: &'b Qcx,
        state: &'b QueryState<K, Qcx::DepKind>,
        span: Span,
        key: K,
    ) -> TryGetJob<'b, K, D>
    where
        Qcx: QueryContext + crate::query::HasDepContext<DepKind = D>,
    {
        #[cfg(parallel_compiler)]
        let mut state_lock = state.active.get_shard_by_value(&key).lock();
        #[cfg(not(parallel_compiler))]
        let mut state_lock = state.active.lock();
        let lock = &mut *state_lock;

        match lock.entry(key) {
            Entry::Vacant(entry) => {
                let id = qcx.next_job_id();
                let job = qcx.current_query_job();
                let job = QueryJob::new(id, span, job);

                let key = entry.key().clone();
                entry.insert(QueryResult::Started(job));

                let owner = JobOwner { state, id, key };
                return TryGetJob::NotYetStarted(owner);
            }
            Entry::Occupied(mut entry) => {
                match entry.get_mut() {
                    #[cfg(not(parallel_compiler))]
                    QueryResult::Started(job) => {
                        let id = job.id;
                        drop(state_lock);

                        // If we are single-threaded we know that we have a cycle error,
                        // so we just return the error.
                        return TryGetJob::Cycle(id.find_cycle_in_stack(
                            qcx.try_collect_active_jobs().unwrap(),
                            &qcx.current_query_job(),
                            span,
                        ));
                    }
                    #[cfg(parallel_compiler)]
                    QueryResult::Started(job) => {
                        // For parallel queries, we'll block and wait until the query running
                        // in another thread has completed. Record how long we wait in the
                        // self-profiler.
                        let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();

                        // Get the latch out.
                        let latch = job.latch();

                        drop(state_lock);

                        // With parallel queries we might just have to wait on some other
                        // thread.
                        let result = latch.wait_on(qcx.current_query_job(), span);

                        match result {
                            Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
                            Err(cycle) => TryGetJob::Cycle(cycle),
                        }
                    }
                    QueryResult::Poisoned => FatalError.raise(),
                }
            }
        }
    }

    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiters, and forgets the `JobOwner` so it won't poison the query.
    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored
    where
        C: QueryCache<Key = K>,
    {
        // We can move out of `self` here because we `mem::forget` it below.
        let key = unsafe { ptr::read(&self.key) };
        let state = self.state;

        // Forget ourself so our destructor won't poison the query.
        mem::forget(self);

        let (job, result) = {
            let job = {
                #[cfg(parallel_compiler)]
                let mut lock = state.active.get_shard_by_value(&key).lock();
                #[cfg(not(parallel_compiler))]
                let mut lock = state.active.lock();
                match lock.remove(&key).unwrap() {
                    QueryResult::Started(job) => job,
                    QueryResult::Poisoned => panic!(),
                }
            };
            let result = cache.complete(key, result, dep_node_index);
            (job, result)
        };

        job.signal_complete();
        result
    }
}

impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D>
where
    K: Eq + Hash + Clone,
    D: DepKind,
{
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let job = {
            #[cfg(parallel_compiler)]
            let mut shard = state.active.get_shard_by_value(&self.key).lock();
            #[cfg(not(parallel_compiler))]
            let mut shard = state.active.lock();
            let job = match shard.remove(&self.key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            shard.insert(self.key.clone(), QueryResult::Poisoned);
            job
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}

#[derive(Clone)]
pub(crate) struct CycleError<D: DepKind> {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, QueryStackFrame<D>)>,
    pub cycle: Vec<QueryInfo<D>>,
}

/// The result of `try_start`.
enum TryGetJob<'tcx, K, D>
where
    K: Eq + Hash + Clone,
    D: DepKind,
{
    /// The query is not yet started. Contains a guard to the cache eventually used to start it.
    NotYetStarted(JobOwner<'tcx, K, D>),

    /// The query was already completed.
    /// Returns the result of the query and its dep-node index
    /// if it succeeded or a cycle error if it failed.
    #[cfg(parallel_compiler)]
    JobCompleted(TimingGuard<'tcx>),

    /// Trying to execute the query resulted in a cycle.
    Cycle(CycleError<D>),
}

/// Checks if the query is already computed and in the cache.
/// On a cache hit, this registers a read of the cached result's dep-node
/// index and invokes `on_hit` with the cached value; on a miss it returns
/// `Err(())` so that the caller can go on to compute the query.
#[inline]
pub fn try_get_cached<Tcx, C, R, OnHit>(
    tcx: Tcx,
    cache: &C,
    key: &C::Key,
    // `on_hit` can be called while holding a lock to the query cache
    on_hit: OnHit,
) -> Result<R, ()>
where
    C: QueryCache,
    Tcx: DepContext,
    OnHit: FnOnce(&C::Stored) -> R,
{
    cache.lookup(&key, |value, index| {
        if std::intrinsics::unlikely(tcx.profiler().enabled()) {
            tcx.profiler().query_cache_hit(index.into());
        }
        tcx.dep_graph().read_index(index);
        on_hit(value)
    })
}

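/// Tries to claim the job for `key` and execute it, caching the result.
/// If the query is already running in another thread, this blocks until that
/// job completes and reads the result from the cache; a detected cycle is
/// turned into a cycle-error value instead.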
fn try_execute_query<Q, Qcx>(
    qcx: Qcx,
    state: &QueryState<Q::Key, Qcx::DepKind>,
    cache: &Q::Cache,
    span: Span,
    key: Q::Key,
    dep_node: Option<DepNode<Qcx::DepKind>>,
) -> (Q::Stored, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    match JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, span, key.clone()) {
        TryGetJob::NotYetStarted(job) => {
            let (result, dep_node_index) =
                execute_job::<Q, Qcx>(qcx, key.clone(), dep_node, job.id);
            if Q::FEEDABLE {
                // We may have put a value inside the cache from inside the execution.
                // Verify that it has the same hash as what we have now, to ensure consistency.
                let _ = cache.lookup(&key, |cached_result, _| {
                    let hasher = Q::HASH_RESULT.expect("feedable forbids no_hash");

                    let old_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| hasher(&mut hcx, cached_result.borrow()));
                    let new_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| hasher(&mut hcx, &result));
                    debug_assert_eq!(
                        old_hash, new_hash,
                        "Computed query value for {:?}({:?}) is inconsistent with fed value,\ncomputed={:#?}\nfed={:#?}",
                        Q::DEP_KIND, key, result, cached_result,
                    );
                });
            }
            let result = job.complete(cache, result, dep_node_index);
            (result, Some(dep_node_index))
        }
        TryGetJob::Cycle(error) => {
            let result = mk_cycle(qcx, error, Q::HANDLE_CYCLE_ERROR, cache);
            (result, None)
        }
        #[cfg(parallel_compiler)]
        TryGetJob::JobCompleted(query_blocked_prof_timer) => {
            let (v, index) = cache
                .lookup(&key, |value, index| (value.clone(), index))
                .unwrap_or_else(|_| panic!("value must be in cache after waiting"));

            if std::intrinsics::unlikely(qcx.dep_context().profiler().enabled()) {
                qcx.dep_context().profiler().query_cache_hit(index.into());
            }
            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());

            (v, Some(index))
        }
    }
}

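/// Runs the query provider for `key` inside a dep-graph task. With
/// incremental compilation enabled (and for non-anonymous, non-eval-always
/// queries), this first attempts to mark the dep-node green and load the
/// result via `try_load_from_disk_and_cache_in_memory`.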
fn execute_job<Q, Qcx>(
    qcx: Qcx,
    key: Q::Key,
    mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let dep_graph = qcx.dep_context().dep_graph();

    // Fast path for when incr. comp. is off.
    if !dep_graph.is_fully_enabled() {
        let prof_timer = qcx.dep_context().profiler().query_provider();
        let result = qcx.start_query(job_id, Q::DEPTH_LIMIT, None, || {
            Q::compute(qcx, &key)(*qcx.dep_context(), key)
        });
        let dep_node_index = dep_graph.next_virtual_depnode_index();
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
        return (result, dep_node_index);
    }

    if !Q::ANON && !Q::EVAL_ALWAYS {
        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node =
            dep_node_opt.get_or_insert_with(|| Q::construct_dep_node(*qcx.dep_context(), &key));

        // The diagnostics for this query will be promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        if let Some(ret) = qcx.start_query(job_id, false, None, || {
            try_load_from_disk_and_cache_in_memory::<Q, Qcx>(qcx, &key, &dep_node)
        }) {
            return ret;
        }
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();
    let diagnostics = Lock::new(ThinVec::new());

    let (result, dep_node_index) =
        qcx.start_query(job_id, Q::DEPTH_LIMIT, Some(&diagnostics), || {
            if Q::ANON {
                return dep_graph.with_anon_task(*qcx.dep_context(), Q::DEP_KIND, || {
                    Q::compute(qcx, &key)(*qcx.dep_context(), key)
                });
            }

            // `to_dep_node` is expensive for some `DepKind`s.
            let dep_node =
                dep_node_opt.unwrap_or_else(|| Q::construct_dep_node(*qcx.dep_context(), &key));

            let task = Q::compute(qcx, &key);
            dep_graph.with_task(dep_node, *qcx.dep_context(), key, task, Q::HASH_RESULT)
        });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    let diagnostics = diagnostics.into_inner();
    let side_effects = QuerySideEffects { diagnostics };

    if std::intrinsics::unlikely(!side_effects.is_empty()) {
        if Q::ANON {
            qcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
        } else {
            qcx.store_side_effects(dep_node_index, side_effects);
        }
    }

    (result, dep_node_index)
}

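/// Tries to mark `dep_node` green and, on success, to obtain the query's
/// value either from the on-disk cache or by recomputing it with dep-graph
/// tracking ignored (the edges are already in place). Returns `None` if the
/// node could not be marked green.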
fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
    qcx: Qcx,
    key: &Q::Key,
    dep_node: &DepNode<Qcx::DepKind>,
) -> Option<(Q::Value, DepNodeIndex)>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Note this function can be called concurrently from the same query.
    // We must ensure that this is handled correctly.

    let dep_graph = qcx.dep_context().dep_graph();
    let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(qcx, &dep_node)?;

    debug_assert!(dep_graph.is_green(dep_node));

    // First we try to load the result from the on-disk cache.
    // Some things are never cached on disk.
    if let Some(try_load_from_disk) = Q::try_load_from_disk(qcx, &key) {
        let prof_timer = qcx.dep_context().profiler().incr_cache_loading();

        // The call to `with_query_deserialization` enforces that no new `DepNodes`
        // are created during deserialization. See the docs of that method for more
        // details.
        let result =
            dep_graph.with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index));

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some(result) = result {
            if std::intrinsics::unlikely(
                qcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
            ) {
                dep_graph.mark_debug_loaded_from_disk(*dep_node)
            }

            let prev_fingerprint = qcx
                .dep_context()
                .dep_graph()
                .prev_fingerprint_of(dep_node)
                .unwrap_or(Fingerprint::ZERO);
            // If `-Zincremental-verify-ich` is specified, re-hash results from
            // the cache and make sure that they have the expected fingerprint.
            //
            // If not, we still seek to verify a subset of fingerprints loaded
            // from disk. Re-hashing results is fairly expensive, so we can't
            // currently afford to verify every hash. This subset should still
            // give us some coverage of potential bugs though.
            let try_verify = prev_fingerprint.as_value().1 % 32 == 0;
            if std::intrinsics::unlikely(
                try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
            ) {
                incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT);
            }

            return Some((result, dep_node_index));
        }

        // We always expect to find a cached result for things that
        // can be forced from `DepNode`.
        debug_assert!(
            !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
            "missing on-disk cache entry for {dep_node:?}"
        );
    }

    // We could not load a result from the on-disk cache, so
    // recompute.
    let prof_timer = qcx.dep_context().profiler().query_provider();

    // The dep-graph for this computation is already in-place.
    let result = dep_graph.with_ignore(|| Q::compute(qcx, key)(*qcx.dep_context(), key.clone()));

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Verify that re-running the query produced a result with the expected hash.
    // This catches bugs in query implementations, turning them into ICEs.
    // For example, a query might sort its result by `DefId` - since `DefId`s are
    // not stable across compilation sessions, the result could end up getting sorted
    // in a different order when the query is re-run, even though all of the inputs
    // (e.g. `DefPathHash` values) were green.
    //
    // See issue #82920 for an example of a miscompilation that would get turned into
    // an ICE by this check.
    incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT);

    Some((result, dep_node_index))
}

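/// Re-hashes `result` and compares the fingerprint against the one recorded
/// for `dep_node` in the previous session, reporting an error and panicking
/// via `incremental_verify_ich_failed` on a mismatch.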
#[instrument(skip(tcx, result, hash_result), level = "debug")]
pub(crate) fn incremental_verify_ich<Tcx, V: Debug>(
    tcx: Tcx,
    result: &V,
    dep_node: &DepNode<Tcx::DepKind>,
    hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
) -> Fingerprint
where
    Tcx: DepContext,
{
    assert!(
        tcx.dep_graph().is_green(dep_node),
        "fingerprint for green query instance not loaded from cache: {dep_node:?}",
    );

    let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
    });

    let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);

    if Some(new_hash) != old_hash {
        incremental_verify_ich_failed(
            tcx.sess(),
            DebugArg::from(&dep_node),
            DebugArg::from(&result),
        );
    }

    new_hash
}

// This DebugArg business is largely a mirror of std::fmt::ArgumentV1, which is
// currently not exposed publicly.
//
// The PR which added this attempted to use `&dyn Debug` instead, but that
// showed statistically significant worse compiler performance. It's not
// actually clear what the cause there was -- the code should be cold. If this
// can be replaced with `&dyn Debug` with no perf impact, then it probably
// should be.
extern "C" {
    type Opaque;
}

struct DebugArg<'a> {
    value: &'a Opaque,
    fmt: fn(&Opaque, &mut std::fmt::Formatter<'_>) -> std::fmt::Result,
}

impl<'a, T> From<&'a T> for DebugArg<'a>
where
    T: std::fmt::Debug,
{
    fn from(value: &'a T) -> DebugArg<'a> {
        DebugArg {
            value: unsafe { std::mem::transmute(value) },
            fmt: unsafe {
                std::mem::transmute(<T as std::fmt::Debug>::fmt as fn(_, _) -> std::fmt::Result)
            },
        }
    }
}

impl std::fmt::Debug for DebugArg<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        (self.fmt)(self.value, f)
    }
}

// Note that this is marked #[cold] and intentionally takes the equivalent of
// `dyn Debug` for its arguments, as we want to avoid generating a bunch of
// different implementations for LLVM to chew on (and filling up the final
// binary, too).
#[cold]
fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result: DebugArg<'_>) {
    // When we emit an error message and panic, we try to debug-print the `DepNode`
    // and query result. Unfortunately, this can cause us to run additional queries,
    // which may result in another fingerprint mismatch while we're in the middle
    // of processing this one. To avoid a double-panic (which kills the process
    // before we can print out the query static), we print out a terse
    // but 'safe' message if we detect a re-entrant call to this method.
    thread_local! {
        static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
    };

    let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));

    if old_in_panic {
        sess.emit_err(crate::error::Reentrant);
    } else {
        let run_cmd = if let Some(crate_name) = &sess.opts.crate_name {
            format!("`cargo clean -p {crate_name}` or `cargo clean`")
        } else {
            "`cargo clean`".to_string()
        };

        sess.emit_err(crate::error::IncrementCompilation {
            run_cmd,
            dep_node: format!("{dep_node:?}"),
        });
        panic!("Found unstable fingerprints for {dep_node:?}: {result:?}");
    }

    INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
}

/// Ensure that either this query has all green inputs or has been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
/// Returns true if the query should still run.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
fn ensure_must_run<Q, Qcx>(qcx: Qcx, key: &Q::Key) -> (bool, Option<DepNode<Qcx::DepKind>>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    if Q::EVAL_ALWAYS {
        return (true, None);
    }

    // Ensuring an anonymous query makes no sense.
    assert!(!Q::ANON);

    let dep_node = Q::construct_dep_node(*qcx.dep_context(), key);

    let dep_graph = qcx.dep_context().dep_graph();
    match dep_graph.try_mark_green(qcx, &dep_node) {
        None => {
            // A None return from `try_mark_green` means that this is either
            // a new dep node or that the dep node has already been marked red.
            // Either way, we can't call `dep_graph.read()` as we don't have the
            // DepNodeIndex. We must invoke the query itself. The performance cost
            // this introduces should be negligible as we'll immediately hit the
            // in-memory cache, or another query down the line will.
            (true, Some(dep_node))
        }
        Some((_, dep_node_index)) => {
            dep_graph.read_index(dep_node_index);
            qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            (false, None)
        }
    }
}

#[derive(Debug)]
pub enum QueryMode {
    Get,
    Ensure,
}

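/// Executes query `Q` for `key`. Under `QueryMode::Ensure` this returns
/// `None` without running the query when all of its inputs are green;
/// otherwise the query is executed and `Some(result)` is returned.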
pub fn get_query<Q, Qcx, D>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored>
where
    D: DepKind,
    Q: QueryConfig<Qcx>,
    Q::Value: Value<Qcx::DepContext, D>,
    Qcx: QueryContext,
{
    let dep_node = if let QueryMode::Ensure = mode {
        let (must_run, dep_node) = ensure_must_run::<Q, _>(qcx, &key);
        if !must_run {
            return None;
        }
        dep_node
    } else {
        None
    };

    let (result, dep_node_index) = try_execute_query::<Q, Qcx>(
        qcx,
        Q::query_state(qcx),
        Q::query_cache(qcx),
        span,
        key,
        dep_node,
    );
    if let Some(dep_node_index) = dep_node_index {
        qcx.dep_context().dep_graph().read_index(dep_node_index)
    }
    Some(result)
}

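/// Forces the query for `key` from its `DepNode`, unless its result is
/// already in the in-memory cache. The query runs with `DUMMY_SP` since a
/// forced dep-node carries no source span.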
pub fn force_query<Q, Qcx, D>(qcx: Qcx, key: Q::Key, dep_node: DepNode<Qcx::DepKind>)
where
    D: DepKind,
    Q: QueryConfig<Qcx>,
    Q::Value: Value<Qcx::DepContext, D>,
    Qcx: QueryContext,
{
    // We may be concurrently trying both to execute and to force a query.
    // Ensure that only one of them runs the query.
    let cache = Q::query_cache(qcx);
    let cached = cache.lookup(&key, |_, index| {
        if std::intrinsics::unlikely(qcx.dep_context().profiler().enabled()) {
            qcx.dep_context().profiler().query_cache_hit(index.into());
        }
    });

    match cached {
        Ok(()) => return,
        Err(()) => {}
    }

    let state = Q::query_state(qcx);
    debug_assert!(!Q::ANON);

    try_execute_query::<Q, _>(qcx, state, cache, DUMMY_SP, key, Some(dep_node));
}