//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
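//!
//! At a high level, a query request flows through `get_query` (or `force_query`),
//! which calls `try_execute_query`. That function uses `JobOwner::try_start` to
//! claim the job, wait for another thread that is already running it, or detect a
//! cycle; a freshly claimed job is then run by `execute_job`, which consults the
//! dependency graph and the on-disk cache (`try_load_from_disk_and_cache_in_memory`)
//! before falling back to the query provider itself.
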
use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVtable};
use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
#[cfg(parallel_compiler)]
use rustc_data_structures::profiling::TimingGuard;
#[cfg(parallel_compiler)]
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
use rustc_session::Session;
use rustc_span::{Span, DUMMY_SP};
use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::mem;
use std::ptr;

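/// Tracks the currently executing (started but not yet completed) jobs of a
/// single query, keyed by query key. With `parallel_compiler` the map is sharded
/// to reduce lock contention between threads working on different keys.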
pub struct QueryState<K> {
    #[cfg(parallel_compiler)]
    active: Sharded<FxHashMap<K, QueryResult>>,
    #[cfg(not(parallel_compiler))]
    active: Lock<FxHashMap<K, QueryResult>>,
}

/// Indicates the state of a query for a given key in a query map.
enum QueryResult {
    /// An already executing query. The query job can be used to await its completion.
    Started(QueryJob),

    /// The query panicked. Queries trying to wait on this will raise a fatal error,
    /// which silently panics.
    Poisoned,
}

impl<K> QueryState<K>
where
    K: Eq + Hash + Clone + Debug,
{
    pub fn all_inactive(&self) -> bool {
        #[cfg(parallel_compiler)]
        {
            let shards = self.active.lock_shards();
            shards.iter().all(|shard| shard.is_empty())
        }
        #[cfg(not(parallel_compiler))]
        {
            self.active.lock().is_empty()
        }
    }

    pub fn try_collect_active_jobs<CTX: Copy>(
        &self,
        tcx: CTX,
        make_query: fn(CTX, K) -> QueryStackFrame,
        jobs: &mut QueryMap,
    ) -> Option<()> {
        #[cfg(parallel_compiler)]
        {
            // We use try_lock_shards here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            let shards = self.active.try_lock_shards()?;
            for shard in shards.iter() {
                for (k, v) in shard.iter() {
                    if let QueryResult::Started(ref job) = *v {
                        let query = make_query(tcx, k.clone());
                        jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                    }
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            // We use try_lock here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
            // really hurt much.)
            for (k, v) in self.active.try_lock()?.iter() {
                if let QueryResult::Started(ref job) = *v {
                    let query = make_query(tcx, k.clone());
                    jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                }
            }
        }

        Some(())
    }
}

impl<K> Default for QueryState<K> {
    fn default() -> QueryState<K> {
        QueryState { active: Default::default() }
    }
}

/// A type representing the responsibility to execute the job for the query
/// identified by `key`.
/// This will poison the relevant query if dropped.
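/// Poisoning happens through the `Drop` impl below, which replaces the active
/// entry for the key with `QueryResult::Poisoned` and signals any waiters, so a
/// panicking query provider does not leave other threads blocked forever. The
/// normal completion path avoids this by `mem::forget`ting the owner in `complete`.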
struct JobOwner<'tcx, K>
where
    K: Eq + Hash + Clone,
{
    state: &'tcx QueryState<K>,
    key: K,
    id: QueryJobId,
}

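/// Handles a query cycle: reports the cycle error as a diagnostic, lets the
/// query's `handle_cycle_error` hook turn that diagnostic into a result value
/// (or abort compilation), and hands the value to `store_nocache` so it can be
/// returned without being recorded as a regular cache entry.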
#[cold]
#[inline(never)]
fn mk_cycle<CTX, V, R>(
    tcx: CTX,
    error: CycleError,
    handle_cycle_error: fn(CTX, DiagnosticBuilder<'_, ErrorGuaranteed>) -> V,
    cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
) -> R
where
    CTX: QueryContext,
    V: std::fmt::Debug,
    R: Clone,
{
    let error = report_cycle(tcx.dep_context().sess(), error);
    let value = handle_cycle_error(tcx, error);
    cache.store_nocache(value)
}

impl<'tcx, K> JobOwner<'tcx, K>
where
    K: Eq + Hash + Clone,
{
    /// Either gets a `JobOwner` corresponding to the query, allowing us to
    /// start executing the query, or returns with the result of the query.
    /// This function assumes that `try_get_cached` has already been called and
    /// did not find the key in the cache.
    /// If the query is executing elsewhere, this will wait for it and return the result.
    /// If the query panicked, this will silently panic.
    ///
    /// This function is inlined because that results in a noticeable speed-up
    /// for some compile-time benchmarks.
    #[inline(always)]
    fn try_start<'b, CTX>(
        tcx: &'b CTX,
        state: &'b QueryState<K>,
        span: Span,
        key: K,
    ) -> TryGetJob<'b, K>
    where
        CTX: QueryContext,
    {
        #[cfg(parallel_compiler)]
        let mut state_lock = state.active.get_shard_by_value(&key).lock();
        #[cfg(not(parallel_compiler))]
        let mut state_lock = state.active.lock();
        let lock = &mut *state_lock;

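        // Three possible outcomes: the entry is vacant and we claim the job; the
        // entry holds a running job, which on a single-threaded build means a
        // cycle and on a parallel build means we wait for the other thread; or a
        // previous attempt panicked and the entry is poisoned.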
        match lock.entry(key) {
            Entry::Vacant(entry) => {
                let id = tcx.next_job_id();
                let job = tcx.current_query_job();
                let job = QueryJob::new(id, span, job);

                let key = entry.key().clone();
                entry.insert(QueryResult::Started(job));

                let owner = JobOwner { state, id, key };
                return TryGetJob::NotYetStarted(owner);
            }
            Entry::Occupied(mut entry) => {
                match entry.get_mut() {
                    #[cfg(not(parallel_compiler))]
                    QueryResult::Started(job) => {
                        let id = job.id;
                        drop(state_lock);

                        // If we are single-threaded we know that we have a cycle error,
                        // so we just return the error.
                        return TryGetJob::Cycle(id.find_cycle_in_stack(
                            tcx.try_collect_active_jobs().unwrap(),
                            &tcx.current_query_job(),
                            span,
                        ));
                    }
                    #[cfg(parallel_compiler)]
                    QueryResult::Started(job) => {
                        // For parallel queries, we'll block and wait until the query running
                        // in another thread has completed. Record how long we wait in the
                        // self-profiler.
                        let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();

                        // Get the latch out
                        let latch = job.latch();

                        drop(state_lock);

                        // With parallel queries we might just have to wait on some other
                        // thread.
                        let result = latch.wait_on(tcx.current_query_job(), span);

                        match result {
                            Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
                            Err(cycle) => TryGetJob::Cycle(cycle),
                        }
                    }
                    QueryResult::Poisoned => FatalError.raise(),
                }
            }
        }
    }

    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiter, and forgets the `JobOwner` so it won't poison the query.
    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored
    where
        C: QueryCache<Key = K>,
    {
        // We can move out of `self` here because we `mem::forget` it below
        let key = unsafe { ptr::read(&self.key) };
        let state = self.state;

        // Forget ourself so our destructor won't poison the query
        mem::forget(self);

        let (job, result) = {
            let job = {
                #[cfg(parallel_compiler)]
                let mut lock = state.active.get_shard_by_value(&key).lock();
                #[cfg(not(parallel_compiler))]
                let mut lock = state.active.lock();
                match lock.remove(&key).unwrap() {
                    QueryResult::Started(job) => job,
                    QueryResult::Poisoned => panic!(),
                }
            };
            let result = cache.complete(key, result, dep_node_index);
            (job, result)
        };

        job.signal_complete();
        result
    }
}

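// Dropping a `JobOwner` without calling `complete` (e.g. because the query
// provider panicked or raised a fatal error) poisons the query and wakes up any
// waiters so they can bail out instead of blocking forever.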
impl<'tcx, K> Drop for JobOwner<'tcx, K>
where
    K: Eq + Hash + Clone,
{
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let job = {
            #[cfg(parallel_compiler)]
            let mut shard = state.active.get_shard_by_value(&self.key).lock();
            #[cfg(not(parallel_compiler))]
            let mut shard = state.active.lock();
            let job = match shard.remove(&self.key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            shard.insert(self.key.clone(), QueryResult::Poisoned);
            job
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}

#[derive(Clone)]
pub(crate) struct CycleError {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, QueryStackFrame)>,
    pub cycle: Vec<QueryInfo>,
}

/// The result of `try_start`.
enum TryGetJob<'tcx, K>
where
    K: Eq + Hash + Clone,
{
    /// The query is not yet started. Contains the `JobOwner` that will execute
    /// and complete it (or poison it on drop).
    NotYetStarted(JobOwner<'tcx, K>),

    /// The query was already completed by another thread.
    /// The caller must look the result up in the query cache; this variant only
    /// carries the self-profiler guard that timed how long we were blocked.
    #[cfg(parallel_compiler)]
    JobCompleted(TimingGuard<'tcx>),

    /// Trying to execute the query resulted in a cycle.
    Cycle(CycleError),
}

/// Checks if the query is already computed and in the cache.
/// If so, this records a dep-graph read of the cached value's dep-node index,
/// calls `on_hit` with the cached value (possibly while holding a lock on the
/// query cache), and returns `Ok` with `on_hit`'s result. Otherwise it returns
/// `Err(())` and the caller must compute the query.
#[inline]
pub fn try_get_cached<'a, CTX, C, R, OnHit>(
    tcx: CTX,
    cache: &'a C,
    key: &C::Key,
    // `on_hit` can be called while holding a lock to the query cache
    on_hit: OnHit,
) -> Result<R, ()>
where
    C: QueryCache,
    CTX: DepContext,
    OnHit: FnOnce(&C::Stored) -> R,
{
    cache.lookup(&key, |value, index| {
        if unlikely!(tcx.profiler().enabled()) {
            tcx.profiler().query_cache_hit(index.into());
        }
        tcx.dep_graph().read_index(index);
        on_hit(value)
    })
}

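/// Executes the query for `key`, coordinating with concurrent attempts at the
/// same key: if this call claims the job it runs `execute_job` and fills the
/// cache; if another thread is already running it (parallel compiler only) it
/// waits and then reads the cached value; on a cycle it produces the cycle-error
/// value. The returned `DepNodeIndex` is `None` only on the cycle-error path.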
fn try_execute_query<CTX, C>(
    tcx: CTX,
    state: &QueryState<C::Key>,
    cache: &C,
    span: Span,
    key: C::Key,
    dep_node: Option<DepNode<CTX::DepKind>>,
    query: &QueryVtable<CTX, C::Key, C::Value>,
) -> (C::Stored, Option<DepNodeIndex>)
where
    C: QueryCache,
    C::Key: Clone + DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    match JobOwner::<'_, C::Key>::try_start(&tcx, state, span, key.clone()) {
        TryGetJob::NotYetStarted(job) => {
            let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id);
            let result = job.complete(cache, result, dep_node_index);
            (result, Some(dep_node_index))
        }
        TryGetJob::Cycle(error) => {
            let result = mk_cycle(tcx, error, query.handle_cycle_error, cache);
            (result, None)
        }
        #[cfg(parallel_compiler)]
        TryGetJob::JobCompleted(query_blocked_prof_timer) => {
            let (v, index) = cache
                .lookup(&key, |value, index| (value.clone(), index))
                .unwrap_or_else(|_| panic!("value must be in cache after waiting"));

            if unlikely!(tcx.dep_context().profiler().enabled()) {
                tcx.dep_context().profiler().query_cache_hit(index.into());
            }
            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());

            (v, Some(index))
        }
    }
}

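/// Runs the provider for a freshly claimed job and records the result in the
/// dependency graph. With incremental compilation disabled this is just a
/// profiled call to `query.compute`. Otherwise, for non-anonymous, non-`eval_always`
/// queries, we first try to reuse a previous result via
/// `try_load_from_disk_and_cache_in_memory`; failing that, the provider is executed
/// inside an anonymous or regular dep-graph task and its side effects are stored.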
fn execute_job<CTX, K, V>(
    tcx: CTX,
    key: K,
    mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
    query: &QueryVtable<CTX, K, V>,
    job_id: QueryJobId,
) -> (V, DepNodeIndex)
where
    K: Clone + DepNodeParams<CTX::DepContext>,
    V: Debug,
    CTX: QueryContext,
{
    let dep_graph = tcx.dep_context().dep_graph();

    // Fast path for when incr. comp. is off.
    if !dep_graph.is_fully_enabled() {
        let prof_timer = tcx.dep_context().profiler().query_provider();
        let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
        let dep_node_index = dep_graph.next_virtual_depnode_index();
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
        return (result, dep_node_index);
    }

    if !query.anon && !query.eval_always {
        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node =
            dep_node_opt.get_or_insert_with(|| query.to_dep_node(*tcx.dep_context(), &key));

        // The diagnostics for this query will be promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        if let Some(ret) = tcx.start_query(job_id, None, || {
            try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
        }) {
            return ret;
        }
    }

    let prof_timer = tcx.dep_context().profiler().query_provider();
    let diagnostics = Lock::new(ThinVec::new());

    let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
        if query.anon {
            return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
                query.compute(*tcx.dep_context(), key)
            });
        }

        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));

        dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
    });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    let diagnostics = diagnostics.into_inner();
    let side_effects = QuerySideEffects { diagnostics };

    if unlikely!(!side_effects.is_empty()) {
        if query.anon {
            tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
        } else {
            tcx.store_side_effects(dep_node_index, side_effects);
        }
    }

    (result, dep_node_index)
}

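/// Tries to satisfy the query from incremental compilation state. Returns `None`
/// if the dep-node cannot be marked green. On success, the result is either
/// deserialized from the on-disk cache or, if no on-disk entry exists, recomputed
/// with `dep_graph.with_ignore` (the dep-graph edges for this node are already in
/// place). Loaded results are spot-checked and recomputed results are always
/// checked by `incremental_verify_ich`.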
fn try_load_from_disk_and_cache_in_memory<CTX, K, V>(
    tcx: CTX,
    key: &K,
    dep_node: &DepNode<CTX::DepKind>,
    query: &QueryVtable<CTX, K, V>,
) -> Option<(V, DepNodeIndex)>
where
    K: Clone,
    CTX: QueryContext,
    V: Debug,
{
    // Note this function can be called concurrently from the same query.
    // We must ensure that this is handled correctly.

    let dep_graph = tcx.dep_context().dep_graph();
    let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(tcx, &dep_node)?;

    debug_assert!(dep_graph.is_green(dep_node));

    // First we try to load the result from the on-disk cache.
    // Some things are never cached on disk.
    if query.cache_on_disk {
        let prof_timer = tcx.dep_context().profiler().incr_cache_loading();

        // The call to `with_query_deserialization` enforces that no new `DepNodes`
        // are created during deserialization. See the docs of that method for more
        // details.
        let result = dep_graph
            .with_query_deserialization(|| query.try_load_from_disk(tcx, prev_dep_node_index));

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some(result) = result {
            if unlikely!(tcx.dep_context().sess().opts.debugging_opts.query_dep_graph) {
                dep_graph.mark_debug_loaded_from_disk(*dep_node)
            }

            let prev_fingerprint = tcx
                .dep_context()
                .dep_graph()
                .prev_fingerprint_of(dep_node)
                .unwrap_or(Fingerprint::ZERO);
            // If `-Zincremental-verify-ich` is specified, re-hash results from
            // the cache and make sure that they have the expected fingerprint.
            //
            // If not, we still seek to verify a subset of fingerprints loaded
            // from disk. Re-hashing results is fairly expensive, so we can't
            // currently afford to verify every hash. This subset should still
            // give us some coverage of potential bugs though.
            let try_verify = prev_fingerprint.as_value().1 % 32 == 0;
            if unlikely!(
                try_verify || tcx.dep_context().sess().opts.debugging_opts.incremental_verify_ich
            ) {
                incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
            }

            return Some((result, dep_node_index));
        }

        // We always expect to find a cached result for things that
        // can be forced from `DepNode`.
        debug_assert!(
            !tcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
            "missing on-disk cache entry for {:?}",
            dep_node
        );
    }

    // We could not load a result from the on-disk cache, so
    // recompute.
    let prof_timer = tcx.dep_context().profiler().query_provider();

    // The dep-graph for this computation is already in-place.
    let result = dep_graph.with_ignore(|| query.compute(*tcx.dep_context(), key.clone()));

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Verify that re-running the query produced a result with the expected hash.
    // This catches bugs in query implementations, turning them into ICEs.
    // For example, a query might sort its result by `DefId` - since `DefId`s are
    // not stable across compilation sessions, the result could end up getting sorted
    // in a different order when the query is re-run, even though all of the inputs
    // (e.g. `DefPathHash` values) were green.
    //
    // See issue #82920 for an example of a miscompilation that would get turned into
    // an ICE by this check.
    incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);

    Some((result, dep_node_index))
}

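/// Re-hashes `result` with the query's `hash_result` function and compares the
/// new fingerprint against the one recorded for the previous session. A mismatch
/// means the query result is not stable across runs; `incremental_verify_ich_cold`
/// then reports this as an internal compiler error.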
fn incremental_verify_ich<CTX, K, V: Debug>(
    tcx: CTX::DepContext,
    result: &V,
    dep_node: &DepNode<CTX::DepKind>,
    query: &QueryVtable<CTX, K, V>,
) where
    CTX: QueryContext,
{
    assert!(
        tcx.dep_graph().is_green(dep_node),
        "fingerprint for green query instance not loaded from cache: {:?}",
        dep_node,
    );

    debug!("BEGIN verify_ich({:?})", dep_node);
    let new_hash = query.hash_result.map_or(Fingerprint::ZERO, |f| {
        let mut hcx = tcx.create_stable_hashing_context();
        f(&mut hcx, result)
    });
    let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
    debug!("END verify_ich({:?})", dep_node);

    if Some(new_hash) != old_hash {
        incremental_verify_ich_cold(tcx.sess(), DebugArg::from(&dep_node), DebugArg::from(&result));
    }
}

// This DebugArg business is largely a mirror of std::fmt::ArgumentV1, which is
// currently not exposed publicly.
//
// The PR which added this attempted to use `&dyn Debug` instead, but that
// showed statistically significant worse compiler performance. It's not
// actually clear what the cause there was -- the code should be cold. If this
// can be replaced with `&dyn Debug` with no perf impact, then it probably
// should be.
extern "C" {
    type Opaque;
}

struct DebugArg<'a> {
    value: &'a Opaque,
    fmt: fn(&Opaque, &mut std::fmt::Formatter<'_>) -> std::fmt::Result,
}

impl<'a, T> From<&'a T> for DebugArg<'a>
where
    T: std::fmt::Debug,
{
    fn from(value: &'a T) -> DebugArg<'a> {
        DebugArg {
            value: unsafe { std::mem::transmute(value) },
            fmt: unsafe {
                std::mem::transmute(<T as std::fmt::Debug>::fmt as fn(_, _) -> std::fmt::Result)
            },
        }
    }
}

impl std::fmt::Debug for DebugArg<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        (self.fmt)(self.value, f)
    }
}

// Note that this is marked #[cold] and intentionally takes the equivalent of
// `dyn Debug` for its arguments, as we want to avoid generating a bunch of
// different implementations for LLVM to chew on (and filling up the final
// binary, too).
#[cold]
fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: DebugArg<'_>) {
    let run_cmd = if let Some(crate_name) = &sess.opts.crate_name {
        format!("`cargo clean -p {}` or `cargo clean`", crate_name)
    } else {
        "`cargo clean`".to_string()
    };

    // When we emit an error message and panic, we try to debug-print the `DepNode`
    // and query result. Unfortunately, this can cause us to run additional queries,
    // which may result in another fingerprint mismatch while we're in the middle
    // of processing this one. To avoid a double-panic (which kills the process
    // before we can print out the query static), we print out a terse
    // but 'safe' message if we detect a re-entrant call to this method.
    thread_local! {
        static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
    };

    let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));

    if old_in_panic {
        sess.struct_err(
            "internal compiler error: re-entrant incremental verify failure, suppressing message",
        )
        .emit();
    } else {
        sess.struct_err(&format!("internal compiler error: encountered incremental compilation error with {:?}", dep_node))
            .help(&format!("This is a known issue with the compiler. Run {} to allow your project to compile", run_cmd))
            .note("Please follow the instructions below to create a bug report with the provided information")
            .note("See <https://github.com/rust-lang/rust/issues/84970> for more information")
            .emit();
        panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
    }

    INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
}

/// Ensure that either this query has all green inputs or has been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
/// Returns true if the query should still run.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
fn ensure_must_run<CTX, K, V>(
    tcx: CTX,
    key: &K,
    query: &QueryVtable<CTX, K, V>,
) -> (bool, Option<DepNode<CTX::DepKind>>)
where
    K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    if query.eval_always {
        return (true, None);
    }

    // Ensuring an anonymous query makes no sense
    assert!(!query.anon);

    let dep_node = query.to_dep_node(*tcx.dep_context(), key);

    let dep_graph = tcx.dep_context().dep_graph();
    match dep_graph.try_mark_green(tcx, &dep_node) {
        None => {
            // A None return from `try_mark_green` means that this is either
            // a new dep node or that the dep node has already been marked red.
            // Either way, we can't call `dep_graph.read()` as we don't have the
            // DepNodeIndex. We must invoke the query itself. The performance cost
            // this introduces should be negligible as we'll immediately hit the
            // in-memory cache, or another query down the line will.
            (true, Some(dep_node))
        }
        Some((_, dep_node_index)) => {
            dep_graph.read_index(dep_node_index);
            tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            (false, None)
        }
    }
}

#[derive(Debug)]
pub enum QueryMode {
    Get,
    Ensure,
}

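/// Executes the query `Q` for `key`. With `QueryMode::Ensure`, `ensure_must_run`
/// first checks whether the query's inputs are already green and returns `None`
/// when nothing needs to run; otherwise the query is executed (or awaited) via
/// `try_execute_query` and a dep-graph read of the resulting node is recorded.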
pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored>
where
    Q: QueryDescription<CTX>,
    Q::Key: DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    let query = Q::make_vtable(tcx, &key);
    let dep_node = if let QueryMode::Ensure = mode {
        let (must_run, dep_node) = ensure_must_run(tcx, &key, &query);
        if !must_run {
            return None;
        }
        dep_node
    } else {
        None
    };

    let (result, dep_node_index) = try_execute_query(
        tcx,
        Q::query_state(tcx),
        Q::query_cache(tcx),
        span,
        key,
        dep_node,
        &query,
    );
    if let Some(dep_node_index) = dep_node_index {
        tcx.dep_context().dep_graph().read_index(dep_node_index)
    }
    Some(result)
}

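/// Forces the query for `key` against an already-constructed `dep_node`. A normal
/// execution of the same query may be racing with us, so the in-memory cache is
/// checked first and the query is only executed on a miss.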
pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, dep_node: DepNode<CTX::DepKind>)
where
    Q: QueryDescription<CTX>,
    Q::Key: DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    // We may be concurrently trying both execute and force a query.
    // Ensure that only one of them runs the query.
    let cache = Q::query_cache(tcx);
    let cached = cache.lookup(&key, |_, index| {
        if unlikely!(tcx.dep_context().profiler().enabled()) {
            tcx.dep_context().profiler().query_cache_hit(index.into());
        }
    });

    match cached {
        Ok(()) => return,
        Err(()) => {}
    }

    let query = Q::make_vtable(tcx, &key);
    let state = Q::query_state(tcx);
    debug_assert!(!query.anon);

    try_execute_query(tcx, state, cache, DUMMY_SP, key, Some(dep_node), &query);
}