//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.

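//! At a high level: callers first consult the in-memory cache via
//! `try_get_cached`; on a miss, `get_query` has `try_execute_query` claim a
//! `JobOwner` (or wait for / report a cycle on a concurrent execution), and
//! `execute_job` runs the provider, consulting the dep graph and the on-disk
//! cache when incremental compilation is enabled.
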
use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVTable};
use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
use crate::values::Value;
use crate::HandleCycleError;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
#[cfg(parallel_compiler)]
use rustc_data_structures::profiling::TimingGuard;
#[cfg(parallel_compiler)]
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::Lock;
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
use rustc_session::Session;
use rustc_span::{Span, DUMMY_SP};
use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::mem;
use std::ptr;
use thin_vec::ThinVec;

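/// The set of in-flight (started or poisoned) entries for a single query,
/// keyed by query key. With the parallel compiler the map is sharded to
/// reduce lock contention; otherwise a single `Lock` suffices.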
pub struct QueryState<K> {
    #[cfg(parallel_compiler)]
    active: Sharded<FxHashMap<K, QueryResult>>,
    #[cfg(not(parallel_compiler))]
    active: Lock<FxHashMap<K, QueryResult>>,
}

/// Indicates the state of a query for a given key in a query map.
enum QueryResult {
    /// An already executing query. The query job can be used to wait for its completion.
    Started(QueryJob),

    /// The query panicked. Queries trying to wait on this will raise a fatal error which will
    /// silently panic.
    Poisoned,
}

impl<K> QueryState<K>
where
    K: Eq + Hash + Clone + Debug,
{
    pub fn all_inactive(&self) -> bool {
        #[cfg(parallel_compiler)]
        {
            let shards = self.active.lock_shards();
            shards.iter().all(|shard| shard.is_empty())
        }
        #[cfg(not(parallel_compiler))]
        {
            self.active.lock().is_empty()
        }
    }

    pub fn try_collect_active_jobs<CTX: Copy>(
        &self,
        tcx: CTX,
        make_query: fn(CTX, K) -> QueryStackFrame,
        jobs: &mut QueryMap,
    ) -> Option<()> {
        #[cfg(parallel_compiler)]
        {
            // We use try_lock_shards here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            let shards = self.active.try_lock_shards()?;
            for shard in shards.iter() {
                for (k, v) in shard.iter() {
                    if let QueryResult::Started(ref job) = *v {
                        let query = make_query(tcx, k.clone());
                        jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                    }
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            // We use try_lock here since we are called from the
            // deadlock handler, and this shouldn't be locked.
            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
            // really hurt much.)
            for (k, v) in self.active.try_lock()?.iter() {
                if let QueryResult::Started(ref job) = *v {
                    let query = make_query(tcx, k.clone());
                    jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
                }
            }
        }

        Some(())
    }
}

impl<K> Default for QueryState<K> {
    fn default() -> QueryState<K> {
        QueryState { active: Default::default() }
    }
}

/// A type representing the responsibility to execute the query job.
/// This will poison the relevant query if dropped.
struct JobOwner<'tcx, K>
where
    K: Eq + Hash + Clone,
{
    state: &'tcx QueryState<K>,
    key: K,
    id: QueryJobId,
}

#[cold]
#[inline(never)]
fn mk_cycle<CTX, V, R>(
    tcx: CTX,
    error: CycleError,
    handler: HandleCycleError,
    cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
) -> R
where
    CTX: QueryContext,
    V: std::fmt::Debug + Value<CTX::DepContext>,
    R: Clone,
{
    let error = report_cycle(tcx.dep_context().sess(), error);
    let value = handle_cycle_error(*tcx.dep_context(), error, handler);
    cache.store_nocache(value)
}

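/// Applies the query's configured `HandleCycleError` strategy to a cycle
/// diagnostic: emit it and recover with a placeholder value, emit it and
/// abort, or delay it as a bug and recover.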
fn handle_cycle_error<CTX, V>(
    tcx: CTX,
    mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
    handler: HandleCycleError,
) -> V
where
    CTX: DepContext,
    V: Value<CTX>,
{
    use HandleCycleError::*;
    match handler {
        Error => {
            error.emit();
            Value::from_cycle_error(tcx)
        }
        Fatal => {
            error.emit();
            tcx.sess().abort_if_errors();
            unreachable!()
        }
        DelayBug => {
            error.delay_as_bug();
            Value::from_cycle_error(tcx)
        }
    }
}

impl<'tcx, K> JobOwner<'tcx, K>
where
    K: Eq + Hash + Clone,
{
    /// Either gets a `JobOwner` corresponding to the query, allowing us to
    /// start executing the query, or returns with the result of the query.
    /// This function assumes that `try_get_cached` has already been called and returned a miss.
    /// If the query is executing elsewhere, this will wait for it and return the result.
    /// If the query panicked, this will silently panic.
    ///
    /// This function is inlined because that results in a noticeable speed-up
    /// for some compile-time benchmarks.
    #[inline(always)]
    fn try_start<'b, CTX>(
        tcx: &'b CTX,
        state: &'b QueryState<K>,
        span: Span,
        key: K,
    ) -> TryGetJob<'b, K>
    where
        CTX: QueryContext,
    {
        #[cfg(parallel_compiler)]
        let mut state_lock = state.active.get_shard_by_value(&key).lock();
        #[cfg(not(parallel_compiler))]
        let mut state_lock = state.active.lock();
        let lock = &mut *state_lock;

        match lock.entry(key) {
            Entry::Vacant(entry) => {
                let id = tcx.next_job_id();
                let job = tcx.current_query_job();
                let job = QueryJob::new(id, span, job);

                let key = entry.key().clone();
                entry.insert(QueryResult::Started(job));

                let owner = JobOwner { state, id, key };
                return TryGetJob::NotYetStarted(owner);
            }
            Entry::Occupied(mut entry) => {
                match entry.get_mut() {
                    #[cfg(not(parallel_compiler))]
                    QueryResult::Started(job) => {
                        let id = job.id;
                        drop(state_lock);

                        // If we are single-threaded we know that we have a cycle error,
                        // so we just return the error.
                        return TryGetJob::Cycle(id.find_cycle_in_stack(
                            tcx.try_collect_active_jobs().unwrap(),
                            &tcx.current_query_job(),
                            span,
                        ));
                    }
                    #[cfg(parallel_compiler)]
                    QueryResult::Started(job) => {
                        // For parallel queries, we'll block and wait until the query running
                        // in another thread has completed. Record how long we wait in the
                        // self-profiler.
                        let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();

                        // Get the latch out
                        let latch = job.latch();

                        drop(state_lock);

                        // With parallel queries we might just have to wait on some other
                        // thread.
                        let result = latch.wait_on(tcx.current_query_job(), span);

                        match result {
                            Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
                            Err(cycle) => TryGetJob::Cycle(cycle),
                        }
                    }
                    QueryResult::Poisoned => FatalError.raise(),
                }
            }
        }
    }

    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiter, and forgets the `JobOwner`, so it won't poison the query.
    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored
    where
        C: QueryCache<Key = K>,
    {
        // We can move out of `self` here because we `mem::forget` it below
        let key = unsafe { ptr::read(&self.key) };
        let state = self.state;

        // Forget ourself so our destructor won't poison the query
        mem::forget(self);

        let (job, result) = {
            let job = {
                #[cfg(parallel_compiler)]
                let mut lock = state.active.get_shard_by_value(&key).lock();
                #[cfg(not(parallel_compiler))]
                let mut lock = state.active.lock();
                match lock.remove(&key).unwrap() {
                    QueryResult::Started(job) => job,
                    QueryResult::Poisoned => panic!(),
                }
            };
            let result = cache.complete(key, result, dep_node_index);
            (job, result)
        };

        job.signal_complete();
        result
    }
}

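// This `Drop` impl is the poisoning path: it runs only if a query provider
// panics (or otherwise unwinds) before `complete` is called, since `complete`
// does a `mem::forget(self)`.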
impl<'tcx, K> Drop for JobOwner<'tcx, K>
where
    K: Eq + Hash + Clone,
{
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let job = {
            #[cfg(parallel_compiler)]
            let mut shard = state.active.get_shard_by_value(&self.key).lock();
            #[cfg(not(parallel_compiler))]
            let mut shard = state.active.lock();
            let job = match shard.remove(&self.key).unwrap() {
                QueryResult::Started(job) => job,
                QueryResult::Poisoned => panic!(),
            };
            shard.insert(self.key.clone(), QueryResult::Poisoned);
            job
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}

#[derive(Clone)]
pub(crate) struct CycleError {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, QueryStackFrame)>,
    pub cycle: Vec<QueryInfo>,
}

/// The result of `try_start`.
enum TryGetJob<'tcx, K>
where
    K: Eq + Hash + Clone,
{
    /// The query is not yet started. Contains a guard to the cache eventually used to start it.
    NotYetStarted(JobOwner<'tcx, K>),

    /// The query was already completed while we waited on another thread.
    /// The caller must look the result up in the cache; this variant only
    /// carries the timer recording how long we were blocked.
    #[cfg(parallel_compiler)]
    JobCompleted(TimingGuard<'tcx>),

    /// Trying to execute the query resulted in a cycle.
    Cycle(CycleError),
}

/// Checks if the query is already computed and in the cache.
/// On a hit, this registers a dep-graph read of the cached value's
/// `DepNodeIndex` and returns the result of `on_hit`; on a miss it returns
/// `Err(())` and the caller must compute the value.
#[inline]
pub fn try_get_cached<'a, CTX, C, R, OnHit>(
    tcx: CTX,
    cache: &'a C,
    key: &C::Key,
    // `on_hit` can be called while holding a lock to the query cache
    on_hit: OnHit,
) -> Result<R, ()>
where
    C: QueryCache,
    CTX: DepContext,
    OnHit: FnOnce(&C::Stored) -> R,
{
    cache.lookup(&key, |value, index| {
        if std::intrinsics::unlikely(tcx.profiler().enabled()) {
            tcx.profiler().query_cache_hit(index.into());
        }
        tcx.dep_graph().read_index(index);
        on_hit(value)
    })
}

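/// Executes the query identified by `key`, first racing to become the job
/// owner via `JobOwner::try_start`. Returns the (possibly cached) result,
/// plus the `DepNodeIndex` when the result came from running or reusing the
/// query rather than from a cycle error.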
fn try_execute_query<CTX, C>(
    tcx: CTX,
    state: &QueryState<C::Key>,
    cache: &C,
    span: Span,
    key: C::Key,
    dep_node: Option<DepNode<CTX::DepKind>>,
    query: &QueryVTable<CTX, C::Key, C::Value>,
) -> (C::Stored, Option<DepNodeIndex>)
where
    C: QueryCache,
    C::Key: Clone + DepNodeParams<CTX::DepContext>,
    C::Value: Value<CTX::DepContext>,
    CTX: QueryContext,
{
    match JobOwner::<'_, C::Key>::try_start(&tcx, state, span, key.clone()) {
        TryGetJob::NotYetStarted(job) => {
            let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id);
            let result = job.complete(cache, result, dep_node_index);
            (result, Some(dep_node_index))
        }
        TryGetJob::Cycle(error) => {
            let result = mk_cycle(tcx, error, query.handle_cycle_error, cache);
            (result, None)
        }
        #[cfg(parallel_compiler)]
        TryGetJob::JobCompleted(query_blocked_prof_timer) => {
            let (v, index) = cache
                .lookup(&key, |value, index| (value.clone(), index))
                .unwrap_or_else(|_| panic!("value must be in cache after waiting"));

            if std::intrinsics::unlikely(tcx.dep_context().profiler().enabled()) {
                tcx.dep_context().profiler().query_cache_hit(index.into());
            }
            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());

            (v, Some(index))
        }
    }
}

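/// Runs the query provider for `key`: on the fast path (incremental
/// compilation disabled) it just executes and assigns a virtual dep node;
/// otherwise it first tries to mark the dep node green and reuse a previous
/// result, and only then executes inside the dep-graph task machinery,
/// storing any emitted diagnostics as side effects.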
fn execute_job<CTX, K, V>(
    tcx: CTX,
    key: K,
    mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
    query: &QueryVTable<CTX, K, V>,
    job_id: QueryJobId,
) -> (V, DepNodeIndex)
where
    K: Clone + DepNodeParams<CTX::DepContext>,
    V: Debug,
    CTX: QueryContext,
{
    let dep_graph = tcx.dep_context().dep_graph();

    // Fast path for when incr. comp. is off.
    if !dep_graph.is_fully_enabled() {
        let prof_timer = tcx.dep_context().profiler().query_provider();
        let result = tcx.start_query(job_id, query.depth_limit, None, || {
            query.compute(*tcx.dep_context(), key)
        });
        let dep_node_index = dep_graph.next_virtual_depnode_index();
        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
        return (result, dep_node_index);
    }

    if !query.anon && !query.eval_always {
        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node =
            dep_node_opt.get_or_insert_with(|| query.to_dep_node(*tcx.dep_context(), &key));

        // The diagnostics for this query will be promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        if let Some(ret) = tcx.start_query(job_id, false, None, || {
            try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
        }) {
            return ret;
        }
    }

    let prof_timer = tcx.dep_context().profiler().query_provider();
    let diagnostics = Lock::new(ThinVec::new());

    let (result, dep_node_index) =
        tcx.start_query(job_id, query.depth_limit, Some(&diagnostics), || {
            if query.anon {
                return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
                    query.compute(*tcx.dep_context(), key)
                });
            }

            // `to_dep_node` is expensive for some `DepKind`s.
            let dep_node =
                dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));

            dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
        });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    let diagnostics = diagnostics.into_inner();
    let side_effects = QuerySideEffects { diagnostics };

    if std::intrinsics::unlikely(!side_effects.is_empty()) {
        if query.anon {
            tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
        } else {
            tcx.store_side_effects(dep_node_index, side_effects);
        }
    }

    (result, dep_node_index)
}

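/// Called when the dep node might be marked green: if `try_mark_green`
/// succeeds, reuses the previous result, preferably by deserializing it from
/// the on-disk cache, and otherwise by re-running the provider with
/// dependency tracking ignored (the graph edges are already in place).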
fn try_load_from_disk_and_cache_in_memory<CTX, K, V>(
    tcx: CTX,
    key: &K,
    dep_node: &DepNode<CTX::DepKind>,
    query: &QueryVTable<CTX, K, V>,
) -> Option<(V, DepNodeIndex)>
where
    K: Clone,
    CTX: QueryContext,
    V: Debug,
{
    // Note this function can be called concurrently from the same query.
    // We must ensure that this is handled correctly.

    let dep_graph = tcx.dep_context().dep_graph();
    let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(tcx, &dep_node)?;

    debug_assert!(dep_graph.is_green(dep_node));

    // First we try to load the result from the on-disk cache.
    // Some things are never cached on disk.
    if let Some(try_load_from_disk) = query.try_load_from_disk {
        let prof_timer = tcx.dep_context().profiler().incr_cache_loading();

        // The call to `with_query_deserialization` enforces that no new `DepNodes`
        // are created during deserialization. See the docs of that method for more
        // details.
        let result =
            dep_graph.with_query_deserialization(|| try_load_from_disk(tcx, prev_dep_node_index));

        prof_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some(result) = result {
            if std::intrinsics::unlikely(
                tcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
            ) {
                dep_graph.mark_debug_loaded_from_disk(*dep_node)
            }

            let prev_fingerprint = tcx
                .dep_context()
                .dep_graph()
                .prev_fingerprint_of(dep_node)
                .unwrap_or(Fingerprint::ZERO);
            // If `-Zincremental-verify-ich` is specified, re-hash results from
            // the cache and make sure that they have the expected fingerprint.
            //
            // If not, we still seek to verify a subset of fingerprints loaded
            // from disk. Re-hashing results is fairly expensive, so we can't
            // currently afford to verify every hash. This subset should still
            // give us some coverage of potential bugs though.
            let try_verify = prev_fingerprint.as_value().1 % 32 == 0;
            if std::intrinsics::unlikely(
                try_verify || tcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
            ) {
                incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
            }

            return Some((result, dep_node_index));
        }

        // We always expect to find a cached result for things that
        // can be forced from `DepNode`.
        debug_assert!(
            !tcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
            "missing on-disk cache entry for {:?}",
            dep_node
        );
    }

    // We could not load a result from the on-disk cache, so
    // recompute.
    let prof_timer = tcx.dep_context().profiler().query_provider();

    // The dep-graph for this computation is already in-place.
    let result = dep_graph.with_ignore(|| query.compute(*tcx.dep_context(), key.clone()));

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Verify that re-running the query produced a result with the expected hash.
    // This catches bugs in query implementations, turning them into ICEs.
    // For example, a query might sort its result by `DefId` - since `DefId`s are
    // not stable across compilation sessions, the result could end up getting sorted
    // in a different order when the query is re-run, even though all of the inputs
    // (e.g. `DefPathHash` values) were green.
    //
    // See issue #82920 for an example of a miscompilation that would get turned into
    // an ICE by this check.
    incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);

    Some((result, dep_node_index))
}

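/// Re-hashes the `result` of a green dep node and compares it against the
/// fingerprint recorded in the previous session; a mismatch means the query
/// produced an unstable result, reported via the cold path below.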
fn incremental_verify_ich<CTX, K, V: Debug>(
    tcx: CTX::DepContext,
    result: &V,
    dep_node: &DepNode<CTX::DepKind>,
    query: &QueryVTable<CTX, K, V>,
) where
    CTX: QueryContext,
{
    assert!(
        tcx.dep_graph().is_green(dep_node),
        "fingerprint for green query instance not loaded from cache: {:?}",
        dep_node,
    );

    debug!("BEGIN verify_ich({:?})", dep_node);
    let new_hash = query.hash_result.map_or(Fingerprint::ZERO, |f| {
        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
    });
    let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
    debug!("END verify_ich({:?})", dep_node);

    if Some(new_hash) != old_hash {
        incremental_verify_ich_cold(tcx.sess(), DebugArg::from(&dep_node), DebugArg::from(&result));
    }
}

// This DebugArg business is largely a mirror of std::fmt::ArgumentV1, which is
// currently not exposed publicly.
//
// The PR which added this attempted to use `&dyn Debug` instead, but that
// showed a statistically significant regression in compiler performance. It's
// not actually clear what the cause there was -- the code should be cold. If
// this can be replaced with `&dyn Debug` with no perf impact, then it probably
// should be.
extern "C" {
    type Opaque;
}

struct DebugArg<'a> {
    value: &'a Opaque,
    fmt: fn(&Opaque, &mut std::fmt::Formatter<'_>) -> std::fmt::Result,
}

impl<'a, T> From<&'a T> for DebugArg<'a>
where
    T: std::fmt::Debug,
{
    fn from(value: &'a T) -> DebugArg<'a> {
        DebugArg {
            value: unsafe { std::mem::transmute(value) },
            fmt: unsafe {
                std::mem::transmute(<T as std::fmt::Debug>::fmt as fn(_, _) -> std::fmt::Result)
            },
        }
    }
}

impl std::fmt::Debug for DebugArg<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        (self.fmt)(self.value, f)
    }
}

// Note that this is marked #[cold] and intentionally takes the equivalent of
// `dyn Debug` for its arguments, as we want to avoid generating a bunch of
// different implementations for LLVM to chew on (and filling up the final
// binary, too).
#[cold]
fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: DebugArg<'_>) {
    let run_cmd = if let Some(crate_name) = &sess.opts.crate_name {
        format!("`cargo clean -p {}` or `cargo clean`", crate_name)
    } else {
        "`cargo clean`".to_string()
    };

    // When we emit an error message and panic, we try to debug-print the `DepNode`
    // and query result. Unfortunately, this can cause us to run additional queries,
    // which may result in another fingerprint mismatch while we're in the middle
    // of processing this one. To avoid a double-panic (which kills the process
    // before we can print out the query static), we print out a terse
    // but 'safe' message if we detect a re-entrant call to this method.
    thread_local! {
        static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
    };

    let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));

    if old_in_panic {
        sess.emit_err(crate::error::Reentrant);
    } else {
        sess.emit_err(crate::error::IncrementCompilation {
            run_cmd,
            dep_node: format!("{:?}", dep_node),
        });
        panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
    }

    INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
}

/// Ensure that either this query has all green inputs or has been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
/// Returns `true` if the query should still run.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
fn ensure_must_run<CTX, K, V>(
    tcx: CTX,
    key: &K,
    query: &QueryVTable<CTX, K, V>,
) -> (bool, Option<DepNode<CTX::DepKind>>)
where
    K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
    CTX: QueryContext,
{
    if query.eval_always {
        return (true, None);
    }

    // Ensuring an anonymous query makes no sense
    assert!(!query.anon);

    let dep_node = query.to_dep_node(*tcx.dep_context(), key);

    let dep_graph = tcx.dep_context().dep_graph();
    match dep_graph.try_mark_green(tcx, &dep_node) {
        None => {
            // A None return from `try_mark_green` means that this is either
            // a new dep node or that the dep node has already been marked red.
            // Either way, we can't call `dep_graph.read()` as we don't have the
            // DepNodeIndex. We must invoke the query itself. The performance cost
            // this introduces should be negligible as we'll immediately hit the
            // in-memory cache, or another query down the line will.
            (true, Some(dep_node))
        }
        Some((_, dep_node_index)) => {
            dep_graph.read_index(dep_node_index);
            tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            (false, None)
        }
    }
}

#[derive(Debug)]
pub enum QueryMode {
    Get,
    Ensure,
}

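/// Entry point used by the generated query methods. In `Ensure` mode this may
/// return `None` without executing anything when all of the query's inputs
/// are green; in `Get` mode it always produces the query's result.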
pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored>
where
    Q: QueryDescription<CTX>,
    Q::Key: DepNodeParams<CTX::DepContext>,
    Q::Value: Value<CTX::DepContext>,
    CTX: QueryContext,
{
    let query = Q::make_vtable(tcx, &key);
    let dep_node = if let QueryMode::Ensure = mode {
        let (must_run, dep_node) = ensure_must_run(tcx, &key, &query);
        if !must_run {
            return None;
        }
        dep_node
    } else {
        None
    };

    let (result, dep_node_index) = try_execute_query(
        tcx,
        Q::query_state(tcx),
        Q::query_cache(tcx),
        span,
        key,
        dep_node,
        &query,
    );
    if let Some(dep_node_index) = dep_node_index {
        tcx.dep_context().dep_graph().read_index(dep_node_index)
    }
    Some(result)
}

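/// Executes the query identified by `dep_node` purely for its side effect of
/// populating the query cache; unlike `get_query`, no result is returned.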
pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, dep_node: DepNode<CTX::DepKind>)
where
    Q: QueryDescription<CTX>,
    Q::Key: DepNodeParams<CTX::DepContext>,
    Q::Value: Value<CTX::DepContext>,
    CTX: QueryContext,
{
    // We may be concurrently trying to both execute and force a query.
    // Ensure that only one of them runs the query.
    let cache = Q::query_cache(tcx);
    let cached = cache.lookup(&key, |_, index| {
        if std::intrinsics::unlikely(tcx.dep_context().profiler().enabled()) {
            tcx.dep_context().profiler().query_cache_hit(index.into());
        }
    });

    match cached {
        Ok(()) => return,
        Err(()) => {}
    }

    let query = Q::make_vtable(tcx, &key);
    let state = Q::query_state(tcx);
    debug_assert!(!query.anon);

    try_execute_query(tcx, state, cache, DUMMY_SP, key, Some(dep_node), &query);
}