// compiler/rustc_query_system/src/query/job.rs (rustc 1.65.0)

use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};

use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, Level};
use rustc_hir::def::DefKind;
use rustc_session::{Session, SessionDiagnostic};
use rustc_span::Span;

use std::hash::Hash;
use std::num::NonZeroU64;

#[cfg(parallel_compiler)]
use {
    parking_lot::{Condvar, Mutex},
    rustc_data_structures::fx::FxHashSet,
    rustc_data_structures::sync::Lock,
    rustc_data_structures::sync::Lrc,
    rustc_data_structures::{jobserver, OnDrop},
    rustc_rayon_core as rayon_core,
    rustc_span::DUMMY_SP,
    std::iter::{self, FromIterator},
    std::{mem, process},
};

/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    pub query: QueryStackFrame,
}

pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;

/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId(pub NonZeroU64);

impl QueryJobId {
    fn query(self, map: &QueryMap) -> QueryStackFrame {
        map.get(&self).unwrap().query.clone()
    }

    #[cfg(parallel_compiler)]
    fn span(self, map: &QueryMap) -> Span {
        map.get(&self).unwrap().job.span
    }

    #[cfg(parallel_compiler)]
    fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
        map.get(&self).unwrap().job.parent
    }

    #[cfg(parallel_compiler)]
    fn latch<'a>(self, map: &'a QueryMap) -> Option<&'a QueryLatch> {
        map.get(&self).unwrap().job.latch.as_ref()
    }
}

pub struct QueryJobInfo {
    pub query: QueryStackFrame,
    pub job: QueryJob,
}

/// Represents an active query job.
#[derive(Clone)]
pub struct QueryJob {
    pub id: QueryJobId,

    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<QueryJobId>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: Option<QueryLatch>,
}

impl QueryJob {
    /// Creates a new query job.
    #[inline]
    pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
        QueryJob {
            id,
            span,
            parent,
            #[cfg(parallel_compiler)]
            latch: None,
        }
    }

    #[cfg(parallel_compiler)]
    pub(super) fn latch(&mut self) -> QueryLatch {
        if self.latch.is_none() {
            self.latch = Some(QueryLatch::new());
        }
        self.latch.as_ref().unwrap().clone()
    }

    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single-threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us.
    #[inline]
    pub fn signal_complete(self) {
        #[cfg(parallel_compiler)]
        {
            if let Some(latch) = self.latch {
                latch.set();
            }
        }
    }
}

#[cfg(not(parallel_compiler))]
impl QueryJobId {
    #[cold]
    #[inline(never)]
    pub(super) fn find_cycle_in_stack(
        &self,
        query_map: QueryMap,
        current_job: &Option<QueryJobId>,
        span: Span,
    ) -> CycleError {
        // Find the waitee amongst `current_job`'s parents.
        let mut cycle = Vec::new();
        let mut current_job = Option::clone(current_job);

        while let Some(job) = current_job {
            let info = query_map.get(&job).unwrap();
            cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });

            if job == *self {
                cycle.reverse();

                // This is the end of the cycle. The span entry we included was
                // for the usage of the cycle itself, and not part of the cycle,
                // so replace it with the span which caused the cycle to form.
                cycle[0].span = span;
                // Find out why the cycle itself was used.
                let usage = info
                    .job
                    .parent
                    .as_ref()
                    .map(|parent| (info.job.span, parent.query(&query_map)));
                return CycleError { usage, cycle };
            }

            current_job = info.job.parent;
        }

        panic!("did not find a cycle")
    }
}

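// Worked example (hypothetical queries `A`, `B`, `C`): with the single-threaded
// query stack `A -> B -> C`, suppose `C` requests `B` again. Calling
// `B.find_cycle_in_stack(map, &Some(C), span)` walks the parent chain starting
// at `C`, collecting `C` then `B`; on reaching `B == *self` it reverses the
// collected entries to `[B, C]`, patches `cycle[0].span` to the span of `C`'s
// re-request, and reports `A` (B's parent) as the `usage` that led into the
// cycle.
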
#[cfg(parallel_compiler)]
struct QueryWaiter {
    query: Option<QueryJobId>,
    condvar: Condvar,
    span: Span,
    cycle: Lock<Option<CycleError>>,
}

#[cfg(parallel_compiler)]
impl QueryWaiter {
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}

#[cfg(parallel_compiler)]
struct QueryLatchInfo {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter>>,
}

#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch {
    info: Lrc<Mutex<QueryLatchInfo>>,
}

#[cfg(parallel_compiler)]
impl QueryLatch {
    fn new() -> Self {
        QueryLatch {
            info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
        }
    }

    /// Waits for the query job to complete.
    pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
        let waiter =
            Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
        self.wait_on_inner(&waiter);
        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
        // although another thread may still have a Lrc reference so we cannot
        // use Lrc::get_mut
        let mut cycle = waiter.cycle.lock();
        match cycle.take() {
            None => Ok(()),
            Some(cycle) => Err(cycle),
        }
    }

    /// Blocks the current thread on this latch until it is set or a cycle is detected.
    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this thread
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // getting the self.info lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`.
            mem::drop(info);
            jobserver::acquire_thread();
        }
    }

    /// Sets the latch and resumes all waiters on it.
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters.
        info.waiters.remove(waiter)
    }
}

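// Sketch of the latch protocol as used above: the executing thread creates the
// latch lazily via `QueryJob::latch` and stores it in its `QueryJob`; a thread
// needing that query's result calls `wait_on`, which registers a `QueryWaiter`
// and blocks on the waiter's condvar. When execution finishes,
// `QueryJob::signal_complete` calls `set`, marking the latch complete and
// notifying all registered waiters. Alternatively, the deadlock handler may
// pull one waiter out with `extract_waiter`, store a `CycleError` in its
// `cycle` slot, and wake it so that `wait_on` returns `Err(cycle)`.
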
/// A resumable waiter of a query. The `usize` is the index into `waiters` in the query's latch.
#[cfg(parallel_compiler)]
type Waiter = (QueryJobId, usize);

/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query`
/// and a span indicating the reason the query waited on `query`.
/// If `visit` returns `Some`, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns `Some(Some(Waiter))` which has the
/// required information to resume the waiter.
/// If all `visit` calls return `None`, this function also returns `None`.
#[cfg(parallel_compiler)]
fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
where
    F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack.
    if let Some(parent) = query.parent(query_map) {
        if let Some(cycle) = visit(query.span(query_map), parent) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable.
    if let Some(latch) = query.latch(query_map) {
        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
            if let Some(waiter_query) = waiter.query {
                if visit(waiter.span, waiter_query).is_some() {
                    // Return a value which indicates that this waiter can be resumed.
                    return Some(Some((query, i)));
                }
            }
        }
    }

    None
}

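// Example of the edges this walks (hypothetical queries `A`, `B`, `C`): if `B`
// was started by `A` (so `A` is `B`'s parent, implicitly waiting on `B` from
// the same stack) and `C` blocks on `B` through `B`'s latch, then
// `visit_waiters(map, B, visit)` first calls `visit` for the non-resumable
// parent edge to `A`, then for the resumable latch edge to `C`. Only the
// latter can yield a `Waiter` value `(B, i)`, where `i` indexes `C`'s entry in
// `B`'s waiter list.
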
/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially `DUMMY_SP`.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check(
    query_map: &QueryMap,
    query: QueryJobId,
    span: Span,
    stack: &mut Vec<(Span, QueryJobId)>,
    visited: &mut FxHashSet<QueryJobId>,
) -> Option<Option<Waiter>> {
    if !visited.insert(query) {
        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
            // We detected a query cycle, fix up the initial span and return Some

            // Remove previous stack entries
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause
            stack[0].0 = span;
            Some(None)
        } else {
            None
        };
    }

    // The query was not visited before; mark it as visited and add it to the stack.
    stack.push((span, query));

    // Visit all the waiters
    let r = visit_waiters(query_map, query, |span, successor| {
        cycle_check(query_map, successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle
    if r.is_none() {
        stack.pop();
    }

    r
}

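// Worked example (hypothetical queries): suppose `B` waits on `A`, `C` waits
// on `B`, and `A` waits on `C`. Starting `cycle_check` at `A` grows `stack` to
// `[(DUMMY_SP, A), (s_b, B), (s_c, C)]`, where `s_b`/`s_c` are the spans of
// the waiting edges. Visiting `A` again fails `visited.insert`, `position`
// finds `A` at index 0, `drain(0..0)` removes nothing, `stack[0].0` becomes
// the span of `A`'s wait on `C`, and `Some(None)` propagates up. Had the DFS
// started at a query leading *into* the cycle rather than on it, `drain(0..p)`
// would strip those lead-in entries so only the cycle itself remains.
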
/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root(
    query_map: &QueryMap,
    query: QueryJobId,
    visited: &mut FxHashSet<QueryJobId>,
) -> bool {
    // We already visited this or we're deliberately ignoring it
    if !visited.insert(query) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true
    if query.parent(query_map).is_none() {
        return true;
    }

    visit_waiters(query_map, query, |_, successor| {
        connected_to_root(query_map, successor, visited).then_some(None)
    })
    .is_some()
}

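// The `then_some(None)` above encodes "found a path to the root" as
// `Some(None)`, which makes `visit_waiters` stop and return `Some(..)`;
// `is_some()` then collapses that back into a plain `bool`.
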
// Deterministically pick a query from a list.
#[cfg(parallel_compiler)]
fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
where
    F: Fn(&T) -> (Span, QueryJobId),
{
    // Deterministically pick an entry point
    // FIXME: Sort this instead
    queries
        .iter()
        .min_by_key(|v| {
            let (span, query) = f(v);
            let hash = query.query(query_map).hash;
            // Prefer entry points which have valid spans for nicer error messages
            // We add an integer to the tuple ensuring that entry points
            // with valid spans are picked first
            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
            (span_cmp, hash)
        })
        .unwrap()
}

/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle(
    query_map: &QueryMap,
    jobs: &mut Vec<QueryJobId>,
    wakelist: &mut Vec<Lrc<QueryWaiter>>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`
    if let Some(waiter) =
        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
    {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee
        spans.rotate_right(1);
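        // Worked example of this shift (hypothetical three-query cycle): after
        // the reversal above, `spans[i]` is the span with which `queries[i]`
        // waited on `queries[(i + 1) % n]`. Rotating right by one instead pairs
        // each query with the span of the edge pointing *at* it, i.e. the
        // reason it was required, which is what `QueryInfo::span` documents.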

        // Zip them back together
        let mut stack: Vec<_> = iter::zip(spans, queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle
        let entry_points = stack
            .iter()
            .filter_map(|&(span, query)| {
                if query.parent(query_map).is_none() {
                    // This query is connected to the root (it has no query parent)
                    Some((span, query, None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters who lead to the root
                    visit_waiters(query_map, query, |span, waiter| {
                        // Mark all the other queries in the cycle as already visited
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));

                        if connected_to_root(query_map, waiter, &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user
                        let waiter = *pick_query(query_map, &waiters, |s| *s);
                        Some((span, query, Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();

        // Deterministically pick an entry point
        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));

        // Shift the stack so that our entry point is first
        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));

        // Create the cycle error
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
                .collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited using a query latch
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume
        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume
        wakelist.push(waiter);

        true
    } else {
        false
    }
}

/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();

    let mut found_cycle = false;

    while jobs.len() > 0 {
        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}

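// Note on wiring (simplified sketch, not shown in this file): rustc registers
// `deadlock` as the deadlock handler of its Rayon thread pool, so it runs once
// every worker thread has blocked via `mark_blocked`. At that point no query
// can make progress, so the handler can safely traverse all active jobs and
// latches before waking the chosen waiters.
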
#[inline(never)]
#[cold]
pub(crate) fn report_cycle<'a>(
    sess: &'a Session,
    CycleError { usage, cycle: stack }: CycleError,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
    assert!(!stack.is_empty());

    let span = stack[0].query.default_span(stack[1 % stack.len()].span);

    let mut cycle_stack = Vec::new();

    use crate::error::StackCount;
    let stack_count = if stack.len() == 1 { StackCount::Single } else { StackCount::Multiple };

    for i in 1..stack.len() {
        let query = &stack[i].query;
        let span = query.default_span(stack[(i + 1) % stack.len()].span);
        cycle_stack.push(CycleStack { span, desc: query.description.to_owned() });
    }

    let mut cycle_usage = None;
    if let Some((span, query)) = usage {
        cycle_usage = Some(crate::error::CycleUsage {
            span: query.default_span(span),
            usage: query.description,
        });
    }

    let alias = if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TyAlias)) {
        Some(crate::error::Alias::Ty)
    } else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
        Some(crate::error::Alias::Trait)
    } else {
        None
    };

    let cycle_diag = crate::error::Cycle {
        span,
        cycle_stack,
        stack_bottom: stack[0].query.description.to_owned(),
        alias,
        cycle_usage,
        stack_count,
    };

    cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
}

pub fn print_query_stack<CTX: QueryContext>(
    tcx: CTX,
    mut current_query: Option<QueryJobId>,
    handler: &Handler,
    num_frames: Option<usize>,
) -> usize {
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `Handler` may be in a weird
    // state if it was responsible for triggering the panic.
    let mut i = 0;
    let query_map = tcx.try_collect_active_jobs();

    while let Some(query) = current_query {
        if Some(i) == num_frames {
            break;
        }
        let Some(query_info) = query_map.as_ref().and_then(|map| map.get(&query)) else {
            break;
        };
        let mut diag = Diagnostic::new(
            Level::FailureNote,
            &format!("#{} [{}] {}", i, query_info.query.name, query_info.query.description),
        );
        diag.span = query_info.job.span.into();
        handler.force_print_diagnostic(diag);

        current_query = query_info.job.parent;
        i += 1;
    }

    i
}