// compiler/rustc_query_system/src/query/job.rs
// (rustc 1.68.2 — recovered from a git-blame view; blame annotations stripped)
9c376795 1use crate::dep_graph::DepKind;
f2b60f7d 2use crate::error::CycleStack;
ba9703b0 3use crate::query::plumbing::CycleError;
5e7ed085 4use crate::query::{QueryContext, QueryStackFrame};
9c376795 5use core::marker::PhantomData;
9fa01778 6
74b04a01 7use rustc_data_structures::fx::FxHashMap;
2b03887a
FG
8use rustc_errors::{
9 Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, Level,
10};
f2b60f7d 11use rustc_hir::def::DefKind;
2b03887a 12use rustc_session::Session;
dfeec247 13use rustc_span::Span;
9fa01778 14
29967ef6 15use std::hash::Hash;
5099ac24 16use std::num::NonZeroU64;
b7449926 17
9fa01778 18#[cfg(parallel_compiler)]
94b46f34 19use {
dfeec247 20 parking_lot::{Condvar, Mutex},
416331ca 21 rustc_data_structures::fx::FxHashSet,
416331ca 22 rustc_data_structures::sync::Lock,
74b04a01 23 rustc_data_structures::sync::Lrc,
dfeec247 24 rustc_data_structures::{jobserver, OnDrop},
416331ca 25 rustc_rayon_core as rayon_core,
dfeec247 26 rustc_span::DUMMY_SP,
9c376795
FG
27 std::iter,
28 std::process,
94b46f34
XL
29};
30
/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo<D: DepKind> {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    /// The query itself (its name and key), as shown in cycle errors and query stacks.
    pub query: QueryStackFrame<D>,
}

/// Maps each active query job id to its stack frame and job state.
pub type QueryMap<D> = FxHashMap<QueryJobId, QueryJobInfo<D>>;
74b04a01 40
/// A value uniquely identifying an active query job.
// `NonZeroU64` gives `Option<QueryJobId>` a niche, so it is the same size as the id itself.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId(pub NonZeroU64);
74b04a01 44
impl QueryJobId {
    /// Returns a clone of this job's query stack frame.
    /// Panics if the job is not present in `map`.
    fn query<D: DepKind>(self, map: &QueryMap<D>) -> QueryStackFrame<D> {
        map.get(&self).unwrap().query.clone()
    }

    /// Returns the span at which this job was required.
    /// Panics if the job is not present in `map`.
    #[cfg(parallel_compiler)]
    fn span<D: DepKind>(self, map: &QueryMap<D>) -> Span {
        map.get(&self).unwrap().job.span
    }

    /// Returns the parent job which created this one (and implicitly waits on it), if any.
    /// Panics if the job is not present in `map`.
    #[cfg(parallel_compiler)]
    fn parent<D: DepKind>(self, map: &QueryMap<D>) -> Option<QueryJobId> {
        map.get(&self).unwrap().job.parent
    }

    /// Returns the latch other threads wait on for this job, if one was ever created.
    /// Panics if the job is not present in `map`.
    #[cfg(parallel_compiler)]
    fn latch<D: DepKind>(self, map: &QueryMap<D>) -> Option<&QueryLatch<D>> {
        map.get(&self).unwrap().job.latch.as_ref()
    }
}
65
/// Everything stored for an active query job: its stack frame plus its job state.
#[derive(Clone)]
pub struct QueryJobInfo<D: DepKind> {
    /// The query (name and key) being executed by this job.
    pub query: QueryStackFrame<D>,
    /// The runtime state of the job (span, parent, latch).
    pub job: QueryJob<D>,
}
71
/// Represents an active query job.
#[derive(Clone)]
pub struct QueryJob<D: DepKind> {
    /// Unique id of this job among all currently active jobs.
    pub id: QueryJobId,

    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<QueryJobId>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: Option<QueryLatch<D>>,
    // Keeps the `D` parameter used even when `latch` is compiled out
    // (non-parallel builds), so the struct definition stays cfg-independent.
    spooky: core::marker::PhantomData<D>,
}
88
9c376795 89impl<D: DepKind> QueryJob<D> {
9fa01778 90 /// Creates a new query job.
064997fb 91 #[inline]
5099ac24 92 pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
94b46f34 93 QueryJob {
74b04a01
XL
94 id,
95 span,
94b46f34 96 parent,
9fa01778 97 #[cfg(parallel_compiler)]
74b04a01 98 latch: None,
9c376795 99 spooky: PhantomData,
94b46f34
XL
100 }
101 }
102
9fa01778 103 #[cfg(parallel_compiler)]
9c376795 104 pub(super) fn latch(&mut self) -> QueryLatch<D> {
74b04a01
XL
105 if self.latch.is_none() {
106 self.latch = Some(QueryLatch::new());
107 }
108 self.latch.as_ref().unwrap().clone()
94b46f34
XL
109 }
110
74b04a01
XL
111 /// Signals to waiters that the query is complete.
112 ///
113 /// This does nothing for single threaded rustc,
114 /// as there are no concurrent jobs which could be waiting on us
064997fb 115 #[inline]
74b04a01
XL
116 pub fn signal_complete(self) {
117 #[cfg(parallel_compiler)]
f9f354fc
XL
118 {
119 if let Some(latch) = self.latch {
120 latch.set();
121 }
122 }
74b04a01
XL
123 }
124}
125
5099ac24 126impl QueryJobId {
c295e0f8
XL
127 #[cold]
128 #[inline(never)]
2b03887a 129 #[cfg(not(parallel_compiler))]
9c376795 130 pub(super) fn find_cycle_in_stack<D: DepKind>(
29967ef6 131 &self,
9c376795 132 query_map: QueryMap<D>,
5099ac24 133 current_job: &Option<QueryJobId>,
29967ef6 134 span: Span,
9c376795 135 ) -> CycleError<D> {
29967ef6 136 // Find the waitee amongst `current_job` parents
94b46f34 137 let mut cycle = Vec::new();
29967ef6 138 let mut current_job = Option::clone(current_job);
94b46f34
XL
139
140 while let Some(job) = current_job {
74b04a01 141 let info = query_map.get(&job).unwrap();
94222f64 142 cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });
94b46f34 143
17df50a5 144 if job == *self {
0bf4aa26
XL
145 cycle.reverse();
146
94b46f34
XL
147 // This is the end of the cycle
148 // The span entry we included was for the usage
149 // of the cycle itself, and not part of the cycle
150 // Replace it with the span which caused the cycle to form
151 cycle[0].span = span;
152 // Find out why the cycle itself was used
74b04a01
XL
153 let usage = info
154 .job
155 .parent
156 .as_ref()
94222f64 157 .map(|parent| (info.job.span, parent.query(&query_map)));
0731742a 158 return CycleError { usage, cycle };
94b46f34
XL
159 }
160
74b04a01 161 current_job = info.job.parent;
94b46f34
XL
162 }
163
164 panic!("did not find a cycle")
165 }
2b03887a
FG
166
167 #[cold]
168 #[inline(never)]
9c376795
FG
169 pub fn try_find_layout_root<D: DepKind>(
170 &self,
171 query_map: QueryMap<D>,
172 ) -> Option<(QueryJobInfo<D>, usize)> {
2b03887a
FG
173 let mut last_layout = None;
174 let mut current_id = Some(*self);
175 let mut depth = 0;
176
177 while let Some(id) = current_id {
178 let info = query_map.get(&id).unwrap();
9c376795
FG
179 // FIXME: This string comparison should probably not be done.
180 if format!("{:?}", info.query.dep_kind) == "layout_of" {
2b03887a
FG
181 depth += 1;
182 last_layout = Some((info.clone(), depth));
183 }
184 current_id = info.job.parent;
185 }
186 last_layout
187 }
94b46f34
XL
188}
189
/// A thread blocked on a query latch, plus enough information to report
/// or break a query cycle involving it.
#[cfg(parallel_compiler)]
struct QueryWaiter<D: DepKind> {
    /// The query the blocked thread was executing, if any.
    query: Option<QueryJobId>,
    /// Condvar the blocked thread sleeps on until it is notified.
    condvar: Condvar,
    /// Why the waiter needed the query it is waiting on.
    span: Span,
    /// Slot where deadlock handling stores a cycle error before resuming the thread.
    cycle: Lock<Option<CycleError<D>>>,
}
197
#[cfg(parallel_compiler)]
impl<D: DepKind> QueryWaiter<D> {
    /// Resumes the blocked thread: first tells rayon the thread is runnable
    /// again, then signals its condvar.
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}
205
/// Mutable state behind a `QueryLatch`: whether the owning query finished
/// and the list of threads waiting on it.
#[cfg(parallel_compiler)]
struct QueryLatchInfo<D: DepKind> {
    /// Set once the owning query job completes; waiters registered after
    /// this point return immediately.
    complete: bool,
    /// Threads currently blocked on this latch.
    waiters: Vec<Lrc<QueryWaiter<D>>>,
}
211
/// A latch that other threads block on until the owning query job completes.
/// Cloning is cheap and shares the same underlying state.
#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch<D: DepKind> {
    info: Lrc<Mutex<QueryLatchInfo<D>>>,
}
217
#[cfg(parallel_compiler)]
impl<D: DepKind> QueryLatch<D> {
    /// Creates a fresh, un-set latch with no waiters.
    fn new() -> Self {
        QueryLatch {
            info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
        }
    }

    /// Awaits for the query job to complete.
    ///
    /// Returns `Err` if deadlock detection broke a cycle through this waiter,
    /// in which case the cycle error was deposited in `waiter.cycle`.
    pub(super) fn wait_on(
        &self,
        query: Option<QueryJobId>,
        span: Span,
    ) -> Result<(), CycleError<D>> {
        let waiter =
            Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
        self.wait_on_inner(&waiter);
        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
        // although another thread may still have a Lrc reference so we cannot
        // use Lrc::get_mut
        let mut cycle = waiter.cycle.lock();
        match cycle.take() {
            None => Ok(()),
            Some(cycle) => Err(cycle),
        }
    }

    /// Awaits the caller on this latch by blocking the current thread.
    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this thread
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // getting the self.info lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            // `wait` atomically releases the `info` lock while sleeping and
            // reacquires it before returning.
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`
            drop(info);
            jobserver::acquire_thread();
        }
    }

    /// Sets the latch and resumes all waiters on it
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters
        info.waiters.remove(waiter)
    }
}
287
/// A resumable waiter of a query. The usize is the index into waiters in the query's latch
#[cfg(parallel_compiler)]
type Waiter = (QueryJobId, usize);
94b46f34
XL
291
/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
/// and a span indicating the reason the query waited on `query_ref`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<F, D>(
    query_map: &QueryMap<D>,
    query: QueryJobId,
    mut visit: F,
) -> Option<Option<Waiter>>
where
    F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
    D: DepKind,
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack
    if let Some(parent) = query.parent(query_map) {
        if let Some(cycle) = visit(query.span(query_map), parent) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable
    // NOTE: the latch's lock is held for the duration of this loop, so
    // `visit` runs with that lock held.
    if let Some(latch) = query.latch(query_map) {
        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
            if let Some(waiter_query) = waiter.query {
                if visit(waiter.span, waiter_query).is_some() {
                    // Return a value which indicates that this waiter can be resumed
                    return Some(Some((query, i)));
                }
            }
        }
    }

    None
}
332
/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check<D: DepKind>(
    query_map: &QueryMap<D>,
    query: QueryJobId,
    span: Span,
    stack: &mut Vec<(Span, QueryJobId)>,
    visited: &mut FxHashSet<QueryJobId>,
) -> Option<Option<Waiter>> {
    if !visited.insert(query) {
        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
            // We detected a query cycle, fix up the initial span and return Some

            // Remove previous stack entries
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause
            stack[0].0 = span;
            Some(None)
        } else {
            // Already visited on some other path which did not lead to a cycle.
            None
        };
    }

    // A query marked as visited is added to the stack
    stack.push((span, query));

    // Visit all the waiters
    let r = visit_waiters(query_map, query, |span, successor| {
        cycle_check(query_map, successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle
    if r.is_none() {
        stack.pop();
    }

    r
}
374
/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root<D: DepKind>(
    query_map: &QueryMap<D>,
    query: QueryJobId,
    visited: &mut FxHashSet<QueryJobId>,
) -> bool {
    // Already seen, or deliberately masked out by the caller: not a new path.
    if !visited.insert(query) {
        return false;
    }

    match query.parent(query_map) {
        // No parent query means this job was started from compiler root code.
        None => true,
        // Otherwise probe every waiter; any probe that reaches a root
        // (signalled by returning `Some`) makes this query root-connected.
        Some(_) => visit_waiters(query_map, query, |_, successor| {
            if connected_to_root(query_map, successor, visited) { Some(None) } else { None }
        })
        .is_some(),
    }
}
399
// Deterministically pick a query from a list
#[cfg(parallel_compiler)]
fn pick_query<'a, T, F, D>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
where
    F: Fn(&T) -> (Span, QueryJobId),
    D: DepKind,
{
    // Deterministically pick an entry point
    // FIXME: Sort this instead
    queries
        .iter()
        .min_by_key(|v| {
            let (span, query) = f(v);
            let hash = query.query(query_map).hash;
            // Prefer entry points which have valid spans for nicer error messages
            // We add an integer to the tuple ensuring that entry points
            // with valid spans are picked first
            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
            (span_cmp, hash)
        })
        .unwrap()
}
422
94b46f34
XL
/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<D: DepKind>(
    query_map: &QueryMap<D>,
    jobs: &mut Vec<QueryJobId>,
    wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`
    if let Some(waiter) =
        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
    {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee
        spans.rotate_right(1);

        // Zip them back together
        let mut stack: Vec<_> = iter::zip(spans, queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle
        let entry_points = stack
            .iter()
            .filter_map(|&(span, query)| {
                if query.parent(query_map).is_none() {
                    // This query is connected to the root (it has no query parent)
                    Some((span, query, None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters who lead to the root
                    visit_waiters(query_map, query, |span, waiter| {
                        // Mark all the other queries in the cycle as already visited
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));

                        if connected_to_root(query_map, waiter, &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user
                        let waiter = *pick_query(query_map, &waiters, |s| *s);
                        Some((span, query, Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();

        // Deterministically pick an entry point
        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));

        // Shift the stack so that our entry point is first
        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));

        // Create the cycle error
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
                .collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited using a query latch
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume
        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume
        wakelist.push(waiter);

        true
    } else {
        false
    }
}
527
94b46f34
XL
/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Registry) {
    // If this handler itself panics, abort rather than hang every blocked thread.
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    // `QueryJobId` is `Copy`, so `copied` is the precise (and cheaper-looking)
    // choice over `cloned`.
    let mut jobs: Vec<QueryJobId> = query_map.keys().copied().collect();

    let mut found_cycle = false;

    // Each `remove_cycle` call pops at least one job, so this terminates.
    // (`!is_empty()` is the idiomatic form of `len() > 0`.)
    while !jobs.is_empty() {
        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}
6a06907d
XL
567
568#[inline(never)]
569#[cold]
9c376795 570pub(crate) fn report_cycle<'a, D: DepKind>(
6a06907d 571 sess: &'a Session,
9c376795 572 CycleError { usage, cycle: stack }: &CycleError<D>,
5e7ed085 573) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
6a06907d
XL
574 assert!(!stack.is_empty());
575
064997fb 576 let span = stack[0].query.default_span(stack[1 % stack.len()].span);
f2b60f7d
FG
577
578 let mut cycle_stack = Vec::new();
579
580 use crate::error::StackCount;
581 let stack_count = if stack.len() == 1 { StackCount::Single } else { StackCount::Multiple };
6a06907d
XL
582
583 for i in 1..stack.len() {
584 let query = &stack[i].query;
064997fb 585 let span = query.default_span(stack[(i + 1) % stack.len()].span);
f2b60f7d 586 cycle_stack.push(CycleStack { span, desc: query.description.to_owned() });
94222f64 587 }
6a06907d 588
f2b60f7d 589 let mut cycle_usage = None;
2b03887a 590 if let Some((span, ref query)) = *usage {
f2b60f7d
FG
591 cycle_usage = Some(crate::error::CycleUsage {
592 span: query.default_span(span),
2b03887a 593 usage: query.description.to_string(),
f2b60f7d 594 });
6a06907d
XL
595 }
596
f2b60f7d
FG
597 let alias = if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TyAlias)) {
598 Some(crate::error::Alias::Ty)
599 } else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
600 Some(crate::error::Alias::Trait)
601 } else {
602 None
603 };
604
605 let cycle_diag = crate::error::Cycle {
606 span,
607 cycle_stack,
608 stack_bottom: stack[0].query.description.to_owned(),
609 alias,
610 cycle_usage: cycle_usage,
611 stack_count,
612 };
613
614 cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
6a06907d
XL
615}
616
487cf647
FG
/// Prints the active query stack starting at `current_query`, one
/// `FailureNote` per frame, stopping after `num_frames` entries if given.
/// Returns the number of frames printed.
pub fn print_query_stack<Qcx: QueryContext>(
    qcx: Qcx,
    mut current_query: Option<QueryJobId>,
    handler: &Handler,
    num_frames: Option<usize>,
) -> usize {
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `Handler` may be in a weird
    // state if it was responsible for triggering the panic.
    let mut i = 0;
    let query_map = qcx.try_collect_active_jobs();

    while let Some(query) = current_query {
        if Some(i) == num_frames {
            break;
        }
        // Stop (rather than panic) if the job map is unavailable or stale.
        let Some(query_info) = query_map.as_ref().and_then(|map| map.get(&query)) else {
            break;
        };
        let mut diag = Diagnostic::new(
            Level::FailureNote,
            &format!("#{} [{:?}] {}", i, query_info.query.dep_kind, query_info.query.description),
        );
        diag.span = query_info.job.span.into();
        handler.force_print_diagnostic(diag);

        // Walk up to the parent frame.
        current_query = query_info.job.parent;
        i += 1;
    }

    i
}