use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};

use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{
    Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, Level,
};
use rustc_hir::def::DefKind;
use rustc_session::Session;
use rustc_span::Span;

use std::hash::Hash;
use std::num::NonZeroU64;

#[cfg(parallel_compiler)]
use {
    parking_lot::{Condvar, Mutex},
    rustc_data_structures::fx::FxHashSet,
    rustc_data_structures::sync::Lock,
    rustc_data_structures::sync::Lrc,
    rustc_data_structures::{jobserver, OnDrop},
    rustc_rayon_core as rayon_core,
    rustc_span::DUMMY_SP,
    std::iter::{self, FromIterator},
    std::{mem, process},
};
/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    pub query: QueryStackFrame,
}

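/// All currently active query jobs, keyed by job id. Snapshots of this map
/// (collected via `try_collect_active_jobs`, see `print_query_stack` below)
/// are what the cycle-detection and stack-printing code walks.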
pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;

/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId(pub NonZeroU64);

impl QueryJobId {
    fn query(self, map: &QueryMap) -> QueryStackFrame {
        map.get(&self).unwrap().query.clone()
    }

    #[cfg(parallel_compiler)]
    fn span(self, map: &QueryMap) -> Span {
        map.get(&self).unwrap().job.span
    }

    #[cfg(parallel_compiler)]
    fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
        map.get(&self).unwrap().job.parent
    }

    #[cfg(parallel_compiler)]
    fn latch<'a>(self, map: &'a QueryMap) -> Option<&'a QueryLatch> {
        map.get(&self).unwrap().job.latch.as_ref()
    }
}

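/// Information about an active query job: the query's stack frame together
/// with the job's bookkeeping state.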
#[derive(Clone)]
pub struct QueryJobInfo {
    pub query: QueryStackFrame,
    pub job: QueryJob,
}

/// Represents an active query job.
#[derive(Clone)]
pub struct QueryJob {
    pub id: QueryJobId,

    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<QueryJobId>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: Option<QueryLatch>,
}

impl QueryJob {
    /// Creates a new query job.
    #[inline]
    pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
        QueryJob {
            id,
            span,
            parent,
            #[cfg(parallel_compiler)]
            latch: None,
        }
    }

    #[cfg(parallel_compiler)]
    pub(super) fn latch(&mut self) -> QueryLatch {
        if self.latch.is_none() {
            self.latch = Some(QueryLatch::new());
        }
        self.latch.as_ref().unwrap().clone()
    }

    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single-threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us.
    #[inline]
    pub fn signal_complete(self) {
        #[cfg(parallel_compiler)]
        {
            if let Some(latch) = self.latch {
                latch.set();
            }
        }
    }
}

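// Everything below is cold-path code for diagnosing cycles and deadlocks. In
// the non-parallel compiler a cycle can only occur within a single query
// stack, so it is found by simply walking up the chain of parent jobs.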
impl QueryJobId {
    #[cold]
    #[inline(never)]
    #[cfg(not(parallel_compiler))]
    pub(super) fn find_cycle_in_stack(
        &self,
        query_map: QueryMap,
        current_job: &Option<QueryJobId>,
        span: Span,
    ) -> CycleError {
        // Find the waitee amongst `current_job`'s parents.
        let mut cycle = Vec::new();
        let mut current_job = Option::clone(current_job);

        while let Some(job) = current_job {
            let info = query_map.get(&job).unwrap();
            cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });

            if job == *self {
                cycle.reverse();

                // This is the end of the cycle. The span entry we included was
                // for the usage of the cycle itself, and not part of the cycle.
                // Replace it with the span which caused the cycle to form.
                cycle[0].span = span;
                // Find out why the cycle itself was used.
                let usage = info
                    .job
                    .parent
                    .as_ref()
                    .map(|parent| (info.job.span, parent.query(&query_map)));
                return CycleError { usage, cycle };
            }

            current_job = info.job.parent;
        }

        panic!("did not find a cycle")
    }

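    /// Walks up the chain of parent jobs looking for `layout_of` frames.
    /// Returns the outermost `layout_of` job encountered, together with how
    /// many `layout_of` frames were seen up to and including it.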
    #[cold]
    #[inline(never)]
    pub fn try_find_layout_root(&self, query_map: QueryMap) -> Option<(QueryJobInfo, usize)> {
        let mut last_layout = None;
        let mut current_id = Some(*self);
        let mut depth = 0;

        while let Some(id) = current_id {
            let info = query_map.get(&id).unwrap();
            if info.query.name == "layout_of" {
                depth += 1;
                last_layout = Some((info.clone(), depth));
            }
            current_id = info.job.parent;
        }
        last_layout
    }
}

#[cfg(parallel_compiler)]
struct QueryWaiter {
    query: Option<QueryJobId>,
    condvar: Condvar,
    span: Span,
    cycle: Lock<Option<CycleError>>,
}

#[cfg(parallel_compiler)]
impl QueryWaiter {
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}

#[cfg(parallel_compiler)]
struct QueryLatchInfo {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter>>,
}

#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch {
    info: Lrc<Mutex<QueryLatchInfo>>,
}

#[cfg(parallel_compiler)]
impl QueryLatch {
    fn new() -> Self {
        QueryLatch {
            info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
        }
    }

    /// Waits for the query job to complete.
    pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
        let waiter =
            Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
        self.wait_on_inner(&waiter);
        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter,
        // although another thread may still have a Lrc reference, so we cannot
        // use Lrc::get_mut.
        let mut cycle = waiter.cycle.lock();
        match cycle.take() {
            None => Ok(()),
            Some(cycle) => Err(cycle),
        }
    }

    /// Waits on this latch by blocking the current thread.
    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter onto the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this thread,
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // taking the `self.info` lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`.
            mem::drop(info);
            jobserver::acquire_thread();
        }
    }

    /// Sets the latch and resumes all waiters on it.
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters.
        info.waiters.remove(waiter)
    }
}

/// A resumable waiter of a query. The `usize` is the index into `waiters` in the query's latch.
#[cfg(parallel_compiler)]
type Waiter = (QueryJobId, usize);
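
// The waiter-visiting functions below encode their result as a nested
// `Option`:
// - `None`: no cycle was found along this path; keep searching.
// - `Some(None)`: a cycle was found, reachable only through non-resumable
//   (parent-stack) edges.
// - `Some(Some(waiter))`: a cycle was found, and `waiter` identifies a latch
//   waiter that can be resumed to break it.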

/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query`
/// and a span indicating the reason the query waited on `query`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
where
    F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack.
    if let Some(parent) = query.parent(query_map) {
        if let Some(cycle) = visit(query.span(query_map), parent) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable.
    if let Some(latch) = query.latch(query_map) {
        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
            if let Some(waiter_query) = waiter.query {
                if visit(waiter.span, waiter_query).is_some() {
                    // Return a value which indicates that this waiter can be resumed.
                    return Some(Some((query, i)));
                }
            }
        }
    }

    None
}

/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check(
    query_map: &QueryMap,
    query: QueryJobId,
    span: Span,
    stack: &mut Vec<(Span, QueryJobId)>,
    visited: &mut FxHashSet<QueryJobId>,
) -> Option<Option<Waiter>> {
    if !visited.insert(query) {
        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
            // We detected a query cycle; fix up the initial span and return Some.

            // Remove previous stack entries.
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause.
            stack[0].0 = span;
            Some(None)
        } else {
            None
        };
    }

    // The query was just marked as visited; add it to the stack.
    stack.push((span, query));

    // Visit all the waiters.
    let r = visit_waiters(query_map, query, |span, successor| {
        cycle_check(query_map, successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle.
    if r.is_none() {
        stack.pop();
    }

    r
}
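
// Sketch of a run: suppose B waits on A's latch, C waits on B's latch, and A
// waits on C's latch. Starting from A, `cycle_check` pushes (span, A),
// (span, B), (span, C) and then revisits A; everything before the repeated
// entry is drained so `stack` holds exactly the cycle, the first span is
// replaced with the cycle cause, and `Some(None)` propagates back up, being
// replaced with `Some(Some(waiter))` as it passes back through a resumable
// (latch) edge.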

/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root(
    query_map: &QueryMap,
    query: QueryJobId,
    visited: &mut FxHashSet<QueryJobId>,
) -> bool {
    // We already visited this or we're deliberately ignoring it.
    if !visited.insert(query) {
        return false;
    }

    // This query is connected to the root (it has no query parent); return true.
    if query.parent(query_map).is_none() {
        return true;
    }

    visit_waiters(query_map, query, |_, successor| {
        connected_to_root(query_map, successor, visited).then_some(None)
    })
    .is_some()
}

// Deterministically pick a query from a list.
#[cfg(parallel_compiler)]
fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
where
    F: Fn(&T) -> (Span, QueryJobId),
{
    // Deterministically pick an entry point.
    // FIXME: Sort this instead.
    queries
        .iter()
        .min_by_key(|v| {
            let (span, query) = f(v);
            let hash = query.query(query_map).hash;
            // Prefer entry points which have valid spans for nicer error messages.
            // We add an integer to the tuple, ensuring that entry points
            // with valid spans are picked first.
            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
            (span_cmp, hash)
        })
        .unwrap()
}

/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle(
    query_map: &QueryMap,
    jobs: &mut Vec<QueryJobId>,
    wakelist: &mut Vec<Lrc<QueryWaiter>>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`.
    if let Some(waiter) =
        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
    {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries.
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee.
        spans.rotate_right(1);

        // Zip them back together.
        let mut stack: Vec<_> = iter::zip(spans, queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at.
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle.
        let entry_points = stack
            .iter()
            .filter_map(|&(span, query)| {
                if query.parent(query_map).is_none() {
                    // This query is connected to the root (it has no query parent).
                    Some((span, query, None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters which lead to the root.
                    visit_waiters(query_map, query, |span, waiter| {
                        // Mark all the other queries in the cycle as already visited.
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));

                        if connected_to_root(query_map, waiter, &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user.
                        let waiter = *pick_query(query_map, &waiters, |s| *s);
                        Some((span, query, Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();

        // Deterministically pick an entry point.
        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));

        // Shift the stack so that our entry point is first.
        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));

        // Create the cycle error.
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
                .collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited using a query latch.
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume.
        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed.
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume.
        wakelist.push(waiter);

        true
    } else {
        false
    }
}

/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found, it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();

    let mut found_cycle = false;

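    // Repeatedly search for a cycle; every call to `remove_cycle` removes at
    // least one entry from `jobs` (the popped starting query, plus any cycle
    // members found), so this loop terminates.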
    while jobs.len() > 0 {
        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return.
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}

#[inline(never)]
#[cold]
pub(crate) fn report_cycle<'a>(
    sess: &'a Session,
    CycleError { usage, cycle: stack }: &CycleError,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
    assert!(!stack.is_empty());

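    // `1 % stack.len()` wraps to 0 when the cycle has a single entry, so the
    // head of the stack falls back to its own span in that case.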
    let span = stack[0].query.default_span(stack[1 % stack.len()].span);

    let mut cycle_stack = Vec::new();

    use crate::error::StackCount;
    let stack_count = if stack.len() == 1 { StackCount::Single } else { StackCount::Multiple };

    for i in 1..stack.len() {
        let query = &stack[i].query;
        let span = query.default_span(stack[(i + 1) % stack.len()].span);
        cycle_stack.push(CycleStack { span, desc: query.description.to_owned() });
    }

    let mut cycle_usage = None;
    if let Some((span, ref query)) = *usage {
        cycle_usage = Some(crate::error::CycleUsage {
            span: query.default_span(span),
            usage: query.description.to_string(),
        });
    }

    let alias = if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TyAlias)) {
        Some(crate::error::Alias::Ty)
    } else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
        Some(crate::error::Alias::Trait)
    } else {
        None
    };

    let cycle_diag = crate::error::Cycle {
        span,
        cycle_stack,
        stack_bottom: stack[0].query.description.to_owned(),
        alias,
        cycle_usage,
        stack_count,
    };

    cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
}

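/// Prints the current query stack (up to `num_frames` frames, or all of it if
/// `None`) and returns the number of frames printed.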
pub fn print_query_stack<Qcx: QueryContext>(
    qcx: Qcx,
    mut current_query: Option<QueryJobId>,
    handler: &Handler,
    num_frames: Option<usize>,
) -> usize {
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `Handler` may be in a weird
    // state if it was responsible for triggering the panic.
    let mut i = 0;
    let query_map = qcx.try_collect_active_jobs();

    while let Some(query) = current_query {
        if Some(i) == num_frames {
            break;
        }
        let Some(query_info) = query_map.as_ref().and_then(|map| map.get(&query)) else {
            break;
        };
        let mut diag = Diagnostic::new(
            Level::FailureNote,
            &format!("#{} [{}] {}", i, query_info.query.name, query_info.query.description),
        );
        diag.span = query_info.job.span.into();
        handler.force_print_diagnostic(diag);

        current_query = query_info.job.parent;
        i += 1;
    }

    i
}