use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};
use rustc_hir::def::DefKind;

use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{
    struct_span_err, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, Level,
};
use rustc_session::Session;
use rustc_span::Span;

use std::hash::Hash;
use std::num::NonZeroU64;

#[cfg(parallel_compiler)]
use {
    parking_lot::{Condvar, Mutex},
    rustc_data_structures::fx::FxHashSet,
    rustc_data_structures::sync::Lock,
    rustc_data_structures::sync::Lrc,
    rustc_data_structures::{jobserver, OnDrop},
    rustc_rayon_core as rayon_core,
    rustc_span::DUMMY_SP,
    std::iter::{self, FromIterator},
    std::{mem, process},
};

/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    pub query: QueryStackFrame,
}

pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;

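// `QueryJobId` wraps a `NonZeroU64` so that `Option<QueryJobId>` benefits
// from the niche optimization and stays the same size as the id itself.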
/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId(pub NonZeroU64);

impl QueryJobId {
    fn query(self, map: &QueryMap) -> QueryStackFrame {
        map.get(&self).unwrap().query.clone()
    }

    #[cfg(parallel_compiler)]
    fn span(self, map: &QueryMap) -> Span {
        map.get(&self).unwrap().job.span
    }

    #[cfg(parallel_compiler)]
    fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
        map.get(&self).unwrap().job.parent
    }

    #[cfg(parallel_compiler)]
    fn latch<'a>(self, map: &'a QueryMap) -> Option<&'a QueryLatch> {
        map.get(&self).unwrap().job.latch.as_ref()
    }
}

pub struct QueryJobInfo {
    pub query: QueryStackFrame,
    pub job: QueryJob,
}

/// Represents an active query job.
#[derive(Clone)]
pub struct QueryJob {
    pub id: QueryJobId,

    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<QueryJobId>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: Option<QueryLatch>,
}

impl QueryJob {
    /// Creates a new query job.
    #[inline]
    pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
        QueryJob {
            id,
            span,
            parent,
            #[cfg(parallel_compiler)]
            latch: None,
        }
    }

    #[cfg(parallel_compiler)]
    pub(super) fn latch(&mut self) -> QueryLatch {
        if self.latch.is_none() {
            self.latch = Some(QueryLatch::new());
        }
        self.latch.as_ref().unwrap().clone()
    }

    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single-threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us.
    #[inline]
    pub fn signal_complete(self) {
        #[cfg(parallel_compiler)]
        {
            if let Some(latch) = self.latch {
                latch.set();
            }
        }
    }
}

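// In the non-parallel compiler a query cycle can only occur within a single
// thread's query stack, so it can always be found by walking the `parent`
// chain of the currently executing job.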
#[cfg(not(parallel_compiler))]
impl QueryJobId {
    #[cold]
    #[inline(never)]
    pub(super) fn find_cycle_in_stack(
        &self,
        query_map: QueryMap,
        current_job: &Option<QueryJobId>,
        span: Span,
    ) -> CycleError {
        // Find the waitee amongst `current_job`'s parents.
        let mut cycle = Vec::new();
        let mut current_job = Option::clone(current_job);

        while let Some(job) = current_job {
            let info = query_map.get(&job).unwrap();
            cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });

            if job == *self {
                cycle.reverse();

                // This is the end of the cycle.
                // The span entry we included was for the usage
                // of the cycle itself, and not part of the cycle.
                // Replace it with the span which caused the cycle to form.
                cycle[0].span = span;
                // Find out why the cycle itself was used.
                let usage = info
                    .job
                    .parent
                    .as_ref()
                    .map(|parent| (info.job.span, parent.query(&query_map)));
                return CycleError { usage, cycle };
            }

            current_job = info.job.parent;
        }

        panic!("did not find a cycle")
    }
}

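// A `QueryWaiter` records one thread blocked on a query: the job it was
// executing (if any), the span it waited from, the condvar it sleeps on, and
// a slot for a cycle error to be delivered to it when it is resumed.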
#[cfg(parallel_compiler)]
struct QueryWaiter {
    query: Option<QueryJobId>,
    condvar: Condvar,
    span: Span,
    cycle: Lock<Option<CycleError>>,
}

#[cfg(parallel_compiler)]
impl QueryWaiter {
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}

#[cfg(parallel_compiler)]
struct QueryLatchInfo {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter>>,
}

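// A latch that waiters block on until the owning query completes. Once `set`,
// it stays set, and all current waiters are resumed.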
#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch {
    info: Lrc<Mutex<QueryLatchInfo>>,
}

#[cfg(parallel_compiler)]
impl QueryLatch {
    fn new() -> Self {
        QueryLatch {
            info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
        }
    }

    /// Waits for the query job to complete.
    pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
        let waiter =
            Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
        self.wait_on_inner(&waiter);
        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
        // although another thread may still have a Lrc reference so we cannot
        // use Lrc::get_mut
        let mut cycle = waiter.cycle.lock();
        match cycle.take() {
            None => Ok(()),
            Some(cycle) => Err(cycle),
        }
    }

    /// Waits on this latch by blocking the current thread.
    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this thread
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // getting the self.info lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`.
            mem::drop(info);
            jobserver::acquire_thread();
        }
    }

    /// Sets the latch and resumes all waiters on it.
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters.
        info.waiters.remove(waiter)
    }
}

/// A resumable waiter of a query. The usize is the index into waiters in the query's latch.
#[cfg(parallel_compiler)]
type Waiter = (QueryJobId, usize);

/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query`
/// and a span indicating the reason the query waited on `query`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
where
    F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack.
    if let Some(parent) = query.parent(query_map) {
        if let Some(cycle) = visit(query.span(query_map), parent) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable.
    if let Some(latch) = query.latch(query_map) {
        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
            if let Some(waiter_query) = waiter.query {
                if visit(waiter.span, waiter_query).is_some() {
                    // Return a value which indicates that this waiter can be resumed.
                    return Some(Some((query, i)));
                }
            }
        }
    }

    None
}

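// For example, with jobs A -> B -> C -> A (each waiting on the next), a check
// starting at A pushes [A, B, C] onto the stack; when the search reaches A
// again, `visited` already contains it and A is found at stack position 0, so
// the stack then holds exactly the queries forming the cycle.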
/// Looks for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check(
    query_map: &QueryMap,
    query: QueryJobId,
    span: Span,
    stack: &mut Vec<(Span, QueryJobId)>,
    visited: &mut FxHashSet<QueryJobId>,
) -> Option<Option<Waiter>> {
    if !visited.insert(query) {
        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
            // We detected a query cycle, fix up the initial span and return Some.

            // Remove previous stack entries.
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause.
            stack[0].0 = span;
            Some(None)
        } else {
            None
        };
    }

    // A query marked as visited is added to the stack.
    stack.push((span, query));

    // Visit all the waiters.
    let r = visit_waiters(query_map, query, |span, successor| {
        cycle_check(query_map, successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle.
    if r.is_none() {
        stack.pop();
    }

    r
}

/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root(
    query_map: &QueryMap,
    query: QueryJobId,
    visited: &mut FxHashSet<QueryJobId>,
) -> bool {
    // We already visited this or we're deliberately ignoring it.
    if !visited.insert(query) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true.
    if query.parent(query_map).is_none() {
        return true;
    }

    visit_waiters(query_map, query, |_, successor| {
        connected_to_root(query_map, successor, visited).then_some(None)
    })
    .is_some()
}

// Deterministically pick a query from a list.
#[cfg(parallel_compiler)]
fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
where
    F: Fn(&T) -> (Span, QueryJobId),
{
    // Deterministically pick an entry point.
    // FIXME: Sort this instead.
    queries
        .iter()
        .min_by_key(|v| {
            let (span, query) = f(v);
            let hash = query.query(query_map).hash;
            // Prefer entry points which have valid spans for nicer error messages.
            // We add an integer to the tuple ensuring that entry points
            // with valid spans are picked first.
            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
            (span_cmp, hash)
        })
        .unwrap()
}

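// Example: suppose job A holds the latch that B waits on, and B holds the
// latch that A waits on. `remove_cycle` finds the cycle [A, B], removes both
// from `jobs`, picks one entry point deterministically, stores the resulting
// `CycleError` into one resumable waiter's `cycle` slot, and queues that
// waiter on `wakelist`.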
/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle(
    query_map: &QueryMap,
    jobs: &mut Vec<QueryJobId>,
    wakelist: &mut Vec<Lrc<QueryWaiter>>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`.
    if let Some(waiter) =
        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
    {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries.
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee.
        spans.rotate_right(1);

        // Zip them back together.
        let mut stack: Vec<_> = iter::zip(spans, queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at.
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle.
        let entry_points = stack
            .iter()
            .filter_map(|&(span, query)| {
                if query.parent(query_map).is_none() {
                    // This query is connected to the root (it has no query parent).
                    Some((span, query, None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters who lead to the root.
                    visit_waiters(query_map, query, |span, waiter| {
                        // Mark all the other queries in the cycle as already visited.
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));

                        if connected_to_root(query_map, waiter, &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user.
                        let waiter = *pick_query(query_map, &waiters, |s| *s);
                        Some((span, query, Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();

        // Deterministically pick an entry point.
        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));

        // Shift the stack so that our entry point is first.
        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));

        // Create the cycle error.
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
                .collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited using a query latch.
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume.
        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed.
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume.
        wakelist.push(waiter);

        true
    } else {
        false
    }
}

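// Note: this is the handler that rustc's rayon fork is expected to invoke once
// every worker thread has been marked as blocked (see `mark_blocked` above);
// registration of the handler happens outside this module.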
/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();

    let mut found_cycle = false;

    while jobs.len() > 0 {
        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return.
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}

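// As an illustration, a self-referential alias like `type T = Vec<T>;` is
// reported through this path roughly as:
//
//     error[E0391]: cycle detected when expanding type alias `T`
//     note: ...which immediately requires expanding type alias `T` again
//     note: type aliases cannot be recursive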
#[inline(never)]
#[cold]
pub(crate) fn report_cycle<'a>(
    sess: &'a Session,
    CycleError { usage, cycle: stack }: CycleError,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
    assert!(!stack.is_empty());

    // `1 % stack.len()` wraps back to index 0 for a self-cycle of length one.
    let span = stack[0].query.default_span(stack[1 % stack.len()].span);
    let mut err =
        struct_span_err!(sess, span, E0391, "cycle detected when {}", stack[0].query.description);

    for i in 1..stack.len() {
        let query = &stack[i].query;
        let span = query.default_span(stack[(i + 1) % stack.len()].span);
        err.span_note(span, &format!("...which requires {}...", query.description));
    }

    if stack.len() == 1 {
        err.note(&format!("...which immediately requires {} again", stack[0].query.description));
    } else {
        err.note(&format!(
            "...which again requires {}, completing the cycle",
            stack[0].query.description
        ));
    }

    if stack.iter().all(|entry| {
        entry
            .query
            .def_kind
            .map_or(false, |def_kind| matches!(def_kind, DefKind::TyAlias | DefKind::TraitAlias))
    }) {
        if stack.iter().all(|entry| {
            entry.query.def_kind.map_or(false, |def_kind| matches!(def_kind, DefKind::TyAlias))
        }) {
            err.note("type aliases cannot be recursive");
            err.help("consider using a struct, enum, or union instead to break the cycle");
            err.help("see <https://doc.rust-lang.org/reference/types.html#recursive-types> for more information");
        } else {
            err.note("trait aliases cannot be recursive");
        }
    }

    if let Some((span, query)) = usage {
        err.span_note(query.default_span(span), &format!("cycle used when {}", query.description));
    }

    err
}

pub fn print_query_stack<CTX: QueryContext>(
    tcx: CTX,
    mut current_query: Option<QueryJobId>,
    handler: &Handler,
    num_frames: Option<usize>,
) -> usize {
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `Handler` may be in a weird
    // state if it was responsible for triggering the panic.
    let mut i = 0;
    let query_map = tcx.try_collect_active_jobs();

    while let Some(query) = current_query {
        if Some(i) == num_frames {
            break;
        }
        let Some(query_info) = query_map.as_ref().and_then(|map| map.get(&query)) else {
            break;
        };
        let mut diag = Diagnostic::new(
            Level::FailureNote,
            &format!("#{} [{}] {}", i, query_info.query.name, query_info.query.description),
        );
        diag.span = query_info.job.span.into();
        handler.force_print_diagnostic(diag);

        current_query = query_info.job.parent;
        i += 1;
    }

    i
}