]>
Commit | Line | Data |
---|---|---|
6a06907d | 1 | use crate::dep_graph::DepContext; |
ba9703b0 | 2 | use crate::query::plumbing::CycleError; |
94222f64 | 3 | use crate::query::{QueryContext, QueryStackFrame, SimpleDefKind}; |
9fa01778 | 4 | |
74b04a01 | 5 | use rustc_data_structures::fx::FxHashMap; |
6a06907d XL |
6 | use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level}; |
7 | use rustc_session::Session; | |
dfeec247 | 8 | use rustc_span::Span; |
9fa01778 | 9 | |
74b04a01 | 10 | use std::convert::TryFrom; |
29967ef6 | 11 | use std::hash::Hash; |
74b04a01 | 12 | use std::num::NonZeroU32; |
b7449926 | 13 | |
9fa01778 | 14 | #[cfg(parallel_compiler)] |
94b46f34 | 15 | use { |
6a06907d | 16 | crate::dep_graph::DepKind, |
dfeec247 | 17 | parking_lot::{Condvar, Mutex}, |
416331ca | 18 | rustc_data_structures::fx::FxHashSet, |
416331ca | 19 | rustc_data_structures::sync::Lock, |
74b04a01 | 20 | rustc_data_structures::sync::Lrc, |
dfeec247 | 21 | rustc_data_structures::{jobserver, OnDrop}, |
416331ca | 22 | rustc_rayon_core as rayon_core, |
dfeec247 | 23 | rustc_span::DUMMY_SP, |
cdc7bbd5 | 24 | std::iter::{self, FromIterator}, |
ba9703b0 | 25 | std::{mem, process}, |
94b46f34 XL |
26 | }; |
27 | ||
/// Represents a span and a query key.
///
/// One entry of a query cycle as reported to the user: the query that was
/// executing and the span of the usage that required it.
#[derive(Clone, Debug)]
pub struct QueryInfo {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    /// The query being executed (name, description, hash, ...).
    pub query: QueryStackFrame,
}
35 | ||
/// Maps the id of each active query job to its [`QueryJobInfo`].
pub type QueryMap<D> = FxHashMap<QueryJobId<D>, QueryJobInfo<D>>;
74b04a01 | 37 | |
/// A value uniquely identifying an active query job within a shard in the query cache.
// `NonZeroU32` lets `Option<QueryShardJobId>` use the zero niche (no extra space).
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryShardJobId(pub NonZeroU32);
41 | ||
/// A value uniquely identifying an active query job.
///
/// Globally unique: the per-shard id plus the shard index plus the query kind.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId<D> {
    /// Which job within a shard is this
    pub job: QueryShardJobId,

    /// In which shard is this job
    pub shard: u16,

    /// What kind of query this job is.
    pub kind: D,
}
54 | ||
impl<D> QueryJobId<D>
where
    D: Copy + Clone + Eq + Hash,
{
    /// Creates an id from a per-shard job id, a shard index and a query kind.
    ///
    /// Panics if `shard` does not fit in a `u16`.
    pub fn new(job: QueryShardJobId, shard: usize, kind: D) -> Self {
        QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
    }

    /// Looks up this job's query stack frame in `map`.
    ///
    /// Panics if the job is not in `map` (callers pass a map of all active jobs).
    fn query(self, map: &QueryMap<D>) -> QueryStackFrame {
        map.get(&self).unwrap().query.clone()
    }

    /// The span at which this job was required. Panics if the job is not in `map`.
    #[cfg(parallel_compiler)]
    fn span(self, map: &QueryMap<D>) -> Span {
        map.get(&self).unwrap().job.span
    }

    /// The parent job that is implicitly waiting on this one, if any.
    #[cfg(parallel_compiler)]
    fn parent(self, map: &QueryMap<D>) -> Option<QueryJobId<D>> {
        map.get(&self).unwrap().job.parent
    }

    /// The latch other threads wait on for this job, if one was created.
    #[cfg(parallel_compiler)]
    fn latch<'a>(self, map: &'a QueryMap<D>) -> Option<&'a QueryLatch<D>> {
        map.get(&self).unwrap().job.latch.as_ref()
    }
}
82 | ||
/// The information stored for each active query job: the query frame
/// being executed plus the job's bookkeeping state.
pub struct QueryJobInfo<D> {
    pub query: QueryStackFrame,
    pub job: QueryJob<D>,
}
87 | ||
/// Represents an active query job.
#[derive(Clone)]
pub struct QueryJob<D> {
    /// Id of this job within its shard.
    pub id: QueryShardJobId,

    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<QueryJobId<D>>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: Option<QueryLatch<D>>,
}
103 | ||
impl<D> QueryJob<D>
where
    D: Copy + Clone + Eq + Hash,
{
    /// Creates a new query job.
    pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
        QueryJob {
            id,
            span,
            parent,
            #[cfg(parallel_compiler)]
            latch: None,
        }
    }

    /// Returns (a clone of) this job's latch, lazily creating it on first use.
    #[cfg(parallel_compiler)]
    pub(super) fn latch(&mut self) -> QueryLatch<D> {
        if self.latch.is_none() {
            self.latch = Some(QueryLatch::new());
        }
        self.latch.as_ref().unwrap().clone()
    }

    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us
    pub fn signal_complete(self) {
        #[cfg(parallel_compiler)]
        {
            // Only set the latch if one was ever created, i.e. someone waited.
            if let Some(latch) = self.latch {
                latch.set();
            }
        }
    }
}
140 | ||
#[cfg(not(parallel_compiler))]
impl<D> QueryJobId<D>
where
    D: Copy + Clone + Eq + Hash,
{
    /// Walks up the parent chain starting from `current_job` until `self` is
    /// found again, building the `CycleError` describing the query cycle.
    ///
    /// Panics if `self` is not an ancestor of `current_job` ("did not find a cycle").
    pub(super) fn find_cycle_in_stack(
        &self,
        query_map: QueryMap<D>,
        current_job: &Option<QueryJobId<D>>,
        span: Span,
    ) -> CycleError {
        // Find the waitee amongst `current_job` parents
        let mut cycle = Vec::new();
        let mut current_job = Option::clone(current_job);

        while let Some(job) = current_job {
            let info = query_map.get(&job).unwrap();
            cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });

            if job == *self {
                cycle.reverse();

                // This is the end of the cycle
                // The span entry we included was for the usage
                // of the cycle itself, and not part of the cycle
                // Replace it with the span which caused the cycle to form
                cycle[0].span = span;
                // Find out why the cycle itself was used
                let usage = info
                    .job
                    .parent
                    .as_ref()
                    .map(|parent| (info.job.span, parent.query(&query_map)));
                return CycleError { usage, cycle };
            }

            current_job = info.job.parent;
        }

        panic!("did not find a cycle")
    }
}
183 | ||
/// A thread blocked waiting on a query latch, together with the channel
/// (`cycle`) through which a detected cycle error is handed back to it.
#[cfg(parallel_compiler)]
struct QueryWaiter<D> {
    /// The query this waiter was itself executing, if any.
    query: Option<QueryJobId<D>>,
    /// Condvar the blocked thread sleeps on.
    condvar: Condvar,
    /// Why the waiter waited on the query (reported in cycle errors).
    span: Span,
    /// Filled in by deadlock handling before the waiter is resumed.
    cycle: Lock<Option<CycleError>>,
}
191 | ||
#[cfg(parallel_compiler)]
impl<D> QueryWaiter<D> {
    /// Wakes this waiter: tells rayon the thread is runnable again, then
    /// signals its condvar.
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}
199 | ||
/// Mutex-protected state of a `QueryLatch`: whether the query completed,
/// and the list of threads currently blocked on it.
#[cfg(parallel_compiler)]
struct QueryLatchInfo<D> {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter<D>>>,
}
205 | ||
/// A one-shot latch other threads can block on until a query job completes.
/// Cloning shares the same underlying state (`Lrc`).
#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch<D> {
    info: Lrc<Mutex<QueryLatchInfo<D>>>,
}
211 | ||
#[cfg(parallel_compiler)]
impl<D: Eq + Hash> QueryLatch<D> {
    /// Creates a fresh, unset latch with no waiters.
    fn new() -> Self {
        QueryLatch {
            info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
        }
    }
}
74b04a01 | 220 | |
#[cfg(parallel_compiler)]
impl<D> QueryLatch<D> {
    /// Awaits for the query job to complete.
    ///
    /// Returns `Err` if deadlock handling resumed this waiter with a cycle error.
    pub(super) fn wait_on(
        &self,
        query: Option<QueryJobId<D>>,
        span: Span,
    ) -> Result<(), CycleError> {
        let waiter =
            Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
        self.wait_on_inner(&waiter);
        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
        // although another thread may still have a Lrc reference so we cannot
        // use Lrc::get_mut
        let mut cycle = waiter.cycle.lock();
        match cycle.take() {
            None => Ok(()),
            Some(cycle) => Err(cycle),
        }
    }

    /// Awaits the caller on this latch by blocking the current thread.
    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this thread
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // getting the self.info lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`
            mem::drop(info);
            jobserver::acquire_thread();
        }
    }

    /// Sets the latch and resumes all waiters on it
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters
        info.waiters.remove(waiter)
    }
}
284 | ||
/// A resumable waiter of a query. The usize is the index into waiters in the query's latch
#[cfg(parallel_compiler)]
type Waiter<D> = (QueryJobId<D>, usize);
94b46f34 XL |
288 | |
/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
/// and a span indicating the reason the query waited on `query_ref`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls returns None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<D, F>(
    query_map: &QueryMap<D>,
    query: QueryJobId<D>,
    mut visit: F,
) -> Option<Option<Waiter<D>>>
where
    D: Copy + Clone + Eq + Hash,
    F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack
    if let Some(parent) = query.parent(query_map) {
        if let Some(cycle) = visit(query.span(query_map), parent) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable
    if let Some(latch) = query.latch(query_map) {
        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
            // Waiters without an associated query are not part of any cycle.
            if let Some(waiter_query) = waiter.query {
                if visit(waiter.span, waiter_query).is_some() {
                    // Return a value which indicates that this waiter can be resumed
                    return Some(Some((query, i)));
                }
            }
        }
    }

    None
}
329 | ||
/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check<D>(
    query_map: &QueryMap<D>,
    query: QueryJobId<D>,
    span: Span,
    stack: &mut Vec<(Span, QueryJobId<D>)>,
    visited: &mut FxHashSet<QueryJobId<D>>,
) -> Option<Option<Waiter<D>>>
where
    D: Copy + Clone + Eq + Hash,
{
    if !visited.insert(query) {
        // Already seen: a cycle exists only if `query` is still on the current
        // DFS stack (a back edge), not merely visited on a finished branch.
        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
            // We detected a query cycle, fix up the initial span and return Some

            // Remove previous stack entries
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause
            stack[0].0 = span;
            Some(None)
        } else {
            None
        };
    }

    // Query marked as visited is added it to the stack
    stack.push((span, query));

    // Visit all the waiters
    let r = visit_waiters(query_map, query, |span, successor| {
        cycle_check(query_map, successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle
    if r.is_none() {
        stack.pop();
    }

    r
}
374 | ||
/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root<D>(
    query_map: &QueryMap<D>,
    query: QueryJobId<D>,
    visited: &mut FxHashSet<QueryJobId<D>>,
) -> bool
where
    D: Copy + Clone + Eq + Hash,
{
    // We already visited this or we're deliberately ignoring it
    if !visited.insert(query) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true
    if query.parent(query_map).is_none() {
        return true;
    }

    // Otherwise recurse through every waiter; any Some short-circuits the visit.
    visit_waiters(query_map, query, |_, successor| {
        connected_to_root(query_map, successor, visited).then_some(None)
    })
    .is_some()
}
402 | ||
/// Deterministically pick a query from a list.
///
/// `f` projects each candidate to the `(Span, QueryJobId)` pair used for the
/// choice; the selected candidate is returned by reference.
#[cfg(parallel_compiler)]
fn pick_query<'a, D, T, F>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
where
    D: Copy + Clone + Eq + Hash,
    F: Fn(&T) -> (Span, QueryJobId<D>),
{
    // Deterministically pick an entry point
    // FIXME: Sort this instead
    let sort_key = |candidate: &T| {
        let (span, job) = f(candidate);
        // Prefer entry points which have valid spans for nicer error messages:
        // the leading flag orders candidates with a DUMMY_SP after all others,
        // and the query hash breaks ties deterministically.
        let invalid_span = usize::from(span == DUMMY_SP);
        (invalid_span, job.query(query_map).hash)
    };
    queries.iter().min_by_key(|candidate| sort_key(candidate)).unwrap()
}
425 | ||
/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<D: DepKind>(
    query_map: &QueryMap<D>,
    jobs: &mut Vec<QueryJobId<D>>,
    wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`
    if let Some(waiter) =
        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
    {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee
        spans.rotate_right(1);

        // Zip them back together
        let mut stack: Vec<_> = iter::zip(spans, queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle
        let entry_points = stack
            .iter()
            .filter_map(|&(span, query)| {
                if query.parent(query_map).is_none() {
                    // This query is connected to the root (it has no query parent)
                    Some((span, query, None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters who lead to the root
                    visit_waiters(query_map, query, |span, waiter| {
                        // Mark all the other queries in the cycle as already visited
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));

                        if connected_to_root(query_map, waiter, &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user
                        let waiter = *pick_query(query_map, &waiters, |s| *s);
                        Some((span, query, Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, QueryJobId<D>, Option<(Span, QueryJobId<D>)>)>>();

        // Deterministically pick an entry point
        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));

        // Shift the stack so that our entry point is first
        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));

        // Create the cycle error
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
                .collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited using a query latch
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume
        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume
        wakelist.push(waiter);

        true
    } else {
        false
    }
}
530 | ||
/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn deadlock<CTX: QueryContext>(tcx: CTX, registry: &rayon_core::Registry) {
    // If the handler itself panics we cannot recover; abort the whole process.
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let query_map = tcx.try_collect_active_jobs().unwrap();
    let mut jobs: Vec<QueryJobId<CTX::DepKind>> = query_map.keys().cloned().collect();

    let mut found_cycle = false;

    // `remove_cycle` pops at least one job per call, so this terminates.
    while !jobs.is_empty() {
        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}
6a06907d XL |
571 | |
/// Builds the `E0391` "cycle detected" diagnostic from a `CycleError`.
///
/// Marked cold/never-inline: this is an error path and should not bloat callers.
#[inline(never)]
#[cold]
pub(crate) fn report_cycle<'a>(
    sess: &'a Session,
    CycleError { usage, cycle: stack }: CycleError,
) -> DiagnosticBuilder<'a> {
    assert!(!stack.is_empty());

    // Map a span through the query's preferred span and trim it to its head line.
    let fix_span = |span: Span, query: &QueryStackFrame| {
        sess.source_map().guess_head_span(query.default_span(span))
    };

    // `1 % stack.len()` handles the self-cycle case (stack of length 1).
    let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
    let mut err =
        struct_span_err!(sess, span, E0391, "cycle detected when {}", stack[0].query.description);

    // One note per intermediate query; each uses the span of the *next* entry,
    // wrapping around so the last entry points back at the start of the cycle.
    for i in 1..stack.len() {
        let query = &stack[i].query;
        let span = fix_span(stack[(i + 1) % stack.len()].span, query);
        err.span_note(span, &format!("...which requires {}...", query.description));
    }

    if stack.len() == 1 {
        err.note(&format!("...which immediately requires {} again", stack[0].query.description));
    } else {
        err.note(&format!(
            "...which again requires {}, completing the cycle",
            stack[0].query.description
        ));
    }

    // Special-case cycles made up entirely of type/trait aliases, which get a
    // dedicated explanation instead of the generic cycle wording.
    if stack.iter().all(|entry| {
        entry.query.def_kind.map_or(false, |def_kind| {
            matches!(def_kind, SimpleDefKind::TyAlias | SimpleDefKind::TraitAlias)
        })
    }) {
        if stack.iter().all(|entry| {
            entry
                .query
                .def_kind
                .map_or(false, |def_kind| matches!(def_kind, SimpleDefKind::TyAlias))
        }) {
            err.note("type aliases cannot be recursive");
            err.help("consider using a struct, enum, or union instead to break the cycle");
            err.help("see <https://doc.rust-lang.org/reference/types.html#recursive-types> for more information");
        } else {
            err.note("trait aliases cannot be recursive");
        }
    }

    if let Some((span, query)) = usage {
        err.span_note(fix_span(span, &query), &format!("cycle used when {}", query.description));
    }

    err
}
628 | ||
/// Prints (up to `num_frames` of) the active query stack starting at
/// `current_query`, walking parent links. Returns the number of frames printed.
pub fn print_query_stack<CTX: QueryContext>(
    tcx: CTX,
    mut current_query: Option<QueryJobId<CTX::DepKind>>,
    handler: &Handler,
    num_frames: Option<usize>,
) -> usize {
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `Handler` may be in a weird
    // state if it was responsible for triggering the panic.
    let mut i = 0;
    let query_map = tcx.try_collect_active_jobs();

    while let Some(query) = current_query {
        if Some(i) == num_frames {
            break;
        }
        // Stop if the job map is unavailable or the job is no longer in it.
        let query_info = if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
            info
        } else {
            break;
        };
        let mut diag = Diagnostic::new(
            Level::FailureNote,
            &format!("#{} [{}] {}", i, query_info.query.name, query_info.query.description),
        );
        diag.span =
            tcx.dep_context().sess().source_map().guess_head_span(query_info.job.span).into();
        handler.force_print_diagnostic(diag);

        current_query = query_info.job.parent;
        i += 1;
    }

    i
}