use crate::dep_graph::DepContext;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};

use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level};
use rustc_session::Session;
use rustc_span::Span;

use std::convert::TryFrom;
use std::hash::Hash;
use std::marker::PhantomData;
use std::num::NonZeroU32;
#[cfg(parallel_compiler)]
use {
    crate::dep_graph::DepKind,
    parking_lot::{Condvar, Mutex},
    rustc_data_structures::fx::FxHashSet,
    rustc_data_structures::sync::Lock,
    rustc_data_structures::sync::Lrc,
    rustc_data_structures::{jobserver, OnDrop},
    rustc_rayon_core as rayon_core,
    rustc_span::DUMMY_SP,
    std::iter::{self, FromIterator},
    std::{mem, process},
};
/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    pub query: QueryStackFrame,
}
pub type QueryMap<D> = FxHashMap<QueryJobId<D>, QueryJobInfo<D>>;
/// A value uniquely identifying an active query job within a shard in the query cache.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryShardJobId(pub NonZeroU32);
/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId<D> {
    /// Which job within a shard is this
    pub job: QueryShardJobId,

    /// In which shard is this job
    pub shard: u16,

    /// What kind of query this job is.
    pub kind: D,
}

impl<D> QueryJobId<D>
where
    D: Copy + Clone + Eq + Hash,
{
    pub fn new(job: QueryShardJobId, shard: usize, kind: D) -> Self {
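        // Shard counts are small, so the conversion to `u16` is not expected
        // to fail; the `unwrap` enforces that invariant.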
        QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
    }
    fn query(self, map: &QueryMap<D>) -> QueryStackFrame {
        map.get(&self).unwrap().info.query.clone()
    }
    #[cfg(parallel_compiler)]
    fn span(self, map: &QueryMap<D>) -> Span {
        map.get(&self).unwrap().job.span
    }

    #[cfg(parallel_compiler)]
    fn parent(self, map: &QueryMap<D>) -> Option<QueryJobId<D>> {
        map.get(&self).unwrap().job.parent
    }

    #[cfg(parallel_compiler)]
    fn latch<'a>(self, map: &'a QueryMap<D>) -> Option<&'a QueryLatch<D>> {
        map.get(&self).unwrap().job.latch.as_ref()
    }
}
pub struct QueryJobInfo<D> {
    pub info: QueryInfo,
    pub job: QueryJob<D>,
}
/// Represents an active query job.
#[derive(Clone)]
pub struct QueryJob<D> {
    pub id: QueryShardJobId,

    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<QueryJobId<D>>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: Option<QueryLatch<D>>,
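    // `PhantomData` keeps `QueryLatch<D>` mentioned in this type even when
    // the `latch` field above is compiled out.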
    dummy: PhantomData<QueryLatch<D>>,
}
impl<D> QueryJob<D>
where
    D: Copy + Clone + Eq + Hash,
{
    /// Creates a new query job.
    pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
        QueryJob {
            id,
            span,
            parent,
            #[cfg(parallel_compiler)]
            latch: None,
            dummy: PhantomData,
        }
    }
    #[cfg(parallel_compiler)]
    pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D> {
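        // The latch is created lazily, the first time another job actually
        // needs to wait on this one.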
        if self.latch.is_none() {
            self.latch = Some(QueryLatch::new());
        }
        self.latch.as_ref().unwrap().clone()
    }
    #[cfg(not(parallel_compiler))]
    pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D> {
        QueryLatch { id, dummy: PhantomData }
    }
    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single-threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us.
    pub fn signal_complete(self) {
        #[cfg(parallel_compiler)]
        {
            if let Some(latch) = self.latch {
                latch.set();
            }
        }
    }
}
#[cfg(not(parallel_compiler))]
#[derive(Clone)]
pub(super) struct QueryLatch<D> {
    id: QueryJobId<D>,
    dummy: PhantomData<D>,
}
#[cfg(not(parallel_compiler))]
impl<D> QueryLatch<D>
where
    D: Copy + Clone + Eq + Hash,
{
    pub(super) fn find_cycle_in_stack(
        &self,
        query_map: QueryMap<D>,
        current_job: &Option<QueryJobId<D>>,
        span: Span,
    ) -> CycleError {
        // Find the waitee amongst `current_job` parents
        let mut cycle = Vec::new();
        let mut current_job = Option::clone(current_job);

        while let Some(job) = current_job {
            let info = query_map.get(&job).unwrap();
            cycle.push(info.info.clone());

            if job == self.id {
                cycle.reverse();

                // This is the end of the cycle
                // The span entry we included was for the usage
                // of the cycle itself, and not part of the cycle
                // Replace it with the span which caused the cycle to form
                cycle[0].span = span;
                // Find out why the cycle itself was used
                let usage = info
                    .job
                    .parent
                    .as_ref()
                    .map(|parent| (info.info.span, parent.query(&query_map)));
                return CycleError { usage, cycle };
            }

            current_job = info.job.parent;
        }
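        // The job this latch belongs to must be an ancestor of `current_job`,
        // so the loop above is expected to find it before running out of
        // parents.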
        panic!("did not find a cycle")
    }
}
#[cfg(parallel_compiler)]
struct QueryWaiter<D> {
    query: Option<QueryJobId<D>>,
    condvar: Condvar,
    span: Span,
    cycle: Lock<Option<CycleError>>,
}
#[cfg(parallel_compiler)]
impl<D> QueryWaiter<D> {
    fn notify(&self, registry: &rayon_core::Registry) {
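        // Update Rayon's bookkeeping of blocked threads before waking the
        // waiter through its condvar.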
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}
#[cfg(parallel_compiler)]
struct QueryLatchInfo<D> {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter<D>>>,
}
#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch<D> {
    info: Lrc<Mutex<QueryLatchInfo<D>>>,
}
#[cfg(parallel_compiler)]
impl<D: Eq + Hash> QueryLatch<D> {
    fn new() -> Self {
        QueryLatch {
            info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
        }
    }
}
#[cfg(parallel_compiler)]
impl<D> QueryLatch<D> {
    /// Waits for the query job to complete.
    pub(super) fn wait_on(
        &self,
        query: Option<QueryJobId<D>>,
        span: Span,
    ) -> Result<(), CycleError> {
        let waiter =
            Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
        self.wait_on_inner(&waiter);
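        // If the deadlock handler broke a cycle through this waiter, it
        // stored the cycle error here before resuming us.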
        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
        // although another thread may still have a Lrc reference so we cannot
        // use `Lrc::get_mut`.
        let mut cycle = waiter.cycle.lock();
        match cycle.take() {
            None => Ok(()),
            Some(cycle) => Err(cycle),
        }
    }
    /// Waits on this latch by blocking the current thread.
    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this thread
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // getting the self.info lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`
            mem::drop(info);
            jobserver::acquire_thread();
        }
    }
    /// Sets the latch and resumes all waiters on it
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }
    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters
        info.waiters.remove(waiter)
    }
}
/// A resumable waiter of a query. The usize is the index into `waiters` in the query's latch
#[cfg(parallel_compiler)]
type Waiter<D> = (QueryJobId<D>, usize);
/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query`
/// and a span indicating the reason the query waited on `query`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<D, F>(
    query_map: &QueryMap<D>,
    query: QueryJobId<D>,
    mut visit: F,
) -> Option<Option<Waiter<D>>>
where
    D: Copy + Clone + Eq + Hash,
    F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack
    if let Some(parent) = query.parent(query_map) {
        if let Some(cycle) = visit(query.span(query_map), parent) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable
    if let Some(latch) = query.latch(query_map) {
        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
            if let Some(waiter_query) = waiter.query {
                if visit(waiter.span, waiter_query).is_some() {
                    // Return a value which indicates that this waiter can be resumed
                    return Some(Some((query, i)));
                }
            }
        }
    }

    None
}
/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check<D>(
    query_map: &QueryMap<D>,
    query: QueryJobId<D>,
    span: Span,
    stack: &mut Vec<(Span, QueryJobId<D>)>,
    visited: &mut FxHashSet<QueryJobId<D>>,
) -> Option<Option<Waiter<D>>>
where
    D: Copy + Clone + Eq + Hash,
{
    if !visited.insert(query) {
        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
            // We detected a query cycle, fix up the initial span and return Some

            // Remove previous stack entries
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause
            stack[0].0 = span;
            Some(None)
        } else {
            None
        };
    }

    // A query marked as visited is added to the stack
    stack.push((span, query));

    // Visit all the waiters
    let r = visit_waiters(query_map, query, |span, successor| {
        cycle_check(query_map, successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle
    if r.is_none() {
        stack.pop();
    }

    r
}
/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root<D>(
    query_map: &QueryMap<D>,
    query: QueryJobId<D>,
    visited: &mut FxHashSet<QueryJobId<D>>,
) -> bool
where
    D: Copy + Clone + Eq + Hash,
{
    // We already visited this or we're deliberately ignoring it
    if !visited.insert(query) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true
    if query.parent(query_map).is_none() {
        return true;
    }
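    // Returning `Some(None)` from the closure makes `visit_waiters` stop as
    // soon as one waiter reaches the root, without nominating a waiter to
    // resume.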
    visit_waiters(query_map, query, |_, successor| {
        connected_to_root(query_map, successor, visited).then_some(None)
    })
    .is_some()
}
// Deterministically pick a query from a list
#[cfg(parallel_compiler)]
fn pick_query<'a, D, T, F>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
where
    D: Copy + Clone + Eq + Hash,
    F: Fn(&T) -> (Span, QueryJobId<D>),
{
    // Deterministically pick an entry point
    // FIXME: Sort this instead
    queries
        .iter()
        .min_by_key(|v| {
            let (span, query) = f(v);
            let hash = query.query(query_map).hash;
            // Prefer entry points which have valid spans for nicer error messages
            // We add an integer to the tuple ensuring that entry points
            // with valid spans are picked first
            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
            (span_cmp, hash)
        })
        .unwrap()
}
/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<D: DepKind>(
    query_map: &QueryMap<D>,
    jobs: &mut Vec<QueryJobId<D>>,
    wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`
    if let Some(waiter) =
        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
    {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee
        spans.rotate_right(1);

        // Zip them back together
        let mut stack: Vec<_> = iter::zip(spans, queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
                jobs.remove(pos);
            }
        }
        // Find the queries in the cycle which are
        // connected to queries outside the cycle
        let entry_points = stack
            .iter()
            .filter_map(|&(span, query)| {
                if query.parent(query_map).is_none() {
                    // This query is connected to the root (it has no query parent)
                    Some((span, query, None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters who lead to the root
                    visit_waiters(query_map, query, |span, waiter| {
                        // Mark all the other queries in the cycle as already visited
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));

                        if connected_to_root(query_map, waiter, &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user
                        let waiter = *pick_query(query_map, &waiters, |s| *s);
                        Some((span, query, Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, QueryJobId<D>, Option<(Span, QueryJobId<D>)>)>>();
        // Deterministically pick an entry point
        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));

        // Shift the stack so that our entry point is first
        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));
        // Create the cycle error
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
                .collect(),
        };
        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited using a query latch
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume
        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume
        wakelist.push(waiter);

        true
    } else {
        false
    }
}
/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn deadlock<CTX: QueryContext>(tcx: CTX, registry: &rayon_core::Registry) {
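    // If the deadlock handler itself panics, abort the process rather than
    // return: the blocked threads would otherwise be stuck forever.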
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let query_map = tcx.try_collect_active_jobs().unwrap();
    let mut jobs: Vec<QueryJobId<CTX::DepKind>> = query_map.keys().cloned().collect();
    let mut found_cycle = false;

    while jobs.len() > 0 {
        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
            found_cycle = true;
        }
    }
    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}
pub(crate) fn report_cycle<'a>(
    sess: &'a Session,
    CycleError { usage, cycle: stack }: CycleError,
) -> DiagnosticBuilder<'a> {
    assert!(!stack.is_empty());
    let fix_span = |span: Span, query: &QueryStackFrame| {
        sess.source_map().guess_head_span(query.default_span(span))
    };
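    // `1 % stack.len()` also handles a self-cycle of length one, where the
    // "next" entry is the first entry itself.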
    let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
    let mut err =
        struct_span_err!(sess, span, E0391, "cycle detected when {}", stack[0].query.description);
    for i in 1..stack.len() {
        let query = &stack[i].query;
        let span = fix_span(stack[(i + 1) % stack.len()].span, query);
        err.span_note(span, &format!("...which requires {}...", query.description));
    }

    err.note(&format!(
        "...which again requires {}, completing the cycle",
        stack[0].query.description
    ));
    if let Some((span, query)) = usage {
        err.span_note(fix_span(span, &query), &format!("cycle used when {}", query.description));
    }

    err
}
pub fn print_query_stack<CTX: QueryContext>(
    tcx: CTX,
    mut current_query: Option<QueryJobId<CTX::DepKind>>,
    handler: &Handler,
    num_frames: Option<usize>,
) -> usize {
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `Handler` may be in a weird
    // state if it was responsible for triggering the panic.
    let mut i = 0;
    let query_map = tcx.try_collect_active_jobs();
    while let Some(query) = current_query {
        if Some(i) == num_frames {
            break;
        }
        let query_info = if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
            info
        } else {
            break;
        };
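        // Print each frame as a numbered failure note.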
        let mut diag = Diagnostic::new(
            Level::FailureNote,
            &format!(
                "#{} [{}] {}",
                i, query_info.info.query.name, query_info.info.query.description
            ),
        );
        diag.span =
            tcx.dep_context().sess().source_map().guess_head_span(query_info.info.span).into();
        handler.force_print_diagnostic(diag);

        current_query = query_info.job.parent;
        i += 1;
    }

    i
}