use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::sync::{Lock, LockGuard, Lrc, Weak};
use rustc_data_structures::OnDrop;
use rustc_data_structures::jobserver;
use std::process;
use std::ptr;

use crate::ty::tls;
use crate::ty::query::Query;
use crate::ty::query::plumbing::CycleError;
#[cfg(not(parallel_compiler))]
use crate::ty::query::{
    config::QueryDescription,
};
use crate::ty::context::TyCtxt;

use syntax_pos::Span;

#[cfg(parallel_compiler)]
use {
    rustc_rayon_core as rayon_core,
    parking_lot::{Mutex, Condvar},
    std::sync::atomic::Ordering,
    std::thread,
    std::iter::FromIterator,
    syntax_pos::DUMMY_SP,
    rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, HashStable},
};

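// Overview of this file: every active query is represented by a `QueryJob`. The
// types and functions below are used to wait on jobs, to detect query cycles,
// and (in the parallel compiler) to break deadlocks by resuming one waiter per
// cycle with a `CycleError`.
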
/// Indicates the state of a query for a given key in a query map.
pub(super) enum QueryResult<'tcx> {
    /// An already executing query. The query job can be used to await its completion.
    Started(Lrc<QueryJob<'tcx>>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error or
    /// silently panic.
    Poisoned,
}

/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo<'tcx> {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    pub query: Query<'tcx>,
}

/// Represents an active query job.
pub struct QueryJob<'tcx> {
    pub info: QueryInfo<'tcx>,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<Lrc<QueryJob<'tcx>>>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: QueryLatch<'tcx>,
}

impl<'tcx> QueryJob<'tcx> {
    /// Creates a new query job.
    pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
        QueryJob {
            info,
            parent,
            #[cfg(parallel_compiler)]
            latch: QueryLatch::new(),
        }
    }

    /// Awaits the completion of the query job.
    #[cfg(parallel_compiler)]
    pub(super) fn r#await<'lcx>(
        &self,
        tcx: TyCtxt<'_, 'tcx, 'lcx>,
        span: Span,
    ) -> Result<(), CycleError<'tcx>> {
        tls::with_related_context(tcx, move |icx| {
            let waiter = Lrc::new(QueryWaiter {
                query: icx.query.clone(),
                span,
                cycle: Lock::new(None),
                condvar: Condvar::new(),
            });
            self.latch.r#await(&waiter);

            // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
            // although another thread may still have a Lrc reference so we cannot
            // use `Lrc::get_mut`.
            let mut cycle = waiter.cycle.lock();
            match cycle.take() {
                None => Ok(()),
                Some(cycle) => Err(cycle),
            }
        })
    }

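    /// Walks the chain of parent queries starting at the currently executing one,
    /// looking for `self`. In the single threaded compiler all active queries form a
    /// single stack, so finding `self` amongst the parents of the current query
    /// identifies the cycle, which is returned as a `CycleError`.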
    #[cfg(not(parallel_compiler))]
    pub(super) fn find_cycle_in_stack<'lcx>(
        &self,
        tcx: TyCtxt<'_, 'tcx, 'lcx>,
        span: Span,
    ) -> CycleError<'tcx> {
        // Get the current executing query (waiter) and find the waitee amongst its parents
        let mut current_job = tls::with_related_context(tcx, |icx| icx.query.clone());
        let mut cycle = Vec::new();

        while let Some(job) = current_job {
            cycle.push(job.info.clone());

            if ptr::eq(&*job, self) {
                cycle.reverse();

                // This is the end of the cycle.
                // The span entry we included was for the usage
                // of the cycle itself, and not part of the cycle.
                // Replace it with the span which caused the cycle to form.
                cycle[0].span = span;
                // Find out why the cycle itself was used
                let usage = job.parent.as_ref().map(|parent| {
                    (job.info.span, parent.info.query.clone())
                });
                return CycleError { usage, cycle };
            }

            current_job = job.parent.clone();
        }

        panic!("did not find a cycle")
    }

    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for the single threaded compiler,
    /// as there are no concurrent jobs which could be waiting on us.
    pub fn signal_complete(&self) {
        #[cfg(parallel_compiler)]
        self.latch.set();
    }

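    /// Returns a raw pointer to this job, used as its identity when searching
    /// for cycles.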
    fn as_ptr(&self) -> *const QueryJob<'tcx> {
        self as *const _
    }
}

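/// A thread waiting on a query to complete, parked on `condvar`.
/// The deadlock handler may store a `CycleError` in `cycle` before waking the
/// thread, in which case the awaited query is reported as a cycle instead of
/// producing a result.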
#[cfg(parallel_compiler)]
struct QueryWaiter<'tcx> {
    query: Option<Lrc<QueryJob<'tcx>>>,
    condvar: Condvar,
    span: Span,
    cycle: Lock<Option<CycleError<'tcx>>>,
}

#[cfg(parallel_compiler)]
impl<'tcx> QueryWaiter<'tcx> {
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}

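/// The state shared between a `QueryLatch` and its waiters: whether the query
/// has completed, and the list of waiters to resume once it does.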
#[cfg(parallel_compiler)]
struct QueryLatchInfo<'tcx> {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter<'tcx>>>,
}

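/// A latch owned by a `QueryJob` which other threads can block on until the job
/// completes (or until the deadlock handler resumes them with a cycle error).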
#[cfg(parallel_compiler)]
struct QueryLatch<'tcx> {
    info: Mutex<QueryLatchInfo<'tcx>>,
}

#[cfg(parallel_compiler)]
impl<'tcx> QueryLatch<'tcx> {
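    /// Creates a new latch which is not complete and has no waiters.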
    fn new() -> Self {
        QueryLatch {
            info: Mutex::new(QueryLatchInfo {
                complete: false,
                waiters: Vec::new(),
            }),
        }
    }

    /// Awaits the caller on this latch by blocking the current thread.
    fn r#await(&self, waiter: &Lrc<QueryWaiter<'tcx>>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this thread
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // getting the self.info lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`
            drop(info);
            jobserver::acquire_thread();
        }
    }

    /// Sets the latch and resumes all waiters on it.
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(
        &self,
        waiter: usize,
    ) -> Lrc<QueryWaiter<'tcx>> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters
        info.waiters.remove(waiter)
    }
}

/// A resumable waiter of a query. The usize is the index into `waiters` in the
/// query's latch.
#[cfg(parallel_compiler)]
type Waiter<'tcx> = (Lrc<QueryJob<'tcx>>, usize);

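// A note on the terminology used below: the parent of a query is a *non-resumable*
// waiter, since it sits on the same thread's stack and can only continue once the
// query it is waiting on returns. Waiters registered in a `QueryLatch` are
// *resumable*: they are separate threads parked on a condvar which can be woken
// up individually.
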
/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters which are themselves executing a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query`
/// and a span indicating the reason the query waited on `query`.
/// If `visit` returns Some, this function returns early.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<'tcx, F>(query: Lrc<QueryJob<'tcx>>, mut visit: F) -> Option<Option<Waiter<'tcx>>>
where
    F: FnMut(Span, Lrc<QueryJob<'tcx>>) -> Option<Option<Waiter<'tcx>>>
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack
    if let Some(ref parent) = query.parent {
        if let Some(cycle) = visit(query.info.span, parent.clone()) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable
    for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() {
        if let Some(ref waiter_query) = waiter.query {
            if visit(waiter.span, waiter_query.clone()).is_some() {
                // Return a value which indicates that this waiter can be resumed
                return Some(Some((query.clone(), i)));
            }
        }
    }

    None
}

/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check<'tcx>(query: Lrc<QueryJob<'tcx>>,
                     span: Span,
                     stack: &mut Vec<(Span, Lrc<QueryJob<'tcx>>)>,
                     visited: &mut FxHashSet<*const QueryJob<'tcx>>
) -> Option<Option<Waiter<'tcx>>> {
    if !visited.insert(query.as_ptr()) {
        return if let Some(p) = stack.iter().position(|q| q.1.as_ptr() == query.as_ptr()) {
            // We detected a query cycle, fix up the initial span and return Some

            // Remove previous stack entries
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause
            stack[0].0 = span;
            Some(None)
        } else {
            None
        }
    }

    // A query which is newly marked as visited is added to the stack
    stack.push((span, query.clone()));

    // Visit all the waiters
    let r = visit_waiters(query, |span, successor| {
        cycle_check(successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle
    if r.is_none() {
        stack.pop();
    }

    r
}

/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root<'tcx>(
    query: Lrc<QueryJob<'tcx>>,
    visited: &mut FxHashSet<*const QueryJob<'tcx>>
) -> bool {
    // We already visited this or we're deliberately ignoring it
    if !visited.insert(query.as_ptr()) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true
    if query.parent.is_none() {
        return true;
    }

    visit_waiters(query, |_, successor| {
        if connected_to_root(successor, visited) {
            Some(None)
        } else {
            None
        }
    }).is_some()
}

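// Determinism matters here: the query picked below decides which cycle error the
// user ultimately sees, so the choice is made via a stable hash of the query
// rather than via pointer identity or thread timing (presumably to keep the
// diagnostics reproducible across runs).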
// Deterministically pick a query from a list
#[cfg(parallel_compiler)]
fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, Lrc<QueryJob<'tcx>>)>(
    tcx: TyCtxt<'_, 'tcx, '_>,
    queries: &'a [T],
    f: F,
) -> &'a T {
    // Deterministically pick an entry point
    // FIXME: Sort this instead
    let mut hcx = tcx.create_stable_hashing_context();
    queries.iter().min_by_key(|v| {
        let (span, query) = f(v);
        let mut stable_hasher = StableHasher::<u64>::new();
        query.info.query.hash_stable(&mut hcx, &mut stable_hasher);
        // Prefer entry points which have valid spans for nicer error messages.
        // We add an integer to the tuple ensuring that entry points
        // with valid spans are picked first.
        let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
        (span_cmp, stable_hasher.finish())
    }).unwrap()
}

/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<'tcx>(
    jobs: &mut Vec<Lrc<QueryJob<'tcx>>>,
    wakelist: &mut Vec<Lrc<QueryWaiter<'tcx>>>,
    tcx: TyCtxt<'_, 'tcx, '_>
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`
    if let Some(waiter) = cycle_check(jobs.pop().unwrap(),
                                      DUMMY_SP,
                                      &mut stack,
                                      &mut visited) {
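        // At this point `stack` contains exactly the jobs in the detected cycle;
        // `cycle_check` removed the entries leading up to it.
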
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee
        spans.rotate_right(1);

        // Zip them back together
        let mut stack: Vec<_> = spans.into_iter().zip(queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j.as_ptr() == r.1.as_ptr()) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle
        let entry_points = stack.iter().filter_map(|(span, query)| {
            if query.parent.is_none() {
                // This query is connected to the root (it has no query parent)
                Some((*span, query.clone(), None))
            } else {
                let mut waiters = Vec::new();
                // Find all the direct waiters who lead to the root
                visit_waiters(query.clone(), |span, waiter| {
                    // Mark all the other queries in the cycle as already visited
                    let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1.as_ptr()));

                    if connected_to_root(waiter.clone(), &mut visited) {
                        waiters.push((span, waiter));
                    }

                    None
                });
                if waiters.is_empty() {
                    None
                } else {
                    // Deterministically pick one of the waiters to show to the user
                    let waiter = pick_query(tcx, &waiters, |s| s.clone()).clone();
                    Some((*span, query.clone(), Some(waiter)))
                }
            }
        }).collect::<Vec<(Span, Lrc<QueryJob<'tcx>>, Option<(Span, Lrc<QueryJob<'tcx>>)>)>>();

        // Deterministically pick an entry point
        let (_, entry_point, usage) = pick_query(tcx, &entry_points, |e| (e.0, e.1.clone()));

        // Shift the stack so that our entry point is first
        let entry_point_pos = stack.iter().position(|(_, query)| {
            query.as_ptr() == entry_point.as_ptr()
        });
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.info.query.clone()));

        // Create the cycle error
        let error = CycleError {
            usage,
            cycle: stack.iter().map(|&(s, ref q)| QueryInfo {
                span: s,
                query: q.info.query.clone(),
            }).collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable, i.e. waited on using a query latch
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume
        let waiter = waitee_query.latch.extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume
        wakelist.push(waiter);

        true
    } else {
        false
    }
}

/// Creates a new thread and forwards information in thread locals to it.
/// The new thread runs the deadlock handler.
/// Must only be called when a deadlock is about to happen.
#[cfg(parallel_compiler)]
pub unsafe fn handle_deadlock() {
    let registry = rayon_core::Registry::current();

    let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| {
        gcx_ptr as *const _
    });
    let gcx_ptr = &*gcx_ptr;

    let syntax_globals = syntax::GLOBALS.with(|syntax_globals| {
        syntax_globals as *const _
    });
    let syntax_globals = &*syntax_globals;

    let syntax_pos_globals = syntax_pos::GLOBALS.with(|syntax_pos_globals| {
        syntax_pos_globals as *const _
    });
    let syntax_pos_globals = &*syntax_pos_globals;
    thread::spawn(move || {
        tls::GCX_PTR.set(gcx_ptr, || {
            syntax::GLOBALS.set(syntax_globals, || {
                syntax_pos::GLOBALS.set(syntax_pos_globals, || {
                    tls::with_thread_locals(|| {
                        tls::with_global(|tcx| deadlock(tcx, &registry))
                    })
                })
            })
        })
    });
}

/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) {
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let mut jobs: Vec<_> = tcx.queries.collect_active_jobs();

    let mut found_cycle = false;

    while !jobs.is_empty() {
        if remove_cycle(&mut jobs, &mut wakelist, tcx) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}