// src/librustc/ty/query/job.rs (upstream version 1.35.0+dfsg1)
#![allow(warnings)]

use std::mem;
use std::process;
use std::{fmt, ptr};

use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::sync::{Lock, LockGuard, Lrc, Weak};
use rustc_data_structures::OnDrop;
use rustc_data_structures::jobserver;
use syntax_pos::Span;

use crate::ty::tls;
use crate::ty::query::Query;
use crate::ty::query::plumbing::CycleError;
#[cfg(not(parallel_compiler))]
use crate::ty::query::{
    plumbing::TryGetJob,
    config::QueryDescription,
};
use crate::ty::context::TyCtxt;

#[cfg(parallel_compiler)]
use {
    rustc_rayon_core as rayon_core,
    parking_lot::{Mutex, Condvar},
    std::sync::atomic::Ordering,
    std::thread,
    std::iter,
    std::iter::FromIterator,
    syntax_pos::DUMMY_SP,
    rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, HashStable},
};

/// Indicates the state of a query for a given key in a query map.
pub(super) enum QueryResult<'tcx> {
    /// An already executing query. The query job can be used to await for its completion.
    Started(Lrc<QueryJob<'tcx>>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error or
    /// silently panic.
    Poisoned,
}

/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo<'tcx> {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    pub query: Query<'tcx>,
}

/// Represents an active query job.
pub struct QueryJob<'tcx> {
    pub info: QueryInfo<'tcx>,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<Lrc<QueryJob<'tcx>>>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: QueryLatch<'tcx>,
}

impl<'tcx> QueryJob<'tcx> {
    /// Creates a new query job.
    pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
        QueryJob {
            info,
            parent,
            #[cfg(parallel_compiler)]
            latch: QueryLatch::new(),
        }
    }

    /// Waits for the query job to complete.
    #[cfg(parallel_compiler)]
    pub(super) fn r#await<'lcx>(
        &self,
        tcx: TyCtxt<'_, 'tcx, 'lcx>,
        span: Span,
    ) -> Result<(), CycleError<'tcx>> {
        tls::with_related_context(tcx, move |icx| {
            let mut waiter = Lrc::new(QueryWaiter {
                query: icx.query.clone(),
                span,
                cycle: Lock::new(None),
                condvar: Condvar::new(),
            });
            self.latch.r#await(&waiter);
            // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
            // although another thread may still have a Lrc reference so we cannot
            // use Lrc::get_mut
            let mut cycle = waiter.cycle.lock();
            match cycle.take() {
                None => Ok(()),
                Some(cycle) => Err(cycle)
            }
        })
    }

    #[cfg(not(parallel_compiler))]
    pub(super) fn find_cycle_in_stack<'lcx>(
        &self,
        tcx: TyCtxt<'_, 'tcx, 'lcx>,
        span: Span,
    ) -> CycleError<'tcx> {
        // Get the current executing query (waiter) and find the waitee amongst its parents
        let mut current_job = tls::with_related_context(tcx, |icx| icx.query.clone());
        let mut cycle = Vec::new();

        while let Some(job) = current_job {
            cycle.push(job.info.clone());

            if ptr::eq(&*job, self) {
                cycle.reverse();

                // This is the end of the cycle
                // The span entry we included was for the usage
                // of the cycle itself, and not part of the cycle
                // Replace it with the span which caused the cycle to form
                cycle[0].span = span;
                // Find out why the cycle itself was used
                let usage = job.parent.as_ref().map(|parent| {
                    (job.info.span, parent.info.query.clone())
                });
                return CycleError { usage, cycle };
            }

            current_job = job.parent.clone();
        }

        panic!("did not find a cycle")
    }

    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us
    pub fn signal_complete(&self) {
        #[cfg(parallel_compiler)]
        self.latch.set();
    }

    fn as_ptr(&self) -> *const QueryJob<'tcx> {
        self as *const _
    }
}

#[cfg(parallel_compiler)]
struct QueryWaiter<'tcx> {
    query: Option<Lrc<QueryJob<'tcx>>>,
    condvar: Condvar,
    span: Span,
    cycle: Lock<Option<CycleError<'tcx>>>,
}

#[cfg(parallel_compiler)]
impl<'tcx> QueryWaiter<'tcx> {
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}

#[cfg(parallel_compiler)]
struct QueryLatchInfo<'tcx> {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter<'tcx>>>,
}

#[cfg(parallel_compiler)]
struct QueryLatch<'tcx> {
    info: Mutex<QueryLatchInfo<'tcx>>,
}

#[cfg(parallel_compiler)]
impl<'tcx> QueryLatch<'tcx> {
    fn new() -> Self {
        QueryLatch {
            info: Mutex::new(QueryLatchInfo {
                complete: false,
                waiters: Vec::new(),
            }),
        }
    }

    /// Awaits the caller on this latch by blocking the current thread.
    fn r#await(&self, waiter: &Lrc<QueryWaiter<'tcx>>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this thread
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // getting the self.info lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`
            mem::drop(info);
            jobserver::acquire_thread();
        }
    }

    /// Sets the latch and resumes all waiters on it
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(
        &self,
        waiter: usize,
    ) -> Lrc<QueryWaiter<'tcx>> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters
        info.waiters.remove(waiter)
    }
}

/// A resumable waiter of a query. The usize is the index into waiters in the query's latch
#[cfg(parallel_compiler)]
type Waiter<'tcx> = (Lrc<QueryJob<'tcx>>, usize);

/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query`
/// and a span indicating the reason the query waited on `query`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<'tcx, F>(query: Lrc<QueryJob<'tcx>>, mut visit: F) -> Option<Option<Waiter<'tcx>>>
where
    F: FnMut(Span, Lrc<QueryJob<'tcx>>) -> Option<Option<Waiter<'tcx>>>
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack
    if let Some(ref parent) = query.parent {
        if let Some(cycle) = visit(query.info.span, parent.clone()) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable
    for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() {
        if let Some(ref waiter_query) = waiter.query {
            if visit(waiter.span, waiter_query.clone()).is_some() {
                // Return a value which indicates that this waiter can be resumed
                return Some(Some((query.clone(), i)));
            }
        }
    }
    None
}

/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
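/// Like `visit_waiters`, this returns `None` when no cycle is found along this
/// path, `Some(None)` when a cycle has been found but no resumable (latch) edge
/// on it has been identified yet, and `Some(Some(waiter))` once such an edge is
/// found.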
#[cfg(parallel_compiler)]
fn cycle_check<'tcx>(query: Lrc<QueryJob<'tcx>>,
                     span: Span,
                     stack: &mut Vec<(Span, Lrc<QueryJob<'tcx>>)>,
                     visited: &mut FxHashSet<*const QueryJob<'tcx>>
) -> Option<Option<Waiter<'tcx>>> {
    if !visited.insert(query.as_ptr()) {
        return if let Some(p) = stack.iter().position(|q| q.1.as_ptr() == query.as_ptr()) {
            // We detected a query cycle, fix up the initial span and return Some

            // Remove previous stack entries
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause
            stack[0].0 = span;
            Some(None)
        } else {
            None
        }
    }

    // The query is now marked as visited; add it to the stack
    stack.push((span, query.clone()));

    // Visit all the waiters
    let r = visit_waiters(query, |span, successor| {
        cycle_check(successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle
    if r.is_none() {
        stack.pop();
    }

    r
}

/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root<'tcx>(
    query: Lrc<QueryJob<'tcx>>,
    visited: &mut FxHashSet<*const QueryJob<'tcx>>
) -> bool {
    // We already visited this or we're deliberately ignoring it
    if !visited.insert(query.as_ptr()) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true
    if query.parent.is_none() {
        return true;
    }

    visit_waiters(query, |_, successor| {
        if connected_to_root(successor, visited) {
            Some(None)
        } else {
            None
        }
    }).is_some()
}

// Deterministically pick a query from a list
#[cfg(parallel_compiler)]
fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, Lrc<QueryJob<'tcx>>)>(
    tcx: TyCtxt<'_, 'tcx, '_>,
    queries: &'a [T],
    f: F
) -> &'a T {
    // Deterministically pick an entry point
    // FIXME: Sort this instead
    let mut hcx = tcx.create_stable_hashing_context();
    queries.iter().min_by_key(|v| {
        let (span, query) = f(v);
        let mut stable_hasher = StableHasher::<u64>::new();
        query.info.query.hash_stable(&mut hcx, &mut stable_hasher);
        // Prefer entry points which have valid spans for nicer error messages
        // We add an integer to the tuple ensuring that entry points
        // with valid spans are picked first
        let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
        (span_cmp, stable_hasher.finish())
    }).unwrap()
}

/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<'tcx>(
    jobs: &mut Vec<Lrc<QueryJob<'tcx>>>,
    wakelist: &mut Vec<Lrc<QueryWaiter<'tcx>>>,
    tcx: TyCtxt<'_, 'tcx, '_>
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`
    if let Some(waiter) = cycle_check(jobs.pop().unwrap(),
                                      DUMMY_SP,
                                      &mut stack,
                                      &mut visited) {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee
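        // (Mechanically, `rotate_right(1)` moves the last span to the front:
        // spans `[s1, s2, s3]` become `[s3, s1, s2]`, so each query ends up
        // paired with the span at which its waiter required it.)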
        spans.rotate_right(1);

        // Zip them back together
        let mut stack: Vec<_> = spans.into_iter().zip(queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j.as_ptr() == r.1.as_ptr()) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle
        let entry_points = stack.iter().filter_map(|(span, query)| {
            if query.parent.is_none() {
                // This query is connected to the root (it has no query parent)
                Some((*span, query.clone(), None))
            } else {
                let mut waiters = Vec::new();
                // Find all the direct waiters who lead to the root
                visit_waiters(query.clone(), |span, waiter| {
                    // Mark all the other queries in the cycle as already visited
                    let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1.as_ptr()));

                    if connected_to_root(waiter.clone(), &mut visited) {
                        waiters.push((span, waiter));
                    }

                    None
                });
                if waiters.is_empty() {
                    None
                } else {
                    // Deterministically pick one of the waiters to show to the user
                    let waiter = pick_query(tcx, &waiters, |s| s.clone()).clone();
                    Some((*span, query.clone(), Some(waiter)))
                }
            }
        }).collect::<Vec<(Span, Lrc<QueryJob<'tcx>>, Option<(Span, Lrc<QueryJob<'tcx>>)>)>>();

        // Deterministically pick an entry point
        let (_, entry_point, usage) = pick_query(tcx, &entry_points, |e| (e.0, e.1.clone()));

        // Shift the stack so that our entry point is first
        let entry_point_pos = stack.iter().position(|(_, query)| {
            query.as_ptr() == entry_point.as_ptr()
        });
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.info.query.clone()));

        // Create the cycle error
        let mut error = CycleError {
            usage,
            cycle: stack.iter().map(|&(s, ref q)| QueryInfo {
                span: s,
                query: q.info.query.clone(),
            }).collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited using a query latch
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume
        let waiter = waitee_query.latch.extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume
        wakelist.push(waiter);

        true
    } else {
        false
    }
}

/// Creates a new thread and forwards information in thread locals to it.
/// The new thread runs the deadlock handler.
/// Must only be called when a deadlock is about to happen.
#[cfg(parallel_compiler)]
pub unsafe fn handle_deadlock() {
    use syntax;
    use syntax_pos;

    let registry = rayon_core::Registry::current();

    let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| {
        gcx_ptr as *const _
    });
    let gcx_ptr = &*gcx_ptr;

    let syntax_globals = syntax::GLOBALS.with(|syntax_globals| {
        syntax_globals as *const _
    });
    let syntax_globals = &*syntax_globals;

    let syntax_pos_globals = syntax_pos::GLOBALS.with(|syntax_pos_globals| {
        syntax_pos_globals as *const _
    });
    let syntax_pos_globals = &*syntax_pos_globals;
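    // The spawned thread dereferences the raw pointers created above, so the
    // thread locals of the thread calling `handle_deadlock` must remain alive
    // while the handler runs; this is why the function is `unsafe` and must only
    // be called when a deadlock is about to happen (see the doc comment above).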
    thread::spawn(move || {
        tls::GCX_PTR.set(gcx_ptr, || {
            syntax::GLOBALS.set(syntax_globals, || {
                syntax_pos::GLOBALS.set(syntax_pos_globals, || {
                    tls::with_thread_locals(|| {
                        tls::with_global(|tcx| deadlock(tcx, &registry))
                    })
                })
            })
        })
    });
}

/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) {
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let mut jobs: Vec<_> = tcx.queries.collect_active_jobs();

    let mut found_cycle = false;

    while !jobs.is_empty() {
        if remove_cycle(&mut jobs, &mut wakelist, tcx) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}