// compiler/rustc_query_system/src/query/job.rs (rustc.git, upstream version 1.53.0+dfsg1)
use crate::dep_graph::DepContext;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};

use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level};
use rustc_session::Session;
use rustc_span::Span;

use std::convert::TryFrom;
use std::hash::Hash;
use std::marker::PhantomData;
use std::num::NonZeroU32;

#[cfg(parallel_compiler)]
use {
    crate::dep_graph::DepKind,
    parking_lot::{Condvar, Mutex},
    rustc_data_structures::fx::FxHashSet,
    rustc_data_structures::sync::Lock,
    rustc_data_structures::sync::Lrc,
    rustc_data_structures::{jobserver, OnDrop},
    rustc_rayon_core as rayon_core,
    rustc_span::DUMMY_SP,
    std::iter::{self, FromIterator},
    std::{mem, process},
};

/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    pub query: QueryStackFrame,
}

pub type QueryMap<D> = FxHashMap<QueryJobId<D>, QueryJobInfo<D>>;

/// A value uniquely identifying an active query job within a shard in the query cache.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryShardJobId(pub NonZeroU32);

/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId<D> {
    /// Which job within a shard is this
    pub job: QueryShardJobId,

    /// In which shard is this job
    pub shard: u16,

    /// What kind of query this job is.
    pub kind: D,
}

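// A `QueryJobId` is assembled when a job starts executing: query state is sharded,
// each shard hands out `QueryShardJobId`s from its own counter, and the shard index
// plus the query's `DepKind` complete the id. A minimal sketch of the construction
// (the actual call site lives in the query plumbing, not in this file; `dep_kind`
// here is a placeholder for the query's `DepKind` value):
//
//     let job = QueryShardJobId(NonZeroU32::new(1).unwrap());
//     let id = QueryJobId::new(job, /* shard index */ 0, dep_kind);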
impl<D> QueryJobId<D>
where
    D: Copy + Clone + Eq + Hash,
{
    pub fn new(job: QueryShardJobId, shard: usize, kind: D) -> Self {
        QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
    }

    fn query(self, map: &QueryMap<D>) -> QueryStackFrame {
        map.get(&self).unwrap().info.query.clone()
    }

    #[cfg(parallel_compiler)]
    fn span(self, map: &QueryMap<D>) -> Span {
        map.get(&self).unwrap().job.span
    }

    #[cfg(parallel_compiler)]
    fn parent(self, map: &QueryMap<D>) -> Option<QueryJobId<D>> {
        map.get(&self).unwrap().job.parent
    }

    #[cfg(parallel_compiler)]
    fn latch<'a>(self, map: &'a QueryMap<D>) -> Option<&'a QueryLatch<D>> {
        map.get(&self).unwrap().job.latch.as_ref()
    }
}

pub struct QueryJobInfo<D> {
    pub info: QueryInfo,
    pub job: QueryJob<D>,
}

/// Represents an active query job.
#[derive(Clone)]
pub struct QueryJob<D> {
    pub id: QueryShardJobId,

    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<QueryJobId<D>>,

    /// The latch that is used to wait on this job.
    #[cfg(parallel_compiler)]
    latch: Option<QueryLatch<D>>,

    dummy: PhantomData<QueryLatch<D>>,
}

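// A rough sketch of how the query plumbing drives a job (variable names here are
// placeholders; the real driving code lives in the query plumbing, not in this file):
//
//     let job = QueryJob::new(shard_job_id, span, parent); // query starts executing
//     // ... the query provider runs ...
//     job.signal_complete();                               // wake any parallel waiters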
impl<D> QueryJob<D>
where
    D: Copy + Clone + Eq + Hash,
{
    /// Creates a new query job.
    pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
        QueryJob {
            id,
            span,
            parent,
            #[cfg(parallel_compiler)]
            latch: None,
            dummy: PhantomData,
        }
    }

    #[cfg(parallel_compiler)]
    pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D> {
        if self.latch.is_none() {
            self.latch = Some(QueryLatch::new());
        }
        self.latch.as_ref().unwrap().clone()
    }

    #[cfg(not(parallel_compiler))]
    pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D> {
        QueryLatch { id }
    }

    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single-threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us.
    pub fn signal_complete(self) {
        #[cfg(parallel_compiler)]
        {
            if let Some(latch) = self.latch {
                latch.set();
            }
        }
    }
}

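// There are two `QueryLatch` implementations. In the single-threaded compiler the
// "latch" is only the id of the job being waited on: nothing can block, so it is
// used purely to detect a cycle by walking the current job's parent chain (see
// `find_cycle_in_stack` below). In the parallel compiler the latch is a real
// synchronization primitive built from a mutex-protected waiter list and condvars
// (see the `#[cfg(parallel_compiler)]` definitions further down).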
#[cfg(not(parallel_compiler))]
#[derive(Clone)]
pub(super) struct QueryLatch<D> {
    id: QueryJobId<D>,
}

#[cfg(not(parallel_compiler))]
impl<D> QueryLatch<D>
where
    D: Copy + Clone + Eq + Hash,
{
    pub(super) fn find_cycle_in_stack(
        &self,
        query_map: QueryMap<D>,
        current_job: &Option<QueryJobId<D>>,
        span: Span,
    ) -> CycleError {
        // Find the waitee amongst `current_job`'s parents.
        let mut cycle = Vec::new();
        let mut current_job = Option::clone(current_job);

        while let Some(job) = current_job {
            let info = query_map.get(&job).unwrap();
            cycle.push(info.info.clone());

            if job == self.id {
                cycle.reverse();

                // This is the end of the cycle. The span entry we included was for
                // the usage of the cycle itself, and not part of the cycle; replace
                // it with the span which caused the cycle to form.
                cycle[0].span = span;
                // Find out why the cycle itself was used.
                let usage = info
                    .job
                    .parent
                    .as_ref()
                    .map(|parent| (info.info.span, parent.query(&query_map)));
                return CycleError { usage, cycle };
            }

            current_job = info.job.parent;
        }

        panic!("did not find a cycle")
    }
}

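// Parallel waiting protocol: a thread that needs the result of a query another
// thread is already executing creates a `QueryWaiter`, pushes it onto the target
// job's latch (`QueryLatch::wait_on`), and blocks on the waiter's condvar. It is
// woken in one of two ways: either the running query finishes and `QueryLatch::set`
// notifies every registered waiter, or the deadlock handler below breaks a query
// cycle by extracting a single waiter, storing a `CycleError` in `waiter.cycle`,
// and notifying just that waiter.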
#[cfg(parallel_compiler)]
struct QueryWaiter<D> {
    query: Option<QueryJobId<D>>,
    condvar: Condvar,
    span: Span,
    cycle: Lock<Option<CycleError>>,
}

#[cfg(parallel_compiler)]
impl<D> QueryWaiter<D> {
    fn notify(&self, registry: &rayon_core::Registry) {
        rayon_core::mark_unblocked(registry);
        self.condvar.notify_one();
    }
}

#[cfg(parallel_compiler)]
struct QueryLatchInfo<D> {
    complete: bool,
    waiters: Vec<Lrc<QueryWaiter<D>>>,
}

#[cfg(parallel_compiler)]
#[derive(Clone)]
pub(super) struct QueryLatch<D> {
    info: Lrc<Mutex<QueryLatchInfo<D>>>,
}

#[cfg(parallel_compiler)]
impl<D: Eq + Hash> QueryLatch<D> {
    fn new() -> Self {
        QueryLatch {
            info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
        }
    }
}

#[cfg(parallel_compiler)]
impl<D> QueryLatch<D> {
    /// Waits for the query job to complete.
    pub(super) fn wait_on(
        &self,
        query: Option<QueryJobId<D>>,
        span: Span,
    ) -> Result<(), CycleError> {
        let waiter =
            Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
        self.wait_on_inner(&waiter);
        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
        // although another thread may still have a Lrc reference so we cannot
        // use Lrc::get_mut
        let mut cycle = waiter.cycle.lock();
        match cycle.take() {
            None => Ok(()),
            Some(cycle) => Err(cycle),
        }
    }

    /// Waits on this latch, blocking the current thread.
    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(waiter.clone());

            // If this detects a deadlock and the deadlock handler wants to resume this thread
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // getting the self.info lock.
            rayon_core::mark_blocked();
            jobserver::release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`
            mem::drop(info);
            jobserver::acquire_thread();
        }
    }

    /// Sets the latch and resumes all waiters on it.
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            waiter.notify(&registry);
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters
        info.waiters.remove(waiter)
    }
}

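// The wait graph walked by the functions below has two kinds of edges. A query's
// parent (the query that spawned it on the same thread) implicitly waits on it;
// that edge is non-resumable. A query on another thread that blocked on this
// query's latch is a resumable waiter: it can be woken early to break a cycle.
// An illustrative two-thread example:
//
//     thread 1:  A --spawns--> B --blocks on latch of--> C
//     thread 2:  C --spawns--> D --blocks on latch of--> B
//
// Here B waits on C through a latch, C implicitly waits on its child D, and D
// waits on B through a latch, so B, C and D form a cycle that only the deadlock
// handler can break, by resuming one of the latch waiters with a cycle error.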
/// A resumable waiter of a query. The `usize` is the index into `waiters` in the query's latch.
#[cfg(parallel_compiler)]
type Waiter<D> = (QueryJobId<D>, usize);

/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
/// and a span indicating the reason the query waited on `query_ref`.
/// If `visit` returns Some, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
fn visit_waiters<D, F>(
    query_map: &QueryMap<D>,
    query: QueryJobId<D>,
    mut visit: F,
) -> Option<Option<Waiter<D>>>
where
    D: Copy + Clone + Eq + Hash,
    F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack
    if let Some(parent) = query.parent(query_map) {
        if let Some(cycle) = visit(query.span(query_map), parent) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable
    if let Some(latch) = query.latch(query_map) {
        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
            if let Some(waiter_query) = waiter.query {
                if visit(waiter.span, waiter_query).is_some() {
                    // Return a value which indicates that this waiter can be resumed
                    return Some(Some((query, i)));
                }
            }
        }
    }

    None
}

/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
fn cycle_check<D>(
    query_map: &QueryMap<D>,
    query: QueryJobId<D>,
    span: Span,
    stack: &mut Vec<(Span, QueryJobId<D>)>,
    visited: &mut FxHashSet<QueryJobId<D>>,
) -> Option<Option<Waiter<D>>>
where
    D: Copy + Clone + Eq + Hash,
{
    if !visited.insert(query) {
        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
            // We detected a query cycle, fix up the initial span and return Some

            // Remove previous stack entries
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause
            stack[0].0 = span;
            Some(None)
        } else {
            None
        };
    }

    // The query was just marked as visited; add it to the stack as well
    stack.push((span, query));

    // Visit all the waiters
    let r = visit_waiters(query_map, query, |span, successor| {
        cycle_check(query_map, successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle
    if r.is_none() {
        stack.pop();
    }

    r
}

/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
fn connected_to_root<D>(
    query_map: &QueryMap<D>,
    query: QueryJobId<D>,
    visited: &mut FxHashSet<QueryJobId<D>>,
) -> bool
where
    D: Copy + Clone + Eq + Hash,
{
    // We already visited this or we're deliberately ignoring it
    if !visited.insert(query) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true
    if query.parent(query_map).is_none() {
        return true;
    }

    visit_waiters(query_map, query, |_, successor| {
        connected_to_root(query_map, successor, visited).then_some(None)
    })
    .is_some()
}

// Deterministically pick a query from a list
#[cfg(parallel_compiler)]
fn pick_query<'a, D, T, F>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
where
    D: Copy + Clone + Eq + Hash,
    F: Fn(&T) -> (Span, QueryJobId<D>),
{
    // Deterministically pick an entry point
    // FIXME: Sort this instead
    queries
        .iter()
        .min_by_key(|v| {
            let (span, query) = f(v);
            let hash = query.query(query_map).hash;
            // Prefer entry points which have valid spans for nicer error messages
            // We add an integer to the tuple ensuring that entry points
            // with valid spans are picked first
            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
            (span_cmp, hash)
        })
        .unwrap()
}

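// Cycle removal proceeds in broad strokes as follows: run `cycle_check` from the
// last job in `jobs`; if it finds a cycle, drop the cycle's members from `jobs`,
// deterministically pick an "entry point" query (one reachable from outside the
// cycle) so the cycle is always reported starting from the same frame, build the
// `CycleError`, and finally extract one resumable latch waiter from the cycle,
// hand it the error, and queue it on `wakelist` to be woken by `deadlock`.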
/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
fn remove_cycle<D: DepKind>(
    query_map: &QueryMap<D>,
    jobs: &mut Vec<QueryJobId<D>>,
    wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`
    if let Some(waiter) =
        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
    {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee
        spans.rotate_right(1);

        // Zip them back together
        let mut stack: Vec<_> = iter::zip(spans, queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle
        let entry_points = stack
            .iter()
            .filter_map(|&(span, query)| {
                if query.parent(query_map).is_none() {
                    // This query is connected to the root (it has no query parent)
                    Some((span, query, None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters who lead to the root
                    visit_waiters(query_map, query, |span, waiter| {
                        // Mark all the other queries in the cycle as already visited
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));

                        if connected_to_root(query_map, waiter, &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user
                        let waiter = *pick_query(query_map, &waiters, |s| *s);
                        Some((span, query, Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, QueryJobId<D>, Option<(Span, QueryJobId<D>)>)>>();

        // Deterministically pick an entry point
        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));

        // Shift the stack so that our entry point is first
        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));

        // Create the cycle error
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
                .collect(),
        };

        // We unwrap `waiter` here since there must always be one
        // edge which is resumable / waited using a query latch
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume
        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume
        wakelist.push(waiter);

        true
    } else {
        false
    }
}

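// `deadlock` is meant to be installed as the rayon thread pool's deadlock handler
// when the parallel compiler is built; the registration itself lives outside this
// file, in the compiler's thread-pool setup. Rayon invokes the handler once it
// determines that every worker thread in the pool is blocked.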
/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn deadlock<CTX: QueryContext>(tcx: CTX, registry: &rayon_core::Registry) {
    let on_panic = OnDrop(|| {
        eprintln!("deadlock handler panicked, aborting process");
        process::abort();
    });

    let mut wakelist = Vec::new();
    let query_map = tcx.try_collect_active_jobs().unwrap();
    let mut jobs: Vec<QueryJobId<CTX::DepKind>> = query_map.keys().cloned().collect();

    let mut found_cycle = false;

    while jobs.len() > 0 {
        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    assert!(found_cycle);

    // FIXME: Ensure this won't cause a deadlock before we return
    for waiter in wakelist.into_iter() {
        waiter.notify(registry);
    }

    on_panic.disable();
}

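// The diagnostic built below renders roughly like the following (the query
// descriptions are illustrative, not taken from a real session):
//
//     error[E0391]: cycle detected when computing type of `Foo`
//       |
//     note: ...which requires const-evaluating `Foo::{constant#0}`...
//     note: ...which again requires computing type of `Foo`, completing the cycle
//     note: cycle used when checking that `Foo` is well-formed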
#[inline(never)]
#[cold]
pub(crate) fn report_cycle<'a>(
    sess: &'a Session,
    CycleError { usage, cycle: stack }: CycleError,
) -> DiagnosticBuilder<'a> {
    assert!(!stack.is_empty());

    let fix_span = |span: Span, query: &QueryStackFrame| {
        sess.source_map().guess_head_span(query.default_span(span))
    };

    let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
    let mut err =
        struct_span_err!(sess, span, E0391, "cycle detected when {}", stack[0].query.description);

    for i in 1..stack.len() {
        let query = &stack[i].query;
        let span = fix_span(stack[(i + 1) % stack.len()].span, query);
        err.span_note(span, &format!("...which requires {}...", query.description));
    }

    err.note(&format!(
        "...which again requires {}, completing the cycle",
        stack[0].query.description
    ));

    if let Some((span, query)) = usage {
        err.span_note(fix_span(span, &query), &format!("cycle used when {}", query.description));
    }

    err
}

pub fn print_query_stack<CTX: QueryContext>(
    tcx: CTX,
    mut current_query: Option<QueryJobId<CTX::DepKind>>,
    handler: &Handler,
    num_frames: Option<usize>,
) -> usize {
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `Handler` may be in a weird
    // state if it was responsible for triggering the panic.
    let mut i = 0;
    let query_map = tcx.try_collect_active_jobs();

    while let Some(query) = current_query {
        if Some(i) == num_frames {
            break;
        }
        let query_info = if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
            info
        } else {
            break;
        };
        let mut diag = Diagnostic::new(
            Level::FailureNote,
            &format!(
                "#{} [{}] {}",
                i, query_info.info.query.name, query_info.info.query.description
            ),
        );
        diag.span =
            tcx.dep_context().sess().source_map().guess_head_span(query_info.info.span).into();
        handler.force_print_diagnostic(diag);

        current_query = query_info.job.parent;
        i += 1;
    }

    i
}