+use crate::dep_graph::DepContext;
use crate::query::plumbing::CycleError;
+use crate::query::{QueryContext, QueryStackFrame};
use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, Handler, Level};
+use rustc_session::Session;
use rustc_span::Span;
use std::convert::TryFrom;
#[cfg(parallel_compiler)]
use {
- super::QueryContext,
+ crate::dep_graph::DepKind,
parking_lot::{Condvar, Mutex},
rustc_data_structures::fx::FxHashSet,
- rustc_data_structures::stable_hasher::{HashStable, StableHasher},
rustc_data_structures::sync::Lock,
rustc_data_structures::sync::Lrc,
rustc_data_structures::{jobserver, OnDrop},
/// Represents a span and a query key.
#[derive(Clone, Debug)]
-pub struct QueryInfo<Q> {
+pub struct QueryInfo {
/// The span corresponding to the reason for which this query was required.
pub span: Span,
+ // The query key is stored as a lifetime-erased `QueryStackFrame` instead of
+ // the generic `Q`, which removes the type parameter from this struct.
- pub query: Q,
+ pub query: QueryStackFrame,
}
-pub(crate) type QueryMap<D, Q> = FxHashMap<QueryJobId<D>, QueryJobInfo<D, Q>>;
+pub type QueryMap<D> = FxHashMap<QueryJobId<D>, QueryJobInfo<D>>;
/// A value uniquely identifying an active query job within a shard in the query cache.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
QueryJobId { job, shard: u16::try_from(shard).unwrap(), kind }
}
+ // Clones the stack frame recorded for this job id.
+ // Panics if the id is not present in the map.
- fn query<Q: Clone>(self, map: &QueryMap<D, Q>) -> Q {
+ fn query(self, map: &QueryMap<D>) -> QueryStackFrame {
map.get(&self).unwrap().info.query.clone()
}
#[cfg(parallel_compiler)]
+ // Span at which this job was started; panics if the id is not in the map.
- fn span<Q: Clone>(self, map: &QueryMap<D, Q>) -> Span {
+ fn span(self, map: &QueryMap<D>) -> Span {
map.get(&self).unwrap().job.span
}
#[cfg(parallel_compiler)]
+ // Id of the query that demanded this one, or `None` for a root query.
- fn parent<Q: Clone>(self, map: &QueryMap<D, Q>) -> Option<QueryJobId<D>> {
+ fn parent(self, map: &QueryMap<D>) -> Option<QueryJobId<D>> {
map.get(&self).unwrap().job.parent
}
#[cfg(parallel_compiler)]
+ // Borrow this job's latch, if one has been created (a latch is only
+ // materialized once some thread needs to wait on the job).
- fn latch<'a, Q: Clone>(self, map: &'a QueryMap<D, Q>) -> Option<&'a QueryLatch<D, Q>> {
+ fn latch<'a>(self, map: &'a QueryMap<D>) -> Option<&'a QueryLatch<D>> {
map.get(&self).unwrap().job.latch.as_ref()
}
}
+// Pairs a query's info (span + lifetime-erased stack frame) with its active job.
-pub struct QueryJobInfo<D, Q> {
- pub info: QueryInfo<Q>,
- pub job: QueryJob<D, Q>,
+pub struct QueryJobInfo<D> {
+ pub info: QueryInfo,
+ pub job: QueryJob<D>,
}
/// Represents an active query job.
#[derive(Clone)]
-pub struct QueryJob<D, Q> {
+pub struct QueryJob<D> {
pub id: QueryShardJobId,
/// The span corresponding to the reason for which this query was required.
/// The latch that is used to wait on this job.
#[cfg(parallel_compiler)]
- latch: Option<QueryLatch<D, Q>>,
+ latch: Option<QueryLatch<D>>,
- dummy: PhantomData<QueryLatch<D, Q>>,
+ dummy: PhantomData<QueryLatch<D>>,
}
-impl<D, Q> QueryJob<D, Q>
+impl<D> QueryJob<D>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
{
/// Creates a new query job.
pub fn new(id: QueryShardJobId, span: Span, parent: Option<QueryJobId<D>>) -> Self {
}
#[cfg(parallel_compiler)]
- pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D, Q> {
+ pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D> {
if self.latch.is_none() {
self.latch = Some(QueryLatch::new());
}
}
#[cfg(not(parallel_compiler))]
+ // Single-threaded latch: just wraps the job id. The `dummy: PhantomData`
+ // field is gone now that the `Q` type parameter has been dropped.
- pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D, Q> {
- QueryLatch { id, dummy: PhantomData }
+ pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D> {
+ QueryLatch { id }
}
/// Signals to waiters that the query is complete.
#[cfg(not(parallel_compiler))]
#[derive(Clone)]
+// Non-parallel latch: no waiting is possible, so it only carries the job id
+// used for cycle detection on the single-threaded stack.
-pub(super) struct QueryLatch<D, Q> {
+pub(super) struct QueryLatch<D> {
id: QueryJobId<D>,
- dummy: PhantomData<Q>,
}
#[cfg(not(parallel_compiler))]
-impl<D, Q> QueryLatch<D, Q>
+impl<D> QueryLatch<D>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
{
pub(super) fn find_cycle_in_stack(
&self,
- query_map: QueryMap<D, Q>,
+ query_map: QueryMap<D>,
current_job: &Option<QueryJobId<D>>,
span: Span,
- ) -> CycleError<Q> {
+ ) -> CycleError {
// Find the waitee amongst `current_job` parents
let mut cycle = Vec::new();
let mut current_job = Option::clone(current_job);
}
#[cfg(parallel_compiler)]
+// A thread blocked on a query latch: holds the condvar it parks on and a
+// slot through which the deadlock handler can deliver a cycle error.
-struct QueryWaiter<D, Q> {
+struct QueryWaiter<D> {
query: Option<QueryJobId<D>>,
condvar: Condvar,
span: Span,
- cycle: Lock<Option<CycleError<Q>>>,
+ cycle: Lock<Option<CycleError>>,
}
#[cfg(parallel_compiler)]
-impl<D, Q> QueryWaiter<D, Q> {
+impl<D> QueryWaiter<D> {
+ // Wakes this waiter: first tells rayon the thread is unblocked (so its
+ // scheduler accounting stays correct), then signals the parked condvar.
fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry);
self.condvar.notify_one();
}
#[cfg(parallel_compiler)]
+// Mutex-protected latch state: whether the job has completed, plus the
+// list of threads currently waiting on it.
-struct QueryLatchInfo<D, Q> {
+struct QueryLatchInfo<D> {
complete: bool,
- waiters: Vec<Lrc<QueryWaiter<D, Q>>>,
+ waiters: Vec<Lrc<QueryWaiter<D>>>,
}
#[cfg(parallel_compiler)]
#[derive(Clone)]
+// Shared handle to the latch state; cheaply cloned into each waiting thread.
-pub(super) struct QueryLatch<D, Q> {
- info: Lrc<Mutex<QueryLatchInfo<D, Q>>>,
+pub(super) struct QueryLatch<D> {
+ info: Lrc<Mutex<QueryLatchInfo<D>>>,
}
#[cfg(parallel_compiler)]
-impl<D: Eq + Hash, Q: Clone> QueryLatch<D, Q> {
+impl<D: Eq + Hash> QueryLatch<D> {
fn new() -> Self {
QueryLatch {
info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
}
#[cfg(parallel_compiler)]
-impl<D, Q> QueryLatch<D, Q> {
+impl<D> QueryLatch<D> {
/// Awaits for the query job to complete.
pub(super) fn wait_on(
&self,
query: Option<QueryJobId<D>>,
span: Span,
- ) -> Result<(), CycleError<Q>> {
+ ) -> Result<(), CycleError> {
let waiter =
Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
self.wait_on_inner(&waiter);
}
/// Awaits the caller on this latch by blocking the current thread.
- fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D, Q>>) {
+ fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
let mut info = self.info.lock();
if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside
/// Removes a single waiter from the list of waiters.
/// This is used to break query cycles.
- fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D, Q>> {
+ fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
let mut info = self.info.lock();
debug_assert!(!info.complete);
// Remove the waiter from the list of waiters
/// required information to resume the waiter.
/// If all `visit` calls returns None, this function also returns None.
#[cfg(parallel_compiler)]
-fn visit_waiters<D, Q, F>(
- query_map: &QueryMap<D, Q>,
+fn visit_waiters<D, F>(
+ query_map: &QueryMap<D>,
query: QueryJobId<D>,
mut visit: F,
) -> Option<Option<Waiter<D>>>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
F: FnMut(Span, QueryJobId<D>) -> Option<Option<Waiter<D>>>,
{
// Visit the parent query which is a non-resumable waiter since it's on the same stack
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
-fn cycle_check<D, Q>(
- query_map: &QueryMap<D, Q>,
+fn cycle_check<D>(
+ query_map: &QueryMap<D>,
query: QueryJobId<D>,
span: Span,
stack: &mut Vec<(Span, QueryJobId<D>)>,
) -> Option<Option<Waiter<D>>>
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
{
if !visited.insert(query) {
return if let Some(p) = stack.iter().position(|q| q.1 == query) {
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
-fn connected_to_root<D, Q>(
- query_map: &QueryMap<D, Q>,
+fn connected_to_root<D>(
+ query_map: &QueryMap<D>,
query: QueryJobId<D>,
visited: &mut FxHashSet<QueryJobId<D>>,
) -> bool
where
D: Copy + Clone + Eq + Hash,
- Q: Clone,
{
// We already visited this or we're deliberately ignoring it
if !visited.insert(query) {
// Deterministically pick an query from a list
#[cfg(parallel_compiler)]
+// Deterministically selects one entry from `queries`, preferring entries with
+// a real (non-dummy) span and tie-breaking on the frame's precomputed hash so
+// the choice is stable across runs. No longer needs `tcx`: the hash is read
+// off the `QueryStackFrame` instead of being recomputed via stable hashing.
-fn pick_query<'a, CTX, T, F>(
- query_map: &QueryMap<CTX::DepKind, CTX::Query>,
- tcx: CTX,
- queries: &'a [T],
- f: F,
-) -> &'a T
+fn pick_query<'a, D, T, F>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
where
- CTX: QueryContext,
- F: Fn(&T) -> (Span, QueryJobId<CTX::DepKind>),
+ D: Copy + Clone + Eq + Hash,
+ F: Fn(&T) -> (Span, QueryJobId<D>),
{
// Deterministically pick an entry point
// FIXME: Sort this instead
- let mut hcx = tcx.create_stable_hashing_context();
queries
.iter()
.min_by_key(|v| {
let (span, query) = f(v);
- let mut stable_hasher = StableHasher::new();
- query.query(query_map).hash_stable(&mut hcx, &mut stable_hasher);
+ // Use the frame's precomputed hash rather than re-running the stable hasher.
+ let hash = query.query(query_map).hash;
// Prefer entry points which have valid spans for nicer error messages
// We add an integer to the tuple ensuring that entry points
// with valid spans are picked first
let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
- (span_cmp, stable_hasher.finish::<u64>())
+ (span_cmp, hash)
})
.unwrap()
}
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
-fn remove_cycle<CTX: QueryContext>(
- query_map: &QueryMap<CTX::DepKind, CTX::Query>,
- jobs: &mut Vec<QueryJobId<CTX::DepKind>>,
- wakelist: &mut Vec<Lrc<QueryWaiter<CTX::DepKind, CTX::Query>>>,
- tcx: CTX,
+fn remove_cycle<D: DepKind>(
+ query_map: &QueryMap<D>,
+ jobs: &mut Vec<QueryJobId<D>>,
+ wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
) -> bool {
let mut visited = FxHashSet::default();
let mut stack = Vec::new();
None
} else {
// Deterministically pick one of the waiters to show to the user
- let waiter = *pick_query(query_map, tcx, &waiters, |s| *s);
+ let waiter = *pick_query(query_map, &waiters, |s| *s);
Some((span, query, Some(waiter)))
}
}
})
- .collect::<Vec<(Span, QueryJobId<CTX::DepKind>, Option<(Span, QueryJobId<CTX::DepKind>)>)>>();
+ .collect::<Vec<(Span, QueryJobId<D>, Option<(Span, QueryJobId<D>)>)>>();
// Deterministically pick an entry point
- let (_, entry_point, usage) = pick_query(query_map, tcx, &entry_points, |e| (e.0, e.1));
+ let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));
// Shift the stack so that our entry point is first
let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
let mut found_cycle = false;
while jobs.len() > 0 {
- if remove_cycle(&query_map, &mut jobs, &mut wakelist, tcx) {
+ if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
found_cycle = true;
}
}
on_panic.disable();
}
+
+#[inline(never)]
+#[cold]
+/// Builds the E0391 "cycle detected" diagnostic for a query cycle: one note
+/// per stack frame plus an optional note for the query that used the cycle.
+/// Cold/never-inline since this only runs on the error path.
+pub(crate) fn report_cycle<'a>(
+ sess: &'a Session,
+ CycleError { usage, cycle: stack }: CycleError,
+) -> DiagnosticBuilder<'a> {
+ assert!(!stack.is_empty());
+
+ // Point each message at the head of the span where the query was demanded,
+ // falling back to the query's own default span.
+ let fix_span = |span: Span, query: &QueryStackFrame| {
+ sess.source_map().guess_head_span(query.default_span(span))
+ };
+
+ // Frame i is reported at the span recorded in frame i+1 (the site that
+ // demanded it); `1 % stack.len()` wraps to 0 for a single-entry self-cycle.
+ let span = fix_span(stack[1 % stack.len()].span, &stack[0].query);
+ let mut err =
+ struct_span_err!(sess, span, E0391, "cycle detected when {}", stack[0].query.description);
+
+ for i in 1..stack.len() {
+ let query = &stack[i].query;
+ let span = fix_span(stack[(i + 1) % stack.len()].span, query);
+ err.span_note(span, &format!("...which requires {}...", query.description));
+ }
+
+ err.note(&format!(
+ "...which again requires {}, completing the cycle",
+ stack[0].query.description
+ ));
+
+ // If a parent query consumed the cycle's result, note where.
+ if let Some((span, query)) = usage {
+ err.span_note(fix_span(span, &query), &format!("cycle used when {}", query.description));
+ }
+
+ err
+}
+
+/// Prints the active query stack starting from `current_query`, walking
+/// parent links, limited to `num_frames` entries (all frames if `None`).
+/// Returns the number of frames printed.
+pub fn print_query_stack<CTX: QueryContext>(
+ tcx: CTX,
+ mut current_query: Option<QueryJobId<CTX::DepKind>>,
+ handler: &Handler,
+ num_frames: Option<usize>,
+) -> usize {
+ // Be careful relying on global state here: this code is called from
+ // a panic hook, which means that the global `Handler` may be in a weird
+ // state if it was responsible for triggering the panic.
+ let mut i = 0;
+ let query_map = tcx.try_collect_active_jobs();
+
+ while let Some(query) = current_query {
+ if Some(i) == num_frames {
+ break;
+ }
+ // Stop if the job map could not be collected or no longer contains
+ // this job id — presumably the job has finished; best-effort only.
+ let query_info = if let Some(info) = query_map.as_ref().and_then(|map| map.get(&query)) {
+ info
+ } else {
+ break;
+ };
+ // `force_print_diagnostic` bypasses the handler's normal suppression,
+ // since this runs during panicking and must always be visible.
+ let mut diag = Diagnostic::new(
+ Level::FailureNote,
+ &format!(
+ "#{} [{}] {}",
+ i, query_info.info.query.name, query_info.info.query.description
+ ),
+ );
+ diag.span =
+ tcx.dep_context().sess().source_map().guess_head_span(query_info.info.span).into();
+ handler.force_print_diagnostic(diag);
+
+ current_query = query_info.job.parent;
+ i += 1;
+ }
+
+ i
+}