git.proxmox.com Git - rustc.git/blobdiff - compiler/rustc_query_system/src/query/plumbing.rs
New upstream version 1.54.0+dfsg1
index 06a364691d653d28caa361e1cfb2658f4f47a7dd..c1f9fa39e98c5cf183abc8e66fce2f6b5004c98d 100644
@@ -2,7 +2,7 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use crate::dep_graph::{DepContext, DepKind, DepNode};
+use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeParams};
 use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
 use crate::query::caches::QueryCache;
 use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt};
@@ -11,15 +11,15 @@ use crate::query::job::{
 };
 use crate::query::{QueryContext, QueryMap, QueryStackFrame};
 
-#[cfg(not(parallel_compiler))]
-use rustc_data_structures::cold_path;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHasher};
 use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
 use rustc_data_structures::sync::{Lock, LockGuard};
 use rustc_data_structures::thin_vec::ThinVec;
+#[cfg(not(parallel_compiler))]
+use rustc_errors::DiagnosticBuilder;
 use rustc_errors::{Diagnostic, FatalError};
-use rustc_span::Span;
+use rustc_span::{Span, DUMMY_SP};
 use std::collections::hash_map::Entry;
 use std::fmt::Debug;
 use std::hash::{Hash, Hasher};
@@ -36,7 +36,7 @@ pub struct QueryCacheStore<C: QueryCache> {
     pub cache_hits: AtomicUsize,
 }
 
-impl<C: QueryCache> Default for QueryCacheStore<C> {
+impl<C: QueryCache + Default> Default for QueryCacheStore<C> {
     fn default() -> Self {
         Self {
             cache: C::default(),
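
The tightened bound makes an implicit requirement explicit: the impl body calls `C::default()`, so the blanket impl is only valid when the cache type is itself `Default`. A minimal sketch of the same pattern, with hypothetical names:

    use std::sync::atomic::AtomicUsize;

    // Stand-in for QueryCacheStore: the manual Default impl calls
    // C::default(), so C needs its own Default bound.
    pub struct Store<C> {
        pub cache: C,
        pub hits: AtomicUsize,
    }

    impl<C: Default> Default for Store<C> {
        fn default() -> Self {
            Store { cache: C::default(), hits: AtomicUsize::new(0) }
        }
    }
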
@@ -125,18 +125,15 @@ where
         // We use try_lock_shards here since we are called from the
         // deadlock handler, and this shouldn't be locked.
         let shards = self.shards.try_lock_shards()?;
-        let shards = shards.iter().enumerate();
-        jobs.extend(shards.flat_map(|(shard_id, shard)| {
-            shard.active.iter().filter_map(move |(k, v)| {
+        for (shard_id, shard) in shards.iter().enumerate() {
+            for (k, v) in shard.active.iter() {
                 if let QueryResult::Started(ref job) = *v {
                     let id = QueryJobId::new(job.id, shard_id, kind);
                     let info = QueryInfo { span: job.span, query: make_query(tcx, k.clone()) };
-                    Some((id, QueryJobInfo { info, job: job.clone() }))
-                } else {
-                    None
+                    jobs.insert(id, QueryJobInfo { info, job: job.clone() });
                 }
-            })
-        }));
+            }
+        }
 
         Some(())
     }
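
The loop version is behaviorally the same as the old `extend`/`flat_map`/`filter_map` chain; it just inserts matches directly instead of building an iterator pipeline. Both shapes, reduced to plain standard-library types (a sketch, not the real query types):

    use std::collections::HashMap;

    type Shard = Vec<(u32, Option<String>)>;

    // Old shape: a single expression; the inner closure moves shard_id.
    fn collect_chain(shards: &[Shard]) -> HashMap<(usize, u32), String> {
        let mut jobs = HashMap::new();
        jobs.extend(shards.iter().enumerate().flat_map(|(shard_id, shard)| {
            shard.iter().filter_map(move |(k, v)| {
                v.as_ref().map(|job| ((shard_id, *k), job.clone()))
            })
        }));
        jobs
    }

    // New shape: straight-line loops, insert on match.
    fn collect_loops(shards: &[Shard]) -> HashMap<(usize, u32), String> {
        let mut jobs = HashMap::new();
        for (shard_id, shard) in shards.iter().enumerate() {
            for (k, v) in shard.iter() {
                if let Some(job) = v {
                    jobs.insert((shard_id, *k), job.clone());
                }
            }
        }
        jobs
    }
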
@@ -161,6 +158,31 @@ where
     id: QueryJobId<D>,
 }
 
+#[cold]
+#[inline(never)]
+#[cfg(not(parallel_compiler))]
+fn mk_cycle<CTX, V, R>(
+    tcx: CTX,
+    root: QueryJobId<CTX::DepKind>,
+    span: Span,
+    handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
+    cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
+) -> R
+where
+    CTX: QueryContext,
+    V: std::fmt::Debug,
+    R: Clone,
+{
+    let error: CycleError = root.find_cycle_in_stack(
+        tcx.try_collect_active_jobs().unwrap(),
+        &tcx.current_query_job(),
+        span,
+    );
+    let error = report_cycle(tcx.dep_context().sess(), error);
+    let value = handle_cycle_error(tcx, error);
+    cache.store_nocache(value)
+}
+
 impl<'tcx, D, C> JobOwner<'tcx, D, C>
 where
     D: Copy + Clone + Eq + Hash,
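
`mk_cycle` hoists the old `cold_path(|| ...)` body into a named helper; dropping the `rustc_data_structures::cold_path` import above is the other half of the same change. `#[cold]` plus `#[inline(never)]` on a free function carries the equivalent optimizer hint. The pattern in isolation (hypothetical example):

    // The rare path lives in its own function: #[cold] marks it unlikely
    // to be called, #[inline(never)] keeps it out of the hot caller.
    #[cold]
    #[inline(never)]
    fn overflow_fallback(x: u64) -> u64 {
        eprintln!("saturating: {} * 2 overflows", x);
        u64::MAX
    }

    fn double(x: u64) -> u64 {
        match x.checked_mul(2) {
            Some(v) => v,                 // hot path
            None => overflow_fallback(x), // cold path
        }
    }
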
@@ -180,7 +202,7 @@ where
         state: &'b QueryState<CTX::DepKind, C::Key>,
         cache: &'b QueryCacheStore<C>,
         span: Span,
-        key: &C::Key,
+        key: C::Key,
         lookup: QueryLookup,
         query: &QueryVtable<CTX, C::Key, C::Value>,
     ) -> TryGetJob<'b, CTX::DepKind, C>
@@ -191,94 +213,86 @@ where
         let mut state_lock = state.shards.get_shard_by_index(shard).lock();
         let lock = &mut *state_lock;
 
-        let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
-            Entry::Occupied(mut entry) => {
-                match entry.get_mut() {
-                    QueryResult::Started(job) => {
-                        // For parallel queries, we'll block and wait until the query running
-                        // in another thread has completed. Record how long we wait in the
-                        // self-profiler.
-                        let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
-                            Some(tcx.dep_context().profiler().query_blocked())
-                        } else {
-                            None
-                        };
-
-                        // Create the id of the job we're waiting for
-                        let id = QueryJobId::new(job.id, shard, query.dep_kind);
-
-                        (job.latch(id), _query_blocked_prof_timer)
-                    }
-                    QueryResult::Poisoned => FatalError.raise(),
-                }
-            }
+        match lock.active.entry(key) {
             Entry::Vacant(entry) => {
-                // No job entry for this query. Return a new one to be started later.
-
                 // Generate an id unique within this shard.
                 let id = lock.jobs.checked_add(1).unwrap();
                 lock.jobs = id;
                 let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
 
-                let global_id = QueryJobId::new(id, shard, query.dep_kind);
-
                 let job = tcx.current_query_job();
                 let job = QueryJob::new(id, span, job);
 
+                let key = entry.key().clone();
                 entry.insert(QueryResult::Started(job));
 
-                let owner = JobOwner { state, cache, id: global_id, key: (*key).clone() };
+                let global_id = QueryJobId::new(id, shard, query.dep_kind);
+                let owner = JobOwner { state, cache, id: global_id, key };
                 return TryGetJob::NotYetStarted(owner);
             }
-        };
-        mem::drop(state_lock);
-
-        // If we are single-threaded we know that we have cycle error,
-        // so we just return the error.
-        #[cfg(not(parallel_compiler))]
-        return TryGetJob::Cycle(cold_path(|| {
-            let error: CycleError = latch.find_cycle_in_stack(
-                tcx.try_collect_active_jobs().unwrap(),
-                &tcx.current_query_job(),
-                span,
-            );
-            let error = report_cycle(tcx.dep_context().sess(), error);
-            let value = query.handle_cycle_error(tcx, error);
-            cache.cache.store_nocache(value)
-        }));
-
-        // With parallel queries we might just have to wait on some other
-        // thread.
-        #[cfg(parallel_compiler)]
-        {
-            let result = latch.wait_on(tcx.current_query_job(), span);
-
-            if let Err(cycle) = result {
-                let cycle = report_cycle(tcx.dep_context().sess(), cycle);
-                let value = query.handle_cycle_error(tcx, cycle);
-                let value = cache.cache.store_nocache(value);
-                return TryGetJob::Cycle(value);
-            }
+            Entry::Occupied(mut entry) => {
+                match entry.get_mut() {
+                    #[cfg(not(parallel_compiler))]
+                    QueryResult::Started(job) => {
+                        let id = QueryJobId::new(job.id, shard, query.dep_kind);
 
-            let cached = cache
-                .cache
-                .lookup(cache, &key, |value, index| {
-                    if unlikely!(tcx.dep_context().profiler().enabled()) {
-                        tcx.dep_context().profiler().query_cache_hit(index.into());
+                        drop(state_lock);
+
+                        // If we are single-threaded we know that we have a cycle error,
+                        // so we just return the error.
+                        return TryGetJob::Cycle(mk_cycle(
+                            tcx,
+                            id,
+                            span,
+                            query.handle_cycle_error,
+                            &cache.cache,
+                        ));
                     }
-                    #[cfg(debug_assertions)]
-                    {
-                        cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+                    #[cfg(parallel_compiler)]
+                    QueryResult::Started(job) => {
+                        // For parallel queries, we'll block and wait until the query running
+                        // in another thread has completed. Record how long we wait in the
+                        // self-profiler.
+                        let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();
+
+                        // Get the latch out
+                        let latch = job.latch();
+                        let key = entry.key().clone();
+
+                        drop(state_lock);
+
+                        // With parallel queries we might just have to wait on some other
+                        // thread.
+                        let result = latch.wait_on(tcx.current_query_job(), span);
+
+                        if let Err(cycle) = result {
+                            let cycle = report_cycle(tcx.dep_context().sess(), cycle);
+                            let value = (query.handle_cycle_error)(tcx, cycle);
+                            let value = cache.cache.store_nocache(value);
+                            return TryGetJob::Cycle(value);
+                        }
+
+                        let cached = cache
+                            .cache
+                            .lookup(cache, &key, |value, index| {
+                                if unlikely!(tcx.dep_context().profiler().enabled()) {
+                                    tcx.dep_context().profiler().query_cache_hit(index.into());
+                                }
+                                #[cfg(debug_assertions)]
+                                {
+                                    cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+                                }
+                                (value.clone(), index)
+                            })
+                            .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
+
+                        query_blocked_prof_timer.finish_with_query_invocation_id(cached.1.into());
+
+                        return TryGetJob::JobCompleted(cached);
                     }
-                    (value.clone(), index)
-                })
-                .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
-
-            if let Some(prof_timer) = _query_blocked_prof_timer.take() {
-                prof_timer.finish_with_query_invocation_id(cached.1.into());
+                    QueryResult::Poisoned => FatalError.raise(),
+                }
             }
-
-            return TryGetJob::JobCompleted(cached);
         }
     }
 
@@ -417,11 +431,17 @@ fn try_execute_query<CTX, C>(
 ) -> C::Stored
 where
     C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
+    C::Key: DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
-        tcx, state, cache, span, &key, lookup, query,
+        tcx,
+        state,
+        cache,
+        span,
+        key.clone(),
+        lookup,
+        query,
     ) {
         TryGetJob::NotYetStarted(job) => job,
         TryGetJob::Cycle(result) => return result,
@@ -432,11 +452,15 @@ where
         }
     };
 
-    // Fast path for when incr. comp. is off. `to_dep_node` is
-    // expensive for some `DepKind`s.
-    if !tcx.dep_context().dep_graph().is_fully_enabled() {
-        let null_dep_node = DepNode::new_no_params(DepKind::NULL);
-        return force_query_with_job(tcx, key, job, null_dep_node, query).0;
+    let dep_graph = tcx.dep_context().dep_graph();
+
+    // Fast path for when incr. comp. is off.
+    if !dep_graph.is_fully_enabled() {
+        let prof_timer = tcx.dep_context().profiler().query_provider();
+        let result = tcx.start_query(job.id, None, || query.compute(tcx, key));
+        let dep_node_index = dep_graph.next_virtual_depnode_index();
+        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+        return job.complete(result, dep_node_index);
     }
 
     if query.anon {
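
With the dep graph disabled there is no node to force, so the rewritten fast path runs the provider inline and attributes the timing to a fresh virtual dep-node index rather than going through `force_query_with_job` with a `DepKind::NULL` node. The shape of that path, with a hypothetical stand-in for the profiler timer:

    use std::time::Instant;

    // Hypothetical stand-in for the self-profiler's query timer.
    struct QueryTimer(Instant);

    impl QueryTimer {
        fn start() -> Self { QueryTimer(Instant::now()) }
        fn finish_with_id(self, virtual_index: u64) {
            eprintln!("query #{} took {:?}", virtual_index, self.0.elapsed());
        }
    }

    fn run_uncached<K, V>(key: K, compute: impl FnOnce(K) -> V, next_virtual: u64) -> V {
        let timer = QueryTimer::start();
        let result = compute(key);
        // No real dep node exists with incr. comp. off; a virtual index
        // keeps the profiler's per-query attribution consistent.
        timer.finish_with_id(next_virtual);
        result
    }
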
@@ -444,17 +468,14 @@ where
 
         let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
             tcx.start_query(job.id, diagnostics, || {
-                tcx.dep_context().dep_graph().with_anon_task(
-                    *tcx.dep_context(),
-                    query.dep_kind,
-                    || query.compute(tcx, key),
-                )
+                dep_graph
+                    .with_anon_task(*tcx.dep_context(), query.dep_kind, || query.compute(tcx, key))
             })
         });
 
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
-        tcx.dep_context().dep_graph().read_index(dep_node_index);
+        dep_graph.read_index(dep_node_index);
 
         if unlikely!(!diagnostics.is_empty()) {
             tcx.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
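
Anonymous queries have no stable `DepNode` of their own: `with_anon_task` runs the provider while recording its reads and yields an index for a node whose identity derives from that dependency set. A toy model of the dedup idea (all names hypothetical):

    use std::collections::HashMap;

    // Toy model: an anonymous node is keyed by the exact set of indices
    // it read, so identical computations share a single node index.
    fn anon_node_for(
        interner: &mut HashMap<Vec<u32>, u32>,
        next_index: &mut u32,
        reads: Vec<u32>,
    ) -> u32 {
        *interner.entry(reads).or_insert_with(|| {
            let index = *next_index;
            *next_index += 1;
            index
        })
    }
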
@@ -470,7 +491,7 @@ where
         // promoted to the current session during
         // `try_mark_green()`, so we can ignore them here.
         let loaded = tcx.start_query(job.id, None, || {
-            let marked = tcx.dep_context().dep_graph().try_mark_green_and_read(tcx, &dep_node);
+            let marked = dep_graph.try_mark_green_and_read(tcx, &dep_node);
             marked.map(|(prev_dep_node_index, dep_node_index)| {
                 (
                     load_from_disk_and_cache_in_memory(
@@ -491,7 +512,7 @@ where
     }
 
     let (result, dep_node_index) = force_query_with_job(tcx, key, job, dep_node, query);
-    tcx.dep_context().dep_graph().read_index(dep_node_index);
+    dep_graph.read_index(dep_node_index);
     result
 }
 
@@ -594,7 +615,7 @@ fn incremental_verify_ich<CTX, K, V: Debug>(
         tcx.sess().struct_err(&format!("internal compiler error: encountered incremental compilation error with {:?}", dep_node))
             .help(&format!("This is a known issue with the compiler. Run {} to allow your project to compile", run_cmd))
             .note(&format!("Please follow the instructions below to create a bug report with the provided information"))
-            .note(&format!("See <https://github.com/rust-lang/rust/issues/84970> for more information."))
+            .note(&format!("See <https://github.com/rust-lang/rust/issues/84970> for more information"))
             .emit();
         panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
     }
@@ -673,7 +694,7 @@ fn get_query_impl<CTX, C>(
 where
     CTX: QueryContext,
     C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
+    C::Key: DepNodeParams<CTX::DepContext>,
 {
     try_execute_query(tcx, state, cache, span, key, lookup, query)
 }
@@ -723,15 +744,28 @@ fn force_query_impl<CTX, C>(
     tcx: CTX,
     state: &QueryState<CTX::DepKind, C::Key>,
     cache: &QueryCacheStore<C>,
-    key: C::Key,
-    span: Span,
     dep_node: DepNode<CTX::DepKind>,
     query: &QueryVtable<CTX, C::Key, C::Value>,
-) where
+) -> bool
+where
     C: QueryCache,
-    C::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
+    C::Key: DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
+    debug_assert!(!query.anon);
+
+    if !<C::Key as DepNodeParams<CTX::DepContext>>::can_reconstruct_query_key() {
+        return false;
+    }
+
+    let key = if let Some(key) =
+        <C::Key as DepNodeParams<CTX::DepContext>>::recover(*tcx.dep_context(), &dep_node)
+    {
+        key
+    } else {
+        return false;
+    };
+
     // We may be concurrently trying to both execute and force a query.
     // Ensure that only one of them runs the query.
     let cached = cache.cache.lookup(cache, &key, |_, index| {
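
The new signature turns forcing into a fallible operation driven by the `DepNode` alone: if the key type can't round-trip through a dep node, or this particular node doesn't decode to a key, the function reports `false` instead of executing anything. The recover-or-bail shape in isolation (trait and types are simplified stand-ins for `DepNodeParams`):

    struct DepNode {
        hash: u64,
    }

    // Simplified stand-in for DepNodeParams: some key types can be
    // reconstructed from a dep node, others only hash into one.
    trait RecoverKey: Sized {
        fn can_reconstruct_query_key() -> bool;
        fn recover(dep_node: &DepNode) -> Option<Self>;
    }

    impl RecoverKey for u64 {
        fn can_reconstruct_query_key() -> bool { true }
        fn recover(dep_node: &DepNode) -> Option<Self> { Some(dep_node.hash) }
    }

    fn try_force<K: RecoverKey>(dep_node: &DepNode) -> bool {
        if !K::can_reconstruct_query_key() {
            return false; // irreversibly hashed key type
        }
        let key = match K::recover(dep_node) {
            Some(key) => key,
            None => return false, // node doesn't decode to a live key
        };
        let _ = key; // ... look up the cache / start the job here ...
        true
    }
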
@@ -745,19 +779,28 @@ fn force_query_impl<CTX, C>(
     });
 
     let lookup = match cached {
-        Ok(()) => return,
+        Ok(()) => return true,
         Err(lookup) => lookup,
     };
 
     let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
-        tcx, state, cache, span, &key, lookup, query,
+        tcx,
+        state,
+        cache,
+        DUMMY_SP,
+        key.clone(),
+        lookup,
+        query,
     ) {
         TryGetJob::NotYetStarted(job) => job,
-        TryGetJob::Cycle(_) => return,
+        TryGetJob::Cycle(_) => return true,
         #[cfg(parallel_compiler)]
-        TryGetJob::JobCompleted(_) => return,
+        TryGetJob::JobCompleted(_) => return true,
     };
+
     force_query_with_job(tcx, key, job, dep_node, query);
+
+    true
 }
 
 pub enum QueryMode {
@@ -774,7 +817,7 @@ pub fn get_query<Q, CTX>(
 ) -> Option<Q::Stored>
 where
     Q: QueryDescription<CTX>,
-    Q::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
+    Q::Key: DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
     let query = &Q::VTABLE;
@@ -790,11 +833,15 @@ where
     Some(value)
 }
 
-pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, span: Span, dep_node: DepNode<CTX::DepKind>)
+pub fn force_query<Q, CTX>(tcx: CTX, dep_node: &DepNode<CTX::DepKind>) -> bool
 where
     Q: QueryDescription<CTX>,
-    Q::Key: crate::dep_graph::DepNodeParams<CTX::DepContext>,
+    Q::Key: DepNodeParams<CTX::DepContext>,
     CTX: QueryContext,
 {
-    force_query_impl(tcx, Q::query_state(tcx), Q::query_cache(tcx), key, span, dep_node, &Q::VTABLE)
+    if Q::ANON {
+        return false;
+    }
+
+    force_query_impl(tcx, Q::query_state(tcx), Q::query_cache(tcx), *dep_node, &Q::VTABLE)
 }