use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_data_structures::unlikely;
use rustc_errors::Diagnostic;
use rustc_index::vec::{Idx, IndexVec};

use parking_lot::{Condvar, Mutex};
use smallvec::{smallvec, SmallVec};
use std::collections::hash_map::Entry;
use std::env;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::atomic::Ordering::Relaxed;

use super::debug::EdgeFilter;
use super::prev::PreviousDepGraph;
use super::query::DepGraphQuery;
use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, WorkProductId};
pub struct DepGraph<K: DepKind> {
    data: Option<Lrc<DepGraphData<K>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex { .. }
}
impl DepNodeIndex {
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
}
impl std::convert::From<DepNodeIndex> for QueryInvocationId {
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}
// `PartialEq` is derived because colors are compared with `==` further down in this file.
#[derive(PartialEq)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}
struct DepGraphData<K: DepKind> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph into
    /// the current one anymore.
    current: CurrentDepGraph<K>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: PreviousDepGraph<K>,

    colors: DepNodeColorMap,

    /// A set of loaded diagnostics that is in the process of being emitted.
    emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,

    /// Used to wait for diagnostics to be emitted.
    emitting_diagnostics_cond_var: Condvar,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
}
pub fn hash_result<HashCtxt, R>(hcx: &mut HashCtxt, result: &R) -> Option<Fingerprint>
where
    R: HashStable<HashCtxt>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    Some(stable_hasher.finish())
}
impl<K: DepKind> DepGraph<K> {
    pub fn new(
        prev_graph: PreviousDepGraph<K>,
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
    ) -> DepGraph<K> {
        let prev_graph_node_count = prev_graph.node_count();

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current: CurrentDepGraph::new(prev_graph_node_count),
                emitting_diagnostics: Default::default(),
                emitting_diagnostics_cond_var: Condvar::new(),
                previous: prev_graph,
                colors: DepNodeColorMap::new(prev_graph_node_count),
            })),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<K> {
        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }
    pub fn query(&self) -> DepGraphQuery<K> {
        let data = self.data.as_ref().unwrap().current.data.lock();
        let nodes: Vec<_> = data.iter().map(|n| n.node).collect();
        let mut edges = Vec::new();
        for (from, edge_targets) in data.iter().map(|d| (d.node, &d.edges)) {
            for &edge_target in edge_targets.iter() {
                let to = data[edge_target].node;
                edges.push((from, to));
            }
        }

        DepGraphQuery::new(&nodes[..], &edges[..])
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            K::read_deps(|task_deps| {
                assert!(task_deps.is_none(), "expected no task dependency tracking");
            })
        }
    }
    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        K::with_deps(None, op)
    }
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
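    ///
    /// A sketch of what a call site looks like (the query key, context, and
    /// provider function below are illustrative placeholders, not items from
    /// this crate):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) = dep_graph.with_task(
    ///     dep_node,        // `key`: the `DepNode` identifying this task
    ///     tcx,             // `cx`: the tracked context
    ///     def_id,          // `arg`: what to process
    ///     compute_result,  // a free function `fn(Ctxt, A) -> R`, *not* a closure
    ///     hash_result,     // fingerprints the result for red/green comparison
    /// );
    /// ```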
    pub fn with_task<Ctxt: DepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        self.with_task_impl(
            key,
            cx,
            arg,
            false,
            task,
            |_key| {
                Some(TaskDeps {
                    #[cfg(debug_assertions)]
                    node: Some(_key),
                    reads: SmallVec::new(),
                    read_set: Default::default(),
                    phantom_data: PhantomData,
                })
            },
            |data, key, fingerprint, task| data.complete_task(key, task.unwrap(), fingerprint),
            hash_result,
        )
    }
    fn with_task_impl<Ctxt: DepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        no_tcx: bool,
        task: fn(Ctxt, A) -> R,
        create_task: fn(DepNode<K>) -> Option<TaskDeps<K>>,
        finish_task_and_alloc_depnode: fn(
            &CurrentDepGraph<K>,
            DepNode<K>,
            Fingerprint,
            Option<TaskDeps<K>>,
        ) -> DepNodeIndex,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        if let Some(ref data) = self.data {
            let task_deps = create_task(key).map(Lock::new);

            // In incremental mode, hash the result of the task. We don't
            // do anything with the hash yet, but we are computing it
            // anyway so that
            //  - we make sure that the infrastructure works and
            //  - we can get an idea of the runtime cost.
            let mut hcx = cx.create_stable_hashing_context();

            let result = if no_tcx {
                task(cx, arg)
            } else {
                K::with_deps(task_deps.as_ref(), || task(cx, arg))
            };

            let current_fingerprint = hash_result(&mut hcx, &result);

            let dep_node_index = finish_task_and_alloc_depnode(
                &data.current,
                key,
                current_fingerprint.unwrap_or(Fingerprint::ZERO),
                task_deps.map(|lock| lock.into_inner()),
            );

            let print_status = cfg!(debug_assertions) && cx.debug_dep_tasks();

            // Determine the color of the new DepNode.
            if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
                let prev_fingerprint = data.previous.fingerprint_by_index(prev_index);

                let color = if let Some(current_fingerprint) = current_fingerprint {
                    if current_fingerprint == prev_fingerprint {
                        if print_status {
                            eprintln!("[task::green] {:?}", key);
                        }
                        DepNodeColor::Green(dep_node_index)
                    } else {
                        if print_status {
                            eprintln!("[task::red] {:?}", key);
                        }
                        DepNodeColor::Red
                    }
                } else {
                    if print_status {
                        eprintln!("[task::unknown] {:?}", key);
                    }
                    // Mark the node as Red if we can't hash the result
                    DepNodeColor::Red
                };

                debug_assert!(
                    data.colors.get(prev_index).is_none(),
                    "DepGraph::with_task() - Duplicate DepNodeColor \
                     insertion for {:?}",
                    key
                );

                data.colors.insert(prev_index, color);
            } else if print_status {
                eprintln!("[task::new] {:?}", key);
            }

            (result, dep_node_index)
        } else {
            (task(cx, arg), self.next_virtual_depnode_index())
        }
    }
    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
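    ///
    /// A sketch of a call site (the closure body is an illustrative placeholder):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) = dep_graph.with_anon_task(dep_kind, || {
    ///     // All reads performed in here are recorded; the resulting `DepNode`'s
    ///     // hash is computed from that list of reads rather than from a query key.
    ///     compute_something()
    /// });
    /// ```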
    pub fn with_anon_task<OP, R>(&self, dep_kind: K, op: OP) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        if let Some(ref data) = self.data {
            let task_deps = Lock::new(TaskDeps::default());

            let result = K::with_deps(Some(&task_deps), op);
            let task_deps = task_deps.into_inner();

            let dep_node_index = data.current.complete_anon_task(dep_kind, task_deps);
            (result, dep_node_index)
        } else {
            (op(), self.next_virtual_depnode_index())
        }
    }
    /// Executes something within an "eval-always" task, which is a task
    /// that runs whenever anything changes.
    pub fn with_eval_always_task<Ctxt: DepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        self.with_task_impl(
            key,
            cx,
            arg,
            false,
            task,
            |_| None,
            |data, key, fingerprint, _| data.alloc_node(key, smallvec![], fingerprint),
            hash_result,
        )
    }
    pub fn read(&self, v: DepNode<K>) {
        if let Some(ref data) = self.data {
            let map = data.current.node_to_node_index.get_shard_by_value(&v).lock();
            if let Some(dep_node_index) = map.get(&v).copied() {
                // Release the shard lock before recording the read.
                std::mem::drop(map);
                data.read_index(dep_node_index);
            } else {
                panic!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
            }
        }
    }
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            data.read_index(dep_node_index);
        }
    }
    pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
        self.data
            .as_ref()
            .unwrap()
            .current
            .node_to_node_index
            .get_shard_by_value(dep_node)
            .lock()
            .get(dep_node)
            .copied()
            .unwrap()
    }

    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
        if let Some(ref data) = self.data {
            data.current
                .node_to_node_index
                .get_shard_by_value(&dep_node)
                .lock()
                .contains_key(dep_node)
        } else {
            false
        }
    }
    pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint {
        let data = self.data.as_ref().expect("dep graph enabled").current.data.lock();
        data[dep_node_index].fingerprint
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

    pub fn prev_dep_node_index_of(&self, dep_node: &DepNode<K>) -> SerializedDepNodeIndex {
        self.data.as_ref().unwrap().previous.node_to_index(dep_node)
    }
    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = debug_str_gen();
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }
    pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> {
        if cfg!(debug_assertions) {
            let current_dep_graph = &self.data.as_ref().unwrap().current;

            Some((
                current_dep_graph.total_read_count.load(Relaxed),
                current_dep_graph.total_duplicate_read_count.load(Relaxed),
            ))
        } else {
            None
        }
    }
    pub fn serialize(&self) -> SerializedDepGraph<K> {
        let data = self.data.as_ref().unwrap().current.data.lock();

        let fingerprints: IndexVec<SerializedDepNodeIndex, _> =
            data.iter().map(|d| d.fingerprint).collect();
        let nodes: IndexVec<SerializedDepNodeIndex, _> = data.iter().map(|d| d.node).collect();

        let total_edge_count: usize = data.iter().map(|d| d.edges.len()).sum();

        let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
        let mut edge_list_data = Vec::with_capacity(total_edge_count);

        for (current_dep_node_index, edges) in data.iter_enumerated().map(|(i, d)| (i, &d.edges)) {
            let start = edge_list_data.len() as u32;
            // This should really just be a memcpy :/
            edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index())));
            let end = edge_list_data.len() as u32;

            debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len());
            edge_list_indices.push((start, end));
        }

        debug_assert!(edge_list_data.len() <= u32::MAX as usize);
        debug_assert_eq!(edge_list_data.len(), total_edge_count);

        SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }
    }
    pub fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
                return data.colors.get(prev_index);
            } else {
                // This is a node that did not exist in the previous compilation
                // session, so we consider it to be red.
                return Some(DepNodeColor::Red);
            }
        }

        None
    }
    /// Try to read a node index for the node `dep_node`.
    /// A node will have an index when it has already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node when
    /// a node index can be found for that node.
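    ///
    /// A sketch of the intended usage (hypothetical caller code, not from this file):
    ///
    /// ```ignore (illustrative)
    /// if let Some((prev_index, dep_node_index)) =
    ///     dep_graph.try_mark_green_and_read(tcx, &dep_node)
    /// {
    ///     // Green: the result from the previous session is still valid, so the
    ///     // caller may load the cached value from disk via `prev_index`.
    /// } else {
    ///     // Red or unknown: the query has to be re-executed.
    /// }
    /// ```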
    pub fn try_mark_green_and_read<Ctxt: DepContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
            debug_assert!(self.is_green(&dep_node));
            self.read_index(dep_node_index);
            (prev_index, dep_node_index)
        })
    }
    pub fn try_mark_green<Ctxt: DepContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!dep_node.kind.is_eval_always());

        // Return None if the dep graph is disabled
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }
    /// Try to mark a dep-node which existed in the previous compilation session as green.
    fn try_mark_previous_green<Ctxt: DepContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<DepNodeIndex> {
        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);

        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(
                !data
                    .current
                    .node_to_node_index
                    .get_shard_by_value(dep_node)
                    .lock()
                    .contains_key(dep_node)
            );
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green
        debug_assert!(!dep_node.kind.is_eval_always());

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        let mut current_deps = SmallVec::new();

        for &dep_dep_node_index in prev_deps {
            let dep_dep_node_color = data.colors.get(dep_dep_node_index);

            match dep_dep_node_color {
                Some(DepNodeColor::Green(node_index)) => {
                    // This dependency has been marked as green before, we are
                    // still fine and can continue with checking the other
                    // dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) --- found dependency {:?} to \
                         be immediately green",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    current_deps.push(node_index);
                }
                Some(DepNodeColor::Red) => {
                    // We found a dependency the value of which has changed
                    // compared to the previous compilation session. We cannot
                    // mark the DepNode as green and also don't need to bother
                    // with checking any of the other dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) - END - dependency {:?} was \
                         immediately red",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    return None;
                }
                None => {
                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);

                    // We don't know the state of this dependency. If it isn't
                    // an eval_always node, let's try to mark it green recursively.
                    if !dep_dep_node.kind.is_eval_always() {
                        debug!(
                            "try_mark_previous_green({:?}) --- state of dependency {:?} \
                             is unknown, trying to mark it green",
                            dep_node, dep_dep_node
                        );

                        let node_index = self.try_mark_previous_green(
                            tcx,
                            data,
                            dep_dep_node_index,
                            dep_dep_node,
                        );
                        if let Some(node_index) = node_index {
                            debug!(
                                "try_mark_previous_green({:?}) --- managed to MARK \
                                 dependency {:?} as green",
                                dep_node, dep_dep_node
                            );
                            current_deps.push(node_index);
                            continue;
                        }
                    }

                    // We failed to mark it green, so we try to force the query.
                    debug!(
                        "try_mark_previous_green({:?}) --- trying to force \
                         dependency {:?}",
                        dep_node, dep_dep_node
                    );
                    if tcx.try_force_from_dep_node(dep_dep_node) {
                        let dep_dep_node_color = data.colors.get(dep_dep_node_index);

                        match dep_dep_node_color {
                            Some(DepNodeColor::Green(node_index)) => {
                                debug!(
                                    "try_mark_previous_green({:?}) --- managed to \
                                     FORCE dependency {:?} to green",
                                    dep_node, dep_dep_node
                                );
                                current_deps.push(node_index);
                            }
                            Some(DepNodeColor::Red) => {
                                debug!(
                                    "try_mark_previous_green({:?}) - END - \
                                     dependency {:?} was red after forcing",
                                    dep_node, dep_dep_node
                                );
                                return None;
                            }
                            None => {
                                if !tcx.has_errors_or_delayed_span_bugs() {
                                    panic!(
                                        "try_mark_previous_green() - Forcing the DepNode \
                                         should have set its color"
                                    )
                                } else {
                                    // If the query we just forced has resulted in
                                    // some kind of compilation error, we cannot rely on
                                    // the dep-node color having been properly updated.
                                    // This means that the query system has reached an
                                    // invalid state. We let the compiler continue (by
                                    // returning `None`) so it can emit error messages
                                    // and wind down, but rely on the fact that this
                                    // invalid state will not be persisted to the
                                    // incremental compilation cache because of
                                    // compilation errors being present.
                                    debug!(
                                        "try_mark_previous_green({:?}) - END - \
                                         dependency {:?} resulted in compilation error",
                                        dep_node, dep_dep_node
                                    );
                                    return None;
                                }
                            }
                        }
                    } else {
                        // The DepNode could not be forced.
                        debug!(
                            "try_mark_previous_green({:?}) - END - dependency {:?} \
                             could not be forced",
                            dep_node, dep_dep_node
                        );
                        return None;
                    }
                }
            }
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently

        let dep_node_index = {
            // Copy the fingerprint from the previous graph,
            // so we don't have to recompute it
            let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index);

            // We allocate an entry for the node in the current dependency graph and
            // add all the appropriate edges imported from the previous graph
            data.current.intern_node(*dep_node, current_deps, fingerprint)
        };

        // ... emitting any stored diagnostic ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
        // Maybe store a list on disk and encode this fact in the DepNodeState
        let diagnostics = tcx.load_diagnostics(prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
             insertion for {:?}",
            dep_node
        );

        if unlikely!(!diagnostics.is_empty()) {
            self.emit_diagnostics(tcx, data, dep_node_index, prev_dep_node_index, diagnostics);
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
        Some(dep_node_index)
    }
    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    fn emit_diagnostics<Ctxt: DepContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        dep_node_index: DepNodeIndex,
        prev_dep_node_index: SerializedDepNodeIndex,
        diagnostics: Vec<Diagnostic>,
    ) {
        let mut emitting = data.emitting_diagnostics.lock();

        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
            // The node is already green so diagnostics must have been emitted already
            return;
        }

        if emitting.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must emit the diagnostics and signal other potentially waiting
            // threads after.
            std::mem::drop(emitting);

            // Promote the previous diagnostics to the current session.
            tcx.store_diagnostics(dep_node_index, diagnostics.clone().into());

            let handle = tcx.diagnostic();

            for diagnostic in diagnostics {
                handle.emit_diagnostic(&diagnostic);
            }

            // Mark the node as green now that diagnostics are emitted
            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

            // Remove the node from the set
            data.emitting_diagnostics.lock().remove(&dep_node_index);

            // Wake up waiters
            data.emitting_diagnostics_cond_var.notify_all();
        } else {
            // We must wait for the other thread to finish emitting the diagnostic

            loop {
                data.emitting_diagnostics_cond_var.wait(&mut emitting);
                if data.colors.get(prev_dep_node_index)
                    == Some(DepNodeColor::Green(dep_node_index))
                {
                    break;
                }
            }
        }
    }
    // Returns true if the given node has been marked as green during the
    // current compilation session. Used in various assertions.
    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false)
    }
    // This method loads all on-disk cacheable query results into memory, so
    // they can be written out to the new cache file again. Most query results
    // will already be in memory but in the case where we marked something as
    // green but then did not need the value, that value will never have been
    // loaded from disk.
    //
    // This method will only load queries that will end up in the disk cache.
    // Other queries will not be executed.
    pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(&dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }
    fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
}
/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example is
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved file associated with this CGU.
    pub saved_file: Option<String>,
}
struct DepNodeData<K> {
    node: DepNode<K>,
    edges: EdgesVec,
    fingerprint: Fingerprint,
}
/// `CurrentDepGraph` stores the dependency graph for the current session.
/// It will be populated as we run queries or tasks.
///
/// The nodes in it are identified by an index (`DepNodeIndex`).
/// The data for each node is stored in its `DepNodeData`, found in the `data` field.
///
/// We never remove nodes from the graph: they are only added.
///
/// This struct uses two locks internally. The `data` and `node_to_node_index` fields are
/// locked separately. Operations that take a `DepNodeIndex` typically just access
/// the `data` field.
///
/// The only operation that must manipulate both locks is adding new nodes, in which case
/// we first acquire the `node_to_node_index` lock and then, once a new node is to be inserted,
/// acquire the lock on `data`.
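///
/// A sketch of that insertion path (mirroring `intern_node` below):
///
/// ```ignore (illustrative)
/// let mut shard = current.node_to_node_index.get_shard_by_value(&dep_node).lock();
/// if let Entry::Vacant(entry) = shard.entry(dep_node) {
///     let mut data = current.data.lock(); // second lock, held only while inserting
///     let index = DepNodeIndex::new(data.len());
///     data.push(DepNodeData { node: dep_node, edges, fingerprint });
///     entry.insert(index);
/// }
/// ```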
pub(super) struct CurrentDepGraph<K> {
    data: Lock<IndexVec<DepNodeIndex, DepNodeData<K>>>,
    node_to_node_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    forbidden_edge: Option<EdgeFilter>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}
impl<K: DepKind> CurrentDepGraph<K> {
    fn new(prev_graph_node_count: usize) -> CurrentDepGraph<K> {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        let forbidden_edge = if cfg!(debug_assertions) {
            match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
                Ok(s) => match EdgeFilter::new(&s) {
                    Ok(f) => Some(f),
                    Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
                },
                Err(_) => None,
            }
        } else {
            None
        };

        // Pre-allocate the dep node structures. We over-allocate a little so
        // that we hopefully don't have to re-allocate during this compilation
        // session. The over-allocation is 2% plus a small constant to account
        // for the fact that in very small crates 2% might not be enough.
        let new_node_count_estimate = (prev_graph_node_count * 102) / 100 + 200;

        CurrentDepGraph {
            data: Lock::new(IndexVec::with_capacity(new_node_count_estimate)),
            node_to_node_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            anon_id_seed: stable_hasher.finish(),
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }
    fn complete_task(
        &self,
        node: DepNode<K>,
        task_deps: TaskDeps<K>,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        self.alloc_node(node, task_deps.reads, fingerprint)
    }
    fn complete_anon_task(&self, kind: K, task_deps: TaskDeps<K>) -> DepNodeIndex {
        debug_assert!(!kind.is_eval_always());

        let mut hasher = StableHasher::new();

        // The dep node indices are hashed here instead of hashing the dep nodes of the
        // dependencies. These indices may refer to different nodes per session, but this isn't
        // a problem here because we ensure that the final dep node hash is per-session only by
        // combining it with the per-session random number `anon_id_seed`. This hash only needs
        // to map the dependencies to a single value on a per-session basis.
        task_deps.reads.hash(&mut hasher);

        let target_dep_node = DepNode {
            kind,

            // Fingerprint::combine() is faster than sending Fingerprint
            // through the StableHasher (at least as long as StableHasher
            // is so slow).
            hash: self.anon_id_seed.combine(hasher.finish()),
        };

        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
    }
    fn alloc_node(
        &self,
        dep_node: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        debug_assert!(
            !self.node_to_node_index.get_shard_by_value(&dep_node).lock().contains_key(&dep_node)
        );
        self.intern_node(dep_node, edges, fingerprint)
    }

    fn intern_node(
        &self,
        dep_node: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        match self.node_to_node_index.get_shard_by_value(&dep_node).lock().entry(dep_node) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let mut data = self.data.lock();
                let dep_node_index = DepNodeIndex::new(data.len());
                data.push(DepNodeData { node: dep_node, edges, fingerprint });
                entry.insert(dep_node_index);
                dep_node_index
            }
        }
    }
}
impl<K: DepKind> DepGraphData<K> {
    fn read_index(&self, source: DepNodeIndex) {
        K::read_deps(|task_deps| {
            if let Some(task_deps) = task_deps {
                let mut task_deps = task_deps.lock();
                let task_deps = &mut *task_deps;
                if cfg!(debug_assertions) {
                    self.current.total_read_count.fetch_add(1, Relaxed);
                }

                // As long as we only have a low number of reads we can avoid doing a hash
                // insert and potentially allocating/reallocating the hashmap
                let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
                    task_deps.reads.iter().all(|other| *other != source)
                } else {
                    task_deps.read_set.insert(source)
                };
                if new_read {
                    task_deps.reads.push(source);
                    if task_deps.reads.len() == TASK_DEPS_READS_CAP {
                        // Fill `read_set` with what we have so far so we can use the hashset
                        // next time
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            let data = self.current.data.lock();
                            if let Some(ref forbidden_edge) = self.current.forbidden_edge {
                                let source = data[source].node;
                                if forbidden_edge.test(&source, &target) {
                                    panic!("forbidden edge {:?} -> {:?} created", source, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    self.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                }
            }
        })
    }
}
/// The inline capacity of the `reads` field's `SmallVec`.
const TASK_DEPS_READS_CAP: usize = 8;
type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
pub struct TaskDeps<K> {
    #[cfg(debug_assertions)]
    node: Option<DepNode<K>>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode<K>>,
}
impl<K> Default for TaskDeps<K> {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
        }
    }
}
// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;
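
// For example (following the constants above): `None` is stored as `0`, `Red` as `1`,
// and `Green(DepNodeIndex(i))` as `i + 2`, so a node that was marked green and has
// `DepNodeIndex` 5 is stored as the value `7`.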
impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}