use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_data_structures::unlikely;
use rustc_errors::Diagnostic;
use rustc_index::vec::IndexVec;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};

use parking_lot::{Condvar, Mutex};
use smallvec::{smallvec, SmallVec};
use std::collections::hash_map::Entry;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::atomic::Ordering::Relaxed;

use super::prev::PreviousDepGraph;
use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
use crate::query::QueryContext;

#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};
pub struct DepGraph<K: DepKind> {
    data: Option<Lrc<DepGraphData<K>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex { .. }
}

impl DepNodeIndex {
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
}
impl std::convert::From<DepNodeIndex> for QueryInvocationId {
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

#[derive(PartialEq)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}
struct DepGraphData<K: DepKind> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph
    /// into the current one anymore, but we do reference shared data to
    /// save space.
    current: CurrentDepGraph<K>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: PreviousDepGraph<K>,

    colors: DepNodeColorMap,

    /// A set of loaded diagnostics that is in the process of being emitted.
    emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,

    /// Used to wait for diagnostics to be emitted.
    emitting_diagnostics_cond_var: Condvar,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
}
pub fn hash_result<HashCtxt, R>(hcx: &mut HashCtxt, result: &R) -> Option<Fingerprint>
where
    R: HashStable<HashCtxt>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    Some(stable_hasher.finish())
}
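
// Note: `hash_result` is the fingerprinting function that callers typically
// pass to `with_task` below. Queries that opt out of result hashing (the
// `no_hash` case handled in `intern_node` later in this file) pass a closure
// returning `None` instead.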
impl<K: DepKind> DepGraph<K> {
    pub fn new(
        prev_graph: PreviousDepGraph<K>,
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
        encoder: FileEncoder,
    ) -> DepGraph<K> {
        let prev_graph_node_count = prev_graph.node_count();

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current: CurrentDepGraph::new(prev_graph_node_count, encoder),
                emitting_diagnostics: Default::default(),
                emitting_diagnostics_cond_var: Condvar::new(),
                previous: prev_graph,
                colors: DepNodeColorMap::new(prev_graph_node_count),
            })),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<K> {
        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
    }
    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            K::read_deps(|task_deps| {
                assert!(task_deps.is_none(), "expected no task dependency tracking");
            })
        }
    }
    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        K::with_deps(None, op)
    }
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
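    ///
    /// For illustration only, a hypothetical caller (the names `tcx`,
    /// `dep_node`, `compute`, `DefId` and `Ty` are assumptions, not part of
    /// this module):
    ///
    /// ```ignore (illustrative sketch)
    /// fn compute(tcx: Tcx, def_id: DefId) -> Ty { /* ... */ }
    ///
    /// let (result, dep_node_index) =
    ///     tcx.dep_graph.with_task(dep_node, tcx, def_id, compute, hash_result);
    /// ```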
    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        self.with_task_impl(
            key,
            cx,
            arg,
            task,
            |key| {
                Some(TaskDeps {
                    #[cfg(debug_assertions)]
                    node: Some(key),
                    reads: SmallVec::new(),
                    read_set: Default::default(),
                    phantom_data: PhantomData,
                })
            },
            hash_result,
        )
    }
    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        create_task: fn(DepNode<K>) -> Option<TaskDeps<K>>,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        if let Some(ref data) = self.data {
            let dcx = cx.dep_context();
            let task_deps = create_task(key).map(Lock::new);
            let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
            let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);

            let mut hcx = dcx.create_stable_hashing_context();
            let current_fingerprint = hash_result(&mut hcx, &result);

            let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;

            // Intern the new `DepNode`.
            let (dep_node_index, prev_and_color) = data.current.intern_node(
                dcx.profiler(),
                &data.previous,
                key,
                edges,
                current_fingerprint,
                print_status,
            );

            if let Some((prev_index, color)) = prev_and_color {
                debug_assert!(
                    data.colors.get(prev_index).is_none(),
                    "DepGraph::with_task() - Duplicate DepNodeColor insertion for {:?}",
                    key
                );

                data.colors.insert(prev_index, color);
            }

            (result, dep_node_index)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            (task(cx, arg), self.next_virtual_depnode_index())
        }
    }
    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
    pub fn with_anon_task<Ctxt: DepContext<DepKind = K>, OP, R>(
        &self,
        cx: Ctxt,
        dep_kind: K,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!dep_kind.is_eval_always());

        if let Some(ref data) = self.data {
            let task_deps = Lock::new(TaskDeps::default());
            let result = K::with_deps(Some(&task_deps), op);
            let task_deps = task_deps.into_inner();

            // The dep node indices are hashed here instead of hashing the dep nodes of the
            // dependencies. These indices may refer to different nodes per session, but this isn't
            // a problem here because we ensure that the final dep node hash is per session only by
            // combining it with the per-session random number `anon_id_seed`. This hash only needs
            // to map the dependencies to a single value on a per-session basis.
            let mut hasher = StableHasher::new();
            task_deps.reads.hash(&mut hasher);

            let target_dep_node = DepNode {
                kind: dep_kind,
                // Fingerprint::combine() is faster than sending Fingerprint
                // through the StableHasher (at least as long as StableHasher
                // is so slow).
                hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
            };

            let dep_node_index = data.current.intern_new_node(
                cx.profiler(),
                target_dep_node,
                task_deps.reads,
                Fingerprint::ZERO,
            );

            (result, dep_node_index)
        } else {
            (op(), self.next_virtual_depnode_index())
        }
    }
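
    // For comparison with `with_task` above: an anonymous task has no
    // caller-supplied `DepNode` key. Its identity is derived entirely from
    // what it read, e.g. with hypothetical read indices:
    //
    //     reads = [DepNodeIndex 3, DepNodeIndex 17]
    //     hash  = anon_id_seed.combine(stable_hash(reads))
    //
    // so two anonymous tasks that read exactly the same nodes coalesce into
    // a single node within a session.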
    /// Executes something within an "eval-always" task, which is a task
    /// that runs whenever anything changes.
    pub fn with_eval_always_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        self.with_task_impl(key, cx, arg, task, |_| None, hash_result)
    }
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            K::read_deps(|task_deps| {
                if let Some(task_deps) = task_deps {
                    let mut task_deps = task_deps.lock();
                    let task_deps = &mut *task_deps;
                    if cfg!(debug_assertions) {
                        data.current.total_read_count.fetch_add(1, Relaxed);
                    }

                    // As long as we only have a low number of reads we can avoid doing a hash
                    // insert and potentially allocating/reallocating the hashmap.
                    let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
                        task_deps.reads.iter().all(|other| *other != dep_node_index)
                    } else {
                        task_deps.read_set.insert(dep_node_index)
                    };
                    if new_read {
                        task_deps.reads.push(dep_node_index);
                        if task_deps.reads.len() == TASK_DEPS_READS_CAP {
                            // Fill `read_set` with what we have so far so we can use the hashset
                            // next time.
                            task_deps.read_set.extend(task_deps.reads.iter().copied());
                        }

                        #[cfg(debug_assertions)]
                        {
                            if let Some(target) = task_deps.node {
                                if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                    let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                                    if forbidden_edge.test(&src, &target) {
                                        panic!("forbidden edge {:?} -> {:?} created", src, target)
                                    }
                                }
                            }
                        }
                    } else if cfg!(debug_assertions) {
                        data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                    }
                }
            })
        }
    }
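
    // For illustration, the dedup strategy above, with `TASK_DEPS_READS_CAP`
    // at its current value of 8 (defined near the bottom of this file):
    //
    //     reads.len() < 8  => linear scan of the inline SmallVec, no allocation;
    //     the push that makes reads.len() == 8 seeds `read_set` with all reads
    //     seen so far; from then on an O(1) `read_set.insert()` decides
    //     `new_read`.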
    pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
        self.dep_node_index_of_opt(dep_node).unwrap()
    }

    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
        let data = self.data.as_ref().unwrap();
        let current = &data.current;

        if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
            current.prev_index_to_index.lock()[prev_index]
        } else {
            current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied()
        }
    }
    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
        self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = debug_str_gen();
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }
    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
                return data.colors.get(prev_index);
            } else {
                // This is a node that did not exist in the previous compilation session.
                return None;
            }
        }

        None
    }
    /// Try to read a node index for the node `dep_node`.
    /// A node will have an index when it has already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node when
    /// a node index can be found for that node.
    pub fn try_mark_green_and_read<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
            debug_assert!(self.is_green(&dep_node));
            self.read_index(dep_node_index);
            (prev_index, dep_node_index)
        })
    }
    pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!dep_node.kind.is_eval_always());

        // Return None if the dep graph is disabled.
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session.
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }
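
    // For illustration, the recursive marking performed by
    // `try_mark_previous_green` below, as a pseudo-Rust sketch (not
    // additional API):
    //
    //     for dep in previous_graph.deps_of(node) {
    //         match color(dep) {
    //             Green(_) => {}       // dep already validated this session
    //             Red => return None,  // dep changed: node cannot be green
    //             None => {
    //                 // Unknown: recurse; if that fails, force (re-run) the
    //                 // dep's query and re-check its color.
    //             }
    //         }
    //     }
    //     // All deps green: promote the node and color it green.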
    /// Try to mark a dep-node which existed in the previous compilation session as green.
    fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<DepNodeIndex> {
        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);

        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(!self.dep_node_exists(dep_node));
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green.
        debug_assert!(!dep_node.kind.is_eval_always());

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        for &dep_dep_node_index in prev_deps {
            let dep_dep_node_color = data.colors.get(dep_dep_node_index);
            match dep_dep_node_color {
                Some(DepNodeColor::Green(_)) => {
                    // This dependency has been marked as green before, we are
                    // still fine and can continue with checking the other
                    // dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) --- found dependency {:?} to \
                         be immediately green",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                }
                Some(DepNodeColor::Red) => {
                    // We found a dependency the value of which has changed
                    // compared to the previous compilation session. We cannot
                    // mark the DepNode as green and also don't need to bother
                    // with checking any of the other dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) - END - dependency {:?} was \
                         immediately red",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    return None;
                }
                None => {
                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);

                    // We don't know the state of this dependency. If it isn't
                    // an eval_always node, let's try to mark it green recursively.
                    if !dep_dep_node.kind.is_eval_always() {
                        debug!(
                            "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \
                             is unknown, trying to mark it green",
                            dep_node, dep_dep_node, dep_dep_node.hash,
                        );

                        let node_index = self.try_mark_previous_green(
                            tcx,
                            data,
                            dep_dep_node_index,
                            dep_dep_node,
                        );
                        if node_index.is_some() {
                            debug!(
                                "try_mark_previous_green({:?}) --- managed to MARK \
                                 dependency {:?} as green",
                                dep_node, dep_dep_node
                            );
                            continue;
                        }
                    }

                    // We failed to mark it green, so we try to force the query.
                    debug!(
                        "try_mark_previous_green({:?}) --- trying to force \
                         dependency {:?}",
                        dep_node, dep_dep_node
                    );
                    if tcx.try_force_from_dep_node(dep_dep_node) {
                        let dep_dep_node_color = data.colors.get(dep_dep_node_index);

                        match dep_dep_node_color {
                            Some(DepNodeColor::Green(_)) => {
                                debug!(
                                    "try_mark_previous_green({:?}) --- managed to \
                                     FORCE dependency {:?} to green",
                                    dep_node, dep_dep_node
                                );
                            }
                            Some(DepNodeColor::Red) => {
                                debug!(
                                    "try_mark_previous_green({:?}) - END - \
                                     dependency {:?} was red after forcing",
                                    dep_node, dep_dep_node
                                );
                                return None;
                            }
                            None => {
                                if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
                                    panic!(
                                        "try_mark_previous_green() - Forcing the DepNode \
                                         should have set its color"
                                    )
                                } else {
                                    // If the query we just forced has resulted in
                                    // some kind of compilation error, we cannot rely on
                                    // the dep-node color having been properly updated.
                                    // This means that the query system has reached an
                                    // invalid state. We let the compiler continue (by
                                    // returning `None`) so it can emit error messages
                                    // and wind down, but rely on the fact that this
                                    // invalid state will not be persisted to the
                                    // incremental compilation cache because of
                                    // compilation errors being present.
                                    debug!(
                                        "try_mark_previous_green({:?}) - END - \
                                         dependency {:?} resulted in compilation error",
                                        dep_node, dep_dep_node
                                    );
                                    return None;
                                }
                            }
                        }
                    } else {
                        // The DepNode could not be forced.
                        debug!(
                            "try_mark_previous_green({:?}) - END - dependency {:?} \
                             could not be forced",
                            dep_node, dep_dep_node
                        );
                        return None;
                    }
                }
            }
        }
        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently.

        // We allocate an entry for the node in the current dependency graph and
        // add all the appropriate edges imported from the previous graph.
        let dep_node_index = data.current.promote_node_and_deps_to_current(
            tcx.dep_context().profiler(),
            &data.previous,
            prev_dep_node_index,
        );

        // ... emitting any stored diagnostics ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere.
        // Maybe store a list on disk and encode this fact in the DepNodeState.
        let diagnostics = tcx.load_diagnostics(prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
             insertion for {:?}",
            dep_node
        );

        if unlikely!(!diagnostics.is_empty()) {
            self.emit_diagnostics(tcx, data, dep_node_index, prev_dep_node_index, diagnostics);
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here.
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);

        Some(dep_node_index)
    }
    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    fn emit_diagnostics<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        dep_node_index: DepNodeIndex,
        prev_dep_node_index: SerializedDepNodeIndex,
        diagnostics: Vec<Diagnostic>,
    ) {
        let mut emitting = data.emitting_diagnostics.lock();

        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
            // The node is already green so diagnostics must have been emitted already.
            return;
        }

        if emitting.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must emit the diagnostics and signal other potentially waiting
            // threads after. Release the lock before emitting.
            drop(emitting);

            // Promote the previous diagnostics to the current session.
            tcx.store_diagnostics(dep_node_index, diagnostics.clone().into());

            let handle = tcx.dep_context().sess().diagnostic();

            for diagnostic in diagnostics {
                handle.emit_diagnostic(&diagnostic);
            }

            // Mark the node as green now that diagnostics are emitted.
            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

            // Remove the node from the set.
            data.emitting_diagnostics.lock().remove(&dep_node_index);

            // Wake up waiters.
            data.emitting_diagnostics_cond_var.notify_all();
        } else {
            // We must wait for the other thread to finish emitting the diagnostics.
            loop {
                data.emitting_diagnostics_cond_var.wait(&mut emitting);
                if data.colors.get(prev_dep_node_index)
                    == Some(DepNodeColor::Green(dep_node_index))
                {
                    break;
                }
            }
        }
    }
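
    // For illustration, the handshake above when two threads race to emit the
    // same node's diagnostics (a sketch):
    //
    //     T1: `emitting.insert(i)` returns true  -> emits the diagnostics,
    //         colors the node green, removes `i` from the set, `notify_all()`.
    //     T2: `emitting.insert(i)` returns false -> `wait()`s on the condvar
    //         until the node's color reads `Green(i)`, then returns.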
    // Returns true if the given node has been marked as red during the
    // current compilation session. Used in various assertions.
    pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node) == Some(DepNodeColor::Red)
    }

    // Returns true if the given node has been marked as green during the
    // current compilation session. Used in various assertions.
    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node).map_or(false, |c| c.is_green())
    }
    // This method loads all on-disk cacheable query results into memory, so
    // they can be written out to the new cache file again. Most query results
    // will already be in memory, but in the case where we marked something as
    // green but then did not need the value, that value will never have been
    // loaded from disk.
    //
    // This method will only load queries that will end up in the disk cache.
    // Other queries will not be executed.
    pub fn exec_cache_promotions<Ctxt: QueryContext<DepKind = K>>(&self, qcx: Ctxt) {
        let tcx = qcx.dep_context();
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    qcx.try_load_from_on_disk_cache(&dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }
    // Register reused dep nodes (i.e. nodes we've marked red or green) with the context.
    pub fn register_reused_dep_nodes<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Red) | Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.register_reused_dep_node(&dep_node);
                }
                None => {}
            }
        }
    }
    pub fn print_incremental_info(&self) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().print_incremental_info(
                data.current.total_read_count.load(Relaxed),
                data.current.total_duplicate_read_count.load(Relaxed),
            )
        }
    }

    pub fn encode(&self, profiler: &SelfProfilerRef) -> FileEncodeResult {
        if let Some(data) = &self.data {
            data.current.encoder.steal().finish(profiler)
        } else {
            Ok(())
        }
    }
    fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
}
/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example are
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
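///
/// For illustration, the lifecycle described above (the file names are
/// hypothetical):
///
/// ```ignore (illustrative sketch)
/// // Session 1: codegen partition P and record its object file.
/// let wp = WorkProduct { cgu_name: "P".to_string(), saved_file: Some("P.o".to_string()) };
/// // Session 2: if DepNode::CodegenUnit(P) is judged clean, `wp` is kept in
/// // `previous_work_products`; "P.o" is reused only if the symbol hash of
/// // the regenerated partition still matches the recorded one.
/// ```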
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved file associated with this CGU.
    pub saved_file: Option<String>,
}

// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex { .. }
}
/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `new_node_to_index` map only contains nodes not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `PreviousDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `new_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
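///
/// For illustration, the two-step lookup described above (a sketch of
/// `DepGraph::dep_node_index_of_opt`, defined earlier in this file):
///
/// ```ignore (illustrative sketch)
/// let index: Option<DepNodeIndex> = match previous.node_to_index_opt(&dep_node) {
///     // Node existed in the previous session: go through the compact vector.
///     Some(prev_index) => prev_index_to_index.lock()[prev_index],
///     // Node is new this session: consult the sharded map.
///     None => new_node_to_index.get_shard_by_value(&dep_node).lock().get(&dep_node).copied(),
/// };
/// ```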
pub(super) struct CurrentDepGraph<K: DepKind> {
    encoder: Steal<GraphEncoder<K>>,
    new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
    prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter<K>>,
    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
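    ///
    /// For illustration, the mixing step as performed in `with_anon_task`
    /// above:
    ///
    /// ```ignore (illustrative sketch)
    /// let mut hasher = StableHasher::new();
    /// task_deps.reads.hash(&mut hasher);
    /// let anon_hash = anon_id_seed.combine(hasher.finish());
    /// ```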
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}
impl<K: DepKind> CurrentDepGraph<K> {
    fn new(prev_graph_node_count: usize, encoder: FileEncoder) -> CurrentDepGraph<K> {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        // We store a large collection of these in `prev_index_to_index` during
        // non-full incremental builds, and want to ensure that the element size
        // doesn't inadvertently increase.
        static_assert_size!(Option<DepNodeIndex>, 4);

        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        CurrentDepGraph {
            encoder: Steal::new(GraphEncoder::new(encoder, prev_graph_node_count)),
            new_node_to_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed: stable_hasher.finish(),
            #[cfg(debug_assertions)]
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }
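
    // For illustration: `new_node_count_estimate` above sizes the sharded map
    // for roughly 2% more nodes than the previous session had (plus a flat
    // 200), so a typical incremental run can avoid rehashing
    // `new_node_to_index`.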
    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
    }
    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
    fn intern_new_node(
        &self,
        profiler: &SelfProfilerRef,
        key: DepNode<K>,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let dep_node_index =
                    self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
                entry.insert(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                dep_node_index
            }
        }
    }
    fn intern_node(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &PreviousDepGraph<K>,
        key: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
        print_status: bool,
    ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
        let print_status = cfg!(debug_assertions) && print_status;

        if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
            // Determine the color and index of the new `DepNode`.
            if let Some(fingerprint) = fingerprint {
                if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
                    if print_status {
                        eprintln!("[task::green] {:?}", key);
                    }

                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                        None => {
                            let dep_node_index =
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
                            prev_index_to_index[prev_index] = Some(dep_node_index);
                            dep_node_index
                        }
                    };

                    #[cfg(debug_assertions)]
                    self.record_edge(dep_node_index, key);
                    (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
                } else {
                    if print_status {
                        eprintln!("[task::red] {:?}", key);
                    }

                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                        None => {
                            let dep_node_index =
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
                            prev_index_to_index[prev_index] = Some(dep_node_index);
                            dep_node_index
                        }
                    };

                    #[cfg(debug_assertions)]
                    self.record_edge(dep_node_index, key);
                    (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                }
            } else {
                if print_status {
                    eprintln!("[task::unknown] {:?}", key);
                }

                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                let mut prev_index_to_index = self.prev_index_to_index.lock();

                let dep_node_index = match prev_index_to_index[prev_index] {
                    Some(dep_node_index) => dep_node_index,
                    None => {
                        let dep_node_index =
                            self.encoder.borrow().send(profiler, key, Fingerprint::ZERO, edges);
                        prev_index_to_index[prev_index] = Some(dep_node_index);
                        dep_node_index
                    }
                };

                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                (dep_node_index, Some((prev_index, DepNodeColor::Red)))
            }
        } else {
            if print_status {
                eprintln!("[task::new] {:?}", key);
            }

            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            // This is a new node: it didn't exist in the previous compilation session.
            let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint);

            (dep_node_index, None)
        }
    }
    fn promote_node_and_deps_to_current(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &PreviousDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let key = prev_graph.index_to_node(prev_index);
                let dep_node_index = self.encoder.borrow().send(
                    profiler,
                    key,
                    prev_graph.fingerprint_by_index(prev_index),
                    prev_graph
                        .edge_targets_from(prev_index)
                        .iter()
                        .map(|i| prev_index_to_index[*i].unwrap())
                        .collect(),
                );
                prev_index_to_index[prev_index] = Some(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                dep_node_index
            }
        }
    }
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &PreviousDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) {
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
            "node from previous graph present in new node collection"
        );
    }
}
/// The capacity of the `reads` field's `SmallVec`.
const TASK_DEPS_READS_CAP: usize = 8;
type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
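
// For illustration: with the cap at 8, an `EdgesVec` stores its elements
// inline (`SmallVec<[DepNodeIndex; 8]>`) for the common case of a task with
// few distinct reads, and only spills to the heap once a ninth read is
// recorded.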
pub struct TaskDeps<K> {
    #[cfg(debug_assertions)]
    node: Option<DepNode<K>>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode<K>>,
}

impl<K> Default for TaskDeps<K> {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
        }
    }
}
// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;
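
// For illustration, the encoding round-trip implemented below (a sketch):
//
//     None           <-> COMPRESSED_NONE (0)
//     Some(Red)      <-> COMPRESSED_RED (1)
//     Some(Green(i)) <-> i.as_u32() + COMPRESSED_FIRST_GREEN
//
// so `Green(DepNodeIndex::from_u32(0))` is stored as `2`, and `get` decodes
// it by subtracting `COMPRESSED_FIRST_GREEN`.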
impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}