use parking_lot::Mutex;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef};
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_index::vec::IndexVec;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use smallvec::{smallvec, SmallVec};
use std::assert_matches::assert_matches;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::atomic::Ordering::Relaxed;

use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffects};

#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

#[derive(Clone)]
pub struct DepGraph<K: DepKind> {
    data: Option<Lrc<DepGraphData<K>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex {}
}

impl DepNodeIndex {
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
    pub const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::from_u32(0);
    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
}

impl From<DepNodeIndex> for QueryInvocationId {
    #[inline]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

#[derive(PartialEq, Clone, Copy)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    #[inline]
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

struct DepGraphData<K: DepKind> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph
    /// into the current one anymore, but we do reference shared data to
    /// save space.
    current: CurrentDepGraph<K>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: SerializedDepGraph<K>,

    colors: DepNodeColorMap,

    processed_side_effects: Mutex<FxHashSet<DepNodeIndex>>,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,

    /// Used by incremental compilation tests to assert that
    /// a particular query result was decoded from disk
    /// (not just marked green).
    debug_loaded_from_disk: Lock<FxHashSet<DepNode<K>>>,
}

pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}

impl<K: DepKind> DepGraph<K> {
    pub fn new(
        profiler: &SelfProfilerRef,
        prev_graph: SerializedDepGraph<K>,
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> DepGraph<K> {
        let prev_graph_node_count = prev_graph.node_count();

        let current = CurrentDepGraph::new(
            profiler,
            prev_graph_node_count,
            encoder,
            record_graph,
            record_stats,
        );

        let colors = DepNodeColorMap::new(prev_graph_node_count);

        // Instantiate a dependency-less node only once for anonymous queries.
        let _green_node_index = current.intern_new_node(
            profiler,
            DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() },
            smallvec![],
            Fingerprint::ZERO,
        );
        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);

        // Instantiate a dependency-less red node only once for anonymous queries.
        let (_red_node_index, _prev_and_index) = current.intern_node(
            profiler,
            &prev_graph,
            DepNode { kind: DepKind::RED, hash: Fingerprint::ZERO.into() },
            smallvec![],
            None,
            false,
        );
        assert_eq!(_red_node_index, DepNodeIndex::FOREVER_RED_NODE);
        assert!(matches!(_prev_and_index, None | Some((_, DepNodeColor::Red))));

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                processed_side_effects: Default::default(),
                previous: prev_graph,
                colors,
                debug_loaded_from_disk: Default::default(),
            })),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<K> {
        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            K::read_deps(|task_deps| {
                assert_matches!(
                    task_deps,
                    TaskDepsRef::Ignore,
                    "expected no task dependency tracking"
                );
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        K::with_deps(TaskDepsRef::Ignore, op)
    }

    /// Used to wrap the deserialization of a query result from disk.
    /// This method enforces that no new `DepNodes` are created during
    /// query result deserialization.
    ///
    /// Enforcing this makes the query dep graph simpler - all nodes
    /// must be created during the query execution, and should be
    /// created from inside the 'body' of a query (the implementation
    /// provided by a particular compiler crate).
    ///
    /// Consider the case of three queries `A`, `B`, and `C`, where
    /// `A` invokes `B` and `B` invokes `C`:
    ///
    /// `A -> B -> C`
    ///
    /// Suppose that decoding the result of query `B` required re-computing
    /// the query `C`. If we did not create a fresh `TaskDeps` when
    /// decoding `B`, we would still be using the `TaskDeps` for query `A`
    /// (if we needed to re-execute `A`). This would cause us to create
    /// a new edge `A -> C`. If this edge did not previously
    /// exist in the `DepGraph`, then we could end up with a different
    /// `DepGraph` at the end of compilation, even if there were no
    /// meaningful changes to the overall program (e.g. a newline was added).
    /// In addition, this edge might cause a subsequent compilation run
    /// to try to force `C` before marking other necessary nodes green. If
    /// `C` did not exist in the new compilation session, then we could
    /// get an ICE. Normally, we would have tried (and failed) to mark
    /// some other query green (e.g. `item_children`) which was used
    /// to obtain `C`, which would prevent us from ever trying to force
    /// a non-existent `C`.
    ///
    /// It might be possible to enforce that all `DepNode`s read during
    /// deserialization already exist in the previous `DepGraph`. In
    /// the above example, we would invoke `C` during the deserialization
    /// of `B`. Since we correctly create a new `TaskDeps` from the decoding
    /// of `B`, this would result in an edge `B -> C`. If that edge already
    /// existed (with the same `DepPathHash`es), then it should be correct
    /// to allow the invocation of the query to proceed during deserialization
    /// of a query result. We would merely assert that the dep-graph fragment
    /// that would have been added by invoking `C` while decoding `B`
    /// is equivalent to the dep-graph fragment that we already instantiated
    /// for `B` (at the point where we successfully marked `B` as green).
    ///
    /// However, this would require additional complexity
    /// in the query infrastructure, and is not currently needed by the
    /// decoding of any query results. Should the need arise in the future,
    /// we should consider extending the query system with this functionality.
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        K::with_deps(TaskDepsRef::Forbid, op)
    }

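    // Illustrative sketch (hypothetical helper, not part of this module): a
    // cache decode path wrapped in `with_query_deserialization`.
    // `decode_cached` stands in for a real on-disk cache decoder:
    //
    //     fn load_cached_result<K: DepKind, R>(
    //         graph: &DepGraph<K>,
    //         decode_cached: impl FnOnce() -> R,
    //     ) -> R {
    //         // Any `read_index` performed while decoding hits
    //         // `TaskDepsRef::Forbid` and panics, surfacing the bug early.
    //         graph.with_query_deserialization(decode_cached)
    //     }
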
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        if self.is_fully_enabled() {
            self.with_task_impl(key, cx, arg, task, hash_result)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            (task(cx, arg), self.next_virtual_depnode_index())
        }
    }

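    // Illustrative sketch (hypothetical names): how a query force looks from
    // the caller's side. `tcx`, `dep_node`, `def_id`, and `compute_type_of`
    // are stand-ins for a real context, node, key, and provider function:
    //
    //     let (result, dep_node_index) = tcx.dep_graph().with_task(
    //         dep_node,          // `DepNode<K>` identifying this invocation
    //         tcx,               // context passed to the free function
    //         def_id,            // query key
    //         compute_type_of,   // fn(Tcx, DefId) -> R -- a fn item, not a closure
    //         Some(hash_result), // fingerprints the result for red/green tracking
    //     );
    //     tcx.dep_graph().read_index(dep_node_index);
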
    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        // This function is only called when the graph is enabled.
        let data = self.data.as_ref().unwrap();

        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
        assert!(
            !self.dep_node_exists(&key),
            "forcing query with already existing `DepNode`\n\
             - query-key: {arg:?}\n\
             - dep-node: {key:?}"
        );

        let task_deps = if cx.dep_context().is_eval_always(key.kind) {
            None
        } else {
            Some(Lock::new(TaskDeps {
                #[cfg(debug_assertions)]
                node: Some(key),
                reads: SmallVec::new(),
                read_set: Default::default(),
                phantom_data: PhantomData,
            }))
        };

        let task_deps_ref = match &task_deps {
            Some(deps) => TaskDepsRef::Allow(deps),
            None => TaskDepsRef::Ignore,
        };

        let result = K::with_deps(task_deps_ref, || task(cx, arg));
        let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);

        let dcx = cx.dep_context();
        let hashing_timer = dcx.profiler().incr_result_hashing();
        let current_fingerprint =
            hash_result.map(|f| dcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, &result)));

        let print_status = cfg!(debug_assertions) && dcx.sess().opts.unstable_opts.dep_tasks;

        // Intern the new `DepNode`.
        let (dep_node_index, prev_and_color) = data.current.intern_node(
            dcx.profiler(),
            &data.previous,
            key,
            edges,
            current_fingerprint,
            print_status,
        );

        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some((prev_index, color)) = prev_and_color {
            debug_assert!(
                data.colors.get(prev_index).is_none(),
                "DepGraph::with_task() - Duplicate DepNodeColor \
                 insertion for {key:?}"
            );

            data.colors.insert(prev_index, color);
        }

        (result, dep_node_index)
    }

    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
    pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: K,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!cx.is_eval_always(dep_kind));

        if let Some(ref data) = self.data {
            let task_deps = Lock::new(TaskDeps::default());
            let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
            let task_deps = task_deps.into_inner();
            let task_deps = task_deps.reads;

            let dep_node_index = match task_deps.len() {
                0 => {
                    // Because the dep-node id of anon nodes is computed from the sets of its
                    // dependencies, we already know what the ID of this dependency-less node is
                    // going to be (i.e. equal to the precomputed
                    // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
                    // a `StableHasher` and sending the node through interning.
                    DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
                }
                1 => {
                    // When there is only one dependency, don't bother creating a node.
                    task_deps[0]
                }
                _ => {
                    // The dep node indices are hashed here instead of hashing the dep nodes of
                    // the dependencies. These indices may refer to different nodes per session,
                    // but this isn't a problem here because we ensure that the final dep node
                    // hash is per-session only, by combining it with the per-session random
                    // number `anon_id_seed`. This hash only needs to map the dependencies to a
                    // single value on a per-session basis.
                    let mut hasher = StableHasher::new();
                    task_deps.hash(&mut hasher);

                    let target_dep_node = DepNode {
                        kind: dep_kind,
                        // Fingerprint::combine() is faster than sending Fingerprint
                        // through the StableHasher (at least as long as StableHasher
                        // is so slow).
                        hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
                    };

                    data.current.intern_new_node(
                        cx.profiler(),
                        target_dep_node,
                        task_deps,
                        Fingerprint::ZERO,
                    )
                }
            };

            (result, dep_node_index)
        } else {
            (op(), self.next_virtual_depnode_index())
        }
    }

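    // Illustrative sketch (standalone helper, not used above): the anon-node
    // ID scheme reduces to "stable-hash the edge list, then mix in the
    // per-session seed":
    //
    //     fn anon_node_hash(seed: Fingerprint, edges: &EdgesVec) -> Fingerprint {
    //         let mut hasher = StableHasher::new();
    //         edges.hash(&mut hasher);
    //         // Combining with `seed` keeps anon IDs from different sessions
    //         // from shadowing each other during red/green marking.
    //         seed.combine(hasher.finish())
    //     }
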
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            K::read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        panic!("Illegal read of: {dep_node_index:?}")
                    }
                };
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Relaxed);
                }

                // As long as we only have a low number of reads we can avoid doing a hash
                // insert and potentially allocating/reallocating the hashmap
                let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
                    task_deps.reads.iter().all(|other| *other != dep_node_index)
                } else {
                    task_deps.read_set.insert(dep_node_index)
                };
                if new_read {
                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == TASK_DEPS_READS_CAP {
                        // Fill `read_set` with what we have so far so we can use the hashset
                        // next time
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                                if forbidden_edge.test(&src, &target) {
                                    panic!("forbidden edge {:?} -> {:?} created", src, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                }
            })
        }
    }

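    // Illustrative sketch (standalone, `u32` standing in for `DepNodeIndex`):
    // the dedup strategy above -- a linear scan while the read list is short,
    // a hash-set membership test once it reaches the cap:
    //
    //     fn is_new_read(
    //         reads: &[u32],
    //         read_set: &mut std::collections::HashSet<u32>,
    //         idx: u32,
    //     ) -> bool {
    //         if reads.len() < TASK_DEPS_READS_CAP {
    //             reads.iter().all(|other| *other != idx) // cheap for tiny lists
    //         } else {
    //             read_set.insert(idx) // amortized O(1) once the list has grown
    //         }
    //     }
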
    /// Create a node when we force-feed a value into the query cache.
    /// This is used to remove cycles during type-checking const generic parameters.
    ///
    /// As usual in the query system, we consider the current state of the calling query
    /// only depends on the list of dependencies up to now. As a consequence, the value
    /// that this query gives us can only depend on those dependencies too. Therefore,
    /// it is sound to use the current dependency set for the created node.
    ///
    /// During replay, the order of the nodes is relevant in the dependency graph.
    /// So the unchanged replay will mark the caller query before trying to mark this one.
    /// If there is a change to report, the caller query will be re-executed before this one.
    ///
    /// FIXME: If the code is changed enough for this node to be marked before requiring the
    /// caller's node, we suppose that those changes will be enough to mark this node red and
    /// force a recomputation using the "normal" way.
    pub fn with_feed_task<Ctxt: DepContext<DepKind = K>, A: Debug, R: Debug>(
        &self,
        node: DepNode<K>,
        cx: Ctxt,
        key: A,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(data) = self.data.as_ref() {
            // The caller query has more dependencies than the node we are creating. We may
            // encounter a case where this created node is marked as green, but the caller query
            // is subsequently marked as red or recomputed. In this case, we will end up feeding
            // a value to an existing node.
            //
            // For sanity, we still check that the loaded stable hash and the new one match.
            if let Some(dep_node_index) = self.dep_node_index_of_opt(&node) {
                let _current_fingerprint =
                    crate::query::incremental_verify_ich(cx, result, &node, hash_result);

                #[cfg(debug_assertions)]
                if hash_result.is_some() {
                    data.current.record_edge(dep_node_index, node, _current_fingerprint);
                }

                return dep_node_index;
            }

            let mut edges = SmallVec::new();
            K::read_deps(|task_deps| match task_deps {
                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                TaskDepsRef::Ignore => {} // During HIR lowering, we have no dependencies.
                TaskDepsRef::Forbid => {
                    panic!("Cannot summarize when dependencies are not recorded.")
                }
            });

            let hashing_timer = cx.profiler().incr_result_hashing();
            let current_fingerprint = hash_result.map(|hash_result| {
                cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
            });

            let print_status = cfg!(debug_assertions) && cx.sess().opts.unstable_opts.dep_tasks;

            // Intern the new `DepNode` with the dependencies up-to-now.
            let (dep_node_index, prev_and_color) = data.current.intern_node(
                cx.profiler(),
                &data.previous,
                node,
                edges,
                current_fingerprint,
                print_status,
            );

            hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

            if let Some((prev_index, color)) = prev_and_color {
                debug_assert!(
                    data.colors.get(prev_index).is_none(),
                    "DepGraph::with_feed_task() - Duplicate DepNodeColor insertion for {key:?}",
                );

                data.colors.insert(prev_index, color);
            }

            dep_node_index
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            self.next_virtual_depnode_index()
        }
    }

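    // Illustrative sketch (hypothetical names): feeding a value into the
    // cache for a node derived from the current dependency set, as done when
    // breaking const-generic cycles. `tcx`, `fed_node`, `key`, and `value`
    // are stand-ins, not items defined in this module:
    //
    //     let dep_node_index = tcx.dep_graph().with_feed_task(
    //         fed_node,          // `DepNode<K>` being force-fed
    //         tcx,
    //         key,               // query key, used in the duplicate-color assertion
    //         &value,            // result written into the query cache by the caller
    //         Some(hash_result), // lets an existing node be verified instead of re-interned
    //     );
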
    #[inline]
    pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
        self.dep_node_index_of_opt(dep_node).unwrap()
    }

    #[inline]
    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
        let data = self.data.as_ref().unwrap();
        let current = &data.current;

        if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
            current.prev_index_to_index.lock()[prev_index]
        } else {
            current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied()
        }
    }

    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
        self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().insert(dep_node);
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    #[inline(always)]
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = self.with_ignore(debug_str_gen);
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
                return data.colors.get(prev_index);
            } else {
                // This is a node that did not exist in the previous compilation session.
                return None;
            }
        }

        None
    }

    /// Try to mark a node index for the node `dep_node`.
    ///
    /// A node will have an index when it's already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node, when
    /// a node index can be found for that node.
    pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        // Return None if the dep graph is disabled
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(qcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    #[instrument(skip(self, qcx, data, parent_dep_node_index), level = "debug")]
    fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
        &self,
        qcx: Qcx,
        data: &DepGraphData<K>,
        parent_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<()> {
        let dep_dep_node_color = data.colors.get(parent_dep_node_index);
        let dep_dep_node = &data.previous.index_to_node(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                // This dependency has been marked as green before, we are
                // still fine and can continue with checking the other
                // dependencies.
                debug!("dependency {dep_dep_node:?} was immediately green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                // We found a dependency the value of which has changed
                // compared to the previous compilation session. We cannot
                // mark the DepNode as green and also don't need to bother
                // with checking any of the other dependencies.
                debug!("dependency {dep_dep_node:?} was immediately red");
                return None;
            }
            None => {}
        }

        // We don't know the state of this dependency. If it isn't
        // an eval_always node, let's try to mark it green recursively.
        if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
            debug!(
                "state of dependency {:?} ({}) is unknown, trying to mark it green",
                dep_dep_node, dep_dep_node.hash,
            );

            let node_index =
                self.try_mark_previous_green(qcx, data, parent_dep_node_index, dep_dep_node);

            if node_index.is_some() {
                debug!("managed to MARK dependency {dep_dep_node:?} as green");
                return Some(());
            }
        }

        // We failed to mark it green, so we try to force the query.
        debug!("trying to force dependency {dep_dep_node:?}");
        if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node) {
            // The DepNode could not be forced.
            debug!("dependency {dep_dep_node:?} could not be forced");
            return None;
        }

        let dep_dep_node_color = data.colors.get(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                debug!("managed to FORCE dependency {dep_dep_node:?} to green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                debug!("dependency {dep_dep_node:?} was red after forcing");
                return None;
            }
            None => {}
        }

        if let None = qcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
            panic!("try_mark_parent_green() - Forcing the DepNode should have set its color")
        }

        // If the query we just forced has resulted in
        // some kind of compilation error, we cannot rely on
        // the dep-node color having been properly updated.
        // This means that the query system has reached an
        // invalid state. We let the compiler continue (by
        // returning `None`) so it can emit error messages
        // and wind down, but rely on the fact that this
        // invalid state will not be persisted to the
        // incremental compilation cache because of
        // compilation errors being present.
        debug!("dependency {dep_dep_node:?} resulted in compilation error");
        None
    }

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    #[instrument(skip(self, qcx, data, prev_dep_node_index), level = "debug")]
    fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>(
        &self,
        qcx: Qcx,
        data: &DepGraphData<K>,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<DepNodeIndex> {
        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(!self.dep_node_exists(dep_node));
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        for &dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(qcx, data, dep_dep_node_index, dep_node)?
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently

        // We allocate an entry for the node in the current dependency graph and
        // add all the appropriate edges imported from the previous graph
        let dep_node_index = data.current.promote_node_and_deps_to_current(
            qcx.dep_context().profiler(),
            &data.previous,
            prev_dep_node_index,
        );

        // ... emitting any stored diagnostics ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
        // Maybe store a list on disk and encode this fact in the DepNodeState
        let side_effects = qcx.load_side_effects(prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
             insertion for {dep_node:?}"
        );

        if !side_effects.is_empty() {
            self.with_query_deserialization(|| {
                self.emit_side_effects(qcx, data, dep_node_index, side_effects)
            });
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("successfully marked {dep_node:?} as green");
        Some(dep_node_index)
    }

    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    #[cold]
    #[inline(never)]
    fn emit_side_effects<Qcx: QueryContext<DepKind = K>>(
        &self,
        qcx: Qcx,
        data: &DepGraphData<K>,
        dep_node_index: DepNodeIndex,
        side_effects: QuerySideEffects,
    ) {
        let mut processed = data.processed_side_effects.lock();

        if processed.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must process side effects

            // Promote the previous diagnostics to the current session.
            qcx.store_side_effects(dep_node_index, side_effects.clone());

            let handle = qcx.dep_context().sess().diagnostic();

            for mut diagnostic in side_effects.diagnostics {
                handle.emit_diagnostic(&mut diagnostic);
            }
        }
    }

    /// Returns true if the given node has been marked as red during the
    /// current compilation session. Used in various assertions
    pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node) == Some(DepNodeColor::Red)
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions
    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node).map_or(false, |c| c.is_green())
    }

    /// This method loads all on-disk cacheable query results into memory, so
    /// they can be written out to the new cache file again. Most query results
    /// will already be in memory, but in the case where we marked something as
    /// green but then did not need the value, that value will never have been
    /// loaded from disk.
    ///
    /// This method will only load queries that will end up in the disk cache.
    /// Other queries will not be executed.
    pub fn exec_cache_promotions<Tcx: DepContext<DepKind = K>>(&self, tcx: Tcx) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    pub fn print_incremental_info(&self) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().print_incremental_info(
                data.current.total_read_count.load(Relaxed),
                data.current.total_duplicate_read_count.load(Relaxed),
            )
        }
    }

    pub fn encode(&self, profiler: &SelfProfilerRef) -> FileEncodeResult {
        if let Some(data) = &self.data {
            data.current.encoder.steal().finish(profiler)
        } else {
            Ok(0)
        }
    }

    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
}

/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example are
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edge check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved files associated with this CGU. In each key/value pair, the value is the path to the
    /// saved file and the key is some identifier for the type of file being saved.
    ///
    /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
    /// the object file's path, and "dwo" to the dwarf object file's path.
    pub saved_files: FxHashMap<String, String>,
}

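// Illustrative sketch (hypothetical values): a `WorkProduct` recording the
// object file and dwarf object saved for one codegen unit:
//
//     let wp = WorkProduct {
//         cgu_name: "example_crate.abc123-cgu.0".to_string(),
//         saved_files: FxHashMap::from_iter([
//             ("o".to_string(), "example_crate.abc123-cgu.0.o".to_string()),
//             ("dwo".to_string(), "example_crate.abc123-cgu.0.dwo".to_string()),
//         ]),
//     };
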
// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex {}
}

/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `new_node_to_index` map only contains nodes not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `new_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
pub(super) struct CurrentDepGraph<K: DepKind> {
    encoder: Steal<GraphEncoder<K>>,
    new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
    prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,

    /// This is used to verify that fingerprints do not change between the creation of a node
    /// and its recomputation.
    #[cfg(debug_assertions)]
    fingerprints: Lock<FxHashMap<DepNode<K>, Fingerprint>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter<K>>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,

    /// The cached event id for profiling node interning. This saves us
    /// from having to look up the event id every time we intern a node
    /// which may incur too much overhead.
    /// This will be None if self-profiling is disabled.
    node_intern_event_id: Option<EventId>,
}

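// Illustrative sketch (condensing `DepGraph::dep_node_index_of_opt` above):
// the two-step mapping means a previous-session node never touches the
// `new_node_to_index` hash map:
//
//     fn current_index_of<K: DepKind>(
//         data: &DepGraphData<K>,
//         node: &DepNode<K>,
//     ) -> Option<DepNodeIndex> {
//         match data.previous.node_to_index_opt(node) {
//             // Step 1: DepNode -> SerializedDepNodeIndex (previous graph);
//             // Step 2: SerializedDepNodeIndex -> Option<DepNodeIndex> (vector).
//             Some(prev) => data.current.prev_index_to_index.lock()[prev],
//             // Only genuinely new nodes pay for a hash-map lookup.
//             None => data
//                 .current
//                 .new_node_to_index
//                 .get_shard_by_value(node)
//                 .lock()
//                 .get(node)
//                 .copied(),
//         }
//     }
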
impl<K: DepKind> CurrentDepGraph<K> {
    fn new(
        profiler: &SelfProfilerRef,
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> CurrentDepGraph<K> {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);
        let anon_id_seed = stable_hasher.finish();

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        // We store a large collection of these in `prev_index_to_index` during
        // non-full incremental builds, and want to ensure that the element size
        // doesn't inadvertently increase.
        static_assert_size!(Option<DepNodeIndex>, 4);

        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        let node_intern_event_id = profiler
            .get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
            .map(EventId::from_label);

        CurrentDepGraph {
            encoder: Steal::new(GraphEncoder::new(
                encoder,
                prev_graph_node_count,
                record_graph,
                record_stats,
            )),
            new_node_to_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed,
            #[cfg(debug_assertions)]
            forbidden_edge,
            #[cfg(debug_assertions)]
            fingerprints: Lock::new(Default::default()),
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
            node_intern_event_id,
        }
    }

    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>, fingerprint: Fingerprint) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
        match self.fingerprints.lock().entry(key) {
            Entry::Vacant(v) => {
                v.insert(fingerprint);
            }
            Entry::Occupied(o) => {
                assert_eq!(*o.get(), fingerprint, "Unstable fingerprints for {:?}", key);
            }
        }
    }

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
    fn intern_new_node(
        &self,
        profiler: &SelfProfilerRef,
        key: DepNode<K>,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        let dep_node_index = match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key)
        {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let dep_node_index =
                    self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
                entry.insert(dep_node_index);
                dep_node_index
            }
        };

        #[cfg(debug_assertions)]
        self.record_edge(dep_node_index, key, current_fingerprint);

        dep_node_index
    }

    fn intern_node(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &SerializedDepGraph<K>,
        key: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
        print_status: bool,
    ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
        let print_status = cfg!(debug_assertions) && print_status;

        // Get timer for profiling `DepNode` interning
        let _node_intern_timer =
            self.node_intern_event_id.map(|eid| profiler.generic_activity_with_event_id(eid));

        if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
            // Determine the color and index of the new `DepNode`.
            if let Some(fingerprint) = fingerprint {
                if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
                    if print_status {
                        eprintln!("[task::green] {key:?}");
                    }

                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                        None => {
                            let dep_node_index =
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
                            prev_index_to_index[prev_index] = Some(dep_node_index);
                            dep_node_index
                        }
                    };

                    #[cfg(debug_assertions)]
                    self.record_edge(dep_node_index, key, fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
                } else {
                    if print_status {
                        eprintln!("[task::red] {key:?}");
                    }

                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                        None => {
                            let dep_node_index =
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
                            prev_index_to_index[prev_index] = Some(dep_node_index);
                            dep_node_index
                        }
                    };

                    #[cfg(debug_assertions)]
                    self.record_edge(dep_node_index, key, fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                }
            } else {
                if print_status {
                    eprintln!("[task::unknown] {key:?}");
                }

                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                let mut prev_index_to_index = self.prev_index_to_index.lock();

                let dep_node_index = match prev_index_to_index[prev_index] {
                    Some(dep_node_index) => dep_node_index,
                    None => {
                        let dep_node_index =
                            self.encoder.borrow().send(profiler, key, Fingerprint::ZERO, edges);
                        prev_index_to_index[prev_index] = Some(dep_node_index);
                        dep_node_index
                    }
                };

                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key, Fingerprint::ZERO);
                (dep_node_index, Some((prev_index, DepNodeColor::Red)))
            }
        } else {
            if print_status {
                eprintln!("[task::new] {key:?}");
            }

            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            // This is a new node: it didn't exist in the previous compilation session.
            let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint);

            (dep_node_index, None)
        }
    }

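    // Illustrative sketch (standalone helper, not used by this impl): the
    // three interning outcomes above, reduced to a color decision. `None`
    // means "new node, no color entry"; `Some(true)` green; `Some(false)` red:
    //
    //     fn classify(
    //         prev_fingerprint: Option<Fingerprint>, // None: node is new
    //         new_fingerprint: Option<Fingerprint>,  // None: `no_hash` query
    //     ) -> Option<bool> {
    //         match (prev_fingerprint, new_fingerprint) {
    //             (None, _) => None,
    //             (Some(prev), Some(new)) if prev == new => Some(true),
    //             // A changed hash, or no hash at all, is treated as red.
    //             (Some(_), _) => Some(false),
    //         }
    //     }
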
    fn promote_node_and_deps_to_current(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &SerializedDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let key = prev_graph.index_to_node(prev_index);
                let edges = prev_graph
                    .edge_targets_from(prev_index)
                    .iter()
                    .map(|i| prev_index_to_index[*i].unwrap())
                    .collect();
                let fingerprint = prev_graph.fingerprint_by_index(prev_index);
                let dep_node_index = self.encoder.borrow().send(profiler, key, fingerprint, edges);
                prev_index_to_index[prev_index] = Some(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key, fingerprint);
                dep_node_index
            }
        }
    }

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) {
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
            "node from previous graph present in new node collection"
        );
    }
}

/// The inline capacity of the `reads` field's `SmallVec`.
const TASK_DEPS_READS_CAP: usize = 8;
type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;

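// Illustrative sketch (standalone): with an inline capacity of
// `TASK_DEPS_READS_CAP`, the first 8 reads stay on the stack and only the
// 9th forces a heap allocation:
//
//     let mut edges: EdgesVec = SmallVec::new();
//     edges.extend((0..8).map(DepNodeIndex::from_u32));
//     assert!(!edges.spilled()); // still inline
//     edges.push(DepNodeIndex::from_u32(8));
//     assert!(edges.spilled()); // spilled to the heap
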
#[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a, K: DepKind> {
    /// New dependencies can be added to the
    /// `TaskDeps`. This is used when executing a 'normal' query
    /// (no `eval_always` modifier)
    Allow(&'a Lock<TaskDeps<K>>),
    /// New dependencies are ignored. This is used when
    /// executing an `eval_always` query, since there's no
    /// need to track dependencies for a query that's always
    /// re-executed. This is also used for `dep_graph.with_ignore`.
    Ignore,
    /// Any attempt to add new dependencies will cause a panic.
    /// This is used when decoding a query result from disk,
    /// to ensure that the decoding process doesn't itself
    /// require the execution of any queries.
    Forbid,
}

#[derive(Debug)]
pub struct TaskDeps<K: DepKind> {
    #[cfg(debug_assertions)]
    node: Option<DepNode<K>>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode<K>>,
}

impl<K: DepKind> Default for TaskDeps<K> {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
        }
    }
}

// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    #[inline]
    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    #[inline]
    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}

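// Illustrative sketch (mirroring the store above): the whole encoding in one
// function. `0` is "no color yet", `1` is red, and any value `v >= 2` is
// "green with `DepNodeIndex` `v - 2`":
//
//     fn encode(color: Option<DepNodeColor>) -> u32 {
//         match color {
//             None => COMPRESSED_NONE,
//             Some(DepNodeColor::Red) => COMPRESSED_RED,
//             Some(DepNodeColor::Green(i)) => i.as_u32() + COMPRESSED_FIRST_GREEN,
//         }
//     }
//
// Packing each entry into one `AtomicU32` lets concurrent markers race
// benignly: every writer for a given node stores the same value, so no lock
// around the map is needed.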