// compiler/rustc_query_system/src/dep_graph/graph.rs (from rustc 1.61.0)
use parking_lot::Mutex;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef};
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_index::vec::IndexVec;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use smallvec::{smallvec, SmallVec};
use std::assert_matches::assert_matches;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::atomic::Ordering::Relaxed;

use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffects};

#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

#[derive(Clone)]
pub struct DepGraph<K: DepKind> {
    data: Option<Lrc<DepGraphData<K>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex { .. }
}

impl DepNodeIndex {
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
    pub const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::from_u32(0);
}

impl std::convert::From<DepNodeIndex> for QueryInvocationId {
    #[inline]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

#[derive(PartialEq)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

struct DepGraphData<K: DepKind> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
    current: CurrentDepGraph<K>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: SerializedDepGraph<K>,

    colors: DepNodeColorMap,

    processed_side_effects: Mutex<FxHashSet<DepNodeIndex>>,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,

    /// Used by incremental compilation tests to assert that
    /// a particular query result was decoded from disk
    /// (not just marked green).
    debug_loaded_from_disk: Lock<FxHashSet<DepNode<K>>>,
}

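/// Hashes a query result into the `Fingerprint` that is stored in the dep-graph
/// for the node and compared against the previous session's fingerprint when
/// deciding whether a re-executed node is green (unchanged) or red (changed).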
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}

impl<K: DepKind> DepGraph<K> {
    pub fn new(
        profiler: &SelfProfilerRef,
        prev_graph: SerializedDepGraph<K>,
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> DepGraph<K> {
        let prev_graph_node_count = prev_graph.node_count();

        let current = CurrentDepGraph::new(
            profiler,
            prev_graph_node_count,
            encoder,
            record_graph,
            record_stats,
        );

        // Instantiate a dependency-less node only once for anonymous queries.
        let _green_node_index = current.intern_new_node(
            profiler,
            DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() },
            smallvec![],
            Fingerprint::ZERO,
        );
        debug_assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                processed_side_effects: Default::default(),
                previous: prev_graph,
                colors: DepNodeColorMap::new(prev_graph_node_count),
                debug_loaded_from_disk: Default::default(),
            })),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<K> {
        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            K::read_deps(|task_deps| {
                assert_matches!(
                    task_deps,
                    TaskDepsRef::Ignore,
                    "expected no task dependency tracking"
                );
            })
        }
    }

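    /// Executes `op` with dependency tracking disabled: reads performed inside
    /// `op` are not recorded as dependencies of the enclosing task.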
    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        K::with_deps(TaskDepsRef::Ignore, op)
    }

    /// Used to wrap the deserialization of a query result from disk.
    /// This method enforces that no new `DepNodes` are created during
    /// query result deserialization.
    ///
    /// Enforcing this makes the query dep graph simpler - all nodes
    /// must be created during the query execution, and should be
    /// created from inside the 'body' of a query (the implementation
    /// provided by a particular compiler crate).
    ///
    /// Consider the case of three queries `A`, `B`, and `C`, where
    /// `A` invokes `B` and `B` invokes `C`:
    ///
    /// `A -> B -> C`
    ///
    /// Suppose that decoding the result of query `B` required re-computing
    /// the query `C`. If we did not create a fresh `TaskDeps` when
    /// decoding `B`, we would still be using the `TaskDeps` for query `A`
    /// (if we needed to re-execute `A`). This would cause us to create
    /// a new edge `A -> C`. If this edge did not previously
    /// exist in the `DepGraph`, then we could end up with a different
    /// `DepGraph` at the end of compilation, even if there were no
    /// meaningful changes to the overall program (e.g. a newline was added).
    /// In addition, this edge might cause a subsequent compilation run
    /// to try to force `C` before marking other necessary nodes green. If
    /// `C` did not exist in the new compilation session, then we could
    /// get an ICE. Normally, we would have tried (and failed) to mark
    /// some other query green (e.g. `item_children`) which was used
    /// to obtain `C`, which would prevent us from ever trying to force
    /// the non-existent `C`.
    ///
    /// It might be possible to enforce that all `DepNode`s read during
    /// deserialization already exist in the previous `DepGraph`. In
    /// the above example, we would invoke `C` during the deserialization
    /// of `B`. Since we correctly create a new `TaskDeps` for the decoding
    /// of `B`, this would result in an edge `B -> C`. If that edge already
    /// existed (with the same `DepPathHash`es), then it should be correct
    /// to allow the invocation of the query to proceed during deserialization
    /// of a query result. We would merely assert that the dep-graph fragment
    /// that would have been added by invoking `C` while decoding `B`
    /// is equivalent to the dep-graph fragment that we already instantiated for `B`
    /// (at the point where we successfully marked `B` as green).
    ///
    /// However, this would require additional complexity
    /// in the query infrastructure, and is not currently needed by the
    /// decoding of any query results. Should the need arise in the future,
    /// we should consider extending the query system with this functionality.
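    ///
    /// A hypothetical usage sketch (not from the original source; `decode_result`
    /// stands in for a real decoding callback):
    ///
    /// ```ignore (illustrative)
    /// let result = dep_graph.with_query_deserialization(|| decode_result(&mut decoder));
    /// ```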
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        K::with_deps(TaskDepsRef::Forbid, op)
    }

    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
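    /// A hypothetical call sketch (illustrative only; `compute` and `key` are
    /// stand-ins, not names from this crate):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) =
    ///     dep_graph.with_task(dep_node, tcx, key, compute, Some(hash_result));
    /// ```
    ///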
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        if self.is_fully_enabled() {
            self.with_task_impl(key, cx, arg, task, hash_result)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            (task(cx, arg), self.next_virtual_depnode_index())
        }
    }

    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        // This function is only called when the graph is enabled.
        let data = self.data.as_ref().unwrap();

        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
        assert!(
            !self.dep_node_exists(&key),
            "forcing query with already existing `DepNode`\n\
             - query-key: {:?}\n\
             - dep-node: {:?}",
            arg,
            key
        );

        let task_deps = if cx.dep_context().is_eval_always(key.kind) {
            None
        } else {
            Some(Lock::new(TaskDeps {
                #[cfg(debug_assertions)]
                node: Some(key),
                reads: SmallVec::new(),
                read_set: Default::default(),
                phantom_data: PhantomData,
            }))
        };

        let task_deps_ref = match &task_deps {
            Some(deps) => TaskDepsRef::Allow(deps),
            None => TaskDepsRef::Ignore,
        };

        let result = K::with_deps(task_deps_ref, || task(cx, arg));
        let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);

        let dcx = cx.dep_context();
        let hashing_timer = dcx.profiler().incr_result_hashing();
        let current_fingerprint = hash_result.map(|f| {
            let mut hcx = dcx.create_stable_hashing_context();
            f(&mut hcx, &result)
        });

        let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;

        // Intern the new `DepNode`.
        let (dep_node_index, prev_and_color) = data.current.intern_node(
            dcx.profiler(),
            &data.previous,
            key,
            edges,
            current_fingerprint,
            print_status,
        );

        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some((prev_index, color)) = prev_and_color {
            debug_assert!(
                data.colors.get(prev_index).is_none(),
                "DepGraph::with_task() - Duplicate DepNodeColor \
                 insertion for {:?}",
                key
            );

            data.colors.insert(prev_index, color);
        }

        (result, dep_node_index)
    }

    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
    pub fn with_anon_task<Ctxt: DepContext<DepKind = K>, OP, R>(
        &self,
        cx: Ctxt,
        dep_kind: K,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!cx.is_eval_always(dep_kind));

        if let Some(ref data) = self.data {
            let task_deps = Lock::new(TaskDeps::default());
            let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
            let task_deps = task_deps.into_inner();
            let task_deps = task_deps.reads;

            let dep_node_index = match task_deps.len() {
                0 => {
                    // Because the dep-node id of anon nodes is computed from the set of its
                    // dependencies, we already know what the ID of this dependency-less node is
                    // going to be (i.e. equal to the precomputed
                    // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
                    // a `StableHasher` and sending the node through interning.
                    DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
                }
                1 => {
                    // When there is only one dependency, don't bother creating a node.
                    task_deps[0]
                }
                _ => {
                    // The dep node indices are hashed here instead of hashing the dep nodes of the
                    // dependencies. These indices may refer to different nodes per session, but this
                    // isn't a problem here because we ensure that the final dep node hash is
                    // per-session only by combining it with the per-session random number
                    // `anon_id_seed`. This hash only needs to map the dependencies to a single
                    // value on a per-session basis.
                    let mut hasher = StableHasher::new();
                    task_deps.hash(&mut hasher);

                    let target_dep_node = DepNode {
                        kind: dep_kind,
                        // Fingerprint::combine() is faster than sending Fingerprint
                        // through the StableHasher (at least as long as StableHasher
                        // is so slow).
                        hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
                    };

                    data.current.intern_new_node(
                        cx.profiler(),
                        target_dep_node,
                        task_deps,
                        Fingerprint::ZERO,
                    )
                }
            };

            (result, dep_node_index)
        } else {
            (op(), self.next_virtual_depnode_index())
        }
    }

    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            K::read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        panic!("Illegal read of: {:?}", dep_node_index)
                    }
                };
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Relaxed);
                }

                // As long as we only have a low number of reads we can avoid doing a hash
                // insert and potentially allocating/reallocating the hashmap
                let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
                    task_deps.reads.iter().all(|other| *other != dep_node_index)
                } else {
                    task_deps.read_set.insert(dep_node_index)
                };
                if new_read {
                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == TASK_DEPS_READS_CAP {
                        // Fill `read_set` with what we have so far so we can use the hashset
                        // next time
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                                if forbidden_edge.test(&src, &target) {
                                    panic!("forbidden edge {:?} -> {:?} created", src, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                }
            })
        }
    }

    #[inline]
    pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
        self.dep_node_index_of_opt(dep_node).unwrap()
    }

    #[inline]
    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
        let data = self.data.as_ref().unwrap();
        let current = &data.current;

        if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
            current.prev_index_to_index.lock()[prev_index]
        } else {
            current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied()
        }
    }

    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
        self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().insert(dep_node);
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    #[inline(always)]
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = debug_str_gen();
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
                return data.colors.get(prev_index);
            } else {
                // This is a node that did not exist in the previous compilation session.
                return None;
            }
        }

        None
    }

    /// Try to mark a node index for the node `dep_node`.
    ///
    /// A node will have an index when it has already been marked green, or when
    /// we can mark it green ourselves. This function will mark the current task
    /// as a reader of the specified node when a node index can be found for that node.
    pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));

        // Return None if the dep graph is disabled.
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session.
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    fn try_mark_parent_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        parent_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<()> {
        let dep_dep_node_color = data.colors.get(parent_dep_node_index);
        let dep_dep_node = &data.previous.index_to_node(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                // This dependency has been marked as green before, we are
                // still fine and can continue with checking the other
                // dependencies.
                debug!(
                    "try_mark_previous_green({:?}) --- found dependency {:?} to \
                     be immediately green",
                    dep_node, dep_dep_node,
                );
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                // We found a dependency the value of which has changed
                // compared to the previous compilation session. We cannot
                // mark the DepNode as green and also don't need to bother
                // with checking any of the other dependencies.
                debug!(
                    "try_mark_previous_green({:?}) - END - dependency {:?} was immediately red",
                    dep_node, dep_dep_node,
                );
                return None;
            }
            None => {}
        }

        // We don't know the state of this dependency. If it isn't
        // an eval_always node, let's try to mark it green recursively.
        if !tcx.dep_context().is_eval_always(dep_dep_node.kind) {
            debug!(
                "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \
                 is unknown, trying to mark it green",
                dep_node, dep_dep_node, dep_dep_node.hash,
            );

            let node_index =
                self.try_mark_previous_green(tcx, data, parent_dep_node_index, dep_dep_node);
            if node_index.is_some() {
                debug!(
                    "try_mark_previous_green({:?}) --- managed to MARK dependency {:?} as green",
                    dep_node, dep_dep_node
                );
                return Some(());
            }
        }

        // We failed to mark it green, so we try to force the query.
        debug!(
            "try_mark_previous_green({:?}) --- trying to force dependency {:?}",
            dep_node, dep_dep_node
        );
        if !tcx.dep_context().try_force_from_dep_node(*dep_dep_node) {
            // The DepNode could not be forced.
            debug!(
                "try_mark_previous_green({:?}) - END - dependency {:?} could not be forced",
                dep_node, dep_dep_node
            );
            return None;
        }

        let dep_dep_node_color = data.colors.get(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                debug!(
                    "try_mark_previous_green({:?}) --- managed to FORCE dependency {:?} to green",
                    dep_node, dep_dep_node
                );
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                debug!(
                    "try_mark_previous_green({:?}) - END - dependency {:?} was red after forcing",
                    dep_node, dep_dep_node
                );
                return None;
            }
            None => {}
        }

        if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
        }

        // If the query we just forced has resulted in
        // some kind of compilation error, we cannot rely on
        // the dep-node color having been properly updated.
        // This means that the query system has reached an
        // invalid state. We let the compiler continue (by
        // returning `None`) so it can emit error messages
        // and wind down, but rely on the fact that this
        // invalid state will not be persisted to the
        // incremental compilation cache because of
        // compilation errors being present.
        debug!(
            "try_mark_previous_green({:?}) - END - dependency {:?} resulted in compilation error",
            dep_node, dep_dep_node
        );
        return None;
    }

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<DepNodeIndex> {
        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);

        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(!self.dep_node_exists(dep_node));
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green
        debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        for &dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)?
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently.

        // We allocate an entry for the node in the current dependency graph and
        // add all the appropriate edges imported from the previous graph.
        let dep_node_index = data.current.promote_node_and_deps_to_current(
            tcx.dep_context().profiler(),
            &data.previous,
            prev_dep_node_index,
        );

        // ... emitting any stored diagnostic ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
        // Maybe store a list on disk and encode this fact in the DepNodeState
        let side_effects = tcx.load_side_effects(prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
             insertion for {:?}",
            dep_node
        );

        if unlikely!(!side_effects.is_empty()) {
            self.emit_side_effects(tcx, data, dep_node_index, side_effects);
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here.
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
        Some(dep_node_index)
    }

    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    #[cold]
    #[inline(never)]
    fn emit_side_effects<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        dep_node_index: DepNodeIndex,
        side_effects: QuerySideEffects,
    ) {
        let mut processed = data.processed_side_effects.lock();

        if processed.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must process side effects.

            // Promote the previous diagnostics to the current session.
            tcx.store_side_effects(dep_node_index, side_effects.clone());

            let handle = tcx.dep_context().sess().diagnostic();

            for mut diagnostic in side_effects.diagnostics {
                handle.emit_diagnostic(&mut diagnostic);
            }
        }
    }

    // Returns true if the given node has been marked as red during the
    // current compilation session. Used in various assertions
    pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node) == Some(DepNodeColor::Red)
    }

    // Returns true if the given node has been marked as green during the
    // current compilation session. Used in various assertions
    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node).map_or(false, |c| c.is_green())
    }

    // This method loads all on-disk cacheable query results into memory, so
    // they can be written out to the new cache file again. Most query results
    // will already be in memory but in the case where we marked something as
    // green but then did not need the value, that value will never have been
    // loaded from disk.
    //
    // This method will only load queries that will end up in the disk cache.
    // Other queries will not be executed.
    pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    pub fn print_incremental_info(&self) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().print_incremental_info(
                data.current.total_read_count.load(Relaxed),
                data.current.total_duplicate_read_count.load(Relaxed),
            )
        }
    }

    pub fn encode(&self, profiler: &SelfProfilerRef) -> FileEncodeResult {
        if let Some(data) = &self.data {
            data.current.encoder.steal().finish(profiler)
        } else {
            Ok(())
        }
    }

    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
}

/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example is
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved file associated with this CGU.
    pub saved_file: Option<String>,
}

// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex { .. }
}

/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `new_node_to_index` map only contains nodes not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `new_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
pub(super) struct CurrentDepGraph<K: DepKind> {
    encoder: Steal<GraphEncoder<K>>,
    new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
    prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter<K>>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,

    /// The cached event id for profiling node interning. This saves us
    /// from having to look up the event id every time we intern a node
    /// which may incur too much overhead.
    /// This will be None if self-profiling is disabled.
    node_intern_event_id: Option<EventId>,
}

impl<K: DepKind> CurrentDepGraph<K> {
    fn new(
        profiler: &SelfProfilerRef,
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> CurrentDepGraph<K> {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        // We store a large collection of these in `prev_index_to_index` during
        // non-full incremental builds, and want to ensure that the element size
        // doesn't inadvertently increase.
        static_assert_size!(Option<DepNodeIndex>, 4);

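        // Assume roughly 2% new nodes relative to the previous graph, plus a
        // small constant amount of headroom.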
        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        let node_intern_event_id = profiler
            .get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
            .map(EventId::from_label);

        CurrentDepGraph {
            encoder: Steal::new(GraphEncoder::new(
                encoder,
                prev_graph_node_count,
                record_graph,
                record_stats,
            )),
            new_node_to_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed: stable_hasher.finish(),
            #[cfg(debug_assertions)]
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
            node_intern_event_id,
        }
    }

    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
    }

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
    fn intern_new_node(
        &self,
        profiler: &SelfProfilerRef,
        key: DepNode<K>,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let dep_node_index =
                    self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
                entry.insert(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                dep_node_index
            }
        }
    }

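    /// Interns a node produced by re-executing a query this session and decides
    /// its color: green if its fingerprint matches the previous session's, red
    /// if it differs or is absent (a `no_hash` query), and no color (`None`)
    /// if the node did not exist in the previous graph at all.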
    fn intern_node(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &SerializedDepGraph<K>,
        key: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
        print_status: bool,
    ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
        let print_status = cfg!(debug_assertions) && print_status;

        // Get timer for profiling `DepNode` interning
        let _node_intern_timer =
            self.node_intern_event_id.map(|eid| profiler.generic_activity_with_event_id(eid));

        if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
            // Determine the color and index of the new `DepNode`.
            if let Some(fingerprint) = fingerprint {
                if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
                    if print_status {
                        eprintln!("[task::green] {:?}", key);
                    }

                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                        None => {
                            let dep_node_index =
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
                            prev_index_to_index[prev_index] = Some(dep_node_index);
                            dep_node_index
                        }
                    };

                    #[cfg(debug_assertions)]
                    self.record_edge(dep_node_index, key);
                    (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
                } else {
                    if print_status {
                        eprintln!("[task::red] {:?}", key);
                    }

                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                        None => {
                            let dep_node_index =
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
                            prev_index_to_index[prev_index] = Some(dep_node_index);
                            dep_node_index
                        }
                    };

                    #[cfg(debug_assertions)]
                    self.record_edge(dep_node_index, key);
                    (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                }
            } else {
                if print_status {
                    eprintln!("[task::unknown] {:?}", key);
                }

                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                let mut prev_index_to_index = self.prev_index_to_index.lock();

                let dep_node_index = match prev_index_to_index[prev_index] {
                    Some(dep_node_index) => dep_node_index,
                    None => {
                        let dep_node_index =
                            self.encoder.borrow().send(profiler, key, Fingerprint::ZERO, edges);
                        prev_index_to_index[prev_index] = Some(dep_node_index);
                        dep_node_index
                    }
                };

                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                (dep_node_index, Some((prev_index, DepNodeColor::Red)))
            }
        } else {
            if print_status {
                eprintln!("[task::new] {:?}", key);
            }

            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            // This is a new node: it didn't exist in the previous compilation session.
            let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint);

            (dep_node_index, None)
        }
    }

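    /// Reuses a node from the previous graph in the current session: allocates
    /// (or looks up) its `DepNodeIndex` and re-sends the previous fingerprint
    /// and edges, with edge targets remapped through `prev_index_to_index`.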
    fn promote_node_and_deps_to_current(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &SerializedDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let key = prev_graph.index_to_node(prev_index);
                let dep_node_index = self.encoder.borrow().send(
                    profiler,
                    key,
                    prev_graph.fingerprint_by_index(prev_index),
                    prev_graph
                        .edge_targets_from(prev_index)
                        .iter()
                        .map(|i| prev_index_to_index[*i].unwrap())
                        .collect(),
                );
                prev_index_to_index[prev_index] = Some(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                dep_node_index
            }
        }
    }

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) {
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
            "node from previous graph present in new node collection"
        );
    }
}

/// The capacity of the `reads` field `SmallVec`
const TASK_DEPS_READS_CAP: usize = 8;
type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
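// Note: `read_index` above switches from linearly scanning `reads` to using
// the `read_set` hash set exactly when `reads` reaches this capacity.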

#[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a, K: DepKind> {
    /// New dependencies can be added to the
    /// `TaskDeps`. This is used when executing a 'normal' query
    /// (no `eval_always` modifier)
    Allow(&'a Lock<TaskDeps<K>>),
    /// New dependencies are ignored. This is used when
    /// executing an `eval_always` query, since there's no
    /// need to track dependencies for a query that's always
    /// re-executed. This is also used for `dep_graph.with_ignore`
    Ignore,
    /// Any attempt to add new dependencies will cause a panic.
    /// This is used when decoding a query result from disk,
    /// to ensure that the decoding process doesn't itself
    /// require the execution of any queries.
    Forbid,
}

#[derive(Debug)]
pub struct TaskDeps<K: DepKind> {
    #[cfg(debug_assertions)]
    node: Option<DepNode<K>>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode<K>>,
}

impl<K: DepKind> Default for TaskDeps<K> {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
        }
    }
}

// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;
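// A green node's `DepNodeIndex` is stored shifted up by `COMPRESSED_FIRST_GREEN`,
// so any stored value >= 2 decodes to `Green(value - COMPRESSED_FIRST_GREEN)`.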

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    #[inline]
    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}
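
// A minimal sanity-check sketch (not part of the original file) exercising the
// compressed color encoding above. It assumes the `from_u32` constructors
// generated by `rustc_index::newtype_index!` are usable from a test module here.
#[cfg(test)]
mod dep_node_color_map_tests {
    use super::*;

    #[test]
    fn colors_round_trip_through_the_compressed_encoding() {
        let colors = DepNodeColorMap::new(3);
        let red = SerializedDepNodeIndex::from_u32(0);
        let green = SerializedDepNodeIndex::from_u32(1);
        let untouched = SerializedDepNodeIndex::from_u32(2);

        colors.insert(red, DepNodeColor::Red);
        colors.insert(green, DepNodeColor::Green(DepNodeIndex::from_u32(7)));

        // `get` decodes the packed u32 back into the enum.
        assert!(colors.get(red) == Some(DepNodeColor::Red));
        assert!(colors.get(green) == Some(DepNodeColor::Green(DepNodeIndex::from_u32(7))));
        assert!(colors.get(untouched).is_none());
    }
}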