// compiler/rustc_query_system/src/dep_graph/graph.rs (upstream rustc 1.53.0)
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_data_structures::unlikely;
use rustc_errors::Diagnostic;
use rustc_index::vec::IndexVec;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};

use parking_lot::{Condvar, Mutex};
use smallvec::{smallvec, SmallVec};
use std::collections::hash_map::Entry;
use std::hash::Hash;
use std::marker::PhantomData;
use std::mem;
use std::sync::atomic::Ordering::Relaxed;

use super::prev::PreviousDepGraph;
use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
use crate::query::QueryContext;

#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

#[derive(Clone)]
pub struct DepGraph<K: DepKind> {
    data: Option<Lrc<DepGraphData<K>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex { .. }
}

impl DepNodeIndex {
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
}

impl std::convert::From<DepNodeIndex> for QueryInvocationId {
    #[inline]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

#[derive(PartialEq)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

struct DepGraphData<K: DepKind> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: We don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
    current: CurrentDepGraph<K>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: PreviousDepGraph<K>,

    colors: DepNodeColorMap,

    /// A set of loaded diagnostics that are in the process of being emitted.
    emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,

    /// Used to wait for diagnostics to be emitted.
    emitting_diagnostics_cond_var: Condvar,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
}

pub fn hash_result<HashCtxt, R>(hcx: &mut HashCtxt, result: &R) -> Option<Fingerprint>
where
    R: HashStable<HashCtxt>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);

    Some(stable_hasher.finish())
}
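
// An illustrative, self-contained sketch (not part of the original file):
// `hash_result` reduces a query result to a fingerprint that can be compared
// across sessions. The toy below substitutes std's `DefaultHasher`, which is
// *not* stable across processes, for rustc's `StableHasher`; that
// instability is exactly why rustc maintains its own stable hashing.
#[allow(dead_code)]
fn sketch_hash_result<R: Hash>(result: &R) -> u64 {
    use std::hash::Hasher;
    let mut hasher = std::collections::hash_map::DefaultHasher::new();
    result.hash(&mut hasher);
    // Equal results give equal fingerprints; a changed fingerprint is what
    // later turns a re-executed node red instead of green.
    hasher.finish()
}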

impl<K: DepKind> DepGraph<K> {
    pub fn new(
        prev_graph: PreviousDepGraph<K>,
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> DepGraph<K> {
        let prev_graph_node_count = prev_graph.node_count();

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current: CurrentDepGraph::new(
                    prev_graph_node_count,
                    encoder,
                    record_graph,
                    record_stats,
                ),
                emitting_diagnostics: Default::default(),
                emitting_diagnostics_cond_var: Condvar::new(),
                previous: prev_graph,
                colors: DepNodeColorMap::new(prev_graph_node_count),
            })),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<K> {
        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            K::read_deps(|task_deps| {
                assert!(task_deps.is_none(), "expected no task dependency tracking");
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        K::with_deps(None, op)
    }

    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        self.with_task_impl(
            key,
            cx,
            arg,
            task,
            |_key| {
                Some(TaskDeps {
                    #[cfg(debug_assertions)]
                    node: Some(_key),
                    reads: SmallVec::new(),
                    read_set: Default::default(),
                    phantom_data: PhantomData,
                })
            },
            hash_result,
        )
    }
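
    // Sketch (not part of the original file) of why `task` is a plain `fn`
    // pointer rather than a closure: a `fn(Ctxt, A) -> R` cannot capture its
    // environment, so every input the task sees must flow through `cx` or
    // `arg`, where the dep-graph can see and track it. All names here are
    // hypothetical.
    #[allow(dead_code)]
    fn sketch_run_task<C, A, R>(cx: C, arg: A, task: fn(C, A) -> R) -> R {
        // If `task` were an `impl Fn(..)`, it could close over arbitrary
        // untracked state and read it without creating a dep-graph edge; a
        // fn pointer cannot.
        task(cx, arg)
    }

    #[allow(dead_code)]
    fn sketch_task_usage() {
        // Exactly two explicit inputs, a context and an argument, mirroring
        // `with_task(key, cx, arg, task, hash_result)`.
        let doubled = Self::sketch_run_task(10u32, 2u32, |cx, arg| cx * arg);
        assert_eq!(doubled, 20);
    }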

    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        create_task: fn(DepNode<K>) -> Option<TaskDeps<K>>,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        if let Some(ref data) = self.data {
            let dcx = cx.dep_context();
            let task_deps = create_task(key).map(Lock::new);
            let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
            let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);

            let mut hcx = dcx.create_stable_hashing_context();
            let current_fingerprint = hash_result(&mut hcx, &result);

            let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;

            // Intern the new `DepNode`.
            let (dep_node_index, prev_and_color) = data.current.intern_node(
                dcx.profiler(),
                &data.previous,
                key,
                edges,
                current_fingerprint,
                print_status,
            );

            if let Some((prev_index, color)) = prev_and_color {
                debug_assert!(
                    data.colors.get(prev_index).is_none(),
                    "DepGraph::with_task() - Duplicate DepNodeColor \
                     insertion for {:?}",
                    key
                );

                data.colors.insert(prev_index, color);
            }

            (result, dep_node_index)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            (task(cx, arg), self.next_virtual_depnode_index())
        }
    }

    /// Executes something within an "anonymous" task, that is, a task whose
    /// `DepNode` is determined by the list of inputs it reads.
    pub fn with_anon_task<Ctxt: DepContext<DepKind = K>, OP, R>(
        &self,
        cx: Ctxt,
        dep_kind: K,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!dep_kind.is_eval_always());

        if let Some(ref data) = self.data {
            let task_deps = Lock::new(TaskDeps::default());
            let result = K::with_deps(Some(&task_deps), op);
            let task_deps = task_deps.into_inner();

            // The dep node indices are hashed here instead of hashing the dep nodes of the
            // dependencies. These indices may refer to different nodes per session, but this isn't
            // a problem here because we ensure that the final dep node hash is unique per session
            // by combining it with the per-session random number `anon_id_seed`. This hash only
            // needs to map the dependencies to a single value on a per-session basis.
            let mut hasher = StableHasher::new();
            task_deps.reads.hash(&mut hasher);

            let target_dep_node = DepNode {
                kind: dep_kind,
                // Fingerprint::combine() is faster than sending Fingerprint
                // through the StableHasher (at least as long as StableHasher
                // is so slow).
                hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
            };

            let dep_node_index = data.current.intern_new_node(
                cx.profiler(),
                target_dep_node,
                task_deps.reads,
                Fingerprint::ZERO,
            );

            (result, dep_node_index)
        } else {
            (op(), self.next_virtual_depnode_index())
        }
    }
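
    // Sketch (not part of the original file) of the anonymous-node ID scheme
    // above: hash the list of dependency indices, then combine the result
    // with a per-session random seed so that anon IDs from different
    // sessions cannot collide. The cheap mix below is a stand-in for
    // `Fingerprint::combine`, and std's `DefaultHasher` stands in for
    // `StableHasher`.
    #[allow(dead_code)]
    fn sketch_anon_node_id(session_seed: u64, reads: &[u32]) -> u64 {
        use std::hash::Hasher;
        let mut hasher = std::collections::hash_map::DefaultHasher::new();
        // The indices may denote different nodes in other sessions; that is
        // fine, because the seed makes the final ID session-local anyway.
        reads.hash(&mut hasher);
        session_seed.rotate_left(32) ^ hasher.finish()
    }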

    /// Executes something within an "eval-always" task, which is a task
    /// that runs whenever anything changes.
    pub fn with_eval_always_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        self.with_task_impl(key, cx, arg, task, |_| None, hash_result)
    }

    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            K::read_deps(|task_deps| {
                if let Some(task_deps) = task_deps {
                    let mut task_deps = task_deps.lock();
                    let task_deps = &mut *task_deps;
                    if cfg!(debug_assertions) {
                        data.current.total_read_count.fetch_add(1, Relaxed);
                    }

                    // As long as we only have a low number of reads we can avoid doing a hash
                    // insert and potentially allocating/reallocating the hashmap
                    let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
                        task_deps.reads.iter().all(|other| *other != dep_node_index)
                    } else {
                        task_deps.read_set.insert(dep_node_index)
                    };
                    if new_read {
                        task_deps.reads.push(dep_node_index);
                        if task_deps.reads.len() == TASK_DEPS_READS_CAP {
                            // Fill `read_set` with what we have so far so we can use the hashset
                            // next time
                            task_deps.read_set.extend(task_deps.reads.iter().copied());
                        }

                        #[cfg(debug_assertions)]
                        {
                            if let Some(target) = task_deps.node {
                                if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                    let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                                    if forbidden_edge.test(&src, &target) {
                                        panic!("forbidden edge {:?} -> {:?} created", src, target)
                                    }
                                }
                            }
                        }
                    } else if cfg!(debug_assertions) {
                        data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                    }
                }
            })
        }
    }
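
    // Sketch (not part of the original file) of the deduplication strategy
    // used in `read_index` above: linearly scan the read list while it is
    // short, and switch to a hash set once it reaches the cap. Plain `Vec`
    // and std `HashSet` stand in for `SmallVec` and `FxHashSet`.
    #[allow(dead_code)]
    fn sketch_record_read(
        reads: &mut Vec<u32>,
        read_set: &mut std::collections::HashSet<u32>,
        index: u32,
    ) {
        const CAP: usize = 8; // Mirrors `TASK_DEPS_READS_CAP`.
        // For small lists a linear scan beats hashing, and avoids allocating
        // (or growing) the set at all.
        let new_read = if reads.len() < CAP {
            reads.iter().all(|&other| other != index)
        } else {
            read_set.insert(index)
        };
        if new_read {
            reads.push(index);
            if reads.len() == CAP {
                // Backfill the set so later reads can use it directly.
                read_set.extend(reads.iter().copied());
            }
        }
    }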

    #[inline]
    pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
        self.dep_node_index_of_opt(dep_node).unwrap()
    }

    #[inline]
    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
        let data = self.data.as_ref().unwrap();
        let current = &data.current;

        if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
            current.prev_index_to_index.lock()[prev_index]
        } else {
            current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied()
        }
    }

    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
        self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }

    #[inline(always)]
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = debug_str_gen();
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
                return data.colors.get(prev_index);
            } else {
                // This is a node that did not exist in the previous compilation session.
                return None;
            }
        }

        None
    }

    /// Try to read a node index for the node `dep_node`.
    /// A node will have an index when it has already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node when
    /// a node index can be found for that node.
    pub fn try_mark_green_and_read<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
            debug_assert!(self.is_green(&dep_node));
            self.read_index(dep_node_index);
            (prev_index, dep_node_index)
        })
    }

    pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!dep_node.kind.is_eval_always());

        // Return None if the dep graph is disabled
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<DepNodeIndex> {
        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);

        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(!self.dep_node_exists(dep_node));
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green
        debug_assert!(!dep_node.kind.is_eval_always());

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        for &dep_dep_node_index in prev_deps {
            let dep_dep_node_color = data.colors.get(dep_dep_node_index);

            match dep_dep_node_color {
                Some(DepNodeColor::Green(_)) => {
                    // This dependency has been marked as green before, we are
                    // still fine and can continue with checking the other
                    // dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) --- found dependency {:?} to \
                         be immediately green",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                }
                Some(DepNodeColor::Red) => {
                    // We found a dependency whose value has changed compared to
                    // the previous compilation session. We cannot mark the
                    // DepNode as green and also don't need to bother with
                    // checking any of the other dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) - END - dependency {:?} was \
                         immediately red",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    return None;
                }
                None => {
                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);

                    // We don't know the state of this dependency. If it isn't
                    // an eval_always node, let's try to mark it green recursively.
                    if !dep_dep_node.kind.is_eval_always() {
                        debug!(
                            "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \
                             is unknown, trying to mark it green",
                            dep_node, dep_dep_node, dep_dep_node.hash,
                        );

                        let node_index = self.try_mark_previous_green(
                            tcx,
                            data,
                            dep_dep_node_index,
                            dep_dep_node,
                        );
                        if node_index.is_some() {
                            debug!(
                                "try_mark_previous_green({:?}) --- managed to MARK \
                                 dependency {:?} as green",
                                dep_node, dep_dep_node
                            );
                            continue;
                        }
                    }

                    // We failed to mark it green, so we try to force the query.
                    debug!(
                        "try_mark_previous_green({:?}) --- trying to force \
                         dependency {:?}",
                        dep_node, dep_dep_node
                    );
                    if tcx.try_force_from_dep_node(dep_dep_node) {
                        let dep_dep_node_color = data.colors.get(dep_dep_node_index);

                        match dep_dep_node_color {
                            Some(DepNodeColor::Green(_)) => {
                                debug!(
                                    "try_mark_previous_green({:?}) --- managed to \
                                     FORCE dependency {:?} to green",
                                    dep_node, dep_dep_node
                                );
                            }
                            Some(DepNodeColor::Red) => {
                                debug!(
                                    "try_mark_previous_green({:?}) - END - \
                                     dependency {:?} was red after forcing",
                                    dep_node, dep_dep_node
                                );
                                return None;
                            }
                            None => {
                                if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
                                    panic!(
                                        "try_mark_previous_green() - Forcing the DepNode \
                                         should have set its color"
                                    )
                                } else {
                                    // If the query we just forced has resulted in
                                    // some kind of compilation error, we cannot rely on
                                    // the dep-node color having been properly updated.
                                    // This means that the query system has reached an
                                    // invalid state. We let the compiler continue (by
                                    // returning `None`) so it can emit error messages
                                    // and wind down, but rely on the fact that this
                                    // invalid state will not be persisted to the
                                    // incremental compilation cache because of
                                    // compilation errors being present.
                                    debug!(
                                        "try_mark_previous_green({:?}) - END - \
                                         dependency {:?} resulted in compilation error",
                                        dep_node, dep_dep_node
                                    );
                                    return None;
                                }
                            }
                        }
                    } else {
                        // The DepNode could not be forced.
                        debug!(
                            "try_mark_previous_green({:?}) - END - dependency {:?} \
                             could not be forced",
                            dep_node, dep_dep_node
                        );
                        return None;
                    }
                }
            }
        }

        // If we got here without hitting a `return`, then all dependencies of
        // this DepNode could be marked as green. Therefore we can also mark
        // this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently.

        // We allocate an entry for the node in the current dependency graph and
        // add all the appropriate edges imported from the previous graph.
        let dep_node_index = data.current.promote_node_and_deps_to_current(
            tcx.dep_context().profiler(),
            &data.previous,
            prev_dep_node_index,
        );

        // ... emitting any stored diagnostics ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere.
        // Maybe store a list on disk and encode this fact in the DepNodeState.
        let diagnostics = tcx.load_diagnostics(prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
             insertion for {:?}",
            dep_node
        );

        if unlikely!(!diagnostics.is_empty()) {
            self.emit_diagnostics(tcx, data, dep_node_index, prev_dep_node_index, diagnostics);
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here.
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
        Some(dep_node_index)
    }
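
    // Sketch (not part of the original file) of the walk above over a toy
    // graph: `deps[i]` lists the dependencies of node `i`, and `colors[i]`
    // is `None` (unknown), `Some(true)` (green) or `Some(false)` (red). A
    // node can be marked green only if every dependency is, or can be,
    // marked green; query forcing and diagnostics are omitted.
    #[allow(dead_code)]
    fn sketch_try_mark_green(
        node: usize,
        deps: &[Vec<usize>],
        colors: &mut [Option<bool>],
    ) -> bool {
        for &dep in &deps[node] {
            match colors[dep] {
                Some(true) => {} // Already green: keep checking the rest.
                Some(false) => return false, // One red dependency suffices.
                None => {
                    // Unknown state: recurse, as `try_mark_previous_green`
                    // does. The real code would fall back to forcing the
                    // query here; the sketch simply gives up.
                    if !Self::sketch_try_mark_green(dep, deps, colors) {
                        return false;
                    }
                }
            }
        }
        colors[node] = Some(true);
        true
    }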

    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    #[cold]
    #[inline(never)]
    fn emit_diagnostics<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        dep_node_index: DepNodeIndex,
        prev_dep_node_index: SerializedDepNodeIndex,
        diagnostics: Vec<Diagnostic>,
    ) {
        let mut emitting = data.emitting_diagnostics.lock();

        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
            // The node is already green so diagnostics must have been emitted already
            return;
        }

        if emitting.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must emit the diagnostics and signal other potentially waiting
            // threads after.
            mem::drop(emitting);

            // Promote the previous diagnostics to the current session.
            tcx.store_diagnostics(dep_node_index, diagnostics.clone().into());

            let handle = tcx.dep_context().sess().diagnostic();

            for diagnostic in diagnostics {
                handle.emit_diagnostic(&diagnostic);
            }

            // Mark the node as green now that diagnostics are emitted
            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

            // Remove the node from the set
            data.emitting_diagnostics.lock().remove(&dep_node_index);

            // Wake up waiters
            data.emitting_diagnostics_cond_var.notify_all();
        } else {
            // We must wait for the other thread to finish emitting the diagnostics

            loop {
                data.emitting_diagnostics_cond_var.wait(&mut emitting);
                if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index))
                {
                    break;
                }
            }
        }
    }
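
    // Sketch (not part of the original file) of the pattern used above, with
    // std primitives standing in for parking_lot: the first thread to insert
    // the node into the shared set does the work; every other thread waits
    // on a condvar until the shared `done` set shows the work has finished.
    // All names here are hypothetical.
    #[allow(dead_code)]
    fn sketch_emit_once(
        node: u32,
        in_progress: &std::sync::Mutex<std::collections::HashSet<u32>>,
        done: &std::sync::Mutex<std::collections::HashSet<u32>>,
        cond: &std::sync::Condvar,
        work: impl FnOnce(),
    ) {
        let mut guard = in_progress.lock().unwrap();
        if done.lock().unwrap().contains(&node) {
            return; // Another thread already emitted for this node.
        }
        if guard.insert(node) {
            drop(guard); // Do the work without holding the lock.
            work();
            done.lock().unwrap().insert(node);
            in_progress.lock().unwrap().remove(&node);
            cond.notify_all();
        } else {
            // Loop to tolerate spurious wakeups and other nodes' signals.
            while !done.lock().unwrap().contains(&node) {
                guard = cond.wait(guard).unwrap();
            }
        }
    }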

    // Returns true if the given node has been marked as red during the
    // current compilation session. Used in various assertions
    pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node) == Some(DepNodeColor::Red)
    }

    // Returns true if the given node has been marked as green during the
    // current compilation session. Used in various assertions
    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node).map_or(false, |c| c.is_green())
    }

    // This method loads all on-disk cacheable query results into memory, so
    // they can be written out to the new cache file again. Most query results
    // will already be in memory but in the case where we marked something as
    // green but then did not need the value, that value will never have been
    // loaded from disk.
    //
    // This method will only load queries that will end up in the disk cache.
    // Other queries will not be executed.
    pub fn exec_cache_promotions<Ctxt: QueryContext<DepKind = K>>(&self, qcx: Ctxt) {
        let tcx = qcx.dep_context();
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    qcx.try_load_from_on_disk_cache(&dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    // Register reused dep nodes (i.e. nodes we've marked red or green) with the context.
    pub fn register_reused_dep_nodes<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Red) | Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.register_reused_dep_node(&dep_node);
                }
                None => {}
            }
        }
    }

    pub fn print_incremental_info(&self) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().print_incremental_info(
                data.current.total_read_count.load(Relaxed),
                data.current.total_duplicate_read_count.load(Relaxed),
            )
        }
    }

    pub fn encode(&self, profiler: &SelfProfilerRef) -> FileEncodeResult {
        if let Some(data) = &self.data {
            data.current.encoder.steal().finish(profiler)
        } else {
            Ok(())
        }
    }

    fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
}

/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example is the
/// object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is computed over the set
/// of symbols in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved file associated with this CGU.
    pub saved_file: Option<String>,
}
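
// Sketch (not part of the original file) of the reuse check described in the
// doc comment above: hash the symbol set of a codegen unit and compare it to
// the hash saved with the previous session's work product. Names are
// hypothetical, and std hashing stands in for rustc's stable fingerprints.
#[allow(dead_code)]
fn sketch_can_reuse_object_file(prev_symbol_hash: u64, current_symbols: &[&str]) -> bool {
    use std::hash::Hasher;
    // Sort so the hash depends on the set of symbols, not on their order.
    let mut symbols = current_symbols.to_vec();
    symbols.sort_unstable();
    let mut hasher = std::collections::hash_map::DefaultHasher::new();
    symbols.hash(&mut hasher);
    // Equal hashes mean no symbols appeared (e.g. new monomorphizations) or
    // vanished, so the object file from the previous session can be reused.
    hasher.finish() == prev_symbol_hash
}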

// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex { .. }
}

/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `new_node_to_index` map only contains nodes not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `PreviousDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `new_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
pub(super) struct CurrentDepGraph<K: DepKind> {
    encoder: Steal<GraphEncoder<K>>,
    new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
    prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter<K>>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}
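
// Sketch (not part of the original file) of the two-step node lookup the doc
// comment above describes: genuinely new nodes live in a map keyed by the
// node itself, while nodes inherited from the previous graph are found
// through the previous graph's own index plus a dense `prev_index_to_index`
// vector, avoiding a second copy of every `DepNode` key. `String` stands in
// for `DepNode` and plain integers for the index newtypes.
#[allow(dead_code)]
mod sketch_two_step_lookup {
    use std::collections::HashMap;

    pub struct Graph {
        /// Node -> index, for nodes not present in the previous session.
        pub new_node_to_index: HashMap<String, u32>,
        /// Previous-session index -> current index; `None` until promoted.
        pub prev_index_to_index: Vec<Option<u32>>,
    }

    pub fn index_of(
        graph: &Graph,
        prev_node_to_index: &HashMap<String, usize>,
        node: &str,
    ) -> Option<u32> {
        if let Some(&prev_index) = prev_node_to_index.get(node) {
            // Dense vector lookup: more compact and faster than hashing the
            // node a second time.
            graph.prev_index_to_index[prev_index]
        } else {
            graph.new_node_to_index.get(node).copied()
        }
    }
}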

impl<K: DepKind> CurrentDepGraph<K> {
    fn new(
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> CurrentDepGraph<K> {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        // We store a large collection of these in `prev_index_to_index` during
        // non-full incremental builds, and want to ensure that the element size
        // doesn't inadvertently increase.
        static_assert_size!(Option<DepNodeIndex>, 4);

        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        CurrentDepGraph {
            encoder: Steal::new(GraphEncoder::new(
                encoder,
                prev_graph_node_count,
                record_graph,
                record_stats,
            )),
            new_node_to_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed: stable_hasher.finish(),
            #[cfg(debug_assertions)]
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
    }

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
    fn intern_new_node(
        &self,
        profiler: &SelfProfilerRef,
        key: DepNode<K>,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let dep_node_index =
                    self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
                entry.insert(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                dep_node_index
            }
        }
    }

    fn intern_node(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &PreviousDepGraph<K>,
        key: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
        print_status: bool,
    ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
        let print_status = cfg!(debug_assertions) && print_status;

        if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
            // Determine the color and index of the new `DepNode`.
            if let Some(fingerprint) = fingerprint {
                if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
                    if print_status {
                        eprintln!("[task::green] {:?}", key);
                    }

                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                        None => {
                            let dep_node_index =
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
                            prev_index_to_index[prev_index] = Some(dep_node_index);
                            dep_node_index
                        }
                    };

                    #[cfg(debug_assertions)]
                    self.record_edge(dep_node_index, key);
                    (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
                } else {
                    if print_status {
                        eprintln!("[task::red] {:?}", key);
                    }

                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                        None => {
                            let dep_node_index =
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
                            prev_index_to_index[prev_index] = Some(dep_node_index);
                            dep_node_index
                        }
                    };

                    #[cfg(debug_assertions)]
                    self.record_edge(dep_node_index, key);
                    (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                }
            } else {
                if print_status {
                    eprintln!("[task::unknown] {:?}", key);
                }

                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                let mut prev_index_to_index = self.prev_index_to_index.lock();

                let dep_node_index = match prev_index_to_index[prev_index] {
                    Some(dep_node_index) => dep_node_index,
                    None => {
                        let dep_node_index =
                            self.encoder.borrow().send(profiler, key, Fingerprint::ZERO, edges);
                        prev_index_to_index[prev_index] = Some(dep_node_index);
                        dep_node_index
                    }
                };

                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                (dep_node_index, Some((prev_index, DepNodeColor::Red)))
            }
        } else {
            if print_status {
                eprintln!("[task::new] {:?}", key);
            }

            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            // This is a new node: it didn't exist in the previous compilation session.
            let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint);

            (dep_node_index, None)
        }
    }
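
    // Sketch (not part of the original file) of the color decision made in
    // `intern_node` above, with `u64` standing in for `Fingerprint`: returns
    // `Some(true)` for green (the re-computed fingerprint matches the stored
    // one), `Some(false)` for red (it differs, or the query opted out of
    // result hashing via `no_hash`), and `None` for a node with no previous
    // counterpart, which gets no color at all.
    #[allow(dead_code)]
    fn sketch_color_of(
        prev_fingerprint: Option<u64>,
        current_fingerprint: Option<u64>,
    ) -> Option<bool> {
        match (prev_fingerprint, current_fingerprint) {
            // Same result as in the last session: green.
            (Some(prev), Some(current)) if prev == current => Some(true),
            // Different result, or no way to compare (`no_hash`): red.
            (Some(_), _) => Some(false),
            // The node is new this session: no color.
            (None, _) => None,
        }
    }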

    fn promote_node_and_deps_to_current(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &PreviousDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let key = prev_graph.index_to_node(prev_index);
                let dep_node_index = self.encoder.borrow().send(
                    profiler,
                    key,
                    prev_graph.fingerprint_by_index(prev_index),
                    prev_graph
                        .edge_targets_from(prev_index)
                        .iter()
                        .map(|i| prev_index_to_index[*i].unwrap())
                        .collect(),
                );
                prev_index_to_index[prev_index] = Some(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                dep_node_index
            }
        }
    }

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &PreviousDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) {
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
            "node from previous graph present in new node collection"
        );
    }
}

/// The capacity of the `reads` field `SmallVec`
const TASK_DEPS_READS_CAP: usize = 8;
type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;

pub struct TaskDeps<K> {
    #[cfg(debug_assertions)]
    node: Option<DepNode<K>>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode<K>>,
}

impl<K> Default for TaskDeps<K> {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
        }
    }
}

// A data structure that stores `Option<DepNodeColor>` values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    #[inline]
    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}
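
// Sketch (not part of the original file) of the compression scheme above,
// written out as plain encode/decode helpers: the whole `Option<DepNodeColor>`
// fits in one `u32` by reserving 0 for `None` and 1 for `Red`, and shifting
// green indices up by 2, so the color map is a flat array of atomics with no
// separate discriminant.
#[allow(dead_code)]
fn sketch_compress_color(color: Option<DepNodeColor>) -> u32 {
    match color {
        None => COMPRESSED_NONE,
        Some(DepNodeColor::Red) => COMPRESSED_RED,
        // Green indices are shifted past the two reserved values.
        Some(DepNodeColor::Green(index)) => index.as_u32() + COMPRESSED_FIRST_GREEN,
    }
}

#[allow(dead_code)]
fn sketch_decompress_color(value: u32) -> Option<DepNodeColor> {
    match value {
        COMPRESSED_NONE => None,
        COMPRESSED_RED => Some(DepNodeColor::Red),
        value => Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN))),
    }
}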