use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, LockGuard, Lrc, Ordering};
use rustc_data_structures::unlikely;
use rustc_errors::Diagnostic;
use rustc_index::vec::{Idx, IndexVec};
use rustc_serialize::{Encodable, Encoder};

use parking_lot::{Condvar, Mutex};
use smallvec::{smallvec, SmallVec};
use std::collections::hash_map::Entry;
use std::env;
use std::hash::Hash;
use std::marker::PhantomData;
use std::mem;
use std::ops::Range;
use std::sync::atomic::Ordering::Relaxed;

use super::debug::EdgeFilter;
use super::prev::PreviousDepGraph;
use super::query::DepGraphQuery;
use super::serialized::SerializedDepNodeIndex;
use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
use crate::query::QueryContext;

#[derive(Clone)]
pub struct DepGraph<K: DepKind> {
    data: Option<Lrc<DepGraphData<K>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex { .. }
}

impl DepNodeIndex {
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
}

impl std::convert::From<DepNodeIndex> for QueryInvocationId {
    #[inline]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

#[derive(PartialEq)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

struct DepGraphData<K: DepKind> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
    current: CurrentDepGraph<K>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: PreviousDepGraph<K>,

    colors: DepNodeColorMap,

    /// A set of loaded diagnostics that is in the process of being emitted.
    emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,

    /// Used to wait for diagnostics to be emitted.
    emitting_diagnostics_cond_var: Condvar,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
}

pub fn hash_result<HashCtxt, R>(hcx: &mut HashCtxt, result: &R) -> Option<Fingerprint>
where
    R: HashStable<HashCtxt>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);

    Some(stable_hasher.finish())
}

impl<K: DepKind> DepGraph<K> {
    pub fn new(
        prev_graph: PreviousDepGraph<K>,
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
    ) -> DepGraph<K> {
        let prev_graph_node_count = prev_graph.node_count();

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current: CurrentDepGraph::new(prev_graph_node_count),
                emitting_diagnostics: Default::default(),
                emitting_diagnostics_cond_var: Condvar::new(),
                previous: prev_graph,
                colors: DepNodeColorMap::new(prev_graph_node_count),
            })),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<K> {
        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn query(&self) -> DepGraphQuery<K> {
        let data = self.data.as_ref().unwrap();
        let previous = &data.previous;

        // Note locking order: `prev_index_to_index`, then `data`.
        let prev_index_to_index = data.current.prev_index_to_index.lock();
        let data = data.current.data.lock();
        let node_count = data.hybrid_indices.len();
        let edge_count = self.edge_count(&data);

        let mut nodes = Vec::with_capacity(node_count);
        let mut edge_list_indices = Vec::with_capacity(node_count);
        let mut edge_list_data = Vec::with_capacity(edge_count);

        // See `DepGraph`'s `Encodable` implementation for notes on the approach used here.

        edge_list_data.extend(data.unshared_edges.iter().map(|i| i.index()));

        for &hybrid_index in data.hybrid_indices.iter() {
            match hybrid_index.into() {
                HybridIndex::New(new_index) => {
                    nodes.push(data.new.nodes[new_index]);
                    let edges = &data.new.edges[new_index];
                    edge_list_indices.push((edges.start.index(), edges.end.index()));
                }
                HybridIndex::Red(red_index) => {
                    nodes.push(previous.index_to_node(data.red.node_indices[red_index]));
                    let edges = &data.red.edges[red_index];
                    edge_list_indices.push((edges.start.index(), edges.end.index()));
                }
                HybridIndex::LightGreen(lg_index) => {
                    nodes.push(previous.index_to_node(data.light_green.node_indices[lg_index]));
                    let edges = &data.light_green.edges[lg_index];
                    edge_list_indices.push((edges.start.index(), edges.end.index()));
                }
                HybridIndex::DarkGreen(prev_index) => {
                    nodes.push(previous.index_to_node(prev_index));

                    let edges_iter = previous
                        .edge_targets_from(prev_index)
                        .iter()
                        .map(|&dst| prev_index_to_index[dst].unwrap().index());

                    let start = edge_list_data.len();
                    edge_list_data.extend(edges_iter);
                    let end = edge_list_data.len();
                    edge_list_indices.push((start, end));
                }
            }
        }

        debug_assert_eq!(nodes.len(), node_count);
        debug_assert_eq!(edge_list_indices.len(), node_count);
        debug_assert_eq!(edge_list_data.len(), edge_count);

        DepGraphQuery::new(&nodes[..], &edge_list_indices[..], &edge_list_data[..])
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            K::read_deps(|task_deps| {
                assert!(task_deps.is_none(), "expected no task dependency tracking");
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        K::with_deps(None, op)
    }

    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        self.with_task_impl(
            key,
            cx,
            arg,
            task,
            |_key| {
                Some(TaskDeps {
                    #[cfg(debug_assertions)]
                    node: Some(_key),
                    reads: SmallVec::new(),
                    read_set: Default::default(),
                    phantom_data: PhantomData,
                })
            },
            hash_result,
        )
    }

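    // Illustrative usage of `with_task` (added for exposition; `tcx`, `def_id`,
    // and `type_of` are hypothetical stand-ins for a real context, argument,
    // and query provider function):
    //
    //     let (ty, dep_node_index) =
    //         tcx.dep_graph().with_task(dep_node, tcx, def_id, type_of, hash_result);
    //
    // Note that `type_of` must be coercible to a `fn(Ctxt, A) -> R` pointer --
    // a free function rather than a capturing closure, per the doc comment above.
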
    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        create_task: fn(DepNode<K>) -> Option<TaskDeps<K>>,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        if let Some(ref data) = self.data {
            let dcx = cx.dep_context();
            let task_deps = create_task(key).map(Lock::new);
            let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
            let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);

            let mut hcx = dcx.create_stable_hashing_context();
            let current_fingerprint = hash_result(&mut hcx, &result);

            let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;

            // Intern the new `DepNode`.
            let dep_node_index = if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
                // Determine the color and index of the new `DepNode`.
                let (color, dep_node_index) = if let Some(current_fingerprint) = current_fingerprint
                {
                    if current_fingerprint == data.previous.fingerprint_by_index(prev_index) {
                        if print_status {
                            eprintln!("[task::green] {:?}", key);
                        }

                        // This is a light green node: it existed in the previous compilation,
                        // its query was re-executed, and it has the same result as before.
                        let dep_node_index =
                            data.current.intern_light_green_node(&data.previous, prev_index, edges);

                        (DepNodeColor::Green(dep_node_index), dep_node_index)
                    } else {
                        if print_status {
                            eprintln!("[task::red] {:?}", key);
                        }

                        // This is a red node: it existed in the previous compilation, its query
                        // was re-executed, but it has a different result from before.
                        let dep_node_index = data.current.intern_red_node(
                            &data.previous,
                            prev_index,
                            edges,
                            current_fingerprint,
                        );

                        (DepNodeColor::Red, dep_node_index)
                    }
                } else {
                    if print_status {
                        eprintln!("[task::unknown] {:?}", key);
                    }

                    // This is a red node, effectively: it existed in the previous compilation
                    // session, its query was re-executed, but it doesn't compute a result hash
                    // (i.e. it represents a `no_hash` query), so we have no way of determining
                    // whether or not the result was the same as before.
                    let dep_node_index = data.current.intern_red_node(
                        &data.previous,
                        prev_index,
                        edges,
                        Fingerprint::ZERO,
                    );

                    (DepNodeColor::Red, dep_node_index)
                };

                debug_assert!(
                    data.colors.get(prev_index).is_none(),
                    "DepGraph::with_task() - Duplicate DepNodeColor \
                    insertion for {:?}",
                    key
                );

                data.colors.insert(prev_index, color);
                dep_node_index
            } else {
                if print_status {
                    eprintln!("[task::new] {:?}", key);
                }

                // This is a new node: it didn't exist in the previous compilation session.
                data.current.intern_new_node(
                    &data.previous,
                    key,
                    edges,
                    current_fingerprint.unwrap_or(Fingerprint::ZERO),
                )
            };

            (result, dep_node_index)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            (task(cx, arg), self.next_virtual_depnode_index())
        }
    }

    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
    pub fn with_anon_task<OP, R>(&self, dep_kind: K, op: OP) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!dep_kind.is_eval_always());

        if let Some(ref data) = self.data {
            let task_deps = Lock::new(TaskDeps::default());
            let result = K::with_deps(Some(&task_deps), op);
            let task_deps = task_deps.into_inner();

            // The dep node indices are hashed here instead of hashing the dep nodes of the
            // dependencies. These indices may refer to different nodes per session, but this isn't
            // a problem here because we ensure that the final dep node hash is per-session only,
            // by combining it with the per-session random number `anon_id_seed`. This hash only
            // needs to map the dependencies to a single value on a per-session basis.
            let mut hasher = StableHasher::new();
            task_deps.reads.hash(&mut hasher);

            let target_dep_node = DepNode {
                kind: dep_kind,
                // Fingerprint::combine() is faster than sending Fingerprint
                // through the StableHasher (at least as long as StableHasher
                // is so slow).
                hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
            };

            let dep_node_index = data.current.intern_new_node(
                &data.previous,
                target_dep_node,
                task_deps.reads,
                Fingerprint::ZERO,
            );

            (result, dep_node_index)
        } else {
            (op(), self.next_virtual_depnode_index())
        }
    }

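    // Illustrative usage of `with_anon_task` (added for exposition; `dep_kind`,
    // `tcx`, and `evaluate_obligation` are hypothetical): the `DepNode` is
    // derived from whatever the closure reads, so no key is supplied up front:
    //
    //     let (result, dep_node_index) =
    //         dep_graph.with_anon_task(dep_kind, || evaluate_obligation(tcx, obligation));
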
    /// Executes something within an "eval-always" task, which is a task
    /// that runs whenever anything changes.
    pub fn with_eval_always_task<Ctxt: HasDepContext<DepKind = K>, A, R>(
        &self,
        key: DepNode<K>,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex) {
        self.with_task_impl(key, cx, arg, task, |_| None, hash_result)
    }

    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            K::read_deps(|task_deps| {
                if let Some(task_deps) = task_deps {
                    let mut task_deps = task_deps.lock();
                    let task_deps = &mut *task_deps;
                    if cfg!(debug_assertions) {
                        data.current.total_read_count.fetch_add(1, Relaxed);
                    }

                    // As long as we only have a low number of reads we can avoid doing a hash
                    // insert and potentially allocating/reallocating the hashmap
                    let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
                        task_deps.reads.iter().all(|other| *other != dep_node_index)
                    } else {
                        task_deps.read_set.insert(dep_node_index)
                    };
                    if new_read {
                        task_deps.reads.push(dep_node_index);
                        if task_deps.reads.len() == TASK_DEPS_READS_CAP {
                            // Fill `read_set` with what we have so far so we can use the hashset
                            // next time
                            task_deps.read_set.extend(task_deps.reads.iter().copied());
                        }

                        #[cfg(debug_assertions)]
                        {
                            if let Some(target) = task_deps.node {
                                if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                    let src = self.dep_node_of(dep_node_index);
                                    if forbidden_edge.test(&src, &target) {
                                        panic!("forbidden edge {:?} -> {:?} created", src, target)
                                    }
                                }
                            }
                        }
                    } else if cfg!(debug_assertions) {
                        data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                    }
                }
            })
        }
    }

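    // Illustrative, self-contained sketch (added for exposition) of the dedup
    // strategy used in `read_index` above: reads are deduplicated by linear
    // scan while the list is short, and by hash set once a cap is reached.
    // `CAP` stands in for `TASK_DEPS_READS_CAP`:
    //
    //     const CAP: usize = 8;
    //     let mut reads: Vec<u32> = Vec::new();
    //     let mut read_set = std::collections::HashSet::new();
    //     let mut record = |idx: u32| {
    //         let new_read = if reads.len() < CAP {
    //             reads.iter().all(|&other| other != idx)
    //         } else {
    //             read_set.insert(idx)
    //         };
    //         if new_read {
    //             reads.push(idx);
    //             if reads.len() == CAP {
    //                 // Seed the set so it can take over from here on.
    //                 read_set.extend(reads.iter().copied());
    //             }
    //         }
    //     };
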
    #[inline]
    pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
        self.dep_node_index_of_opt(dep_node).unwrap()
    }

    #[inline]
    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
        let data = self.data.as_ref().unwrap();
        let current = &data.current;

        if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
            current.prev_index_to_index.lock()[prev_index]
        } else {
            current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied()
        }
    }

    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
        self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
    }

    #[inline]
    pub fn dep_node_of(&self, dep_node_index: DepNodeIndex) -> DepNode<K> {
        let data = self.data.as_ref().unwrap();
        let previous = &data.previous;
        let data = data.current.data.lock();

        match data.hybrid_indices[dep_node_index].into() {
            HybridIndex::New(new_index) => data.new.nodes[new_index],
            HybridIndex::Red(red_index) => previous.index_to_node(data.red.node_indices[red_index]),
            HybridIndex::LightGreen(light_green_index) => {
                previous.index_to_node(data.light_green.node_indices[light_green_index])
            }
            HybridIndex::DarkGreen(prev_index) => previous.index_to_node(prev_index),
        }
    }

    #[inline]
    pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint {
        let data = self.data.as_ref().unwrap();
        let previous = &data.previous;
        let data = data.current.data.lock();

        match data.hybrid_indices[dep_node_index].into() {
            HybridIndex::New(new_index) => data.new.fingerprints[new_index],
            HybridIndex::Red(red_index) => data.red.fingerprints[red_index],
            HybridIndex::LightGreen(light_green_index) => {
                previous.fingerprint_by_index(data.light_green.node_indices[light_green_index])
            }
            HybridIndex::DarkGreen(prev_index) => previous.fingerprint_by_index(prev_index),
        }
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }

    #[inline(always)]
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = debug_str_gen();
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn edge_count(&self, node_data: &LockGuard<'_, DepNodeData<K>>) -> usize {
        let data = self.data.as_ref().unwrap();
        let previous = &data.previous;

        let mut edge_count = node_data.unshared_edges.len();

        for &hybrid_index in node_data.hybrid_indices.iter() {
            if let HybridIndex::DarkGreen(prev_index) = hybrid_index.into() {
                edge_count += previous.edge_targets_from(prev_index).len()
            }
        }

        edge_count
    }

    pub fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
                return data.colors.get(prev_index);
            } else {
                // This is a node that did not exist in the previous compilation
                // session, so we consider it to be red.
                return Some(DepNodeColor::Red);
            }
        }

        None
    }

    /// Tries to read a node index for the node `dep_node`.
    /// A node will have an index when it has already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node when
    /// a node index can be found for that node.
    pub fn try_mark_green_and_read<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
            debug_assert!(self.is_green(&dep_node));
            self.read_index(dep_node_index);
            (prev_index, dep_node_index)
        })
    }

    pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        dep_node: &DepNode<K>,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!dep_node.kind.is_eval_always());

        // Return None if the dep graph is disabled
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

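    // Added summary (not in the original source) of the per-dependency logic
    // in `try_mark_previous_green` below:
    //
    //   dependency color | action
    //   -----------------+----------------------------------------------------
    //   Green            | fine, check the next dependency
    //   Red              | the node cannot be green; return `None`
    //   Unknown          | recurse; if that fails, force the query and re-check
    //                    | its color (Red or still-unknown => return `None`)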
    /// Try to mark a dep-node which existed in the previous compilation session as green.
    fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<DepNodeIndex> {
        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);

        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(!self.dep_node_exists(dep_node));
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green
        debug_assert!(!dep_node.kind.is_eval_always());

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        for &dep_dep_node_index in prev_deps {
            let dep_dep_node_color = data.colors.get(dep_dep_node_index);

            match dep_dep_node_color {
                Some(DepNodeColor::Green(_)) => {
                    // This dependency has been marked as green before, we are
                    // still fine and can continue with checking the other
                    // dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) --- found dependency {:?} to \
                        be immediately green",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                }
                Some(DepNodeColor::Red) => {
                    // We found a dependency the value of which has changed
                    // compared to the previous compilation session. We cannot
                    // mark the DepNode as green and also don't need to bother
                    // with checking any of the other dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) - END - dependency {:?} was \
                        immediately red",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    return None;
                }
                None => {
                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);

                    // We don't know the state of this dependency. If it isn't
                    // an eval_always node, let's try to mark it green recursively.
                    if !dep_dep_node.kind.is_eval_always() {
                        debug!(
                            "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \
                            is unknown, trying to mark it green",
                            dep_node, dep_dep_node, dep_dep_node.hash,
                        );

                        let node_index = self.try_mark_previous_green(
                            tcx,
                            data,
                            dep_dep_node_index,
                            dep_dep_node,
                        );
                        if node_index.is_some() {
                            debug!(
                                "try_mark_previous_green({:?}) --- managed to MARK \
                                dependency {:?} as green",
                                dep_node, dep_dep_node
                            );
                            continue;
                        }
                    }

                    // We failed to mark it green, so we try to force the query.
                    debug!(
                        "try_mark_previous_green({:?}) --- trying to force \
                        dependency {:?}",
                        dep_node, dep_dep_node
                    );
                    if tcx.try_force_from_dep_node(dep_dep_node) {
                        let dep_dep_node_color = data.colors.get(dep_dep_node_index);

                        match dep_dep_node_color {
                            Some(DepNodeColor::Green(_)) => {
                                debug!(
                                    "try_mark_previous_green({:?}) --- managed to \
                                    FORCE dependency {:?} to green",
                                    dep_node, dep_dep_node
                                );
                            }
                            Some(DepNodeColor::Red) => {
                                debug!(
                                    "try_mark_previous_green({:?}) - END - \
                                    dependency {:?} was red after forcing",
                                    dep_node, dep_dep_node
                                );
                                return None;
                            }
                            None => {
                                if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
                                    panic!(
                                        "try_mark_previous_green() - Forcing the DepNode \
                                        should have set its color"
                                    )
                                } else {
                                    // If the query we just forced has resulted in
                                    // some kind of compilation error, we cannot rely on
                                    // the dep-node color having been properly updated.
                                    // This means that the query system has reached an
                                    // invalid state. We let the compiler continue (by
                                    // returning `None`) so it can emit error messages
                                    // and wind down, but rely on the fact that this
                                    // invalid state will not be persisted to the
                                    // incremental compilation cache because of
                                    // compilation errors being present.
                                    debug!(
                                        "try_mark_previous_green({:?}) - END - \
                                        dependency {:?} resulted in compilation error",
                                        dep_node, dep_dep_node
                                    );
                                    return None;
                                }
                            }
                        }
                    } else {
                        // The DepNode could not be forced.
                        debug!(
                            "try_mark_previous_green({:?}) - END - dependency {:?} \
                            could not be forced",
                            dep_node, dep_dep_node
                        );
                        return None;
                    }
                }
            }
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently

        let dep_node_index = {
            // We allocate an entry for the node in the current dependency graph and
            // add all the appropriate edges imported from the previous graph
            data.current.intern_dark_green_node(&data.previous, prev_dep_node_index)
        };

        // ... emitting any stored diagnostic ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
        // Maybe store a list on disk and encode this fact in the DepNodeState
        let diagnostics = tcx.load_diagnostics(prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
            insertion for {:?}",
            dep_node
        );

        if unlikely!(!diagnostics.is_empty()) {
            self.emit_diagnostics(tcx, data, dep_node_index, prev_dep_node_index, diagnostics);
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
        Some(dep_node_index)
    }

    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    #[cold]
    #[inline(never)]
    fn emit_diagnostics<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        dep_node_index: DepNodeIndex,
        prev_dep_node_index: SerializedDepNodeIndex,
        diagnostics: Vec<Diagnostic>,
    ) {
        let mut emitting = data.emitting_diagnostics.lock();

        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
            // The node is already green so diagnostics must have been emitted already
            return;
        }

        if emitting.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must emit the diagnostics and signal other potentially waiting
            // threads after.
            mem::drop(emitting);

            // Promote the previous diagnostics to the current session.
            tcx.store_diagnostics(dep_node_index, diagnostics.clone().into());

            let handle = tcx.dep_context().sess().diagnostic();

            for diagnostic in diagnostics {
                handle.emit_diagnostic(&diagnostic);
            }

            // Mark the node as green now that diagnostics are emitted
            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

            // Remove the node from the set
            data.emitting_diagnostics.lock().remove(&dep_node_index);

            // Wake up waiters
            data.emitting_diagnostics_cond_var.notify_all();
        } else {
            // We must wait for the other thread to finish emitting the diagnostic

            loop {
                data.emitting_diagnostics_cond_var.wait(&mut emitting);
                if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index))
                {
                    break;
                }
            }
        }
    }

    // Returns true if the given node has been marked as green during the
    // current compilation session. Used in various assertions
    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node).map_or(false, |c| c.is_green())
    }

    // This method loads all on-disk cacheable query results into memory, so
    // they can be written out to the new cache file again. Most query results
    // will already be in memory but in the case where we marked something as
    // green but then did not need the value, that value will never have been
    // loaded from disk.
    //
    // This method will only load queries that will end up in the disk cache.
    // Other queries will not be executed.
    pub fn exec_cache_promotions<Ctxt: QueryContext<DepKind = K>>(&self, qcx: Ctxt) {
        let tcx = qcx.dep_context();
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    qcx.try_load_from_on_disk_cache(&dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    // Register reused dep nodes (i.e. nodes we've marked red or green) with the context.
    pub fn register_reused_dep_nodes<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Red) | Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.register_reused_dep_node(&dep_node);
                }
                None => {}
            }
        }
    }

    pub fn print_incremental_info(&self) {
        #[derive(Clone)]
        struct Stat<Kind: DepKind> {
            kind: Kind,
            node_counter: u64,
            edge_counter: u64,
        }

        let data = self.data.as_ref().unwrap();
        let prev = &data.previous;
        let current = &data.current;
        let data = current.data.lock();

        let mut stats: FxHashMap<_, Stat<K>> = FxHashMap::with_hasher(Default::default());

        for &hybrid_index in data.hybrid_indices.iter() {
            let (kind, edge_count) = match hybrid_index.into() {
                HybridIndex::New(new_index) => {
                    let kind = data.new.nodes[new_index].kind;
                    let edge_range = &data.new.edges[new_index];
                    (kind, edge_range.end.as_usize() - edge_range.start.as_usize())
                }
                HybridIndex::Red(red_index) => {
                    let kind = prev.index_to_node(data.red.node_indices[red_index]).kind;
                    let edge_range = &data.red.edges[red_index];
                    (kind, edge_range.end.as_usize() - edge_range.start.as_usize())
                }
                HybridIndex::LightGreen(lg_index) => {
                    let kind = prev.index_to_node(data.light_green.node_indices[lg_index]).kind;
                    let edge_range = &data.light_green.edges[lg_index];
                    (kind, edge_range.end.as_usize() - edge_range.start.as_usize())
                }
                HybridIndex::DarkGreen(prev_index) => {
                    let kind = prev.index_to_node(prev_index).kind;
                    let edge_count = prev.edge_targets_from(prev_index).len();
                    (kind, edge_count)
                }
            };

            let stat = stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 });
            stat.node_counter += 1;
            stat.edge_counter += edge_count as u64;
        }

        let total_node_count = data.hybrid_indices.len();
        let total_edge_count = self.edge_count(&data);

        // Drop the lock guard.
        std::mem::drop(data);

        let mut stats: Vec<_> = stats.values().cloned().collect();
        stats.sort_by_key(|s| -(s.node_counter as i64));

        const SEPARATOR: &str = "[incremental] --------------------------------\
                                 ----------------------------------------------\
                                 ------------";

        eprintln!("[incremental]");
        eprintln!("[incremental] DepGraph Statistics");
        eprintln!("{}", SEPARATOR);
        eprintln!("[incremental]");
        eprintln!("[incremental] Total Node Count: {}", total_node_count);
        eprintln!("[incremental] Total Edge Count: {}", total_edge_count);

        if cfg!(debug_assertions) {
            let total_edge_reads = current.total_read_count.load(Relaxed);
            let total_duplicate_edge_reads = current.total_duplicate_read_count.load(Relaxed);

            eprintln!("[incremental] Total Edge Reads: {}", total_edge_reads);
            eprintln!("[incremental] Total Duplicate Edge Reads: {}", total_duplicate_edge_reads);
        }

        eprintln!("[incremental]");

        eprintln!(
            "[incremental] {:<36}| {:<17}| {:<12}| {:<17}|",
            "Node Kind", "Node Frequency", "Node Count", "Avg. Edge Count"
        );

        eprintln!(
            "[incremental] -------------------------------------\
            |------------------\
            |-------------\
            |------------------|"
        );

        for stat in stats {
            let node_kind_ratio = (100.0 * (stat.node_counter as f64)) / (total_node_count as f64);
            let node_kind_avg_edges = (stat.edge_counter as f64) / (stat.node_counter as f64);

            eprintln!(
                "[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
                format!("{:?}", stat.kind),
                node_kind_ratio,
                stat.node_counter,
                node_kind_avg_edges,
            );
        }

        eprintln!("{}", SEPARATOR);
        eprintln!("[incremental]");
    }

    fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
}

impl<E: Encoder, K: DepKind + Encodable<E>> Encodable<E> for DepGraph<K> {
    fn encode(&self, e: &mut E) -> Result<(), E::Error> {
        // We used to serialize the dep graph by creating and serializing a `SerializedDepGraph`
        // using data copied from the `DepGraph`. But copying created a large memory spike, so we
        // now serialize directly from the `DepGraph` as if it's a `SerializedDepGraph`. Because we
        // deserialize that data into a `SerializedDepGraph` in the next compilation session, we
        // need `DepGraph`'s `Encodable` and `SerializedDepGraph`'s `Decodable` implementations to
        // be in sync. If you update this encoding, be sure to update the decoding, and vice-versa.

        let data = self.data.as_ref().unwrap();
        let prev = &data.previous;

        // Note locking order: `prev_index_to_index`, then `data`.
        let prev_index_to_index = data.current.prev_index_to_index.lock();
        let data = data.current.data.lock();
        let new = &data.new;
        let red = &data.red;
        let lg = &data.light_green;

        let node_count = data.hybrid_indices.len();
        let edge_count = self.edge_count(&data);

        // `rustc_middle::ty::query::OnDiskCache` expects nodes to be encoded in `DepNodeIndex`
        // order. The edges in `edge_list_data` don't need to be in a particular order, as long as
        // each node references its edges as a contiguous range within it. Therefore, we can encode
        // `edge_list_data` directly from `unshared_edges`. It meets the above requirements, as
        // each non-dark-green node already knows the range of edges to reference within it, which
        // it will encode in `edge_list_indices`. Dark green nodes, however, don't have their edges
        // in `unshared_edges`, so we need to add them to `edge_list_data`.

        use HybridIndex::*;

        // Encoded values (nodes, etc.) are explicitly typed below to avoid inadvertently
        // serializing data in the wrong format (i.e. one incompatible with `SerializedDepGraph`).
        e.emit_struct("SerializedDepGraph", 4, |e| {
            e.emit_struct_field("nodes", 0, |e| {
                // `SerializedDepGraph` expects this to be encoded as a sequence of `DepNode`s.
                e.emit_seq(node_count, |e| {
                    for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() {
                        let node: DepNode<K> = match hybrid_index.into() {
                            New(i) => new.nodes[i],
                            Red(i) => prev.index_to_node(red.node_indices[i]),
                            LightGreen(i) => prev.index_to_node(lg.node_indices[i]),
                            DarkGreen(prev_index) => prev.index_to_node(prev_index),
                        };

                        e.emit_seq_elt(seq_index, |e| node.encode(e))?;
                    }

                    Ok(())
                })
            })?;

            e.emit_struct_field("fingerprints", 1, |e| {
                // `SerializedDepGraph` expects this to be encoded as a sequence of `Fingerprint`s.
                e.emit_seq(node_count, |e| {
                    for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() {
                        let fingerprint: Fingerprint = match hybrid_index.into() {
                            New(i) => new.fingerprints[i],
                            Red(i) => red.fingerprints[i],
                            LightGreen(i) => prev.fingerprint_by_index(lg.node_indices[i]),
                            DarkGreen(prev_index) => prev.fingerprint_by_index(prev_index),
                        };

                        e.emit_seq_elt(seq_index, |e| fingerprint.encode(e))?;
                    }

                    Ok(())
                })
            })?;

            e.emit_struct_field("edge_list_indices", 2, |e| {
                // `SerializedDepGraph` expects this to be encoded as a sequence of `(u32, u32)`s.
                e.emit_seq(node_count, |e| {
                    // Dark green node edges start after the unshared (all other nodes') edges.
                    let mut dark_green_edge_index = data.unshared_edges.len();

                    for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() {
                        let edge_indices: (u32, u32) = match hybrid_index.into() {
                            New(i) => (new.edges[i].start.as_u32(), new.edges[i].end.as_u32()),
                            Red(i) => (red.edges[i].start.as_u32(), red.edges[i].end.as_u32()),
                            LightGreen(i) => (lg.edges[i].start.as_u32(), lg.edges[i].end.as_u32()),
                            DarkGreen(prev_index) => {
                                let edge_count = prev.edge_targets_from(prev_index).len();
                                let start = dark_green_edge_index as u32;
                                dark_green_edge_index += edge_count;
                                let end = dark_green_edge_index as u32;
                                (start, end)
                            }
                        };

                        e.emit_seq_elt(seq_index, |e| edge_indices.encode(e))?;
                    }

                    assert_eq!(dark_green_edge_index, edge_count);

                    Ok(())
                })
            })?;

            e.emit_struct_field("edge_list_data", 3, |e| {
                // `SerializedDepGraph` expects this to be encoded as a sequence of
                // `SerializedDepNodeIndex`.
                e.emit_seq(edge_count, |e| {
                    for (seq_index, &edge) in data.unshared_edges.iter().enumerate() {
                        let serialized_edge = SerializedDepNodeIndex::new(edge.index());
                        e.emit_seq_elt(seq_index, |e| serialized_edge.encode(e))?;
                    }

                    let mut seq_index = data.unshared_edges.len();

                    for &hybrid_index in data.hybrid_indices.iter() {
                        if let DarkGreen(prev_index) = hybrid_index.into() {
                            for &edge in prev.edge_targets_from(prev_index) {
                                // Dark green node edges are stored in the previous graph
                                // and must be converted to edges in the current graph,
                                // and then serialized as `SerializedDepNodeIndex`.
                                let serialized_edge = SerializedDepNodeIndex::new(
                                    prev_index_to_index[edge].as_ref().unwrap().index(),
                                );

                                e.emit_seq_elt(seq_index, |e| serialized_edge.encode(e))?;
                                seq_index += 1;
                            }
                        }
                    }

                    assert_eq!(seq_index, edge_count);

                    Ok(())
                })
            })
        })
    }
}

/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example is
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved file associated with this CGU.
    pub saved_file: Option<String>,
}

// The maximum value of the following index types leaves the upper two bits unused
// so that we can store multiple index types in `CompressedHybridIndex`, and use
// those bits to encode which index type it contains.

// Index type for `NewDepNodeData`.
rustc_index::newtype_index! {
    struct NewDepNodeIndex {
        MAX = 0x7FFF_FFFF
    }
}

// Index type for `RedDepNodeData`.
rustc_index::newtype_index! {
    struct RedDepNodeIndex {
        MAX = 0x7FFF_FFFF
    }
}

// Index type for `LightGreenDepNodeData`.
rustc_index::newtype_index! {
    struct LightGreenDepNodeIndex {
        MAX = 0x7FFF_FFFF
    }
}

/// Compressed representation of `HybridIndex` enum. Bits unused by the
/// contained index types are used to encode which index type it contains.
#[derive(Copy, Clone)]
struct CompressedHybridIndex(u32);

impl CompressedHybridIndex {
    const NEW_TAG: u32 = 0b0000_0000_0000_0000_0000_0000_0000_0000;
    const RED_TAG: u32 = 0b0100_0000_0000_0000_0000_0000_0000_0000;
    const LIGHT_GREEN_TAG: u32 = 0b1000_0000_0000_0000_0000_0000_0000_0000;
    const DARK_GREEN_TAG: u32 = 0b1100_0000_0000_0000_0000_0000_0000_0000;

    const TAG_MASK: u32 = 0b1100_0000_0000_0000_0000_0000_0000_0000;
    const INDEX_MASK: u32 = !Self::TAG_MASK;
}

impl From<NewDepNodeIndex> for CompressedHybridIndex {
    #[inline]
    fn from(index: NewDepNodeIndex) -> Self {
        CompressedHybridIndex(Self::NEW_TAG | index.as_u32())
    }
}

impl From<RedDepNodeIndex> for CompressedHybridIndex {
    #[inline]
    fn from(index: RedDepNodeIndex) -> Self {
        CompressedHybridIndex(Self::RED_TAG | index.as_u32())
    }
}

impl From<LightGreenDepNodeIndex> for CompressedHybridIndex {
    #[inline]
    fn from(index: LightGreenDepNodeIndex) -> Self {
        CompressedHybridIndex(Self::LIGHT_GREEN_TAG | index.as_u32())
    }
}

impl From<SerializedDepNodeIndex> for CompressedHybridIndex {
    #[inline]
    fn from(index: SerializedDepNodeIndex) -> Self {
        CompressedHybridIndex(Self::DARK_GREEN_TAG | index.as_u32())
    }
}

/// Contains an index into one of several node data collections. Elsewhere, we
/// store `CompressedHybridIndex` instead of this to save space, but convert to
/// this type during processing to take advantage of the enum match ergonomics.
enum HybridIndex {
    New(NewDepNodeIndex),
    Red(RedDepNodeIndex),
    LightGreen(LightGreenDepNodeIndex),
    DarkGreen(SerializedDepNodeIndex),
}

impl From<CompressedHybridIndex> for HybridIndex {
    #[inline]
    fn from(hybrid_index: CompressedHybridIndex) -> Self {
        let index = hybrid_index.0 & CompressedHybridIndex::INDEX_MASK;

        match hybrid_index.0 & CompressedHybridIndex::TAG_MASK {
            CompressedHybridIndex::NEW_TAG => HybridIndex::New(NewDepNodeIndex::from_u32(index)),
            CompressedHybridIndex::RED_TAG => HybridIndex::Red(RedDepNodeIndex::from_u32(index)),
            CompressedHybridIndex::LIGHT_GREEN_TAG => {
                HybridIndex::LightGreen(LightGreenDepNodeIndex::from_u32(index))
            }
            CompressedHybridIndex::DARK_GREEN_TAG => {
                HybridIndex::DarkGreen(SerializedDepNodeIndex::from_u32(index))
            }
            _ => unreachable!(),
        }
    }
}

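// A small added sanity sketch (not part of the original source): a tagged
// index should round-trip losslessly through the compressed form, since the
// tag lives in the two high bits that the index types never use.
#[cfg(test)]
mod compressed_hybrid_index_tests {
    use super::*;

    #[test]
    fn new_index_round_trips_through_compressed_form() {
        let index = NewDepNodeIndex::from_u32(42);
        let compressed = CompressedHybridIndex::from(index);
        match HybridIndex::from(compressed) {
            HybridIndex::New(i) => assert_eq!(i.as_u32(), 42),
            _ => panic!("tag bits decoded to the wrong variant"),
        }
    }
}
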
// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex { .. }
}

/// Data for nodes in the current graph, divided into different collections
/// based on their presence in the previous graph, and if present, their color.
/// We divide nodes this way because different types of nodes are able to share
/// more or less data with the previous graph.
///
/// To enable more sharing, we distinguish between two kinds of green nodes.
/// Light green nodes are nodes in the previous graph that have been marked
/// green because we re-executed their queries and the results were the same as
/// in the previous session. Dark green nodes are nodes in the previous graph
/// that have been marked green because we were able to mark all of their
/// dependencies green.
///
/// Both light and dark green nodes can share the dep node and fingerprint with
/// the previous graph, but for light green nodes, we can't be sure that the
/// edges can be shared without comparing them against the previous edges, so we
/// store them directly (an approach in which we compare edges with the previous
/// edges to see if they can be shared was evaluated, but was not found to be
/// very profitable).
///
/// For dark green nodes, we can share everything with the previous graph, which
/// is why the `HybridIndex::DarkGreen` enum variant contains the index of the
/// node in the previous graph, and why we don't have a separate collection for
/// dark green node data--the collection is the `PreviousDepGraph` itself.
///
/// (Note that for dark green nodes, the edges in the previous graph
/// (`SerializedDepNodeIndex`s) must be converted to edges in the current graph
/// (`DepNodeIndex`s). `CurrentDepGraph` contains `prev_index_to_index`, which
/// can perform this conversion. It should always be possible, as by definition,
/// a dark green node is one whose dependencies from the previous session have
/// all been marked green--which means `prev_index_to_index` contains them.)
///
/// Node data is stored in parallel vectors to eliminate the padding between
/// elements that would be needed to satisfy alignment requirements of the
/// structure that would contain all of a node's data. We could group tightly
/// packed subsets of node data together and use fewer vectors, but for
/// consistency's sake, we use separate vectors for each piece of data.
struct DepNodeData<K> {
    /// Data for nodes not in previous graph.
    new: NewDepNodeData<K>,

    /// Data for nodes in previous graph that have been marked red.
    red: RedDepNodeData,

    /// Data for nodes in previous graph that have been marked light green.
    light_green: LightGreenDepNodeData,

    /// Edges for all nodes other than dark-green ones. Edges for each node
    /// occupy a contiguous region of this collection, which a node can reference
    /// using two indices. Storing edges this way rather than using an `EdgesVec`
    /// for each node reduces memory consumption by a not insignificant amount
    /// when compiling large crates. The downside is that we have to copy into
    /// this collection the edges from the `EdgesVec`s that are built up during
    /// query execution. But this is mostly balanced out by the more efficient
    /// implementation of `DepGraph::serialize` enabled by this representation.
    unshared_edges: IndexVec<EdgeIndex, DepNodeIndex>,

    /// Mapping from `DepNodeIndex` to an index into one of the collections
    /// above, indicating which of them contains the node's data.
    ///
    /// This collection is wasteful in time and space during incr-full builds,
    /// because for those, all nodes are new. However, the waste is relatively
    /// small, and the maintenance cost of avoiding using this for incr-full
    /// builds is somewhat high and prone to bugginess. It does not seem worth
    /// it at the time of this writing, but we may want to revisit the idea.
    hybrid_indices: IndexVec<DepNodeIndex, CompressedHybridIndex>,
}

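// Added illustration (not part of the original source) of the parallel-vector
// rationale above: storing heterogeneous per-node fields together pays
// alignment padding per element, while parallel vectors do not. The field
// sizes below are stand-ins, not the real `DepNodeData` layout.
#[cfg(test)]
mod parallel_vector_layout_sketch {
    #[test]
    fn interleaved_layout_pays_padding_per_element() {
        #[allow(dead_code)]
        struct Interleaved {
            node_index: u32,  // 4 bytes
            fingerprint: u64, // 8 bytes, forcing 8-byte alignment
        }
        // 12 bytes of data round up to 16: 4 bytes of padding per element.
        assert_eq!(std::mem::size_of::<Interleaved>(), 16);
        // Two parallel vectors store exactly 12 bytes per element instead.
        let n = 1000;
        let (indices, fingerprints) = (vec![0u32; n], vec![0u64; n]);
        assert_eq!(
            indices.len() * std::mem::size_of::<u32>()
                + fingerprints.len() * std::mem::size_of::<u64>(),
            n * 12
        );
    }
}
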
/// Data for nodes not in the previous graph. Since we cannot share any data
/// with the previous graph, we must store all of such a node's data here.
struct NewDepNodeData<K> {
    nodes: IndexVec<NewDepNodeIndex, DepNode<K>>,
    edges: IndexVec<NewDepNodeIndex, Range<EdgeIndex>>,
    fingerprints: IndexVec<NewDepNodeIndex, Fingerprint>,
}

/// Data for nodes in previous graph that have been marked red. We can share the
/// dep node with the previous graph, but the edges may be different, and the
/// fingerprint is known to be different, so we store the latter two directly.
struct RedDepNodeData {
    node_indices: IndexVec<RedDepNodeIndex, SerializedDepNodeIndex>,
    edges: IndexVec<RedDepNodeIndex, Range<EdgeIndex>>,
    fingerprints: IndexVec<RedDepNodeIndex, Fingerprint>,
}

/// Data for nodes in previous graph that have been marked green because we
/// re-executed their queries and the results were the same as in the previous
/// session. We can share the dep node and the fingerprint with the previous
/// graph, but the edges may be different, so we store them directly.
struct LightGreenDepNodeData {
    node_indices: IndexVec<LightGreenDepNodeIndex, SerializedDepNodeIndex>,
    edges: IndexVec<LightGreenDepNodeIndex, Range<EdgeIndex>>,
}

1391/// `CurrentDepGraph` stores the dependency graph for the current session. It
1392/// will be populated as we run queries or tasks. We never remove nodes from the
1393/// graph: they are only added.
e74abb32 1394///
fc512014
XL
1395/// The nodes in it are identified by a `DepNodeIndex`. Internally, this maps to
1396/// a `HybridIndex`, which identifies which collection in the `data` field
1397/// contains a node's data. Which collection is used for a node depends on
1398/// whether the node was present in the `PreviousDepGraph`, and if so, the color
1399/// of the node. Each type of node can share more or less data with the previous
1400/// graph. When possible, we can store just the index of the node in the
1401/// previous graph, rather than duplicating its data in our own collections.
1402/// This is important, because these graph structures are some of the largest in
1403/// the compiler.
e74abb32 1404///
fc512014
XL
1405/// For the same reason, we also avoid storing `DepNode`s more than once as map
1406/// keys. The `new_node_to_index` map only contains nodes not in the previous
1407/// graph, and we map nodes in the previous graph to indices via a two-step
1408/// mapping. `PreviousDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
1409/// and the `prev_index_to_index` vector (which is more compact and faster than
1410/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
e74abb32 1411///
fc512014
XL
1412/// This struct uses three locks internally. The `data`, `new_node_to_index`,
1413/// and `prev_index_to_index` fields are locked separately. Operations that take
1414/// a `DepNodeIndex` typically just access the `data` field.
e74abb32 1415///
fc512014
XL
1416/// We only need to manipulate at most two locks simultaneously:
1417/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
1418/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
1419/// first, and `data` second.
ba9703b0 1420pub(super) struct CurrentDepGraph<K> {
fc512014
XL
1421 data: Lock<DepNodeData<K>>,
1422 new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
1423 prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
e74abb32
XL
1424
1425 /// Used to trap when a specific edge is added to the graph.
1426 /// This is used for debug purposes and is only active with `debug_assertions`.
0731742a 1427 #[allow(dead_code)]
ea8adc8c
XL
1428 forbidden_edge: Option<EdgeFilter>,
1429
9fa01778
XL
1430 /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
1431 /// their edges. This has the beneficial side-effect that multiple anonymous
1432 /// nodes can be coalesced into one without changing the semantics of the
1433 /// dependency graph. However, the merging of nodes can lead to a subtle
1434 /// problem during red-green marking: The color of an anonymous node from
1435 /// the current session might "shadow" the color of the node with the same
1436 /// ID from the previous session. In order to side-step this problem, we make
1437 /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
1438 /// This is implemented by mixing a session-key into the ID fingerprint of
1439 /// each anon node. The session-key is just a random number generated when
1440 /// the `DepGraph` is created.
ea8adc8c 1441 anon_id_seed: Fingerprint,
abe05a73 1442
e74abb32
XL
1443 /// These are simple counters that are for profiling and
1444 /// debugging and only active with `debug_assertions`.
1445 total_read_count: AtomicU64,
1446 total_duplicate_read_count: AtomicU64,
ea8adc8c
XL
1447}
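
// A minimal sketch, not rustc's actual interning code (which lives elsewhere
// in this file): mixing the session-random `anon_id_seed` into an anonymous
// node's ID fingerprint ensures that anon IDs from different sessions cannot
// collide. `sketch_anon_fingerprint` is a hypothetical helper shown only for
// illustration.
#[allow(dead_code)]
fn sketch_anon_fingerprint(anon_id_seed: Fingerprint, edges: &EdgesVec) -> Fingerprint {
    // Hash the edge list; anon nodes with identical edges hash identically
    // and can therefore be coalesced...
    let mut hasher = StableHasher::new();
    edges.hash(&mut hasher);
    // ...then mix in the per-session seed, so that equal edge lists from
    // *different* sessions still produce different IDs.
    anon_id_seed.combine(hasher.finish())
}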

impl<K: DepKind> CurrentDepGraph<K> {
    fn new(prev_graph_node_count: usize) -> CurrentDepGraph<K> {
        use std::time::{SystemTime, UNIX_EPOCH};

        // Seed the anon-node session key with the current time, which is
        // effectively unique per compilation session.
        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        let forbidden_edge = if cfg!(debug_assertions) {
            match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
                Ok(s) => match EdgeFilter::new(&s) {
                    Ok(f) => Some(f),
                    Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
                },
                Err(_) => None,
            }
        } else {
            None
        };

        // Pre-allocate the dep node structures. We over-allocate a little so
        // that we hopefully don't have to re-allocate during this compilation
        // session. The over-allocation for new nodes is 2% plus a small
        // constant to account for the fact that in very small crates 2% might
        // not be enough. The allocation for red and green node data doesn't
        // include a constant, as we don't want to allocate anything for these
        // structures during full incremental builds, where they aren't used.
        //
        // These estimates are based on the distribution of node and edge counts
        // seen in rustc-perf benchmarks, adjusted somewhat to account for the
        // fact that these benchmarks aren't perfectly representative.
        //
        // FIXME Use a collection type that doesn't copy node and edge data and
        // doesn't grow multiplicatively on reallocation. Without such a
        // collection, or some other solution with the same effect, there is a
        // performance hazard here in both time and space, as growing these
        // collections means copying a large amount of data and doubling
        // already large buffer capacities. A solution for this would also make
        // it less important to get these estimates right.
        let new_node_count_estimate = (prev_graph_node_count * 2) / 100 + 200;
        let red_node_count_estimate = (prev_graph_node_count * 3) / 100;
        let light_green_node_count_estimate = (prev_graph_node_count * 25) / 100;
        let total_node_count_estimate = prev_graph_node_count + new_node_count_estimate;

        let average_edges_per_node_estimate = 6;
        let unshared_edge_count_estimate = average_edges_per_node_estimate
            * (new_node_count_estimate + red_node_count_estimate + light_green_node_count_estimate);
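        // For example, with 1,000,000 nodes in the previous graph, this
        // pre-allocates capacity for roughly 20,200 new, 30,000 red, and
        // 250,000 light green nodes, plus 6 * (20,200 + 30,000 + 250,000),
        // i.e. about 1.8 million, unshared edges.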

        // We store a large collection of these in `prev_index_to_index` during
        // non-full incremental builds, and want to ensure that the element size
        // doesn't inadvertently increase.
        static_assert_size!(Option<DepNodeIndex>, 4);

        CurrentDepGraph {
            data: Lock::new(DepNodeData {
                new: NewDepNodeData {
                    nodes: IndexVec::with_capacity(new_node_count_estimate),
                    edges: IndexVec::with_capacity(new_node_count_estimate),
                    fingerprints: IndexVec::with_capacity(new_node_count_estimate),
                },
                red: RedDepNodeData {
                    node_indices: IndexVec::with_capacity(red_node_count_estimate),
                    edges: IndexVec::with_capacity(red_node_count_estimate),
                    fingerprints: IndexVec::with_capacity(red_node_count_estimate),
                },
                light_green: LightGreenDepNodeData {
                    node_indices: IndexVec::with_capacity(light_green_node_count_estimate),
                    edges: IndexVec::with_capacity(light_green_node_count_estimate),
                },
                unshared_edges: IndexVec::with_capacity(unshared_edge_count_estimate),
                hybrid_indices: IndexVec::with_capacity(total_node_count_estimate),
            }),
            new_node_to_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed: stable_hasher.finish(),
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    fn intern_new_node(
        &self,
        prev_graph: &PreviousDepGraph<K>,
        dep_node: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        debug_assert!(
            prev_graph.node_to_index_opt(&dep_node).is_none(),
            "node in previous graph should be interned using one \
            of `intern_red_node`, `intern_light_green_node`, etc."
        );

        match self.new_node_to_index.get_shard_by_value(&dep_node).lock().entry(dep_node) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
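                // Lock order: the `new_node_to_index` shard lock is already
                // held here, and `data` is acquired second, as required by the
                // locking discipline documented on `CurrentDepGraph`.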
                let data = &mut *self.data.lock();
                let new_index = data.new.nodes.push(dep_node);
                add_edges(&mut data.unshared_edges, &mut data.new.edges, edges);
                data.new.fingerprints.push(fingerprint);
                let dep_node_index = data.hybrid_indices.push(new_index.into());
                entry.insert(dep_node_index);
                dep_node_index
            }
        }
    }

    fn intern_red_node(
        &self,
        prev_graph: &PreviousDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
        edges: EdgesVec,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let data = &mut *self.data.lock();
                let red_index = data.red.node_indices.push(prev_index);
                add_edges(&mut data.unshared_edges, &mut data.red.edges, edges);
                data.red.fingerprints.push(fingerprint);
                let dep_node_index = data.hybrid_indices.push(red_index.into());
                prev_index_to_index[prev_index] = Some(dep_node_index);
                dep_node_index
            }
        }
    }

    fn intern_light_green_node(
        &self,
        prev_graph: &PreviousDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
        edges: EdgesVec,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let data = &mut *self.data.lock();
                let light_green_index = data.light_green.node_indices.push(prev_index);
                add_edges(&mut data.unshared_edges, &mut data.light_green.edges, edges);
                let dep_node_index = data.hybrid_indices.push(light_green_index.into());
                prev_index_to_index[prev_index] = Some(dep_node_index);
                dep_node_index
            }
        }
    }

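    /// Interns a node from the previous graph that was marked green without
    /// re-execution ("dark green"). Such a node shares all of its data (dep
    /// node, fingerprint, and edges) with the previous graph, so the only
    /// thing recorded here is the mapping to a current `DepNodeIndex`.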
    fn intern_dark_green_node(
        &self,
        prev_graph: &PreviousDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let mut data = self.data.lock();
                let dep_node_index = data.hybrid_indices.push(prev_index.into());
                prev_index_to_index[prev_index] = Some(dep_node_index);
                dep_node_index
            }
        }
    }

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &PreviousDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) {
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
            "node from previous graph present in new node collection"
        );
    }
}

#[inline]
fn add_edges<I: Idx>(
    edges: &mut IndexVec<EdgeIndex, DepNodeIndex>,
    edge_indices: &mut IndexVec<I, Range<EdgeIndex>>,
    new_edges: EdgesVec,
) {
    // Append this node's edges to the shared edge collection and record the
    // contiguous range they occupy.
    let start = edges.next_index();
    edges.extend(new_edges);
    let end = edges.next_index();
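    // E.g., if `edges` held 10 entries and `new_edges` holds 3, the new edges
    // now occupy `edges[10..13]`, and `10..13` is recorded for this node.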
    edge_indices.push(start..end);
}

/// The capacity of the `reads` field `SmallVec`: up to this many reads are
/// stored inline, without a heap allocation.
const TASK_DEPS_READS_CAP: usize = 8;
type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;

pub struct TaskDeps<K> {
    #[cfg(debug_assertions)]
    node: Option<DepNode<K>>,
    /// The dependencies read during this task, in order of first read.
    reads: EdgesVec,
    /// The same reads as a set, used to cheaply detect duplicates.
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode<K>>,
}

impl<K> Default for TaskDeps<K> {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
        }
    }
}

/// A data structure that stores `Option<DepNodeColor>` values as a contiguous
/// array, using one `u32` per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;
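// The encoding: `None` is stored as `COMPRESSED_NONE` (0), `Some(Red)` as
// `COMPRESSED_RED` (1), and `Some(Green(i))` as `i + COMPRESSED_FIRST_GREEN`,
// so e.g. a green color wrapping `DepNodeIndex` 5 is stored as 7.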

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    #[inline]
    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}