use errors::Diagnostic;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_index::vec::{Idx, IndexVec};
use smallvec::SmallVec;
use rustc_data_structures::sync::{Lrc, Lock, AtomicU32, AtomicU64, Ordering};
use rustc_data_structures::sharded::{self, Sharded};
use std::sync::atomic::Ordering::SeqCst;
use std::env;
use std::hash::Hash;
use std::collections::hash_map::Entry;
use std::mem;
use crate::ty::{self, TyCtxt};
use parking_lot::{Mutex, Condvar};

use crate::ich::{StableHashingContext, StableHashingContextProvider, Fingerprint};

use super::debug::EdgeFilter;
use super::dep_node::{DepNode, DepKind, WorkProductId};
use super::query::DepGraphQuery;
use super::safe::DepGraphSafe;
use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
use super::prev::PreviousDepGraph;

#[derive(Clone)]
pub struct DepGraph {
    data: Option<Lrc<DepGraphData>>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex { .. }
}

impl DepNodeIndex {
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
}

#[derive(PartialEq)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex)
}

impl DepNodeColor {
    pub fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

struct DepGraphData {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph
    /// into the current one anymore.
    current: CurrentDepGraph,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: PreviousDepGraph,

    colors: DepNodeColorMap,

    /// A set of loaded diagnostics that is in the process of being emitted.
    emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,

    /// Used to wait for diagnostics to be emitted.
    emitting_diagnostics_cond_var: Condvar,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    dep_node_debug: Lock<FxHashMap<DepNode, String>>,
}

pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Option<Fingerprint>
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);

    Some(stable_hasher.finish())
}

impl DepGraph {
    pub fn new(prev_graph: PreviousDepGraph,
               prev_work_products: FxHashMap<WorkProductId, WorkProduct>) -> DepGraph {
        let prev_graph_node_count = prev_graph.node_count();

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current: CurrentDepGraph::new(prev_graph_node_count),
                emitting_diagnostics: Default::default(),
                emitting_diagnostics_cond_var: Condvar::new(),
                previous: prev_graph,
                colors: DepNodeColorMap::new(prev_graph_node_count),
            })),
        }
    }

    pub fn new_disabled() -> DepGraph {
        DepGraph {
            data: None,
        }
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn query(&self) -> DepGraphQuery {
        let data = self.data.as_ref().unwrap().current.data.lock();
        let nodes: Vec<_> = data.iter().map(|n| n.node).collect();
        let mut edges = Vec::new();
        for (from, edge_targets) in data.iter().map(|d| (d.node, &d.edges)) {
            for &edge_target in edge_targets.iter() {
                let to = data[edge_target].node;
                edges.push((from, to));
            }
        }

        DepGraphQuery::new(&nodes[..], &edges[..])
    }

    pub fn assert_ignored(&self)
    {
        if let Some(..) = self.data {
            ty::tls::with_context_opt(|icx| {
                let icx = if let Some(icx) = icx { icx } else { return };
                assert!(icx.task_deps.is_none(), "expected no task dependency tracking");
            })
        }
    }

    pub fn with_ignore<OP,R>(&self, op: OP) -> R
        where OP: FnOnce() -> R
    {
        ty::tls::with_context(|icx| {
            let icx = ty::tls::ImplicitCtxt {
                task_deps: None,
                ..icx.clone()
            };

            ty::tls::enter_context(&icx, |_| {
                op()
            })
        })
    }

    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc guide]: https://rust-lang.github.io/rustc-guide/incremental-compilation.html
    pub fn with_task<'a, C, A, R>(
        &self,
        key: DepNode,
        cx: C,
        arg: A,
        task: fn(C, A) -> R,
        hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
    {
        self.with_task_impl(key, cx, arg, false, task,
            |_key| Some(TaskDeps {
                #[cfg(debug_assertions)]
                node: Some(_key),
                reads: SmallVec::new(),
                read_set: Default::default(),
            }),
            |data, key, fingerprint, task| {
                data.complete_task(key, task.unwrap(), fingerprint)
            },
            hash_result)
    }
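
    // A minimal sketch of how a caller might invoke `with_task`, based only on the signature
    // above. The names `tcx`, `MY_DEP_NODE`, `some_def_id`, and `compute_value` are
    // hypothetical stand-ins, not items defined in this crate:
    //
    //     let (result, dep_node_index) = tcx.dep_graph.with_task(
    //         MY_DEP_NODE,     // `key`: the DepNode this task produces
    //         tcx,             // `cx`: a DepGraphSafe context
    //         some_def_id,     // `arg`: input handed to the free function
    //         compute_value,   // `task`: fn(C, A) -> R, deliberately *not* a closure
    //         hash_result,     // fingerprints the result for the red/green comparison
    //     );
    //
    // Every tracked read performed inside `compute_value` is recorded in the task's
    // `TaskDeps` and becomes an edge of the newly allocated node.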

    /// Creates a new dep-graph input with value `input`
    pub fn input_task<'a, C, R>(&self,
                                key: DepNode,
                                cx: C,
                                input: R)
                                -> (R, DepNodeIndex)
        where C: DepGraphSafe + StableHashingContextProvider<'a>,
              R: for<'b> HashStable<StableHashingContext<'b>>,
    {
        fn identity_fn<C, A>(_: C, arg: A) -> A {
            arg
        }

        self.with_task_impl(key, cx, input, true, identity_fn,
            |_| None,
            |data, key, fingerprint, _| {
                data.alloc_node(key, SmallVec::new(), fingerprint)
            },
            hash_result::<R>)
    }

    fn with_task_impl<'a, C, A, R>(
        &self,
        key: DepNode,
        cx: C,
        arg: A,
        no_tcx: bool,
        task: fn(C, A) -> R,
        create_task: fn(DepNode) -> Option<TaskDeps>,
        finish_task_and_alloc_depnode: fn(&CurrentDepGraph,
                                          DepNode,
                                          Fingerprint,
                                          Option<TaskDeps>) -> DepNodeIndex,
        hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
    {
        if let Some(ref data) = self.data {
            let task_deps = create_task(key).map(|deps| Lock::new(deps));

            // In incremental mode, hash the result of the task. We don't
            // do anything with the hash yet, but we are computing it
            // anyway so that
            //  - we make sure that the infrastructure works and
            //  - we can get an idea of the runtime cost.
            let mut hcx = cx.get_stable_hashing_context();

            let result = if no_tcx {
                task(cx, arg)
            } else {
                ty::tls::with_context(|icx| {
                    let icx = ty::tls::ImplicitCtxt {
                        task_deps: task_deps.as_ref(),
                        ..icx.clone()
                    };

                    ty::tls::enter_context(&icx, |_| {
                        task(cx, arg)
                    })
                })
            };

            let current_fingerprint = hash_result(&mut hcx, &result);

            let dep_node_index = finish_task_and_alloc_depnode(
                &data.current,
                key,
                current_fingerprint.unwrap_or(Fingerprint::ZERO),
                task_deps.map(|lock| lock.into_inner()),
            );

            let print_status = cfg!(debug_assertions) && hcx.sess().opts.debugging_opts.dep_tasks;

            // Determine the color of the new DepNode.
            if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
                let prev_fingerprint = data.previous.fingerprint_by_index(prev_index);

                let color = if let Some(current_fingerprint) = current_fingerprint {
                    if current_fingerprint == prev_fingerprint {
                        if print_status {
                            eprintln!("[task::green] {:?}", key);
                        }
                        DepNodeColor::Green(dep_node_index)
                    } else {
                        if print_status {
                            eprintln!("[task::red] {:?}", key);
                        }
                        DepNodeColor::Red
                    }
                } else {
                    if print_status {
                        eprintln!("[task::unknown] {:?}", key);
                    }
                    // Mark the node as Red if we can't hash the result
                    DepNodeColor::Red
                };

                debug_assert!(data.colors.get(prev_index).is_none(),
                              "DepGraph::with_task() - Duplicate DepNodeColor \
                              insertion for {:?}", key);

                data.colors.insert(prev_index, color);
            } else {
                if print_status {
                    eprintln!("[task::new] {:?}", key);
                }
            }

            (result, dep_node_index)
        } else {
            (task(cx, arg), DepNodeIndex::INVALID)
        }
    }

    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
    pub fn with_anon_task<OP,R>(&self, dep_kind: DepKind, op: OP) -> (R, DepNodeIndex)
        where OP: FnOnce() -> R
    {
        if let Some(ref data) = self.data {
            let (result, task_deps) = ty::tls::with_context(|icx| {
                let task_deps = Lock::new(TaskDeps {
                    #[cfg(debug_assertions)]
                    node: None,
                    reads: SmallVec::new(),
                    read_set: Default::default(),
                });

                let r = {
                    let icx = ty::tls::ImplicitCtxt {
                        task_deps: Some(&task_deps),
                        ..icx.clone()
                    };

                    ty::tls::enter_context(&icx, |_| {
                        op()
                    })
                };

                (r, task_deps.into_inner())
            });
            let dep_node_index = data.current
                                     .complete_anon_task(dep_kind, task_deps);
            (result, dep_node_index)
        } else {
            (op(), DepNodeIndex::INVALID)
        }
    }
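
    // Illustrative call shape for `with_anon_task`, derived only from the signature above;
    // the closure body and the particular `DepKind` are assumed examples, not code from
    // this crate:
    //
    //     let (result, dep_node_index) =
    //         tcx.dep_graph.with_anon_task(DepKind::TraitSelect, || {
    //             // every tracked read inside this closure is collected; the resulting
    //             // anonymous DepNode is identified by the hash of that read list
    //             do_selection()
    //         });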

    /// Executes something within an "eval-always" task which is a task
    /// that runs whenever anything changes.
    pub fn with_eval_always_task<'a, C, A, R>(
        &self,
        key: DepNode,
        cx: C,
        arg: A,
        task: fn(C, A) -> R,
        hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
    {
        self.with_task_impl(key, cx, arg, false, task,
            |_| None,
            |data, key, fingerprint, _| {
                data.alloc_node(key, smallvec![], fingerprint)
            },
            hash_result)
    }

    #[inline]
    pub fn read(&self, v: DepNode) {
        if let Some(ref data) = self.data {
            let map = data.current.node_to_node_index.get_shard_by_value(&v).lock();
            if let Some(dep_node_index) = map.get(&v).copied() {
                std::mem::drop(map);
                data.read_index(dep_node_index);
            } else {
                bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
            }
        }
    }

    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            data.read_index(dep_node_index);
        }
    }

    #[inline]
    pub fn dep_node_index_of(&self, dep_node: &DepNode) -> DepNodeIndex {
        self.data
            .as_ref()
            .unwrap()
            .current
            .node_to_node_index
            .get_shard_by_value(dep_node)
            .lock()
            .get(dep_node)
            .cloned()
            .unwrap()
    }

    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
        if let Some(ref data) = self.data {
            data.current
                .node_to_node_index
                .get_shard_by_value(&dep_node)
                .lock()
                .contains_key(dep_node)
        } else {
            false
        }
    }

    #[inline]
    pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint {
        let data = self.data.as_ref().expect("dep graph enabled").current.data.lock();
        data[dep_node_index].fingerprint
    }

    pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

    #[inline]
    pub fn prev_dep_node_index_of(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
        self.data.as_ref().unwrap().previous.node_to_index(dep_node)
    }

    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data
            .as_ref()
            .and_then(|data| {
                data.previous_work_products.get(v).cloned()
            })
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }

    #[inline(always)]
    pub fn register_dep_node_debug_str<F>(&self,
                                          dep_node: DepNode,
                                          debug_str_gen: F)
        where F: FnOnce() -> String
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return
        }
        let debug_str = debug_str_gen();
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
        self.data
            .as_ref()?
            .dep_node_debug
            .borrow()
            .get(&dep_node)
            .cloned()
    }

    pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> {
        if cfg!(debug_assertions) {
            let current_dep_graph = &self.data.as_ref().unwrap().current;

            Some((current_dep_graph.total_read_count.load(SeqCst),
                  current_dep_graph.total_duplicate_read_count.load(SeqCst)))
        } else {
            None
        }
    }

    pub fn serialize(&self) -> SerializedDepGraph {
        let data = self.data.as_ref().unwrap().current.data.lock();

        let fingerprints: IndexVec<SerializedDepNodeIndex, _> =
            data.iter().map(|d| d.fingerprint).collect();
        let nodes: IndexVec<SerializedDepNodeIndex, _> =
            data.iter().map(|d| d.node).collect();

        let total_edge_count: usize = data.iter().map(|d| d.edges.len()).sum();

        let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
        let mut edge_list_data = Vec::with_capacity(total_edge_count);

        for (current_dep_node_index, edges) in data.iter_enumerated().map(|(i, d)| (i, &d.edges)) {
            let start = edge_list_data.len() as u32;
            // This should really just be a memcpy :/
            edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index())));
            let end = edge_list_data.len() as u32;

            debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len());
            edge_list_indices.push((start, end));
        }

        debug_assert!(edge_list_data.len() <= ::std::u32::MAX as usize);
        debug_assert_eq!(edge_list_data.len(), total_edge_count);

        SerializedDepGraph {
            nodes,
            fingerprints,
            edge_list_indices,
            edge_list_data,
        }
    }
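
    // The serialized form above is a compact adjacency list: all edge targets are flattened
    // into one `edge_list_data` vector, and `edge_list_indices[n]` stores the `(start, end)`
    // half-open range of node `n`'s targets within it. A small worked example (the indices
    // are made up for illustration):
    //
    //     node 0 -> [2, 3]      edge_list_data    = [2, 3, 0, 1, 2]
    //     node 1 -> [0]         edge_list_indices = [(0, 2), (2, 3), (3, 5)]
    //     node 2 -> [1, 2]
    //
    // so the edges of node `n` are `edge_list_data[start..end]`.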

    pub fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
                return data.colors.get(prev_index)
            } else {
                // This is a node that did not exist in the previous compilation
                // session, so we consider it to be red.
                return Some(DepNodeColor::Red)
            }
        }

        None
    }

    /// Tries to read a node index for the node `dep_node`.
    /// A node will have an index when it has already been marked green, or when we can mark
    /// it green. This function will mark the current task as a reader of the specified node
    /// when a node index can be found for that node.
    pub fn try_mark_green_and_read(
        &self,
        tcx: TyCtxt<'_>,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
            debug_assert!(self.is_green(&dep_node));
            self.read_index(dep_node_index);
            (prev_index, dep_node_index)
        })
    }

    pub fn try_mark_green(
        &self,
        tcx: TyCtxt<'_>,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!dep_node.kind.is_eval_always());

        // Return None if the dep graph is disabled
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(
                    tcx,
                    data,
                    prev_index,
                    &dep_node
                ).map(|dep_node_index| {
                    (prev_index, dep_node_index)
                })
            }
        }
    }
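
    // The marking logic below boils down to the following recursion, shown here as a
    // simplified sketch (eval-always special cases, diagnostics, and error handling omitted):
    //
    //     fn try_mark_previous_green(node) -> Option<DepNodeIndex> {
    //         for dep in previous_graph.edges_of(node) {
    //             match color(dep) {
    //                 Green(_) => continue,            // fine, keep checking
    //                 Red      => return None,         // a changed input: give up
    //                 Unknown  => {
    //                     // recurse; if that fails, try to force (re-run) the query and
    //                     // only give up if the re-run also comes out red
    //                     try_mark_previous_green(dep)
    //                         .or_else(|| force_and_check_color(dep))?;
    //                 }
    //             }
    //         }
    //         Some(intern_green_copy_of(node))         // all deps green
    //     }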

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    fn try_mark_previous_green<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        data: &DepGraphData,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode,
    ) -> Option<DepNodeIndex> {
        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);

        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(!data.current
                               .node_to_node_index
                               .get_shard_by_value(dep_node)
                               .lock()
                               .contains_key(dep_node));
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green
        debug_assert!(!dep_node.kind.is_eval_always());

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        let mut current_deps = SmallVec::new();

        for &dep_dep_node_index in prev_deps {
            let dep_dep_node_color = data.colors.get(dep_dep_node_index);

            match dep_dep_node_color {
                Some(DepNodeColor::Green(node_index)) => {
                    // This dependency has been marked as green before; we are
                    // still fine and can continue with checking the other
                    // dependencies.
                    debug!("try_mark_previous_green({:?}) --- found dependency {:?} to \
                            be immediately green",
                            dep_node,
                            data.previous.index_to_node(dep_dep_node_index));
                    current_deps.push(node_index);
                }
                Some(DepNodeColor::Red) => {
                    // We found a dependency the value of which has changed
                    // compared to the previous compilation session. We cannot
                    // mark the DepNode as green and also don't need to bother
                    // with checking any of the other dependencies.
                    debug!("try_mark_previous_green({:?}) - END - dependency {:?} was \
                            immediately red",
                            dep_node,
                            data.previous.index_to_node(dep_dep_node_index));
                    return None
                }
                None => {
                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);

                    // We don't know the state of this dependency. If it isn't
                    // an eval_always node, let's try to mark it green recursively.
                    if !dep_dep_node.kind.is_eval_always() {
                        debug!("try_mark_previous_green({:?}) --- state of dependency {:?} \
                                is unknown, trying to mark it green", dep_node,
                                dep_dep_node);

                        let node_index = self.try_mark_previous_green(
                            tcx,
                            data,
                            dep_dep_node_index,
                            dep_dep_node
                        );
                        if let Some(node_index) = node_index {
                            debug!("try_mark_previous_green({:?}) --- managed to MARK \
                                    dependency {:?} as green", dep_node, dep_dep_node);
                            current_deps.push(node_index);
                            continue;
                        }
                    } else {
                        match dep_dep_node.kind {
                            DepKind::Hir |
                            DepKind::HirBody |
                            DepKind::CrateMetadata => {
                                if dep_dep_node.extract_def_id(tcx).is_none() {
                                    // If the node does not exist anymore, we
                                    // just fail to mark green.
                                    return None
                                } else {
                                    // If the node does exist, it should have
                                    // been pre-allocated.
                                    bug!("DepNode {:?} should have been \
                                          pre-allocated but wasn't.",
                                          dep_dep_node)
                                }
                            }
                            _ => {
                                // For other kinds of nodes it's OK to be
                                // forced.
                            }
                        }
                    }

                    // We failed to mark it green, so we try to force the query.
                    debug!("try_mark_previous_green({:?}) --- trying to force \
                            dependency {:?}", dep_node, dep_dep_node);
                    if crate::ty::query::force_from_dep_node(tcx, dep_dep_node) {
                        let dep_dep_node_color = data.colors.get(dep_dep_node_index);

                        match dep_dep_node_color {
                            Some(DepNodeColor::Green(node_index)) => {
                                debug!("try_mark_previous_green({:?}) --- managed to \
                                        FORCE dependency {:?} to green",
                                        dep_node, dep_dep_node);
                                current_deps.push(node_index);
                            }
                            Some(DepNodeColor::Red) => {
                                debug!("try_mark_previous_green({:?}) - END - \
                                        dependency {:?} was red after forcing",
                                        dep_node,
                                        dep_dep_node);
                                return None
                            }
                            None => {
                                if !tcx.sess.has_errors_or_delayed_span_bugs() {
                                    bug!("try_mark_previous_green() - Forcing the DepNode \
                                          should have set its color")
                                } else {
                                    // If the query we just forced has resulted in
                                    // some kind of compilation error, we cannot rely on
                                    // the dep-node color having been properly updated.
                                    // This means that the query system has reached an
                                    // invalid state. We let the compiler continue (by
                                    // returning `None`) so it can emit error messages
                                    // and wind down, but rely on the fact that this
                                    // invalid state will not be persisted to the
                                    // incremental compilation cache because of
                                    // compilation errors being present.
                                    debug!("try_mark_previous_green({:?}) - END - \
                                            dependency {:?} resulted in compilation error",
                                            dep_node,
                                            dep_dep_node);
                                    return None
                                }
                            }
                        }
                    } else {
                        // The DepNode could not be forced.
                        debug!("try_mark_previous_green({:?}) - END - dependency {:?} \
                                could not be forced", dep_node, dep_dep_node);
                        return None
                    }
                }
            }
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently

        let dep_node_index = {
            // Copy the fingerprint from the previous graph,
            // so we don't have to recompute it
            let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index);

            // We allocate an entry for the node in the current dependency graph and
            // add all the appropriate edges imported from the previous graph
            data.current.intern_node(*dep_node, current_deps, fingerprint)
        };

        // ... emitting any stored diagnostic ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
        // Maybe store a list on disk and encode this fact in the DepNodeState
        let diagnostics = tcx.queries.on_disk_cache
                             .load_diagnostics(tcx, prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(data.colors.get(prev_dep_node_index).is_none(),
                      "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
                      insertion for {:?}", dep_node);

        if unlikely!(diagnostics.len() > 0) {
            self.emit_diagnostics(
                tcx,
                data,
                dep_node_index,
                prev_dep_node_index,
                diagnostics
            );
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
        Some(dep_node_index)
    }

    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    #[cold]
    #[inline(never)]
    fn emit_diagnostics<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        data: &DepGraphData,
        dep_node_index: DepNodeIndex,
        prev_dep_node_index: SerializedDepNodeIndex,
        diagnostics: Vec<Diagnostic>,
    ) {
        let mut emitting = data.emitting_diagnostics.lock();

        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
            // The node is already green so diagnostics must have been emitted already
            return;
        }

        if emitting.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must emit the diagnostics and signal other potentially waiting
            // threads after.
            mem::drop(emitting);

            // Promote the previous diagnostics to the current session.
            tcx.queries.on_disk_cache
               .store_diagnostics(dep_node_index, diagnostics.clone().into());

            let handle = tcx.sess.diagnostic();

            for diagnostic in diagnostics {
                handle.emit_diagnostic(&diagnostic);
            }

            // Mark the node as green now that diagnostics are emitted
            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

            // Remove the node from the set
            data.emitting_diagnostics.lock().remove(&dep_node_index);

            // Wake up waiters
            data.emitting_diagnostics_cond_var.notify_all();
        } else {
            // We must wait for the other thread to finish emitting the diagnostic

            loop {
                data.emitting_diagnostics_cond_var.wait(&mut emitting);
                if data.colors
                       .get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
                    break;
                }
            }
        }
    }

    // Returns true if the given node has been marked as green during the
    // current compilation session. Used in various assertions
    pub fn is_green(&self, dep_node: &DepNode) -> bool {
        self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false)
    }

    // This method loads all on-disk cacheable query results into memory, so
    // they can be written out to the new cache file again. Most query results
    // will already be in memory but in the case where we marked something as
    // green but then did not need the value, that value will never have been
    // loaded from disk.
    //
    // This method will only load queries that will end up in the disk cache.
    // Other queries will not be executed.
    pub fn exec_cache_promotions(&self, tcx: TyCtxt<'_>) {
        let _prof_timer = tcx.prof.generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    dep_node.try_load_from_on_disk_cache(tcx);
                }
                None |
                Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }
}

/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example are
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved files associated with this CGU.
    pub saved_files: Vec<(WorkProductFileKind, String)>,
}

#[derive(Clone, Copy, Debug, RustcEncodable, RustcDecodable, PartialEq)]
pub enum WorkProductFileKind {
    Object,
    Bytecode,
    BytecodeCompressed,
}

#[derive(Clone)]
struct DepNodeData {
    node: DepNode,
    edges: SmallVec<[DepNodeIndex; 8]>,
    fingerprint: Fingerprint,
}

/// `CurrentDepGraph` stores the dependency graph for the current session.
/// It will be populated as we run queries or tasks.
///
/// The nodes in it are identified by an index (`DepNodeIndex`).
/// The data for each node is stored in its `DepNodeData`, found in the `data` field.
///
/// We never remove nodes from the graph: they are only added.
///
/// This struct uses two locks internally. The `data` and `node_to_node_index` fields are
/// locked separately. Operations that take a `DepNodeIndex` typically just access
/// the data field.
///
/// The only operation that must manipulate both locks is adding new nodes, in which case
/// we first acquire the `node_to_node_index` lock and then, once a new node is to be inserted,
/// acquire the lock on `data`.
pub(super) struct CurrentDepGraph {
    data: Lock<IndexVec<DepNodeIndex, DepNodeData>>,
    node_to_node_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[allow(dead_code)]
    forbidden_edge: Option<EdgeFilter>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}
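
// The two-lock discipline described above is visible in `intern_node` further below; as a
// simplified sketch of the insertion path (not additional API), it amounts to:
//
//     let mut shard = node_to_node_index.get_shard_by_value(&dep_node).lock();
//     if the node is not yet present in `shard` {
//         let mut data = data.lock();              // second lock, taken only on insertion
//         let index = DepNodeIndex::new(data.len());
//         data.push(/* the new DepNodeData */);
//         shard.insert(dep_node, index);
//     }
//
// Lookups that already have a `DepNodeIndex` only ever touch `data`, so they never contend
// with the sharded map.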

impl CurrentDepGraph {
    fn new(prev_graph_node_count: usize) -> CurrentDepGraph {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 +
                    duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        let forbidden_edge = if cfg!(debug_assertions) {
            match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
                Ok(s) => {
                    match EdgeFilter::new(&s) {
                        Ok(f) => Some(f),
                        Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
                    }
                }
                Err(_) => None,
            }
        } else {
            None
        };

        // Pre-allocate the dep node structures. We over-allocate a little so
        // that we hopefully don't have to re-allocate during this compilation
        // session. The over-allocation is 2% plus a small constant to account
        // for the fact that in very small crates 2% might not be enough.
        let new_node_count_estimate = (prev_graph_node_count * 102) / 100 + 200;

        CurrentDepGraph {
            data: Lock::new(IndexVec::with_capacity(new_node_count_estimate)),
            node_to_node_index: Sharded::new(|| FxHashMap::with_capacity_and_hasher(
                new_node_count_estimate / sharded::SHARDS,
                Default::default(),
            )),
            anon_id_seed: stable_hasher.finish(),
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    fn complete_task(
        &self,
        node: DepNode,
        task_deps: TaskDeps,
        fingerprint: Fingerprint
    ) -> DepNodeIndex {
        self.alloc_node(node, task_deps.reads, fingerprint)
    }

    fn complete_anon_task(&self, kind: DepKind, task_deps: TaskDeps) -> DepNodeIndex {
        debug_assert!(!kind.is_eval_always());

        let mut hasher = StableHasher::new();

        // The dep node indices are hashed here instead of hashing the dep nodes of the
        // dependencies. These indices may refer to different nodes per session, but this isn't
        // a problem here because we ensure that the final dep node hash is per-session only,
        // by combining it with the per-session random number `anon_id_seed`. This hash only
        // needs to map the dependencies to a single value on a per-session basis.
        task_deps.reads.hash(&mut hasher);

        let target_dep_node = DepNode {
            kind,

            // Fingerprint::combine() is faster than sending Fingerprint
            // through the StableHasher (at least as long as StableHasher
            // is so slow).
            hash: self.anon_id_seed.combine(hasher.finish()),
        };

        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
    }
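
    // In effect the anonymous node ID is computed as
    //
    //     hash = anon_id_seed.combine(stable_hash(read_indices))
    //
    // so even if two sessions happen to record the same list of read indices, they end up
    // with different anonymous node IDs, because each session draws a fresh `anon_id_seed`.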

    fn alloc_node(
        &self,
        dep_node: DepNode,
        edges: SmallVec<[DepNodeIndex; 8]>,
        fingerprint: Fingerprint
    ) -> DepNodeIndex {
        debug_assert!(!self.node_to_node_index
                           .get_shard_by_value(&dep_node)
                           .lock()
                           .contains_key(&dep_node));
        self.intern_node(dep_node, edges, fingerprint)
    }

    fn intern_node(
        &self,
        dep_node: DepNode,
        edges: SmallVec<[DepNodeIndex; 8]>,
        fingerprint: Fingerprint
    ) -> DepNodeIndex {
        match self.node_to_node_index.get_shard_by_value(&dep_node).lock().entry(dep_node) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let mut data = self.data.lock();
                let dep_node_index = DepNodeIndex::new(data.len());
                data.push(DepNodeData {
                    node: dep_node,
                    edges,
                    fingerprint
                });
                entry.insert(dep_node_index);
                dep_node_index
            }
        }
    }
}

impl DepGraphData {
    fn read_index(&self, source: DepNodeIndex) {
        ty::tls::with_context_opt(|icx| {
            let icx = if let Some(icx) = icx { icx } else { return };
            if let Some(task_deps) = icx.task_deps {
                let mut task_deps = task_deps.lock();
                if cfg!(debug_assertions) {
                    self.current.total_read_count.fetch_add(1, SeqCst);
                }
                if task_deps.read_set.insert(source) {
                    task_deps.reads.push(source);

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            let data = self.current.data.lock();
                            if let Some(ref forbidden_edge) = self.current.forbidden_edge {
                                let source = data[source].node;
                                if forbidden_edge.test(&source, &target) {
                                    bug!("forbidden edge {:?} -> {:?} created",
                                         source,
                                         target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    self.current.total_duplicate_read_count.fetch_add(1, SeqCst);
                }
            }
        })
    }
}

pub struct TaskDeps {
    #[cfg(debug_assertions)]
    node: Option<DepNode>,
    reads: SmallVec<[DepNodeIndex; 8]>,
    read_set: FxHashSet<DepNodeIndex>,
}

// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;
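
// The encoding packs an `Option<DepNodeColor>` into a single `u32`:
//
//     None                          <-> 0       (COMPRESSED_NONE)
//     Some(Red)                     <-> 1       (COMPRESSED_RED)
//     Some(Green(DepNodeIndex(n)))  <-> n + 2   (n + COMPRESSED_FIRST_GREEN)
//
// For example, `Green(DepNodeIndex(5))` is stored as `7`, and a stored `7` decodes back to
// `Green(DepNodeIndex(7 - COMPRESSED_FIRST_GREEN))`.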

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap {
            values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect(),
        }
    }

    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => Some(DepNodeColor::Green(DepNodeIndex::from_u32(
                value - COMPRESSED_FIRST_GREEN
            )))
        }
    }

    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(match color {
            DepNodeColor::Red => COMPRESSED_RED,
            DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
        }, Ordering::Release)
    }
}