]> git.proxmox.com Git - rustc.git/blame - src/librustc/dep_graph/graph.rs
New upstream version 1.43.0+dfsg1
[rustc.git] / src / librustc / dep_graph / graph.rs
CommitLineData
dfeec247
XL
1use crate::ty::{self, TyCtxt};
2use parking_lot::{Condvar, Mutex};
ea8adc8c 3use rustc_data_structures::fx::{FxHashMap, FxHashSet};
dfeec247
XL
4use rustc_data_structures::profiling::QueryInvocationId;
5use rustc_data_structures::sharded::{self, Sharded};
6use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
7use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
8use rustc_errors::Diagnostic;
74b04a01 9use rustc_hir::def_id::DefId;
e74abb32 10use rustc_index::vec::{Idx, IndexVec};
b7449926 11use smallvec::SmallVec;
dfeec247 12use std::collections::hash_map::Entry;
ea8adc8c
XL
13use std::env;
14use std::hash::Hash;
416331ca 15use std::mem;
dfeec247 16use std::sync::atomic::Ordering::Relaxed;
54a0048b 17
dfeec247 18use crate::ich::{Fingerprint, StableHashingContext, StableHashingContextProvider};
ea8adc8c
XL
19
20use super::debug::EdgeFilter;
dfeec247
XL
21use super::dep_node::{DepKind, DepNode, WorkProductId};
22use super::prev::PreviousDepGraph;
54a0048b 23use super::query::DepGraphQuery;
8bb4bdeb 24use super::safe::DepGraphSafe;
ea8adc8c 25use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
54a0048b
SL
26
/// Handle to the dependency graph. Cheap to clone: the shared state lives
/// behind `Lrc`s. `data` is `None` when running in non-incremental mode
/// (see `new_disabled`), in which case only virtual indices are handed out.
#[derive(Clone)]
pub struct DepGraph {
    data: Option<Lrc<DepGraphData>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
}
37
// Index of a dep-node in the *current* session's dependency graph.
rustc_index::newtype_index! {
    pub struct DepNodeIndex { .. }
}
ea8adc8c
XL
41
impl DepNodeIndex {
    // Sentinel: uses the maximum representable index so it cannot collide
    // with any real node index in practice.
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
}
45
dfeec247
XL
// A `DepNodeIndex` doubles as a query-invocation id for self-profiling.
impl std::convert::From<DepNodeIndex> for QueryInvocationId {
    #[inline]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}
52
/// The state of a previous-session node after (re-)evaluation in the current
/// session: `Red` means its value changed; `Green` means it was re-validated
/// and carries the node's index in the *current* graph.
#[derive(PartialEq)]
pub enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}
58
59impl DepNodeColor {
60 pub fn is_green(self) -> bool {
61 match self {
62 DepNodeColor::Red => false,
63 DepNodeColor::Green(_) => true,
64 }
65 }
5bcae85e
SL
66}
67
/// The shared, session-long state behind an enabled `DepGraph`.
struct DepGraphData {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: We don't merge the previous dep-graph into
    /// current one anymore.
    current: CurrentDepGraph,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: PreviousDepGraph,

    /// Per previous-session node, the red/green verdict reached so far this
    /// session (`None` while still undetermined).
    colors: DepNodeColorMap,

    /// A set of loaded diagnostics that is in the progress of being emitted.
    emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,

    /// Used to wait for diagnostics to be emitted.
    emitting_diagnostics_cond_var: Condvar,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,

    /// Human-readable strings registered per dep-node for debugging output;
    /// populated lazily via `register_dep_node_debug_str`.
    dep_node_debug: Lock<FxHashMap<DepNode, String>>,
}
95
9fa01778
XL
/// Stable-hashes `result` and returns the fingerprint. Always returns
/// `Some`; the `Option` return matches the `hash_result` callback signature
/// of the task methods, where `None` means "result not hashable".
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Option<Fingerprint>
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);

    Some(stable_hasher.finish())
}
105
54a0048b 106impl DepGraph {
dfeec247
XL
    /// Creates a fully-enabled dep-graph seeded with the previous session's
    /// graph and surviving work products.
    pub fn new(
        prev_graph: PreviousDepGraph,
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
    ) -> DepGraph {
        // Sizes the current graph and the color map to the previous node
        // count up front.
        let prev_graph_node_count = prev_graph.node_count();

        DepGraph {
            data: Some(Lrc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current: CurrentDepGraph::new(prev_graph_node_count),
                emitting_diagnostics: Default::default(),
                emitting_diagnostics_cond_var: Condvar::new(),
                previous: prev_graph,
                colors: DepNodeColorMap::new(prev_graph_node_count),
            })),
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
        }
    }
126
    /// Creates a dep-graph with tracking disabled (non-incremental mode);
    /// only `virtual_dep_node_index` is active.
    pub fn new_disabled() -> DepGraph {
        DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
    }
130
    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }
136
    /// Builds a queryable snapshot (all nodes plus all edges) of the current
    /// graph. Panics if the graph is disabled; holds the graph data lock
    /// while copying.
    pub fn query(&self) -> DepGraphQuery {
        let data = self.data.as_ref().unwrap().current.data.lock();
        let nodes: Vec<_> = data.iter().map(|n| n.node).collect();
        let mut edges = Vec::new();
        for (from, edge_targets) in data.iter().map(|d| (d.node, &d.edges)) {
            for &edge_target in edge_targets.iter() {
                // Edge targets are stored as indices; resolve to DepNodes.
                let to = data[edge_target].node;
                edges.push((from, to));
            }
        }

        DepGraphQuery::new(&nodes[..], &edges[..])
    }
150
    /// Asserts that no task-dependency tracking is active on the current
    /// thread (i.e. we are in an "ignored" context). No-op when the graph
    /// is disabled or there is no TLS context.
    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            ty::tls::with_context_opt(|icx| {
                let icx = if let Some(icx) = icx { icx } else { return };
                assert!(icx.task_deps.is_none(), "expected no task dependency tracking");
            })
        }
    }
159
dfeec247
XL
    /// Runs `op` with dependency tracking suspended: reads performed inside
    /// `op` are not recorded as dependencies of the enclosing task.
    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        ty::tls::with_context(|icx| {
            // Install a context with `task_deps: None` so reads go unrecorded.
            let icx = ty::tls::ImplicitCtxt { task_deps: None, ..icx.clone() };

            ty::tls::enter_context(&icx, |_| op())
        })
    }
170
8bb4bdeb
XL
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc guide]: https://rust-lang.github.io/rustc-guide/incremental-compilation.html
    pub fn with_task<'a, C, A, R>(
        &self,
        key: DepNode,
        cx: C,
        arg: A,
        task: fn(C, A) -> R,
        hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
    {
        self.with_task_impl(
            key,
            cx,
            arg,
            false,
            task,
            // Regular tasks get a fresh `TaskDeps` so their reads are tracked.
            |_key| {
                Some(TaskDeps {
                    #[cfg(debug_assertions)]
                    node: Some(_key),
                    reads: SmallVec::new(),
                    read_set: Default::default(),
                })
            },
            // Finish by interning the node with the reads gathered in `task`.
            |data, key, fingerprint, task| data.complete_task(key, task.unwrap(), fingerprint),
            hash_result,
        )
    }
227
    /// Creates a new dep-graph input with value `input`
    ///
    /// Inputs have no work to run: the given value *is* the result, and its
    /// stable hash becomes the node's fingerprint. The node is allocated
    /// with no dependency edges.
    pub fn input_task<'a, C, R>(&self, key: DepNode, cx: C, input: R) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
        R: for<'b> HashStable<StableHashingContext<'b>>,
    {
        // The "task" for an input simply returns its argument unchanged.
        fn identity_fn<C, A>(_: C, arg: A) -> A {
            arg
        }

        self.with_task_impl(
            key,
            cx,
            input,
            true, // no_tcx: run without installing a tracking context
            identity_fn,
            |_| None, // inputs track no reads
            |data, key, fingerprint, _| data.alloc_node(key, SmallVec::new(), fingerprint),
            hash_result::<R>,
        )
    }
249
    /// Shared implementation behind `with_task`, `input_task` and
    /// `with_eval_always_task`.
    ///
    /// * `no_tcx` - when `true`, `task` runs without installing a TLS
    ///   tracking context (used for inputs).
    /// * `create_task` - produces the `TaskDeps` to record reads into, or
    ///   `None` for untracked tasks.
    /// * `finish_task_and_alloc_depnode` - interns the finished node into the
    ///   current graph.
    /// * `hash_result` - fingerprints the task's result; `None` marks the
    ///   node red unconditionally.
    fn with_task_impl<'a, C, A, R>(
        &self,
        key: DepNode,
        cx: C,
        arg: A,
        no_tcx: bool,
        task: fn(C, A) -> R,
        create_task: fn(DepNode) -> Option<TaskDeps>,
        finish_task_and_alloc_depnode: fn(
            &CurrentDepGraph,
            DepNode,
            Fingerprint,
            Option<TaskDeps>,
        ) -> DepNodeIndex,
        hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
    {
        if let Some(ref data) = self.data {
            let task_deps = create_task(key).map(|deps| Lock::new(deps));

            // In incremental mode, hash the result of the task. We don't
            // do anything with the hash yet, but we are computing it
            // anyway so that
            //  - we make sure that the infrastructure works and
            //  - we can get an idea of the runtime cost.
            let mut hcx = cx.get_stable_hashing_context();

            let result = if no_tcx {
                task(cx, arg)
            } else {
                ty::tls::with_context(|icx| {
                    // Install the (possibly tracking) context for the
                    // duration of the task.
                    let icx =
                        ty::tls::ImplicitCtxt { task_deps: task_deps.as_ref(), ..icx.clone() };

                    ty::tls::enter_context(&icx, |_| task(cx, arg))
                })
            };

            let current_fingerprint = hash_result(&mut hcx, &result);

            let dep_node_index = finish_task_and_alloc_depnode(
                &data.current,
                key,
                current_fingerprint.unwrap_or(Fingerprint::ZERO),
                task_deps.map(|lock| lock.into_inner()),
            );

            let print_status = cfg!(debug_assertions) && hcx.sess().opts.debugging_opts.dep_tasks;

            // Determine the color of the new DepNode.
            if let Some(prev_index) = data.previous.node_to_index_opt(&key) {
                let prev_fingerprint = data.previous.fingerprint_by_index(prev_index);

                // Green iff the new fingerprint matches the previous one.
                let color = if let Some(current_fingerprint) = current_fingerprint {
                    if current_fingerprint == prev_fingerprint {
                        if print_status {
                            eprintln!("[task::green] {:?}", key);
                        }
                        DepNodeColor::Green(dep_node_index)
                    } else {
                        if print_status {
                            eprintln!("[task::red] {:?}", key);
                        }
                        DepNodeColor::Red
                    }
                } else {
                    if print_status {
                        eprintln!("[task::unknown] {:?}", key);
                    }
                    // Mark the node as Red if we can't hash the result
                    DepNodeColor::Red
                };

                debug_assert!(
                    data.colors.get(prev_index).is_none(),
                    "DepGraph::with_task() - Duplicate DepNodeColor \
                            insertion for {:?}",
                    key
                );

                data.colors.insert(prev_index, color);
            } else {
                // Node did not exist in the previous session: nothing to color.
                if print_status {
                    eprintln!("[task::new] {:?}", key);
                }
            }

            (result, dep_node_index)
        } else {
            // Graph disabled: just run the task and hand out a virtual index.
            (task(cx, arg), self.next_virtual_depnode_index())
        }
    }
344
9fa01778
XL
    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
    pub fn with_anon_task<OP, R>(&self, dep_kind: DepKind, op: OP) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        if let Some(ref data) = self.data {
            let (result, task_deps) = ty::tls::with_context(|icx| {
                // Anonymous tasks have no fixed key, so `node` is `None`.
                let task_deps = Lock::new(TaskDeps {
                    #[cfg(debug_assertions)]
                    node: None,
                    reads: SmallVec::new(),
                    read_set: Default::default(),
                });

                let r = {
                    let icx = ty::tls::ImplicitCtxt { task_deps: Some(&task_deps), ..icx.clone() };

                    ty::tls::enter_context(&icx, |_| op())
                };

                (r, task_deps.into_inner())
            });
            // The node's identity is derived from the recorded reads.
            let dep_node_index = data.current.complete_anon_task(dep_kind, task_deps);
            (result, dep_node_index)
        } else {
            (op(), self.next_virtual_depnode_index())
        }
    }
374
9fa01778
XL
    /// Executes something within an "eval-always" task which is a task
    /// that runs whenever anything changes.
    ///
    /// Eval-always tasks track no reads (`create_task` returns `None`) and
    /// the node is allocated with an empty edge list.
    pub fn with_eval_always_task<'a, C, A, R>(
        &self,
        key: DepNode,
        cx: C,
        arg: A,
        task: fn(C, A) -> R,
        hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option<Fingerprint>,
    ) -> (R, DepNodeIndex)
    where
        C: DepGraphSafe + StableHashingContextProvider<'a>,
    {
        self.with_task_impl(
            key,
            cx,
            arg,
            false,
            task,
            |_| None,
            |data, key, fingerprint, _| data.alloc_node(key, smallvec![], fingerprint),
            hash_result,
        )
    }
399
041b39d2
XL
    /// Records a read of node `v` as a dependency of the task currently
    /// running on this thread. The node must already be interned in the
    /// current graph; otherwise this is an ICE.
    #[inline]
    pub fn read(&self, v: DepNode) {
        if let Some(ref data) = self.data {
            let map = data.current.node_to_node_index.get_shard_by_value(&v).lock();
            if let Some(dep_node_index) = map.get(&v).copied() {
                // Release the shard lock before recording the read.
                std::mem::drop(map);
                data.read_index(dep_node_index);
            } else {
                bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
            }
        }
    }
412
    /// Records a read of an already-interned node index as a dependency of
    /// the current task. No-op when the graph is disabled.
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            data.read_index(dep_node_index);
        }
    }
419
    /// Returns the current-session index of `dep_node`. Panics if the graph
    /// is disabled or the node has not been interned.
    #[inline]
    pub fn dep_node_index_of(&self, dep_node: &DepNode) -> DepNodeIndex {
        self.data
            .as_ref()
            .unwrap()
            .current
            .node_to_node_index
            .get_shard_by_value(dep_node)
            .lock()
            .get(dep_node)
            .cloned()
            .unwrap()
    }
433
0531ce1d
XL
    /// Returns `true` if `dep_node` has been interned into the current
    /// session's graph. Always `false` when the graph is disabled.
    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
        if let Some(ref data) = self.data {
            data.current
                .node_to_node_index
                .get_shard_by_value(&dep_node)
                .lock()
                .contains_key(dep_node)
        } else {
            false
        }
    }
446
ff7c6d11
XL
    /// Returns the fingerprint stored for `dep_node_index` in the current
    /// graph. Panics if the graph is disabled.
    #[inline]
    pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint {
        let data = self.data.as_ref().expect("dep graph enabled").current.data.lock();
        data[dep_node_index].fingerprint
    }
452
ea8adc8c
XL
    /// Looks up `dep_node`'s fingerprint in the *previous* session's graph.
    /// Panics if the graph is disabled.
    pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }
456
abe05a73
XL
    /// Returns `dep_node`'s index in the *previous* session's graph.
    /// Panics if the graph is disabled.
    #[inline]
    pub fn prev_dep_node_index_of(&self, dep_node: &DepNode) -> SerializedDepNodeIndex {
        self.data.as_ref().unwrap().previous.node_to_index(dep_node)
    }
461
9fa01778 462 /// Checks whether a previous work product exists for `v` and, if
5bcae85e 463 /// so, return the path that leads to it. Used to skip doing work.
041b39d2 464 pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
dfeec247 465 self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
5bcae85e
SL
466 }
467
32a655c1
SL
    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph. Panics if the graph is disabled.
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
    }
473
    /// Registers a debug string for `dep_node`, invoking `debug_str_gen`
    /// lazily only when no string is registered yet.
    #[inline(always)]
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        // Compute the string outside the borrow so the generator may itself
        // use the dep-graph without re-entering the lock.
        let debug_str = debug_str_gen();
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }
487
    /// Returns the debug string previously registered for `dep_node`, if any.
    pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }
ea8adc8c 491
0731742a
XL
    /// Returns `(total_read_count, total_duplicate_read_count)` statistics.
    /// These counters are only maintained in debug-assertions builds, so
    /// this returns `None` otherwise.
    pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> {
        if cfg!(debug_assertions) {
            let current_dep_graph = &self.data.as_ref().unwrap().current;

            Some((
                current_dep_graph.total_read_count.load(Relaxed),
                current_dep_graph.total_duplicate_read_count.load(Relaxed),
            ))
        } else {
            None
        }
    }
504
ea8adc8c 505 pub fn serialize(&self) -> SerializedDepGraph {
e74abb32 506 let data = self.data.as_ref().unwrap().current.data.lock();
ea8adc8c 507
0731742a 508 let fingerprints: IndexVec<SerializedDepNodeIndex, _> =
e74abb32 509 data.iter().map(|d| d.fingerprint).collect();
dfeec247 510 let nodes: IndexVec<SerializedDepNodeIndex, _> = data.iter().map(|d| d.node).collect();
ea8adc8c 511
e74abb32 512 let total_edge_count: usize = data.iter().map(|d| d.edges.len()).sum();
ea8adc8c
XL
513
514 let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
515 let mut edge_list_data = Vec::with_capacity(total_edge_count);
516
e74abb32 517 for (current_dep_node_index, edges) in data.iter_enumerated().map(|(i, d)| (i, &d.edges)) {
ea8adc8c
XL
518 let start = edge_list_data.len() as u32;
519 // This should really just be a memcpy :/
520 edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index())));
521 let end = edge_list_data.len() as u32;
522
523 debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len());
524 edge_list_indices.push((start, end));
525 }
526
74b04a01 527 debug_assert!(edge_list_data.len() <= u32::MAX as usize);
ea8adc8c
XL
528 debug_assert_eq!(edge_list_data.len(), total_edge_count);
529
dfeec247 530 SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }
ea8adc8c
XL
531 }
532
533 pub fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
0531ce1d
XL
534 if let Some(ref data) = self.data {
535 if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
dfeec247 536 return data.colors.get(prev_index);
0531ce1d
XL
537 } else {
538 // This is a node that did not exist in the previous compilation
539 // session, so we consider it to be red.
dfeec247 540 return Some(DepNodeColor::Red);
0531ce1d
XL
541 }
542 }
543
544 None
ea8adc8c
XL
545 }
546
9fa01778
XL
    /// Try to read a node index for the node dep_node.
    /// A node will have an index, when it's already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node, when
    /// a node index can be found for that node.
    pub fn try_mark_green_and_read(
        &self,
        tcx: TyCtxt<'_>,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| {
            debug_assert!(self.is_green(&dep_node));
            // Record the read only after marking succeeded.
            self.read_index(dep_node_index);
            (prev_index, dep_node_index)
        })
    }
ea8adc8c 562
9fa01778
XL
    /// Tries to mark `dep_node` green, returning its previous-session and
    /// current-session indices on success. Unlike `try_mark_green_and_read`,
    /// this does not record a read. Must not be called on eval-always nodes.
    pub fn try_mark_green(
        &self,
        tcx: TyCtxt<'_>,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!dep_node.kind.is_eval_always());

        // Return None if the dep graph is disabled
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }
589
    /// Try to mark a dep-node which existed in the previous compilation session as green.
    ///
    /// Walks the node's dependency edges from the previous session: every
    /// dependency must come out green — either because it is already colored,
    /// can itself be recursively marked green, or turns green after forcing
    /// its query. On success the node is interned into the current graph
    /// (reusing the previous fingerprint) and colored green; returns `None`
    /// if any dependency stays red or cannot be forced.
    fn try_mark_previous_green<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        data: &DepGraphData,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode,
    ) -> Option<DepNodeIndex> {
        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);

        // In the single-threaded compiler the node cannot already be interned
        // or colored; in the parallel compiler another thread may race us, so
        // these assertions only hold without parallelism.
        #[cfg(not(parallel_compiler))]
        {
            debug_assert!(
                !data
                    .current
                    .node_to_node_index
                    .get_shard_by_value(dep_node)
                    .lock()
                    .contains_key(dep_node)
            );
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

        // We never try to mark eval_always nodes as green
        debug_assert!(!dep_node.kind.is_eval_always());

        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);

        // Current-session indices of the (green) dependencies, which become
        // this node's edges when it is interned below.
        let mut current_deps = SmallVec::new();

        for &dep_dep_node_index in prev_deps {
            let dep_dep_node_color = data.colors.get(dep_dep_node_index);

            match dep_dep_node_color {
                Some(DepNodeColor::Green(node_index)) => {
                    // This dependency has been marked as green before, we are
                    // still fine and can continue with checking the other
                    // dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) --- found dependency {:?} to \
                            be immediately green",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    current_deps.push(node_index);
                }
                Some(DepNodeColor::Red) => {
                    // We found a dependency the value of which has changed
                    // compared to the previous compilation session. We cannot
                    // mark the DepNode as green and also don't need to bother
                    // with checking any of the other dependencies.
                    debug!(
                        "try_mark_previous_green({:?}) - END - dependency {:?} was \
                            immediately red",
                        dep_node,
                        data.previous.index_to_node(dep_dep_node_index)
                    );
                    return None;
                }
                None => {
                    let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index);

                    // We don't know the state of this dependency. If it isn't
                    // an eval_always node, let's try to mark it green recursively.
                    if !dep_dep_node.kind.is_eval_always() {
                        debug!(
                            "try_mark_previous_green({:?}) --- state of dependency {:?} \
                                 is unknown, trying to mark it green",
                            dep_node, dep_dep_node
                        );

                        let node_index = self.try_mark_previous_green(
                            tcx,
                            data,
                            dep_dep_node_index,
                            dep_dep_node,
                        );
                        if let Some(node_index) = node_index {
                            debug!(
                                "try_mark_previous_green({:?}) --- managed to MARK \
                                    dependency {:?} as green",
                                dep_node, dep_dep_node
                            );
                            current_deps.push(node_index);
                            continue;
                        }
                    } else {
                        // Eval-always nodes should normally already be colored;
                        // some kinds signal that the underlying item is gone.
                        match dep_dep_node.kind {
                            DepKind::Hir | DepKind::HirBody | DepKind::CrateMetadata => {
                                if let Some(def_id) = dep_dep_node.extract_def_id(tcx) {
                                    if def_id_corresponds_to_hir_dep_node(tcx, def_id) {
                                        // The `DefPath` has corresponding node,
                                        // and that node should have been marked
                                        // either red or green in `data.colors`.
                                        bug!(
                                            "DepNode {:?} should have been \
                                             pre-marked as red or green but wasn't.",
                                            dep_dep_node
                                        );
                                    } else {
                                        // This `DefPath` does not have a
                                        // corresponding `DepNode` (e.g. a
                                        // struct field), and the ` DefPath`
                                        // collided with the `DefPath` of a
                                        // proper item that existed in the
                                        // previous compilation session.
                                        //
                                        // Since the given `DefPath` does not
                                        // denote the item that previously
                                        // existed, we just fail to mark green.
                                        return None;
                                    }
                                } else {
                                    // If the node does not exist anymore, we
                                    // just fail to mark green.
                                    return None;
                                }
                            }
                            _ => {
                                // For other kinds of nodes it's OK to be
                                // forced.
                            }
                        }
                    }

                    // We failed to mark it green, so we try to force the query.
                    debug!(
                        "try_mark_previous_green({:?}) --- trying to force \
                            dependency {:?}",
                        dep_node, dep_dep_node
                    );
                    if crate::ty::query::force_from_dep_node(tcx, dep_dep_node) {
                        // Forcing ran the query, which should have colored the
                        // node; re-check the color map.
                        let dep_dep_node_color = data.colors.get(dep_dep_node_index);

                        match dep_dep_node_color {
                            Some(DepNodeColor::Green(node_index)) => {
                                debug!(
                                    "try_mark_previous_green({:?}) --- managed to \
                                        FORCE dependency {:?} to green",
                                    dep_node, dep_dep_node
                                );
                                current_deps.push(node_index);
                            }
                            Some(DepNodeColor::Red) => {
                                debug!(
                                    "try_mark_previous_green({:?}) - END - \
                                        dependency {:?} was red after forcing",
                                    dep_node, dep_dep_node
                                );
                                return None;
                            }
                            None => {
                                if !tcx.sess.has_errors_or_delayed_span_bugs() {
                                    bug!(
                                        "try_mark_previous_green() - Forcing the DepNode \
                                          should have set its color"
                                    )
                                } else {
                                    // If the query we just forced has resulted in
                                    // some kind of compilation error, we cannot rely on
                                    // the dep-node color having been properly updated.
                                    // This means that the query system has reached an
                                    // invalid state. We let the compiler continue (by
                                    // returning `None`) so it can emit error messages
                                    // and wind down, but rely on the fact that this
                                    // invalid state will not be persisted to the
                                    // incremental compilation cache because of
                                    // compilation errors being present.
                                    debug!(
                                        "try_mark_previous_green({:?}) - END - \
                                            dependency {:?} resulted in compilation error",
                                        dep_node, dep_dep_node
                                    );
                                    return None;
                                }
                            }
                        }
                    } else {
                        // The DepNode could not be forced.
                        debug!(
                            "try_mark_previous_green({:?}) - END - dependency {:?} \
                                could not be forced",
                            dep_node, dep_dep_node
                        );
                        return None;
                    }
                }
            }
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently

        let dep_node_index = {
            // Copy the fingerprint from the previous graph,
            // so we don't have to recompute it
            let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index);

            // We allocating an entry for the node in the current dependency graph and
            // adding all the appropriate edges imported from the previous graph
            data.current.intern_node(*dep_node, current_deps, fingerprint)
        };

        // ... emitting any stored diagnostic ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
        // Maybe store a list on disk and encode this fact in the DepNodeState
        let diagnostics = tcx.queries.on_disk_cache.load_diagnostics(tcx, prev_dep_node_index);

        #[cfg(not(parallel_compiler))]
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
                      insertion for {:?}",
            dep_node
        );

        if unlikely!(!diagnostics.is_empty()) {
            self.emit_diagnostics(tcx, data, dep_node_index, prev_dep_node_index, diagnostics);
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
        Some(dep_node_index)
    }
823
416331ca
XL
    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    ///
    /// Exactly one thread wins the race (first to insert into
    /// `emitting_diagnostics`), emits the diagnostics and marks the node
    /// green; the others block on the condvar until that happens.
    #[cold]
    #[inline(never)]
    fn emit_diagnostics<'tcx>(
        &self,
        tcx: TyCtxt<'tcx>,
        data: &DepGraphData,
        dep_node_index: DepNodeIndex,
        prev_dep_node_index: SerializedDepNodeIndex,
        diagnostics: Vec<Diagnostic>,
    ) {
        let mut emitting = data.emitting_diagnostics.lock();

        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
            // The node is already green so diagnostics must have been emitted already
            return;
        }

        if emitting.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must emit the diagnostics and signal other potentially waiting
            // threads after.
            mem::drop(emitting);

            // Promote the previous diagnostics to the current session.
            tcx.queries.on_disk_cache.store_diagnostics(dep_node_index, diagnostics.clone().into());

            let handle = tcx.sess.diagnostic();

            for diagnostic in diagnostics {
                handle.emit_diagnostic(&diagnostic);
            }

            // Mark the node as green now that diagnostics are emitted
            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

            // Remove the node from the set
            data.emitting_diagnostics.lock().remove(&dep_node_index);

            // Wake up waiters
            data.emitting_diagnostics_cond_var.notify_all();
        } else {
            // We must wait for the other thread to finish emitting the diagnostic

            loop {
                data.emitting_diagnostics_cond_var.wait(&mut emitting);
                // Loop guards against spurious wakeups: only exit once the
                // winning thread has actually recorded the green color.
                if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index))
                {
                    break;
                }
            }
        }
    }
878
0531ce1d
XL
879 // Returns true if the given node has been marked as green during the
880 // current compilation session. Used in various assertions
881 pub fn is_green(&self, dep_node: &DepNode) -> bool {
882 self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false)
ea8adc8c
XL
883 }
884
ff7c6d11
XL
885 // This method loads all on-disk cacheable query results into memory, so
886 // they can be written out to the new cache file again. Most query results
887 // will already be in memory but in the case where we marked something as
888 // green but then did not need the value, that value will never have been
889 // loaded from disk.
890 //
891 // This method will only load queries that will end up in the disk cache.
892 // Other queries will not be executed.
416331ca 893 pub fn exec_cache_promotions(&self, tcx: TyCtxt<'_>) {
e74abb32
XL
894 let _prof_timer = tcx.prof.generic_activity("incr_comp_query_cache_promotion");
895
dc9dc135
XL
896 let data = self.data.as_ref().unwrap();
897 for prev_index in data.colors.values.indices() {
898 match data.colors.get(prev_index) {
899 Some(DepNodeColor::Green(_)) => {
900 let dep_node = data.previous.index_to_node(prev_index);
901 dep_node.try_load_from_on_disk_cache(tcx);
ff7c6d11 902 }
dfeec247 903 None | Some(DepNodeColor::Red) => {
dc9dc135
XL
904 // We can skip red nodes because a node can only be marked
905 // as red if the query result was recomputed and thus is
906 // already in memory.
907 }
908 }
ff7c6d11
XL
909 }
910 }
dfeec247
XL
911
912 fn next_virtual_depnode_index(&self) -> DepNodeIndex {
913 let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
914 DepNodeIndex::from_u32(index)
915 }
5bcae85e
SL
916}
917
74b04a01
XL
/// Returns `true` when `def_id`'s `DefIndex` equals the owner index of its
/// HIR node, i.e. the `DefId` names a HIR owner itself rather than something
/// nested inside one.
///
/// NOTE(review): `as_local_hir_id(..).unwrap()` will panic for non-local
/// `DefId`s — presumably all callers pass local ids; confirm at call sites.
fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
    let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap();
    def_id.index == hir_id.owner
}
922
5bcae85e
SL
/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example are
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct WorkProduct {
    /// Name of the codegen unit (CGU) this work product was produced for.
    pub cgu_name: String,
    /// Saved files associated with this CGU.
    pub saved_files: Vec<(WorkProductFileKind, String)>,
}
960
/// The kind of file saved as part of a [`WorkProduct`] (see the description
/// there: these are the per-CGU artifacts saved at codegen time).
#[derive(Clone, Copy, Debug, RustcEncodable, RustcDecodable, PartialEq)]
pub enum WorkProductFileKind {
    /// A compiled object file.
    Object,
    /// LLVM bitcode.
    Bytecode,
    /// Compressed LLVM bitcode.
    BytecodeCompressed,
}
ea8adc8c 967
0731742a
XL
/// Per-node payload stored in `CurrentDepGraph::data`, indexed by
/// `DepNodeIndex`.
#[derive(Clone)]
struct DepNodeData {
    /// The dep-node this entry describes.
    node: DepNode,
    /// Indices of the nodes this node reads from (its dependencies).
    edges: SmallVec<[DepNodeIndex; 8]>,
    /// Fingerprint of the node's result, used for change detection.
    fingerprint: Fingerprint,
}
974
e74abb32
XL
/// `CurrentDepGraph` stores the dependency graph for the current session.
/// It will be populated as we run queries or tasks.
///
/// The nodes in it are identified by an index (`DepNodeIndex`).
/// The data for each node is stored in its `DepNodeData`, found in the `data` field.
///
/// We never remove nodes from the graph: they are only added.
///
/// This struct uses two locks internally. The `data` and `node_to_node_index` fields are
/// locked separately. Operations that take a `DepNodeIndex` typically just access
/// the data field.
///
/// The only operation that must manipulate both locks is adding new nodes, in which case
/// we first acquire the `node_to_node_index` lock and then, once a new node is to be inserted,
/// acquire the lock on `data.`
pub(super) struct CurrentDepGraph {
    /// Node payloads, indexed by `DepNodeIndex`; append-only.
    data: Lock<IndexVec<DepNodeIndex, DepNodeData>>,
    /// Sharded reverse map from a `DepNode` to its index in `data`.
    node_to_node_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[allow(dead_code)]
    forbidden_edge: Option<EdgeFilter>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}
1017
impl CurrentDepGraph {
    /// Creates an empty graph, pre-sized from the previous session's node
    /// count. The `anon_id_seed` is derived from the current wall-clock time
    /// so that anon node IDs differ between sessions (see the field docs).
    fn new(prev_graph_node_count: usize) -> CurrentDepGraph {
        use std::time::{SystemTime, UNIX_EPOCH};

        // Seed the anon-node fingerprint with nanoseconds-since-epoch; this
        // only needs to differ between sessions, not be cryptographic.
        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        // Debug-only edge trap, configured via RUST_FORBID_DEP_GRAPH_EDGE.
        let forbidden_edge = if cfg!(debug_assertions) {
            match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
                Ok(s) => match EdgeFilter::new(&s) {
                    Ok(f) => Some(f),
                    Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
                },
                Err(_) => None,
            }
        } else {
            None
        };

        // Pre-allocate the dep node structures. We over-allocate a little so
        // that we hopefully don't have to re-allocate during this compilation
        // session. The over-allocation is 2% plus a small constant to account
        // for the fact that in very small crates 2% might not be enough.
        let new_node_count_estimate = (prev_graph_node_count * 102) / 100 + 200;

        CurrentDepGraph {
            data: Lock::new(IndexVec::with_capacity(new_node_count_estimate)),
            node_to_node_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    // Capacity is split evenly across the shards.
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            anon_id_seed: stable_hasher.finish(),
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    /// Allocates the node for a finished (named) task, recording the reads
    /// accumulated in `task_deps` as its edges.
    fn complete_task(
        &self,
        node: DepNode,
        task_deps: TaskDeps,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        self.alloc_node(node, task_deps.reads, fingerprint)
    }

    /// Interns the node for a finished *anonymous* task; its `DepNode` ID is
    /// computed from the dependency list mixed with the session seed.
    fn complete_anon_task(&self, kind: DepKind, task_deps: TaskDeps) -> DepNodeIndex {
        debug_assert!(!kind.is_eval_always());

        let mut hasher = StableHasher::new();

        // The dep node indices are hashed here instead of hashing the dep nodes of the
        // dependencies. These indices may refer to different nodes per session, but this isn't
        // a problem here because we ensure that the final dep node hash is per session only by
        // combining it with the per session random number `anon_id_seed`. This hash only needs
        // to map the dependencies to a single value on a per session basis.
        task_deps.reads.hash(&mut hasher);

        let target_dep_node = DepNode {
            kind,

            // Fingerprint::combine() is faster than sending Fingerprint
            // through the StableHasher (at least as long as StableHasher
            // is so slow).
            hash: self.anon_id_seed.combine(hasher.finish()),
        };

        // Anon nodes carry no meaningful result fingerprint of their own.
        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
    }

    /// Like `intern_node`, but the caller guarantees the node is new; this is
    /// checked with a debug assertion against the reverse map.
    fn alloc_node(
        &self,
        dep_node: DepNode,
        edges: SmallVec<[DepNodeIndex; 8]>,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        debug_assert!(
            !self.node_to_node_index.get_shard_by_value(&dep_node).lock().contains_key(&dep_node)
        );
        self.intern_node(dep_node, edges, fingerprint)
    }

    /// Returns the existing index for `dep_node`, or appends a new entry.
    ///
    /// Lock order (see the struct docs): the `node_to_node_index` shard lock
    /// is taken first and held across the `data` lock so that index
    /// assignment and map insertion are atomic with respect to other threads
    /// interning the same node.
    fn intern_node(
        &self,
        dep_node: DepNode,
        edges: SmallVec<[DepNodeIndex; 8]>,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        match self.node_to_node_index.get_shard_by_value(&dep_node).lock().entry(dep_node) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let mut data = self.data.lock();
                // The new node's index is simply its position in `data`.
                let dep_node_index = DepNodeIndex::new(data.len());
                data.push(DepNodeData { node: dep_node, edges, fingerprint });
                entry.insert(dep_node_index);
                dep_node_index
            }
        }
    }
}
abe05a73 1123
impl DepGraphData {
    /// Records that the currently-executing task (if any) read the node
    /// `source`, adding an edge from it to the task's node.
    ///
    /// A no-op when there is no TLS context or no task is tracking deps
    /// (e.g. eval-always or untracked execution). Also maintains the
    /// debug-only read counters and the `forbidden_edge` trap.
    #[inline(never)]
    fn read_index(&self, source: DepNodeIndex) {
        ty::tls::with_context_opt(|icx| {
            let icx = if let Some(icx) = icx { icx } else { return };
            if let Some(task_deps) = icx.task_deps {
                let mut task_deps = task_deps.lock();
                if cfg!(debug_assertions) {
                    self.current.total_read_count.fetch_add(1, Relaxed);
                }
                // `read_set` deduplicates; only the first read of `source`
                // is pushed onto the ordered `reads` edge list.
                if task_deps.read_set.insert(source) {
                    task_deps.reads.push(source);

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            let data = self.current.data.lock();
                            if let Some(ref forbidden_edge) = self.current.forbidden_edge {
                                let source = data[source].node;
                                if forbidden_edge.test(&source, &target) {
                                    bug!("forbidden edge {:?} -> {:?} created", source, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    self.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                }
            }
        })
    }
}
1156
0731742a
XL
/// The dependencies recorded while a single task executes; turned into the
/// task's edge list by `CurrentDepGraph::complete_task`/`complete_anon_task`.
pub struct TaskDeps {
    /// The task's own node, used only for the debug-only forbidden-edge check.
    #[cfg(debug_assertions)]
    node: Option<DepNode>,
    /// Read edges in the order they were first observed.
    reads: SmallVec<[DepNodeIndex; 8]>,
    /// Set mirror of `reads`, used to deduplicate in `read_index`.
    read_set: FxHashSet<DepNodeIndex>,
}
1163
0531ce1d
XL
// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    // Atomics allow lock-free concurrent get/insert; the u32 encoding is
    // given by the `COMPRESSED_*` constants.
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}
1169
// Encoding of `Option<DepNodeColor>` in a single u32:
// 0 = None, 1 = Some(Red), and `2 + i` = Some(Green(DepNodeIndex i)).
const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;
1173
1174impl DepNodeColorMap {
1175 fn new(size: usize) -> DepNodeColorMap {
dfeec247 1176 DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
0531ce1d
XL
1177 }
1178
1179 fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
9fa01778 1180 match self.values[index].load(Ordering::Acquire) {
0531ce1d
XL
1181 COMPRESSED_NONE => None,
1182 COMPRESSED_RED => Some(DepNodeColor::Red),
dfeec247
XL
1183 value => {
1184 Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
1185 }
0531ce1d
XL
1186 }
1187 }
1188
9fa01778 1189 fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
dfeec247
XL
1190 self.values[index].store(
1191 match color {
1192 DepNodeColor::Red => COMPRESSED_RED,
1193 DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
1194 },
1195 Ordering::Release,
1196 )
0531ce1d
XL
1197 }
1198}