]>
Commit | Line | Data |
---|---|---|
ba9703b0 | 1 | use rustc_data_structures::fingerprint::Fingerprint; |
ea8adc8c | 2 | use rustc_data_structures::fx::{FxHashMap, FxHashSet}; |
dfeec247 XL |
3 | use rustc_data_structures::profiling::QueryInvocationId; |
4 | use rustc_data_structures::sharded::{self, Sharded}; | |
5 | use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; | |
6 | use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering}; | |
ba9703b0 | 7 | use rustc_data_structures::unlikely; |
dfeec247 | 8 | use rustc_errors::Diagnostic; |
e74abb32 | 9 | use rustc_index::vec::{Idx, IndexVec}; |
ba9703b0 XL |
10 | |
11 | use parking_lot::{Condvar, Mutex}; | |
12 | use smallvec::{smallvec, SmallVec}; | |
dfeec247 | 13 | use std::collections::hash_map::Entry; |
ea8adc8c XL |
14 | use std::env; |
15 | use std::hash::Hash; | |
ba9703b0 | 16 | use std::marker::PhantomData; |
416331ca | 17 | use std::mem; |
dfeec247 | 18 | use std::sync::atomic::Ordering::Relaxed; |
54a0048b | 19 | |
ea8adc8c | 20 | use super::debug::EdgeFilter; |
dfeec247 | 21 | use super::prev::PreviousDepGraph; |
54a0048b | 22 | use super::query::DepGraphQuery; |
ea8adc8c | 23 | use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex}; |
ba9703b0 | 24 | use super::{DepContext, DepKind, DepNode, WorkProductId}; |
54a0048b SL |
25 | |
26 | #[derive(Clone)] | |
ba9703b0 XL |
27 | pub struct DepGraph<K: DepKind> { |
28 | data: Option<Lrc<DepGraphData<K>>>, | |
dfeec247 XL |
29 | |
30 | /// This field is used for assigning DepNodeIndices when running in | |
31 | /// non-incremental mode. Even in non-incremental mode we make sure that | |
32 | /// each task has a `DepNodeIndex` that uniquely identifies it. This unique | |
33 | /// ID is used for self-profiling. | |
34 | virtual_dep_node_index: Lrc<AtomicU32>, | |
ea8adc8c XL |
35 | } |
36 | ||
e74abb32 | 37 | rustc_index::newtype_index! { |
b7449926 XL |
38 | pub struct DepNodeIndex { .. } |
39 | } | |
ea8adc8c XL |
40 | |
41 | impl DepNodeIndex { | |
e74abb32 | 42 | pub const INVALID: DepNodeIndex = DepNodeIndex::MAX; |
ea8adc8c XL |
43 | } |
44 | ||
dfeec247 XL |
45 | impl std::convert::From<DepNodeIndex> for QueryInvocationId { |
46 | #[inline] | |
47 | fn from(dep_node_index: DepNodeIndex) -> Self { | |
48 | QueryInvocationId(dep_node_index.as_u32()) | |
49 | } | |
50 | } | |
51 | ||
e74abb32 | 52 | #[derive(PartialEq)] |
ea8adc8c XL |
53 | pub enum DepNodeColor { |
54 | Red, | |
dfeec247 | 55 | Green(DepNodeIndex), |
ea8adc8c XL |
56 | } |
57 | ||
58 | impl DepNodeColor { | |
59 | pub fn is_green(self) -> bool { | |
60 | match self { | |
61 | DepNodeColor::Red => false, | |
62 | DepNodeColor::Green(_) => true, | |
63 | } | |
64 | } | |
5bcae85e SL |
65 | } |
66 | ||
ba9703b0 | 67 | struct DepGraphData<K: DepKind> { |
ea8adc8c XL |
68 | /// The new encoding of the dependency graph, optimized for red/green |
69 | /// tracking. The `current` field is the dependency graph of only the | |
70 | /// current compilation session: We don't merge the previous dep-graph into | |
71 | /// current one anymore. | |
ba9703b0 | 72 | current: CurrentDepGraph<K>, |
ea8adc8c XL |
73 | |
74 | /// The dep-graph from the previous compilation session. It contains all | |
75 | /// nodes and edges as well as all fingerprints of nodes that have them. | |
ba9703b0 | 76 | previous: PreviousDepGraph<K>, |
ea8adc8c | 77 | |
9fa01778 | 78 | colors: DepNodeColorMap, |
5bcae85e | 79 | |
416331ca XL |
80 | /// A set of loaded diagnostics that is in the progress of being emitted. |
81 | emitting_diagnostics: Mutex<FxHashSet<DepNodeIndex>>, | |
9fa01778 XL |
82 | |
83 | /// Used to wait for diagnostics to be emitted. | |
416331ca | 84 | emitting_diagnostics_cond_var: Condvar, |
9fa01778 XL |
85 | |
86 | /// When we load, there may be `.o` files, cached MIR, or other such | |
5bcae85e SL |
87 | /// things available to us. If we find that they are not dirty, we |
88 | /// load the path to the file storing those work-products here into | |
89 | /// this map. We can later look for and extract that data. | |
94b46f34 | 90 | previous_work_products: FxHashMap<WorkProductId, WorkProduct>, |
041b39d2 | 91 | |
ba9703b0 | 92 | dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>, |
54a0048b SL |
93 | } |
94 | ||
ba9703b0 | 95 | pub fn hash_result<HashCtxt, R>(hcx: &mut HashCtxt, result: &R) -> Option<Fingerprint> |
9fa01778 | 96 | where |
ba9703b0 | 97 | R: HashStable<HashCtxt>, |
9fa01778 XL |
98 | { |
99 | let mut stable_hasher = StableHasher::new(); | |
100 | result.hash_stable(hcx, &mut stable_hasher); | |
101 | ||
102 | Some(stable_hasher.finish()) | |
103 | } | |
104 | ||
ba9703b0 | 105 | impl<K: DepKind> DepGraph<K> { |
dfeec247 | 106 | pub fn new( |
ba9703b0 | 107 | prev_graph: PreviousDepGraph<K>, |
dfeec247 | 108 | prev_work_products: FxHashMap<WorkProductId, WorkProduct>, |
ba9703b0 | 109 | ) -> DepGraph<K> { |
0531ce1d XL |
110 | let prev_graph_node_count = prev_graph.node_count(); |
111 | ||
54a0048b | 112 | DepGraph { |
0531ce1d | 113 | data: Some(Lrc::new(DepGraphData { |
94b46f34 | 114 | previous_work_products: prev_work_products, |
a1dfa0c6 | 115 | dep_node_debug: Default::default(), |
e74abb32 | 116 | current: CurrentDepGraph::new(prev_graph_node_count), |
416331ca XL |
117 | emitting_diagnostics: Default::default(), |
118 | emitting_diagnostics_cond_var: Condvar::new(), | |
ea8adc8c | 119 | previous: prev_graph, |
9fa01778 | 120 | colors: DepNodeColorMap::new(prev_graph_node_count), |
ea8adc8c | 121 | })), |
dfeec247 | 122 | virtual_dep_node_index: Lrc::new(AtomicU32::new(0)), |
ea8adc8c XL |
123 | } |
124 | } | |
125 | ||
ba9703b0 | 126 | pub fn new_disabled() -> DepGraph<K> { |
dfeec247 | 127 | DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) } |
54a0048b SL |
128 | } |
129 | ||
9fa01778 | 130 | /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise. |
32a655c1 SL |
131 | #[inline] |
132 | pub fn is_fully_enabled(&self) -> bool { | |
041b39d2 | 133 | self.data.is_some() |
32a655c1 SL |
134 | } |
135 | ||
ba9703b0 | 136 | pub fn query(&self) -> DepGraphQuery<K> { |
e74abb32 XL |
137 | let data = self.data.as_ref().unwrap().current.data.lock(); |
138 | let nodes: Vec<_> = data.iter().map(|n| n.node).collect(); | |
ea8adc8c | 139 | let mut edges = Vec::new(); |
e74abb32 | 140 | for (from, edge_targets) in data.iter().map(|d| (d.node, &d.edges)) { |
94b46f34 | 141 | for &edge_target in edge_targets.iter() { |
e74abb32 | 142 | let to = data[edge_target].node; |
ea8adc8c XL |
143 | edges.push((from, to)); |
144 | } | |
145 | } | |
54a0048b | 146 | |
ea8adc8c | 147 | DepGraphQuery::new(&nodes[..], &edges[..]) |
54a0048b SL |
148 | } |
149 | ||
dfeec247 | 150 | pub fn assert_ignored(&self) { |
83c7162d | 151 | if let Some(..) = self.data { |
ba9703b0 XL |
152 | K::read_deps(|task_deps| { |
153 | assert!(task_deps.is_none(), "expected no task dependency tracking"); | |
83c7162d | 154 | }) |
2c00a5a8 | 155 | } |
54a0048b SL |
156 | } |
157 | ||
dfeec247 XL |
158 | pub fn with_ignore<OP, R>(&self, op: OP) -> R |
159 | where | |
160 | OP: FnOnce() -> R, | |
54a0048b | 161 | { |
ba9703b0 | 162 | K::with_deps(None, op) |
54a0048b SL |
163 | } |
164 | ||
8bb4bdeb XL |
165 | /// Starts a new dep-graph task. Dep-graph tasks are specified |
166 | /// using a free function (`task`) and **not** a closure -- this | |
167 | /// is intentional because we want to exercise tight control over | |
168 | /// what state they have access to. In particular, we want to | |
169 | /// prevent implicit 'leaks' of tracked state into the task (which | |
170 | /// could then be read without generating correct edges in the | |
ba9703b0 | 171 | /// dep-graph -- see the [rustc dev guide] for more details on |
ff7c6d11 | 172 | /// the dep-graph). To this end, the task function gets exactly two |
8bb4bdeb XL |
173 | /// pieces of state: the context `cx` and an argument `arg`. Both |
174 | /// of these bits of state must be of some type that implements | |
175 | /// `DepGraphSafe` and hence does not leak. | |
176 | /// | |
177 | /// The choice of two arguments is not fundamental. One argument | |
178 | /// would work just as well, since multiple values can be | |
179 | /// collected using tuples. However, using two arguments works out | |
180 | /// to be quite convenient, since it is common to need a context | |
181 | /// (`cx`) and some argument (e.g., a `DefId` identifying what | |
182 | /// item to process). | |
183 | /// | |
184 | /// For cases where you need some other number of arguments: | |
185 | /// | |
186 | /// - If you only need one argument, just use `()` for the `arg` | |
187 | /// parameter. | |
188 | /// - If you need 3+ arguments, use a tuple for the | |
189 | /// `arg` parameter. | |
190 | /// | |
ba9703b0 XL |
191 | /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html |
192 | pub fn with_task<Ctxt: DepContext<DepKind = K>, A, R>( | |
9fa01778 | 193 | &self, |
ba9703b0 XL |
194 | key: DepNode<K>, |
195 | cx: Ctxt, | |
9fa01778 | 196 | arg: A, |
ba9703b0 XL |
197 | task: fn(Ctxt, A) -> R, |
198 | hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>, | |
199 | ) -> (R, DepNodeIndex) { | |
dfeec247 XL |
200 | self.with_task_impl( |
201 | key, | |
202 | cx, | |
203 | arg, | |
204 | false, | |
205 | task, | |
206 | |_key| { | |
207 | Some(TaskDeps { | |
208 | #[cfg(debug_assertions)] | |
209 | node: Some(_key), | |
210 | reads: SmallVec::new(), | |
211 | read_set: Default::default(), | |
ba9703b0 | 212 | phantom_data: PhantomData, |
dfeec247 | 213 | }) |
9fa01778 | 214 | }, |
dfeec247 XL |
215 | |data, key, fingerprint, task| data.complete_task(key, task.unwrap(), fingerprint), |
216 | hash_result, | |
217 | ) | |
abe05a73 XL |
218 | } |
219 | ||
ba9703b0 | 220 | fn with_task_impl<Ctxt: DepContext<DepKind = K>, A, R>( |
83c7162d | 221 | &self, |
ba9703b0 XL |
222 | key: DepNode<K>, |
223 | cx: Ctxt, | |
83c7162d XL |
224 | arg: A, |
225 | no_tcx: bool, | |
ba9703b0 XL |
226 | task: fn(Ctxt, A) -> R, |
227 | create_task: fn(DepNode<K>) -> Option<TaskDeps<K>>, | |
dfeec247 | 228 | finish_task_and_alloc_depnode: fn( |
ba9703b0 XL |
229 | &CurrentDepGraph<K>, |
230 | DepNode<K>, | |
dfeec247 | 231 | Fingerprint, |
ba9703b0 | 232 | Option<TaskDeps<K>>, |
dfeec247 | 233 | ) -> DepNodeIndex, |
ba9703b0 XL |
234 | hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>, |
235 | ) -> (R, DepNodeIndex) { | |
041b39d2 | 236 | if let Some(ref data) = self.data { |
ba9703b0 | 237 | let task_deps = create_task(key).map(Lock::new); |
ea8adc8c XL |
238 | |
239 | // In incremental mode, hash the result of the task. We don't | |
240 | // do anything with the hash yet, but we are computing it | |
241 | // anyway so that | |
242 | // - we make sure that the infrastructure works and | |
243 | // - we can get an idea of the runtime cost. | |
ba9703b0 | 244 | let mut hcx = cx.create_stable_hashing_context(); |
0531ce1d | 245 | |
83c7162d XL |
246 | let result = if no_tcx { |
247 | task(cx, arg) | |
248 | } else { | |
ba9703b0 | 249 | K::with_deps(task_deps.as_ref(), || task(cx, arg)) |
83c7162d | 250 | }; |
0531ce1d | 251 | |
9fa01778 | 252 | let current_fingerprint = hash_result(&mut hcx, &result); |
ea8adc8c | 253 | |
0731742a XL |
254 | let dep_node_index = finish_task_and_alloc_depnode( |
255 | &data.current, | |
256 | key, | |
9fa01778 | 257 | current_fingerprint.unwrap_or(Fingerprint::ZERO), |
0731742a XL |
258 | task_deps.map(|lock| lock.into_inner()), |
259 | ); | |
ea8adc8c | 260 | |
ba9703b0 | 261 | let print_status = cfg!(debug_assertions) && cx.debug_dep_tasks(); |
9fa01778 | 262 | |
ea8adc8c | 263 | // Determine the color of the new DepNode. |
0531ce1d XL |
264 | if let Some(prev_index) = data.previous.node_to_index_opt(&key) { |
265 | let prev_fingerprint = data.previous.fingerprint_by_index(prev_index); | |
ea8adc8c | 266 | |
9fa01778 XL |
267 | let color = if let Some(current_fingerprint) = current_fingerprint { |
268 | if current_fingerprint == prev_fingerprint { | |
269 | if print_status { | |
270 | eprintln!("[task::green] {:?}", key); | |
271 | } | |
272 | DepNodeColor::Green(dep_node_index) | |
273 | } else { | |
274 | if print_status { | |
275 | eprintln!("[task::red] {:?}", key); | |
276 | } | |
277 | DepNodeColor::Red | |
278 | } | |
ea8adc8c | 279 | } else { |
9fa01778 XL |
280 | if print_status { |
281 | eprintln!("[task::unknown] {:?}", key); | |
282 | } | |
283 | // Mark the node as Red if we can't hash the result | |
ea8adc8c XL |
284 | DepNodeColor::Red |
285 | }; | |
286 | ||
dfeec247 XL |
287 | debug_assert!( |
288 | data.colors.get(prev_index).is_none(), | |
289 | "DepGraph::with_task() - Duplicate DepNodeColor \ | |
290 | insertion for {:?}", | |
291 | key | |
292 | ); | |
0531ce1d | 293 | |
9fa01778 | 294 | data.colors.insert(prev_index, color); |
29967ef6 XL |
295 | } else if print_status { |
296 | eprintln!("[task::new] {:?}", key); | |
ea8adc8c XL |
297 | } |
298 | ||
041b39d2 XL |
299 | (result, dep_node_index) |
300 | } else { | |
dfeec247 | 301 | (task(cx, arg), self.next_virtual_depnode_index()) |
041b39d2 | 302 | } |
54a0048b SL |
303 | } |
304 | ||
9fa01778 XL |
305 | /// Executes something within an "anonymous" task, that is, a task the |
306 | /// `DepNode` of which is determined by the list of inputs it read from. | |
ba9703b0 | 307 | pub fn with_anon_task<OP, R>(&self, dep_kind: K, op: OP) -> (R, DepNodeIndex) |
dfeec247 XL |
308 | where |
309 | OP: FnOnce() -> R, | |
041b39d2 XL |
310 | { |
311 | if let Some(ref data) = self.data { | |
ba9703b0 | 312 | let task_deps = Lock::new(TaskDeps::default()); |
dfeec247 | 313 | |
ba9703b0 XL |
314 | let result = K::with_deps(Some(&task_deps), op); |
315 | let task_deps = task_deps.into_inner(); | |
83c7162d | 316 | |
dfeec247 | 317 | let dep_node_index = data.current.complete_anon_task(dep_kind, task_deps); |
ea8adc8c | 318 | (result, dep_node_index) |
041b39d2 | 319 | } else { |
dfeec247 | 320 | (op(), self.next_virtual_depnode_index()) |
041b39d2 XL |
321 | } |
322 | } | |
323 | ||
9fa01778 XL |
324 | /// Executes something within an "eval-always" task which is a task |
325 | /// that runs whenever anything changes. | |
ba9703b0 | 326 | pub fn with_eval_always_task<Ctxt: DepContext<DepKind = K>, A, R>( |
9fa01778 | 327 | &self, |
ba9703b0 XL |
328 | key: DepNode<K>, |
329 | cx: Ctxt, | |
9fa01778 | 330 | arg: A, |
ba9703b0 XL |
331 | task: fn(Ctxt, A) -> R, |
332 | hash_result: impl FnOnce(&mut Ctxt::StableHashingContext, &R) -> Option<Fingerprint>, | |
333 | ) -> (R, DepNodeIndex) { | |
dfeec247 XL |
334 | self.with_task_impl( |
335 | key, | |
336 | cx, | |
337 | arg, | |
338 | false, | |
339 | task, | |
0731742a | 340 | |_| None, |
dfeec247 XL |
341 | |data, key, fingerprint, _| data.alloc_node(key, smallvec![], fingerprint), |
342 | hash_result, | |
343 | ) | |
abe05a73 XL |
344 | } |
345 | ||
041b39d2 | 346 | #[inline] |
ba9703b0 | 347 | pub fn read(&self, v: DepNode<K>) { |
041b39d2 | 348 | if let Some(ref data) = self.data { |
e74abb32 XL |
349 | let map = data.current.node_to_node_index.get_shard_by_value(&v).lock(); |
350 | if let Some(dep_node_index) = map.get(&v).copied() { | |
351 | std::mem::drop(map); | |
0731742a | 352 | data.read_index(dep_node_index); |
ea8adc8c | 353 | } else { |
ba9703b0 | 354 | panic!("DepKind {:?} should be pre-allocated but isn't.", v.kind) |
ea8adc8c | 355 | } |
041b39d2 XL |
356 | } |
357 | } | |
358 | ||
359 | #[inline] | |
ea8adc8c | 360 | pub fn read_index(&self, dep_node_index: DepNodeIndex) { |
041b39d2 | 361 | if let Some(ref data) = self.data { |
0731742a | 362 | data.read_index(dep_node_index); |
c30ab7b3 | 363 | } |
54a0048b SL |
364 | } |
365 | ||
abe05a73 | 366 | #[inline] |
ba9703b0 | 367 | pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex { |
ff7c6d11 XL |
368 | self.data |
369 | .as_ref() | |
370 | .unwrap() | |
371 | .current | |
ff7c6d11 | 372 | .node_to_node_index |
e74abb32 XL |
373 | .get_shard_by_value(dep_node) |
374 | .lock() | |
ff7c6d11 XL |
375 | .get(dep_node) |
376 | .cloned() | |
377 | .unwrap() | |
378 | } | |
379 | ||
0531ce1d | 380 | #[inline] |
ba9703b0 | 381 | pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool { |
0531ce1d | 382 | if let Some(ref data) = self.data { |
e74abb32 XL |
383 | data.current |
384 | .node_to_node_index | |
385 | .get_shard_by_value(&dep_node) | |
386 | .lock() | |
387 | .contains_key(dep_node) | |
0531ce1d XL |
388 | } else { |
389 | false | |
390 | } | |
391 | } | |
392 | ||
ff7c6d11 XL |
393 | #[inline] |
394 | pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint { | |
e74abb32 XL |
395 | let data = self.data.as_ref().expect("dep graph enabled").current.data.lock(); |
396 | data[dep_node_index].fingerprint | |
041b39d2 XL |
397 | } |
398 | ||
ba9703b0 | 399 | pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> { |
ea8adc8c | 400 | self.data.as_ref().unwrap().previous.fingerprint_of(dep_node) |
3b2f2976 XL |
401 | } |
402 | ||
9fa01778 | 403 | /// Checks whether a previous work product exists for `v` and, if |
5bcae85e | 404 | /// so, return the path that leads to it. Used to skip doing work. |
041b39d2 | 405 | pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> { |
dfeec247 | 406 | self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned()) |
5bcae85e SL |
407 | } |
408 | ||
32a655c1 SL |
409 | /// Access the map of work-products created during the cached run. Only |
410 | /// used during saving of the dep-graph. | |
94b46f34 XL |
411 | pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> { |
412 | &self.data.as_ref().unwrap().previous_work_products | |
041b39d2 XL |
413 | } |
414 | ||
415 | #[inline(always)] | |
ba9703b0 | 416 | pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F) |
dfeec247 XL |
417 | where |
418 | F: FnOnce() -> String, | |
041b39d2 | 419 | { |
ea8adc8c | 420 | let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug; |
041b39d2 | 421 | |
ea8adc8c | 422 | if dep_node_debug.borrow().contains_key(&dep_node) { |
dfeec247 | 423 | return; |
ea8adc8c XL |
424 | } |
425 | let debug_str = debug_str_gen(); | |
426 | dep_node_debug.borrow_mut().insert(dep_node, debug_str); | |
041b39d2 XL |
427 | } |
428 | ||
ba9703b0 | 429 | pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> { |
dfeec247 | 430 | self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned() |
32a655c1 | 431 | } |
ea8adc8c | 432 | |
0731742a XL |
433 | pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> { |
434 | if cfg!(debug_assertions) { | |
e74abb32 | 435 | let current_dep_graph = &self.data.as_ref().unwrap().current; |
abe05a73 | 436 | |
dfeec247 XL |
437 | Some(( |
438 | current_dep_graph.total_read_count.load(Relaxed), | |
439 | current_dep_graph.total_duplicate_read_count.load(Relaxed), | |
440 | )) | |
0731742a XL |
441 | } else { |
442 | None | |
443 | } | |
abe05a73 XL |
444 | } |
445 | ||
ba9703b0 | 446 | pub fn serialize(&self) -> SerializedDepGraph<K> { |
e74abb32 | 447 | let data = self.data.as_ref().unwrap().current.data.lock(); |
ea8adc8c | 448 | |
0731742a | 449 | let fingerprints: IndexVec<SerializedDepNodeIndex, _> = |
e74abb32 | 450 | data.iter().map(|d| d.fingerprint).collect(); |
dfeec247 | 451 | let nodes: IndexVec<SerializedDepNodeIndex, _> = data.iter().map(|d| d.node).collect(); |
ea8adc8c | 452 | |
e74abb32 | 453 | let total_edge_count: usize = data.iter().map(|d| d.edges.len()).sum(); |
ea8adc8c XL |
454 | |
455 | let mut edge_list_indices = IndexVec::with_capacity(nodes.len()); | |
456 | let mut edge_list_data = Vec::with_capacity(total_edge_count); | |
457 | ||
e74abb32 | 458 | for (current_dep_node_index, edges) in data.iter_enumerated().map(|(i, d)| (i, &d.edges)) { |
ea8adc8c XL |
459 | let start = edge_list_data.len() as u32; |
460 | // This should really just be a memcpy :/ | |
461 | edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index()))); | |
462 | let end = edge_list_data.len() as u32; | |
463 | ||
464 | debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len()); | |
465 | edge_list_indices.push((start, end)); | |
466 | } | |
467 | ||
74b04a01 | 468 | debug_assert!(edge_list_data.len() <= u32::MAX as usize); |
ea8adc8c XL |
469 | debug_assert_eq!(edge_list_data.len(), total_edge_count); |
470 | ||
dfeec247 | 471 | SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data } |
ea8adc8c XL |
472 | } |
473 | ||
ba9703b0 | 474 | pub fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> { |
0531ce1d XL |
475 | if let Some(ref data) = self.data { |
476 | if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) { | |
dfeec247 | 477 | return data.colors.get(prev_index); |
0531ce1d XL |
478 | } else { |
479 | // This is a node that did not exist in the previous compilation | |
480 | // session, so we consider it to be red. | |
dfeec247 | 481 | return Some(DepNodeColor::Red); |
0531ce1d XL |
482 | } |
483 | } | |
484 | ||
485 | None | |
ea8adc8c XL |
486 | } |
487 | ||
9fa01778 XL |
488 | /// Try to read a node index for the node dep_node. |
489 | /// A node will have an index, when it's already been marked green, or when we can mark it | |
490 | /// green. This function will mark the current task as a reader of the specified node, when | |
491 | /// a node index can be found for that node. | |
ba9703b0 | 492 | pub fn try_mark_green_and_read<Ctxt: DepContext<DepKind = K>>( |
9fa01778 | 493 | &self, |
ba9703b0 XL |
494 | tcx: Ctxt, |
495 | dep_node: &DepNode<K>, | |
9fa01778 XL |
496 | ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> { |
497 | self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| { | |
498 | debug_assert!(self.is_green(&dep_node)); | |
499 | self.read_index(dep_node_index); | |
500 | (prev_index, dep_node_index) | |
501 | }) | |
502 | } | |
ea8adc8c | 503 | |
ba9703b0 | 504 | pub fn try_mark_green<Ctxt: DepContext<DepKind = K>>( |
9fa01778 | 505 | &self, |
ba9703b0 XL |
506 | tcx: Ctxt, |
507 | dep_node: &DepNode<K>, | |
9fa01778 | 508 | ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> { |
532ac7d7 | 509 | debug_assert!(!dep_node.kind.is_eval_always()); |
9fa01778 XL |
510 | |
511 | // Return None if the dep graph is disabled | |
512 | let data = self.data.as_ref()?; | |
513 | ||
514 | // Return None if the dep node didn't exist in the previous session | |
515 | let prev_index = data.previous.node_to_index_opt(dep_node)?; | |
ea8adc8c | 516 | |
9fa01778 XL |
517 | match data.colors.get(prev_index) { |
518 | Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)), | |
519 | Some(DepNodeColor::Red) => None, | |
520 | None => { | |
ea8adc8c XL |
521 | // This DepNode and the corresponding query invocation existed |
522 | // in the previous compilation session too, so we can try to | |
523 | // mark it as green by recursively marking all of its | |
524 | // dependencies green. | |
dfeec247 XL |
525 | self.try_mark_previous_green(tcx, data, prev_index, &dep_node) |
526 | .map(|dep_node_index| (prev_index, dep_node_index)) | |
ea8adc8c | 527 | } |
9fa01778 XL |
528 | } |
529 | } | |
530 | ||
531 | /// Try to mark a dep-node which existed in the previous compilation session as green. | |
ba9703b0 | 532 | fn try_mark_previous_green<Ctxt: DepContext<DepKind = K>>( |
9fa01778 | 533 | &self, |
ba9703b0 XL |
534 | tcx: Ctxt, |
535 | data: &DepGraphData<K>, | |
9fa01778 | 536 | prev_dep_node_index: SerializedDepNodeIndex, |
ba9703b0 | 537 | dep_node: &DepNode<K>, |
9fa01778 XL |
538 | ) -> Option<DepNodeIndex> { |
539 | debug!("try_mark_previous_green({:?}) - BEGIN", dep_node); | |
540 | ||
541 | #[cfg(not(parallel_compiler))] | |
542 | { | |
dfeec247 XL |
543 | debug_assert!( |
544 | !data | |
545 | .current | |
546 | .node_to_node_index | |
547 | .get_shard_by_value(dep_node) | |
548 | .lock() | |
549 | .contains_key(dep_node) | |
550 | ); | |
9fa01778 XL |
551 | debug_assert!(data.colors.get(prev_dep_node_index).is_none()); |
552 | } | |
ea8adc8c | 553 | |
532ac7d7 XL |
554 | // We never try to mark eval_always nodes as green |
555 | debug_assert!(!dep_node.kind.is_eval_always()); | |
9fa01778 XL |
556 | |
557 | debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node); | |
558 | ||
559 | let prev_deps = data.previous.edge_targets_from(prev_dep_node_index); | |
0531ce1d | 560 | |
94b46f34 | 561 | let mut current_deps = SmallVec::new(); |
ea8adc8c XL |
562 | |
563 | for &dep_dep_node_index in prev_deps { | |
9fa01778 | 564 | let dep_dep_node_color = data.colors.get(dep_dep_node_index); |
ea8adc8c | 565 | |
ea8adc8c XL |
566 | match dep_dep_node_color { |
567 | Some(DepNodeColor::Green(node_index)) => { | |
568 | // This dependency has been marked as green before, we are | |
569 | // still fine and can continue with checking the other | |
570 | // dependencies. | |
dfeec247 XL |
571 | debug!( |
572 | "try_mark_previous_green({:?}) --- found dependency {:?} to \ | |
0531ce1d | 573 | be immediately green", |
dfeec247 XL |
574 | dep_node, |
575 | data.previous.index_to_node(dep_dep_node_index) | |
576 | ); | |
ea8adc8c XL |
577 | current_deps.push(node_index); |
578 | } | |
579 | Some(DepNodeColor::Red) => { | |
580 | // We found a dependency the value of which has changed | |
581 | // compared to the previous compilation session. We cannot | |
582 | // mark the DepNode as green and also don't need to bother | |
583 | // with checking any of the other dependencies. | |
dfeec247 XL |
584 | debug!( |
585 | "try_mark_previous_green({:?}) - END - dependency {:?} was \ | |
0531ce1d | 586 | immediately red", |
dfeec247 XL |
587 | dep_node, |
588 | data.previous.index_to_node(dep_dep_node_index) | |
589 | ); | |
590 | return None; | |
ea8adc8c XL |
591 | } |
592 | None => { | |
0531ce1d XL |
593 | let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index); |
594 | ||
abe05a73 | 595 | // We don't know the state of this dependency. If it isn't |
532ac7d7 XL |
596 | // an eval_always node, let's try to mark it green recursively. |
597 | if !dep_dep_node.kind.is_eval_always() { | |
dfeec247 XL |
598 | debug!( |
599 | "try_mark_previous_green({:?}) --- state of dependency {:?} \ | |
600 | is unknown, trying to mark it green", | |
601 | dep_node, dep_dep_node | |
602 | ); | |
abe05a73 | 603 | |
9fa01778 XL |
604 | let node_index = self.try_mark_previous_green( |
605 | tcx, | |
606 | data, | |
607 | dep_dep_node_index, | |
dfeec247 | 608 | dep_dep_node, |
9fa01778 XL |
609 | ); |
610 | if let Some(node_index) = node_index { | |
dfeec247 XL |
611 | debug!( |
612 | "try_mark_previous_green({:?}) --- managed to MARK \ | |
613 | dependency {:?} as green", | |
614 | dep_node, dep_dep_node | |
615 | ); | |
abe05a73 XL |
616 | current_deps.push(node_index); |
617 | continue; | |
618 | } | |
ea8adc8c | 619 | } |
abe05a73 XL |
620 | |
621 | // We failed to mark it green, so we try to force the query. | |
dfeec247 XL |
622 | debug!( |
623 | "try_mark_previous_green({:?}) --- trying to force \ | |
624 | dependency {:?}", | |
625 | dep_node, dep_dep_node | |
626 | ); | |
ba9703b0 | 627 | if tcx.try_force_from_dep_node(dep_dep_node) { |
9fa01778 | 628 | let dep_dep_node_color = data.colors.get(dep_dep_node_index); |
0531ce1d | 629 | |
abe05a73 XL |
630 | match dep_dep_node_color { |
631 | Some(DepNodeColor::Green(node_index)) => { | |
dfeec247 XL |
632 | debug!( |
633 | "try_mark_previous_green({:?}) --- managed to \ | |
abe05a73 | 634 | FORCE dependency {:?} to green", |
dfeec247 XL |
635 | dep_node, dep_dep_node |
636 | ); | |
abe05a73 XL |
637 | current_deps.push(node_index); |
638 | } | |
639 | Some(DepNodeColor::Red) => { | |
dfeec247 XL |
640 | debug!( |
641 | "try_mark_previous_green({:?}) - END - \ | |
abe05a73 | 642 | dependency {:?} was red after forcing", |
dfeec247 XL |
643 | dep_node, dep_dep_node |
644 | ); | |
645 | return None; | |
abe05a73 XL |
646 | } |
647 | None => { | |
ba9703b0 XL |
648 | if !tcx.has_errors_or_delayed_span_bugs() { |
649 | panic!( | |
dfeec247 XL |
650 | "try_mark_previous_green() - Forcing the DepNode \ |
651 | should have set its color" | |
652 | ) | |
0531ce1d | 653 | } else { |
60c5eb7d XL |
654 | // If the query we just forced has resulted in |
655 | // some kind of compilation error, we cannot rely on | |
656 | // the dep-node color having been properly updated. | |
657 | // This means that the query system has reached an | |
658 | // invalid state. We let the compiler continue (by | |
659 | // returning `None`) so it can emit error messages | |
660 | // and wind down, but rely on the fact that this | |
661 | // invalid state will not be persisted to the | |
662 | // incremental compilation cache because of | |
663 | // compilation errors being present. | |
dfeec247 XL |
664 | debug!( |
665 | "try_mark_previous_green({:?}) - END - \ | |
60c5eb7d | 666 | dependency {:?} resulted in compilation error", |
dfeec247 XL |
667 | dep_node, dep_dep_node |
668 | ); | |
669 | return None; | |
0531ce1d | 670 | } |
abe05a73 XL |
671 | } |
672 | } | |
673 | } else { | |
674 | // The DepNode could not be forced. | |
dfeec247 XL |
675 | debug!( |
676 | "try_mark_previous_green({:?}) - END - dependency {:?} \ | |
677 | could not be forced", | |
678 | dep_node, dep_dep_node | |
679 | ); | |
680 | return None; | |
abe05a73 | 681 | } |
ea8adc8c XL |
682 | } |
683 | } | |
684 | } | |
685 | ||
ea8adc8c XL |
686 | // If we got here without hitting a `return` that means that all |
687 | // dependencies of this DepNode could be marked as green. Therefore we | |
83c7162d | 688 | // can also mark this DepNode as green. |
ea8adc8c | 689 | |
83c7162d XL |
690 | // There may be multiple threads trying to mark the same dep node green concurrently |
691 | ||
416331ca | 692 | let dep_node_index = { |
0731742a XL |
693 | // Copy the fingerprint from the previous graph, |
694 | // so we don't have to recompute it | |
ff7c6d11 | 695 | let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index); |
ff7c6d11 | 696 | |
0731742a XL |
697 | // We allocating an entry for the node in the current dependency graph and |
698 | // adding all the appropriate edges imported from the previous graph | |
e74abb32 | 699 | data.current.intern_node(*dep_node, current_deps, fingerprint) |
0731742a | 700 | }; |
ea8adc8c | 701 | |
abe05a73 | 702 | // ... emitting any stored diagnostic ... |
abe05a73 | 703 | |
416331ca XL |
704 | // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere |
705 | // Maybe store a list on disk and encode this fact in the DepNodeState | |
ba9703b0 | 706 | let diagnostics = tcx.load_diagnostics(prev_dep_node_index); |
416331ca XL |
707 | |
708 | #[cfg(not(parallel_compiler))] | |
dfeec247 XL |
709 | debug_assert!( |
710 | data.colors.get(prev_dep_node_index).is_none(), | |
711 | "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \ | |
712 | insertion for {:?}", | |
713 | dep_node | |
714 | ); | |
abe05a73 | 715 | |
74b04a01 | 716 | if unlikely!(!diagnostics.is_empty()) { |
dfeec247 | 717 | self.emit_diagnostics(tcx, data, dep_node_index, prev_dep_node_index, diagnostics); |
abe05a73 XL |
718 | } |
719 | ||
ea8adc8c | 720 | // ... and finally storing a "Green" entry in the color map. |
83c7162d | 721 | // Multiple threads can all write the same color here |
9fa01778 | 722 | data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index)); |
0531ce1d | 723 | |
9fa01778 | 724 | debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node); |
ea8adc8c XL |
725 | Some(dep_node_index) |
726 | } | |
727 | ||
416331ca XL |
    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    ///
    /// Protocol: exactly one thread wins the right to emit — the first to add
    /// `dep_node_index` to the `emitting_diagnostics` set while holding its lock.
    /// The winner promotes the diagnostics into the current session, emits them,
    /// publishes `DepNodeColor::Green` for the node, and wakes all waiters.
    /// Every other thread parks on the condvar until the node's color turns green.
    #[cold]
    #[inline(never)]
    fn emit_diagnostics<Ctxt: DepContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        dep_node_index: DepNodeIndex,
        prev_dep_node_index: SerializedDepNodeIndex,
        diagnostics: Vec<Diagnostic>,
    ) {
        let mut emitting = data.emitting_diagnostics.lock();

        if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) {
            // The node is already green so diagnostics must have been emitted already
            return;
        }

        if emitting.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must emit the diagnostics and signal other potentially waiting
            // threads after.
            // Release the set's lock while emitting so concurrent callers can
            // reach the `else` branch and park on the condvar instead of
            // blocking on this lock.
            mem::drop(emitting);

            // Promote the previous diagnostics to the current session.
            tcx.store_diagnostics(dep_node_index, diagnostics.clone().into());

            let handle = tcx.diagnostic();

            for diagnostic in diagnostics {
                handle.emit_diagnostic(&diagnostic);
            }

            // Mark the node as green now that diagnostics are emitted.
            // Waiters detect completion by observing this color, so it must be
            // set before `notify_all` below.
            data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

            // Remove the node from the set
            data.emitting_diagnostics.lock().remove(&dep_node_index);

            // Wake up waiters
            data.emitting_diagnostics_cond_var.notify_all();
        } else {
            // We must wait for the other thread to finish emitting the diagnostic

            loop {
                data.emitting_diagnostics_cond_var.wait(&mut emitting);
                // Re-check the color on every wakeup: condvar waits can wake
                // spuriously or due to a notification for a different node.
                if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index))
                {
                    break;
                }
            }
        }
    }
782 | ||
0531ce1d XL |
783 | // Returns true if the given node has been marked as green during the |
784 | // current compilation session. Used in various assertions | |
ba9703b0 | 785 | pub fn is_green(&self, dep_node: &DepNode<K>) -> bool { |
0531ce1d | 786 | self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false) |
ea8adc8c XL |
787 | } |
788 | ||
ff7c6d11 XL |
789 | // This method loads all on-disk cacheable query results into memory, so |
790 | // they can be written out to the new cache file again. Most query results | |
791 | // will already be in memory but in the case where we marked something as | |
792 | // green but then did not need the value, that value will never have been | |
793 | // loaded from disk. | |
794 | // | |
795 | // This method will only load queries that will end up in the disk cache. | |
796 | // Other queries will not be executed. | |
ba9703b0 XL |
797 | pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) { |
798 | let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion"); | |
e74abb32 | 799 | |
dc9dc135 XL |
800 | let data = self.data.as_ref().unwrap(); |
801 | for prev_index in data.colors.values.indices() { | |
802 | match data.colors.get(prev_index) { | |
803 | Some(DepNodeColor::Green(_)) => { | |
804 | let dep_node = data.previous.index_to_node(prev_index); | |
ba9703b0 | 805 | tcx.try_load_from_on_disk_cache(&dep_node); |
ff7c6d11 | 806 | } |
dfeec247 | 807 | None | Some(DepNodeColor::Red) => { |
dc9dc135 XL |
808 | // We can skip red nodes because a node can only be marked |
809 | // as red if the query result was recomputed and thus is | |
810 | // already in memory. | |
811 | } | |
812 | } | |
ff7c6d11 XL |
813 | } |
814 | } | |
dfeec247 XL |
815 | |
816 | fn next_virtual_depnode_index(&self) -> DepNodeIndex { | |
817 | let index = self.virtual_dep_node_index.fetch_add(1, Relaxed); | |
818 | DepNodeIndex::from_u32(index) | |
819 | } | |
5bcae85e SL |
820 | } |
821 | ||
/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example are
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    /// Name of the codegen unit (CGU) this work product was produced for.
    pub cgu_name: String,
    /// Saved file associated with this CGU.
    pub saved_file: Option<String>,
}
ea8adc8c | 859 | |
/// Per-node payload stored in `CurrentDepGraph::data`: the node's identity,
/// its outgoing dependency edges, and the fingerprint of its result.
#[derive(Clone)]
struct DepNodeData<K> {
    /// The dep node this entry describes.
    node: DepNode<K>,
    /// Indices of the nodes this node was recorded as reading from.
    edges: EdgesVec,
    /// Fingerprint of the node's value in this session.
    fingerprint: Fingerprint,
}
866 | ||
e74abb32 XL |
/// `CurrentDepGraph` stores the dependency graph for the current session.
/// It will be populated as we run queries or tasks.
///
/// The nodes in it are identified by an index (`DepNodeIndex`).
/// The data for each node is stored in its `DepNodeData`, found in the `data` field.
///
/// We never remove nodes from the graph: they are only added.
///
/// This struct uses two locks internally. The `data` and `node_to_node_index` fields are
/// locked separately. Operations that take a `DepNodeIndex` typically just access
/// the data field.
///
/// The only operation that must manipulate both locks is adding new nodes, in which case
/// we first acquire the `node_to_node_index` lock and then, once a new node is to be inserted,
/// acquire the lock on `data.`
pub(super) struct CurrentDepGraph<K> {
    /// Dense per-node storage, indexed by `DepNodeIndex`; guarded by its own
    /// lock separately from `node_to_node_index` (see struct docs above).
    data: Lock<IndexVec<DepNodeIndex, DepNodeData<K>>>,
    /// Maps a `DepNode` to its index in `data`. Sharded to reduce lock
    /// contention; locked independently of `data`.
    node_to_node_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[allow(dead_code)]
    forbidden_edge: Option<EdgeFilter>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}
909 | ||
ba9703b0 XL |
impl<K: DepKind> CurrentDepGraph<K> {
    /// Creates an empty graph for the current session, pre-sized from the
    /// node count of the previous session's graph.
    fn new(prev_graph_node_count: usize) -> CurrentDepGraph<K> {
        use std::time::{SystemTime, UNIX_EPOCH};

        // Derive `anon_id_seed` from the current time so that anonymous node
        // IDs from different sessions don't overlap (see the docs on the
        // `anon_id_seed` field).
        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        // `RUST_FORBID_DEP_GRAPH_EDGE` installs a debug-only edge trap that
        // panics when a matching edge is recorded (checked in `read_index`).
        let forbidden_edge = if cfg!(debug_assertions) {
            match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
                Ok(s) => match EdgeFilter::new(&s) {
                    Ok(f) => Some(f),
                    Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
                },
                Err(_) => None,
            }
        } else {
            None
        };

        // Pre-allocate the dep node structures. We over-allocate a little so
        // that we hopefully don't have to re-allocate during this compilation
        // session. The over-allocation is 2% plus a small constant to account
        // for the fact that in very small crates 2% might not be enough.
        let new_node_count_estimate = (prev_graph_node_count * 102) / 100 + 200;

        CurrentDepGraph {
            data: Lock::new(IndexVec::with_capacity(new_node_count_estimate)),
            node_to_node_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            anon_id_seed: stable_hasher.finish(),
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    /// Turns the dependencies collected for a finished (named) task into a
    /// node in the graph, returning its index.
    fn complete_task(
        &self,
        node: DepNode<K>,
        task_deps: TaskDeps<K>,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        self.alloc_node(node, task_deps.reads, fingerprint)
    }

    /// Interns the node for an anonymous task, whose identity is computed
    /// from the set of dependencies it read rather than given by the caller.
    fn complete_anon_task(&self, kind: K, task_deps: TaskDeps<K>) -> DepNodeIndex {
        debug_assert!(!kind.is_eval_always());

        let mut hasher = StableHasher::new();

        // The dep node indices are hashed here instead of hashing the dep nodes of the
        // dependencies. These indices may refer to different nodes per session, but this isn't
        // a problem here because we ensure that the final dep node hash is unique per session
        // by combining it with the per-session random number `anon_id_seed`. This hash only
        // needs to map the dependencies to a single value on a per-session basis.
        task_deps.reads.hash(&mut hasher);

        let target_dep_node = DepNode {
            kind,

            // Fingerprint::combine() is faster than sending Fingerprint
            // through the StableHasher (at least as long as StableHasher
            // is so slow).
            hash: self.anon_id_seed.combine(hasher.finish()),
        };

        // Anonymous nodes carry no meaningful result fingerprint of their own.
        self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO)
    }

    /// Interns a node that is expected not to exist yet; the absence is
    /// checked in debug builds only.
    fn alloc_node(
        &self,
        dep_node: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        debug_assert!(
            !self.node_to_node_index.get_shard_by_value(&dep_node).lock().contains_key(&dep_node)
        );
        self.intern_node(dep_node, edges, fingerprint)
    }

    /// Returns the index of `dep_node`, allocating a fresh entry in `data` if
    /// the node has not been interned yet.
    ///
    /// Lock order: the `node_to_node_index` shard lock is taken first, then
    /// (only on insertion) the `data` lock — consistent with the locking
    /// contract in the `CurrentDepGraph` docs.
    fn intern_node(
        &self,
        dep_node: DepNode<K>,
        edges: EdgesVec,
        fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        match self.node_to_node_index.get_shard_by_value(&dep_node).lock().entry(dep_node) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let mut data = self.data.lock();
                // New nodes are appended, so the next index is the length.
                let dep_node_index = DepNodeIndex::new(data.len());
                data.push(DepNodeData { node: dep_node, edges, fingerprint });
                entry.insert(dep_node_index);
                dep_node_index
            }
        }
    }
}
abe05a73 | 1015 | |
impl<K: DepKind> DepGraphData<K> {
    /// Records that the task currently being executed (if any) read the node
    /// at `source`, deduplicating repeated reads of the same node.
    #[inline(never)]
    fn read_index(&self, source: DepNodeIndex) {
        K::read_deps(|task_deps| {
            if let Some(task_deps) = task_deps {
                let mut task_deps = task_deps.lock();
                let task_deps = &mut *task_deps;
                if cfg!(debug_assertions) {
                    self.current.total_read_count.fetch_add(1, Relaxed);
                }

                // As long as we only have a low number of reads we can avoid doing a hash
                // insert and potentially allocating/reallocating the hashmap
                let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
                    // Below the cap: linear scan of the small `reads` vec.
                    task_deps.reads.iter().all(|other| *other != source)
                } else {
                    // At or above the cap: `read_set` mirrors `reads` and
                    // gives O(1) duplicate detection.
                    task_deps.read_set.insert(source)
                };
                if new_read {
                    task_deps.reads.push(source);
                    if task_deps.reads.len() == TASK_DEPS_READS_CAP {
                        // Fill `read_set` with what we have so far so we can use the hashset next
                        // time
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    // Debug-only trap: panic if this read creates an edge that
                    // matches the `RUST_FORBID_DEP_GRAPH_EDGE` filter.
                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            let data = self.current.data.lock();
                            if let Some(ref forbidden_edge) = self.current.forbidden_edge {
                                let source = data[source].node;
                                if forbidden_edge.test(&source, &target) {
                                    panic!("forbidden edge {:?} -> {:?} created", source, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    self.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                }
            }
        })
    }
}
1061 | ||
ba9703b0 XL |
/// The capacity of the `reads` field `SmallVec`
const TASK_DEPS_READS_CAP: usize = 8;
/// Edge list of a single node: stored inline up to `TASK_DEPS_READS_CAP`
/// entries before spilling to the heap.
type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
1065 | ||
/// The set of dependencies read so far by a task in flight.
pub struct TaskDeps<K> {
    /// The node of the task these reads belong to; only used by the
    /// `forbidden_edge` debug check in `read_index`.
    #[cfg(debug_assertions)]
    node: Option<DepNode<K>>,
    /// Recorded reads, in order of first occurrence.
    reads: EdgesVec,
    /// Deduplication set mirroring `reads`; only populated once `reads`
    /// reaches `TASK_DEPS_READS_CAP` (see `read_index`).
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode<K>>,
}
1073 | ||
1074 | impl<K> Default for TaskDeps<K> { | |
1075 | fn default() -> Self { | |
1076 | Self { | |
1077 | #[cfg(debug_assertions)] | |
1078 | node: None, | |
1079 | reads: EdgesVec::new(), | |
1080 | read_set: FxHashSet::default(), | |
1081 | phantom_data: PhantomData, | |
1082 | } | |
1083 | } | |
83c7162d XL |
1084 | } |
1085 | ||
0531ce1d XL |
// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
//
// Encoding: 0 = no color yet, 1 = red, and any value v >= 2 means
// green with DepNodeIndex v - 2 (see the COMPRESSED_* constants below).
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

// `Option<DepNodeColor>` is packed into a single u32 per node:
const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
// Values >= COMPRESSED_FIRST_GREEN encode Green(value - COMPRESSED_FIRST_GREEN).
const COMPRESSED_FIRST_GREEN: u32 = 2;
1095 | ||
impl DepNodeColorMap {
    /// Creates a map of `size` entries, all initially uncolored.
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    /// Reads the color of the node at `index`, decoding the packed u32.
    ///
    /// The `Acquire` load pairs with the `Release` store in `insert`, so a
    /// thread that observes a color also observes the writes the coloring
    /// thread made before publishing it.
    #[inline]
    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    /// Publishes `color` for the node at `index` (see `get` for the pairing
    /// `Acquire` load). Multiple threads may store the same color.
    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}