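//! Code to save the dep-graph, work-product index, and query result cache
//! of an incremental compilation session to disk.
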
use rustc::dep_graph::{DepGraph, DepKind, WorkProduct, WorkProductId};
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc::util::common::time;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::join;
use rustc_serialize::Encodable as RustcEncodable;
use rustc_serialize::opaque::Encoder;
use std::fs;
use std::path::PathBuf;

use super::data::*;
use super::fs::*;
use super::dirty_clean;
use super::file_format;
use super::work_product;

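/// Saves the dep-graph for the current session, along with the query result
/// cache when `debugging_opts.incremental_queries` is set, then checks any
/// `#[rustc_dirty]`/`#[rustc_clean]` test annotations. Does nothing if the
/// session is not incremental.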
pub fn save_dep_graph(tcx: TyCtxt<'_>) {
    debug!("save_dep_graph()");
    tcx.dep_graph.with_ignore(|| {
        let sess = tcx.sess;
        if sess.opts.incremental.is_none() {
            return;
        }

        let query_cache_path = query_cache_path(sess);
        let dep_graph_path = dep_graph_path(sess);

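        // The query cache and the dep-graph are written out independently;
        // `join` lets the two saves run in parallel when the compiler is
        // built with parallel queries, and sequentially otherwise.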
        join(move || {
            if tcx.sess.opts.debugging_opts.incremental_queries {
                time(sess, "persist query result cache", || {
                    save_in(sess,
                            query_cache_path,
                            |e| encode_query_cache(tcx, e));
                });
            }
        }, || {
            time(sess, "persist dep-graph", || {
                save_in(sess,
                        dep_graph_path,
                        |e| {
                            time(sess, "encode dep-graph", || {
                                encode_dep_graph(tcx, e)
                            })
                        });
            });
        });

        dirty_clean::check_dirty_clean_annotations(tcx);
    })
}

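/// Saves the work-product index for this session and deletes the files of
/// work products left over from the previous session that are no longer
/// referenced.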
pub fn save_work_product_index(sess: &Session,
                               dep_graph: &DepGraph,
                               new_work_products: FxHashMap<WorkProductId, WorkProduct>) {
    if sess.opts.incremental.is_none() {
        return;
    }

    debug!("save_work_product_index()");
    dep_graph.assert_ignored();
    let path = work_products_path(sess);
    save_in(sess, path, |e| encode_work_product_index(&new_work_products, e));

    // We also need to clean out old work-products, as not all of them are
    // deleted during invalidation. Some object files don't change their
    // content, they are just not needed anymore.
    let previous_work_products = dep_graph.previous_work_products();
    for (id, wp) in previous_work_products.iter() {
        if !new_work_products.contains_key(id) {
            work_product::delete_workproduct_files(sess, wp);
            debug_assert!(wp.saved_files.iter().all(|&(_, ref file_name)| {
                !in_incr_comp_dir_sess(sess, file_name).exists()
            }));
        }
    }

    // Check that we did not delete one of the current work-products:
    debug_assert!({
        new_work_products.iter()
                         .flat_map(|(_, wp)| wp.saved_files
                                               .iter()
                                               .map(|&(_, ref name)| name))
                         .map(|name| in_incr_comp_dir_sess(sess, name))
                         .all(|path| path.exists())
    });
}

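/// Encodes data with `encode` (prefixed by the standard file-format header)
/// into an in-memory buffer, then writes the buffer to `path_buf`.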
fn save_in<F>(sess: &Session, path_buf: PathBuf, encode: F)
    where F: FnOnce(&mut Encoder)
{
    debug!("save: storing data in {}", path_buf.display());

    // Delete the old file, if any. Note: it's important that we actually
    // delete it and not just truncate and overwrite it, since it might be a
    // shared hard-link, the underlying data of which we don't want to modify.
    if path_buf.exists() {
        match fs::remove_file(&path_buf) {
            Ok(()) => {
                debug!("save: remove old file");
            }
            Err(err) => {
                sess.err(&format!("unable to delete old dep-graph at `{}`: {}",
                                  path_buf.display(),
                                  err));
                return;
            }
        }
    }

    // Generate the data in a memory buffer.
    let mut encoder = Encoder::new(Vec::new());
    file_format::write_file_header(&mut encoder);
    encode(&mut encoder);

    // Write the data out.
    let data = encoder.into_inner();
    match fs::write(&path_buf, data) {
        Ok(_) => {
            debug!("save: data written to disk successfully");
        }
        Err(err) => {
            sess.err(&format!("failed to write dep-graph to `{}`: {}",
                              path_buf.display(),
                              err));
            return;
        }
    }
}

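/// Encodes the dep-graph: first the hash of the dep-tracked commandline
/// options (which lets the loader reject a cache produced by an incompatible
/// configuration), then the serialized graph itself.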
fn encode_dep_graph(tcx: TyCtxt<'_>, encoder: &mut Encoder) {
    // First encode the commandline arguments hash
    tcx.sess.opts.dep_tracking_hash().encode(encoder).unwrap();

    // Encode the graph data.
    let serialized_graph = time(tcx.sess, "getting serialized graph", || {
        tcx.dep_graph.serialize()
    });

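    // With `-Z incremental-info`, print per-DepKind node and edge statistics
    // for the graph before encoding it.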
    if tcx.sess.opts.debugging_opts.incremental_info {
        #[derive(Clone)]
        struct Stat {
            kind: DepKind,
            node_counter: u64,
            edge_counter: u64,
        }

        let total_node_count = serialized_graph.nodes.len();
        let total_edge_count = serialized_graph.edge_list_data.len();

        let mut counts: FxHashMap<_, Stat> = FxHashMap::default();

        for (i, &node) in serialized_graph.nodes.iter_enumerated() {
            let stat = counts.entry(node.kind).or_insert(Stat {
                kind: node.kind,
                node_counter: 0,
                edge_counter: 0,
            });

            stat.node_counter += 1;
            let (edge_start, edge_end) = serialized_graph.edge_list_indices[i];
            stat.edge_counter += (edge_end - edge_start) as u64;
        }

        let mut counts: Vec<_> = counts.values().cloned().collect();
        counts.sort_by_key(|s| -(s.node_counter as i64));

        let percentage_of_all_nodes: Vec<f64> = counts.iter().map(|s| {
            (100.0 * (s.node_counter as f64)) / (total_node_count as f64)
        }).collect();

        let average_edges_per_kind: Vec<f64> = counts.iter().map(|s| {
            (s.edge_counter as f64) / (s.node_counter as f64)
        }).collect();

        println!("[incremental]");
        println!("[incremental] DepGraph Statistics");

        const SEPARATOR: &str = "[incremental] --------------------------------\
                                 ----------------------------------------------\
                                 ------------";

        println!("{}", SEPARATOR);
        println!("[incremental]");
        println!("[incremental] Total Node Count: {}", total_node_count);
        println!("[incremental] Total Edge Count: {}", total_edge_count);
        if let Some((total_edge_reads,
                     total_duplicate_edge_reads)) = tcx.dep_graph.edge_deduplication_data() {
            println!("[incremental] Total Edge Reads: {}", total_edge_reads);
            println!("[incremental] Total Duplicate Edge Reads: {}", total_duplicate_edge_reads);
        }
        println!("[incremental]");
        println!("[incremental] {:<36}| {:<17}| {:<12}| {:<17}|",
                 "Node Kind",
                 "Node Frequency",
                 "Node Count",
                 "Avg. Edge Count");
        println!("[incremental] -------------------------------------\
                  |------------------\
                  |-------------\
                  |------------------|");

        for (i, stat) in counts.iter().enumerate() {
            println!("[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
                     format!("{:?}", stat.kind),
                     percentage_of_all_nodes[i],
                     stat.node_counter,
                     average_edges_per_kind[i]);
        }

        println!("{}", SEPARATOR);
        println!("[incremental]");
    }

    time(tcx.sess, "encoding serialized graph", || {
        serialized_graph.encode(encoder).unwrap();
    });
}

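/// Encodes the work-product index as a flat vector of `SerializedWorkProduct`
/// entries.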
fn encode_work_product_index(work_products: &FxHashMap<WorkProductId, WorkProduct>,
                             encoder: &mut Encoder) {
    let serialized_products: Vec<_> = work_products
        .iter()
        .map(|(id, work_product)| {
            SerializedWorkProduct {
                id: id.clone(),
                work_product: work_product.clone(),
            }
        })
        .collect();

    serialized_products.encode(encoder).unwrap();
}

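/// Encodes the query result cache by delegating to
/// `TyCtxt::serialize_query_result_cache`, timing the operation.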
fn encode_query_cache(tcx: TyCtxt<'_>, encoder: &mut Encoder) {
    time(tcx.sess, "serialize query result cache", || {
        tcx.serialize_query_result_cache(encoder).unwrap();
    })
}