]> git.proxmox.com Git - rustc.git/blob - src/librustc_incremental/persist/save.rs
New upstream version 1.27.1+dfsg1
[rustc.git] / src / librustc_incremental / persist / save.rs
1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use rustc::dep_graph::{DepGraph, DepKind};
12 use rustc::session::Session;
13 use rustc::ty::TyCtxt;
14 use rustc::util::common::time;
15 use rustc_data_structures::fx::FxHashMap;
16 use rustc_serialize::Encodable as RustcEncodable;
17 use rustc_serialize::opaque::Encoder;
18 use std::io::{self, Cursor};
19 use std::fs;
20 use std::path::PathBuf;
21
22 use super::data::*;
23 use super::fs::*;
24 use super::dirty_clean;
25 use super::file_format;
26 use super::work_product;
27
28 pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
29 debug!("save_dep_graph()");
30 tcx.dep_graph.with_ignore(|| {
31 let sess = tcx.sess;
32 if sess.opts.incremental.is_none() {
33 return;
34 }
35
36 time(sess, "persist query result cache", || {
37 save_in(sess,
38 query_cache_path(sess),
39 |e| encode_query_cache(tcx, e));
40 });
41
42 if tcx.sess.opts.debugging_opts.incremental_queries {
43 time(sess, "persist dep-graph", || {
44 save_in(sess,
45 dep_graph_path(sess),
46 |e| {
47 time(sess, "encode dep-graph", || {
48 encode_dep_graph(tcx, e)
49 })
50 });
51 });
52 }
53
54 dirty_clean::check_dirty_clean_annotations(tcx);
55 })
56 }
57
58 pub fn save_work_products(sess: &Session, dep_graph: &DepGraph) {
59 if sess.opts.incremental.is_none() {
60 return;
61 }
62
63 debug!("save_work_products()");
64 dep_graph.assert_ignored();
65 let path = work_products_path(sess);
66 save_in(sess, path, |e| encode_work_products(dep_graph, e));
67
68 // We also need to clean out old work-products, as not all of them are
69 // deleted during invalidation. Some object files don't change their
70 // content, they are just not needed anymore.
71 let new_work_products = dep_graph.work_products();
72 let previous_work_products = dep_graph.previous_work_products();
73
74 for (id, wp) in previous_work_products.iter() {
75 if !new_work_products.contains_key(id) {
76 work_product::delete_workproduct_files(sess, wp);
77 debug_assert!(wp.saved_files.iter().all(|&(_, ref file_name)| {
78 !in_incr_comp_dir_sess(sess, file_name).exists()
79 }));
80 }
81 }
82
83 // Check that we did not delete one of the current work-products:
84 debug_assert!({
85 new_work_products.iter()
86 .flat_map(|(_, wp)| wp.saved_files
87 .iter()
88 .map(|&(_, ref name)| name))
89 .map(|name| in_incr_comp_dir_sess(sess, name))
90 .all(|path| path.exists())
91 });
92 }
93
94 fn save_in<F>(sess: &Session, path_buf: PathBuf, encode: F)
95 where F: FnOnce(&mut Encoder) -> io::Result<()>
96 {
97 debug!("save: storing data in {}", path_buf.display());
98
99 // delete the old dep-graph, if any
100 // Note: It's important that we actually delete the old file and not just
101 // truncate and overwrite it, since it might be a shared hard-link, the
102 // underlying data of which we don't want to modify
103 if path_buf.exists() {
104 match fs::remove_file(&path_buf) {
105 Ok(()) => {
106 debug!("save: remove old file");
107 }
108 Err(err) => {
109 sess.err(&format!("unable to delete old dep-graph at `{}`: {}",
110 path_buf.display(),
111 err));
112 return;
113 }
114 }
115 }
116
117 // generate the data in a memory buffer
118 let mut wr = Cursor::new(Vec::new());
119 file_format::write_file_header(&mut wr).unwrap();
120 match encode(&mut Encoder::new(&mut wr)) {
121 Ok(()) => {}
122 Err(err) => {
123 sess.err(&format!("could not encode dep-graph to `{}`: {}",
124 path_buf.display(),
125 err));
126 return;
127 }
128 }
129
130 // write the data out
131 let data = wr.into_inner();
132 match fs::write(&path_buf, data) {
133 Ok(_) => {
134 debug!("save: data written to disk successfully");
135 }
136 Err(err) => {
137 sess.err(&format!("failed to write dep-graph to `{}`: {}",
138 path_buf.display(),
139 err));
140 return;
141 }
142 }
143 }
144
/// Encodes the dep-graph into `encoder`.
///
/// Layout: first the hash of the dep-tracked commandline options (so a
/// later session can detect that options changed), then the serialized
/// graph itself. When `-Zincremental-info` is set, per-`DepKind`
/// node/edge statistics are printed to stdout before encoding.
fn encode_dep_graph(tcx: TyCtxt,
                    encoder: &mut Encoder)
                    -> io::Result<()> {
    // First encode the commandline arguments hash
    tcx.sess.opts.dep_tracking_hash().encode(encoder)?;

    // Encode the graph data.
    let serialized_graph = time(tcx.sess, "getting serialized graph", || {
        tcx.dep_graph.serialize()
    });

    if tcx.sess.opts.debugging_opts.incremental_info {
        // Per-DepKind aggregate of node and outgoing-edge counts.
        #[derive(Clone)]
        struct Stat {
            kind: DepKind,
            node_counter: u64,
            edge_counter: u64,
        }

        let total_node_count = serialized_graph.nodes.len();
        let total_edge_count = serialized_graph.edge_list_data.len();
        let (total_edge_reads, total_duplicate_edge_reads) =
            tcx.dep_graph.edge_deduplication_data();

        let mut counts: FxHashMap<_, Stat> = FxHashMap();

        for (i, &node) in serialized_graph.nodes.iter_enumerated() {
            let stat = counts.entry(node.kind).or_insert(Stat {
                kind: node.kind,
                node_counter: 0,
                edge_counter: 0,
            });

            stat.node_counter += 1;
            // edge_list_indices[i] is the half-open index range of node i's
            // outgoing edges within edge_list_data.
            let (edge_start, edge_end) = serialized_graph.edge_list_indices[i];
            stat.edge_counter += (edge_end - edge_start) as u64;
        }

        // Sort kinds by descending node count (negated key, since
        // sort_by_key is ascending).
        let mut counts: Vec<_> = counts.values().cloned().collect();
        counts.sort_by_key(|s| -(s.node_counter as i64));

        let percentage_of_all_nodes: Vec<f64> = counts.iter().map(|s| {
            (100.0 * (s.node_counter as f64)) / (total_node_count as f64)
        }).collect();

        let average_edges_per_kind: Vec<f64> = counts.iter().map(|s| {
            (s.edge_counter as f64) / (s.node_counter as f64)
        }).collect();

        println!("[incremental]");
        println!("[incremental] DepGraph Statistics");

        const SEPARATOR: &str = "[incremental] --------------------------------\
                                 ----------------------------------------------\
                                 ------------";

        println!("{}", SEPARATOR);
        println!("[incremental]");
        println!("[incremental] Total Node Count: {}", total_node_count);
        println!("[incremental] Total Edge Count: {}", total_edge_count);
        println!("[incremental] Total Edge Reads: {}", total_edge_reads);
        println!("[incremental] Total Duplicate Edge Reads: {}", total_duplicate_edge_reads);
        println!("[incremental]");
        println!("[incremental] {:<36}| {:<17}| {:<12}| {:<17}|",
                 "Node Kind",
                 "Node Frequency",
                 "Node Count",
                 "Avg. Edge Count");
        println!("[incremental] -------------------------------------\
                  |------------------\
                  |-------------\
                  |------------------|");

        for (i, stat) in counts.iter().enumerate() {
            println!("[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
                     format!("{:?}", stat.kind),
                     percentage_of_all_nodes[i],
                     stat.node_counter,
                     average_edges_per_kind[i]);
        }

        println!("{}", SEPARATOR);
        println!("[incremental]");
    }

    time(tcx.sess, "encoding serialized graph", || {
        serialized_graph.encode(encoder)
    })?;

    Ok(())
}
236
237 fn encode_work_products(dep_graph: &DepGraph,
238 encoder: &mut Encoder) -> io::Result<()> {
239 let work_products: Vec<_> = dep_graph
240 .work_products()
241 .iter()
242 .map(|(id, work_product)| {
243 SerializedWorkProduct {
244 id: id.clone(),
245 work_product: work_product.clone(),
246 }
247 })
248 .collect();
249
250 work_products.encode(encoder)
251 }
252
253 fn encode_query_cache(tcx: TyCtxt,
254 encoder: &mut Encoder)
255 -> io::Result<()> {
256 time(tcx.sess, "serialize query result cache", || {
257 tcx.serialize_query_result_cache(encoder)
258 })
259 }