1 //! Code to save/load the dep-graph from files.
3 use rustc_data_structures
::fx
::FxHashMap
;
4 use rustc
::dep_graph
::{PreviousDepGraph, SerializedDepGraph, WorkProduct, WorkProductId}
;
5 use rustc
::session
::Session
;
7 use rustc
::ty
::query
::OnDiskCache
;
8 use rustc
::util
::common
::time_ext
;
9 use rustc_serialize
::Decodable
as RustcDecodable
;
10 use rustc_serialize
::opaque
::Decoder
;
15 use super::file_format
;
16 use super::work_product
;
// Set up dep-graph bookkeeping on the given `TyCtxt`.
// NOTE(review): this fragment is incomplete — the body of the
// `is_fully_enabled()` guard and the end of the function are not visible
// here, so only the visible steps are documented.
18 pub fn dep_graph_tcx_init
<'a
, 'tcx
>(tcx
: TyCtxt
<'a
, 'tcx
, 'tcx
>) {
// Nothing to do unless the dep-graph is fully enabled for this session.
19 if !tcx
.dep_graph
.is_fully_enabled() {
// Allocate the dep-nodes covering crate metadata up front.
23 tcx
.allocate_metadata_dep_nodes();
26 type WorkProductMap
= FxHashMap
<WorkProductId
, WorkProduct
>;
// Outcome of trying to load a piece of incremental data from disk.
// NOTE(review): only the `Error` variant is visible in this fragment;
// code below also matches `LoadResult::Ok { data }` and
// `LoadResult::DataOutOfDate`, whose declarations are outside this view.
28 pub enum LoadResult
<T
> {
31 Error { message: String }
,
34 impl LoadResult
<(PreviousDepGraph
, WorkProductMap
)> {
// Unwrap the load result, cleaning up the session directory on failure.
// NOTE(review): fragmentary — the `match` header, the `Error` arm body,
// and the closing braces are not visible in this view.
35 pub fn open(self, sess
: &Session
) -> (PreviousDepGraph
, WorkProductMap
) {
// A hard error occurred while loading (arm body not visible here).
37 LoadResult
::Error { message }
=> {
// The on-disk data is unusable: wipe the session directory so stale
// files cannot be picked up later, reporting any deletion failure.
41 LoadResult
::DataOutOfDate
=> {
42 if let Err(err
) = delete_all_session_dir_contents(sess
) {
43 sess
.err(&format
!("Failed to delete invalidated or incompatible \
44 incremental compilation session directory contents `{}`: {}.",
45 dep_graph_path(sess
).display(), err
));
// Successful load: hand the decoded data back to the caller.
49 LoadResult
::Ok { data }
=> data
// Read the file at `path` via `file_format::read_file` and classify the
// outcome as a `LoadResult`; on success the payload is the raw bytes plus
// the position at which the caller should start decoding.
// NOTE(review): fragmentary — several arm bodies and closing braces are
// not visible in this view.
55 fn load_data(report_incremental_info
: bool
, path
: &Path
) -> LoadResult
<(Vec
<u8>, usize)> {
56 match file_format
::read_file(report_incremental_info
, path
) {
// File present and readable by this compiler version.
57 Ok(Some(data_and_pos
)) => LoadResult
::Ok
{
61 // The file either didn't exist or was produced by an incompatible
62 // compiler version. Neither is an error.
63 LoadResult
::DataOutOfDate
// An actual I/O error: surface it as a `LoadResult::Error` message.
67 message
: format
!("could not load dep-graph from `{}`: {}",
74 fn delete_dirty_work_product(sess
: &Session
,
75 swp
: SerializedWorkProduct
) {
76 debug
!("delete_dirty_work_product({:?})", swp
);
77 work_product
::delete_workproduct_files(sess
, &swp
.work_product
);
80 /// Either a result that has already been computed or a
81 /// handle that will let us wait until it is computed
82 /// by a background thread.
83 pub enum MaybeAsync
<T
> {
// NOTE(review): a `Sync(T)` variant is matched in `open` below, but its
// declaration is not visible in this fragment.
85 Async(std
::thread
::JoinHandle
<T
>)
87 impl<T
> MaybeAsync
<T
> {
88 pub fn open(self) -> std
::thread
::Result
<T
> {
90 MaybeAsync
::Sync(result
) => Ok(result
),
91 MaybeAsync
::Async(handle
) => handle
.join()
96 pub type DepGraphFuture
= MaybeAsync
<LoadResult
<(PreviousDepGraph
, WorkProductMap
)>>;
98 /// Launch a thread and load the dependency graph in the background.
// NOTE(review): this function is fragmentary in this view — several
// closing braces and error-handling lines are missing between the
// numbered statements below; comments describe only the visible steps.
99 pub fn load_dep_graph(sess
: &Session
) -> DepGraphFuture
{
100 // Since `sess` isn't `Sync`, we perform all accesses to `sess`
101 // before we fire the background thread.
103 let time_passes
= sess
.time_passes();
105 if sess
.opts
.incremental
.is_none() {
106 // No incremental compilation.
107 return MaybeAsync
::Sync(LoadResult
::Ok
{
108 data
: Default
::default(),
112 // Calling `sess.incr_comp_session_dir()` will panic if `sess.opts.incremental.is_none()`.
113 // Fortunately, we just checked that this isn't the case.
114 let path
= dep_graph_path_from(&sess
.incr_comp_session_dir());
115 let report_incremental_info
= sess
.opts
.debugging_opts
.incremental_info
;
116 let expected_hash
= sess
.opts
.dep_tracking_hash();
// Work products from the previous session that survive into this one;
// filled in below, then moved into the background closure.
118 let mut prev_work_products
= FxHashMap
::default();
120 // If we are only building with -Zquery-dep-graph but without an actual
121 // incr. comp. session directory, we skip this. Otherwise we'd fail
122 // when trying to load work products.
123 if sess
.incr_comp_session_dir_opt().is_some() {
124 let work_products_path
= work_products_path(sess
);
125 let load_result
= load_data(report_incremental_info
, &work_products_path
);
127 if let LoadResult
::Ok { data: (work_products_data, start_pos) }
= load_result
{
128 // Decode the list of work_products
129 let mut work_product_decoder
= Decoder
::new(&work_products_data
[..], start_pos
);
130 let work_products
: Vec
<SerializedWorkProduct
> =
131 RustcDecodable
::decode(&mut work_product_decoder
).unwrap_or_else(|e
| {
132 let msg
= format
!("Error decoding `work-products` from incremental \
133 compilation session directory: {}", e
);
// Keep a work product only if every file it saved still exists on disk.
137 for swp
in work_products
{
138 let mut all_files_exist
= true;
139 for &(_
, ref file_name
) in swp
.work_product
.saved_files
.iter() {
140 let path
= in_incr_comp_dir_sess(sess
, file_name
);
// NOTE(review): the existence check guarding this assignment is not
// visible in this fragment.
142 all_files_exist
= false;
144 if sess
.opts
.debugging_opts
.incremental_info
{
145 eprintln
!("incremental: could not find file for work \
146 product: {}", path
.display());
// Every saved file is present: remember this work product for reuse.
152 debug
!("reconcile_work_products: all files for {:?} exist", swp
);
153 prev_work_products
.insert(swp
.id
, swp
.work_product
);
// Some saved file is missing: discard the work product and its files.
155 debug
!("reconcile_work_products: some file for {:?} does not exist", swp
);
156 delete_dirty_work_product(sess
, swp
);
// Fire off the background thread that actually loads and decodes the
// previous dep-graph; all `sess` accesses happened above.
162 MaybeAsync
::Async(std
::thread
::spawn(move || {
163 time_ext(time_passes
, None
, "background load prev dep-graph", move || {
164 match load_data(report_incremental_info
, &path
) {
165 LoadResult
::DataOutOfDate
=> LoadResult
::DataOutOfDate
,
166 LoadResult
::Error { message }
=> LoadResult
::Error { message }
,
167 LoadResult
::Ok { data: (bytes, start_pos) }
=> {
169 let mut decoder
= Decoder
::new(&bytes
, start_pos
);
// The cache is only reusable if it was produced by the same
// commandline arguments as the current session.
170 let prev_commandline_args_hash
= u64::decode(&mut decoder
)
171 .expect("Error reading commandline arg hash from cached dep-graph");
173 if prev_commandline_args_hash
!= expected_hash
{
174 if report_incremental_info
{
175 println
!("[incremental] completely ignoring cache because of \
176 differing commandline arguments");
178 // We can't reuse the cache, purge it.
179 debug
!("load_dep_graph_new: differing commandline arg hashes");
181 // No need to do any further work
182 return LoadResult
::DataOutOfDate
;
// Decode the dep-graph proper and pair it with the work products
// gathered above.
185 let dep_graph
= SerializedDepGraph
::decode(&mut decoder
)
186 .expect("Error reading cached dep-graph");
188 LoadResult
::Ok { data: (PreviousDepGraph::new(dep_graph), prev_work_products) }
195 pub fn load_query_result_cache
<'sess
>(sess
: &'sess Session
) -> OnDiskCache
<'sess
> {
196 if sess
.opts
.incremental
.is_none() ||
197 !sess
.opts
.debugging_opts
.incremental_queries
{
198 return OnDiskCache
::new_empty(sess
.source_map());
201 match load_data(sess
.opts
.debugging_opts
.incremental_info
, &query_cache_path(sess
)) {
202 LoadResult
::Ok{ data: (bytes, start_pos) }
=> OnDiskCache
::new(sess
, bytes
, start_pos
),
203 _
=> OnDiskCache
::new_empty(sess
.source_map())