// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use rustc::dep_graph::DepNode;
use rustc::hir::def_id::{CrateNum, DefId};
use rustc::hir::svh::Svh;
use rustc::ty::TyCtxt;
use rustc_data_structures::fnv::FnvHashMap;
use rustc_data_structures::flock;
use rustc_serialize::Decodable;
use rustc_serialize::opaque::Decoder;

use IncrementalHashesMap;
use super::data::*;
use super::fs::*;
use super::file_format;

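/// `HashContext` computes hashes for the *input* nodes of the dependency
/// graph: the krate node, HIR nodes from the local crate, and metadata
/// imported from other crates. Hashes for foreign metadata are read lazily
/// from the exporting crate's incremental session directory and memoized
/// in the caches below.
///
/// Illustrative sketch only (assumes a `tcx`, an `incremental_hashes_map`,
/// and a `dep_node` of interest are in scope):
///
/// ```ignore
/// let mut hcx = HashContext::new(tcx, &incremental_hashes_map);
/// if HashContext::is_hashable(&dep_node) {
///     // For input nodes, `hash` returns `Some(_)`.
///     if let Some(hash) = hcx.hash(&dep_node) {
///         debug!("hash of {:?} is {}", dep_node, hash);
///     }
/// }
/// ```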
pub struct HashContext<'a, 'tcx: 'a> {
    pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
    incremental_hashes_map: &'a IncrementalHashesMap,
    item_metadata_hashes: FnvHashMap<DefId, u64>,
    crate_hashes: FnvHashMap<CrateNum, Svh>,
}

impl<'a, 'tcx> HashContext<'a, 'tcx> {
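    /// Creates a `HashContext` with empty per-item and per-crate hash caches.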
    pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
               incremental_hashes_map: &'a IncrementalHashesMap)
               -> Self {
        HashContext {
            tcx: tcx,
            incremental_hashes_map: incremental_hashes_map,
            item_metadata_hashes: FnvHashMap(),
            crate_hashes: FnvHashMap(),
        }
    }

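    /// Returns true if `dep_node` is a kind of node that this context can
    /// hash directly: the krate node, HIR nodes, or metadata from other
    /// crates.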
    pub fn is_hashable(dep_node: &DepNode<DefId>) -> bool {
        match *dep_node {
            DepNode::Krate |
            DepNode::Hir(_) => true,
            DepNode::MetaData(def_id) => !def_id.is_local(),
            _ => false,
        }
    }

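    /// Returns the hash of the given input node (the krate node, a local
    /// HIR node, or metadata from another crate), or `None` for computed
    /// by-products, whose changes are tracked via their transitive
    /// dependencies on input nodes.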
    pub fn hash(&mut self, dep_node: &DepNode<DefId>) -> Option<u64> {
        match *dep_node {
            DepNode::Krate => {
                Some(self.incremental_hashes_map[dep_node])
            }

            // HIR nodes (which always come from our crate) are an input:
            DepNode::Hir(def_id) => {
                assert!(def_id.is_local(),
                        "cannot hash HIR for non-local def-id {:?} => {:?}",
                        def_id,
                        self.tcx.item_path_str(def_id));

                assert!(!self.tcx.map.is_inlined_def_id(def_id),
                        "cannot hash HIR for inlined def-id {:?} => {:?}",
                        def_id,
                        self.tcx.item_path_str(def_id));

                Some(self.incremental_hashes_map[dep_node])
            }

            // MetaData from other crates is an *input* to us.
            // MetaData nodes from *our* crate are an *output*; we
            // don't hash them, but we do compute a hash for them and
            // save it for others to use.
            DepNode::MetaData(def_id) if !def_id.is_local() => {
                Some(self.metadata_hash(def_id))
            }

            _ => {
                // Other kinds of nodes represent computed by-products
                // that we don't hash directly; instead, they should
                // have some transitive dependency on a Hir or
                // MetaData node, so we'll just hash that
                None
            }
        }
    }

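    /// Returns the metadata hash of the non-local `def_id`, loading the
    /// exporting crate's hash data on demand. Per-item hashes are cached;
    /// if no detailed hash was recorded for `def_id`, the crate-wide SVH is
    /// used as a fallback.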
    fn metadata_hash(&mut self, def_id: DefId) -> u64 {
        debug!("metadata_hash(def_id={:?})", def_id);

        assert!(!def_id.is_local());
        loop {
            // check whether we have a result cached for this def-id
            if let Some(&hash) = self.item_metadata_hashes.get(&def_id) {
                debug!("metadata_hash: def_id={:?} hash={:?}", def_id, hash);
                return hash;
            }

            // check whether we did not find detailed metadata for this
            // krate; in that case, we just use the krate's overall hash
            if let Some(&hash) = self.crate_hashes.get(&def_id.krate) {
                debug!("metadata_hash: def_id={:?} crate_hash={:?}", def_id, hash);

                // micro-"optimization": avoid a cache miss if we ask
                // for metadata from this particular def-id again.
                self.item_metadata_hashes.insert(def_id, hash.as_u64());

                return hash.as_u64();
            }

            // otherwise, load the data and repeat.
            self.load_data(def_id.krate);
            assert!(self.crate_hashes.contains_key(&def_id.krate));
        }
    }

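    /// Records the SVH of crate `cnum` and, if that crate left an
    /// incremental session directory behind, takes a shared lock on it and
    /// reads the per-item metadata hashes stored there.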
    fn load_data(&mut self, cnum: CrateNum) {
        debug!("load_data(cnum={})", cnum);

        let svh = self.tcx.sess.cstore.crate_hash(cnum);
        let old = self.crate_hashes.insert(cnum, svh);
        debug!("load_data: svh={}", svh);
        assert!(old.is_none(), "loaded data for crate {:?} twice", cnum);

        if let Some(session_dir) = find_metadata_hashes_for(self.tcx, cnum) {
            debug!("load_data: session_dir={:?}", session_dir);

            // Lock the directory we'll be reading the hashes from.
            let lock_file_path = lock_file_path(&session_dir);
            let _lock = match flock::Lock::new(&lock_file_path,
                                               false, // don't wait
                                               false, // don't create the lock-file
                                               false) { // shared lock
                Ok(lock) => lock,
                Err(err) => {
                    debug!("Could not acquire lock on `{}` while trying to \
                            load metadata hashes: {}",
                           lock_file_path.display(),
                           err);

                    // Could not acquire the lock. The directory is probably
                    // in the process of being deleted. It's OK to just exit
                    // here. It's the same scenario as if the file had not
                    // existed in the first place.
                    return
                }
            };

            let hashes_file_path = metadata_hash_import_path(&session_dir);

            match file_format::read_file(&hashes_file_path) {
                Ok(Some(data)) => {
                    match self.load_from_data(cnum, &data, svh) {
                        Ok(()) => { }
                        Err(err) => {
                            bug!("decoding error in dep-graph from `{}`: {}",
                                 &hashes_file_path.display(), err);
                        }
                    }
                }
                Ok(None) => {
                    // If the file is not found, that's ok.
                }
                Err(err) => {
                    self.tcx.sess.err(
                        &format!("could not load dep information from `{}`: {}",
                                 hashes_file_path.display(), err));
                }
            }
        }
    }

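    /// Decodes the serialized metadata hashes of crate `cnum`, verifies
    /// that the SVH stored in the hashes file matches `expected_svh`, and
    /// records one entry per def-id in `item_metadata_hashes`.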
    fn load_from_data(&mut self,
                      cnum: CrateNum,
                      data: &[u8],
                      expected_svh: Svh) -> Result<(), String> {
        debug!("load_from_data(cnum={})", cnum);

        // Load up the hashes for the def-ids from this crate.
        let mut decoder = Decoder::new(data, 0);
        let svh_in_hashes_file = Svh::decode(&mut decoder)?;

        if svh_in_hashes_file != expected_svh {
            // We should not be able to get here. If we do, then
            // `fs::find_metadata_hashes_for()` has messed up.
            bug!("mismatch between SVH in crate and SVH in incr. comp. hashes")
        }

        let serialized_hashes = SerializedMetadataHashes::decode(&mut decoder)?;
        for serialized_hash in serialized_hashes.hashes {
            // the hashes are stored with just a def-index, which is
            // always relative to the old crate; convert that to use
            // our internal crate number
            let def_id = DefId { krate: cnum, index: serialized_hash.def_index };

            // record the hash for this dep-node
            let old = self.item_metadata_hashes.insert(def_id, serialized_hash.hash);
            debug!("load_from_data: def_id={:?} hash={}", def_id, serialized_hash.hash);
            assert!(old.is_none(), "already have hash for {:?}", def_id);
        }
        Ok(())
    }
}