1 use crate::QueryCtxt;
2 use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
3 use rustc_data_structures::memmap::Mmap;
4 use rustc_data_structures::sync::{HashMapExt, Lock, Lrc, RwLock};
5 use rustc_data_structures::unhash::UnhashMap;
6 use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, StableCrateId, LOCAL_CRATE};
7 use rustc_hir::definitions::DefPathHash;
8 use rustc_index::vec::{Idx, IndexVec};
9 use rustc_middle::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
10 use rustc_middle::mir::interpret::{AllocDecodingSession, AllocDecodingState};
11 use rustc_middle::mir::{self, interpret};
12 use rustc_middle::thir;
13 use rustc_middle::ty::codec::{RefDecodable, TyDecoder, TyEncoder};
14 use rustc_middle::ty::{self, Ty, TyCtxt};
15 use rustc_query_system::dep_graph::DepContext;
16 use rustc_query_system::query::{QueryCache, QueryContext, QuerySideEffects};
17 use rustc_serialize::{
18 opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder},
19 Decodable, Decoder, Encodable, Encoder,
20 };
21 use rustc_session::Session;
22 use rustc_span::hygiene::{
23 ExpnId, HygieneDecodeContext, HygieneEncodeContext, SyntaxContext, SyntaxContextData,
24 };
25 use rustc_span::source_map::{SourceMap, StableSourceFileId};
26 use rustc_span::CachingSourceMapView;
27 use rustc_span::{BytePos, ExpnData, ExpnHash, Pos, SourceFile, Span};
28 use std::io;
29 use std::mem;
30
31 const TAG_FILE_FOOTER: u128 = 0xC0FFEE_C0FFEE_C0FFEE_C0FFEE_C0FFEE;
32
33 // A normal span encoded with both location information and a `SyntaxContext`
34 const TAG_FULL_SPAN: u8 = 0;
35 // A partial span with no location information, encoded only with a `SyntaxContext`
36 const TAG_PARTIAL_SPAN: u8 = 1;
37 const TAG_RELATIVE_SPAN: u8 = 2;
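// What follows each tag (after the `SyntaxContext` and `Option<LocalDefId>` parent
// that every encoded span starts with) is, roughly:
//   TAG_PARTIAL_SPAN:  nothing; the span is rebuilt as a zero-length span
//   TAG_RELATIVE_SPAN: `lo - enclosing.lo` and `hi - enclosing.lo` as `u32`s,
//                      relative to the parent definition's source span
//   TAG_FULL_SPAN:     a `SourceFileIndex`, a 1-based line number, a column and a
//                      length (both `BytePos`)
// See the `Encodable` and `Decodable` impls for `Span` below.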
38
39 const TAG_SYNTAX_CONTEXT: u8 = 0;
40 const TAG_EXPN_DATA: u8 = 1;
41
42 /// Provides an interface to incremental compilation data cached from the
43 /// previous compilation session. This data will eventually include the results
44 /// of a few selected queries (like `typeck` and `optimized_mir`) and
45 /// any side effects that have been emitted during a query.
46 pub struct OnDiskCache<'sess> {
47 // The complete cache data in serialized form.
48 serialized_data: RwLock<Option<Mmap>>,
49
50 // Collects all `QuerySideEffects` created during the current compilation
51 // session.
52 current_side_effects: Lock<FxHashMap<DepNodeIndex, QuerySideEffects>>,
53
54 source_map: &'sess SourceMap,
55 file_index_to_stable_id: FxHashMap<SourceFileIndex, EncodedSourceFileId>,
56
57 // Caches that are populated lazily during decoding.
58 file_index_to_file: Lock<FxHashMap<SourceFileIndex, Lrc<SourceFile>>>,
59
60 // A map from dep-node to the position of the cached query result in
61 // `serialized_data`.
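// A cached result is retrieved by mapping the `SerializedDepNodeIndex` through this
// table to an `AbsoluteBytePos` and then decoding the tagged record at that position
// (see `try_load_query_result` and `load_indexed`).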
62 query_result_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
63
64 // A map from dep-node to the position of any associated `QuerySideEffects` in
65 // `serialized_data`.
66 prev_side_effects_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
67
68 alloc_decoding_state: AllocDecodingState,
69
70 // A map from syntax context ids to the position of their associated
71 // `SyntaxContextData`. We use a `u32` instead of a `SyntaxContext`
72 // to represent the fact that we are storing *encoded* ids. When we decode
73 // a `SyntaxContext`, a new id will be allocated from the global `HygieneData`,
74 // which will almost certainly be different than the serialized id.
75 syntax_contexts: FxHashMap<u32, AbsoluteBytePos>,
76 // A map from the `ExpnHash` of an `ExpnId` to the position
77 // of its associated `ExpnData`. Ideally, we would store a `DefId`,
78 // but we need to decode this before we've constructed a `TyCtxt` (which
79 // makes it difficult to decode a `DefId`).
80
81 // Note that these hashes correspond to both local and foreign
82 // `ExpnData` (e.g. `ExpnData.krate` may not be `LOCAL_CRATE`). Alternatively,
83 // we could look up the `ExpnData` from the metadata of foreign crates,
84 // but it seemed easier to have `OnDiskCache` be independent of the `CStore`.
85 expn_data: UnhashMap<ExpnHash, AbsoluteBytePos>,
86 // Additional information used when decoding hygiene data.
87 hygiene_context: HygieneDecodeContext,
88 // Maps `ExpnHash`es to their raw value from the *previous*
89 // compilation session. This is used as an initial 'guess' when
90 // we try to map an `ExpnHash` to its value in the current
91 // compilation session.
92 foreign_expn_data: UnhashMap<ExpnHash, u32>,
93 }
94
95 // This type is used only for serialization and deserialization.
96 #[derive(Encodable, Decodable)]
97 struct Footer {
98 file_index_to_stable_id: FxHashMap<SourceFileIndex, EncodedSourceFileId>,
99 query_result_index: EncodedDepNodeIndex,
100 side_effects_index: EncodedDepNodeIndex,
101 // The location of all allocations.
102 interpret_alloc_index: Vec<u32>,
103 // See `OnDiskCache.syntax_contexts`
104 syntax_contexts: FxHashMap<u32, AbsoluteBytePos>,
105 // See `OnDiskCache.expn_data`
106 expn_data: UnhashMap<ExpnHash, AbsoluteBytePos>,
107 foreign_expn_data: UnhashMap<ExpnHash, u32>,
108 }
109
110 pub type EncodedDepNodeIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
111
112 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Encodable, Decodable)]
113 struct SourceFileIndex(u32);
114
115 #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Encodable, Decodable)]
116 pub struct AbsoluteBytePos(u32);
117
118 impl AbsoluteBytePos {
119 fn new(pos: usize) -> AbsoluteBytePos {
120 debug_assert!(pos <= u32::MAX as usize);
121 AbsoluteBytePos(pos as u32)
122 }
123
124 fn to_usize(self) -> usize {
125 self.0 as usize
126 }
127 }
128
129 /// An `EncodedSourceFileId` is the same as a `StableSourceFileId` except that
130 /// the source crate is represented as a [StableCrateId] instead of as a
131 /// `CrateNum`. This way `EncodedSourceFileId` can be encoded and decoded
132 /// without any additional context, i.e. with a simple `opaque::Decoder` (which
133 /// is the only thing available when decoding the cache's [Footer]).
134 #[derive(Encodable, Decodable, Clone, Debug)]
135 struct EncodedSourceFileId {
136 file_name_hash: u64,
137 stable_crate_id: StableCrateId,
138 }
139
140 impl EncodedSourceFileId {
141 fn translate(&self, tcx: TyCtxt<'_>) -> StableSourceFileId {
142 let cnum = tcx.stable_crate_id_to_crate_num(self.stable_crate_id);
143 StableSourceFileId { file_name_hash: self.file_name_hash, cnum }
144 }
145
146 fn new(tcx: TyCtxt<'_>, file: &SourceFile) -> EncodedSourceFileId {
147 let source_file_id = StableSourceFileId::new(file);
148 EncodedSourceFileId {
149 file_name_hash: source_file_id.file_name_hash,
150 stable_crate_id: tcx.stable_crate_id(source_file_id.cnum),
151 }
152 }
153 }
154
155 impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
156 /// Creates a new `OnDiskCache` instance from the serialized data in `data`.
157 fn new(sess: &'sess Session, data: Mmap, start_pos: usize) -> Self {
158 debug_assert!(sess.opts.incremental.is_some());
159
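// A rough sketch of the file layout this constructor expects (offsets into the
// mmapped data):
//
//   [start_pos ..]            tagged query results, side effects, allocations and
//                             hygiene data, at the positions recorded in the footer
//   [footer_pos ..]           the tagged `Footer` holding all of the lookup tables
//   [len - ENCODED_SIZE ..]   `IntEncodedWithFixedSize(footer_pos)`, written last by
//                             `serialize` below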
160 // Wrap in a scope so we can borrow `data`.
161 let footer: Footer = {
162 let mut decoder = MemDecoder::new(&data, start_pos);
163
164 // Decode the *position* of the footer, which can be found in the
165 // last 8 bytes of the file.
166 decoder.set_position(data.len() - IntEncodedWithFixedSize::ENCODED_SIZE);
167 let footer_pos = IntEncodedWithFixedSize::decode(&mut decoder).0 as usize;
168
169 // Decode the file footer, which contains all the lookup tables, etc.
170 decoder.set_position(footer_pos);
171
172 decode_tagged(&mut decoder, TAG_FILE_FOOTER)
173 };
174
175 Self {
176 serialized_data: RwLock::new(Some(data)),
177 file_index_to_stable_id: footer.file_index_to_stable_id,
178 file_index_to_file: Default::default(),
179 source_map: sess.source_map(),
180 current_side_effects: Default::default(),
181 query_result_index: footer.query_result_index.into_iter().collect(),
182 prev_side_effects_index: footer.side_effects_index.into_iter().collect(),
183 alloc_decoding_state: AllocDecodingState::new(footer.interpret_alloc_index),
184 syntax_contexts: footer.syntax_contexts,
185 expn_data: footer.expn_data,
186 foreign_expn_data: footer.foreign_expn_data,
187 hygiene_context: Default::default(),
188 }
189 }
190
191 fn new_empty(source_map: &'sess SourceMap) -> Self {
192 Self {
193 serialized_data: RwLock::new(None),
194 file_index_to_stable_id: Default::default(),
195 file_index_to_file: Default::default(),
196 source_map,
197 current_side_effects: Default::default(),
198 query_result_index: Default::default(),
199 prev_side_effects_index: Default::default(),
200 alloc_decoding_state: AllocDecodingState::new(Vec::new()),
201 syntax_contexts: FxHashMap::default(),
202 expn_data: UnhashMap::default(),
203 foreign_expn_data: UnhashMap::default(),
204 hygiene_context: Default::default(),
205 }
206 }
207
208 /// Execute all cache promotions and release the serialized backing Mmap.
209 ///
210 /// Cache promotions require invoking queries, which needs to read the serialized data.
211 /// In order to serialize the new on-disk cache, the former on-disk cache file needs to be
212 /// deleted, hence we won't be able to refer to its memmapped data.
213 fn drop_serialized_data(&self, tcx: TyCtxt<'_>) {
214 // Load everything into memory so we can write it out to the on-disk
215 // cache. The vast majority of cacheable query results should already
216 // be in memory, so this should be a cheap operation.
217 // Do this *before* we clone 'latest_foreign_def_path_hashes', since
218 // loading existing queries may cause us to create new DepNodes, which
219 // may in turn end up invoking `store_foreign_def_id_hash`
220 tcx.dep_graph.exec_cache_promotions(tcx);
221
222 *self.serialized_data.write() = None;
223 }
224
225 fn serialize<'tcx>(&self, tcx: TyCtxt<'tcx>, encoder: FileEncoder) -> FileEncodeResult {
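// The encoding order below mirrors the layout that `new` expects: query results,
// then side effects, then interned allocations, then hygiene data, then the footer,
// and finally the fixed-size footer position as the last 8 bytes of the file.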
226 // Serializing the `DepGraph` should not modify it.
227 tcx.dep_graph.with_ignore(|| {
228 // Allocate `SourceFileIndex`es.
229 let (file_to_file_index, file_index_to_stable_id) = {
230 let files = tcx.sess.source_map().files();
231 let mut file_to_file_index =
232 FxHashMap::with_capacity_and_hasher(files.len(), Default::default());
233 let mut file_index_to_stable_id =
234 FxHashMap::with_capacity_and_hasher(files.len(), Default::default());
235
236 for (index, file) in files.iter().enumerate() {
237 let index = SourceFileIndex(index as u32);
238 let file_ptr: *const SourceFile = &**file as *const _;
239 file_to_file_index.insert(file_ptr, index);
240 let source_file_id = EncodedSourceFileId::new(tcx, &file);
241 file_index_to_stable_id.insert(index, source_file_id);
242 }
243
244 (file_to_file_index, file_index_to_stable_id)
245 };
246
247 let hygiene_encode_context = HygieneEncodeContext::default();
248
249 let mut encoder = CacheEncoder {
250 tcx,
251 encoder,
252 type_shorthands: Default::default(),
253 predicate_shorthands: Default::default(),
254 interpret_allocs: Default::default(),
255 source_map: CachingSourceMapView::new(tcx.sess.source_map()),
256 file_to_file_index,
257 hygiene_context: &hygiene_encode_context,
258 };
259
260 // Encode query results.
261 let mut query_result_index = EncodedDepNodeIndex::new();
262
263 tcx.sess.time("encode_query_results", || {
264 let enc = &mut encoder;
265 let qri = &mut query_result_index;
266 QueryCtxt::from_tcx(tcx).encode_query_results(enc, qri);
267 });
268
269 // Encode side effects.
270 let side_effects_index: EncodedDepNodeIndex = self
271 .current_side_effects
272 .borrow()
273 .iter()
274 .map(|(dep_node_index, side_effects)| {
275 let pos = AbsoluteBytePos::new(encoder.position());
276 let dep_node_index = SerializedDepNodeIndex::new(dep_node_index.index());
277 encoder.encode_tagged(dep_node_index, side_effects);
278
279 (dep_node_index, pos)
280 })
281 .collect();
282
283 let interpret_alloc_index = {
284 let mut interpret_alloc_index = Vec::new();
285 let mut n = 0;
286 loop {
287 let new_n = encoder.interpret_allocs.len();
288 // Encoding an `AllocId` can itself add further `AllocId`s to
289 // `interpret_allocs`, so keep looping until no new IDs appear.
290 if n == new_n {
291 break;
292 }
293 interpret_alloc_index.reserve(new_n - n);
294 for idx in n..new_n {
295 let id = encoder.interpret_allocs[idx];
296 let pos = encoder.position() as u32;
297 interpret_alloc_index.push(pos);
298 interpret::specialized_encode_alloc_id(&mut encoder, tcx, id);
299 }
300 n = new_n;
301 }
302 interpret_alloc_index
303 };
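// On the decoding side, these byte positions travel through
// `Footer::interpret_alloc_index` and seed the `AllocDecodingState` in
// `OnDiskCache::new`.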
304
305 let mut syntax_contexts = FxHashMap::default();
306 let mut expn_data = UnhashMap::default();
307 let mut foreign_expn_data = UnhashMap::default();
308
309 // Encode all hygiene data (`SyntaxContextData` and `ExpnData`) from the current
310 // session.
311
312 hygiene_encode_context.encode(
313 &mut encoder,
314 |encoder, index, ctxt_data| {
315 let pos = AbsoluteBytePos::new(encoder.position());
316 encoder.encode_tagged(TAG_SYNTAX_CONTEXT, ctxt_data);
317 syntax_contexts.insert(index, pos);
318 },
319 |encoder, expn_id, data, hash| {
320 if expn_id.krate == LOCAL_CRATE {
321 let pos = AbsoluteBytePos::new(encoder.position());
322 encoder.encode_tagged(TAG_EXPN_DATA, data);
323 expn_data.insert(hash, pos);
324 } else {
325 foreign_expn_data.insert(hash, expn_id.local_id.as_u32());
326 }
327 },
328 );
329
330 // Encode the file footer.
331 let footer_pos = encoder.position() as u64;
332 encoder.encode_tagged(
333 TAG_FILE_FOOTER,
334 &Footer {
335 file_index_to_stable_id,
336 query_result_index,
337 side_effects_index,
338 interpret_alloc_index,
339 syntax_contexts,
340 expn_data,
341 foreign_expn_data,
342 },
343 );
344
345 // Encode the position of the footer as the last 8 bytes of the
346 // file so we know where to look for it.
347 IntEncodedWithFixedSize(footer_pos).encode(&mut encoder.encoder);
348
349 // DO NOT WRITE ANYTHING TO THE ENCODER AFTER THIS POINT! The address
350 // of the footer must be the last thing in the data stream.
351
352 encoder.finish()
353 })
354 }
355 }
356
357 impl<'sess> OnDiskCache<'sess> {
358 pub fn as_dyn(&self) -> &dyn rustc_middle::ty::OnDiskCache<'sess> {
359 self as _
360 }
361
362 /// Loads a `QuerySideEffects` created during the previous compilation session.
363 pub fn load_side_effects(
364 &self,
365 tcx: TyCtxt<'_>,
366 dep_node_index: SerializedDepNodeIndex,
367 ) -> QuerySideEffects {
368 let side_effects: Option<QuerySideEffects> =
369 self.load_indexed(tcx, dep_node_index, &self.prev_side_effects_index);
370
371 side_effects.unwrap_or_default()
372 }
373
374 /// Stores a `QuerySideEffects` emitted during the current compilation session.
375 /// Anything stored like this will be available via `load_side_effects` in
376 /// the next compilation session.
377 #[inline(never)]
378 #[cold]
379 pub fn store_side_effects(&self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects) {
380 let mut current_side_effects = self.current_side_effects.borrow_mut();
381 let prev = current_side_effects.insert(dep_node_index, side_effects);
382 debug_assert!(prev.is_none());
383 }
384
385 /// Returns the cached query result if there is something in the cache for
386 /// the given `SerializedDepNodeIndex`; otherwise returns `None`.
387 pub fn try_load_query_result<'tcx, T>(
388 &self,
389 tcx: TyCtxt<'tcx>,
390 dep_node_index: SerializedDepNodeIndex,
391 ) -> Option<T>
392 where
393 T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>,
394 {
395 self.load_indexed(tcx, dep_node_index, &self.query_result_index)
396 }
397
398 /// Stores side effects emitted during the computation of an anonymous query.
399 /// Since many anonymous queries can share the same `DepNode`, we aggregate
400 /// them -- as opposed to regular queries where we assume that there is a
401 /// 1:1 relationship between query-key and `DepNode`.
402 #[inline(never)]
403 #[cold]
404 pub fn store_side_effects_for_anon_node(
405 &self,
406 dep_node_index: DepNodeIndex,
407 side_effects: QuerySideEffects,
408 ) {
409 let mut current_side_effects = self.current_side_effects.borrow_mut();
410
411 let x = current_side_effects.entry(dep_node_index).or_default();
412 x.append(side_effects);
413 }
414
415 fn load_indexed<'tcx, T>(
416 &self,
417 tcx: TyCtxt<'tcx>,
418 dep_node_index: SerializedDepNodeIndex,
419 index: &FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
420 ) -> Option<T>
421 where
422 T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>,
423 {
424 let pos = index.get(&dep_node_index).cloned()?;
425
426 self.with_decoder(tcx, pos, |decoder| Some(decode_tagged(decoder, dep_node_index)))
427 }
428
429 fn with_decoder<'a, 'tcx, T, F: for<'s> FnOnce(&mut CacheDecoder<'s, 'tcx>) -> T>(
430 &'sess self,
431 tcx: TyCtxt<'tcx>,
432 pos: AbsoluteBytePos,
433 f: F,
434 ) -> T
435 where
436 T: Decodable<CacheDecoder<'a, 'tcx>>,
437 {
438 let serialized_data = self.serialized_data.read();
439 let mut decoder = CacheDecoder {
440 tcx,
441 opaque: MemDecoder::new(serialized_data.as_deref().unwrap_or(&[]), pos.to_usize()),
442 source_map: self.source_map,
443 file_index_to_file: &self.file_index_to_file,
444 file_index_to_stable_id: &self.file_index_to_stable_id,
445 alloc_decoding_session: self.alloc_decoding_state.new_decoding_session(),
446 syntax_contexts: &self.syntax_contexts,
447 expn_data: &self.expn_data,
448 foreign_expn_data: &self.foreign_expn_data,
449 hygiene_context: &self.hygiene_context,
450 };
451 f(&mut decoder)
452 }
453 }
454
455 //- DECODING -------------------------------------------------------------------
456
457 /// A decoder that can read from the incremental compilation cache. It is similar to the one
458 /// we use for crate metadata decoding in that it can rebase spans and eventually
459 /// will also handle things that contain `Ty` instances.
460 pub struct CacheDecoder<'a, 'tcx> {
461 tcx: TyCtxt<'tcx>,
462 opaque: MemDecoder<'a>,
463 source_map: &'a SourceMap,
464 file_index_to_file: &'a Lock<FxHashMap<SourceFileIndex, Lrc<SourceFile>>>,
465 file_index_to_stable_id: &'a FxHashMap<SourceFileIndex, EncodedSourceFileId>,
466 alloc_decoding_session: AllocDecodingSession<'a>,
467 syntax_contexts: &'a FxHashMap<u32, AbsoluteBytePos>,
468 expn_data: &'a UnhashMap<ExpnHash, AbsoluteBytePos>,
469 foreign_expn_data: &'a UnhashMap<ExpnHash, u32>,
470 hygiene_context: &'a HygieneDecodeContext,
471 }
472
473 impl<'a, 'tcx> CacheDecoder<'a, 'tcx> {
474 fn file_index_to_file(&self, index: SourceFileIndex) -> Lrc<SourceFile> {
475 let CacheDecoder {
476 tcx,
477 ref file_index_to_file,
478 ref file_index_to_stable_id,
479 ref source_map,
480 ..
481 } = *self;
482
483 file_index_to_file
484 .borrow_mut()
485 .entry(index)
486 .or_insert_with(|| {
487 let stable_id = file_index_to_stable_id[&index].translate(tcx);
488
489 // If this `SourceFile` is from a foreign crate, then make sure
490 // that we've imported all of the source files from that crate.
491 // This has usually already been done during macro invocation.
492 // However, when encoding query results like `TypeckResults`,
493 // we might encode an `AdtDef` for a foreign type (because it
494 // was referenced in the body of the function). There is no guarantee
495 // that we will load the source files from that crate during macro
496 // expansion, so we use `import_source_files` to ensure that the foreign
497 // source files are actually imported before we call `source_file_by_stable_id`.
498 if stable_id.cnum != LOCAL_CRATE {
499 self.tcx.cstore_untracked().import_source_files(self.tcx.sess, stable_id.cnum);
500 }
501
502 source_map
503 .source_file_by_stable_id(stable_id)
504 .expect("failed to lookup `SourceFile` in new context")
505 })
506 .clone()
507 }
508 }
509
510 trait DecoderWithPosition: Decoder {
511 fn position(&self) -> usize;
512 }
513
514 impl<'a> DecoderWithPosition for MemDecoder<'a> {
515 fn position(&self) -> usize {
516 self.position()
517 }
518 }
519
520 impl<'a, 'tcx> DecoderWithPosition for CacheDecoder<'a, 'tcx> {
521 fn position(&self) -> usize {
522 self.opaque.position()
523 }
524 }
525
526 // Decodes something that was encoded with `encode_tagged()` and verifies that the
527 // tag matches and that the correct number of bytes was read.
528 fn decode_tagged<D, T, V>(decoder: &mut D, expected_tag: T) -> V
529 where
530 T: Decodable<D> + Eq + std::fmt::Debug,
531 V: Decodable<D>,
532 D: DecoderWithPosition,
533 {
534 let start_pos = decoder.position();
535
536 let actual_tag = T::decode(decoder);
537 assert_eq!(actual_tag, expected_tag);
538 let value = V::decode(decoder);
539 let end_pos = decoder.position();
540
541 let expected_len: u64 = Decodable::decode(decoder);
542 assert_eq!((end_pos - start_pos) as u64, expected_len);
543
544 value
545 }
546
547 impl<'a, 'tcx> TyDecoder for CacheDecoder<'a, 'tcx> {
548 type I = TyCtxt<'tcx>;
549 const CLEAR_CROSS_CRATE: bool = false;
550
551 #[inline]
552 fn interner(&self) -> TyCtxt<'tcx> {
553 self.tcx
554 }
555
556 #[inline]
557 fn position(&self) -> usize {
558 self.opaque.position()
559 }
560
561 #[inline]
562 fn peek_byte(&self) -> u8 {
563 self.opaque.data[self.opaque.position()]
564 }
565
566 fn cached_ty_for_shorthand<F>(&mut self, shorthand: usize, or_insert_with: F) -> Ty<'tcx>
567 where
568 F: FnOnce(&mut Self) -> Ty<'tcx>,
569 {
570 let tcx = self.tcx;
571
572 let cache_key = ty::CReaderCacheKey { cnum: None, pos: shorthand };
573
574 if let Some(&ty) = tcx.ty_rcache.borrow().get(&cache_key) {
575 return ty;
576 }
577
578 let ty = or_insert_with(self);
579 // This may overwrite the entry, but it should overwrite with the same value.
580 tcx.ty_rcache.borrow_mut().insert_same(cache_key, ty);
581 ty
582 }
583
584 fn with_position<F, R>(&mut self, pos: usize, f: F) -> R
585 where
586 F: FnOnce(&mut Self) -> R,
587 {
588 debug_assert!(pos < self.opaque.data.len());
589
590 let new_opaque = MemDecoder::new(self.opaque.data, pos);
591 let old_opaque = mem::replace(&mut self.opaque, new_opaque);
592 let r = f(self);
593 self.opaque = old_opaque;
594 r
595 }
596
597 fn decode_alloc_id(&mut self) -> interpret::AllocId {
598 let alloc_decoding_session = self.alloc_decoding_session;
599 alloc_decoding_session.decode_alloc_id(self)
600 }
601 }
602
603 rustc_middle::implement_ty_decoder!(CacheDecoder<'a, 'tcx>);
604
605 // This ensures that the `Decodable<opaque::Decoder>::decode` specialization for `Vec<u8>` is used
606 // when a `CacheDecoder` is passed to `Decodable::decode`. Unfortunately, we have to manually opt
607 // into specializations this way, given how `CacheDecoder` and the decoding traits currently work.
608 impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Vec<u8> {
609 fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
610 Decodable::decode(&mut d.opaque)
611 }
612 }
613
614 impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for SyntaxContext {
615 fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self {
616 let syntax_contexts = decoder.syntax_contexts;
617 rustc_span::hygiene::decode_syntax_context(decoder, decoder.hygiene_context, |this, id| {
618 // This closure is invoked if we haven't already decoded the data for the `SyntaxContext` we are deserializing.
619 // We look up the position of the associated `SyntaxContextData` and decode it.
620 let pos = syntax_contexts.get(&id).unwrap();
621 this.with_position(pos.to_usize(), |decoder| {
622 let data: SyntaxContextData = decode_tagged(decoder, TAG_SYNTAX_CONTEXT);
623 data
624 })
625 })
626 }
627 }
628
629 impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for ExpnId {
630 fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self {
631 let hash = ExpnHash::decode(decoder);
632 if hash.is_root() {
633 return ExpnId::root();
634 }
635
636 if let Some(expn_id) = ExpnId::from_hash(hash) {
637 return expn_id;
638 }
639
640 let krate = decoder.tcx.stable_crate_id_to_crate_num(hash.stable_crate_id());
641
642 let expn_id = if krate == LOCAL_CRATE {
643 // We look up the position of the associated `ExpnData` and decode it.
644 let pos = decoder
645 .expn_data
646 .get(&hash)
647 .unwrap_or_else(|| panic!("Bad hash {:?} (map {:?})", hash, decoder.expn_data));
648
649 let data: ExpnData = decoder
650 .with_position(pos.to_usize(), |decoder| decode_tagged(decoder, TAG_EXPN_DATA));
651 let expn_id = rustc_span::hygiene::register_local_expn_id(data, hash);
652
653 #[cfg(debug_assertions)]
654 {
655 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
656 let mut hcx = decoder.tcx.create_stable_hashing_context();
657 let mut hasher = StableHasher::new();
658 hcx.while_hashing_spans(true, |hcx| {
659 expn_id.expn_data().hash_stable(hcx, &mut hasher)
660 });
661 let local_hash: u64 = hasher.finish();
662 debug_assert_eq!(hash.local_hash(), local_hash);
663 }
664
665 expn_id
666 } else {
667 let index_guess = decoder.foreign_expn_data[&hash];
668 decoder.tcx.cstore_untracked().expn_hash_to_expn_id(
669 decoder.tcx.sess,
670 krate,
671 index_guess,
672 hash,
673 )
674 };
675
676 debug_assert_eq!(expn_id.krate, krate);
677 expn_id
678 }
679 }
680
681 impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Span {
682 fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self {
683 let ctxt = SyntaxContext::decode(decoder);
684 let parent = Option::<LocalDefId>::decode(decoder);
685 let tag: u8 = Decodable::decode(decoder);
686
687 if tag == TAG_PARTIAL_SPAN {
688 return Span::new(BytePos(0), BytePos(0), ctxt, parent);
689 } else if tag == TAG_RELATIVE_SPAN {
690 let dlo = u32::decode(decoder);
691 let dto = u32::decode(decoder);
692
693 let enclosing = decoder.tcx.source_span_untracked(parent.unwrap()).data_untracked();
694 let span = Span::new(
695 enclosing.lo + BytePos::from_u32(dlo),
696 enclosing.lo + BytePos::from_u32(dto),
697 ctxt,
698 parent,
699 );
700
701 return span;
702 } else {
703 debug_assert_eq!(tag, TAG_FULL_SPAN);
704 }
705
706 let file_lo_index = SourceFileIndex::decode(decoder);
707 let line_lo = usize::decode(decoder);
708 let col_lo = BytePos::decode(decoder);
709 let len = BytePos::decode(decoder);
710
711 let file_lo = decoder.file_index_to_file(file_lo_index);
712 let lo = file_lo.lines(|lines| lines[line_lo - 1] + col_lo);
713 let hi = lo + len;
714
715 Span::new(lo, hi, ctxt, parent)
716 }
717 }
718
719 impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for CrateNum {
720 fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
721 let stable_id = StableCrateId::decode(d);
722 let cnum = d.tcx.stable_crate_id_to_crate_num(stable_id);
723 cnum
724 }
725 }
726
727 // This impl makes sure that we get a runtime error when we try to decode a
728 // `DefIndex` that is not contained in a `DefId`. Such a case would be problematic
729 // because we would not know how to transform the `DefIndex` to the current
730 // context.
731 impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefIndex {
732 fn decode(_d: &mut CacheDecoder<'a, 'tcx>) -> DefIndex {
733 panic!("trying to decode `DefIndex` outside the context of a `DefId`")
734 }
735 }
736
737 // Both the `CrateNum` and the `DefIndex` of a `DefId` can change in between two
738 // compilation sessions. We use the `DefPathHash`, which is stable across
739 // sessions, to map the old `DefId` to the new one.
740 impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefId {
741 fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
742 // Load the `DefPathHash`, which is what we encoded the `DefId` as.
743 let def_path_hash = DefPathHash::decode(d);
744
745 // Using the `DefPathHash`, we can look up the new `DefId`.
746 // Subtle: We only encode a `DefId` as part of a query result.
747 // If we get to this point, then all of the query inputs were green,
748 // which means that the definition with this hash is guaranteed to
749 // still exist in the current compilation session.
750 d.tcx.def_path_hash_to_def_id(def_path_hash, &mut || {
751 panic!("Failed to convert DefPathHash {:?}", def_path_hash)
752 })
753 }
754 }
755
756 impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx FxHashSet<LocalDefId> {
757 fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
758 RefDecodable::decode(d)
759 }
760 }
761
762 impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>>
763 for &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>>
764 {
765 fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
766 RefDecodable::decode(d)
767 }
768 }
769
770 impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [thir::abstract_const::Node<'tcx>] {
771 fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
772 RefDecodable::decode(d)
773 }
774 }
775
776 impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [(ty::Predicate<'tcx>, Span)] {
777 fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
778 RefDecodable::decode(d)
779 }
780 }
781
782 impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [rustc_ast::InlineAsmTemplatePiece] {
783 fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
784 RefDecodable::decode(d)
785 }
786 }
787
788 macro_rules! impl_ref_decoder {
789 (<$tcx:tt> $($ty:ty,)*) => {
790 $(impl<'a, $tcx> Decodable<CacheDecoder<'a, $tcx>> for &$tcx [$ty] {
791 fn decode(d: &mut CacheDecoder<'a, $tcx>) -> Self {
792 RefDecodable::decode(d)
793 }
794 })*
795 };
796 }
797
798 impl_ref_decoder! {<'tcx>
799 Span,
800 rustc_ast::Attribute,
801 rustc_span::symbol::Ident,
802 ty::Variance,
803 rustc_span::def_id::DefId,
804 rustc_span::def_id::LocalDefId,
805 (rustc_middle::middle::exported_symbols::ExportedSymbol<'tcx>, rustc_middle::middle::exported_symbols::SymbolExportInfo),
806 }
807
808 //- ENCODING -------------------------------------------------------------------
809
810 /// An encoder that can write to the incremental compilation cache.
811 pub struct CacheEncoder<'a, 'tcx> {
812 tcx: TyCtxt<'tcx>,
813 encoder: FileEncoder,
814 type_shorthands: FxHashMap<Ty<'tcx>, usize>,
815 predicate_shorthands: FxHashMap<ty::PredicateKind<'tcx>, usize>,
816 interpret_allocs: FxIndexSet<interpret::AllocId>,
817 source_map: CachingSourceMapView<'tcx>,
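// Presumably keyed by pointer identity because the `Lrc<SourceFile>`s are owned by
// the `SourceMap` for the rest of the session, so their addresses stay stable while
// we encode.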
818 file_to_file_index: FxHashMap<*const SourceFile, SourceFileIndex>,
819 hygiene_context: &'a HygieneEncodeContext,
820 }
821
822 impl<'a, 'tcx> CacheEncoder<'a, 'tcx> {
823 fn source_file_index(&mut self, source_file: Lrc<SourceFile>) -> SourceFileIndex {
824 self.file_to_file_index[&(&*source_file as *const SourceFile)]
825 }
826
827 /// Encode something with additional information that allows us to do some
828 /// sanity checks when decoding the data again. This method will first
829 /// encode the specified tag, then the given value, then the number of
830 /// bytes taken up by tag and value. On decoding, we can then verify that
831 /// we get the expected tag and read the expected number of bytes.
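///
/// For example, a call like `encode_tagged(SOME_TAG, &value)` (where `SOME_TAG` is
/// just an illustrative name) lays the record out as
/// `[tag bytes][value bytes][u64: byte length of tag + value]`, which is exactly
/// what `decode_tagged` re-checks on the way back in.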
832 fn encode_tagged<T: Encodable<Self>, V: Encodable<Self>>(&mut self, tag: T, value: &V) {
833 let start_pos = self.position();
834
835 tag.encode(self);
836 value.encode(self);
837
838 let end_pos = self.position();
839 ((end_pos - start_pos) as u64).encode(self);
840 }
841
842 fn finish(self) -> Result<usize, io::Error> {
843 self.encoder.finish()
844 }
845 }
846
847 impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for SyntaxContext {
848 fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) {
849 rustc_span::hygiene::raw_encode_syntax_context(*self, s.hygiene_context, s);
850 }
851 }
852
853 impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for ExpnId {
854 fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) {
855 s.hygiene_context.schedule_expn_data_for_encoding(*self);
856 self.expn_hash().encode(s);
857 }
858 }
859
860 impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for Span {
861 fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) {
862 let span_data = self.data_untracked();
863 span_data.ctxt.encode(s);
864 span_data.parent.encode(s);
865
866 if span_data.is_dummy() {
867 return TAG_PARTIAL_SPAN.encode(s);
868 }
869
870 if let Some(parent) = span_data.parent {
871 let enclosing = s.tcx.source_span(parent).data_untracked();
872 if enclosing.contains(span_data) {
873 TAG_RELATIVE_SPAN.encode(s);
874 (span_data.lo - enclosing.lo).to_u32().encode(s);
875 (span_data.hi - enclosing.lo).to_u32().encode(s);
876 return;
877 }
878 }
879
880 let pos = s.source_map.byte_pos_to_line_and_col(span_data.lo);
881 let partial_span = match &pos {
882 Some((file_lo, _, _)) => !file_lo.contains(span_data.hi),
883 None => true,
884 };
885
886 if partial_span {
887 return TAG_PARTIAL_SPAN.encode(s);
888 }
889
890 let (file_lo, line_lo, col_lo) = pos.unwrap();
891
892 let len = span_data.hi - span_data.lo;
893
894 let source_file_index = s.source_file_index(file_lo);
895
896 TAG_FULL_SPAN.encode(s);
897 source_file_index.encode(s);
898 line_lo.encode(s);
899 col_lo.encode(s);
900 len.encode(s);
901 }
902 }
903
904 impl<'a, 'tcx> TyEncoder for CacheEncoder<'a, 'tcx> {
905 type I = TyCtxt<'tcx>;
906 const CLEAR_CROSS_CRATE: bool = false;
907
908 fn position(&self) -> usize {
909 self.encoder.position()
910 }
911 fn type_shorthands(&mut self) -> &mut FxHashMap<Ty<'tcx>, usize> {
912 &mut self.type_shorthands
913 }
914 fn predicate_shorthands(&mut self) -> &mut FxHashMap<ty::PredicateKind<'tcx>, usize> {
915 &mut self.predicate_shorthands
916 }
917 fn encode_alloc_id(&mut self, alloc_id: &interpret::AllocId) {
918 let (index, _) = self.interpret_allocs.insert_full(*alloc_id);
919
920 index.encode(self);
921 }
922 }
923
924 impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for CrateNum {
925 fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) {
926 s.tcx.stable_crate_id(*self).encode(s);
927 }
928 }
929
930 impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for DefId {
931 fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) {
932 s.tcx.def_path_hash(*self).encode(s);
933 }
934 }
935
936 impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for DefIndex {
937 fn encode(&self, _: &mut CacheEncoder<'a, 'tcx>) {
938 bug!("encoding `DefIndex` without context");
939 }
940 }
941
942 macro_rules! encoder_methods {
943 ($($name:ident($ty:ty);)*) => {
944 #[inline]
945 $(fn $name(&mut self, value: $ty) {
946 self.encoder.$name(value)
947 })*
948 }
949 }
950
951 impl<'a, 'tcx> Encoder for CacheEncoder<'a, 'tcx> {
952 encoder_methods! {
953 emit_usize(usize);
954 emit_u128(u128);
955 emit_u64(u64);
956 emit_u32(u32);
957 emit_u16(u16);
958 emit_u8(u8);
959
960 emit_isize(isize);
961 emit_i128(i128);
962 emit_i64(i64);
963 emit_i32(i32);
964 emit_i16(i16);
965 emit_i8(i8);
966
967 emit_bool(bool);
968 emit_f64(f64);
969 emit_f32(f32);
970 emit_char(char);
971 emit_str(&str);
972 emit_raw_bytes(&[u8]);
973 }
974 }
975
976 // This ensures that the `Encodable<opaque::FileEncoder>::encode` specialization for byte slices
977 // is used when a `CacheEncoder` having an `opaque::FileEncoder` is passed to `Encodable::encode`.
978 // Unfortunately, we have to manually opt into specializations this way, given how `CacheEncoder`
979 // and the encoding traits currently work.
980 impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for [u8] {
981 fn encode(&self, e: &mut CacheEncoder<'a, 'tcx>) {
982 self.encode(&mut e.encoder);
983 }
984 }
985
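// Presumably invoked once per query type that opts into on-disk caching, via the
// macro-generated `QueryCtxt::encode_query_results` used in `serialize` above; `Q`
// supplies the query's in-memory cache and its `cache_on_disk` policy.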
986 pub fn encode_query_results<'a, 'tcx, CTX, Q>(
987 tcx: CTX,
988 encoder: &mut CacheEncoder<'a, 'tcx>,
989 query_result_index: &mut EncodedDepNodeIndex,
990 ) where
991 CTX: QueryContext + 'tcx,
992 Q: super::QueryDescription<CTX>,
993 Q::Value: Encodable<CacheEncoder<'a, 'tcx>>,
994 {
995 let _timer = tcx
996 .dep_context()
997 .profiler()
998 .extra_verbose_generic_activity("encode_query_results_for", std::any::type_name::<Q>());
999
1000 assert!(Q::query_state(tcx).all_inactive());
1001 let cache = Q::query_cache(tcx);
1002 cache.iter(&mut |key, value, dep_node| {
1003 if Q::cache_on_disk(*tcx.dep_context(), &key) {
1004 let dep_node = SerializedDepNodeIndex::new(dep_node.index());
1005
1006 // Record position of the cache entry.
1007 query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.encoder.position())));
1008
1009 // Encode the query result with the `SerializedDepNodeIndex`
1010 // as the tag.
1011 encoder.encode_tagged(dep_node, value);
1012 }
1013 });
1014 }