// src/librustc_metadata/rmeta/table.rs (upstream rustc 1.41.1)
use crate::rmeta::*;

use rustc_index::vec::Idx;
use rustc_serialize::{Encodable, opaque::Encoder};
use std::convert::TryInto;
use std::marker::PhantomData;
use std::num::NonZeroUsize;
use log::debug;

/// Helper trait, for encoding to, and decoding from, a fixed number of bytes.
/// Used mainly for Lazy positions and lengths.
/// Unchecked invariant: `Self::default()` should encode as `[0; BYTE_LEN]`,
/// but this has no impact on safety.
pub(super) trait FixedSizeEncoding: Default {
    const BYTE_LEN: usize;

    // FIXME(eddyb) convert to and from `[u8; Self::BYTE_LEN]` instead,
    // once that starts being allowed by the compiler (i.e. lazy normalization).
    fn from_bytes(b: &[u8]) -> Self;
    fn write_to_bytes(self, b: &mut [u8]);

    // FIXME(eddyb) make these generic functions, or at least defaults here.
    // (same problem as above, needs `[u8; Self::BYTE_LEN]`)
    // For now, a macro (`fixed_size_encoding_byte_len_and_defaults`) is used.

    /// Read a `Self` value (encoded as `Self::BYTE_LEN` bytes),
    /// from `&b[i * Self::BYTE_LEN..]`, returning `None` if `i`
    /// is not in bounds, or `Some(Self::from_bytes(...))` otherwise.
    fn maybe_read_from_bytes_at(b: &[u8], i: usize) -> Option<Self>;
    /// Write a `Self` value (encoded as `Self::BYTE_LEN` bytes),
    /// at `&mut b[i * Self::BYTE_LEN..]`, using `Self::write_to_bytes`.
    fn write_to_bytes_at(self, b: &mut [u8], i: usize);
}
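
// Added note: the zero-encoding invariant above is what lets `Table` leave
// never-`set` entries as zero bytes and have them decode as `None` — see
// `TableBuilder::set` below, which zero-fills with `resize`, and the
// `Option<Lazy<T>>` impl, which decodes an all-zero position as `None`.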

// HACK(eddyb) this shouldn't be needed (see comments on the methods above).
macro_rules! fixed_size_encoding_byte_len_and_defaults {
    ($byte_len:expr) => {
        const BYTE_LEN: usize = $byte_len;
        fn maybe_read_from_bytes_at(b: &[u8], i: usize) -> Option<Self> {
            const BYTE_LEN: usize = $byte_len;
            // HACK(eddyb) ideally this would be done with fully safe code,
            // but slicing `[u8]` with `i * N..` is optimized worse, due to the
            // possibility of `i * N` overflowing, than indexing `[[u8; N]]`.
            let b = unsafe {
                std::slice::from_raw_parts(
                    b.as_ptr() as *const [u8; BYTE_LEN],
                    b.len() / BYTE_LEN,
                )
            };
            b.get(i).map(|b| FixedSizeEncoding::from_bytes(b))
        }
        fn write_to_bytes_at(self, b: &mut [u8], i: usize) {
            const BYTE_LEN: usize = $byte_len;
            // HACK(eddyb) ideally this would be done with fully safe code,
            // see similar comment in `maybe_read_from_bytes_at` for why it can't yet.
            let b = unsafe {
                std::slice::from_raw_parts_mut(
                    b.as_mut_ptr() as *mut [u8; BYTE_LEN],
                    b.len() / BYTE_LEN,
                )
            };
            self.write_to_bytes(&mut b[i]);
        }
    }
}
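
// Added note: the macro above expands, inside each impl, to the `BYTE_LEN`
// constant plus the two `*_at` defaults, which reinterpret the byte buffer as
// `&[[u8; BYTE_LEN]]` so that indexing is bounds-checked without an `i * N`
// overflow check. The shadowing local `const BYTE_LEN` is presumably needed
// because using `Self::BYTE_LEN` in the array type would hit the same lazy
// normalization limitation mentioned in the FIXMEs above.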

impl FixedSizeEncoding for u32 {
    fixed_size_encoding_byte_len_and_defaults!(4);

    fn from_bytes(b: &[u8]) -> Self {
        let mut bytes = [0; Self::BYTE_LEN];
        bytes.copy_from_slice(&b[..Self::BYTE_LEN]);
        Self::from_le_bytes(bytes)
    }

    fn write_to_bytes(self, b: &mut [u8]) {
        b[..Self::BYTE_LEN].copy_from_slice(&self.to_le_bytes());
    }
}
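
// Illustrative round-trip (added commentary, not in the original source;
// `FixedSizeEncoding` is in scope within this module):
//
//     let mut buf = [0u8; 8];
//     1u32.write_to_bytes_at(&mut buf, 0);
//     2u32.write_to_bytes_at(&mut buf, 1);
//     assert_eq!(u32::from_bytes(&buf), 1); // little-endian
//     assert_eq!(u32::maybe_read_from_bytes_at(&buf, 1), Some(2));
//     assert_eq!(u32::maybe_read_from_bytes_at(&buf, 2), None); // out of bounds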

// NOTE(eddyb) there could be an impl for `usize`, which would enable a more
// generic `Lazy<T>` impl, but in the general case we might not need / want to
// fit every `usize` in `u32`.
impl<T: Encodable> FixedSizeEncoding for Option<Lazy<T>> {
    fixed_size_encoding_byte_len_and_defaults!(u32::BYTE_LEN);

    fn from_bytes(b: &[u8]) -> Self {
        Some(Lazy::from_position(NonZeroUsize::new(u32::from_bytes(b) as usize)?))
    }

    fn write_to_bytes(self, b: &mut [u8]) {
        let position = self.map_or(0, |lazy| lazy.position.get());
        let position: u32 = position.try_into().unwrap();

        position.write_to_bytes(b)
    }
}
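
// Added note: `Lazy` positions are `NonZeroUsize`, so the all-zero encoding is
// reserved for `None` — `NonZeroUsize::new(0)` yields `None` and the `?` above
// propagates it. For example (with `T` standing in for any `Encodable` type):
//
//     let mut buf = [0u8; 4];
//     None::<Lazy<T>>.write_to_bytes(&mut buf);
//     assert_eq!(buf, [0; 4]);
//     assert!(<Option<Lazy<T>>>::from_bytes(&buf).is_none());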

impl<T: Encodable> FixedSizeEncoding for Option<Lazy<[T]>> {
    fixed_size_encoding_byte_len_and_defaults!(u32::BYTE_LEN * 2);

    fn from_bytes(b: &[u8]) -> Self {
        Some(Lazy::from_position_and_meta(
            <Option<Lazy<T>>>::from_bytes(b)?.position,
            u32::from_bytes(&b[u32::BYTE_LEN..]) as usize,
        ))
    }

    fn write_to_bytes(self, b: &mut [u8]) {
        self.map(|lazy| Lazy::<T>::from_position(lazy.position))
            .write_to_bytes(b);

        let len = self.map_or(0, |lazy| lazy.meta);
        let len: u32 = len.try_into().unwrap();

        len.write_to_bytes(&mut b[u32::BYTE_LEN..]);
    }
}
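
// Added note: the 8-byte slice encoding is `[position: u32 LE][length: u32 LE]`.
// Decoding reuses the scalar impl above for the position word, so a zero
// position again means `None`, and in that case the length word is never read.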

/// Random-access table (i.e. offering constant-time `get`/`set`), similar to
/// `Vec<Option<T>>`, but without requiring encoding or decoding all the values
/// eagerly and in-order.
/// A total of `(max_idx + 1) * <Option<T> as FixedSizeEncoding>::BYTE_LEN` bytes
/// are used for a table, where `max_idx` is the largest index passed to
/// `TableBuilder::set`.
pub(super) struct Table<I: Idx, T> where Option<T>: FixedSizeEncoding {
    _marker: PhantomData<(fn(&I), T)>,
    // NOTE(eddyb) this makes `Table` not implement `Sized`, but no
    // value of `Table` is ever created (it's always behind `Lazy`).
    _bytes: [u8],
}

/// Helper for constructing a table's serialization (also see `Table`).
pub(super) struct TableBuilder<I: Idx, T> where Option<T>: FixedSizeEncoding {
    // FIXME(eddyb) use `IndexVec<I, [u8; <Option<T>>::BYTE_LEN]>` instead of
    // `Vec<u8>`, once that starts working (i.e. lazy normalization).
    // Then again, that has the downside of not allowing `TableBuilder::encode` to
    // obtain a `&[u8]` entirely in safe code, for writing the bytes out.
    bytes: Vec<u8>,
    _marker: PhantomData<(fn(&I), T)>,
}

impl<I: Idx, T> Default for TableBuilder<I, T> where Option<T>: FixedSizeEncoding {
    fn default() -> Self {
        TableBuilder {
            bytes: vec![],
            _marker: PhantomData,
        }
    }
}

impl<I: Idx, T> TableBuilder<I, T> where Option<T>: FixedSizeEncoding {
    pub(super) fn set(&mut self, i: I, value: T) {
        // FIXME(eddyb) investigate more compact encodings for sparse tables.
        // On the PR @michaelwoerister mentioned:
        // > Space requirements could perhaps be optimized by using the HAMT `popcnt`
        // > trick (i.e. divide things into buckets of 32 or 64 items and then
        // > store bit-masks of which item in each bucket is actually serialized).
        let i = i.index();
        let needed = (i + 1) * <Option<T>>::BYTE_LEN;
        if self.bytes.len() < needed {
            self.bytes.resize(needed, 0);
        }

        Some(value).write_to_bytes_at(&mut self.bytes, i);
    }

    pub(super) fn encode(&self, buf: &mut Encoder) -> Lazy<Table<I, T>> {
        let pos = buf.position();
        buf.emit_raw_bytes(&self.bytes);
        Lazy::from_position_and_meta(
            NonZeroUsize::new(pos as usize).unwrap(),
            self.bytes.len(),
        )
    }
}
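
// Usage sketch (added commentary; the value type `Entry` here is hypothetical,
// and the real call sites live in `rmeta::encoder`):
//
//     let mut builder: TableBuilder<DefIndex, Lazy<Entry>> = Default::default();
//     builder.set(def_index, lazy_entry);
//     let table = builder.encode(&mut opaque_encoder);
//     // `table: Lazy<Table<DefIndex, Lazy<Entry>>>`, carrying the byte length
//     // of the table as its metadata.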
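// A table's `Lazy` metadata is its byte length (see `encode` above); since
// every entry occupies exactly `<Option<T>>::BYTE_LEN` bytes, that length
// alone determines how many entries are addressable.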
impl<I: Idx, T> LazyMeta for Table<I, T> where Option<T>: FixedSizeEncoding {
    type Meta = usize;

    fn min_size(len: usize) -> usize {
        len
    }
}

impl<I: Idx, T> Lazy<Table<I, T>> where Option<T>: FixedSizeEncoding {
    /// Given the metadata, extract the value at a particular index (if any).
    #[inline(never)]
    pub(super) fn get<'a, 'tcx, M: Metadata<'a, 'tcx>>(
        &self,
        metadata: M,
        i: I,
    ) -> Option<T> {
        debug!("Table::get: index={:?} len={:?}", i, self.meta);

        let start = self.position.get();
        let bytes = &metadata.raw_bytes()[start..start + self.meta];
        <Option<T>>::maybe_read_from_bytes_at(bytes, i.index())?
    }
}
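
// Worked layout example (added commentary, not in the original source): a
// `Table<I, Lazy<T>>` whose largest `set` index was 2, with only index 2
// populated (say with a `Lazy` at position 0x30), occupies 12 bytes:
//
//     index 0: 00 00 00 00   (never set, left zeroed => decodes as `None`)
//     index 1: 00 00 00 00   (never set, left zeroed => decodes as `None`)
//     index 2: 30 00 00 00   (little-endian 0x30 => `Some(Lazy)` at 0x30)
//
// `get` with `i >= 3` falls outside the table's `self.meta` bytes, so
// `maybe_read_from_bytes_at` returns `None` via its bounds check.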