]>
Commit | Line | Data |
---|---|---|
9fa01778 XL |
1 | pub use Integer::*; |
2 | pub use Primitive::*; | |
83c7162d | 3 | |
923072b8 | 4 | use crate::json::{Json, ToJson}; |
9fa01778 | 5 | use crate::spec::Target; |
83c7162d | 6 | |
ba9703b0 | 7 | use std::convert::{TryFrom, TryInto}; |
5869c6ff | 8 | use std::fmt; |
94222f64 | 9 | use std::iter::Step; |
f2b60f7d | 10 | use std::num::{NonZeroUsize, ParseIntError}; |
c295e0f8 | 11 | use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub}; |
5869c6ff | 12 | use std::str::FromStr; |
83c7162d | 13 | |
5e7ed085 | 14 | use rustc_data_structures::intern::Interned; |
e74abb32 | 15 | use rustc_index::vec::{Idx, IndexVec}; |
60c5eb7d | 16 | use rustc_macros::HashStable_Generic; |
a1dfa0c6 | 17 | |
83c7162d XL |
// Function-call ABI support lives in the `call` submodule (separate file, not visible here).
pub mod call;
19 | ||
/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
    /// Byte order of the target (must agree with `Target::endian`, checked in `parse`).
    pub endian: Endian,
    // ABI/preferred alignments for each primitive integer width.
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    // ABI/preferred alignments for the float primitives.
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    /// Size of a pointer (must agree with `Target::pointer_width`, checked in `parse`).
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    /// Alignment applied to aggregates (structs, arrays, ...).
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    /// Address space used for instruction/function pointers (the `P<n>` spec).
    pub instruction_address_space: AddressSpace,

    /// Minimum size of #[repr(C)] enums (default I32 bits)
    pub c_enum_min_size: Integer,
}
44 | ||
impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    ///
    /// These are baseline values; `TargetDataLayout::parse` starts from this
    /// default and overrides individual fields from the target's data-layout
    /// string, so any field not mentioned there keeps the value below.
    fn default() -> TargetDataLayout {
        // Helper: build an `Align` from a bit count, panicking on invalid input
        // (all inputs below are valid powers of two).
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            // 64-bit and 128-bit ints default to a 32-bit ABI alignment with a
            // 64-bit preferred alignment.
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            // `align(0)` is treated as 1-byte ABI alignment (see `Align::from_bytes`).
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: AddressSpace::DATA,
            c_enum_min_size: Integer::I32,
        }
    }
}
71 | ||
/// Errors produced while parsing a target's data-layout string
/// (see `TargetDataLayout::parse`). The `cause` fields carry the
/// data-layout spec fragment that triggered the error.
pub enum TargetDataLayoutErrors<'a> {
    /// A `P<n>` address-space index failed to parse as `u32`.
    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
    /// A size/alignment bit count failed to parse as `u64`; `kind` is "size" or "alignment".
    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
    /// An alignment spec had no components at all.
    MissingAlignment { cause: &'a str },
    /// A bit count was rejected by `Align::from_bits` (not a power of two, or too large).
    InvalidAlignment { cause: &'a str, err: String },
    /// Data-layout endianness disagrees with `Target::endian`.
    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
    /// Data-layout pointer size disagrees with `Target::pointer_width`.
    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
    /// `Target::c_enum_min_bits` does not name a supported integer width.
    InvalidBitsSize { err: String },
}
81 | ||
impl TargetDataLayout {
    /// Parses the target's LLVM data-layout string (`target.data_layout`),
    /// starting from `TargetDataLayout::default()` and overriding fields for
    /// each recognized `-`-separated spec. Unrecognized specs are ignored.
    ///
    /// After parsing, cross-checks the result against `target.endian`,
    /// `target.pointer_width` and `target.c_enum_min_bits`; any mismatch or
    /// malformed spec is reported via `TargetDataLayoutErrors`.
    pub fn parse<'a>(target: &'a Target) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &'a str, cause: &'a str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
                kind,
                bit: s,
                cause,
                err,
            })
        };

        // Parse a size string.
        let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string: `abi[:pref]`, in bits. If `pref` is
        // absent it defaults to the ABI alignment.
        let align = |s: &[&'a str], cause: &'a str| {
            if s.is_empty() {
                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits)
                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
        };

        let mut dl = TargetDataLayout::default();
        // Tracks which integer width currently supplies `i128_align`; starts
        // at 64 so only i64..=i128 specs can override it.
        let mut i128_align_src = 64;
        for spec in target.data_layout.split('-') {
            // Each spec is a `:`-separated list, e.g. `i64:64:64`.
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
                // Pointer spec: `p` or `p0` (address space 0), then size and alignment.
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = size(s, p)?;
                    dl.pointer_align = align(a, p)?;
                }
                // Integer spec: `i<bits>:<align>`.
                [s, ref a @ ..] if s.starts_with('i') => {
                    let Ok(bits) = s[1..].parse::<u64>() else {
                        size(&s[1..], "i")?; // For the user error.
                        continue;
                    };
                    let a = align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                // Vector spec: `v<bits>:<align>`.
                [s, ref a @ ..] if s.starts_with('v') => {
                    let v_size = size(&s[1..], "v")?;
                    let a = align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Perform consistency checks against the Target information.
        if dl.endian != target.endian {
            return Err(TargetDataLayoutErrors::InconsistentTargetArchitecture {
                dl: dl.endian.as_str(),
                target: target.endian.as_str(),
            });
        }

        let target_pointer_width: u64 = target.pointer_width.into();
        if dl.pointer_size.bits() != target_pointer_width {
            return Err(TargetDataLayoutErrors::InconsistentTargetPointerWidth {
                pointer_size: dl.pointer_size.bits(),
                target: target.pointer_width,
            });
        }

        dl.c_enum_min_size = match Integer::from_size(Size::from_bits(target.c_enum_min_bits)) {
            Ok(bits) => bits,
            Err(err) => return Err(TargetDataLayoutErrors::InvalidBitsSize { err }),
        };

        Ok(dl)
    }

    /// Returns exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
        }
    }

    /// Returns the `Integer` type with the same width as a pointer on this
    /// target. Panics on pointer widths other than 16/32/64 bits.
    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
        }
    }

    /// Returns the alignment for a vector of size `vec_size`, using the
    /// target-specified table when available.
    #[inline]
    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
}
238 | ||
/// Abstraction over any context that can supply the target's data layout.
pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}
242 | ||
// A `TargetDataLayout` trivially provides itself as the data layout.
impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}
249 | ||
/// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone, PartialEq)]
pub enum Endian {
    Little,
    Big,
}
256 | ||
5869c6ff XL |
257 | impl Endian { |
258 | pub fn as_str(&self) -> &'static str { | |
259 | match self { | |
260 | Self::Little => "little", | |
261 | Self::Big => "big", | |
262 | } | |
263 | } | |
264 | } | |
265 | ||
impl fmt::Debug for Endian {
    // Debug output is the same lowercase name produced by `as_str`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}
271 | ||
272 | impl FromStr for Endian { | |
273 | type Err = String; | |
274 | ||
275 | fn from_str(s: &str) -> Result<Self, Self::Err> { | |
276 | match s { | |
277 | "little" => Ok(Self::Little), | |
278 | "big" => Ok(Self::Big), | |
279 | _ => Err(format!(r#"unknown endian: "{}""#, s)), | |
280 | } | |
281 | } | |
282 | } | |
283 | ||
impl ToJson for Endian {
    // Serialized as the lowercase name string (via `as_str`).
    fn to_json(&self) -> Json {
        self.as_str().to_json()
    }
}
289 | ||
/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct Size {
    // Byte count; kept private so all construction goes through
    // `from_bytes`/`from_bits`.
    raw: u64,
}
296 | ||
// This is debug-printed a lot in larger structs, don't waste too much space there
impl fmt::Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Size({} bytes)", self.bytes())
    }
}
303 | ||
impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        // Avoid potential overflow from `bits + 7`.
        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
    }

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    /// `bytes()` as `usize`; panics if it does not fit.
    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    /// The size in bits; panics if `bytes * 8` overflows `u64`.
    #[inline]
    pub fn bits(self) -> u64 {
        #[cold]
        fn overflow(bytes: u64) -> ! {
            panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
        }

        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
    }

    /// `bits()` as `usize`; panics if it does not fit.
    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    /// Rounds this size up to the nearest multiple of `align`
    /// (relies on `Align` always being a power of two).
    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    /// Returns `true` if this size is a multiple of `align`.
    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    /// Checked addition that also rejects results at or beyond the target's
    /// object-size bound (`obj_size_bound`).
    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Checked multiplication that also rejects results at or beyond the
    /// target's object-size bound (`obj_size_bound`).
    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
    #[inline]
    pub fn sign_extend(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        (((value << shift) as i128) >> shift) as u128
    }

    /// Truncates `value` to `self` bits.
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }

    /// Minimum value of a signed integer of `self` bits
    /// (the `bits() - 1` shift underflows — and panics in debug — for zero-sized types).
    #[inline]
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
    }

    /// Maximum value of a signed integer of `self` bits.
    #[inline]
    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    /// Maximum value of an unsigned integer of `self` bits.
    #[inline]
    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}
419 | ||
420 | // Panicking addition, subtraction and multiplication for convenience. | |
421 | // Avoid during layout computation, return `LayoutError` instead. | |
422 | ||
423 | impl Add for Size { | |
424 | type Output = Size; | |
8faf50e0 | 425 | #[inline] |
83c7162d | 426 | fn add(self, other: Size) -> Size { |
94b46f34 XL |
427 | Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| { |
428 | panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes()) | |
429 | })) | |
83c7162d XL |
430 | } |
431 | } | |
432 | ||
433 | impl Sub for Size { | |
434 | type Output = Size; | |
8faf50e0 | 435 | #[inline] |
83c7162d | 436 | fn sub(self, other: Size) -> Size { |
94b46f34 XL |
437 | Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| { |
438 | panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes()) | |
439 | })) | |
440 | } | |
441 | } | |
442 | ||
impl Mul<Size> for u64 {
    type Output = Size;
    // `count * size` commutes to `size * count`; the panicking behavior
    // comes from the `Mul<u64> for Size` impl below.
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}
450 | ||
451 | impl Mul<u64> for Size { | |
452 | type Output = Size; | |
8faf50e0 | 453 | #[inline] |
83c7162d XL |
454 | fn mul(self, count: u64) -> Size { |
455 | match self.bytes().checked_mul(count) { | |
456 | Some(bytes) => Size::from_bytes(bytes), | |
dfeec247 | 457 | None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count), |
83c7162d XL |
458 | } |
459 | } | |
460 | } | |
461 | ||
impl AddAssign for Size {
    // Delegates to `Add`, so it panics on overflow exactly like `+`.
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}
468 | ||
// `Step` (nightly trait; enables `Range<Size>` iteration) is implemented by
// delegating every operation to the underlying `u64` byte count.
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
    }
}
505 | ||
/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct Align {
    // Stored as the base-2 logarithm of the byte alignment, which makes the
    // power-of-two invariant unrepresentable-by-construction.
    pow2: u8,
}
512 | ||
// This is debug-printed a lot in larger structs, don't waste too much space there
impl fmt::Debug for Align {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Align({} bytes)", self.bytes())
    }
}
519 | ||
impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    // 2^29 bytes: the largest alignment this representation accepts.
    pub const MAX: Align = Align { pow2: 29 };

    /// Builds an `Align` from a bit count (rounded up to whole bytes by
    /// `Size::from_bits`, then validated by `from_bytes`).
    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, String> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    /// Builds an `Align` from a byte count; errors if it is not a power of
    /// two or exceeds `Align::MAX`.
    #[inline]
    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        // Error constructors kept out-of-line and `#[cold]` so the hot,
        // successful path stays small.
        #[cold]
        fn not_power_of_2(align: u64) -> String {
            format!("`{}` is not a power of 2", align)
        }

        #[cold]
        fn too_large(align: u64) -> String {
            format!("`{}` is too large", align)
        }

        // Count trailing zero bits; a power of two reduces to exactly 1.
        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(not_power_of_2(align));
        }
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
    #[inline]
    pub fn max_for_offset(offset: Size) -> Align {
        Align { pow2: offset.bytes().trailing_zeros() as u8 }
    }

    /// Lower the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    #[inline]
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
}
588 | ||
/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}
596 | ||
597 | impl AbiAndPrefAlign { | |
94222f64 | 598 | #[inline] |
a1dfa0c6 | 599 | pub fn new(align: Align) -> AbiAndPrefAlign { |
dfeec247 | 600 | AbiAndPrefAlign { abi: align, pref: align } |
a1dfa0c6 XL |
601 | } |
602 | ||
94222f64 | 603 | #[inline] |
a1dfa0c6 | 604 | pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign { |
dfeec247 | 605 | AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) } |
a1dfa0c6 XL |
606 | } |
607 | ||
94222f64 | 608 | #[inline] |
a1dfa0c6 | 609 | pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign { |
dfeec247 | 610 | AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) } |
a1dfa0c6 XL |
611 | } |
612 | } | |
613 | ||
/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}
623 | ||
impl Integer {
    /// The size of this integer type in bytes.
    #[inline]
    pub fn size(self) -> Size {
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    /// The target-specific alignment of this integer type.
    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Finds the smallest Integer type which can represent the signed value.
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest Integer type which can represent the unsigned value.
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        // Requires an exact match of both ABI alignment and size.
        for candidate in [I8, I16, I32, I64, I128] {
            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
                return Some(candidate);
            }
        }
        None
    }

    /// Find the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}
711 | ||
/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer,
}
727 | ||
impl Primitive {
    /// The size of this primitive; pointers use the target's pointer size.
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            Pointer => dl.pointer_size,
        }
    }

    /// The target-specific alignment of this primitive.
    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align,
        }
    }

    // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
    #[inline]
    pub fn is_float(self) -> bool {
        matches!(self, F32 | F64)
    }

    // FIXME(eddyb) remove, it's completely unused.
    #[inline]
    pub fn is_int(self) -> bool {
        matches!(self, Int(..))
    }

    #[inline]
    pub fn is_ptr(self) -> bool {
        matches!(self, Pointer)
    }
}
768 | ||
/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`,
/// followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[derive(HashStable_Generic)]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}
785 | ||
impl WrappingRange {
    /// The full (non-wrapping) range `0..=unsigned_int_max` for a value of `size` bits.
    pub fn full(size: Size) -> Self {
        Self { start: 0, end: size.unsigned_int_max() }
    }

    /// Returns `true` if `v` is contained in the range.
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            // Ordinary range: one contiguous interval.
            self.start <= v && v <= self.end
        } else {
            // Wrap-around range: membership in either `start..=MAX` or `0..=end`.
            self.start <= v || v <= self.end
        }
    }

    /// Returns `self` with replaced `start`
    #[inline(always)]
    pub fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with replaced `end`
    #[inline(always)]
    pub fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }

    /// Returns `true` if `size` completely fills the range.
    #[inline]
    pub fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        // Full iff `end + 1` (wrapping within `size` bits) lands back on `start`.
        self.start == (self.end.wrapping_add(1) & max_value)
    }
}
823 | ||
824 | impl fmt::Debug for WrappingRange { | |
825 | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { | |
c295e0f8 XL |
826 | if self.start > self.end { |
827 | write!(fmt, "(..={}) | ({}..)", self.end, self.start)?; | |
828 | } else { | |
829 | write!(fmt, "{}..={}", self.start, self.end)?; | |
830 | } | |
94222f64 XL |
831 | Ok(()) |
832 | } | |
833 | } | |
834 | ||
/// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub enum Scalar {
    /// A scalar that carries a restriction on its valid values and must
    /// always be initialized.
    Initialized {
        value: Primitive,

        // FIXME(eddyb) always use the shortest range, e.g., by finding
        // the largest space between two consecutive valid values and
        // taking everything else as the (shortest) valid range.
        valid_range: WrappingRange,
    },
    Union {
        /// Even for unions, we need to use the correct registers for the kind of
        /// values inside the union, so we keep the `Primitive` type around. We
        /// also use it to compute the size of the scalar.
        /// However, unions never have niches and even allow undef,
        /// so there is no `valid_range`.
        value: Primitive,
    },
}
856 | ||
857 | impl Scalar { | |
94222f64 | 858 | #[inline] |
83c7162d | 859 | pub fn is_bool(&self) -> bool { |
c295e0f8 XL |
860 | matches!( |
861 | self, | |
04454e1e FG |
862 | Scalar::Initialized { |
863 | value: Int(I8, false), | |
864 | valid_range: WrappingRange { start: 0, end: 1 } | |
865 | } | |
c295e0f8 | 866 | ) |
83c7162d XL |
867 | } |
868 | ||
04454e1e FG |
869 | /// Get the primitive representation of this type, ignoring the valid range and whether the |
870 | /// value is allowed to be undefined (due to being a union). | |
871 | pub fn primitive(&self) -> Primitive { | |
872 | match *self { | |
873 | Scalar::Initialized { value, .. } | Scalar::Union { value } => value, | |
874 | } | |
875 | } | |
876 | ||
877 | pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign { | |
878 | self.primitive().align(cx) | |
879 | } | |
880 | ||
881 | pub fn size(self, cx: &impl HasDataLayout) -> Size { | |
882 | self.primitive().size(cx) | |
883 | } | |
884 | ||
885 | #[inline] | |
886 | pub fn to_union(&self) -> Self { | |
887 | Self::Union { value: self.primitive() } | |
888 | } | |
889 | ||
890 | #[inline] | |
891 | pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange { | |
892 | match *self { | |
893 | Scalar::Initialized { valid_range, .. } => valid_range, | |
894 | Scalar::Union { value } => WrappingRange::full(value.size(cx)), | |
895 | } | |
896 | } | |
897 | ||
898 | #[inline] | |
899 | /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union. | |
900 | pub fn valid_range_mut(&mut self) -> &mut WrappingRange { | |
901 | match self { | |
902 | Scalar::Initialized { valid_range, .. } => valid_range, | |
903 | Scalar::Union { .. } => panic!("cannot change the valid range of a union"), | |
904 | } | |
905 | } | |
906 | ||
c295e0f8 XL |
907 | /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout |
908 | #[inline] | |
909 | pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool { | |
04454e1e FG |
910 | match *self { |
911 | Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)), | |
912 | Scalar::Union { .. } => true, | |
913 | } | |
83c7162d | 914 | } |
923072b8 FG |
915 | |
916 | /// Returns `true` if this type can be left uninit. | |
917 | #[inline] | |
918 | pub fn is_uninit_valid(&self) -> bool { | |
919 | match *self { | |
920 | Scalar::Initialized { .. } => false, | |
921 | Scalar::Union { .. } => true, | |
922 | } | |
923 | } | |
83c7162d XL |
924 | } |
925 | ||
/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum FieldsShape {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: Vec<Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: Vec<u32>,
    },
}
967 | ||
impl FieldsShape {
    /// Number of fields in this shape (0 for `Primitive`).
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            // `count` is stored as `u64`; this conversion only fails if it
            // does not fit in `usize` (e.g. on a 32-bit host).
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

    /// Byte offset of field `i` (in source definition order).
    ///
    /// Panics/aborts if `i` is out of bounds for the shape, and is
    /// unreachable for `Primitive` (which has no fields).
    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(
                    i < count.get(),
                    "tried to access field {} of union with {} fields",
                    i,
                    count
                );
                // All union fields live at offset zero.
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count);
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
        }
    }

    /// Memory-order index of source-order field `i`.
    /// The identity mapping except for `Arbitrary`, which may reorder fields.
    #[inline]
    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
        }
    }

    /// Gets source indices of the fields by increasing offsets.
    #[inline]
    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
        // Inverse of `memory_index` (memory order -> source order), stored in
        // a fixed stack array when small, heap vector otherwise.
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for i in 0..self.count() {
                    inverse_small[memory_index[i] as usize] = i as u8;
                }
            } else {
                inverse_big = vec![0; self.count()];
                for i in 0..self.count() {
                    inverse_big[memory_index[i] as usize] = i as u32;
                }
            }
        }

        (0..self.count()).map(move |i| match *self {
            // Non-`Arbitrary` shapes never reorder fields.
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i] as usize
                }
            }
        })
    }
}
1047 | ||
3dfed10e XL |
/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct AddressSpace(pub u32);
1053 | ||
impl AddressSpace {
    /// The default address space, corresponding to data space.
    pub const DATA: Self = AddressSpace(0);
}
1058 | ||
83c7162d XL |
/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Abi {
    /// A type with no valid values at all.
    Uninhabited,
    /// A single scalar value (integer, float, or pointer).
    Scalar(Scalar),
    /// Exactly two scalar components.
    ScalarPair(Scalar, Scalar),
    /// A SIMD vector of `count` identical scalar elements.
    Vector {
        element: Scalar,
        count: u64,
    },
    /// Everything else (structs, enums, arrays, ...), passed in memory.
    Aggregate {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    },
}
1075 | ||
1076 | impl Abi { | |
9fa01778 | 1077 | /// Returns `true` if the layout corresponds to an unsized type. |
17df50a5 | 1078 | #[inline] |
83c7162d XL |
1079 | pub fn is_unsized(&self) -> bool { |
1080 | match *self { | |
dfeec247 XL |
1081 | Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false, |
1082 | Abi::Aggregate { sized } => !sized, | |
83c7162d XL |
1083 | } |
1084 | } | |
1085 | ||
9fa01778 | 1086 | /// Returns `true` if this is a single signed integer scalar |
94222f64 | 1087 | #[inline] |
83c7162d | 1088 | pub fn is_signed(&self) -> bool { |
c295e0f8 | 1089 | match self { |
04454e1e | 1090 | Abi::Scalar(scal) => match scal.primitive() { |
83c7162d XL |
1091 | Primitive::Int(_, signed) => signed, |
1092 | _ => false, | |
1093 | }, | |
ba9703b0 | 1094 | _ => panic!("`is_signed` on non-scalar ABI {:?}", self), |
83c7162d XL |
1095 | } |
1096 | } | |
0bf4aa26 | 1097 | |
9fa01778 | 1098 | /// Returns `true` if this is an uninhabited type |
17df50a5 | 1099 | #[inline] |
0bf4aa26 | 1100 | pub fn is_uninhabited(&self) -> bool { |
29967ef6 | 1101 | matches!(*self, Abi::Uninhabited) |
0bf4aa26 | 1102 | } |
60c5eb7d XL |
1103 | |
1104 | /// Returns `true` is this is a scalar type | |
17df50a5 | 1105 | #[inline] |
60c5eb7d | 1106 | pub fn is_scalar(&self) -> bool { |
29967ef6 | 1107 | matches!(*self, Abi::Scalar(_)) |
60c5eb7d | 1108 | } |
83c7162d XL |
1109 | } |
1110 | ||
// Index type identifying one variant within a multi-variant layout
// (used e.g. as the key of the `variants` `IndexVec` below).
rustc_index::newtype_index! {
    pub struct VariantIdx {
        derive [HashStable_Generic]
    }
}
1116 | ||
/// The variant structure of a layout: either a single variant
/// or a tagged multi-variant (enum-like) arrangement.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Variants<'a> {
    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single { index: VariantIdx },

    /// Enum-likes with more than one inhabited variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding,
        tag_field: usize,
        variants: IndexVec<VariantIdx, Layout<'a>>,
    },
}
1135 | ||
/// How the tag of a multi-variant layout encodes the discriminant.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum TagEncoding {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// Discriminant and variant index coincide.
    /// The variant `untagged_variant` contains a niche at an arbitrary
    /// offset (field `tag_field` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    Niche {
        untagged_variant: VariantIdx,
        niche_variants: RangeInclusive<VariantIdx>,
        niche_start: u128,
    },
}
1158 | ||
/// A scalar inside a layout (at `offset`) whose `valid_range` excludes some
/// values, so that the invalid values can be reused to encode data (e.g. enum
/// discriminants, see `TagEncoding::Niche`).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}
1165 | ||
impl Niche {
    /// Builds a `Niche` from a scalar at `offset`, returning `None` when the
    /// scalar is a union (no valid-range restriction) or has no spare values.
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

    /// Number of values outside the valid range, i.e. how many distinct
    /// encodings this niche can provide.
    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }

    /// Reserves `count` values from the niche, returning the first reserved
    /// value and the widened `Scalar`, or `None` when there is not enough room.
    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Same computation as `available` above.
        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
        // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
        // This is accomplished by preferring enums with 2 variants(`count==1`) and always taking the shortest path to niche zero.
        // Having `None` in niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select closest to zero given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
        // If niche zero is already reserved, the selection of bounds are of little interest.
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // zero is unavailable because wrapping occurs
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // moved past zero, use other bound
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // moved past zero, use other bound
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}
1241 | ||
5e7ed085 FG |
/// The layout of a type: field placement, variants, ABI, niche, size, align.
/// Usually accessed through the interned handle `Layout<'a>`.
#[derive(PartialEq, Eq, Hash, HashStable_Generic)]
pub struct LayoutS<'a> {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. generators can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants<'a>,

    /// The `abi` defines how this data is passed between functions, and it defines
    /// value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
    /// have to be taken into account to find all fields of this layout.
    pub abi: Abi,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,

    /// ABI-mandated and preferred alignment of the type.
    pub align: AbiAndPrefAlign,
    /// Total size of the type.
    pub size: Size,
}
1272 | ||
5e7ed085 | 1273 | impl<'a> LayoutS<'a> { |
a1dfa0c6 | 1274 | pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self { |
c295e0f8 | 1275 | let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar); |
04454e1e FG |
1276 | let size = scalar.size(cx); |
1277 | let align = scalar.align(cx); | |
5e7ed085 | 1278 | LayoutS { |
a1dfa0c6 | 1279 | variants: Variants::Single { index: VariantIdx::new(0) }, |
ba9703b0 | 1280 | fields: FieldsShape::Primitive, |
83c7162d | 1281 | abi: Abi::Scalar(scalar), |
416331ca | 1282 | largest_niche, |
83c7162d XL |
1283 | size, |
1284 | align, | |
1285 | } | |
1286 | } | |
1287 | } | |
1288 | ||
5e7ed085 FG |
1289 | impl<'a> fmt::Debug for LayoutS<'a> { |
1290 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | |
1291 | // This is how `Layout` used to print before it become | |
1292 | // `Interned<LayoutS>`. We print it like this to avoid having to update | |
1293 | // expected output in a lot of tests. | |
064997fb | 1294 | let LayoutS { size, align, abi, fields, largest_niche, variants } = self; |
5e7ed085 | 1295 | f.debug_struct("Layout") |
064997fb FG |
1296 | .field("size", size) |
1297 | .field("align", align) | |
1298 | .field("abi", abi) | |
1299 | .field("fields", fields) | |
1300 | .field("largest_niche", largest_niche) | |
1301 | .field("variants", variants) | |
5e7ed085 FG |
1302 | .finish() |
1303 | } | |
1304 | } | |
1305 | ||
/// Interned handle to a `LayoutS`. Copying/comparing/hashing is cheap
/// (pointer-sized) thanks to `Interned`.
#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
#[rustc_pass_by_value]
pub struct Layout<'a>(pub Interned<'a, LayoutS<'a>>);
1309 | ||
1310 | impl<'a> fmt::Debug for Layout<'a> { | |
1311 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | |
1312 | // See comment on `<LayoutS as Debug>::fmt` above. | |
1313 | self.0.0.fmt(f) | |
1314 | } | |
1315 | } | |
1316 | ||
impl<'a> Layout<'a> {
    /// Field placement of the interned layout.
    pub fn fields(self) -> &'a FieldsShape {
        &self.0.0.fields
    }

    /// Variant structure of the interned layout.
    pub fn variants(self) -> &'a Variants<'a> {
        &self.0.0.variants
    }

    /// ABI of the interned layout (returned by value; `Abi` is `Copy`).
    pub fn abi(self) -> Abi {
        self.0.0.abi
    }

    /// Largest niche of the interned layout, if any.
    pub fn largest_niche(self) -> Option<Niche> {
        self.0.0.largest_niche
    }

    /// Alignment of the interned layout.
    pub fn align(self) -> AbiAndPrefAlign {
        self.0.0.align
    }

    /// Size of the interned layout.
    pub fn size(self) -> Size {
        self.0.0.size
    }
}
1342 | ||
/// The layout of a type, alongside the type itself.
/// Provides various type traversal APIs (e.g., recursing into fields).
///
/// Note that the layout is NOT guaranteed to always be identical
/// to that obtained from `layout_of(ty)`, as we need to produce
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct TyAndLayout<'a, Ty> {
    /// The type this layout was computed for.
    pub ty: Ty,
    /// The interned layout.
    pub layout: Layout<'a>,
}
1355 | ||
impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
    // Auto-deref to the underlying `LayoutS`, so layout fields
    // (`self.size`, `self.abi`, ...) are reachable directly on a `TyAndLayout`.
    type Target = &'a LayoutS<'a>;
    fn deref(&self) -> &&'a LayoutS<'a> {
        &self.layout.0.0
    }
}
1362 | ||
/// Categories of pointers, used to decide which attributes a pointer
/// argument/load may carry.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Most general case, we know no restrictions to tell LLVM.
    SharedMutable,

    /// `&T` where `T` contains no `UnsafeCell`, is `dereferenceable`, `noalias` and `readonly`.
    Frozen,

    /// `&mut T` which is `dereferenceable` and `noalias` but not `readonly`.
    UniqueBorrowed,

    /// `&mut !Unpin`, which is `dereferenceable` but neither `noalias` nor `readonly`.
    UniqueBorrowedPinned,

    /// `Box<T>`, which is `noalias` (even on return types, unlike the above) but neither `readonly`
    /// nor `dereferenceable`.
    UniqueOwned,
}
1381 | ||
/// Size/alignment/aliasing information known about a pointee,
/// as produced by `TyAndLayout::pointee_info_at`.
#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    /// `None` when nothing can be promised about aliasing/dereferenceability.
    pub safe: Option<PointerKind>,
    pub address_space: AddressSpace,
}
1389 | ||
923072b8 FG |
/// Used in `might_permit_raw_init` to indicate the kind of initialisation
/// that is checked to be valid
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum InitKind {
    /// The memory is filled with zero bytes.
    Zero,
    /// The memory is left entirely uninitialized.
    Uninit,
}
1397 | ||
94222f64 XL |
/// Trait that needs to be implemented by the higher-level type representation
/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
pub trait TyAbiInterface<'a, C>: Sized {
    /// Layout of one specific variant of `this` (see `TyAndLayout::for_variant`).
    fn ty_and_layout_for_variant(
        this: TyAndLayout<'a, Self>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'a, Self>;
    /// Type and layout of field `i` of `this` (see `TyAndLayout::field`).
    fn ty_and_layout_field(this: TyAndLayout<'a, Self>, cx: &C, i: usize) -> TyAndLayout<'a, Self>;
    /// Pointee information at `offset` within `this`, if a pointer lives there
    /// (see `TyAndLayout::pointee_info_at`).
    fn ty_and_layout_pointee_info_at(
        this: TyAndLayout<'a, Self>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo>;
    fn is_adt(this: TyAndLayout<'a, Self>) -> bool;
    fn is_never(this: TyAndLayout<'a, Self>) -> bool;
    fn is_tuple(this: TyAndLayout<'a, Self>) -> bool;
    fn is_unit(this: TyAndLayout<'a, Self>) -> bool;
}
1417 | ||
impl<'a, Ty> TyAndLayout<'a, Ty> {
    /// Type-and-layout of a specific variant of this type.
    pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_for_variant(self, cx, variant_index)
    }

    /// Type-and-layout of field `i` of this type.
    pub fn field<C>(self, cx: &C, i: usize) -> Self
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_field(self, cx, i)
    }

    /// Information about a pointer located at `offset` within this type, if any.
    pub fn pointee_info_at<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_pointee_info_at(self, cx, offset)
    }

    /// Whether this is, recursively, a single float scalar: either a float
    /// `Abi::Scalar`, or an aggregate with exactly one field at offset 0
    /// that is itself a single float element.
    pub fn is_single_fp_element<C>(self, cx: &C) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
        C: HasDataLayout,
    {
        match self.abi {
            Abi::Scalar(scalar) => scalar.primitive().is_float(),
            Abi::Aggregate { .. } => {
                if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
                    self.field(cx, 0).is_single_fp_element(cx)
                } else {
                    false
                }
            }
            _ => false,
        }
    }

    pub fn is_adt<C>(self) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::is_adt(self)
    }

    pub fn is_never<C>(self) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::is_never(self)
    }

    pub fn is_tuple<C>(self) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::is_tuple(self)
    }

    pub fn is_unit<C>(self) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::is_unit(self)
    }
}
1486 | ||
ba9703b0 | 1487 | impl<'a, Ty> TyAndLayout<'a, Ty> { |
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        // Delegates to the ABI: only `Abi::Aggregate { sized: false }` is unsized.
        self.abi.is_unsized()
    }
1492 | ||
    /// Returns `true` if the type is a ZST and not unsized.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            // Scalar/pair/vector ABIs are never zero-sized.
            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Uninhabited => self.size.bytes() == 0,
            // An unsized aggregate is never a ZST, even at size 0.
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
        }
    }
ba9703b0 XL |
1501 | |
    /// Determines if this type permits "raw" initialization by just transmuting some
    /// memory into an instance of `T`.
    ///
    /// `init_kind` indicates if the memory is zero-initialized or left uninitialized.
    ///
    /// This code is intentionally conservative, and will not detect
    /// * zero init of an enum whose 0 variant does not allow zero initialization
    /// * making uninitialized types who have a full valid range (ints, floats, raw pointers)
    /// * Any form of invalid value being made inside an array (unless the value is uninhabited)
    ///
    /// A strict form of these checks that uses const evaluation exists in
    /// `rustc_const_eval::might_permit_raw_init`, and a tracking issue for making these checks
    /// stricter is <https://github.com/rust-lang/rust/issues/66151>.
    ///
    /// FIXME: Once all the conservatism is removed from here, and the checks are ran by default,
    /// we can use the const evaluation checks always instead.
    pub fn might_permit_raw_init<C>(self, cx: &C, init_kind: InitKind) -> bool
    where
        Self: Copy,
        Ty: TyAbiInterface<'a, C>,
        C: HasDataLayout,
    {
        // A single scalar is OK when its valid range admits the bytes we are
        // writing: 0 for zero-init, everything for uninit.
        let scalar_allows_raw_init = move |s: Scalar| -> bool {
            match init_kind {
                InitKind::Zero => {
                    // The range must contain 0.
                    s.valid_range(cx).contains(0)
                }
                InitKind::Uninit => {
                    // The range must include all values.
                    s.is_always_valid(cx)
                }
            }
        };

        // Check the ABI.
        let valid = match self.abi {
            Abi::Uninhabited => false, // definitely UB
            Abi::Scalar(s) => scalar_allows_raw_init(s),
            Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
            Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
            Abi::Aggregate { .. } => true, // Fields are checked below.
        };
        if !valid {
            // This is definitely not okay.
            return false;
        }

        // If we have not found an error yet, we need to recursively descend into fields.
        match &self.fields {
            FieldsShape::Primitive | FieldsShape::Union { .. } => {}
            FieldsShape::Array { .. } => {
                // FIXME(#66151): For now, we are conservative and do not check arrays by default.
            }
            FieldsShape::Arbitrary { offsets, .. } => {
                for idx in 0..offsets.len() {
                    if !self.field(cx, idx).might_permit_raw_init(cx, init_kind) {
                        // We found a field that is unhappy with this kind of initialization.
                        return false;
                    }
                }
            }
        }

        // FIXME(#66151): For now, we are conservative and do not check `self.variants`.
        true
    }
83c7162d | 1569 | } |