// src/librustc_target/abi/mod.rs (rustc 1.44.1)

pub use Integer::*;
pub use Primitive::*;

use crate::spec::Target;

use std::convert::{TryFrom, TryInto};
use std::num::NonZeroUsize;
use std::ops::{Add, AddAssign, Deref, Mul, Range, RangeInclusive, Sub};

use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable_Generic;
use rustc_span::Span;

pub mod call;

/// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: u32,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: 0,
        }
    }
}

impl TargetDataLayout {
    pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &str, cause: &str| {
            s.parse::<u32>().map_err(|err| {
                format!("invalid address space `{}` for `{}` in \"data-layout\": {}", s, cause, err)
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &str, kind: &str, cause: &str| {
            s.parse::<u64>().map_err(|err| {
                format!("invalid {} `{}` for `{}` in \"data-layout\": {}", kind, s, cause, err)
            })
        };

        // Parse a size string.
        let size = |s: &str, cause: &str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let align = |s: &[&str], cause: &str| {
            if s.is_empty() {
                return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits).map_err(|err| {
                    format!("invalid alignment for `{}` in \"data-layout\": {}", cause, err)
                })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in target.data_layout.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = size(s, p)?;
                    dl.pointer_align = align(a, p)?;
                }
                [s, ref a @ ..] if s.starts_with('i') => {
                    let bits = match s[1..].parse::<u64>() {
                        Ok(bits) => bits,
                        Err(_) => {
                            size(&s[1..], "i")?; // For the user error.
                            continue;
                        }
                    };
                    let a = align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, ref a @ ..] if s.starts_with('v') => {
                    let v_size = size(&s[1..], "v")?;
                    let a = align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Perform consistency checks against the Target information.
        let endian_str = match dl.endian {
            Endian::Little => "little",
            Endian::Big => "big",
        };
        if endian_str != target.target_endian {
            return Err(format!(
                "inconsistent target specification: \"data-layout\" claims \
                 architecture is {}-endian, while \"target-endian\" is `{}`",
                endian_str, target.target_endian
            ));
        }

        if dl.pointer_size.bits().to_string() != target.target_pointer_width {
            return Err(format!(
                "inconsistent target specification: \"data-layout\" claims \
                 pointers are {}-bit, while \"target-pointer-width\" is `{}`",
                dl.pointer_size.bits(),
                target.target_pointer_width
            ));
        }

        Ok(dl)
    }
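
    // Illustrative sketch of what `parse` produces: for an x86_64-style spec
    // such as "e-m:e-i64:64-f80:128-n8:16:32:64-S128", the loop above sets
    // `endian` to `Endian::Little` (from "e") and gives `i64` an ABI and
    // preferred alignment of 64 bits (from "i64:64"), while tokens this
    // parser does not model ("m:e", "f80:128", "n8:16:32:64", "S128") fall
    // through to the catch-all arm and are ignored.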

    /// Returns the exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
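    ///
    /// # Examples
    ///
    /// ```ignore (illustrative sketch; this is an internal compiler API)
    /// // The default layout uses 64-bit pointers, so the bound is 1 << 47.
    /// assert_eq!(TargetDataLayout::default().obj_size_bound(), 1 << 47);
    /// ```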
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
        }
    }

    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
        }
    }

    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
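
    // For example (illustrative): a 24-byte vector type with no explicit
    // entry in `vector_align` falls back to natural alignment, i.e. 24
    // rounded up to the next power of two, giving a 32-byte ABI and
    // preferred alignment.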
}

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

/// Endianness of the target, which must match `cfg(target_endian)`.
#[derive(Copy, Clone, PartialEq)]
pub enum Endian {
    Little,
    Big,
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
#[derive(HashStable_Generic)]
pub struct Size {
    raw: u64,
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    #[inline]
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        // Avoid potential overflow from `bits + 7`.
        Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
    }
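
    // A quick sanity check of the `from_bits` rounding above (illustrative):
    // 9 bits gives 9 / 8 + ((9 % 8) + 7) / 8 = 1 + 1 = 2 bytes, the same
    // ceiling division as (9 + 7) / 8, but without risking `u64` overflow
    // when `bits` is close to `u64::MAX`.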

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        Size { raw: bytes.try_into().ok().unwrap() }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes().checked_mul(8).unwrap_or_else(|| {
            panic!("Size::bits: {} bytes in bits doesn't fit in u64", self.bytes())
        })
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
#[derive(HashStable_Generic)]
pub struct Align {
    pow2: u8,
}

impl Align {
    pub fn from_bits(bits: u64) -> Result<Align, String> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align { pow2: 0 });
        }

        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(format!("`{}` is not a power of 2", align));
        }
        if pow2 > 29 {
            return Err(format!("`{}` is too large", align));
        }

        Ok(Align { pow2 })
    }
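
    // Sketch of the `from_bytes` check above (illustrative): 48 = 0b110000
    // strips four trailing zero bits down to 0b11, which is not 1, so
    // `from_bytes(48)` is rejected as "not a power of 2"; 64 = 0b1000000
    // strips down to 1 with `pow2 == 6` and is accepted.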

    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
    pub fn max_for_offset(offset: Size) -> Align {
        Align { pow2: offset.bytes().trailing_zeros() as u8 }
    }

    /// Lowers the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
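
    // Worked example (illustrative): an offset of 24 bytes has 3 trailing
    // zero bits, so `max_for_offset` yields an 8-byte alignment; a 16-byte
    // aligned field placed at that offset is therefore restricted to
    // `min(16, 8) == 8` bytes by `restrict_for_offset`.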
}

/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
#[derive(HashStable_Generic)]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }

    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
    }

    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    pub fn size(self) -> Size {
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Finds the smallest Integer type which can represent the signed value.
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest Integer type which can represent the unsigned value.
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }
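
    // For instance (illustrative): `fit_signed(-129)` is `I16`, because -129
    // falls outside I8's -128..=127 range, while `fit_unsigned(255)` still
    // fits in `I8`; layout code can use this to pick a minimal tag size for
    // a given set of discriminant values.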

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        for &candidate in &[I8, I16, I32, I64, I128] {
            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
                return Some(candidate);
            }
        }
        None
    }

    /// Finds the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for &candidate in &[I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }
}

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer,
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            Pointer => dl.pointer_size,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align,
        }
    }

    pub fn is_float(self) -> bool {
        match self {
            F32 | F64 => true,
            _ => false,
        }
    }

    pub fn is_int(self) -> bool {
        match self {
            Int(..) => true,
            _ => false,
        }
    }
}

/// Information about one scalar component of a Rust type.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub struct Scalar {
    pub value: Primitive,

    /// Inclusive wrap-around range of valid values, that is, if
    /// start > end, it represents `start..=MAX`,
    /// followed by `0..=end`.
    ///
    /// That is, for an i8 primitive, a range of `254..=2` means the
    /// following sequence:
    ///
    ///    254 (-2), 255 (-1), 0, 1, 2
    ///
    /// This is intended specifically to mirror LLVM's `!range` metadata
    /// semantics.
    // FIXME(eddyb) always use the shortest range, e.g., by finding
    // the largest space between two consecutive valid values and
    // taking everything else as the (shortest) valid range.
    pub valid_range: RangeInclusive<u128>,
}

impl Scalar {
    pub fn is_bool(&self) -> bool {
        if let Int(I8, _) = self.value { self.valid_range == (0..=1) } else { false }
    }

    /// Returns the valid range as an `x..y` range.
    ///
    /// If `x` and `y` are equal, the range is full, not empty.
    pub fn valid_range_exclusive<C: HasDataLayout>(&self, cx: &C) -> Range<u128> {
        // For a (max) value of -1, max will be `-1 as usize`, which overflows.
        // However, that is fine here (it would still represent the full range),
        // i.e., if the range is everything.
        let bits = self.value.size(cx).bits();
        assert!(bits <= 128);
        let mask = !0u128 >> (128 - bits);
        let start = *self.valid_range.start();
        let end = *self.valid_range.end();
        assert_eq!(start, start & mask);
        assert_eq!(end, end & mask);
        start..(end.wrapping_add(1) & mask)
    }
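
    // For example (illustrative): `bool` is `Int(I8, false)` with
    // `valid_range == 0..=1`, so `valid_range_exclusive` returns `0..2`;
    // a wrap-around range like `254..=2` on an 8-bit scalar becomes `254..3`
    // under the same masked, wrapping arithmetic.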
}

/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum FieldsShape {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: Vec<Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: Vec<u32>,
    },
}

impl FieldsShape {
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => {
                let usize_count = count as usize;
                assert_eq!(usize_count as u64, count);
                usize_count
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(
                    i < count.get(),
                    "tried to access field {} of union with {} fields",
                    i,
                    count
                );
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count);
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
        }
    }

    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => {
                let r = memory_index[i];
                assert_eq!(r as usize as u32, r);
                r as usize
            }
        }
    }

    /// Gets source indices of the fields by increasing offsets.
    #[inline]
    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for i in 0..self.count() {
                    inverse_small[memory_index[i] as usize] = i as u8;
                }
            } else {
                inverse_big = vec![0; self.count()];
                for i in 0..self.count() {
                    inverse_big[memory_index[i] as usize] = i as u32;
                }
            }
        }

        (0..self.count()).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i] as usize
                }
            }
        })
    }
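
    // Sketch (illustrative): a two-field struct whose fields were swapped in
    // memory has `memory_index == [1, 0]`; inverting that permutation above
    // also yields `[1, 0]`, so the iterator visits source field 1 (at the
    // lowest offset) first, then source field 0.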
}

/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64,
    },
    Aggregate {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    },
}

impl Abi {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        match *self {
            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => !sized,
        }
    }

    /// Returns `true` if this is a single signed integer scalar.
    pub fn is_signed(&self) -> bool {
        match *self {
            Abi::Scalar(ref scal) => match scal.value {
                Primitive::Int(_, signed) => signed,
                _ => false,
            },
            _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
        }
    }

    /// Returns `true` if this is an uninhabited type.
    pub fn is_uninhabited(&self) -> bool {
        match *self {
            Abi::Uninhabited => true,
            _ => false,
        }
    }

    /// Returns `true` if this is a scalar type.
    pub fn is_scalar(&self) -> bool {
        match *self {
            Abi::Scalar(_) => true,
            _ => false,
        }
    }
}

rustc_index::newtype_index! {
    pub struct VariantIdx {
        derive [HashStable_Generic]
    }
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Variants {
    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single { index: VariantIdx },

    /// Enum-likes with more than one inhabited variant: for each case there is
    /// a struct, and they all have space reserved for the discriminant.
    /// For enums this is the sole field of the layout.
    Multiple {
        discr: Scalar,
        discr_kind: DiscriminantKind,
        discr_index: usize,
        variants: IndexVec<VariantIdx, Layout>,
    },
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum DiscriminantKind {
    /// Integer tag holding the discriminant value itself.
    Tag,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// the variant `dataful_variant` contains a niche at an arbitrary
    /// offset (field `discr_index` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    Niche {
        dataful_variant: VariantIdx,
        niche_variants: RangeInclusive<VariantIdx>,
        niche_start: u128,
    },
}
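
// A worked instance of the niche formula above (illustrative): for
// `Option<&T>`, `None` is the only niched variant, `niche_start` is 0, and
// `niche_variants` covers just `None`, so `None` is encoded as the invalid
// pointer value 0 while `Some` stores its reference unchanged.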

#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche {
    pub offset: Size,
    pub scalar: Scalar,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let niche = Niche { offset, scalar };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);

        // Find out how many values are outside the valid range.
        let niche = v.end().wrapping_add(1)..*v.start();
        niche.end.wrapping_sub(niche.start) & max_value
    }
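
    // For example (illustrative): `bool` has `valid_range == 0..=1` in an
    // 8-bit scalar, so the invalid values 2..=255 leave 254 niche values
    // available, computed above as `(0 - 2) & 0xff == 254` in wrapping
    // arithmetic.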

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);

        if count > max_value {
            return None;
        }

        // Compute the range of invalid values being reserved.
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;

        // If the `end` of our range is inside the valid range,
        // then we ran out of invalid values.
        // FIXME(eddyb) abstract this with a wraparound range type.
        let valid_range_contains = |x| {
            if v.start() <= v.end() {
                *v.start() <= x && x <= *v.end()
            } else {
                *v.start() <= x || x <= *v.end()
            }
        };
        if valid_range_contains(end) {
            return None;
        }

        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Layout {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. generators can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants,

    /// The `abi` defines how this data is passed between functions, and it defines
    /// value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
    /// have to be taken into account to find all fields of this layout.
    pub abi: Abi,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,

    pub align: AbiAndPrefAlign,
    pub size: Size,
}

impl Layout {
    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar.clone());
        let size = scalar.value.size(cx);
        let align = scalar.value.align(cx);
        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Scalar(scalar),
            largest_niche,
            size,
            align,
        }
    }
}

/// The layout of a type, alongside the type itself.
/// Provides various type traversal APIs (e.g., recursing into fields).
///
/// Note that the layout is NOT guaranteed to always be identical
/// to that obtained from `layout_of(ty)`, as we need to produce
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct TyAndLayout<'a, Ty> {
    pub ty: Ty,
    pub layout: &'a Layout,
}

impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
    type Target = &'a Layout;
    fn deref(&self) -> &&'a Layout {
        &self.layout
    }
}

/// Trait for context types that can compute layouts of things.
pub trait LayoutOf {
    type Ty;
    type TyAndLayout;

    fn layout_of(&self, ty: Self::Ty) -> Self::TyAndLayout;
    fn spanned_layout_of(&self, ty: Self::Ty, _span: Span) -> Self::TyAndLayout {
        self.layout_of(ty)
    }
}

/// The `TyAndLayout` above will always be a `MaybeResult<TyAndLayout<'_, Self>>`.
/// We can't add the bound due to the lifetime, but this trait is still useful when
/// writing code that's generic over the `LayoutOf` impl.
pub trait MaybeResult<T> {
    type Error;

    fn from(x: Result<T, Self::Error>) -> Self;
    fn to_result(self) -> Result<T, Self::Error>;
}

impl<T> MaybeResult<T> for T {
    type Error = !;

    fn from(Ok(x): Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        Ok(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    fn from(x: Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        self
    }
}

#[derive(Copy, Clone, PartialEq, Eq)]
pub enum PointerKind {
    /// Most general case, we know no restrictions to tell LLVM.
    Shared,

    /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`.
    Frozen,

    /// `&mut T`, when we know `noalias` is safe for LLVM.
    UniqueBorrowed,

    /// `Box<T>`, unlike `UniqueBorrowed`, it also has `noalias` on returns.
    UniqueOwned,
}

#[derive(Copy, Clone)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
}

pub trait TyAndLayoutMethods<'a, C: LayoutOf<Ty = Self>>: Sized {
    fn for_variant(
        this: TyAndLayout<'a, Self>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'a, Self>;
    fn field(this: TyAndLayout<'a, Self>, cx: &C, i: usize) -> C::TyAndLayout;
    fn pointee_info_at(this: TyAndLayout<'a, Self>, cx: &C, offset: Size) -> Option<PointeeInfo>;
}

impl<'a, Ty> TyAndLayout<'a, Ty> {
    pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
    where
        Ty: TyAndLayoutMethods<'a, C>,
        C: LayoutOf<Ty = Ty>,
    {
        Ty::for_variant(self, cx, variant_index)
    }

    /// Callers might want to use `C: LayoutOf<Ty=Ty, TyAndLayout: MaybeResult<Self>>`
    /// to allow recursion (see `might_permit_raw_init` below for an example).
    pub fn field<C>(self, cx: &C, i: usize) -> C::TyAndLayout
    where
        Ty: TyAndLayoutMethods<'a, C>,
        C: LayoutOf<Ty = Ty>,
    {
        Ty::field(self, cx, i)
    }

    pub fn pointee_info_at<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
    where
        Ty: TyAndLayoutMethods<'a, C>,
        C: LayoutOf<Ty = Ty>,
    {
        Ty::pointee_info_at(self, cx, offset)
    }
}

impl<'a, Ty> TyAndLayout<'a, Ty> {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

    /// Returns `true` if the type is a ZST and not unsized.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Uninhabited => self.size.bytes() == 0,
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
        }
    }

    /// Determines if this type permits "raw" initialization by just transmuting some
    /// memory into an instance of `T`.
    /// `zero` indicates if the memory is zero-initialized, or alternatively
    /// left entirely uninitialized.
    /// This is conservative: when in doubt, it will answer `true`.
    ///
    /// FIXME: Once we have removed all the conservatism, we could alternatively
    /// create an all-0/all-undef constant and run the const value validator to see if
    /// this is a valid value for the given type.
    pub fn might_permit_raw_init<C, E>(self, cx: &C, zero: bool) -> Result<bool, E>
    where
        Self: Copy,
        Ty: TyAndLayoutMethods<'a, C>,
        C: LayoutOf<Ty = Ty, TyAndLayout: MaybeResult<Self, Error = E>> + HasDataLayout,
    {
        let scalar_allows_raw_init = move |s: &Scalar| -> bool {
            if zero {
                let range = &s.valid_range;
                // The range must contain 0.
                range.contains(&0) || (*range.start() > *range.end()) // wrap-around allows 0
            } else {
                // The range must include all values. `valid_range_exclusive` handles
                // the wrap-around using target arithmetic; with wrap-around then the full
                // range is one where `start == end`.
                let range = s.valid_range_exclusive(cx);
                range.start == range.end
            }
        };

        // Check the ABI.
        let valid = match &self.abi {
            Abi::Uninhabited => false, // definitely UB
            Abi::Scalar(s) => scalar_allows_raw_init(s),
            Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
            Abi::Vector { element: s, count } => *count == 0 || scalar_allows_raw_init(s),
            Abi::Aggregate { .. } => true, // Cannot be excluded *right now*.
        };
        if !valid {
            // This is definitely not okay.
            trace!("might_permit_raw_init({:?}, zero={}): not valid", self.layout, zero);
            return Ok(false);
        }

        // If we have not found an error yet, we need to recursively descend.
        // FIXME(#66151): For now, we are conservative and do not do this.
        Ok(true)
    }
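
    // For instance (illustrative): for `bool` (`valid_range == 0..=1`),
    // `might_permit_raw_init` returns `Ok(true)` when `zero` is set, since
    // the range contains 0, but `Ok(false)` for uninitialized memory, since
    // 2..=255 are excluded; `&T` is rejected in both modes, because its
    // valid range `1..=MAX` contains neither 0 nor every possible value.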
}