// compiler/rustc_target/src/abi/mod.rs (upstream rustc 1.63.0)

pub use Integer::*;
pub use Primitive::*;

use crate::json::{Json, ToJson};
use crate::spec::Target;

use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::iter::Step;
use std::num::NonZeroUsize;
use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
use std::str::FromStr;

use rustc_data_structures::intern::Interned;
use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable_Generic;

pub mod call;

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of `#[repr(C)]` enums (default `I32`, i.e. 32 bits).
    pub c_enum_min_size: Integer,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: AddressSpace::DATA,
            c_enum_min_size: Integer::I32,
        }
    }
}

impl TargetDataLayout {
    pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &str, cause: &str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                format!("invalid address space `{}` for `{}` in \"data-layout\": {}", s, cause, err)
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &str, kind: &str, cause: &str| {
            s.parse::<u64>().map_err(|err| {
                format!("invalid {} `{}` for `{}` in \"data-layout\": {}", kind, s, cause, err)
            })
        };

        // Parse a size string.
        let size = |s: &str, cause: &str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let align = |s: &[&str], cause: &str| {
            if s.is_empty() {
                return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits).map_err(|err| {
                    format!("invalid alignment for `{}` in \"data-layout\": {}", cause, err)
                })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in target.data_layout.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = size(s, p)?;
                    dl.pointer_align = align(a, p)?;
                }
                [s, ref a @ ..] if s.starts_with('i') => {
                    let Ok(bits) = s[1..].parse::<u64>() else {
                        size(&s[1..], "i")?; // For the user error.
                        continue;
                    };
                    let a = align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, ref a @ ..] if s.starts_with('v') => {
                    let v_size = size(&s[1..], "v")?;
                    let a = align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Perform consistency checks against the Target information.
        if dl.endian != target.endian {
            return Err(format!(
                "inconsistent target specification: \"data-layout\" claims \
                 architecture is {}-endian, while \"target-endian\" is `{}`",
                dl.endian.as_str(),
                target.endian.as_str(),
            ));
        }

        let target_pointer_width: u64 = target.pointer_width.into();
        if dl.pointer_size.bits() != target_pointer_width {
            return Err(format!(
                "inconsistent target specification: \"data-layout\" claims \
                 pointers are {}-bit, while \"target-pointer-width\" is `{}`",
                dl.pointer_size.bits(),
                target.pointer_width
            ));
        }

        dl.c_enum_min_size = Integer::from_size(Size::from_bits(target.c_enum_min_bits))?;

        Ok(dl)
    }

    /// Returns the exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
        }
    }

    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
        }
    }

    #[inline]
    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
}
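
// Worked example (editor's illustration, not in the upstream source): with
// the default `vector_align` table above, a 128-bit vector hits the 128-bit
// entry, while a 192-bit (24-byte) vector has no entry and falls back to
// natural alignment: `24u64.next_power_of_two() == 32` bytes.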

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

/// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone, PartialEq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{}""#, s)),
        }
    }
}

impl ToJson for Endian {
    fn to_json(&self) -> Json {
        self.as_str().to_json()
    }
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct Size {
    raw: u64,
}

// This is debug-printed a lot in larger structs, don't waste too much space there.
impl fmt::Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Size({} bytes)", self.bytes())
    }
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        // Avoid potential overflow from `bits + 7`.
        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
    }
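
    // Worked example (editor's illustration, not in the upstream source):
    // `Size::from_bits(12)` computes `12 / 8 + ((12 % 8) + 7) / 8 == 1 + 1`,
    // i.e. 2 bytes, and `Size::from_bits(16)` yields exactly 2 bytes. The
    // split form gives the same result as `(bits + 7) / 8` but cannot
    // overflow for `bits` close to `u64::MAX`.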

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        #[cold]
        fn overflow(bytes: u64) -> ! {
            panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
        }

        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }
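
    // Worked example (editor's illustration, not in the upstream source):
    // for an 8-byte alignment the mask is `0b111`, so aligning 13 bytes
    // computes `(13 + 7) & !7 == 16`, while 16 bytes is already aligned
    // because `16 & 0b111 == 0`.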

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
    #[inline]
    pub fn sign_extend(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        (((value << shift) as i128) >> shift) as u128
    }

    /// Truncates `value` to `self` bits.
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }
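
    // Worked examples (editor's illustration, not in the upstream source):
    // for an 8-bit size, `shift == 120`, so
    // `Size::from_bits(8).sign_extend(0xFE)` reinterprets `0xFE` (-2 as i8)
    // as `0xFF..FE` (-2 as i128, returned as u128), and
    // `Size::from_bits(8).truncate(0x1FF)` drops the ninth bit, yielding `0xFF`.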

    #[inline]
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
    }

    #[inline]
    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    #[inline]
    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct Align {
    pow2: u8,
}

// This is debug-printed a lot in larger structs, don't waste too much space there.
impl fmt::Debug for Align {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Align({} bytes)", self.bytes())
    }
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, String> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    #[inline]
    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        fn not_power_of_2(align: u64) -> String {
            format!("`{}` is not a power of 2", align)
        }

        #[cold]
        fn too_large(align: u64) -> String {
            format!("`{}` is too large", align)
        }

        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(not_power_of_2(align));
        }
        if pow2 > 29 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }
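
    // Worked example (editor's illustration, not in the upstream source):
    // `Align::from_bytes(16)` strips four trailing zero bits, leaving
    // `bytes == 1` and `pow2 == 4`, so `bytes()` reconstructs `1 << 4 == 16`;
    // `Align::from_bytes(24)` fails because the loop leaves `bytes == 3`,
    // which is not 1, i.e. 24 is not a power of 2.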

    #[inline]
    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
    #[inline]
    pub fn max_for_offset(offset: Size) -> Align {
        Align { pow2: offset.bytes().trailing_zeros() as u8 }
    }

    /// Lower the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    #[inline]
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
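
    // Worked example (editor's illustration, not in the upstream source):
    // an offset of 24 bytes has `trailing_zeros() == 3`, so
    // `Align::max_for_offset(Size::from_bytes(24))` is 8 bytes, and
    // restricting a 16-byte alignment for that offset yields `min(16, 8)`,
    // i.e. 8 bytes.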
}

/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }

    #[inline]
    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
    }

    #[inline]
    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    #[inline]
    pub fn size(self) -> Size {
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Finds the smallest `Integer` type which can represent the signed value.
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest `Integer` type which can represent the unsigned value.
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }
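
    // Worked examples (editor's illustration, not in the upstream source):
    // `Integer::fit_signed(127)` is `I8` but `Integer::fit_signed(-129)` is
    // `I16`, since -129 lies outside `-128..=127`; likewise
    // `Integer::fit_unsigned(255)` is `I8` and `Integer::fit_unsigned(256)`
    // is `I16` (the first match arm containing the value wins).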

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        for candidate in [I8, I16, I32, I64, I128] {
            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
                return Some(candidate);
            }
        }
        None
    }

    /// Finds the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer,
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            Pointer => dl.pointer_size,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align,
        }
    }

    // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
    #[inline]
    pub fn is_float(self) -> bool {
        matches!(self, F32 | F64)
    }

    // FIXME(eddyb) remove, it's completely unused.
    #[inline]
    pub fn is_int(self) -> bool {
        matches!(self, Int(..))
    }

    #[inline]
    pub fn is_ptr(self) -> bool {
        matches!(self, Pointer)
    }
}

/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
/// 254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM's `!range` metadata semantics.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[derive(HashStable_Generic)]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    pub fn full(size: Size) -> Self {
        Self { start: 0, end: size.unsigned_int_max() }
    }

    /// Returns `true` if `v` is contained in the range.
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Returns `self` with replaced `start`.
    #[inline(always)]
    pub fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with replaced `end`.
    #[inline(always)]
    pub fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }

    /// Returns `true` if `size` completely fills the range.
    #[inline]
    pub fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
    }
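
    // Worked example (editor's illustration, not in the upstream source):
    // for an 8-bit size (`max_value == 255`), the wrapped range `254..=253`
    // is full, since `253u128.wrapping_add(1) & 255 == 254 == start`; by
    // contrast `0..=254` is not full, as 255 is excluded.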
}

impl fmt::Debug for WrappingRange {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.start > self.end {
            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
        } else {
            write!(fmt, "{}..={}", self.start, self.end)?;
        }
        Ok(())
    }
}

/// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub enum Scalar {
    Initialized {
        value: Primitive,

        // FIXME(eddyb) always use the shortest range, e.g., by finding
        // the largest space between two consecutive valid values and
        // taking everything else as the (shortest) valid range.
        valid_range: WrappingRange,
    },
    Union {
        /// Even for unions, we need to use the correct registers for the kind of
        /// values inside the union, so we keep the `Primitive` type around. We
        /// also use it to compute the size of the scalar.
        /// However, unions never have niches and even allow undef,
        /// so there is no `valid_range`.
        value: Primitive,
    },
}

impl Scalar {
    #[inline]
    pub fn is_bool(&self) -> bool {
        matches!(
            self,
            Scalar::Initialized {
                value: Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 }
            }
        )
    }

    /// Get the primitive representation of this type, ignoring the valid range and whether the
    /// value is allowed to be undefined (due to being a union).
    pub fn primitive(&self) -> Primitive {
        match *self {
            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
        }
    }

    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
        self.primitive().align(cx)
    }

    pub fn size(self, cx: &impl HasDataLayout) -> Size {
        self.primitive().size(cx)
    }

    #[inline]
    pub fn to_union(&self) -> Self {
        Self::Union { value: self.primitive() }
    }

    #[inline]
    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
        }
    }

    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
    #[inline]
    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
        match self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
        }
    }

    /// Returns `true` if all possible numbers are valid, i.e. `valid_range` covers the whole layout.
    #[inline]
    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
            Scalar::Union { .. } => true,
        }
    }

    /// Returns `true` if this type can be left uninit.
    #[inline]
    pub fn is_uninit_valid(&self) -> bool {
        match *self {
            Scalar::Initialized { .. } => false,
            Scalar::Union { .. } => true,
        }
    }
}

/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum FieldsShape {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: Vec<Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: Vec<u32>,
    },
}

impl FieldsShape {
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(
                    i < count.get(),
                    "tried to access field {} of union with {} fields",
                    i,
                    count
                );
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count);
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
        }
    }

    #[inline]
    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
        }
    }

    /// Gets source indices of the fields by increasing offsets.
    #[inline]
    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for i in 0..self.count() {
                    inverse_small[memory_index[i] as usize] = i as u8;
                }
            } else {
                inverse_big = vec![0; self.count()];
                for i in 0..self.count() {
                    inverse_big[memory_index[i] as usize] = i as u32;
                }
            }
        }

        (0..self.count()).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i] as usize
                }
            }
        })
    }
}
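
// Worked example (editor's illustration, not in the upstream source): for a
// struct whose two source-order fields were swapped in memory, `offsets`
// could be 4 and 0 bytes with `memory_index == [1, 0]` (source field 0 sits
// second in memory); `index_by_increasing_offset()` inverts that permutation
// and yields the source indices `1, 0`.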

/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// The default address space, corresponding to data space.
    pub const DATA: Self = AddressSpace(0);
}

/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64,
    },
    Aggregate {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    },
}

impl Abi {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        match *self {
            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => !sized,
        }
    }

    /// Returns `true` if this is a single signed integer scalar.
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self {
            Abi::Scalar(scal) => match scal.primitive() {
                Primitive::Int(_, signed) => signed,
                _ => false,
            },
            _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
        }
    }

    /// Returns `true` if this is an uninhabited type.
    #[inline]
    pub fn is_uninhabited(&self) -> bool {
        matches!(*self, Abi::Uninhabited)
    }

    /// Returns `true` if this is a scalar type.
    #[inline]
    pub fn is_scalar(&self) -> bool {
        matches!(*self, Abi::Scalar(_))
    }
}

rustc_index::newtype_index! {
    pub struct VariantIdx {
        derive [HashStable_Generic]
    }
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Variants<'a> {
    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single { index: VariantIdx },

    /// Enum-likes with more than one inhabited variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding,
        tag_field: usize,
        variants: IndexVec<VariantIdx, Layout<'a>>,
    },
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum TagEncoding {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// Discriminant and variant index coincide.
    /// The variant `dataful_variant` contains a niche at an arbitrary
    /// offset (field `tag_field` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    Niche {
        dataful_variant: VariantIdx,
        niche_variants: RangeInclusive<VariantIdx>,
        niche_start: u128,
    },
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }
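
    // Worked example (editor's illustration, not in the upstream source):
    // for a `bool`-like scalar (`Int(I8, false)`, valid range `0..=1`), the
    // invalid values are `2..=255`, so `available` computes
    // `0u128.wrapping_sub(2) & 255 == 254`.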

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values being reserved by moving either the `v.start` or `v.end` bound.
        // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
        // This is accomplished by preferring enums with 2 variants (`count == 1`) and always taking the shortest path to niche zero.
        // Having `None` in niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select closest to zero given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
        // If niche zero is already reserved, the selection of bounds is of little interest.
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // Zero is unavailable because wrapping occurs.
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // Moved past zero, use the other bound.
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // Moved past zero, use the other bound.
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}
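
// Worked example (editor's illustration, not in the upstream source):
// reserving one value (`count == 1`) in the `bool` niche (valid range
// `0..=1` in a `u8`) takes the `move_end` path, since `count > v.start`;
// the returned scalar has valid range `0..=2` and `niche_start` is 2, which
// is essentially how `Option<bool>` encodes `None` as the byte 2.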

#[derive(PartialEq, Eq, Hash, HashStable_Generic)]
pub struct LayoutS<'a> {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. generators can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants<'a>,

    /// The `abi` defines how this data is passed between functions, and it defines
    /// value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
    /// have to be taken into account to find all fields of this layout.
    pub abi: Abi,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,

    pub align: AbiAndPrefAlign,
    pub size: Size,
}

impl<'a> LayoutS<'a> {
    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
        let size = scalar.size(cx);
        let align = scalar.align(cx);
        LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Scalar(scalar),
            largest_niche,
            size,
            align,
        }
    }
}

impl<'a> fmt::Debug for LayoutS<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
        // `Interned<LayoutS>`. We print it like this to avoid having to update
        // expected output in a lot of tests.
        f.debug_struct("Layout")
            .field("fields", &self.fields)
            .field("variants", &self.variants)
            .field("abi", &self.abi)
            .field("largest_niche", &self.largest_niche)
            .field("align", &self.align)
            .field("size", &self.size)
            .finish()
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
#[rustc_pass_by_value]
pub struct Layout<'a>(pub Interned<'a, LayoutS<'a>>);

impl<'a> fmt::Debug for Layout<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // See comment on `<LayoutS as Debug>::fmt` above.
        self.0.0.fmt(f)
    }
}

impl<'a> Layout<'a> {
    pub fn fields(self) -> &'a FieldsShape {
        &self.0.0.fields
    }

    pub fn variants(self) -> &'a Variants<'a> {
        &self.0.0.variants
    }

    pub fn abi(self) -> Abi {
        self.0.0.abi
    }

    pub fn largest_niche(self) -> Option<Niche> {
        self.0.0.largest_niche
    }

    pub fn align(self) -> AbiAndPrefAlign {
        self.0.0.align
    }

    pub fn size(self) -> Size {
        self.0.0.size
    }
}

/// The layout of a type, alongside the type itself.
/// Provides various type traversal APIs (e.g., recursing into fields).
///
/// Note that the layout is NOT guaranteed to always be identical
/// to that obtained from `layout_of(ty)`, as we need to produce
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct TyAndLayout<'a, Ty> {
    pub ty: Ty,
    pub layout: Layout<'a>,
}

impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
    type Target = &'a LayoutS<'a>;
    fn deref(&self) -> &&'a LayoutS<'a> {
        &self.layout.0.0
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Most general case, we know no restrictions to tell LLVM.
    Shared,

    /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`.
    Frozen,

    /// `&mut T` which is `noalias` but not `readonly`.
    UniqueBorrowed,

    /// `Box<T>`, unlike `UniqueBorrowed`, it also has `noalias` on returns.
    UniqueOwned,
}

#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
    pub address_space: AddressSpace,
}

/// Used in `might_permit_raw_init` to indicate the kind of initialization
/// that is checked to be valid.
#[derive(Copy, Clone, Debug)]
pub enum InitKind {
    Zero,
    Uninit,
}

/// Trait that needs to be implemented by the higher-level type representation
/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
pub trait TyAbiInterface<'a, C>: Sized {
    fn ty_and_layout_for_variant(
        this: TyAndLayout<'a, Self>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'a, Self>;
    fn ty_and_layout_field(this: TyAndLayout<'a, Self>, cx: &C, i: usize) -> TyAndLayout<'a, Self>;
    fn ty_and_layout_pointee_info_at(
        this: TyAndLayout<'a, Self>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo>;
    fn is_adt(this: TyAndLayout<'a, Self>) -> bool;
    fn is_never(this: TyAndLayout<'a, Self>) -> bool;
    fn is_tuple(this: TyAndLayout<'a, Self>) -> bool;
    fn is_unit(this: TyAndLayout<'a, Self>) -> bool;
}

impl<'a, Ty> TyAndLayout<'a, Ty> {
    pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_for_variant(self, cx, variant_index)
    }

    pub fn field<C>(self, cx: &C, i: usize) -> Self
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_field(self, cx, i)
    }

    pub fn pointee_info_at<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_pointee_info_at(self, cx, offset)
    }

    pub fn is_single_fp_element<C>(self, cx: &C) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
        C: HasDataLayout,
    {
        match self.abi {
            Abi::Scalar(scalar) => scalar.primitive().is_float(),
            Abi::Aggregate { .. } => {
                if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
                    self.field(cx, 0).is_single_fp_element(cx)
                } else {
                    false
                }
            }
            _ => false,
        }
    }

    pub fn is_adt<C>(self) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::is_adt(self)
    }

    pub fn is_never<C>(self) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::is_never(self)
    }

    pub fn is_tuple<C>(self) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::is_tuple(self)
    }

    pub fn is_unit<C>(self) -> bool
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::is_unit(self)
    }
}

impl<'a, Ty> TyAndLayout<'a, Ty> {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

    /// Returns `true` if the type is a ZST and not unsized.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Uninhabited => self.size.bytes() == 0,
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
        }
    }

    /// Determines if this type permits "raw" initialization by just transmuting some
    /// memory into an instance of `T`.
    ///
    /// `init_kind` indicates if the memory is zero-initialized or left uninitialized.
    ///
    /// `strict` is an opt-in debugging flag added in #97323 that enables more checks.
    ///
    /// This is conservative: in doubt, it will answer `true`.
    ///
    /// FIXME: Once we removed all the conservatism, we could alternatively
    /// create an all-0/all-undef constant and run the const value validator to see if
    /// this is a valid value for the given type.
    pub fn might_permit_raw_init<C>(self, cx: &C, init_kind: InitKind, strict: bool) -> bool
    where
        Self: Copy,
        Ty: TyAbiInterface<'a, C>,
        C: HasDataLayout,
    {
        let scalar_allows_raw_init = move |s: Scalar| -> bool {
            match init_kind {
                InitKind::Zero => {
                    // The range must contain 0.
                    s.valid_range(cx).contains(0)
                }
                InitKind::Uninit => {
                    if strict {
                        // The type must be allowed to be uninit (which means "is a union").
                        s.is_uninit_valid()
                    } else {
                        // The range must include all values.
                        s.is_always_valid(cx)
                    }
                }
            }
        };

        // Check the ABI.
        let valid = match self.abi {
            Abi::Uninhabited => false, // definitely UB
            Abi::Scalar(s) => scalar_allows_raw_init(s),
            Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
            Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
            Abi::Aggregate { .. } => true, // Fields are checked below.
        };
        if !valid {
            // This is definitely not okay.
            return false;
        }

        // If we have not found an error yet, we need to recursively descend into fields.
        match &self.fields {
            FieldsShape::Primitive | FieldsShape::Union { .. } => {}
            FieldsShape::Array { count, .. } => {
                // FIXME(#66151): For now, we are conservative and do not check arrays by default.
                if strict
                    && *count > 0
                    && !self.field(cx, 0).might_permit_raw_init(cx, init_kind, strict)
                {
                    // Found a non-empty array with a type that is unhappy about this kind of initialization.
                    return false;
                }
            }
            FieldsShape::Arbitrary { offsets, .. } => {
                for idx in 0..offsets.len() {
                    if !self.field(cx, idx).might_permit_raw_init(cx, init_kind, strict) {
                        // We found a field that is unhappy with this kind of initialization.
                        return false;
                    }
                }
            }
        }

        // FIXME(#66151): For now, we are conservative and do not check `self.variants`.
        true
    }
}
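
// Worked example (editor's illustration, not in the upstream source): for a
// `bool` layout (`Abi::Scalar` with valid range `0..=1`),
// `might_permit_raw_init` with `InitKind::Zero` returns `true` because the
// range contains 0, while `InitKind::Uninit` returns `false` whether or not
// `strict` is set: the scalar is `Initialized`, and `0..=1` does not cover
// all of `u8`.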