#![cfg_attr(feature = "nightly", feature(step_trait, rustc_attrs, min_specialization))]
#![cfg_attr(feature = "nightly", allow(internal_features))]

use std::fmt;
#[cfg(feature = "nightly")]
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Mul, RangeInclusive, Sub};
use std::str::FromStr;

use bitflags::bitflags;
use rustc_data_structures::intern::Interned;
use rustc_data_structures::stable_hasher::Hash64;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
use rustc_index::{IndexSlice, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::HashStable_Generic;
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable, Encodable};

mod layout;

pub use layout::LayoutCalculator;

/// Requirements for a `StableHashingContext` to be used in this crate.
/// This is a hack to allow using the `HashStable_Generic` derive macro
/// instead of implementing everything in `rustc_middle`.
pub trait HashStableContext {}

use Integer::*;
use Primitive::*;

bitflags! {
    #[derive(Default)]
    #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
    pub struct ReprFlags: u8 {
        const IS_C               = 1 << 0;
        const IS_SIMD            = 1 << 1;
        const IS_TRANSPARENT     = 1 << 2;
        // Internal only for now. If true, don't reorder fields.
        const IS_LINEAR          = 1 << 3;
        // If true, the type's layout can be randomized using
        // the seed stored in `ReprOptions.layout_seed`
        const RANDOMIZE_LAYOUT   = 1 << 4;
        // Any of these flags being set prevents field reordering optimisation.
        const IS_UNOPTIMISABLE   = ReprFlags::IS_C.bits
                                 | ReprFlags::IS_SIMD.bits
                                 | ReprFlags::IS_LINEAR.bits;
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub enum IntegerType {
    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, that
    /// is, `Pointer(true)` is `isize`.
    Pointer(bool),
    /// Fixed-size integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness;
    /// `Fixed(I8, false)` means `u8`.
    Fixed(Integer, bool),
}

impl IntegerType {
    pub fn is_signed(&self) -> bool {
        match self {
            IntegerType::Pointer(b) => *b,
            IntegerType::Fixed(_, b) => *b,
        }
    }
}

/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct ReprOptions {
    pub int: Option<IntegerType>,
    pub align: Option<Align>,
    pub pack: Option<Align>,
    pub flags: ReprFlags,
    /// The seed to be used for randomizing a type's layout
    ///
    /// Note: This could technically be a `Hash128` which would
    /// be the "most accurate" hash as it'd encompass the item and crate
    /// hash without loss, but it does pay the price of being larger.
    /// Everything's a tradeoff, a 64-bit seed should be sufficient for our
    /// purposes (primarily `-Z randomize-layout`)
    pub field_shuffle_seed: Hash64,
}

impl ReprOptions {
    #[inline]
    pub fn simd(&self) -> bool {
        self.flags.contains(ReprFlags::IS_SIMD)
    }

    #[inline]
    pub fn c(&self) -> bool {
        self.flags.contains(ReprFlags::IS_C)
    }

    #[inline]
    pub fn packed(&self) -> bool {
        self.pack.is_some()
    }

    #[inline]
    pub fn transparent(&self) -> bool {
        self.flags.contains(ReprFlags::IS_TRANSPARENT)
    }

    #[inline]
    pub fn linear(&self) -> bool {
        self.flags.contains(ReprFlags::IS_LINEAR)
    }

    /// Returns the discriminant type, given these `repr` options.
    /// This must only be called on enums!
    pub fn discr_type(&self) -> IntegerType {
        self.int.unwrap_or(IntegerType::Pointer(true))
    }

    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
    /// layout" optimizations, such as representing `Foo<&T>` as a
    /// single pointer.
    pub fn inhibit_enum_layout_opt(&self) -> bool {
        self.c() || self.int.is_some()
    }

    /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
    /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
    pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
        if let Some(pack) = self.pack {
            if pack.bytes() == 1 {
                return true;
            }
        }

        self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
    }

    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
    /// was enabled for its declaration crate.
    pub fn can_randomize_type_layout(&self) -> bool {
        !self.inhibit_struct_field_reordering_opt()
            && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
    }

    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
    pub fn inhibit_union_abi_opt(&self) -> bool {
        self.c()
    }
}

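// Illustrative sketch (not part of the original source): for a type declared
// `#[repr(C)]`, the compiler builds a `ReprOptions` with `IS_C` set, which in
// turn inhibits field reordering and layout randomization:
//
//     let repr = ReprOptions { flags: ReprFlags::IS_C, ..Default::default() };
//     assert!(repr.c());
//     assert!(repr.inhibit_struct_field_reordering_opt());
//     assert!(!repr.can_randomize_type_layout());
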
/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
#[derive(Debug, PartialEq, Eq)]
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of `#[repr(C)]` enums (default `c_int::BITS`, usually 32).
    /// Note: This isn't in LLVM's data layout string, it is `short_enum`,
    /// so the only valid spec for LLVM is `c_int::BITS` or 8.
    pub c_enum_min_size: Integer,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: AddressSpace::DATA,
            c_enum_min_size: Integer::I32,
        }
    }
}

pub enum TargetDataLayoutErrors<'a> {
    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
    MissingAlignment { cause: &'a str },
    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
    InvalidBitsSize { err: String },
}

impl TargetDataLayout {
    /// Parse data layout from an [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
    ///
    /// This function doesn't fill `c_enum_min_size`; it will always be `I32`, since it cannot
    /// be determined from the LLVM string.
    pub fn parse_from_llvm_datalayout_string<'a>(
        input: &'a str,
    ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &'a str, cause: &'a str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
                kind,
                bit: s,
                cause,
                err,
            })
        };

        // Parse a size string.
        let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let align = |s: &[&'a str], cause: &'a str| {
            if s.is_empty() {
                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits)
                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in input.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
                // FIXME(erikdesjardins): we should be parsing nonzero address spaces
                // this will require replacing TargetDataLayout::{pointer_size,pointer_align}
                // with e.g. `fn pointer_size_in(AddressSpace)`
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = size(s, p)?;
                    dl.pointer_align = align(a, p)?;
                }
                [s, ref a @ ..] if s.starts_with('i') => {
                    let Ok(bits) = s[1..].parse::<u64>() else {
                        size(&s[1..], "i")?; // For the user error.
                        continue;
                    };
                    let a = align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, ref a @ ..] if s.starts_with('v') => {
                    let v_size = size(&s[1..], "v")?;
                    let a = align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }
        Ok(dl)
    }

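    // Illustrative sketch (not part of the original source; the data layout
    // string below is an assumed example): parsing a little-endian 64-bit
    // layout sets the endianness, pointer size, and pointer alignment.
    //
    //     let dl = TargetDataLayout::parse_from_llvm_datalayout_string(
    //         "e-p:64:64-i64:64-S128",
    //     )
    //     .unwrap();
    //     assert_eq!(dl.endian, Endian::Little);
    //     assert_eq!(dl.pointer_size.bits(), 64);
    //     assert_eq!(dl.pointer_align.abi.bits(), 64);
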
    /// Returns exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
}

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

/// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{s}""#)),
        }
    }
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct Size {
    raw: u64,
}

// Safety: Ord is implemented as just comparing numerical values and numerical values
// are not changed by (de-)serialization.
#[cfg(feature = "nightly")]
unsafe impl StableOrd for Size {
    const CAN_USE_UNSTABLE_SORT: bool = true;
}

// This is debug-printed a lot in larger structs, don't waste too much space there
impl fmt::Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Size({} bytes)", self.bytes())
    }
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        // Avoid potential overflow from `bits + 7`.
        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
    }

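    // Illustrative sketch (not part of the original source): `from_bits`
    // rounds up to whole bytes, so:
    //
    //     assert_eq!(Size::from_bits(1).bytes(), 1);
    //     assert_eq!(Size::from_bits(8).bytes(), 1);
    //     assert_eq!(Size::from_bits(9).bytes(), 2);
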
    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        #[cold]
        fn overflow(bytes: u64) -> ! {
            panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
        }

        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

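    // Illustrative sketch (not part of the original source): rounding a size
    // up to an alignment boundary with the mask trick above.
    //
    //     let four = Align::from_bytes(4).unwrap();
    //     assert_eq!(Size::from_bytes(5).align_to(four).bytes(), 8);
    //     assert!(Size::from_bytes(8).is_aligned(four));
    //     assert!(!Size::from_bytes(5).is_aligned(four));
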
    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
    #[inline]
    pub fn sign_extend(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        (((value << shift) as i128) >> shift) as u128
    }

    /// Truncates `value` to `self` bits.
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }

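    // Illustrative sketch (not part of the original source): treating the low
    // 8 bits of a `u128` as an `i8`.
    //
    //     let s = Size::from_bits(8);
    //     assert_eq!(s.truncate(0x1ff), 0xff);          // high bits dropped
    //     assert_eq!(s.sign_extend(0xff) as i128, -1);  // 0xff reads as -1_i8
    //     assert_eq!(s.sign_extend(0x7f), 0x7f);        // sign bit clear
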
    #[inline]
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
    }

    #[inline]
    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    #[inline]
    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

#[cfg(feature = "nightly")]
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct Align {
    pow2: u8,
}

// This is debug-printed a lot in larger structs, don't waste too much space there
impl fmt::Debug for Align {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Align({} bytes)", self.bytes())
    }
}

#[derive(Clone, Copy)]
pub enum AlignFromBytesError {
    NotPowerOfTwo(u64),
    TooLarge(u64),
}

impl AlignFromBytesError {
    pub fn diag_ident(self) -> &'static str {
        match self {
            Self::NotPowerOfTwo(_) => "not_power_of_two",
            Self::TooLarge(_) => "too_large",
        }
    }

    pub fn align(self) -> u64 {
        let (Self::NotPowerOfTwo(align) | Self::TooLarge(align)) = self;
        align
    }
}

impl fmt::Debug for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

impl fmt::Display for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            AlignFromBytesError::NotPowerOfTwo(align) => write!(f, "`{align}` is not a power of 2"),
            AlignFromBytesError::TooLarge(align) => write!(f, "`{align}` is too large"),
        }
    }
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    pub const MAX: Align = Align { pow2: 29 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    #[inline]
    pub fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        fn not_power_of_2(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::NotPowerOfTwo(align)
        }

        #[cold]
        fn too_large(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::TooLarge(align)
        }

        let tz = align.trailing_zeros();
        if align != (1 << tz) {
            return Err(not_power_of_2(align));
        }

        let pow2 = tz as u8;
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

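    // Illustrative sketch (not part of the original source): only powers of
    // two (up to `2^29` bytes, see `Align::MAX`) are representable.
    //
    //     assert_eq!(Align::from_bytes(8).unwrap().bytes(), 8);
    //     assert!(matches!(
    //         Align::from_bytes(6),
    //         Err(AlignFromBytesError::NotPowerOfTwo(6))
    //     ));
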
    #[inline]
    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
    #[inline]
    pub fn max_for_offset(offset: Size) -> Align {
        Align { pow2: offset.bytes().trailing_zeros() as u8 }
    }

    /// Lower the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    #[inline]
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
}

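// Illustrative sketch (not part of the original source): the best alignment
// for offset 24 is 8, since 24 = 0b11000 has three trailing zero bits.
//
//     let off = Size::from_bytes(24);
//     assert_eq!(Align::max_for_offset(off).bytes(), 8);
//     let sixteen = Align::from_bytes(16).unwrap();
//     assert_eq!(sixteen.restrict_for_offset(off).bytes(), 8);
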
/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }

    #[inline]
    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
    }

    #[inline]
    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    #[inline]
    pub fn size(self) -> Size {
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    /// Gets the `Integer` type from an `IntegerType`.
    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
            IntegerType::Fixed(x, _) => x,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Returns the largest signed value that can be represented by this Integer.
    #[inline]
    pub fn signed_max(self) -> i128 {
        match self {
            I8 => i8::MAX as i128,
            I16 => i16::MAX as i128,
            I32 => i32::MAX as i128,
            I64 => i64::MAX as i128,
            I128 => i128::MAX,
        }
    }

    /// Finds the smallest Integer type which can represent the signed value.
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest Integer type which can represent the unsigned value.
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

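    // Illustrative sketch (not part of the original source): the overlapping
    // match arms above pick the first (smallest) integer that fits.
    //
    //     assert_eq!(Integer::fit_signed(127), I8);
    //     assert_eq!(Integer::fit_signed(128), I16);
    //     assert_eq!(Integer::fit_unsigned(255), I8);
    //     assert_eq!(Integer::fit_unsigned(256), I16);
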
    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
        })
    }

    /// Find the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    pub fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer(AddressSpace),
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
            // different address spaces can have different sizes
            // (but TargetDataLayout doesn't currently parse that part of the DL string)
            Pointer(_) => dl.pointer_size,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
            // different address spaces can have different alignments
            // (but TargetDataLayout doesn't currently parse that part of the DL string)
            Pointer(_) => dl.pointer_align,
        }
    }
}

/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`,
/// followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    pub fn full(size: Size) -> Self {
        Self { start: 0, end: size.unsigned_int_max() }
    }

    /// Returns `true` if `v` is contained in the range.
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Returns `self` with replaced `start`
    #[inline(always)]
    pub fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with replaced `end`
    #[inline(always)]
    pub fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }

    /// Returns `true` if `size` completely fills the range.
    #[inline]
    pub fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
    }
}

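// Illustrative sketch (not part of the original source): for an 8-bit scalar,
// `254..=2` wraps around zero and accepts exactly {254, 255, 0, 1, 2}.
//
//     let r = WrappingRange { start: 254, end: 2 };
//     assert!(r.contains(255) && r.contains(0) && r.contains(2));
//     assert!(!r.contains(3) && !r.contains(253));
//     let byte = Size::from_bits(8);
//     assert!(WrappingRange::full(byte).is_full_for(byte));
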
impl fmt::Debug for WrappingRange {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.start > self.end {
            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
        } else {
            write!(fmt, "{}..={}", self.start, self.end)?;
        }
        Ok(())
    }
}

/// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Scalar {
    Initialized {
        value: Primitive,

        // FIXME(eddyb) always use the shortest range, e.g., by finding
        // the largest space between two consecutive valid values and
        // taking everything else as the (shortest) valid range.
        valid_range: WrappingRange,
    },
    Union {
        /// Even for unions, we need to use the correct registers for the kind of
        /// values inside the union, so we keep the `Primitive` type around. We
        /// also use it to compute the size of the scalar.
        /// However, unions never have niches and even allow undef,
        /// so there is no `valid_range`.
        value: Primitive,
    },
}

impl Scalar {
    #[inline]
    pub fn is_bool(&self) -> bool {
        matches!(
            self,
            Scalar::Initialized {
                value: Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 }
            }
        )
    }

    /// Get the primitive representation of this type, ignoring the valid range and whether the
    /// value is allowed to be undefined (due to being a union).
    pub fn primitive(&self) -> Primitive {
        match *self {
            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
        }
    }

    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
        self.primitive().align(cx)
    }

    pub fn size(self, cx: &impl HasDataLayout) -> Size {
        self.primitive().size(cx)
    }

    #[inline]
    pub fn to_union(&self) -> Self {
        Self::Union { value: self.primitive() }
    }

    #[inline]
    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
        }
    }

    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
    #[inline]
    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
        match self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
        }
    }

    /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout
    #[inline]
    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
            Scalar::Union { .. } => true,
        }
    }

    /// Returns `true` if this type can be left uninit.
    #[inline]
    pub fn is_uninit_valid(&self) -> bool {
        match *self {
            Scalar::Initialized { .. } => false,
            Scalar::Union { .. } => true,
        }
    }
}

rustc_index::newtype_index! {
    /// The *source-order* index of a field in a variant.
    ///
    /// This is how most code after type checking refers to fields, rather than
    /// using names (as names have hygiene complications and more complex lookup).
    ///
    /// Particularly for `repr(Rust)` types, this may not be the same as *layout* order.
    /// (It is for `repr(C)` `struct`s, however.)
    ///
    /// For example, in the following types,
    /// ```rust
    /// # enum Never {}
    /// # #[repr(u16)]
    /// enum Demo1 {
    ///     Variant0 { a: Never, b: i32 } = 100,
    ///     Variant1 { c: u8, d: u64 } = 10,
    /// }
    /// struct Demo2 { e: u8, f: u16, g: u8 }
    /// ```
    /// `b` is `FieldIdx(1)` in `VariantIdx(0)`,
    /// `d` is `FieldIdx(1)` in `VariantIdx(1)`, and
    /// `f` is `FieldIdx(1)` in `VariantIdx(0)`.
    #[derive(HashStable_Generic)]
    pub struct FieldIdx {}
}

/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum FieldsShape {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: IndexVec<FieldIdx, Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: IndexVec<FieldIdx, u32>,
    },
}

impl FieldsShape {
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count, "tried to access field {i} of array with {count} fields");
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::from_usize(i)],
        }
    }

    #[inline]
    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => {
                memory_index[FieldIdx::from_usize(i)].try_into().unwrap()
            }
        }
    }

    /// Gets source indices of the fields by increasing offsets.
    #[inline]
    pub fn index_by_increasing_offset(&self) -> impl Iterator<Item = usize> + '_ {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = IndexVec::new();
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
                    inverse_small[mem_idx as usize] = field_idx.as_u32() as u8;
                }
            } else {
                inverse_big = memory_index.invert_bijective_mapping();
            }
        }

        (0..self.count()).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i as u32].as_usize()
                }
            }
        })
    }
}

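// Illustrative sketch (not part of the original source; assumes `IndexVec`'s
// `FromIterator` impl): a two-field struct whose fields were swapped in
// memory. Source field 0 sits at memory position 1 and vice versa, so
// `index_by_increasing_offset` yields the source indices in memory order:
//
//     let shape = FieldsShape::Arbitrary {
//         // Source order: field 0 at offset 4, field 1 at offset 0.
//         offsets: [Size::from_bytes(4), Size::from_bytes(0)].into_iter().collect(),
//         memory_index: [1, 0].into_iter().collect(),
//     };
//     let order: Vec<usize> = shape.index_by_increasing_offset().collect();
//     assert_eq!(order, vec![1, 0]);
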
/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// The default address space, corresponding to data space.
    pub const DATA: Self = AddressSpace(0);
}

/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64,
    },
    Aggregate {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    },
}

impl Abi {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        match *self {
            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => !sized,
        }
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        !self.is_unsized()
    }

    /// Returns `true` if this is a single signed integer scalar.
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self {
            Abi::Scalar(scal) => match scal.primitive() {
                Primitive::Int(_, signed) => signed,
                _ => false,
            },
            _ => panic!("`is_signed` on non-scalar ABI {self:?}"),
        }
    }

    /// Returns `true` if this is an uninhabited type.
    #[inline]
    pub fn is_uninhabited(&self) -> bool {
        matches!(*self, Abi::Uninhabited)
    }

    /// Returns `true` if this is a scalar type.
    #[inline]
    pub fn is_scalar(&self) -> bool {
        matches!(*self, Abi::Scalar(_))
    }

    /// Returns `true` if this is a bool.
    #[inline]
    pub fn is_bool(&self) -> bool {
        matches!(*self, Abi::Scalar(s) if s.is_bool())
    }

    /// Returns the fixed alignment of this ABI, if any is mandated.
    pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> {
        Some(match *self {
            Abi::Scalar(s) => s.align(cx),
            Abi::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
            Abi::Vector { element, count } => {
                cx.data_layout().vector_align(element.size(cx) * count)
            }
            Abi::Uninhabited | Abi::Aggregate { .. } => return None,
        })
    }

    /// Returns the fixed size of this ABI, if any is mandated.
    pub fn inherent_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
        Some(match *self {
            Abi::Scalar(s) => {
                // No padding in scalars.
                s.size(cx)
            }
            Abi::ScalarPair(s1, s2) => {
                // May have some padding between the pair.
                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
                (field2_offset + s2.size(cx)).align_to(self.inherent_align(cx)?.abi)
            }
            Abi::Vector { element, count } => {
                // No padding in vectors, except possibly for trailing padding
                // to make the size a multiple of align (e.g. for vectors of size 3).
                (element.size(cx) * count).align_to(self.inherent_align(cx)?.abi)
            }
            Abi::Uninhabited | Abi::Aggregate { .. } => return None,
        })
    }

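    // Illustrative sketch (not part of the original source; assumes a target
    // where `i32` has ABI alignment 4): for a `ScalarPair` of an `i8` and an
    // `i32`, the second field starts at `align_to(1, 4) == 4`, so the
    // inherent size is `(4 + 4).align_to(4) == 8` bytes.
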
    /// Discard validity range information and allow undef.
    pub fn to_union(&self) -> Self {
        match *self {
            Abi::Scalar(s) => Abi::Scalar(s.to_union()),
            Abi::ScalarPair(s1, s2) => Abi::ScalarPair(s1.to_union(), s2.to_union()),
            Abi::Vector { element, count } => Abi::Vector { element: element.to_union(), count },
            Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
        }
    }

    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
        match (self, other) {
            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
            (Abi::Scalar(l), Abi::Scalar(r)) => l.primitive() == r.primitive(),
            (
                Abi::Vector { element: element_l, count: count_l },
                Abi::Vector { element: element_r, count: count_r },
            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
            (Abi::ScalarPair(l1, l2), Abi::ScalarPair(r1, r2)) => {
                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
            }
            // Everything else must be strictly identical.
            _ => self == other,
        }
    }
}

#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Variants {
    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single { index: VariantIdx },

    /// Enum-likes with more than one inhabited variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding,
        tag_field: usize,
        variants: IndexVec<VariantIdx, LayoutS>,
    },
}

#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum TagEncoding {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// Discriminant and variant index coincide.
    /// The variant `untagged_variant` contains a niche at an arbitrary
    /// offset (field `tag_field` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    Niche {
        untagged_variant: VariantIdx,
        niche_variants: RangeInclusive<VariantIdx>,
        niche_start: u128,
    },
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }

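    // Illustrative sketch (not part of the original source): a `bool` stored
    // in a byte has a valid range of `0..=1`, leaving 254 invalid values that
    // can encode other data (e.g. enum discriminants):
    //
    //     let niche = Niche {
    //         offset: Size::ZERO,
    //         value: Int(I8, false),
    //         valid_range: WrappingRange { start: 0, end: 1 },
    //     };
    //     assert_eq!(niche.available(&TargetDataLayout::default()), 254);
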
    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
        // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
        // This is accomplished by preferring enums with 2 variants (`count == 1`) and always taking the shortest path to niche zero.
        // Having `None` in niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select closest to zero given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
        // If niche zero is already reserved, the selection of bounds is of little interest.
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // zero is unavailable because wrapping occurs
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // moved past zero, use other bound
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // moved past zero, use other bound
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}

rustc_index::newtype_index! {
    /// The *source-order* index of a variant in a type.
    ///
    /// For enums, these are always `0..variant_count`, regardless of any
    /// custom discriminants that may have been defined, and including any
    /// variants that may end up uninhabited due to field types. (Some of the
    /// variants may not be present in a monomorphized ABI [`Variants`], but
    /// those skipped variants are always counted when determining the *index*.)
    ///
    /// `struct`s, tuples, and `union`s are considered to have a single variant
    /// with variant index zero, aka [`FIRST_VARIANT`].
    #[derive(HashStable_Generic)]
    pub struct VariantIdx {
        /// Equivalent to `VariantIdx(0)`.
        const FIRST_VARIANT = 0;
    }
}

487cf647
FG
1522#[derive(PartialEq, Eq, Hash, Clone)]
1523#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
9ffffee4 1524pub struct LayoutS {
487cf647
FG
1525 /// Says where the fields are located within the layout.
1526 pub fields: FieldsShape,
1527
1528 /// Encodes information about multi-variant layouts.
1529 /// Even with `Multiple` variants, a layout still has its own fields! Those are then
1530 /// shared between all variants. One of them will be the discriminant,
1531 /// but e.g. generators can have more.
1532 ///
1533 /// To access all fields of this layout, both `fields` and the fields of the active variant
1534 /// must be taken into account.
9ffffee4 1535 pub variants: Variants,
487cf647
FG
1536
1537 /// The `abi` defines how this data is passed between functions, and it defines
1538 /// value restrictions via `valid_range`.
1539 ///
1540 /// Note that this is entirely orthogonal to the recursive structure defined by
1541 /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
1542 /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
1543 /// have to be taken into account to find all fields of this layout.
1544 pub abi: Abi,
1545
1546 /// The leaf scalar with the largest number of invalid values
1547 /// (i.e. outside of its `valid_range`), if it exists.
1548 pub largest_niche: Option<Niche>,
1549
1550 pub align: AbiAndPrefAlign,
1551 pub size: Size,
add651ee
FG
1552
1553 /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
1554 /// Only used on i686-windows, where the argument passing ABI is different when alignment is
1555 /// requested, even if the requested alignment is equal to the natural alignment.
1556 pub max_repr_align: Option<Align>,
1557
1558 /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
1559 /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
1560 /// in some cases.
1561 pub unadjusted_abi_align: Align,
487cf647
FG
1562}
1563
impl LayoutS {
    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
        let size = scalar.size(cx);
        let align = scalar.align(cx);
        LayoutS {
            variants: Variants::Single { index: FIRST_VARIANT },
            fields: FieldsShape::Primitive,
            abi: Abi::Scalar(scalar),
            largest_niche,
            size,
            align,
            max_repr_align: None,
            unadjusted_abi_align: align.abi,
        }
    }
}

impl fmt::Debug for LayoutS {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
        // `Interned<LayoutS>`. We print it like this to avoid having to update
        // expected output in a lot of tests.
        let LayoutS {
            size,
            align,
            abi,
            fields,
            largest_niche,
            variants,
            max_repr_align,
            unadjusted_abi_align,
        } = self;
        f.debug_struct("Layout")
            .field("size", size)
            .field("align", align)
            .field("abi", abi)
            .field("fields", fields)
            .field("largest_niche", largest_niche)
            .field("variants", variants)
            .field("max_repr_align", max_repr_align)
            .field("unadjusted_abi_align", unadjusted_abi_align)
            .finish()
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
#[rustc_pass_by_value]
pub struct Layout<'a>(pub Interned<'a, LayoutS>);

impl<'a> fmt::Debug for Layout<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // See comment on `<LayoutS as Debug>::fmt` above.
        self.0.0.fmt(f)
    }
}

impl<'a> Layout<'a> {
    pub fn fields(self) -> &'a FieldsShape {
        &self.0.0.fields
    }

    pub fn variants(self) -> &'a Variants {
        &self.0.0.variants
    }

    pub fn abi(self) -> Abi {
        self.0.0.abi
    }

    pub fn largest_niche(self) -> Option<Niche> {
        self.0.0.largest_niche
    }

    pub fn align(self) -> AbiAndPrefAlign {
        self.0.0.align
    }

    pub fn size(self) -> Size {
        self.0.0.size
    }

    pub fn max_repr_align(self) -> Option<Align> {
        self.0.0.max_repr_align
    }

    pub fn unadjusted_abi_align(self) -> Align {
        self.0.0.unadjusted_abi_align
    }

    /// Whether the layout is from a type that implements [`std::marker::PointerLike`].
    ///
    /// Currently, that means that the type is pointer-sized, pointer-aligned,
    /// and has a scalar ABI.
    pub fn is_pointer_like(self, data_layout: &TargetDataLayout) -> bool {
        self.size() == data_layout.pointer_size
            && self.align().abi == data_layout.pointer_align.abi
            && matches!(self.abi(), Abi::Scalar(..))
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data.
    Box { unpin: bool },
}

/// Note that this information is advisory only, and backends are free to ignore it.
/// It can only be used to encode potential optimizations, but no critical information.
#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
}

impl LayoutS {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        self.abi.is_sized()
    }

    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
    pub fn is_1zst(&self) -> bool {
        self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1
    }

    /// Returns `true` if the type is a ZST and not unsized.
    ///
    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Uninhabited => self.size.bytes() == 0,
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
        }
    }

    /// Checks if these two `Layout` are equal enough to be considered "the same for all function
    /// call ABIs". Note however that real ABIs depend on more details that are not reflected in
    /// the `Layout`; the `PassMode` needs to be compared as well.
    pub fn eq_abi(&self, other: &Self) -> bool {
        // The one thing that we are not capturing here is that for unsized types, the metadata must
        // also have the same ABI, and moreover that the same metadata leads to the same size. The
        // 2nd point is quite hard to check though.
        self.size == other.size
            && self.is_sized() == other.is_sized()
            && self.abi.eq_up_to_validity(&other.abi)
            && self.abi.is_bool() == other.abi.is_bool()
            && self.align.abi == other.align.abi
            && self.max_repr_align == other.max_repr_align
            && self.unadjusted_abi_align == other.unadjusted_abi_align
    }
}

#[derive(Copy, Clone, Debug)]
pub enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}