1 #![cfg_attr(feature = "nightly", feature(step_trait, rustc_attrs, min_specialization))]
4 #[cfg(feature = "nightly")]
6 use std
::num
::{NonZeroUsize, ParseIntError}
;
7 use std
::ops
::{Add, AddAssign, Mul, RangeInclusive, Sub}
;
10 use bitflags
::bitflags
;
11 use rustc_data_structures
::intern
::Interned
;
12 #[cfg(feature = "nightly")]
13 use rustc_data_structures
::stable_hasher
::StableOrd
;
14 use rustc_index
::vec
::{Idx, IndexSlice, IndexVec}
;
15 #[cfg(feature = "nightly")]
16 use rustc_macros
::HashStable_Generic
;
17 #[cfg(feature = "nightly")]
18 use rustc_macros
::{Decodable, Encodable}
;
22 pub use layout
::LayoutCalculator
;
24 /// Requirements for a `StableHashingContext` to be used in this crate.
25 /// This is a hack to allow using the `HashStable_Generic` derive macro
26 /// instead of implementing everything in `rustc_middle`.
27 pub trait HashStableContext {}
34 #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
35 pub struct ReprFlags
: u8 {
37 const IS_SIMD
= 1 << 1;
38 const IS_TRANSPARENT
= 1 << 2;
39 // Internal only for now. If true, don't reorder fields.
40 const IS_LINEAR
= 1 << 3;
41 // If true, the type's layout can be randomized using
42 // the seed stored in `ReprOptions.layout_seed`
43 const RANDOMIZE_LAYOUT
= 1 << 4;
44 // Any of these flags being set prevent field reordering optimisation.
45 const IS_UNOPTIMISABLE
= ReprFlags
::IS_C
.bits
46 | ReprFlags
::IS_SIMD
.bits
47 | ReprFlags
::IS_LINEAR
.bits
;
51 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
52 #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
53 pub enum IntegerType
{
54 /// Pointer sized integer type, i.e. isize and usize. The field shows signedness, that
55 /// is, `Pointer(true)` is isize.
57 /// Fixed-size integer type, e.g. i8, u32, i128. The bool field shows signedness, `Fixed(I8, false)` means `u8`
62 pub fn is_signed(&self) -> bool
{
64 IntegerType
::Pointer(b
) => *b
,
65 IntegerType
::Fixed(_
, b
) => *b
,
70 /// Represents the repr options provided by the user.
71 #[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
72 #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
73 pub struct ReprOptions
{
74 pub int
: Option
<IntegerType
>,
75 pub align
: Option
<Align
>,
76 pub pack
: Option
<Align
>,
78 /// The seed to be used for randomizing a type's layout
80 /// Note: This could technically be a `[u8; 16]` (a `u128`) which would
81 /// be the "most accurate" hash as it'd encompass the item and crate
82 /// hash without loss, but it does pay the price of being larger.
83 /// Everything's a tradeoff, a `u64` seed should be sufficient for our
84 /// purposes (primarily `-Z randomize-layout`)
85 pub field_shuffle_seed
: u64,
90 pub fn simd(&self) -> bool
{
91 self.flags
.contains(ReprFlags
::IS_SIMD
)
95 pub fn c(&self) -> bool
{
96 self.flags
.contains(ReprFlags
::IS_C
)
100 pub fn packed(&self) -> bool
{
105 pub fn transparent(&self) -> bool
{
106 self.flags
.contains(ReprFlags
::IS_TRANSPARENT
)
110 pub fn linear(&self) -> bool
{
111 self.flags
.contains(ReprFlags
::IS_LINEAR
)
114 /// Returns the discriminant type, given these `repr` options.
115 /// This must only be called on enums!
116 pub fn discr_type(&self) -> IntegerType
{
117 self.int
.unwrap_or(IntegerType
::Pointer(true))
120 /// Returns `true` if this `#[repr()]` should inhabit "smart enum
121 /// layout" optimizations, such as representing `Foo<&T>` as a
123 pub fn inhibit_enum_layout_opt(&self) -> bool
{
124 self.c() || self.int
.is_some()
127 /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
128 /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
129 pub fn inhibit_struct_field_reordering_opt(&self) -> bool
{
// `repr(packed(1))` gets special-cased here.
130 if let Some(pack
) = self.pack
{
131 if pack
.bytes() == 1 {
// NOTE(review): original lines 132-135 are missing from this extraction,
// including the body of the `pack.bytes() == 1` branch and the closing
// braces — recover them before editing this function.
// Otherwise: inhibited iff any of the IS_UNOPTIMISABLE flags
// (per the ReprFlags definition: C, SIMD, or linear) is set,
// or an explicit discriminant type was given.
136 self.flags
.intersects(ReprFlags
::IS_UNOPTIMISABLE
) || self.int
.is_some()
139 /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
140 /// was enabled for its declaration crate
141 pub fn can_randomize_type_layout(&self) -> bool
{
142 !self.inhibit_struct_field_reordering_opt()
143 && self.flags
.contains(ReprFlags
::RANDOMIZE_LAYOUT
)
146 /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
147 pub fn inhibit_union_abi_opt(&self) -> bool
{
152 /// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
153 /// for a target, which contains everything needed to compute layouts.
154 #[derive(Debug, PartialEq, Eq)]
155 pub struct TargetDataLayout
{
157 pub i1_align
: AbiAndPrefAlign
,
158 pub i8_align
: AbiAndPrefAlign
,
159 pub i16_align
: AbiAndPrefAlign
,
160 pub i32_align
: AbiAndPrefAlign
,
161 pub i64_align
: AbiAndPrefAlign
,
162 pub i128_align
: AbiAndPrefAlign
,
163 pub f32_align
: AbiAndPrefAlign
,
164 pub f64_align
: AbiAndPrefAlign
,
165 pub pointer_size
: Size
,
166 pub pointer_align
: AbiAndPrefAlign
,
167 pub aggregate_align
: AbiAndPrefAlign
,
169 /// Alignments for vector types.
170 pub vector_align
: Vec
<(Size
, AbiAndPrefAlign
)>,
172 pub instruction_address_space
: AddressSpace
,
174 /// Minimum size of #[repr(C)] enums (default c_int::BITS, usually 32)
175 /// Note: This isn't in LLVM's data layout string, it is `short_enum`
176 /// so the only valid spec for LLVM is c_int::BITS or 8
177 pub c_enum_min_size
: Integer
,
180 impl Default
for TargetDataLayout
{
181 /// Creates an instance of `TargetDataLayout`.
182 fn default() -> TargetDataLayout
{
183 let align
= |bits
| Align
::from_bits(bits
).unwrap();
186 i1_align
: AbiAndPrefAlign
::new(align(8)),
187 i8_align
: AbiAndPrefAlign
::new(align(8)),
188 i16_align
: AbiAndPrefAlign
::new(align(16)),
189 i32_align
: AbiAndPrefAlign
::new(align(32)),
190 i64_align
: AbiAndPrefAlign { abi: align(32), pref: align(64) }
,
191 i128_align
: AbiAndPrefAlign { abi: align(32), pref: align(64) }
,
192 f32_align
: AbiAndPrefAlign
::new(align(32)),
193 f64_align
: AbiAndPrefAlign
::new(align(64)),
194 pointer_size
: Size
::from_bits(64),
195 pointer_align
: AbiAndPrefAlign
::new(align(64)),
196 aggregate_align
: AbiAndPrefAlign { abi: align(0), pref: align(64) }
,
198 (Size
::from_bits(64), AbiAndPrefAlign
::new(align(64))),
199 (Size
::from_bits(128), AbiAndPrefAlign
::new(align(128))),
201 instruction_address_space
: AddressSpace
::DATA
,
202 c_enum_min_size
: Integer
::I32
,
207 pub enum TargetDataLayoutErrors
<'a
> {
208 InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError }
,
209 InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError }
,
210 MissingAlignment { cause: &'a str }
,
211 InvalidAlignment { cause: &'a str, err: String }
,
212 InconsistentTargetArchitecture { dl: &'a str, target: &'a str }
,
213 InconsistentTargetPointerWidth { pointer_size: u64, target: u32 }
,
214 InvalidBitsSize { err: String }
,
217 impl TargetDataLayout
{
218 /// Parse data layout from an [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
220 /// This function doesn't fill `c_enum_min_size` and it will always be `I32` since it can not be
221 /// determined from llvm string.
222 pub fn parse_from_llvm_datalayout_string
<'a
>(
224 ) -> Result
<TargetDataLayout
, TargetDataLayoutErrors
<'a
>> {
225 // Parse an address space index from a string.
226 let parse_address_space
= |s
: &'a
str, cause
: &'a
str| {
227 s
.parse
::<u32>().map(AddressSpace
).map_err(|err
| {
228 TargetDataLayoutErrors
::InvalidAddressSpace { addr_space: s, cause, err }
232 // Parse a bit count from a string.
233 let parse_bits
= |s
: &'a
str, kind
: &'a
str, cause
: &'a
str| {
234 s
.parse
::<u64>().map_err(|err
| TargetDataLayoutErrors
::InvalidBits
{
242 // Parse a size string.
243 let size
= |s
: &'a
str, cause
: &'a
str| parse_bits(s
, "size", cause
).map(Size
::from_bits
);
245 // Parse an alignment string.
246 let align
= |s
: &[&'a
str], cause
: &'a
str| {
248 return Err(TargetDataLayoutErrors
::MissingAlignment { cause }
);
250 let align_from_bits
= |bits
| {
251 Align
::from_bits(bits
)
252 .map_err(|err
| TargetDataLayoutErrors
::InvalidAlignment { cause, err }
)
254 let abi
= parse_bits(s
[0], "alignment", cause
)?
;
255 let pref
= s
.get(1).map_or(Ok(abi
), |pref
| parse_bits(pref
, "alignment", cause
))?
;
256 Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? }
)
259 let mut dl
= TargetDataLayout
::default();
260 let mut i128_align_src
= 64;
261 for spec
in input
.split('
-'
) {
262 let spec_parts
= spec
.split('
:'
).collect
::<Vec
<_
>>();
265 ["e"] => dl
.endian
= Endian
::Little
,
266 ["E"] => dl
.endian
= Endian
::Big
,
267 [p
] if p
.starts_with('P'
) => {
268 dl
.instruction_address_space
= parse_address_space(&p
[1..], "P")?
270 ["a", ref a @
..] => dl
.aggregate_align
= align(a
, "a")?
,
271 ["f32", ref a @
..] => dl
.f32_align
= align(a
, "f32")?
,
272 ["f64", ref a @
..] => dl
.f64_align
= align(a
, "f64")?
,
273 // FIXME(erikdesjardins): we should be parsing nonzero address spaces
274 // this will require replacing TargetDataLayout::{pointer_size,pointer_align}
275 // with e.g. `fn pointer_size_in(AddressSpace)`
276 [p @
"p", s
, ref a @
..] | [p @
"p0", s
, ref a @
..] => {
277 dl
.pointer_size
= size(s
, p
)?
;
278 dl
.pointer_align
= align(a
, p
)?
;
280 [s
, ref a @
..] if s
.starts_with('i'
) => {
281 let Ok(bits
) = s
[1..].parse
::<u64>() else {
282 size(&s
[1..], "i")?
; // For the user error.
285 let a
= align(a
, s
)?
;
287 1 => dl
.i1_align
= a
,
288 8 => dl
.i8_align
= a
,
289 16 => dl
.i16_align
= a
,
290 32 => dl
.i32_align
= a
,
291 64 => dl
.i64_align
= a
,
294 if bits
>= i128_align_src
&& bits
<= 128 {
295 // Default alignment for i128 is decided by taking the alignment of
296 // largest-sized i{64..=128}.
297 i128_align_src
= bits
;
301 [s
, ref a @
..] if s
.starts_with('v'
) => {
302 let v_size
= size(&s
[1..], "v")?
;
303 let a
= align(a
, s
)?
;
304 if let Some(v
) = dl
.vector_align
.iter_mut().find(|v
| v
.0 == v_size
) {
308 // No existing entry, add a new one.
309 dl
.vector_align
.push((v_size
, a
));
311 _
=> {}
// Ignore everything else.
317 /// Returns exclusive upper bound on object size.
319 /// The theoretical maximum object size is defined as the maximum positive `isize` value.
320 /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
321 /// index every address within an object along with one byte past the end, along with allowing
322 /// `isize` to store the difference between any two pointers into an object.
324 /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
325 /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
326 /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
327 /// address space on 64-bit ARMv8 and x86_64.
329 pub fn obj_size_bound(&self) -> u64 {
330 match self.pointer_size
.bits() {
334 bits
=> panic
!("obj_size_bound: unknown pointer bit size {}", bits
),
339 pub fn ptr_sized_integer(&self) -> Integer
{
340 match self.pointer_size
.bits() {
344 bits
=> panic
!("ptr_sized_integer: unknown pointer bit size {}", bits
),
// Looks up the alignment pair for a vector of `vec_size`, preferring an
// explicit entry from the data-layout string over the computed default.
349 pub fn vector_align(&self, vec_size
: Size
) -> AbiAndPrefAlign
{
// Scan the explicit per-size entries parsed from the DL string first.
350 for &(size
, align
) in &self.vector_align
{
351 if size
== vec_size
{
// NOTE(review): original lines 352-354 (the body of this `if`,
// presumably returning `align`, plus closing braces) are missing
// from this extraction — recover before editing.
355 // Default to natural alignment, which is what LLVM does.
356 // That is, use the size, rounded up to a power of 2.
357 AbiAndPrefAlign
::new(Align
::from_bytes(vec_size
.bytes().next_power_of_two()).unwrap())
/// Abstraction over anything that can hand out the target's
/// `TargetDataLayout` (used so layout helpers can take a generic context).
361 pub trait HasDataLayout
{
// Borrow the target data layout from the implementing context.
362 fn data_layout(&self) -> &TargetDataLayout
;
365 impl HasDataLayout
for TargetDataLayout
{
367 fn data_layout(&self) -> &TargetDataLayout
{
372 /// Endianness of the target, which must match cfg(target-endian).
373 #[derive(Copy, Clone, PartialEq, Eq)]
380 pub fn as_str(&self) -> &'
static str {
382 Self::Little
=> "little",
388 impl fmt
::Debug
for Endian
{
389 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
390 f
.write_str(self.as_str())
394 impl FromStr
for Endian
{
397 fn from_str(s
: &str) -> Result
<Self, Self::Err
> {
399 "little" => Ok(Self::Little
),
400 "big" => Ok(Self::Big
),
401 _
=> Err(format
!(r
#"unknown endian: "{}""#, s)),
406 /// Size of a type in bytes.
407 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
408 #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
413 // Safety: Ord is implemented as just comparing numerical values and numerical values
414 // are not changed by (de-)serialization.
415 #[cfg(feature = "nightly")]
416 unsafe impl StableOrd
for Size {}
418 // This is debug-printed a lot in larger structs, don't waste too much space there
419 impl fmt
::Debug
for Size
{
420 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
421 write
!(f
, "Size({} bytes)", self.bytes())
426 pub const ZERO
: Size
= Size { raw: 0 }
;
428 /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
429 /// not a multiple of 8.
430 pub fn from_bits(bits
: impl TryInto
<u64>) -> Size
{
431 let bits
= bits
.try_into().ok().unwrap();
432 // Avoid potential overflow from `bits + 7`.
433 Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
437 pub fn from_bytes(bytes
: impl TryInto
<u64>) -> Size
{
438 let bytes
: u64 = bytes
.try_into().ok().unwrap();
443 pub fn bytes(self) -> u64 {
448 pub fn bytes_usize(self) -> usize {
449 self.bytes().try_into().unwrap()
453 pub fn bits(self) -> u64 {
455 fn overflow(bytes
: u64) -> ! {
456 panic
!("Size::bits: {} bytes in bits doesn't fit in u64", bytes
)
459 self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
463 pub fn bits_usize(self) -> usize {
464 self.bits().try_into().unwrap()
468 pub fn align_to(self, align
: Align
) -> Size
{
469 let mask
= align
.bytes() - 1;
470 Size
::from_bytes((self.bytes() + mask
) & !mask
)
474 pub fn is_aligned(self, align
: Align
) -> bool
{
475 let mask
= align
.bytes() - 1;
476 self.bytes() & mask
== 0
480 pub fn checked_add
<C
: HasDataLayout
>(self, offset
: Size
, cx
: &C
) -> Option
<Size
> {
481 let dl
= cx
.data_layout();
483 let bytes
= self.bytes().checked_add(offset
.bytes())?
;
485 if bytes
< dl
.obj_size_bound() { Some(Size::from_bytes(bytes)) }
else { None }
489 pub fn checked_mul
<C
: HasDataLayout
>(self, count
: u64, cx
: &C
) -> Option
<Size
> {
490 let dl
= cx
.data_layout();
492 let bytes
= self.bytes().checked_mul(count
)?
;
493 if bytes
< dl
.obj_size_bound() { Some(Size::from_bytes(bytes)) }
else { None }
496 /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
497 /// (i.e., if it is negative, fill with 1's on the left).
499 pub fn sign_extend(self, value
: u128
) -> u128
{
500 let size
= self.bits();
502 // Truncated until nothing is left.
506 let shift
= 128 - size
;
507 // Shift the unsigned value to the left, then shift back to the right as signed
508 // (essentially fills with sign bit on the left).
509 (((value
<< shift
) as i128
) >> shift
) as u128
512 /// Truncates `value` to `self` bits.
514 pub fn truncate(self, value
: u128
) -> u128
{
515 let size
= self.bits();
517 // Truncated until nothing is left.
520 let shift
= 128 - size
;
521 // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
522 (value
<< shift
) >> shift
526 pub fn signed_int_min(&self) -> i128
{
527 self.sign_extend(1_u128 << (self.bits() - 1)) as i128
531 pub fn signed_int_max(&self) -> i128
{
532 i128
::MAX
>> (128 - self.bits())
536 pub fn unsigned_int_max(&self) -> u128
{
537 u128
::MAX
>> (128 - self.bits())
541 // Panicking addition, subtraction and multiplication for convenience.
542 // Avoid during layout computation, return `LayoutError` instead.
547 fn add(self, other
: Size
) -> Size
{
548 Size
::from_bytes(self.bytes().checked_add(other
.bytes()).unwrap_or_else(|| {
549 panic
!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other
.bytes())
557 fn sub(self, other
: Size
) -> Size
{
558 Size
::from_bytes(self.bytes().checked_sub(other
.bytes()).unwrap_or_else(|| {
559 panic
!("Size::sub: {} - {} would result in negative size", self.bytes(), other
.bytes())
564 impl Mul
<Size
> for u64 {
567 fn mul(self, size
: Size
) -> Size
{
572 impl Mul
<u64> for Size
{
575 fn mul(self, count
: u64) -> Size
{
576 match self.bytes().checked_mul(count
) {
577 Some(bytes
) => Size
::from_bytes(bytes
),
578 None
=> panic
!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count
),
583 impl AddAssign
for Size
{
585 fn add_assign(&mut self, other
: Size
) {
586 *self = *self + other
;
590 #[cfg(feature = "nightly")]
593 fn steps_between(start
: &Self, end
: &Self) -> Option
<usize> {
594 u64::steps_between(&start
.bytes(), &end
.bytes())
598 fn forward_checked(start
: Self, count
: usize) -> Option
<Self> {
599 u64::forward_checked(start
.bytes(), count
).map(Self::from_bytes
)
603 fn forward(start
: Self, count
: usize) -> Self {
604 Self::from_bytes(u64::forward(start
.bytes(), count
))
608 unsafe fn forward_unchecked(start
: Self, count
: usize) -> Self {
609 Self::from_bytes(u64::forward_unchecked(start
.bytes(), count
))
613 fn backward_checked(start
: Self, count
: usize) -> Option
<Self> {
614 u64::backward_checked(start
.bytes(), count
).map(Self::from_bytes
)
618 fn backward(start
: Self, count
: usize) -> Self {
619 Self::from_bytes(u64::backward(start
.bytes(), count
))
623 unsafe fn backward_unchecked(start
: Self, count
: usize) -> Self {
624 Self::from_bytes(u64::backward_unchecked(start
.bytes(), count
))
628 /// Alignment of a type in bytes (always a power of two).
629 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
630 #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
635 // This is debug-printed a lot in larger structs, don't waste too much space there
636 impl fmt
::Debug
for Align
{
637 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
638 write
!(f
, "Align({} bytes)", self.bytes())
643 pub const ONE
: Align
= Align { pow2: 0 }
;
644 pub const MAX
: Align
= Align { pow2: 29 }
;
647 pub fn from_bits(bits
: u64) -> Result
<Align
, String
> {
648 Align
::from_bytes(Size
::from_bits(bits
).bytes())
// Builds an `Align` from a byte count by extracting its power-of-two
// exponent; errors (as a message string) on non-powers-of-two and on
// exponents above `Align::MAX.pow2`.
652 pub fn from_bytes(align
: u64) -> Result
<Align
, String
> {
653 // Treat an alignment of 0 bytes like 1-byte alignment.
// NOTE(review): original line 654 (presumably the `align == 0` check
// guarding this early return) is missing from this extraction.
655 return Ok(Align
::ONE
);
// Local helper: error message for a non-power-of-two byte count.
659 fn not_power_of_2(align
: u64) -> String
{
660 format
!("`{}` is not a power of 2", align
)
// Local helper: error message for an alignment exceeding the maximum.
664 fn too_large(align
: u64) -> String
{
665 format
!("`{}` is too large", align
)
// Count trailing zero bits of `align` into `pow2`.
668 let mut bytes
= align
;
669 let mut pow2
: u8 = 0;
670 while (bytes
& 1) == 0 {
// NOTE(review): original lines 671-674 (the loop body shifting `bytes`
// and bumping `pow2`, plus the follow-up check that `bytes == 1`) are
// missing from this extraction — recover before editing.
675 return Err(not_power_of_2(align
));
// Reject exponents beyond the supported maximum (pow2 = 29, per MAX).
677 if pow2
> Self::MAX
.pow2
{
678 return Err(too_large(align
));
685 pub fn bytes(self) -> u64 {
690 pub fn bits(self) -> u64 {
694 /// Computes the best alignment possible for the given offset
695 /// (the largest power of two that the offset is a multiple of).
697 /// N.B., for an offset of `0`, this happens to return `2^64`.
699 pub fn max_for_offset(offset
: Size
) -> Align
{
700 Align { pow2: offset.bytes().trailing_zeros() as u8 }
703 /// Lower the alignment, if necessary, such that the given offset
704 /// is aligned to it (the offset is a multiple of the alignment).
706 pub fn restrict_for_offset(self, offset
: Size
) -> Align
{
707 self.min(Align
::max_for_offset(offset
))
711 /// A pair of alignments, ABI-mandated and preferred.
712 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
713 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
715 pub struct AbiAndPrefAlign
{
720 impl AbiAndPrefAlign
{
722 pub fn new(align
: Align
) -> AbiAndPrefAlign
{
723 AbiAndPrefAlign { abi: align, pref: align }
727 pub fn min(self, other
: AbiAndPrefAlign
) -> AbiAndPrefAlign
{
728 AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
732 pub fn max(self, other
: AbiAndPrefAlign
) -> AbiAndPrefAlign
{
733 AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
737 /// Integers, also used for enum discriminants.
738 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
739 #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
751 pub fn size(self) -> Size
{
753 I8
=> Size
::from_bytes(1),
754 I16
=> Size
::from_bytes(2),
755 I32
=> Size
::from_bytes(4),
756 I64
=> Size
::from_bytes(8),
757 I128
=> Size
::from_bytes(16),
761 /// Gets the Integer type from an IntegerType.
762 pub fn from_attr
<C
: HasDataLayout
>(cx
: &C
, ity
: IntegerType
) -> Integer
{
763 let dl
= cx
.data_layout();
766 IntegerType
::Pointer(_
) => dl
.ptr_sized_integer(),
767 IntegerType
::Fixed(x
, _
) => x
,
771 pub fn align
<C
: HasDataLayout
>(self, cx
: &C
) -> AbiAndPrefAlign
{
772 let dl
= cx
.data_layout();
779 I128
=> dl
.i128_align
,
783 /// Returns the largest signed value that can be represented by this Integer.
785 pub fn signed_max(self) -> i128
{
787 I8
=> i8::MAX
as i128
,
788 I16
=> i16::MAX
as i128
,
789 I32
=> i32::MAX
as i128
,
790 I64
=> i64::MAX
as i128
,
795 /// Finds the smallest Integer type which can represent the signed value.
797 pub fn fit_signed(x
: i128
) -> Integer
{
799 -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8
,
800 -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16
,
801 -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32
,
802 -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64
,
807 /// Finds the smallest Integer type which can represent the unsigned value.
809 pub fn fit_unsigned(x
: u128
) -> Integer
{
811 0..=0x0000_0000_0000_00ff => I8
,
812 0..=0x0000_0000_0000_ffff => I16
,
813 0..=0x0000_0000_ffff_ffff => I32
,
814 0..=0xffff_ffff_ffff_ffff => I64
,
819 /// Finds the smallest integer with the given alignment.
820 pub fn for_align
<C
: HasDataLayout
>(cx
: &C
, wanted
: Align
) -> Option
<Integer
> {
821 let dl
= cx
.data_layout();
823 [I8
, I16
, I32
, I64
, I128
].into_iter().find(|&candidate
| {
824 wanted
== candidate
.align(dl
).abi
&& wanted
.bytes() == candidate
.size().bytes()
828 /// Find the largest integer with the given alignment or less.
829 pub fn approximate_align
<C
: HasDataLayout
>(cx
: &C
, wanted
: Align
) -> Integer
{
830 let dl
= cx
.data_layout();
832 // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
833 for candidate
in [I64
, I32
, I16
] {
834 if wanted
>= candidate
.align(dl
).abi
&& wanted
.bytes() >= candidate
.size().bytes() {
841 // FIXME(eddyb) consolidate this and other methods that find the appropriate
842 // `Integer` given some requirements.
844 pub fn from_size(size
: Size
) -> Result
<Self, String
> {
846 8 => Ok(Integer
::I8
),
847 16 => Ok(Integer
::I16
),
848 32 => Ok(Integer
::I32
),
849 64 => Ok(Integer
::I64
),
850 128 => Ok(Integer
::I128
),
851 _
=> Err(format
!("rust does not support integers with {} bits", size
.bits())),
856 /// Fundamental unit of memory access and layout.
857 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
858 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
860 /// The `bool` is the signedness of the `Integer` type.
862 /// One would think we would not care about such details this low down,
863 /// but some ABIs are described in terms of C types and ISAs where the
864 /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
865 /// a negative integer passed by zero-extension will appear positive in
866 /// the callee, and most operations on it will produce the wrong values.
870 Pointer(AddressSpace
),
874 pub fn size
<C
: HasDataLayout
>(self, cx
: &C
) -> Size
{
875 let dl
= cx
.data_layout();
878 Int(i
, _
) => i
.size(),
879 F32
=> Size
::from_bits(32),
880 F64
=> Size
::from_bits(64),
881 // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
882 // different address spaces can have different sizes
883 // (but TargetDataLayout doesn't currently parse that part of the DL string)
884 Pointer(_
) => dl
.pointer_size
,
888 pub fn align
<C
: HasDataLayout
>(self, cx
: &C
) -> AbiAndPrefAlign
{
889 let dl
= cx
.data_layout();
892 Int(i
, _
) => i
.align(dl
),
895 // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
896 // different address spaces can have different alignments
897 // (but TargetDataLayout doesn't currently parse that part of the DL string)
898 Pointer(_
) => dl
.pointer_align
,
903 /// Inclusive wrap-around range of valid values, that is, if
904 /// start > end, it represents `start..=MAX`,
905 /// followed by `0..=end`.
907 /// That is, for an i8 primitive, a range of `254..=2` means following
910 /// 254 (-2), 255 (-1), 0, 1, 2
912 /// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
913 #[derive(Clone, Copy, PartialEq, Eq, Hash)]
914 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
915 pub struct WrappingRange
{
921 pub fn full(size
: Size
) -> Self {
922 Self { start: 0, end: size.unsigned_int_max() }
925 /// Returns `true` if `v` is contained in the range.
927 pub fn contains(&self, v
: u128
) -> bool
{
928 if self.start
<= self.end
{
929 self.start
<= v
&& v
<= self.end
931 self.start
<= v
|| v
<= self.end
935 /// Returns `self` with replaced `start`
937 pub fn with_start(mut self, start
: u128
) -> Self {
942 /// Returns `self` with replaced `end`
944 pub fn with_end(mut self, end
: u128
) -> Self {
949 /// Returns `true` if `size` completely fills the range.
951 pub fn is_full_for(&self, size
: Size
) -> bool
{
952 let max_value
= size
.unsigned_int_max();
953 debug_assert
!(self.start
<= max_value
&& self.end
<= max_value
);
954 self.start
== (self.end
.wrapping_add(1) & max_value
)
958 impl fmt
::Debug
for WrappingRange
{
959 fn fmt(&self, fmt
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
960 if self.start
> self.end
{
961 write
!(fmt
, "(..={}) | ({}..)", self.end
, self.start
)?
;
963 write
!(fmt
, "{}..={}", self.start
, self.end
)?
;
969 /// Information about one scalar component of a Rust type.
970 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
971 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
976 // FIXME(eddyb) always use the shortest range, e.g., by finding
977 // the largest space between two consecutive valid values and
978 // taking everything else as the (shortest) valid range.
979 valid_range
: WrappingRange
,
982 /// Even for unions, we need to use the correct registers for the kind of
983 /// values inside the union, so we keep the `Primitive` type around. We
984 /// also use it to compute the size of the scalar.
985 /// However, unions never have niches and even allow undef,
986 /// so there is no `valid_range`.
993 pub fn is_bool(&self) -> bool
{
996 Scalar
::Initialized
{
997 value
: Int(I8
, false),
998 valid_range
: WrappingRange { start: 0, end: 1 }
1003 /// Get the primitive representation of this type, ignoring the valid range and whether the
1004 /// value is allowed to be undefined (due to being a union).
1005 pub fn primitive(&self) -> Primitive
{
1007 Scalar
::Initialized { value, .. }
| Scalar
::Union { value }
=> value
,
1011 pub fn align(self, cx
: &impl HasDataLayout
) -> AbiAndPrefAlign
{
1012 self.primitive().align(cx
)
1015 pub fn size(self, cx
: &impl HasDataLayout
) -> Size
{
1016 self.primitive().size(cx
)
1020 pub fn to_union(&self) -> Self {
1021 Self::Union { value: self.primitive() }
1025 pub fn valid_range(&self, cx
: &impl HasDataLayout
) -> WrappingRange
{
1027 Scalar
::Initialized { valid_range, .. }
=> valid_range
,
1028 Scalar
::Union { value }
=> WrappingRange
::full(value
.size(cx
)),
1033 /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
1034 pub fn valid_range_mut(&mut self) -> &mut WrappingRange
{
1036 Scalar
::Initialized { valid_range, .. }
=> valid_range
,
1037 Scalar
::Union { .. }
=> panic
!("cannot change the valid range of a union"),
1041 /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout
1043 pub fn is_always_valid
<C
: HasDataLayout
>(&self, cx
: &C
) -> bool
{
1045 Scalar
::Initialized { valid_range, .. }
=> valid_range
.is_full_for(self.size(cx
)),
1046 Scalar
::Union { .. }
=> true,
1050 /// Returns `true` if this type can be left uninit.
1052 pub fn is_uninit_valid(&self) -> bool
{
1054 Scalar
::Initialized { .. }
=> false,
1055 Scalar
::Union { .. }
=> true,
1060 rustc_index
::newtype_index
! {
1061 /// The *source-order* index of a field in a variant.
1063 /// This is how most code after type checking refers to fields, rather than
1064 /// using names (as names have hygiene complications and more complex lookup).
1066 /// Particularly for `repr(Rust)` types, this may not be the same as *layout* order.
1067 /// (It is for `repr(C)` `struct`s, however.)
1069 /// For example, in the following types,
1074 /// Variant0 { a: Never, b: i32 } = 100,
1075 /// Variant1 { c: u8, d: u64 } = 10,
1077 /// struct Demo2 { e: u8, f: u16, g: u8 }
1079 /// `b` is `FieldIdx(1)` in `VariantIdx(0)`,
1080 /// `d` is `FieldIdx(1)` in `VariantIdx(1)`, and
1081 /// `f` is `FieldIdx(1)` in `VariantIdx(0)`.
1082 #[derive(HashStable_Generic)]
1083 pub struct FieldIdx {}
1086 /// Describes how the fields of a type are located in memory.
1087 #[derive(PartialEq, Eq, Hash, Clone, Debug)]
1088 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1089 pub enum FieldsShape
{
1090 /// Scalar primitives and `!`, which never have fields.
1093 /// All fields start at no offset. The `usize` is the field count.
1094 Union(NonZeroUsize
),
1096 /// Array/vector-like placement, with all fields of identical types.
1097 Array { stride: Size, count: u64 }
,
1099 /// Struct-like placement, with precomputed offsets.
1101 /// Fields are guaranteed to not overlap, but note that gaps
1102 /// before, between and after all the fields are NOT always
1103 /// padding, and as such their contents may not be discarded.
1104 /// For example, enum variants leave a gap at the start,
1105 /// where the discriminant field in the enum layout goes.
1107 /// Offsets for the first byte of each field,
1108 /// ordered to match the source definition order.
1109 /// This vector does not go in increasing order.
1110 // FIXME(eddyb) use small vector optimization for the common case.
1111 offsets
: IndexVec
<FieldIdx
, Size
>,
1113 /// Maps source order field indices to memory order indices,
1114 /// depending on how the fields were reordered (if at all).
1115 /// This is a permutation, with both the source order and the
1116 /// memory order using the same (0..n) index ranges.
1118 /// Note that during computation of `memory_index`, sometimes
1119 /// it is easier to operate on the inverse mapping (that is,
1120 /// from memory order to source order), and that is usually
1121 /// named `inverse_memory_index`.
1123 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1124 // FIXME(camlorn) also consider small vector optimization here.
1125 memory_index
: IndexVec
<FieldIdx
, u32>,
1131 pub fn count(&self) -> usize {
1133 FieldsShape
::Primitive
=> 0,
1134 FieldsShape
::Union(count
) => count
.get(),
1135 FieldsShape
::Array { count, .. }
=> count
.try_into().unwrap(),
1136 FieldsShape
::Arbitrary { ref offsets, .. }
=> offsets
.len(),
1141 pub fn offset(&self, i
: usize) -> Size
{
1143 FieldsShape
::Primitive
=> {
1144 unreachable
!("FieldsShape::offset: `Primitive`s have no fields")
1146 FieldsShape
::Union(count
) => {
1149 "tried to access field {} of union with {} fields",
1155 FieldsShape
::Array { stride, count }
=> {
1156 let i
= u64::try_from(i
).unwrap();
1160 FieldsShape
::Arbitrary { ref offsets, .. }
=> offsets
[FieldIdx
::from_usize(i
)],
1165 pub fn memory_index(&self, i
: usize) -> usize {
1167 FieldsShape
::Primitive
=> {
1168 unreachable
!("FieldsShape::memory_index: `Primitive`s have no fields")
1170 FieldsShape
::Union(_
) | FieldsShape
::Array { .. }
=> i
,
1171 FieldsShape
::Arbitrary { ref memory_index, .. }
=> {
1172 memory_index
[FieldIdx
::from_usize(i
)].try_into().unwrap()
1177 /// Gets source indices of the fields by increasing offsets.
1179 pub fn index_by_increasing_offset(&self) -> impl Iterator
<Item
= usize> + '_
{
1180 let mut inverse_small
= [0u8; 64];
1181 let mut inverse_big
= IndexVec
::new();
1182 let use_small
= self.count() <= inverse_small
.len();
1184 // We have to write this logic twice in order to keep the array small.
1185 if let FieldsShape
::Arbitrary { ref memory_index, .. }
= *self {
1187 for (field_idx
, &mem_idx
) in memory_index
.iter_enumerated() {
1188 inverse_small
[mem_idx
as usize] = field_idx
.as_u32() as u8;
1191 inverse_big
= memory_index
.invert_bijective_mapping();
1195 (0..self.count()).map(move |i
| match *self {
1196 FieldsShape
::Primitive
| FieldsShape
::Union(_
) | FieldsShape
::Array { .. }
=> i
,
1197 FieldsShape
::Arbitrary { .. }
=> {
1199 inverse_small
[i
] as usize
1201 inverse_big
[i
as u32].as_usize()
/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// The default address space, corresponding to data space.
    pub const DATA: Self = AddressSpace(0);
}
1220 /// Describes how values of the type are passed by target ABIs,
1221 /// in terms of categories of C types there are ABI rules for.
1222 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
1223 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1228 ScalarPair(Scalar
, Scalar
),
1234 /// If true, the size is exact, otherwise it's only a lower bound.
1240 /// Returns `true` if the layout corresponds to an unsized type.
1242 pub fn is_unsized(&self) -> bool
{
1244 Abi
::Uninhabited
| Abi
::Scalar(_
) | Abi
::ScalarPair(..) | Abi
::Vector { .. }
=> false,
1245 Abi
::Aggregate { sized }
=> !sized
,
1250 pub fn is_sized(&self) -> bool
{
1254 /// Returns `true` if this is a single signed integer scalar
1256 pub fn is_signed(&self) -> bool
{
1258 Abi
::Scalar(scal
) => match scal
.primitive() {
1259 Primitive
::Int(_
, signed
) => signed
,
1262 _
=> panic
!("`is_signed` on non-scalar ABI {:?}", self),
1266 /// Returns `true` if this is an uninhabited type
1268 pub fn is_uninhabited(&self) -> bool
{
1269 matches
!(*self, Abi
::Uninhabited
)
1272 /// Returns `true` is this is a scalar type
1274 pub fn is_scalar(&self) -> bool
{
1275 matches
!(*self, Abi
::Scalar(_
))
1279 #[derive(PartialEq, Eq, Hash, Clone, Debug)]
1280 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1282 /// Single enum variants, structs/tuples, unions, and all non-ADTs.
1283 Single { index: VariantIdx }
,
1285 /// Enum-likes with more than one inhabited variant: each variant comes with
1286 /// a *discriminant* (usually the same as the variant index but the user can
1287 /// assign explicit discriminant values). That discriminant is encoded
1288 /// as a *tag* on the machine. The layout of each variant is
1289 /// a struct, and they all have space reserved for the tag.
1290 /// For enums, the tag is the sole field of the layout.
1293 tag_encoding
: TagEncoding
,
1295 variants
: IndexVec
<VariantIdx
, LayoutS
>,
1299 #[derive(PartialEq, Eq, Hash, Clone, Debug)]
1300 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1301 pub enum TagEncoding
{
1302 /// The tag directly stores the discriminant, but possibly with a smaller layout
1303 /// (so converting the tag to the discriminant can require sign extension).
1306 /// Niche (values invalid for a type) encoding the discriminant:
1307 /// Discriminant and variant index coincide.
1308 /// The variant `untagged_variant` contains a niche at an arbitrary
1309 /// offset (field `tag_field` of the enum), which for a variant with
1310 /// discriminant `d` is set to
1311 /// `(d - niche_variants.start).wrapping_add(niche_start)`.
1313 /// For example, `Option<(usize, &T)>` is represented such that
1314 /// `None` has a null pointer for the second tuple field, and
1315 /// `Some` is the identity function (with a non-null reference).
1317 untagged_variant
: VariantIdx
,
1318 niche_variants
: RangeInclusive
<VariantIdx
>,
1323 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
1324 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1327 pub value
: Primitive
,
1328 pub valid_range
: WrappingRange
,
1332 pub fn from_scalar
<C
: HasDataLayout
>(cx
: &C
, offset
: Size
, scalar
: Scalar
) -> Option
<Self> {
1333 let Scalar
::Initialized { value, valid_range }
= scalar
else { return None }
;
1334 let niche
= Niche { offset, value, valid_range }
;
1335 if niche
.available(cx
) > 0 { Some(niche) }
else { None }
1338 pub fn available
<C
: HasDataLayout
>(&self, cx
: &C
) -> u128
{
1339 let Self { value, valid_range: v, .. }
= *self;
1340 let size
= value
.size(cx
);
1341 assert
!(size
.bits() <= 128);
1342 let max_value
= size
.unsigned_int_max();
1344 // Find out how many values are outside the valid range.
1345 let niche
= v
.end
.wrapping_add(1)..v
.start
;
1346 niche
.end
.wrapping_sub(niche
.start
) & max_value
1349 pub fn reserve
<C
: HasDataLayout
>(&self, cx
: &C
, count
: u128
) -> Option
<(u128
, Scalar
)> {
1352 let Self { value, valid_range: v, .. }
= *self;
1353 let size
= value
.size(cx
);
1354 assert
!(size
.bits() <= 128);
1355 let max_value
= size
.unsigned_int_max();
1357 let niche
= v
.end
.wrapping_add(1)..v
.start
;
1358 let available
= niche
.end
.wrapping_sub(niche
.start
) & max_value
;
1359 if count
> available
{
1363 // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
1364 // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
1365 // This is accomplished by preferring enums with 2 variants(`count==1`) and always taking the shortest path to niche zero.
1366 // Having `None` in niche zero can enable some special optimizations.
1368 // Bound selection criteria:
1369 // 1. Select closest to zero given wrapping semantics.
1370 // 2. Avoid moving past zero if possible.
1372 // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
1373 // If niche zero is already reserved, the selection of bounds are of little interest.
1374 let move_start
= |v
: WrappingRange
| {
1375 let start
= v
.start
.wrapping_sub(count
) & max_value
;
1376 Some((start
, Scalar
::Initialized { value, valid_range: v.with_start(start) }
))
1378 let move_end
= |v
: WrappingRange
| {
1379 let start
= v
.end
.wrapping_add(1) & max_value
;
1380 let end
= v
.end
.wrapping_add(count
) & max_value
;
1381 Some((start
, Scalar
::Initialized { value, valid_range: v.with_end(end) }
))
1383 let distance_end_zero
= max_value
- v
.end
;
1384 if v
.start
> v
.end
{
1385 // zero is unavailable because wrapping occurs
1387 } else if v
.start
<= distance_end_zero
{
1388 if count
<= v
.start
{
1391 // moved past zero, use other bound
1395 let end
= v
.end
.wrapping_add(count
) & max_value
;
1396 let overshot_zero
= (1..=v
.end
).contains(&end
);
1398 // moved past zero, use other bound
1407 rustc_index
::newtype_index
! {
1408 /// The *source-order* index of a variant in a type.
1410 /// For enums, these are always `0..variant_count`, regardless of any
1411 /// custom discriminants that may have been defined, and including any
1412 /// variants that may end up uninhabited due to field types. (Some of the
1413 /// variants may not be present in a monomorphized ABI [`Variants`], but
1414 /// those skipped variants are always counted when determining the *index*.)
1416 /// `struct`s, `tuples`, and `unions`s are considered to have a single variant
1417 /// with variant index zero, aka [`FIRST_VARIANT`].
1418 #[derive(HashStable_Generic)]
1419 pub struct VariantIdx
{
1420 /// Equivalent to `VariantIdx(0)`.
1421 const FIRST_VARIANT
= 0;
1425 #[derive(PartialEq, Eq, Hash, Clone)]
1426 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1427 pub struct LayoutS
{
1428 /// Says where the fields are located within the layout.
1429 pub fields
: FieldsShape
,
1431 /// Encodes information about multi-variant layouts.
1432 /// Even with `Multiple` variants, a layout still has its own fields! Those are then
1433 /// shared between all variants. One of them will be the discriminant,
1434 /// but e.g. generators can have more.
1436 /// To access all fields of this layout, both `fields` and the fields of the active variant
1437 /// must be taken into account.
1438 pub variants
: Variants
,
1440 /// The `abi` defines how this data is passed between functions, and it defines
1441 /// value restrictions via `valid_range`.
1443 /// Note that this is entirely orthogonal to the recursive structure defined by
1444 /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
1445 /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
1446 /// have to be taken into account to find all fields of this layout.
1449 /// The leaf scalar with the largest number of invalid values
1450 /// (i.e. outside of its `valid_range`), if it exists.
1451 pub largest_niche
: Option
<Niche
>,
1453 pub align
: AbiAndPrefAlign
,
1458 pub fn scalar
<C
: HasDataLayout
>(cx
: &C
, scalar
: Scalar
) -> Self {
1459 let largest_niche
= Niche
::from_scalar(cx
, Size
::ZERO
, scalar
);
1460 let size
= scalar
.size(cx
);
1461 let align
= scalar
.align(cx
);
1463 variants
: Variants
::Single { index: FIRST_VARIANT }
,
1464 fields
: FieldsShape
::Primitive
,
1465 abi
: Abi
::Scalar(scalar
),
1473 impl fmt
::Debug
for LayoutS
{
1474 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
1475 // This is how `Layout` used to print before it become
1476 // `Interned<LayoutS>`. We print it like this to avoid having to update
1477 // expected output in a lot of tests.
1478 let LayoutS { size, align, abi, fields, largest_niche, variants }
= self;
1479 f
.debug_struct("Layout")
1480 .field("size", size
)
1481 .field("align", align
)
1483 .field("fields", fields
)
1484 .field("largest_niche", largest_niche
)
1485 .field("variants", variants
)
1490 #[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
1491 #[rustc_pass_by_value]
1492 pub struct Layout
<'a
>(pub Interned
<'a
, LayoutS
>);
1494 impl<'a
> fmt
::Debug
for Layout
<'a
> {
1495 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
1496 // See comment on `<LayoutS as Debug>::fmt` above.
1501 impl<'a
> Layout
<'a
> {
1502 pub fn fields(self) -> &'a FieldsShape
{
1506 pub fn variants(self) -> &'a Variants
{
1510 pub fn abi(self) -> Abi
{
1514 pub fn largest_niche(self) -> Option
<Niche
> {
1515 self.0.0.largest_niche
1518 pub fn align(self) -> AbiAndPrefAlign
{
1522 pub fn size(self) -> Size
{
1526 /// Whether the layout is from a type that implements [`std::marker::PointerLike`].
1528 /// Currently, that means that the type is pointer-sized, pointer-aligned,
1529 /// and has a scalar ABI.
1530 pub fn is_pointer_like(self, data_layout
: &TargetDataLayout
) -> bool
{
1531 self.size() == data_layout
.pointer_size
1532 && self.align().abi
== data_layout
.pointer_align
.abi
1533 && matches
!(self.abi(), Abi
::Scalar(..))
/// The kind of a pointer value, used when describing pointees
/// (see the `safe` field of [`PointeeInfo`]).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data.
    Box { unpin: bool },
}
1547 /// Note that this information is advisory only, and backends are free to ignore it.
1548 /// It can only be used to encode potential optimizations, but no critical information.
1549 #[derive(Copy, Clone, Debug)]
1550 pub struct PointeeInfo
{
1553 pub safe
: Option
<PointerKind
>,
1557 /// Returns `true` if the layout corresponds to an unsized type.
1558 pub fn is_unsized(&self) -> bool
{
1559 self.abi
.is_unsized()
1562 pub fn is_sized(&self) -> bool
{
1566 /// Returns `true` if the type is a ZST and not unsized.
1567 pub fn is_zst(&self) -> bool
{
1569 Abi
::Scalar(_
) | Abi
::ScalarPair(..) | Abi
::Vector { .. }
=> false,
1570 Abi
::Uninhabited
=> self.size
.bytes() == 0,
1571 Abi
::Aggregate { sized }
=> sized
&& self.size
.bytes() == 0,
1576 #[derive(Copy, Clone, Debug)]
1577 pub enum StructKind
{
1578 /// A tuple, closure, or univariant which cannot be coerced to unsized.
1580 /// A univariant, the last field of which may be coerced to unsized.
1582 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
1583 Prefixed(Size
, Align
),