1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 pub use self::Integer
::*;
12 pub use self::Primitive
::*;
14 use session
::{self, DataTypeKind, Session}
;
15 use ty
::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, ReprFlags}
;
17 use syntax
::ast
::{self, FloatTy, IntTy, UintTy}
;
19 use syntax_pos
::DUMMY_SP
;
26 use std
::ops
::{Add, Sub, Mul, AddAssign, Deref, RangeInclusive}
;
28 use ich
::StableHashingContext
;
29 use rustc_data_structures
::stable_hasher
::{HashStable
, StableHasher
,
32 /// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
33 /// for a target, which contains everything needed to compute layouts.
34 pub struct TargetDataLayout
{
41 pub i128_align
: Align
,
44 pub pointer_size
: Size
,
45 pub pointer_align
: Align
,
46 pub aggregate_align
: Align
,
48 /// Alignments for vector types.
49 pub vector_align
: Vec
<(Size
, Align
)>
52 impl Default
for TargetDataLayout
{
53 /// Creates an instance of `TargetDataLayout`.
54 fn default() -> TargetDataLayout
{
57 i1_align
: Align
::from_bits(8, 8).unwrap(),
58 i8_align
: Align
::from_bits(8, 8).unwrap(),
59 i16_align
: Align
::from_bits(16, 16).unwrap(),
60 i32_align
: Align
::from_bits(32, 32).unwrap(),
61 i64_align
: Align
::from_bits(32, 64).unwrap(),
62 i128_align
: Align
::from_bits(32, 64).unwrap(),
63 f32_align
: Align
::from_bits(32, 32).unwrap(),
64 f64_align
: Align
::from_bits(64, 64).unwrap(),
65 pointer_size
: Size
::from_bits(64),
66 pointer_align
: Align
::from_bits(64, 64).unwrap(),
67 aggregate_align
: Align
::from_bits(0, 64).unwrap(),
69 (Size
::from_bits(64), Align
::from_bits(64, 64).unwrap()),
70 (Size
::from_bits(128), Align
::from_bits(128, 128).unwrap())
76 impl TargetDataLayout
{
77 pub fn parse(sess
: &Session
) -> TargetDataLayout
{
78 // Parse a bit count from a string.
79 let parse_bits
= |s
: &str, kind
: &str, cause
: &str| {
80 s
.parse
::<u64>().unwrap_or_else(|err
| {
81 sess
.err(&format
!("invalid {} `{}` for `{}` in \"data-layout\": {}",
82 kind
, s
, cause
, err
));
87 // Parse a size string.
88 let size
= |s
: &str, cause
: &str| {
89 Size
::from_bits(parse_bits(s
, "size", cause
))
92 // Parse an alignment string.
93 let align
= |s
: &[&str], cause
: &str| {
95 sess
.err(&format
!("missing alignment for `{}` in \"data-layout\"", cause
));
97 let abi
= parse_bits(s
[0], "alignment", cause
);
98 let pref
= s
.get(1).map_or(abi
, |pref
| parse_bits(pref
, "alignment", cause
));
99 Align
::from_bits(abi
, pref
).unwrap_or_else(|err
| {
100 sess
.err(&format
!("invalid alignment for `{}` in \"data-layout\": {}",
102 Align
::from_bits(8, 8).unwrap()
106 let mut dl
= TargetDataLayout
::default();
107 let mut i128_align_src
= 64;
108 for spec
in sess
.target
.target
.data_layout
.split("-") {
109 match &spec
.split(":").collect
::<Vec
<_
>>()[..] {
110 &["e"] => dl
.endian
= Endian
::Little
,
111 &["E"] => dl
.endian
= Endian
::Big
,
112 &["a", ref a
..] => dl
.aggregate_align
= align(a
, "a"),
113 &["f32", ref a
..] => dl
.f32_align
= align(a
, "f32"),
114 &["f64", ref a
..] => dl
.f64_align
= align(a
, "f64"),
115 &[p @
"p", s
, ref a
..] | &[p @
"p0", s
, ref a
..] => {
116 dl
.pointer_size
= size(s
, p
);
117 dl
.pointer_align
= align(a
, p
);
119 &[s
, ref a
..] if s
.starts_with("i") => {
120 let bits
= match s
[1..].parse
::<u64>() {
123 size(&s
[1..], "i"); // For the user error.
129 1 => dl
.i1_align
= a
,
130 8 => dl
.i8_align
= a
,
131 16 => dl
.i16_align
= a
,
132 32 => dl
.i32_align
= a
,
133 64 => dl
.i64_align
= a
,
136 if bits
>= i128_align_src
&& bits
<= 128 {
137 // Default alignment for i128 is decided by taking the alignment of
138 // largest-sized i{64...128}.
139 i128_align_src
= bits
;
143 &[s
, ref a
..] if s
.starts_with("v") => {
144 let v_size
= size(&s
[1..], "v");
146 if let Some(v
) = dl
.vector_align
.iter_mut().find(|v
| v
.0 == v_size
) {
150 // No existing entry, add a new one.
151 dl
.vector_align
.push((v_size
, a
));
153 _
=> {}
// Ignore everything else.
157 // Perform consistency checks against the Target information.
158 let endian_str
= match dl
.endian
{
159 Endian
::Little
=> "little",
162 if endian_str
!= sess
.target
.target
.target_endian
{
163 sess
.err(&format
!("inconsistent target specification: \"data-layout\" claims \
164 architecture is {}-endian, while \"target-endian\" is `{}`",
165 endian_str
, sess
.target
.target
.target_endian
));
168 if dl
.pointer_size
.bits().to_string() != sess
.target
.target
.target_pointer_width
{
169 sess
.err(&format
!("inconsistent target specification: \"data-layout\" claims \
170 pointers are {}-bit, while \"target-pointer-width\" is `{}`",
171 dl
.pointer_size
.bits(), sess
.target
.target
.target_pointer_width
));
177 /// Return exclusive upper bound on object size.
179 /// The theoretical maximum object size is defined as the maximum positive `isize` value.
180 /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
181 /// index every address within an object along with one byte past the end, along with allowing
182 /// `isize` to store the difference between any two pointers into an object.
184 /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
185 /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
186 /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
187 /// address space on 64-bit ARMv8 and x86_64.
188 pub fn obj_size_bound(&self) -> u64 {
189 match self.pointer_size
.bits() {
193 bits
=> bug
!("obj_size_bound: unknown pointer bit size {}", bits
)
197 pub fn ptr_sized_integer(&self) -> Integer
{
198 match self.pointer_size
.bits() {
202 bits
=> bug
!("ptr_sized_integer: unknown pointer bit size {}", bits
)
206 pub fn vector_align(&self, vec_size
: Size
) -> Align
{
207 for &(size
, align
) in &self.vector_align
{
208 if size
== vec_size
{
212 // Default to natural alignment, which is what LLVM does.
213 // That is, use the size, rounded up to a power of 2.
214 let align
= vec_size
.bytes().next_power_of_two();
215 Align
::from_bytes(align
, align
).unwrap()
219 pub trait HasDataLayout
: Copy
{
220 fn data_layout(&self) -> &TargetDataLayout
;
223 impl<'a
> HasDataLayout
for &'a TargetDataLayout
{
224 fn data_layout(&self) -> &TargetDataLayout
{
229 /// Endianness of the target, which must match cfg(target-endian).
230 #[derive(Copy, Clone)]
236 /// Size of a type in bytes.
237 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
243 pub fn from_bits(bits
: u64) -> Size
{
244 // Avoid potential overflow from `bits + 7`.
245 Size
::from_bytes(bits
/ 8 + ((bits
% 8) + 7) / 8)
248 pub fn from_bytes(bytes
: u64) -> Size
{
249 if bytes
>= (1 << 61) {
250 bug
!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes
)
257 pub fn bytes(self) -> u64 {
261 pub fn bits(self) -> u64 {
265 pub fn abi_align(self, align
: Align
) -> Size
{
266 let mask
= align
.abi() - 1;
267 Size
::from_bytes((self.bytes() + mask
) & !mask
)
270 pub fn is_abi_aligned(self, align
: Align
) -> bool
{
271 let mask
= align
.abi() - 1;
272 self.bytes() & mask
== 0
275 pub fn checked_add
<C
: HasDataLayout
>(self, offset
: Size
, cx
: C
) -> Option
<Size
> {
276 let dl
= cx
.data_layout();
278 // Each Size is less than dl.obj_size_bound(), so the sum is
279 // also less than 1 << 62 (and therefore can't overflow).
280 let bytes
= self.bytes() + offset
.bytes();
282 if bytes
< dl
.obj_size_bound() {
283 Some(Size
::from_bytes(bytes
))
289 pub fn checked_mul
<C
: HasDataLayout
>(self, count
: u64, cx
: C
) -> Option
<Size
> {
290 let dl
= cx
.data_layout();
292 match self.bytes().checked_mul(count
) {
293 Some(bytes
) if bytes
< dl
.obj_size_bound() => {
294 Some(Size
::from_bytes(bytes
))
301 // Panicking addition, subtraction and multiplication for convenience.
302 // Avoid during layout computation, return `LayoutError` instead.
306 fn add(self, other
: Size
) -> Size
{
307 // Each Size is less than 1 << 61, so the sum is
308 // less than 1 << 62 (and therefore can't overflow).
309 Size
::from_bytes(self.bytes() + other
.bytes())
315 fn sub(self, other
: Size
) -> Size
{
316 // Each Size is less than 1 << 61, so an underflow
317 // would result in a value larger than 1 << 61,
318 // which Size::from_bytes will catch for us.
319 Size
::from_bytes(self.bytes() - other
.bytes())
323 impl Mul
<u64> for Size
{
325 fn mul(self, count
: u64) -> Size
{
326 match self.bytes().checked_mul(count
) {
327 Some(bytes
) => Size
::from_bytes(bytes
),
329 bug
!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count
)
335 impl AddAssign
for Size
{
336 fn add_assign(&mut self, other
: Size
) {
337 *self = *self + other
;
341 /// Alignment of a type in bytes, both ABI-mandated and preferred.
342 /// Each field is a power of two, giving the alignment a maximum
343 /// value of 2<sup>(2<sup>8</sup> - 1)</sup>, which is limited by LLVM to a i32, with
344 /// a maximum capacity of 2<sup>31</sup> - 1 or 2147483647.
345 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
352 pub fn from_bits(abi
: u64, pref
: u64) -> Result
<Align
, String
> {
353 Align
::from_bytes(Size
::from_bits(abi
).bytes(),
354 Size
::from_bits(pref
).bytes())
357 pub fn from_bytes(abi
: u64, pref
: u64) -> Result
<Align
, String
> {
358 let log2
= |align
: u64| {
359 // Treat an alignment of 0 bytes like 1-byte alignment.
364 let mut bytes
= align
;
366 while (bytes
& 1) == 0 {
371 Err(format
!("`{}` is not a power of 2", align
))
373 Err(format
!("`{}` is too large", align
))
385 pub fn abi(self) -> u64 {
389 pub fn pref(self) -> u64 {
393 pub fn abi_bits(self) -> u64 {
397 pub fn pref_bits(self) -> u64 {
401 pub fn min(self, other
: Align
) -> Align
{
403 abi
: cmp
::min(self.abi
, other
.abi
),
404 pref
: cmp
::min(self.pref
, other
.pref
),
408 pub fn max(self, other
: Align
) -> Align
{
410 abi
: cmp
::max(self.abi
, other
.abi
),
411 pref
: cmp
::max(self.pref
, other
.pref
),
416 /// Integers, also used for enum discriminants.
417 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
426 impl<'a
, 'tcx
> Integer
{
427 pub fn size(&self) -> Size
{
429 I8
=> Size
::from_bytes(1),
430 I16
=> Size
::from_bytes(2),
431 I32
=> Size
::from_bytes(4),
432 I64
=> Size
::from_bytes(8),
433 I128
=> Size
::from_bytes(16),
437 pub fn align
<C
: HasDataLayout
>(&self, cx
: C
) -> Align
{
438 let dl
= cx
.data_layout();
445 I128
=> dl
.i128_align
,
449 pub fn to_ty(&self, tcx
: TyCtxt
<'a
, 'tcx
, 'tcx
>, signed
: bool
) -> Ty
<'tcx
> {
450 match (*self, signed
) {
451 (I8
, false) => tcx
.types
.u8,
452 (I16
, false) => tcx
.types
.u16,
453 (I32
, false) => tcx
.types
.u32,
454 (I64
, false) => tcx
.types
.u64,
455 (I128
, false) => tcx
.types
.u128
,
456 (I8
, true) => tcx
.types
.i8,
457 (I16
, true) => tcx
.types
.i16,
458 (I32
, true) => tcx
.types
.i32,
459 (I64
, true) => tcx
.types
.i64,
460 (I128
, true) => tcx
.types
.i128
,
464 /// Find the smallest Integer type which can represent the signed value.
465 pub fn fit_signed(x
: i128
) -> Integer
{
467 -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8
,
468 -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16
,
469 -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32
,
470 -0x8000_0000_0000_0000...0x7fff_ffff_ffff_ffff => I64
,
475 /// Find the smallest Integer type which can represent the unsigned value.
476 pub fn fit_unsigned(x
: u128
) -> Integer
{
478 0...0x0000_0000_0000_00ff => I8
,
479 0...0x0000_0000_0000_ffff => I16
,
480 0...0x0000_0000_ffff_ffff => I32
,
481 0...0xffff_ffff_ffff_ffff => I64
,
486 /// Find the smallest integer with the given alignment.
487 pub fn for_abi_align
<C
: HasDataLayout
>(cx
: C
, align
: Align
) -> Option
<Integer
> {
488 let dl
= cx
.data_layout();
490 let wanted
= align
.abi();
491 for &candidate
in &[I8
, I16
, I32
, I64
, I128
] {
492 if wanted
== candidate
.align(dl
).abi() && wanted
== candidate
.size().bytes() {
493 return Some(candidate
);
499 /// Find the largest integer with the given alignment or less.
500 pub fn approximate_abi_align
<C
: HasDataLayout
>(cx
: C
, align
: Align
) -> Integer
{
501 let dl
= cx
.data_layout();
503 let wanted
= align
.abi();
504 // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
505 for &candidate
in &[I64
, I32
, I16
] {
506 if wanted
>= candidate
.align(dl
).abi() && wanted
>= candidate
.size().bytes() {
513 /// Get the Integer type from an attr::IntType.
514 pub fn from_attr
<C
: HasDataLayout
>(cx
: C
, ity
: attr
::IntType
) -> Integer
{
515 let dl
= cx
.data_layout();
518 attr
::SignedInt(IntTy
::I8
) | attr
::UnsignedInt(UintTy
::U8
) => I8
,
519 attr
::SignedInt(IntTy
::I16
) | attr
::UnsignedInt(UintTy
::U16
) => I16
,
520 attr
::SignedInt(IntTy
::I32
) | attr
::UnsignedInt(UintTy
::U32
) => I32
,
521 attr
::SignedInt(IntTy
::I64
) | attr
::UnsignedInt(UintTy
::U64
) => I64
,
522 attr
::SignedInt(IntTy
::I128
) | attr
::UnsignedInt(UintTy
::U128
) => I128
,
523 attr
::SignedInt(IntTy
::Isize
) | attr
::UnsignedInt(UintTy
::Usize
) => {
524 dl
.ptr_sized_integer()
529 /// Find the appropriate Integer type and signedness for the given
530 /// signed discriminant range and #[repr] attribute.
531 /// N.B.: u128 values above i128::MAX will be treated as signed, but
532 /// that shouldn't affect anything, other than maybe debuginfo.
533 fn repr_discr(tcx
: TyCtxt
<'a
, 'tcx
, 'tcx
>,
539 // Theoretically, negative values could be larger in unsigned representation
540 // than the unsigned representation of the signed minimum. However, if there
541 // are any negative values, the only valid unsigned representation is u128
542 // which can fit all i128 values, so the result remains unaffected.
543 let unsigned_fit
= Integer
::fit_unsigned(cmp
::max(min
as u128
, max
as u128
));
544 let signed_fit
= cmp
::max(Integer
::fit_signed(min
), Integer
::fit_signed(max
));
546 let mut min_from_extern
= None
;
547 let min_default
= I8
;
549 if let Some(ity
) = repr
.int
{
550 let discr
= Integer
::from_attr(tcx
, ity
);
551 let fit
= if ity
.is_signed() { signed_fit }
else { unsigned_fit }
;
553 bug
!("Integer::repr_discr: `#[repr]` hint too small for \
554 discriminant range of enum `{}", ty
)
556 return (discr
, ity
.is_signed());
560 match &tcx
.sess
.target
.target
.arch
[..] {
561 // WARNING: the ARM EABI has two variants; the one corresponding
562 // to `at_least == I32` appears to be used on Linux and NetBSD,
563 // but some systems may use the variant corresponding to no
564 // lower bound. However, we don't run on those yet...?
565 "arm" => min_from_extern
= Some(I32
),
566 _
=> min_from_extern
= Some(I32
),
570 let at_least
= min_from_extern
.unwrap_or(min_default
);
572 // If there are no negative values, we can use the unsigned fit.
574 (cmp
::max(unsigned_fit
, at_least
), false)
576 (cmp
::max(signed_fit
, at_least
), true)
581 /// Fundamental unit of memory access and layout.
582 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
584 /// The `bool` is the signedness of the `Integer` type.
586 /// One would think we would not care about such details this low down,
587 /// but some ABIs are described in terms of C types and ISAs where the
588 /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
589 /// a negative integer passed by zero-extension will appear positive in
590 /// the callee, and most operations on it will produce the wrong values.
597 impl<'a
, 'tcx
> Primitive
{
598 pub fn size
<C
: HasDataLayout
>(self, cx
: C
) -> Size
{
599 let dl
= cx
.data_layout();
602 Int(i
, _
) => i
.size(),
603 F32
=> Size
::from_bits(32),
604 F64
=> Size
::from_bits(64),
605 Pointer
=> dl
.pointer_size
609 pub fn align
<C
: HasDataLayout
>(self, cx
: C
) -> Align
{
610 let dl
= cx
.data_layout();
613 Int(i
, _
) => i
.align(dl
),
616 Pointer
=> dl
.pointer_align
620 pub fn to_ty(&self, tcx
: TyCtxt
<'a
, 'tcx
, 'tcx
>) -> Ty
<'tcx
> {
622 Int(i
, signed
) => i
.to_ty(tcx
, signed
),
623 F32
=> tcx
.types
.f32,
624 F64
=> tcx
.types
.f64,
625 Pointer
=> tcx
.mk_mut_ptr(tcx
.mk_nil()),
630 /// Information about one scalar component of a Rust type.
631 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
633 pub value
: Primitive
,
635 /// Inclusive wrap-around range of valid values, that is, if
636 /// min > max, it represents min..=u128::MAX followed by 0..=max.
637 // FIXME(eddyb) always use the shortest range, e.g. by finding
638 // the largest space between two consecutive valid values and
639 // taking everything else as the (shortest) valid range.
640 pub valid_range
: RangeInclusive
<u128
>,
644 pub fn is_bool(&self) -> bool
{
645 if let Int(I8
, _
) = self.value
{
646 self.valid_range
== (0..=1)
653 /// The first half of a fat pointer.
655 /// - For a trait object, this is the address of the box.
656 /// - For a slice, this is the base address.
657 pub const FAT_PTR_ADDR
: usize = 0;
659 /// The second half of a fat pointer.
661 /// - For a trait object, this is the address of the vtable.
662 /// - For a slice, this is the length.
663 pub const FAT_PTR_EXTRA
: usize = 1;
665 /// Describes how the fields of a type are located in memory.
666 #[derive(PartialEq, Eq, Hash, Debug)]
667 pub enum FieldPlacement
{
668 /// All fields start at no offset. The `usize` is the field count.
671 /// Array/vector-like placement, with all fields of identical types.
677 /// Struct-like placement, with precomputed offsets.
679 /// Fields are guaranteed to not overlap, but note that gaps
680 /// before, between and after all the fields are NOT always
681 /// padding, and as such their contents may not be discarded.
682 /// For example, enum variants leave a gap at the start,
683 /// where the discriminant field in the enum layout goes.
685 /// Offsets for the first byte of each field,
686 /// ordered to match the source definition order.
687 /// This vector does not go in increasing order.
688 // FIXME(eddyb) use small vector optimization for the common case.
691 /// Maps source order field indices to memory order indices,
692 /// depending how fields were permuted.
693 // FIXME(camlorn) also consider small vector optimization here.
694 memory_index
: Vec
<u32>
698 impl FieldPlacement
{
699 pub fn count(&self) -> usize {
701 FieldPlacement
::Union(count
) => count
,
702 FieldPlacement
::Array { count, .. }
=> {
703 let usize_count
= count
as usize;
704 assert_eq
!(usize_count
as u64, count
);
707 FieldPlacement
::Arbitrary { ref offsets, .. }
=> offsets
.len()
711 pub fn offset(&self, i
: usize) -> Size
{
713 FieldPlacement
::Union(_
) => Size
::from_bytes(0),
714 FieldPlacement
::Array { stride, count }
=> {
719 FieldPlacement
::Arbitrary { ref offsets, .. }
=> offsets
[i
]
723 pub fn memory_index(&self, i
: usize) -> usize {
725 FieldPlacement
::Union(_
) |
726 FieldPlacement
::Array { .. }
=> i
,
727 FieldPlacement
::Arbitrary { ref memory_index, .. }
=> {
728 let r
= memory_index
[i
];
729 assert_eq
!(r
as usize as u32, r
);
735 /// Get source indices of the fields by increasing offsets.
737 pub fn index_by_increasing_offset
<'a
>(&'a
self) -> impl iter
::Iterator
<Item
=usize>+'a
{
738 let mut inverse_small
= [0u8; 64];
739 let mut inverse_big
= vec
![];
740 let use_small
= self.count() <= inverse_small
.len();
742 // We have to write this logic twice in order to keep the array small.
743 if let FieldPlacement
::Arbitrary { ref memory_index, .. }
= *self {
745 for i
in 0..self.count() {
746 inverse_small
[memory_index
[i
] as usize] = i
as u8;
749 inverse_big
= vec
![0; self.count()];
750 for i
in 0..self.count() {
751 inverse_big
[memory_index
[i
] as usize] = i
as u32;
756 (0..self.count()).map(move |i
| {
758 FieldPlacement
::Union(_
) |
759 FieldPlacement
::Array { .. }
=> i
,
760 FieldPlacement
::Arbitrary { .. }
=> {
761 if use_small { inverse_small[i] as usize }
762 else { inverse_big[i] as usize }
769 /// Describes how values of the type are passed by target ABIs,
770 /// in terms of categories of C types there are ABI rules for.
771 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
775 ScalarPair(Scalar
, Scalar
),
781 /// If true, the size is exact, otherwise it's only a lower bound.
787 /// Returns true if the layout corresponds to an unsized type.
788 pub fn is_unsized(&self) -> bool
{
792 Abi
::ScalarPair(..) |
793 Abi
::Vector { .. }
=> false,
794 Abi
::Aggregate { sized }
=> !sized
799 #[derive(PartialEq, Eq, Hash, Debug)]
801 /// Single enum variants, structs/tuples, unions, and all non-ADTs.
806 /// General-case enums: for each case there is a struct, and they all have
807 /// all space reserved for the discriminant, and their first field starts
808 /// at a non-0 offset, after where the discriminant would go.
811 variants
: Vec
<LayoutDetails
>,
814 /// Multiple cases distinguished by a niche (values invalid for a type):
815 /// the variant `dataful_variant` contains a niche at an arbitrary
816 /// offset (field 0 of the enum), which for a variant with discriminant
817 /// `d` is set to `(d - niche_variants.start).wrapping_add(niche_start)`.
819 /// For example, `Option<(usize, &T)>` is represented such that
820 /// `None` has a null pointer for the second tuple field, and
821 /// `Some` is the identity function (with a non-null reference).
823 dataful_variant
: usize,
824 niche_variants
: RangeInclusive
<usize>,
827 variants
: Vec
<LayoutDetails
>,
831 #[derive(Copy, Clone, Debug)]
832 pub enum LayoutError
<'tcx
> {
834 SizeOverflow(Ty
<'tcx
>)
837 impl<'tcx
> fmt
::Display
for LayoutError
<'tcx
> {
838 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
840 LayoutError
::Unknown(ty
) => {
841 write
!(f
, "the type `{:?}` has an unknown layout", ty
)
843 LayoutError
::SizeOverflow(ty
) => {
844 write
!(f
, "the type `{:?}` is too big for the current architecture", ty
)
850 #[derive(PartialEq, Eq, Hash, Debug)]
851 pub struct LayoutDetails
{
852 pub variants
: Variants
,
853 pub fields
: FieldPlacement
,
860 fn scalar
<C
: HasDataLayout
>(cx
: C
, scalar
: Scalar
) -> Self {
861 let size
= scalar
.value
.size(cx
);
862 let align
= scalar
.value
.align(cx
);
864 variants
: Variants
::Single { index: 0 }
,
865 fields
: FieldPlacement
::Union(0),
866 abi
: Abi
::Scalar(scalar
),
872 fn uninhabited(field_count
: usize) -> Self {
873 let align
= Align
::from_bytes(1, 1).unwrap();
875 variants
: Variants
::Single { index: 0 }
,
876 fields
: FieldPlacement
::Union(field_count
),
877 abi
: Abi
::Uninhabited
,
879 size
: Size
::from_bytes(0)
884 fn layout_raw
<'a
, 'tcx
>(tcx
: TyCtxt
<'a
, 'tcx
, 'tcx
>,
885 query
: ty
::ParamEnvAnd
<'tcx
, Ty
<'tcx
>>)
886 -> Result
<&'tcx LayoutDetails
, LayoutError
<'tcx
>>
888 let (param_env
, ty
) = query
.into_parts();
890 let rec_limit
= tcx
.sess
.recursion_limit
.get();
891 let depth
= tcx
.layout_depth
.get();
892 if depth
> rec_limit
{
894 &format
!("overflow representing the type `{}`", ty
));
897 tcx
.layout_depth
.set(depth
+1);
898 let cx
= LayoutCx { tcx, param_env }
;
899 let layout
= cx
.layout_raw_uncached(ty
);
900 tcx
.layout_depth
.set(depth
);
905 pub fn provide(providers
: &mut ty
::maps
::Providers
) {
906 *providers
= ty
::maps
::Providers
{
912 #[derive(Copy, Clone)]
913 pub struct LayoutCx
<'tcx
, C
> {
915 pub param_env
: ty
::ParamEnv
<'tcx
>
918 impl<'a
, 'tcx
> LayoutCx
<'tcx
, TyCtxt
<'a
, 'tcx
, 'tcx
>> {
919 fn layout_raw_uncached(self, ty
: Ty
<'tcx
>)
920 -> Result
<&'tcx LayoutDetails
, LayoutError
<'tcx
>> {
922 let param_env
= self.param_env
;
923 let dl
= self.data_layout();
924 let scalar_unit
= |value
: Primitive
| {
925 let bits
= value
.size(dl
).bits();
926 assert
!(bits
<= 128);
929 valid_range
: 0..=(!0 >> (128 - bits
))
932 let scalar
= |value
: Primitive
| {
933 tcx
.intern_layout(LayoutDetails
::scalar(self, scalar_unit(value
)))
935 let scalar_pair
= |a
: Scalar
, b
: Scalar
| {
936 let align
= a
.value
.align(dl
).max(b
.value
.align(dl
)).max(dl
.aggregate_align
);
937 let b_offset
= a
.value
.size(dl
).abi_align(b
.value
.align(dl
));
938 let size
= (b_offset
+ b
.value
.size(dl
)).abi_align(align
);
940 variants
: Variants
::Single { index: 0 }
,
941 fields
: FieldPlacement
::Arbitrary
{
942 offsets
: vec
![Size
::from_bytes(0), b_offset
],
943 memory_index
: vec
![0, 1]
945 abi
: Abi
::ScalarPair(a
, b
),
951 #[derive(Copy, Clone, Debug)]
953 /// A tuple, closure, or univariant which cannot be coerced to unsized.
955 /// A univariant, the last field of which may be coerced to unsized.
957 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
958 Prefixed(Size
, Align
),
960 let univariant_uninterned
= |fields
: &[TyLayout
], repr
: &ReprOptions
, kind
| {
961 let packed
= repr
.packed();
962 if packed
&& repr
.align
> 0 {
963 bug
!("struct cannot be packed and aligned");
966 let mut align
= if packed
{
972 let mut sized
= true;
973 let mut offsets
= vec
![Size
::from_bytes(0); fields
.len()];
974 let mut inverse_memory_index
: Vec
<u32> = (0..fields
.len() as u32).collect();
976 // Anything with repr(C) or repr(packed) doesn't optimize.
977 let mut optimize
= (repr
.flags
& ReprFlags
::IS_UNOPTIMISABLE
).is_empty();
978 if let StructKind
::Prefixed(_
, align
) = kind
{
979 optimize
&= align
.abi() == 1;
983 let end
= if let StructKind
::MaybeUnsized
= kind
{
988 let optimizing
= &mut inverse_memory_index
[..end
];
990 StructKind
::AlwaysSized
|
991 StructKind
::MaybeUnsized
=> {
992 optimizing
.sort_by_key(|&x
| {
993 // Place ZSTs first to avoid "interesting offsets",
994 // especially with only one or two non-ZST fields.
995 let f
= &fields
[x
as usize];
996 (!f
.is_zst(), cmp
::Reverse(f
.align
.abi()))
999 StructKind
::Prefixed(..) => {
1000 optimizing
.sort_by_key(|&x
| fields
[x
as usize].align
.abi());
1005 // inverse_memory_index holds field indices by increasing memory offset.
1006 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
1007 // We now write field offsets to the corresponding offset slot;
1008 // field 5 with offset 0 puts 0 in offsets[5].
1009 // At the bottom of this function, we use inverse_memory_index to produce memory_index.
1011 let mut offset
= Size
::from_bytes(0);
1013 if let StructKind
::Prefixed(prefix_size
, prefix_align
) = kind
{
1015 align
= align
.max(prefix_align
);
1017 offset
= prefix_size
.abi_align(prefix_align
);
1020 for &i
in &inverse_memory_index
{
1021 let field
= fields
[i
as usize];
1023 bug
!("univariant: field #{} of `{}` comes after unsized field",
1027 if field
.abi
== Abi
::Uninhabited
{
1028 return Ok(LayoutDetails
::uninhabited(fields
.len()));
1031 if field
.is_unsized() {
1035 // Invariant: offset < dl.obj_size_bound() <= 1<<61
1037 offset
= offset
.abi_align(field
.align
);
1038 align
= align
.max(field
.align
);
1041 debug
!("univariant offset: {:?} field: {:#?}", offset
, field
);
1042 offsets
[i
as usize] = offset
;
1044 offset
= offset
.checked_add(field
.size
, dl
)
1045 .ok_or(LayoutError
::SizeOverflow(ty
))?
;
1049 let repr_align
= repr
.align
as u64;
1050 align
= align
.max(Align
::from_bytes(repr_align
, repr_align
).unwrap());
1051 debug
!("univariant repr_align: {:?}", repr_align
);
1054 debug
!("univariant min_size: {:?}", offset
);
1055 let min_size
= offset
;
1057 // As stated above, inverse_memory_index holds field indices by increasing offset.
1058 // This makes it an already-sorted view of the offsets vec.
1059 // To invert it, consider:
1060 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
1061 // Field 5 would be the first element, so memory_index is i:
1062 // Note: if we didn't optimize, it's already right.
1064 let mut memory_index
;
1066 memory_index
= vec
![0; inverse_memory_index
.len()];
1068 for i
in 0..inverse_memory_index
.len() {
1069 memory_index
[inverse_memory_index
[i
] as usize] = i
as u32;
1072 memory_index
= inverse_memory_index
;
1075 let size
= min_size
.abi_align(align
);
1076 let mut abi
= Abi
::Aggregate { sized }
;
1078 // Unpack newtype ABIs and find scalar pairs.
1079 if sized
&& size
.bytes() > 0 {
1080 // All other fields must be ZSTs, and we need them to all start at 0.
1081 let mut zst_offsets
=
1082 offsets
.iter().enumerate().filter(|&(i
, _
)| fields
[i
].is_zst());
1083 if zst_offsets
.all(|(_
, o
)| o
.bytes() == 0) {
1084 let mut non_zst_fields
=
1085 fields
.iter().enumerate().filter(|&(_
, f
)| !f
.is_zst());
1087 match (non_zst_fields
.next(), non_zst_fields
.next(), non_zst_fields
.next()) {
1088 // We have exactly one non-ZST field.
1089 (Some((i
, field
)), None
, None
) => {
1090 // Field fills the struct and it has a scalar or scalar pair ABI.
1091 if offsets
[i
].bytes() == 0 &&
1092 align
.abi() == field
.align
.abi() &&
1093 size
== field
.size
{
1095 // For plain scalars, or vectors of them, we can't unpack
1096 // newtypes for `#[repr(C)]`, as that affects C ABIs.
1097 Abi
::Scalar(_
) | Abi
::Vector { .. }
if optimize
=> {
1098 abi
= field
.abi
.clone();
1100 // But scalar pairs are Rust-specific and get
1101 // treated as aggregates by C ABIs anyway.
1102 Abi
::ScalarPair(..) => {
1103 abi
= field
.abi
.clone();
1110 // Two non-ZST fields, and they're both scalars.
1111 (Some((i
, &TyLayout
{
1112 details
: &LayoutDetails { abi: Abi::Scalar(ref a), .. }
, ..
1113 })), Some((j
, &TyLayout
{
1114 details
: &LayoutDetails { abi: Abi::Scalar(ref b), .. }
, ..
1116 // Order by the memory placement, not source order.
1117 let ((i
, a
), (j
, b
)) = if offsets
[i
] < offsets
[j
] {
1122 let pair
= scalar_pair(a
.clone(), b
.clone());
1123 let pair_offsets
= match pair
.fields
{
1124 FieldPlacement
::Arbitrary
{
1128 assert_eq
!(memory_index
, &[0, 1]);
1133 if offsets
[i
] == pair_offsets
[0] &&
1134 offsets
[j
] == pair_offsets
[1] &&
1135 align
== pair
.align
&&
1137 // We can use `ScalarPair` only when it matches our
1138 // already computed layout (including `#[repr(C)]`).
1149 variants
: Variants
::Single { index: 0 }
,
1150 fields
: FieldPlacement
::Arbitrary
{
1159 let univariant
= |fields
: &[TyLayout
], repr
: &ReprOptions
, kind
| {
1160 Ok(tcx
.intern_layout(univariant_uninterned(fields
, repr
, kind
)?
))
1162 assert
!(!ty
.has_infer_types());
1167 tcx
.intern_layout(LayoutDetails
::scalar(self, Scalar
{
1168 value
: Int(I8
, false),
1173 tcx
.intern_layout(LayoutDetails
::scalar(self, Scalar
{
1174 value
: Int(I32
, false),
1175 valid_range
: 0..=0x10FFFF
1179 scalar(Int(Integer
::from_attr(dl
, attr
::SignedInt(ity
)), true))
1181 ty
::TyUint(ity
) => {
1182 scalar(Int(Integer
::from_attr(dl
, attr
::UnsignedInt(ity
)), false))
1184 ty
::TyFloat(FloatTy
::F32
) => scalar(F32
),
1185 ty
::TyFloat(FloatTy
::F64
) => scalar(F64
),
1187 let mut ptr
= scalar_unit(Pointer
);
1188 ptr
.valid_range
.start
= 1;
1189 tcx
.intern_layout(LayoutDetails
::scalar(self, ptr
))
1194 tcx
.intern_layout(LayoutDetails
::uninhabited(0))
1197 // Potentially-fat pointers.
1198 ty
::TyRef(_
, ty
::TypeAndMut { ty: pointee, .. }
) |
1199 ty
::TyRawPtr(ty
::TypeAndMut { ty: pointee, .. }
) => {
1200 let mut data_ptr
= scalar_unit(Pointer
);
1201 if !ty
.is_unsafe_ptr() {
1202 data_ptr
.valid_range
.start
= 1;
1205 let pointee
= tcx
.normalize_associated_type_in_env(&pointee
, param_env
);
1206 if pointee
.is_sized(tcx
, param_env
, DUMMY_SP
) {
1207 return Ok(tcx
.intern_layout(LayoutDetails
::scalar(self, data_ptr
)));
1210 let unsized_part
= tcx
.struct_tail(pointee
);
1211 let metadata
= match unsized_part
.sty
{
1212 ty
::TyForeign(..) => {
1213 return Ok(tcx
.intern_layout(LayoutDetails
::scalar(self, data_ptr
)));
1215 ty
::TySlice(_
) | ty
::TyStr
=> {
1216 scalar_unit(Int(dl
.ptr_sized_integer(), false))
1218 ty
::TyDynamic(..) => {
1219 let mut vtable
= scalar_unit(Pointer
);
1220 vtable
.valid_range
.start
= 1;
1223 _
=> return Err(LayoutError
::Unknown(unsized_part
))
1226 // Effectively a (ptr, meta) tuple.
1227 tcx
.intern_layout(scalar_pair(data_ptr
, metadata
))
1230 // Arrays and slices.
1231 ty
::TyArray(element
, mut count
) => {
1232 if count
.has_projections() {
1233 count
= tcx
.normalize_associated_type_in_env(&count
, param_env
);
1234 if count
.has_projections() {
1235 return Err(LayoutError
::Unknown(ty
));
1239 let element
= self.layout_of(element
)?
;
1240 let count
= count
.val
.to_const_int().unwrap().to_u64().unwrap();
1241 let size
= element
.size
.checked_mul(count
, dl
)
1242 .ok_or(LayoutError
::SizeOverflow(ty
))?
;
1244 tcx
.intern_layout(LayoutDetails
{
1245 variants
: Variants
::Single { index: 0 }
,
1246 fields
: FieldPlacement
::Array
{
1247 stride
: element
.size
,
1250 abi
: Abi
::Aggregate { sized: true }
,
1251 align
: element
.align
,
1255 ty
::TySlice(element
) => {
1256 let element
= self.layout_of(element
)?
;
1257 tcx
.intern_layout(LayoutDetails
{
1258 variants
: Variants
::Single { index: 0 }
,
1259 fields
: FieldPlacement
::Array
{
1260 stride
: element
.size
,
1263 abi
: Abi
::Aggregate { sized: false }
,
1264 align
: element
.align
,
1265 size
: Size
::from_bytes(0)
1269 tcx
.intern_layout(LayoutDetails
{
1270 variants
: Variants
::Single { index: 0 }
,
1271 fields
: FieldPlacement
::Array
{
1272 stride
: Size
::from_bytes(1),
1275 abi
: Abi
::Aggregate { sized: false }
,
1277 size
: Size
::from_bytes(0)
1282 ty
::TyFnDef(..) => {
1283 univariant(&[], &ReprOptions
::default(), StructKind
::AlwaysSized
)?
1285 ty
::TyDynamic(..) | ty
::TyForeign(..) => {
1286 let mut unit
= univariant_uninterned(&[], &ReprOptions
::default(),
1287 StructKind
::AlwaysSized
)?
;
1289 Abi
::Aggregate { ref mut sized }
=> *sized
= false,
1292 tcx
.intern_layout(unit
)
1295 // Tuples, generators and closures.
1296 ty
::TyGenerator(def_id
, ref substs
, _
) => {
1297 let tys
= substs
.field_tys(def_id
, tcx
);
1298 univariant(&tys
.map(|ty
| self.layout_of(ty
)).collect
::<Result
<Vec
<_
>, _
>>()?
,
1299 &ReprOptions
::default(),
1300 StructKind
::AlwaysSized
)?
1303 ty
::TyClosure(def_id
, ref substs
) => {
1304 let tys
= substs
.upvar_tys(def_id
, tcx
);
1305 univariant(&tys
.map(|ty
| self.layout_of(ty
)).collect
::<Result
<Vec
<_
>, _
>>()?
,
1306 &ReprOptions
::default(),
1307 StructKind
::AlwaysSized
)?
1310 ty
::TyTuple(tys
, _
) => {
1311 let kind
= if tys
.len() == 0 {
1312 StructKind
::AlwaysSized
1314 StructKind
::MaybeUnsized
1317 univariant(&tys
.iter().map(|ty
| self.layout_of(ty
)).collect
::<Result
<Vec
<_
>, _
>>()?
,
1318 &ReprOptions
::default(), kind
)?
1321 // SIMD vector types.
1322 ty
::TyAdt(def
, ..) if def
.repr
.simd() => {
1323 let element
= self.layout_of(ty
.simd_type(tcx
))?
;
1324 let count
= ty
.simd_size(tcx
) as u64;
1326 let scalar
= match element
.abi
{
1327 Abi
::Scalar(ref scalar
) => scalar
.clone(),
1329 tcx
.sess
.fatal(&format
!("monomorphising SIMD type `{}` with \
1330 a non-machine element type `{}`",
1334 let size
= element
.size
.checked_mul(count
, dl
)
1335 .ok_or(LayoutError
::SizeOverflow(ty
))?
;
1336 let align
= dl
.vector_align(size
);
1337 let size
= size
.abi_align(align
);
1339 tcx
.intern_layout(LayoutDetails
{
1340 variants
: Variants
::Single { index: 0 }
,
1341 fields
: FieldPlacement
::Array
{
1342 stride
: element
.size
,
1355 ty
::TyAdt(def
, substs
) => {
1356 // Cache the field layouts.
1357 let variants
= def
.variants
.iter().map(|v
| {
1358 v
.fields
.iter().map(|field
| {
1359 self.layout_of(field
.ty(tcx
, substs
))
1360 }).collect
::<Result
<Vec
<_
>, _
>>()
1361 }).collect
::<Result
<Vec
<_
>, _
>>()?
;
1364 let packed
= def
.repr
.packed();
1365 if packed
&& def
.repr
.align
> 0 {
1366 bug
!("Union cannot be packed and aligned");
1369 let mut align
= if def
.repr
.packed() {
1375 if def
.repr
.align
> 0 {
1376 let repr_align
= def
.repr
.align
as u64;
1378 Align
::from_bytes(repr_align
, repr_align
).unwrap());
1381 let mut size
= Size
::from_bytes(0);
1382 for field
in &variants
[0] {
1383 assert
!(!field
.is_unsized());
1386 align
= align
.max(field
.align
);
1388 size
= cmp
::max(size
, field
.size
);
1391 return Ok(tcx
.intern_layout(LayoutDetails
{
1392 variants
: Variants
::Single { index: 0 }
,
1393 fields
: FieldPlacement
::Union(variants
[0].len()),
1394 abi
: Abi
::Aggregate { sized: true }
,
1396 size
: size
.abi_align(align
)
1400 let (inh_first
, inh_second
) = {
1401 let mut inh_variants
= (0..variants
.len()).filter(|&v
| {
1402 variants
[v
].iter().all(|f
| f
.abi
!= Abi
::Uninhabited
)
1404 (inh_variants
.next(), inh_variants
.next())
1406 if inh_first
.is_none() {
1407 // Uninhabited because it has no variants, or only uninhabited ones.
1408 return Ok(tcx
.intern_layout(LayoutDetails
::uninhabited(0)));
1411 let is_struct
= !def
.is_enum() ||
1412 // Only one variant is inhabited.
1413 (inh_second
.is_none() &&
1414 // Representation optimizations are allowed.
1415 !def
.repr
.inhibit_enum_layout_opt() &&
1416 // Inhabited variant either has data ...
1417 (!variants
[inh_first
.unwrap()].is_empty() ||
1418 // ... or there other, uninhabited, variants.
1419 variants
.len() > 1));
1421 // Struct, or univariant enum equivalent to a struct.
1422 // (Typechecking will reject discriminant-sizing attrs.)
1424 let v
= inh_first
.unwrap();
1425 let kind
= if def
.is_enum() || variants
[v
].len() == 0 {
1426 StructKind
::AlwaysSized
1428 let param_env
= tcx
.param_env(def
.did
);
1429 let last_field
= def
.variants
[v
].fields
.last().unwrap();
1430 let always_sized
= tcx
.type_of(last_field
.did
)
1431 .is_sized(tcx
, param_env
, DUMMY_SP
);
1432 if !always_sized { StructKind::MaybeUnsized }
1433 else { StructKind::AlwaysSized }
1436 let mut st
= univariant_uninterned(&variants
[v
], &def
.repr
, kind
)?
;
1437 st
.variants
= Variants
::Single { index: v }
;
1438 // Exclude 0 from the range of a newtype ABI NonZero<T>.
1439 if Some(def
.did
) == self.tcx
.lang_items().non_zero() {
1441 Abi
::Scalar(ref mut scalar
) |
1442 Abi
::ScalarPair(ref mut scalar
, _
) => {
1443 if scalar
.valid_range
.start
== 0 {
1444 scalar
.valid_range
.start
= 1;
1450 return Ok(tcx
.intern_layout(st
));
1453 let no_explicit_discriminants
= def
.variants
.iter().enumerate()
1454 .all(|(i
, v
)| v
.discr
== ty
::VariantDiscr
::Relative(i
));
1456 // Niche-filling enum optimization.
1457 if !def
.repr
.inhibit_enum_layout_opt() && no_explicit_discriminants
{
1458 let mut dataful_variant
= None
;
1459 let mut niche_variants
= usize::max_value()..=0;
1461 // Find one non-ZST variant.
1462 'variants
: for (v
, fields
) in variants
.iter().enumerate() {
1464 if f
.abi
== Abi
::Uninhabited
{
1468 if dataful_variant
.is_none() {
1469 dataful_variant
= Some(v
);
1472 dataful_variant
= None
;
1477 if niche_variants
.start
> v
{
1478 niche_variants
.start
= v
;
1480 niche_variants
.end
= v
;
1483 if niche_variants
.start
> niche_variants
.end
{
1484 dataful_variant
= None
;
1487 if let Some(i
) = dataful_variant
{
1488 let count
= (niche_variants
.end
- niche_variants
.start
+ 1) as u128
;
1489 for (field_index
, field
) in variants
[i
].iter().enumerate() {
1490 let (offset
, niche
, niche_start
) =
1491 match field
.find_niche(self, count
)?
{
1492 Some(niche
) => niche
,
1495 let mut align
= dl
.aggregate_align
;
1496 let st
= variants
.iter().enumerate().map(|(j
, v
)| {
1497 let mut st
= univariant_uninterned(v
,
1498 &def
.repr
, StructKind
::AlwaysSized
)?
;
1499 st
.variants
= Variants
::Single { index: j }
;
1501 align
= align
.max(st
.align
);
1504 }).collect
::<Result
<Vec
<_
>, _
>>()?
;
1506 let offset
= st
[i
].fields
.offset(field_index
) + offset
;
1507 let size
= st
[i
].size
;
1509 let abi
= if offset
.bytes() == 0 && niche
.value
.size(dl
) == size
{
1510 Abi
::Scalar(niche
.clone())
1512 Abi
::Aggregate { sized: true }
1515 return Ok(tcx
.intern_layout(LayoutDetails
{
1516 variants
: Variants
::NicheFilling
{
1523 fields
: FieldPlacement
::Arbitrary
{
1524 offsets
: vec
![offset
],
1525 memory_index
: vec
![0]
1535 let (mut min
, mut max
) = (i128
::max_value(), i128
::min_value());
1536 for (i
, discr
) in def
.discriminants(tcx
).enumerate() {
1537 if variants
[i
].iter().any(|f
| f
.abi
== Abi
::Uninhabited
) {
1540 let x
= discr
.to_u128_unchecked() as i128
;
1541 if x
< min { min = x; }
1542 if x
> max { max = x; }
1544 assert
!(min
<= max
, "discriminant range is {}...{}", min
, max
);
1545 let (min_ity
, signed
) = Integer
::repr_discr(tcx
, ty
, &def
.repr
, min
, max
);
1547 let mut align
= dl
.aggregate_align
;
1548 let mut size
= Size
::from_bytes(0);
1550 // We're interested in the smallest alignment, so start large.
1551 let mut start_align
= Align
::from_bytes(256, 256).unwrap();
1552 assert_eq
!(Integer
::for_abi_align(dl
, start_align
), None
);
1554 // repr(C) on an enum tells us to make a (tag, union) layout,
1555 // so we need to grow the prefix alignment to be at least
1556 // the alignment of the union. (This value is used both for
1557 // determining the alignment of the overall enum, and the
1558 // determining the alignment of the payload after the tag.)
1559 let mut prefix_align
= min_ity
.align(dl
);
1561 for fields
in &variants
{
1562 for field
in fields
{
1563 prefix_align
= prefix_align
.max(field
.align
);
1568 // Create the set of structs that represent each variant.
1569 let mut variants
= variants
.into_iter().enumerate().map(|(i
, field_layouts
)| {
1570 let mut st
= univariant_uninterned(&field_layouts
,
1571 &def
.repr
, StructKind
::Prefixed(min_ity
.size(), prefix_align
))?
;
1572 st
.variants
= Variants
::Single { index: i }
;
1573 // Find the first field we can't move later
1574 // to make room for a larger discriminant.
1575 for field
in st
.fields
.index_by_increasing_offset().map(|j
| field_layouts
[j
]) {
1576 if !field
.is_zst() || field
.align
.abi() != 1 {
1577 start_align
= start_align
.min(field
.align
);
1581 size
= cmp
::max(size
, st
.size
);
1582 align
= align
.max(st
.align
);
1584 }).collect
::<Result
<Vec
<_
>, _
>>()?
;
1586 // Align the maximum variant size to the largest alignment.
1587 size
= size
.abi_align(align
);
1589 if size
.bytes() >= dl
.obj_size_bound() {
1590 return Err(LayoutError
::SizeOverflow(ty
));
1593 let typeck_ity
= Integer
::from_attr(dl
, def
.repr
.discr_type());
1594 if typeck_ity
< min_ity
{
1595 // It is a bug if Layout decided on a greater discriminant size than typeck for
1596 // some reason at this point (based on values discriminant can take on). Mostly
1597 // because this discriminant will be loaded, and then stored into variable of
1598 // type calculated by typeck. Consider such case (a bug): typeck decided on
1599 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1600 // discriminant values. That would be a bug, because then, in trans, in order
1601 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1602 // space necessary to represent would have to be discarded (or layout is wrong
1603 // on thinking it needs 16 bits)
1604 bug
!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1605 min_ity
, typeck_ity
);
1606 // However, it is fine to make discr type however large (as an optimisation)
1607 // after this point – we’ll just truncate the value we load in trans.
1610 // Check to see if we should use a different type for the
1611 // discriminant. We can safely use a type with the same size
1612 // as the alignment of the first field of each variant.
1613 // We increase the size of the discriminant to avoid LLVM copying
1614 // padding when it doesn't need to. This normally causes unaligned
1615 // load/stores and excessive memcpy/memset operations. By using a
1616 // bigger integer size, LLVM can be sure about it's contents and
1617 // won't be so conservative.
1619 // Use the initial field alignment
1620 let mut ity
= Integer
::for_abi_align(dl
, start_align
).unwrap_or(min_ity
);
1622 // If the alignment is not larger than the chosen discriminant size,
1623 // don't use the alignment as the final size.
1627 // Patch up the variants' first few fields.
1628 let old_ity_size
= min_ity
.size();
1629 let new_ity_size
= ity
.size();
1630 for variant
in &mut variants
{
1631 if variant
.abi
== Abi
::Uninhabited
{
1634 match variant
.fields
{
1635 FieldPlacement
::Arbitrary { ref mut offsets, .. }
=> {
1637 if *i
<= old_ity_size
{
1638 assert_eq
!(*i
, old_ity_size
);
1642 // We might be making the struct larger.
1643 if variant
.size
<= old_ity_size
{
1644 variant
.size
= new_ity_size
;
1652 let discr
= Scalar
{
1653 value
: Int(ity
, signed
),
1654 valid_range
: (min
as u128
)..=(max
as u128
)
1656 let abi
= if discr
.value
.size(dl
) == size
{
1657 Abi
::Scalar(discr
.clone())
1659 Abi
::Aggregate { sized: true }
1661 tcx
.intern_layout(LayoutDetails
{
1662 variants
: Variants
::Tagged
{
1666 fields
: FieldPlacement
::Arbitrary
{
1667 offsets
: vec
![Size
::from_bytes(0)],
1668 memory_index
: vec
![0]
1676 // Types with no meaningful known layout.
1677 ty
::TyProjection(_
) | ty
::TyAnon(..) => {
1678 let normalized
= tcx
.normalize_associated_type_in_env(&ty
, param_env
);
1679 if ty
== normalized
{
1680 return Err(LayoutError
::Unknown(ty
));
1682 tcx
.layout_raw(param_env
.and(normalized
))?
1685 return Err(LayoutError
::Unknown(ty
));
1687 ty
::TyGeneratorWitness(..) | ty
::TyInfer(_
) | ty
::TyError
=> {
1688 bug
!("LayoutDetails::compute: unexpected type `{}`", ty
)
1693 /// This is invoked by the `layout_raw` query to record the final
1694 /// layout of each type.
1696 fn record_layout_for_printing(self, layout
: TyLayout
<'tcx
>) {
1697 // If we are running with `-Zprint-type-sizes`, record layouts for
1698 // dumping later. Ignore layouts that are done with non-empty
1699 // environments or non-monomorphic layouts, as the user only wants
1700 // to see the stuff resulting from the final trans session.
1702 !self.tcx
.sess
.opts
.debugging_opts
.print_type_sizes
||
1703 layout
.ty
.has_param_types() ||
1704 layout
.ty
.has_self_ty() ||
1705 !self.param_env
.caller_bounds
.is_empty()
1710 self.record_layout_for_printing_outlined(layout
)
1713 fn record_layout_for_printing_outlined(self, layout
: TyLayout
<'tcx
>) {
1714 // (delay format until we actually need it)
1715 let record
= |kind
, opt_discr_size
, variants
| {
1716 let type_desc
= format
!("{:?}", layout
.ty
);
1717 self.tcx
.sess
.code_stats
.borrow_mut().record_type_size(kind
,
1725 let adt_def
= match layout
.ty
.sty
{
1726 ty
::TyAdt(ref adt_def
, _
) => {
1727 debug
!("print-type-size t: `{:?}` process adt", layout
.ty
);
1731 ty
::TyClosure(..) => {
1732 debug
!("print-type-size t: `{:?}` record closure", layout
.ty
);
1733 record(DataTypeKind
::Closure
, None
, vec
![]);
1738 debug
!("print-type-size t: `{:?}` skip non-nominal", layout
.ty
);
1743 let adt_kind
= adt_def
.adt_kind();
1745 let build_variant_info
= |n
: Option
<ast
::Name
>,
1747 layout
: TyLayout
<'tcx
>| {
1748 let mut min_size
= Size
::from_bytes(0);
1749 let field_info
: Vec
<_
> = flds
.iter().enumerate().map(|(i
, &name
)| {
1750 match layout
.field(self, i
) {
1752 bug
!("no layout found for field {}: `{:?}`", name
, err
);
1754 Ok(field_layout
) => {
1755 let offset
= layout
.fields
.offset(i
);
1756 let field_end
= offset
+ field_layout
.size
;
1757 if min_size
< field_end
{
1758 min_size
= field_end
;
1760 session
::FieldInfo
{
1761 name
: name
.to_string(),
1762 offset
: offset
.bytes(),
1763 size
: field_layout
.size
.bytes(),
1764 align
: field_layout
.align
.abi(),
1770 session
::VariantInfo
{
1771 name
: n
.map(|n
|n
.to_string()),
1772 kind
: if layout
.is_unsized() {
1773 session
::SizeKind
::Min
1775 session
::SizeKind
::Exact
1777 align
: layout
.align
.abi(),
1778 size
: if min_size
.bytes() == 0 {
1787 match layout
.variants
{
1788 Variants
::Single { index }
=> {
1789 debug
!("print-type-size `{:#?}` variant {}",
1790 layout
, adt_def
.variants
[index
].name
);
1791 if !adt_def
.variants
.is_empty() {
1792 let variant_def
= &adt_def
.variants
[index
];
1793 let fields
: Vec
<_
> =
1794 variant_def
.fields
.iter().map(|f
| f
.name
).collect();
1795 record(adt_kind
.into(),
1797 vec
![build_variant_info(Some(variant_def
.name
),
1801 // (This case arises for *empty* enums; so give it
1803 record(adt_kind
.into(), None
, vec
![]);
1807 Variants
::NicheFilling { .. }
|
1808 Variants
::Tagged { .. }
=> {
1809 debug
!("print-type-size `{:#?}` adt general variants def {}",
1810 layout
.ty
, adt_def
.variants
.len());
1811 let variant_infos
: Vec
<_
> =
1812 adt_def
.variants
.iter().enumerate().map(|(i
, variant_def
)| {
1813 let fields
: Vec
<_
> =
1814 variant_def
.fields
.iter().map(|f
| f
.name
).collect();
1815 build_variant_info(Some(variant_def
.name
),
1817 layout
.for_variant(self, i
))
1820 record(adt_kind
.into(), match layout
.variants
{
1821 Variants
::Tagged { ref discr, .. }
=> Some(discr
.value
.size(self)),
1829 /// Type size "skeleton", i.e. the only information determining a type's size.
1830 /// While this is conservative, (aside from constant sizes, only pointers,
1831 /// newtypes thereof and null pointer optimized enums are allowed), it is
1832 /// enough to statically check common usecases of transmute.
1833 #[derive(Copy, Clone, Debug)]
1834 pub enum SizeSkeleton
<'tcx
> {
1835 /// Any statically computable Layout.
1838 /// A potentially-fat pointer.
1840 /// If true, this pointer is never null.
1842 /// The type which determines the unsized metadata, if any,
1843 /// of this pointer. Either a type parameter or a projection
1844 /// depending on one, with regions erased.
1849 impl<'a
, 'tcx
> SizeSkeleton
<'tcx
> {
1850 pub fn compute(ty
: Ty
<'tcx
>,
1851 tcx
: TyCtxt
<'a
, 'tcx
, 'tcx
>,
1852 param_env
: ty
::ParamEnv
<'tcx
>)
1853 -> Result
<SizeSkeleton
<'tcx
>, LayoutError
<'tcx
>> {
1854 assert
!(!ty
.has_infer_types());
1856 // First try computing a static layout.
1857 let err
= match tcx
.layout_of(param_env
.and(ty
)) {
1859 return Ok(SizeSkeleton
::Known(layout
.size
));
1865 ty
::TyRef(_
, ty
::TypeAndMut { ty: pointee, .. }
) |
1866 ty
::TyRawPtr(ty
::TypeAndMut { ty: pointee, .. }
) => {
1867 let non_zero
= !ty
.is_unsafe_ptr();
1868 let tail
= tcx
.struct_tail(pointee
);
1870 ty
::TyParam(_
) | ty
::TyProjection(_
) => {
1871 assert
!(tail
.has_param_types() || tail
.has_self_ty());
1872 Ok(SizeSkeleton
::Pointer
{
1874 tail
: tcx
.erase_regions(&tail
)
1878 bug
!("SizeSkeleton::compute({}): layout errored ({}), yet \
1879 tail `{}` is not a type parameter or a projection",
1885 ty
::TyAdt(def
, substs
) => {
1886 // Only newtypes and enums w/ nullable pointer optimization.
1887 if def
.is_union() || def
.variants
.is_empty() || def
.variants
.len() > 2 {
1891 // Get a zero-sized variant or a pointer newtype.
1892 let zero_or_ptr_variant
= |i
: usize| {
1893 let fields
= def
.variants
[i
].fields
.iter().map(|field
| {
1894 SizeSkeleton
::compute(field
.ty(tcx
, substs
), tcx
, param_env
)
1897 for field
in fields
{
1900 SizeSkeleton
::Known(size
) => {
1901 if size
.bytes() > 0 {
1905 SizeSkeleton
::Pointer {..}
=> {
1916 let v0
= zero_or_ptr_variant(0)?
;
1918 if def
.variants
.len() == 1 {
1919 if let Some(SizeSkeleton
::Pointer { non_zero, tail }
) = v0
{
1920 return Ok(SizeSkeleton
::Pointer
{
1921 non_zero
: non_zero
||
1922 Some(def
.did
) == tcx
.lang_items().non_zero(),
1930 let v1
= zero_or_ptr_variant(1)?
;
1931 // Nullable pointer enum optimization.
1933 (Some(SizeSkeleton
::Pointer { non_zero: true, tail }
), None
) |
1934 (None
, Some(SizeSkeleton
::Pointer { non_zero: true, tail }
)) => {
1935 Ok(SizeSkeleton
::Pointer
{
1944 ty
::TyProjection(_
) | ty
::TyAnon(..) => {
1945 let normalized
= tcx
.normalize_associated_type_in_env(&ty
, param_env
);
1946 if ty
== normalized
{
1949 SizeSkeleton
::compute(normalized
, tcx
, param_env
)
1957 pub fn same_size(self, other
: SizeSkeleton
) -> bool
{
1958 match (self, other
) {
1959 (SizeSkeleton
::Known(a
), SizeSkeleton
::Known(b
)) => a
== b
,
1960 (SizeSkeleton
::Pointer { tail: a, .. }
,
1961 SizeSkeleton
::Pointer { tail: b, .. }
) => a
== b
,
1967 /// The details of the layout of a type, alongside the type itself.
1968 /// Provides various type traversal APIs (e.g. recursing into fields).
1970 /// Note that the details are NOT guaranteed to always be identical
1971 /// to those obtained from `layout_of(ty)`, as we need to produce
1972 /// layouts for which Rust types do not exist, such as enum variants
1973 /// or synthetic fields of enums (i.e. discriminants) and fat pointers.
1974 #[derive(Copy, Clone, Debug)]
1975 pub struct TyLayout
<'tcx
> {
1977 details
: &'tcx LayoutDetails
1980 impl<'tcx
> Deref
for TyLayout
<'tcx
> {
1981 type Target
= &'tcx LayoutDetails
;
1982 fn deref(&self) -> &&'tcx LayoutDetails
{
1987 pub trait HasTyCtxt
<'tcx
>: HasDataLayout
{
1988 fn tcx
<'a
>(&'a
self) -> TyCtxt
<'a
, 'tcx
, 'tcx
>;
1991 impl<'a
, 'gcx
, 'tcx
> HasDataLayout
for TyCtxt
<'a
, 'gcx
, 'tcx
> {
1992 fn data_layout(&self) -> &TargetDataLayout
{
1997 impl<'a
, 'gcx
, 'tcx
> HasTyCtxt
<'gcx
> for TyCtxt
<'a
, 'gcx
, 'tcx
> {
1998 fn tcx
<'b
>(&'b
self) -> TyCtxt
<'b
, 'gcx
, 'gcx
> {
2003 impl<'tcx
, T
: HasDataLayout
> HasDataLayout
for LayoutCx
<'tcx
, T
> {
2004 fn data_layout(&self) -> &TargetDataLayout
{
2005 self.tcx
.data_layout()
2009 impl<'gcx
, 'tcx
, T
: HasTyCtxt
<'gcx
>> HasTyCtxt
<'gcx
> for LayoutCx
<'tcx
, T
> {
2010 fn tcx
<'b
>(&'b
self) -> TyCtxt
<'b
, 'gcx
, 'gcx
> {
/// Abstracts over a plain value `T` and a fallible `Result<T, E>`, so
/// layout helpers can be written once and used in both infallible and
/// fallible contexts.
pub trait MaybeResult<T> {
    /// Wraps a success value into `Self` (identity for `T`,
    /// presumably `Ok(x)` for `Result` — see the impls).
    fn from_ok(x: T) -> Self;
    /// Applies `f` to the contained value while preserving `Self`'s
    /// shape (errors, if any, are expected to pass through untouched).
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}
2020 impl<T
> MaybeResult
<T
> for T
{
2021 fn from_ok(x
: T
) -> Self {
2024 fn map_same
<F
: FnOnce(T
) -> T
>(self, f
: F
) -> Self {
2029 impl<T
, E
> MaybeResult
<T
> for Result
<T
, E
> {
2030 fn from_ok(x
: T
) -> Self {
2033 fn map_same
<F
: FnOnce(T
) -> T
>(self, f
: F
) -> Self {
2038 pub trait LayoutOf
<T
> {
2041 fn layout_of(self, ty
: T
) -> Self::TyLayout
;
2044 impl<'a
, 'tcx
> LayoutOf
<Ty
<'tcx
>> for LayoutCx
<'tcx
, TyCtxt
<'a
, 'tcx
, 'tcx
>> {
2045 type TyLayout
= Result
<TyLayout
<'tcx
>, LayoutError
<'tcx
>>;
2047 /// Computes the layout of a type. Note that this implicitly
2048 /// executes in "reveal all" mode.
2049 fn layout_of(self, ty
: Ty
<'tcx
>) -> Self::TyLayout
{
2050 let param_env
= self.param_env
.reveal_all();
2051 let ty
= self.tcx
.normalize_associated_type_in_env(&ty
, param_env
);
2052 let details
= self.tcx
.layout_raw(param_env
.and(ty
))?
;
2053 let layout
= TyLayout
{
2058 // NB: This recording is normally disabled; when enabled, it
2059 // can however trigger recursive invocations of `layout_of`.
2060 // Therefore, we execute it *after* the main query has
2061 // completed, to avoid problems around recursive structures
2062 // and the like. (Admitedly, I wasn't able to reproduce a problem
2063 // here, but it seems like the right thing to do. -nmatsakis)
2064 self.record_layout_for_printing(layout
);
2070 impl<'a
, 'tcx
> LayoutOf
<Ty
<'tcx
>> for LayoutCx
<'tcx
, ty
::maps
::TyCtxtAt
<'a
, 'tcx
, 'tcx
>> {
2071 type TyLayout
= Result
<TyLayout
<'tcx
>, LayoutError
<'tcx
>>;
2073 /// Computes the layout of a type. Note that this implicitly
2074 /// executes in "reveal all" mode.
2075 fn layout_of(self, ty
: Ty
<'tcx
>) -> Self::TyLayout
{
2076 let param_env
= self.param_env
.reveal_all();
2077 let ty
= self.tcx
.normalize_associated_type_in_env(&ty
, param_env
.reveal_all());
2078 let details
= self.tcx
.layout_raw(param_env
.reveal_all().and(ty
))?
;
2079 let layout
= TyLayout
{
2084 // NB: This recording is normally disabled; when enabled, it
2085 // can however trigger recursive invocations of `layout_of`.
2086 // Therefore, we execute it *after* the main query has
2087 // completed, to avoid problems around recursive structures
2088 // and the like. (Admitedly, I wasn't able to reproduce a problem
2089 // here, but it seems like the right thing to do. -nmatsakis)
2092 param_env
: self.param_env
2094 cx
.record_layout_for_printing(layout
);
2100 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2101 impl<'a
, 'tcx
> TyCtxt
<'a
, 'tcx
, 'tcx
> {
2102 /// Computes the layout of a type. Note that this implicitly
2103 /// executes in "reveal all" mode.
2105 pub fn layout_of(self, param_env_and_ty
: ty
::ParamEnvAnd
<'tcx
, Ty
<'tcx
>>)
2106 -> Result
<TyLayout
<'tcx
>, LayoutError
<'tcx
>> {
2109 param_env
: param_env_and_ty
.param_env
2111 cx
.layout_of(param_env_and_ty
.value
)
2115 impl<'a
, 'tcx
> ty
::maps
::TyCtxtAt
<'a
, 'tcx
, 'tcx
> {
2116 /// Computes the layout of a type. Note that this implicitly
2117 /// executes in "reveal all" mode.
2119 pub fn layout_of(self, param_env_and_ty
: ty
::ParamEnvAnd
<'tcx
, Ty
<'tcx
>>)
2120 -> Result
<TyLayout
<'tcx
>, LayoutError
<'tcx
>> {
2123 param_env
: param_env_and_ty
.param_env
2125 cx
.layout_of(param_env_and_ty
.value
)
2129 impl<'a
, 'tcx
> TyLayout
<'tcx
> {
2130 pub fn for_variant
<C
>(&self, cx
: C
, variant_index
: usize) -> Self
2131 where C
: LayoutOf
<Ty
<'tcx
>> + HasTyCtxt
<'tcx
>,
2132 C
::TyLayout
: MaybeResult
<TyLayout
<'tcx
>>
2134 let details
= match self.variants
{
2135 Variants
::Single { index }
if index
== variant_index
=> self.details
,
2137 Variants
::Single { index }
=> {
2138 // Deny calling for_variant more than once for non-Single enums.
2139 cx
.layout_of(self.ty
).map_same(|layout
| {
2140 assert_eq
!(layout
.variants
, Variants
::Single { index }
);
2144 let fields
= match self.ty
.sty
{
2145 ty
::TyAdt(def
, _
) => def
.variants
[variant_index
].fields
.len(),
2148 let mut details
= LayoutDetails
::uninhabited(fields
);
2149 details
.variants
= Variants
::Single { index: variant_index }
;
2150 cx
.tcx().intern_layout(details
)
2153 Variants
::NicheFilling { ref variants, .. }
|
2154 Variants
::Tagged { ref variants, .. }
=> {
2155 &variants
[variant_index
]
2159 assert_eq
!(details
.variants
, Variants
::Single { index: variant_index }
);
2167 pub fn field
<C
>(&self, cx
: C
, i
: usize) -> C
::TyLayout
2168 where C
: LayoutOf
<Ty
<'tcx
>> + HasTyCtxt
<'tcx
>,
2169 C
::TyLayout
: MaybeResult
<TyLayout
<'tcx
>>
2172 cx
.layout_of(match self.ty
.sty
{
2181 ty
::TyGeneratorWitness(..) |
2183 ty
::TyDynamic(..) => {
2184 bug
!("TyLayout::field_type({:?}): not applicable", self)
2187 // Potentially-fat pointers.
2188 ty
::TyRef(_
, ty
::TypeAndMut { ty: pointee, .. }
) |
2189 ty
::TyRawPtr(ty
::TypeAndMut { ty: pointee, .. }
) => {
2192 // Reuse the fat *T type as its own thin pointer data field.
2193 // This provides information about e.g. DST struct pointees
2194 // (which may have no non-DST form), and will work as long
2195 // as the `Abi` or `FieldPlacement` is checked by users.
2197 let nil
= tcx
.mk_nil();
2198 let ptr_ty
= if self.ty
.is_unsafe_ptr() {
2201 tcx
.mk_mut_ref(tcx
.types
.re_static
, nil
)
2203 return cx
.layout_of(ptr_ty
).map_same(|mut ptr_layout
| {
2204 ptr_layout
.ty
= self.ty
;
2209 match tcx
.struct_tail(pointee
).sty
{
2211 ty
::TyStr
=> tcx
.types
.usize,
2212 ty
::TyDynamic(..) => {
2213 // FIXME(eddyb) use an usize/fn() array with
2214 // the correct number of vtables slots.
2215 tcx
.mk_imm_ref(tcx
.types
.re_static
, tcx
.mk_nil())
2217 _
=> bug
!("TyLayout::field_type({:?}): not applicable", self)
2221 // Arrays and slices.
2222 ty
::TyArray(element
, _
) |
2223 ty
::TySlice(element
) => element
,
2224 ty
::TyStr
=> tcx
.types
.u8,
2226 // Tuples, generators and closures.
2227 ty
::TyClosure(def_id
, ref substs
) => {
2228 substs
.upvar_tys(def_id
, tcx
).nth(i
).unwrap()
2231 ty
::TyGenerator(def_id
, ref substs
, _
) => {
2232 substs
.field_tys(def_id
, tcx
).nth(i
).unwrap()
2235 ty
::TyTuple(tys
, _
) => tys
[i
],
2237 // SIMD vector types.
2238 ty
::TyAdt(def
, ..) if def
.repr
.simd() => {
2239 self.ty
.simd_type(tcx
)
2243 ty
::TyAdt(def
, substs
) => {
2244 match self.variants
{
2245 Variants
::Single { index }
=> {
2246 def
.variants
[index
].fields
[i
].ty(tcx
, substs
)
2249 // Discriminant field for enums (where applicable).
2250 Variants
::Tagged { ref discr, .. }
|
2251 Variants
::NicheFilling { niche: ref discr, .. }
=> {
2253 let layout
= LayoutDetails
::scalar(tcx
, discr
.clone());
2254 return MaybeResult
::from_ok(TyLayout
{
2255 details
: tcx
.intern_layout(layout
),
2256 ty
: discr
.value
.to_ty(tcx
)
2262 ty
::TyProjection(_
) | ty
::TyAnon(..) | ty
::TyParam(_
) |
2263 ty
::TyInfer(_
) | ty
::TyError
=> {
2264 bug
!("TyLayout::field_type: unexpected type `{}`", self.ty
)
2269 /// Returns true if the layout corresponds to an unsized type.
2270 pub fn is_unsized(&self) -> bool
{
2271 self.abi
.is_unsized()
2274 /// Returns true if the type is a ZST and not unsized.
2275 pub fn is_zst(&self) -> bool
{
2277 Abi
::Uninhabited
=> true,
2279 Abi
::ScalarPair(..) |
2280 Abi
::Vector { .. }
=> false,
2281 Abi
::Aggregate { sized }
=> sized
&& self.size
.bytes() == 0
2285 pub fn size_and_align(&self) -> (Size
, Align
) {
2286 (self.size
, self.align
)
2289 /// Find the offset of a niche leaf field, starting from
2290 /// the given type and recursing through aggregates, which
2291 /// has at least `count` consecutive invalid values.
2292 /// The tuple is `(offset, scalar, niche_value)`.
2293 // FIXME(eddyb) traverse already optimized enums.
2294 fn find_niche
<C
>(&self, cx
: C
, count
: u128
)
2295 -> Result
<Option
<(Size
, Scalar
, u128
)>, LayoutError
<'tcx
>>
2296 where C
: LayoutOf
<Ty
<'tcx
>, TyLayout
= Result
<Self, LayoutError
<'tcx
>>> +
2299 let scalar_component
= |scalar
: &Scalar
, offset
| {
2300 let Scalar { value, valid_range: ref v }
= *scalar
;
2302 let bits
= value
.size(cx
).bits();
2303 assert
!(bits
<= 128);
2304 let max_value
= !0u128 >> (128 - bits
);
2306 // Find out how many values are outside the valid range.
2307 let niches
= if v
.start
<= v
.end
{
2308 v
.start
+ (max_value
- v
.end
)
2313 // Give up if we can't fit `count` consecutive niches.
2318 let niche_start
= v
.end
.wrapping_add(1) & max_value
;
2319 let niche_end
= v
.end
.wrapping_add(count
) & max_value
;
2320 Some((offset
, Scalar
{
2322 valid_range
: v
.start
..=niche_end
2326 // Locals variables which live across yields are stored
2327 // in the generator type as fields. These may be uninitialized
2328 // so we don't look for niches there.
2329 if let ty
::TyGenerator(..) = self.ty
.sty
{
2334 Abi
::Scalar(ref scalar
) => {
2335 return Ok(scalar_component(scalar
, Size
::from_bytes(0)));
2337 Abi
::ScalarPair(ref a
, ref b
) => {
2338 return Ok(scalar_component(a
, Size
::from_bytes(0)).or_else(|| {
2339 scalar_component(b
, a
.value
.size(cx
).abi_align(b
.value
.align(cx
)))
2342 Abi
::Vector { ref element, .. }
=> {
2343 return Ok(scalar_component(element
, Size
::from_bytes(0)));
2348 // Perhaps one of the fields is non-zero, let's recurse and find out.
2349 if let FieldPlacement
::Union(_
) = self.fields
{
2350 // Only Rust enums have safe-to-inspect fields
2351 // (a discriminant), other unions are unsafe.
2352 if let Variants
::Single { .. }
= self.variants
{
2356 if let FieldPlacement
::Array { .. }
= self.fields
{
2357 if self.fields
.count() > 0 {
2358 return self.field(cx
, 0)?
.find_niche(cx
, count
);
2361 for i
in 0..self.fields
.count() {
2362 let r
= self.field(cx
, i
)?
.find_niche(cx
, count
)?
;
2363 if let Some((offset
, scalar
, niche_value
)) = r
{
2364 let offset
= self.fields
.offset(i
) + offset
;
2365 return Ok(Some((offset
, scalar
, niche_value
)));
2372 impl<'gcx
> HashStable
<StableHashingContext
<'gcx
>> for Variants
{
2373 fn hash_stable
<W
: StableHasherResult
>(&self,
2374 hcx
: &mut StableHashingContext
<'gcx
>,
2375 hasher
: &mut StableHasher
<W
>) {
2376 use ty
::layout
::Variants
::*;
2377 mem
::discriminant(self).hash_stable(hcx
, hasher
);
2380 Single { index }
=> {
2381 index
.hash_stable(hcx
, hasher
);
2387 discr
.hash_stable(hcx
, hasher
);
2388 variants
.hash_stable(hcx
, hasher
);
2392 niche_variants
: RangeInclusive { start, end }
,
2397 dataful_variant
.hash_stable(hcx
, hasher
);
2398 start
.hash_stable(hcx
, hasher
);
2399 end
.hash_stable(hcx
, hasher
);
2400 niche
.hash_stable(hcx
, hasher
);
2401 niche_start
.hash_stable(hcx
, hasher
);
2402 variants
.hash_stable(hcx
, hasher
);
2408 impl<'gcx
> HashStable
<StableHashingContext
<'gcx
>> for FieldPlacement
{
2409 fn hash_stable
<W
: StableHasherResult
>(&self,
2410 hcx
: &mut StableHashingContext
<'gcx
>,
2411 hasher
: &mut StableHasher
<W
>) {
2412 use ty
::layout
::FieldPlacement
::*;
2413 mem
::discriminant(self).hash_stable(hcx
, hasher
);
2417 count
.hash_stable(hcx
, hasher
);
2419 Array { count, stride }
=> {
2420 count
.hash_stable(hcx
, hasher
);
2421 stride
.hash_stable(hcx
, hasher
);
2423 Arbitrary { ref offsets, ref memory_index }
=> {
2424 offsets
.hash_stable(hcx
, hasher
);
2425 memory_index
.hash_stable(hcx
, hasher
);
2431 impl<'gcx
> HashStable
<StableHashingContext
<'gcx
>> for Abi
{
2432 fn hash_stable
<W
: StableHasherResult
>(&self,
2433 hcx
: &mut StableHashingContext
<'gcx
>,
2434 hasher
: &mut StableHasher
<W
>) {
2435 use ty
::layout
::Abi
::*;
2436 mem
::discriminant(self).hash_stable(hcx
, hasher
);
2440 Scalar(ref value
) => {
2441 value
.hash_stable(hcx
, hasher
);
2443 ScalarPair(ref a
, ref b
) => {
2444 a
.hash_stable(hcx
, hasher
);
2445 b
.hash_stable(hcx
, hasher
);
2447 Vector { ref element, count }
=> {
2448 element
.hash_stable(hcx
, hasher
);
2449 count
.hash_stable(hcx
, hasher
);
2451 Aggregate { sized }
=> {
2452 sized
.hash_stable(hcx
, hasher
);
2458 impl<'gcx
> HashStable
<StableHashingContext
<'gcx
>> for Scalar
{
2459 fn hash_stable
<W
: StableHasherResult
>(&self,
2460 hcx
: &mut StableHashingContext
<'gcx
>,
2461 hasher
: &mut StableHasher
<W
>) {
2462 let Scalar { value, valid_range: RangeInclusive { start, end }
} = *self;
2463 value
.hash_stable(hcx
, hasher
);
2464 start
.hash_stable(hcx
, hasher
);
2465 end
.hash_stable(hcx
, hasher
);
// NOTE(review): the five `impl_stable_hash_for!` invocations below derive
// `HashStable` impls for the core layout types. Their field/variant lists
// appear to have been lost in extraction (only `Int(integer, signed)` of
// `Primitive` survives) — recover the full lists from the corresponding
// type definitions before relying on this text.
// Derives HashStable for `LayoutDetails` (field list missing here).
2469 impl_stable_hash_for
!(struct ::ty
::layout
::LayoutDetails
{
// Derives HashStable for the `Integer` size enum (variant list missing here).
2477 impl_stable_hash_for
!(enum ::ty
::layout
::Integer
{
// Derives HashStable for `Primitive`; the `Int` variant carries an integer
// size and a signedness flag. Remaining variants missing here.
2485 impl_stable_hash_for
!(enum ::ty
::layout
::Primitive
{
2486 Int(integer
, signed
),
// Derives HashStable for `Align` (field list missing here).
2492 impl_stable_hash_for
!(struct ::ty
::layout
::Align
{
// Derives HashStable for `Size` (field list missing here).
2497 impl_stable_hash_for
!(struct ::ty
::layout
::Size
{
2501 impl<'gcx
> HashStable
<StableHashingContext
<'gcx
>> for LayoutError
<'gcx
>
2503 fn hash_stable
<W
: StableHasherResult
>(&self,
2504 hcx
: &mut StableHashingContext
<'gcx
>,
2505 hasher
: &mut StableHasher
<W
>) {
2506 use ty
::layout
::LayoutError
::*;
2507 mem
::discriminant(self).hash_stable(hcx
, hasher
);
2511 SizeOverflow(t
) => t
.hash_stable(hcx
, hasher
)