1 // ignore-tidy-filelength
2 use crate::ich
::StableHashingContext
;
3 use crate::middle
::codegen_fn_attrs
::CodegenFnAttrFlags
;
4 use crate::mir
::{GeneratorLayout, GeneratorSavedLocal}
;
5 use crate::ty
::subst
::Subst
;
6 use crate::ty
::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable}
;
9 use rustc_attr
as attr
;
10 use rustc_data_structures
::stable_hasher
::{HashStable, StableHasher}
;
12 use rustc_hir
::lang_items
::LangItem
;
13 use rustc_index
::bit_set
::BitSet
;
14 use rustc_index
::vec
::{Idx, IndexVec}
;
15 use rustc_session
::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo}
;
16 use rustc_span
::symbol
::{Ident, Symbol}
;
17 use rustc_span
::DUMMY_SP
;
18 use rustc_target
::abi
::call
::{
19 ArgAbi
, ArgAttribute
, ArgAttributes
, ArgExtension
, Conv
, FnAbi
, PassMode
, Reg
, RegKind
,
21 use rustc_target
::abi
::*;
22 use rustc_target
::spec
::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy}
;
28 use std
::num
::NonZeroUsize
;
31 pub trait IntegerExt
{
32 fn to_ty
<'tcx
>(&self, tcx
: TyCtxt
<'tcx
>, signed
: bool
) -> Ty
<'tcx
>;
33 fn from_attr
<C
: HasDataLayout
>(cx
: &C
, ity
: attr
::IntType
) -> Integer
;
34 fn from_int_ty
<C
: HasDataLayout
>(cx
: &C
, ity
: ty
::IntTy
) -> Integer
;
35 fn from_uint_ty
<C
: HasDataLayout
>(cx
: &C
, uty
: ty
::UintTy
) -> Integer
;
45 impl IntegerExt
for Integer
{
46 fn to_ty
<'tcx
>(&self, tcx
: TyCtxt
<'tcx
>, signed
: bool
) -> Ty
<'tcx
> {
47 match (*self, signed
) {
48 (I8
, false) => tcx
.types
.u8,
49 (I16
, false) => tcx
.types
.u16,
50 (I32
, false) => tcx
.types
.u32,
51 (I64
, false) => tcx
.types
.u64,
52 (I128
, false) => tcx
.types
.u128
,
53 (I8
, true) => tcx
.types
.i8,
54 (I16
, true) => tcx
.types
.i16,
55 (I32
, true) => tcx
.types
.i32,
56 (I64
, true) => tcx
.types
.i64,
57 (I128
, true) => tcx
.types
.i128
,
61 /// Gets the Integer type from an attr::IntType.
62 fn from_attr
<C
: HasDataLayout
>(cx
: &C
, ity
: attr
::IntType
) -> Integer
{
63 let dl
= cx
.data_layout();
66 attr
::SignedInt(ast
::IntTy
::I8
) | attr
::UnsignedInt(ast
::UintTy
::U8
) => I8
,
67 attr
::SignedInt(ast
::IntTy
::I16
) | attr
::UnsignedInt(ast
::UintTy
::U16
) => I16
,
68 attr
::SignedInt(ast
::IntTy
::I32
) | attr
::UnsignedInt(ast
::UintTy
::U32
) => I32
,
69 attr
::SignedInt(ast
::IntTy
::I64
) | attr
::UnsignedInt(ast
::UintTy
::U64
) => I64
,
70 attr
::SignedInt(ast
::IntTy
::I128
) | attr
::UnsignedInt(ast
::UintTy
::U128
) => I128
,
71 attr
::SignedInt(ast
::IntTy
::Isize
) | attr
::UnsignedInt(ast
::UintTy
::Usize
) => {
72 dl
.ptr_sized_integer()
77 fn from_int_ty
<C
: HasDataLayout
>(cx
: &C
, ity
: ty
::IntTy
) -> Integer
{
80 ty
::IntTy
::I16
=> I16
,
81 ty
::IntTy
::I32
=> I32
,
82 ty
::IntTy
::I64
=> I64
,
83 ty
::IntTy
::I128
=> I128
,
84 ty
::IntTy
::Isize
=> cx
.data_layout().ptr_sized_integer(),
87 fn from_uint_ty
<C
: HasDataLayout
>(cx
: &C
, ity
: ty
::UintTy
) -> Integer
{
90 ty
::UintTy
::U16
=> I16
,
91 ty
::UintTy
::U32
=> I32
,
92 ty
::UintTy
::U64
=> I64
,
93 ty
::UintTy
::U128
=> I128
,
94 ty
::UintTy
::Usize
=> cx
.data_layout().ptr_sized_integer(),
98 /// Finds the appropriate Integer type and signedness for the given
99 /// signed discriminant range and `#[repr]` attribute.
100 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
101 /// that shouldn't affect anything, other than maybe debuginfo.
108 ) -> (Integer
, bool
) {
109 // Theoretically, negative values could be larger in unsigned representation
110 // than the unsigned representation of the signed minimum. However, if there
111 // are any negative values, the only valid unsigned representation is u128
112 // which can fit all i128 values, so the result remains unaffected.
113 let unsigned_fit
= Integer
::fit_unsigned(cmp
::max(min
as u128
, max
as u128
));
114 let signed_fit
= cmp
::max(Integer
::fit_signed(min
), Integer
::fit_signed(max
));
116 let mut min_from_extern
= None
;
117 let min_default
= I8
;
119 if let Some(ity
) = repr
.int
{
120 let discr
= Integer
::from_attr(&tcx
, ity
);
121 let fit
= if ity
.is_signed() { signed_fit }
else { unsigned_fit }
;
124 "Integer::repr_discr: `#[repr]` hint too small for \
125 discriminant range of enum `{}",
129 return (discr
, ity
.is_signed());
133 match &tcx
.sess
.target
.arch
[..] {
134 "hexagon" => min_from_extern
= Some(I8
),
135 // WARNING: the ARM EABI has two variants; the one corresponding
136 // to `at_least == I32` appears to be used on Linux and NetBSD,
137 // but some systems may use the variant corresponding to no
138 // lower bound. However, we don't run on those yet...?
139 "arm" => min_from_extern
= Some(I32
),
140 _
=> min_from_extern
= Some(I32
),
144 let at_least
= min_from_extern
.unwrap_or(min_default
);
146 // If there are no negative values, we can use the unsigned fit.
148 (cmp
::max(unsigned_fit
, at_least
), false)
150 (cmp
::max(signed_fit
, at_least
), true)
155 pub trait PrimitiveExt
{
156 fn to_ty
<'tcx
>(&self, tcx
: TyCtxt
<'tcx
>) -> Ty
<'tcx
>;
157 fn to_int_ty
<'tcx
>(&self, tcx
: TyCtxt
<'tcx
>) -> Ty
<'tcx
>;
160 impl PrimitiveExt
for Primitive
{
161 fn to_ty
<'tcx
>(&self, tcx
: TyCtxt
<'tcx
>) -> Ty
<'tcx
> {
163 Int(i
, signed
) => i
.to_ty(tcx
, signed
),
164 F32
=> tcx
.types
.f32,
165 F64
=> tcx
.types
.f64,
166 Pointer
=> tcx
.mk_mut_ptr(tcx
.mk_unit()),
170 /// Return an *integer* type matching this primitive.
171 /// Useful in particular when dealing with enum discriminants.
172 fn to_int_ty(&self, tcx
: TyCtxt
<'tcx
>) -> Ty
<'tcx
> {
174 Int(i
, signed
) => i
.to_ty(tcx
, signed
),
175 Pointer
=> tcx
.types
.usize,
176 F32
| F64
=> bug
!("floats do not have an int type"),
/// Index of the first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;
/// Index of the second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
/// The maximum supported number of lanes in a SIMD vector (2^15).
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer,
///   so the largest representable exponent is 15.
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
200 #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
201 pub enum LayoutError
<'tcx
> {
203 SizeOverflow(Ty
<'tcx
>),
206 impl<'tcx
> fmt
::Display
for LayoutError
<'tcx
> {
207 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
209 LayoutError
::Unknown(ty
) => write
!(f
, "the type `{}` has an unknown layout", ty
),
210 LayoutError
::SizeOverflow(ty
) => {
211 write
!(f
, "values of the type `{}` are too big for the current architecture", ty
)
219 query
: ty
::ParamEnvAnd
<'tcx
, Ty
<'tcx
>>,
220 ) -> Result
<&'tcx Layout
, LayoutError
<'tcx
>> {
221 ty
::tls
::with_related_context(tcx
, move |icx
| {
222 let (param_env
, ty
) = query
.into_parts();
224 if !tcx
.sess
.recursion_limit().value_within_limit(icx
.layout_depth
) {
225 tcx
.sess
.fatal(&format
!("overflow representing the type `{}`", ty
));
228 // Update the ImplicitCtxt to increase the layout_depth
229 let icx
= ty
::tls
::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() }
;
231 ty
::tls
::enter_context(&icx
, |_
| {
232 let cx
= LayoutCx { tcx, param_env }
;
233 let layout
= cx
.layout_raw_uncached(ty
);
234 // Type-level uninhabitedness should always imply ABI uninhabitedness.
235 if let Ok(layout
) = layout
{
236 if tcx
.conservative_is_privately_uninhabited(param_env
.and(ty
)) {
237 assert
!(layout
.abi
.is_uninhabited());
245 pub fn provide(providers
: &mut ty
::query
::Providers
) {
246 *providers
= ty
::query
::Providers { layout_raw, ..*providers }
;
249 pub struct LayoutCx
<'tcx
, C
> {
251 pub param_env
: ty
::ParamEnv
<'tcx
>,
254 #[derive(Copy, Clone, Debug)]
256 /// A tuple, closure, or univariant which cannot be coerced to unsized.
258 /// A univariant, the last field of which may be coerced to unsized.
260 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
261 Prefixed(Size
, Align
),
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for (src, &dst) in map.iter().enumerate() {
        inverse[dst as usize] = src as u32;
    }
    inverse
}
277 impl<'tcx
> LayoutCx
<'tcx
, TyCtxt
<'tcx
>> {
278 fn scalar_pair(&self, a
: Scalar
, b
: Scalar
) -> Layout
{
279 let dl
= self.data_layout();
280 let b_align
= b
.value
.align(dl
);
281 let align
= a
.value
.align(dl
).max(b_align
).max(dl
.aggregate_align
);
282 let b_offset
= a
.value
.size(dl
).align_to(b_align
.abi
);
283 let size
= (b_offset
+ b
.value
.size(dl
)).align_to(align
.abi
);
285 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
286 // returns the last maximum.
287 let largest_niche
= Niche
::from_scalar(dl
, b_offset
, b
.clone())
289 .chain(Niche
::from_scalar(dl
, Size
::ZERO
, a
.clone()))
290 .max_by_key(|niche
| niche
.available(dl
));
293 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
294 fields
: FieldsShape
::Arbitrary
{
295 offsets
: vec
![Size
::ZERO
, b_offset
],
296 memory_index
: vec
![0, 1],
298 abi
: Abi
::ScalarPair(a
, b
),
305 fn univariant_uninterned(
308 fields
: &[TyAndLayout
<'_
>],
311 ) -> Result
<Layout
, LayoutError
<'tcx
>> {
312 let dl
= self.data_layout();
313 let pack
= repr
.pack
;
314 if pack
.is_some() && repr
.align
.is_some() {
315 bug
!("struct cannot be packed and aligned");
318 let mut align
= if pack
.is_some() { dl.i8_align }
else { dl.aggregate_align }
;
320 let mut inverse_memory_index
: Vec
<u32> = (0..fields
.len() as u32).collect();
322 let optimize
= !repr
.inhibit_struct_field_reordering_opt();
325 if let StructKind
::MaybeUnsized
= kind { fields.len() - 1 }
else { fields.len() }
;
326 let optimizing
= &mut inverse_memory_index
[..end
];
327 let field_align
= |f
: &TyAndLayout
<'_
>| {
328 if let Some(pack
) = pack { f.align.abi.min(pack) }
else { f.align.abi }
331 StructKind
::AlwaysSized
| StructKind
::MaybeUnsized
=> {
332 optimizing
.sort_by_key(|&x
| {
333 // Place ZSTs first to avoid "interesting offsets",
334 // especially with only one or two non-ZST fields.
335 let f
= &fields
[x
as usize];
336 (!f
.is_zst(), cmp
::Reverse(field_align(f
)))
339 StructKind
::Prefixed(..) => {
340 // Sort in ascending alignment so that the layout stay optimal
341 // regardless of the prefix
342 optimizing
.sort_by_key(|&x
| field_align(&fields
[x
as usize]));
347 // inverse_memory_index holds field indices by increasing memory offset.
348 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
349 // We now write field offsets to the corresponding offset slot;
350 // field 5 with offset 0 puts 0 in offsets[5].
351 // At the bottom of this function, we invert `inverse_memory_index` to
352 // produce `memory_index` (see `invert_mapping`).
354 let mut sized
= true;
355 let mut offsets
= vec
![Size
::ZERO
; fields
.len()];
356 let mut offset
= Size
::ZERO
;
357 let mut largest_niche
= None
;
358 let mut largest_niche_available
= 0;
360 if let StructKind
::Prefixed(prefix_size
, prefix_align
) = kind
{
362 if let Some(pack
) = pack { prefix_align.min(pack) }
else { prefix_align }
;
363 align
= align
.max(AbiAndPrefAlign
::new(prefix_align
));
364 offset
= prefix_size
.align_to(prefix_align
);
367 for &i
in &inverse_memory_index
{
368 let field
= fields
[i
as usize];
370 bug
!("univariant: field #{} of `{}` comes after unsized field", offsets
.len(), ty
);
373 if field
.is_unsized() {
377 // Invariant: offset < dl.obj_size_bound() <= 1<<61
378 let field_align
= if let Some(pack
) = pack
{
379 field
.align
.min(AbiAndPrefAlign
::new(pack
))
383 offset
= offset
.align_to(field_align
.abi
);
384 align
= align
.max(field_align
);
386 debug
!("univariant offset: {:?} field: {:#?}", offset
, field
);
387 offsets
[i
as usize] = offset
;
389 if !repr
.hide_niche() {
390 if let Some(mut niche
) = field
.largest_niche
.clone() {
391 let available
= niche
.available(dl
);
392 if available
> largest_niche_available
{
393 largest_niche_available
= available
;
394 niche
.offset
+= offset
;
395 largest_niche
= Some(niche
);
400 offset
= offset
.checked_add(field
.size
, dl
).ok_or(LayoutError
::SizeOverflow(ty
))?
;
403 if let Some(repr_align
) = repr
.align
{
404 align
= align
.max(AbiAndPrefAlign
::new(repr_align
));
407 debug
!("univariant min_size: {:?}", offset
);
408 let min_size
= offset
;
410 // As stated above, inverse_memory_index holds field indices by increasing offset.
411 // This makes it an already-sorted view of the offsets vec.
412 // To invert it, consider:
413 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
414 // Field 5 would be the first element, so memory_index is i:
415 // Note: if we didn't optimize, it's already right.
418 if optimize { invert_mapping(&inverse_memory_index) }
else { inverse_memory_index }
;
420 let size
= min_size
.align_to(align
.abi
);
421 let mut abi
= Abi
::Aggregate { sized }
;
423 // Unpack newtype ABIs and find scalar pairs.
424 if sized
&& size
.bytes() > 0 {
425 // All other fields must be ZSTs.
426 let mut non_zst_fields
= fields
.iter().enumerate().filter(|&(_
, f
)| !f
.is_zst());
428 match (non_zst_fields
.next(), non_zst_fields
.next(), non_zst_fields
.next()) {
429 // We have exactly one non-ZST field.
430 (Some((i
, field
)), None
, None
) => {
431 // Field fills the struct and it has a scalar or scalar pair ABI.
432 if offsets
[i
].bytes() == 0 && align
.abi
== field
.align
.abi
&& size
== field
.size
435 // For plain scalars, or vectors of them, we can't unpack
436 // newtypes for `#[repr(C)]`, as that affects C ABIs.
437 Abi
::Scalar(_
) | Abi
::Vector { .. }
if optimize
=> {
438 abi
= field
.abi
.clone();
440 // But scalar pairs are Rust-specific and get
441 // treated as aggregates by C ABIs anyway.
442 Abi
::ScalarPair(..) => {
443 abi
= field
.abi
.clone();
450 // Two non-ZST fields, and they're both scalars.
452 Some((i
, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }
, .. })),
453 Some((j
, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }
, .. })),
456 // Order by the memory placement, not source order.
457 let ((i
, a
), (j
, b
)) =
458 if offsets
[i
] < offsets
[j
] { ((i, a), (j, b)) }
else { ((j, b), (i, a)) }
;
459 let pair
= self.scalar_pair(a
.clone(), b
.clone());
460 let pair_offsets
= match pair
.fields
{
461 FieldsShape
::Arbitrary { ref offsets, ref memory_index }
=> {
462 assert_eq
!(memory_index
, &[0, 1]);
467 if offsets
[i
] == pair_offsets
[0]
468 && offsets
[j
] == pair_offsets
[1]
469 && align
== pair
.align
472 // We can use `ScalarPair` only when it matches our
473 // already computed layout (including `#[repr(C)]`).
482 if sized
&& fields
.iter().any(|f
| f
.abi
.is_uninhabited()) {
483 abi
= Abi
::Uninhabited
;
487 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
488 fields
: FieldsShape
::Arbitrary { offsets, memory_index }
,
496 fn layout_raw_uncached(&self, ty
: Ty
<'tcx
>) -> Result
<&'tcx Layout
, LayoutError
<'tcx
>> {
498 let param_env
= self.param_env
;
499 let dl
= self.data_layout();
500 let scalar_unit
= |value
: Primitive
| {
501 let bits
= value
.size(dl
).bits();
502 assert
!(bits
<= 128);
503 Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
505 let scalar
= |value
: Primitive
| tcx
.intern_layout(Layout
::scalar(self, scalar_unit(value
)));
507 let univariant
= |fields
: &[TyAndLayout
<'_
>], repr
: &ReprOptions
, kind
| {
508 Ok(tcx
.intern_layout(self.univariant_uninterned(ty
, fields
, repr
, kind
)?
))
510 debug_assert
!(!ty
.has_infer_types_or_consts());
512 Ok(match *ty
.kind() {
514 ty
::Bool
=> tcx
.intern_layout(Layout
::scalar(
516 Scalar { value: Int(I8, false), valid_range: 0..=1 }
,
518 ty
::Char
=> tcx
.intern_layout(Layout
::scalar(
520 Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF }
,
522 ty
::Int(ity
) => scalar(Int(Integer
::from_int_ty(dl
, ity
), true)),
523 ty
::Uint(ity
) => scalar(Int(Integer
::from_uint_ty(dl
, ity
), false)),
524 ty
::Float(fty
) => scalar(match fty
{
525 ty
::FloatTy
::F32
=> F32
,
526 ty
::FloatTy
::F64
=> F64
,
529 let mut ptr
= scalar_unit(Pointer
);
530 ptr
.valid_range
= 1..=*ptr
.valid_range
.end();
531 tcx
.intern_layout(Layout
::scalar(self, ptr
))
535 ty
::Never
=> tcx
.intern_layout(Layout
{
536 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
537 fields
: FieldsShape
::Primitive
,
538 abi
: Abi
::Uninhabited
,
544 // Potentially-wide pointers.
545 ty
::Ref(_
, pointee
, _
) | ty
::RawPtr(ty
::TypeAndMut { ty: pointee, .. }
) => {
546 let mut data_ptr
= scalar_unit(Pointer
);
547 if !ty
.is_unsafe_ptr() {
548 data_ptr
.valid_range
= 1..=*data_ptr
.valid_range
.end();
551 let pointee
= tcx
.normalize_erasing_regions(param_env
, pointee
);
552 if pointee
.is_sized(tcx
.at(DUMMY_SP
), param_env
) {
553 return Ok(tcx
.intern_layout(Layout
::scalar(self, data_ptr
)));
556 let unsized_part
= tcx
.struct_tail_erasing_lifetimes(pointee
, param_env
);
557 let metadata
= match unsized_part
.kind() {
559 return Ok(tcx
.intern_layout(Layout
::scalar(self, data_ptr
)));
561 ty
::Slice(_
) | ty
::Str
=> scalar_unit(Int(dl
.ptr_sized_integer(), false)),
563 let mut vtable
= scalar_unit(Pointer
);
564 vtable
.valid_range
= 1..=*vtable
.valid_range
.end();
567 _
=> return Err(LayoutError
::Unknown(unsized_part
)),
570 // Effectively a (ptr, meta) tuple.
571 tcx
.intern_layout(self.scalar_pair(data_ptr
, metadata
))
574 // Arrays and slices.
575 ty
::Array(element
, mut count
) => {
576 if count
.has_projections() {
577 count
= tcx
.normalize_erasing_regions(param_env
, count
);
578 if count
.has_projections() {
579 return Err(LayoutError
::Unknown(ty
));
583 let count
= count
.try_eval_usize(tcx
, param_env
).ok_or(LayoutError
::Unknown(ty
))?
;
584 let element
= self.layout_of(element
)?
;
586 element
.size
.checked_mul(count
, dl
).ok_or(LayoutError
::SizeOverflow(ty
))?
;
589 if count
!= 0 && tcx
.conservative_is_privately_uninhabited(param_env
.and(ty
)) {
592 Abi
::Aggregate { sized: true }
595 let largest_niche
= if count
!= 0 { element.largest_niche.clone() }
else { None }
;
597 tcx
.intern_layout(Layout
{
598 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
599 fields
: FieldsShape
::Array { stride: element.size, count }
,
602 align
: element
.align
,
606 ty
::Slice(element
) => {
607 let element
= self.layout_of(element
)?
;
608 tcx
.intern_layout(Layout
{
609 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
610 fields
: FieldsShape
::Array { stride: element.size, count: 0 }
,
611 abi
: Abi
::Aggregate { sized: false }
,
613 align
: element
.align
,
617 ty
::Str
=> tcx
.intern_layout(Layout
{
618 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
619 fields
: FieldsShape
::Array { stride: Size::from_bytes(1), count: 0 }
,
620 abi
: Abi
::Aggregate { sized: false }
,
627 ty
::FnDef(..) => univariant(&[], &ReprOptions
::default(), StructKind
::AlwaysSized
)?
,
628 ty
::Dynamic(..) | ty
::Foreign(..) => {
629 let mut unit
= self.univariant_uninterned(
632 &ReprOptions
::default(),
633 StructKind
::AlwaysSized
,
636 Abi
::Aggregate { ref mut sized }
=> *sized
= false,
639 tcx
.intern_layout(unit
)
642 ty
::Generator(def_id
, substs
, _
) => self.generator_layout(ty
, def_id
, substs
)?
,
644 ty
::Closure(_
, ref substs
) => {
645 let tys
= substs
.as_closure().upvar_tys();
647 &tys
.map(|ty
| self.layout_of(ty
)).collect
::<Result
<Vec
<_
>, _
>>()?
,
648 &ReprOptions
::default(),
649 StructKind
::AlwaysSized
,
655 if tys
.len() == 0 { StructKind::AlwaysSized }
else { StructKind::MaybeUnsized }
;
659 .map(|k
| self.layout_of(k
.expect_ty()))
660 .collect
::<Result
<Vec
<_
>, _
>>()?
,
661 &ReprOptions
::default(),
666 // SIMD vector types.
667 ty
::Adt(def
, substs
) if def
.repr
.simd() => {
668 // Supported SIMD vectors are homogeneous ADTs with at least one field:
670 // * #[repr(simd)] struct S(T, T, T, T);
671 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
672 // * #[repr(simd)] struct S([T; 4])
674 // where T is a primitive scalar (integer/float/pointer).
676 // SIMD vectors with zero fields are not supported.
677 // (should be caught by typeck)
678 if def
.non_enum_variant().fields
.is_empty() {
679 tcx
.sess
.fatal(&format
!("monomorphising SIMD type `{}` of zero length", ty
));
682 // Type of the first ADT field:
683 let f0_ty
= def
.non_enum_variant().fields
[0].ty(tcx
, substs
);
685 // Heterogeneous SIMD vectors are not supported:
686 // (should be caught by typeck)
687 for fi
in &def
.non_enum_variant().fields
{
688 if fi
.ty(tcx
, substs
) != f0_ty
{
689 tcx
.sess
.fatal(&format
!("monomorphising heterogeneous SIMD type `{}`", ty
));
693 // The element type and number of elements of the SIMD vector
694 // are obtained from:
696 // * the element type and length of the single array field, if
697 // the first field is of array type, or
699 // * the homogenous field type and the number of fields.
700 let (e_ty
, e_len
, is_array
) = if let ty
::Array(e_ty
, _
) = f0_ty
.kind() {
701 // First ADT field is an array:
703 // SIMD vectors with multiple array fields are not supported:
704 // (should be caught by typeck)
705 if def
.non_enum_variant().fields
.len() != 1 {
706 tcx
.sess
.fatal(&format
!(
707 "monomorphising SIMD type `{}` with more than one array field",
712 // Extract the number of elements from the layout of the array field:
713 let len
= if let Ok(TyAndLayout
{
714 layout
: Layout { fields: FieldsShape::Array { count, .. }
, .. },
716 }) = self.layout_of(f0_ty
)
720 return Err(LayoutError
::Unknown(ty
));
725 // First ADT field is not an array:
726 (f0_ty
, def
.non_enum_variant().fields
.len() as _
, false)
729 // SIMD vectors of zero length are not supported.
730 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
733 // Can't be caught in typeck if the array length is generic.
735 tcx
.sess
.fatal(&format
!("monomorphising SIMD type `{}` of zero length", ty
));
736 } else if e_len
> MAX_SIMD_LANES
{
737 tcx
.sess
.fatal(&format
!(
738 "monomorphising SIMD type `{}` of length greater than {}",
743 // Compute the ABI of the element type:
744 let e_ly
= self.layout_of(e_ty
)?
;
745 let e_abi
= if let Abi
::Scalar(ref scalar
) = e_ly
.abi
{
748 // This error isn't caught in typeck, e.g., if
749 // the element type of the vector is generic.
750 tcx
.sess
.fatal(&format
!(
751 "monomorphising SIMD type `{}` with a non-primitive-scalar \
752 (integer/float/pointer) element type `{}`",
757 // Compute the size and alignment of the vector:
758 let size
= e_ly
.size
.checked_mul(e_len
, dl
).ok_or(LayoutError
::SizeOverflow(ty
))?
;
759 let align
= dl
.vector_align(size
);
760 let size
= size
.align_to(align
.abi
);
762 // Compute the placement of the vector fields:
763 let fields
= if is_array
{
764 FieldsShape
::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
766 FieldsShape
::Array { stride: e_ly.size, count: e_len }
769 tcx
.intern_layout(Layout
{
770 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
772 abi
: Abi
::Vector { element: e_abi, count: e_len }
,
773 largest_niche
: e_ly
.largest_niche
.clone(),
780 ty
::Adt(def
, substs
) => {
781 // Cache the field layouts.
788 .map(|field
| self.layout_of(field
.ty(tcx
, substs
)))
789 .collect
::<Result
<Vec
<_
>, _
>>()
791 .collect
::<Result
<IndexVec
<VariantIdx
, _
>, _
>>()?
;
794 if def
.repr
.pack
.is_some() && def
.repr
.align
.is_some() {
795 bug
!("union cannot be packed and aligned");
799 if def
.repr
.pack
.is_some() { dl.i8_align }
else { dl.aggregate_align }
;
801 if let Some(repr_align
) = def
.repr
.align
{
802 align
= align
.max(AbiAndPrefAlign
::new(repr_align
));
805 let optimize
= !def
.repr
.inhibit_union_abi_opt();
806 let mut size
= Size
::ZERO
;
807 let mut abi
= Abi
::Aggregate { sized: true }
;
808 let index
= VariantIdx
::new(0);
809 for field
in &variants
[index
] {
810 assert
!(!field
.is_unsized());
811 align
= align
.max(field
.align
);
813 // If all non-ZST fields have the same ABI, forward this ABI
814 if optimize
&& !field
.is_zst() {
815 // Normalize scalar_unit to the maximal valid range
816 let field_abi
= match &field
.abi
{
817 Abi
::Scalar(x
) => Abi
::Scalar(scalar_unit(x
.value
)),
818 Abi
::ScalarPair(x
, y
) => {
819 Abi
::ScalarPair(scalar_unit(x
.value
), scalar_unit(y
.value
))
821 Abi
::Vector { element: x, count }
=> {
822 Abi
::Vector { element: scalar_unit(x.value), count: *count }
824 Abi
::Uninhabited
| Abi
::Aggregate { .. }
=> {
825 Abi
::Aggregate { sized: true }
829 if size
== Size
::ZERO
{
830 // first non ZST: initialize 'abi'
832 } else if abi
!= field_abi
{
833 // different fields have different ABI: reset to Aggregate
834 abi
= Abi
::Aggregate { sized: true }
;
838 size
= cmp
::max(size
, field
.size
);
841 if let Some(pack
) = def
.repr
.pack
{
842 align
= align
.min(AbiAndPrefAlign
::new(pack
));
845 return Ok(tcx
.intern_layout(Layout
{
846 variants
: Variants
::Single { index }
,
847 fields
: FieldsShape
::Union(
848 NonZeroUsize
::new(variants
[index
].len())
849 .ok_or(LayoutError
::Unknown(ty
))?
,
854 size
: size
.align_to(align
.abi
),
858 // A variant is absent if it's uninhabited and only has ZST fields.
859 // Present uninhabited variants only require space for their fields,
860 // but *not* an encoding of the discriminant (e.g., a tag value).
861 // See issue #49298 for more details on the need to leave space
862 // for non-ZST uninhabited data (mostly partial initialization).
863 let absent
= |fields
: &[TyAndLayout
<'_
>]| {
864 let uninhabited
= fields
.iter().any(|f
| f
.abi
.is_uninhabited());
865 let is_zst
= fields
.iter().all(|f
| f
.is_zst());
866 uninhabited
&& is_zst
868 let (present_first
, present_second
) = {
869 let mut present_variants
= variants
871 .filter_map(|(i
, v
)| if absent(v
) { None }
else { Some(i) }
);
872 (present_variants
.next(), present_variants
.next())
874 let present_first
= match present_first
{
875 Some(present_first
) => present_first
,
876 // Uninhabited because it has no variants, or only absent ones.
877 None
if def
.is_enum() => return tcx
.layout_raw(param_env
.and(tcx
.types
.never
)),
878 // If it's a struct, still compute a layout so that we can still compute the
880 None
=> VariantIdx
::new(0),
883 let is_struct
= !def
.is_enum() ||
884 // Only one variant is present.
885 (present_second
.is_none() &&
886 // Representation optimizations are allowed.
887 !def
.repr
.inhibit_enum_layout_opt());
889 // Struct, or univariant enum equivalent to a struct.
890 // (Typechecking will reject discriminant-sizing attrs.)
892 let v
= present_first
;
893 let kind
= if def
.is_enum() || variants
[v
].is_empty() {
894 StructKind
::AlwaysSized
896 let param_env
= tcx
.param_env(def
.did
);
897 let last_field
= def
.variants
[v
].fields
.last().unwrap();
899 tcx
.type_of(last_field
.did
).is_sized(tcx
.at(DUMMY_SP
), param_env
);
901 StructKind
::MaybeUnsized
903 StructKind
::AlwaysSized
907 let mut st
= self.univariant_uninterned(ty
, &variants
[v
], &def
.repr
, kind
)?
;
908 st
.variants
= Variants
::Single { index: v }
;
909 let (start
, end
) = self.tcx
.layout_scalar_valid_range(def
.did
);
911 Abi
::Scalar(ref mut scalar
) | Abi
::ScalarPair(ref mut scalar
, _
) => {
912 // the asserts ensure that we are not using the
913 // `#[rustc_layout_scalar_valid_range(n)]`
914 // attribute to widen the range of anything as that would probably
915 // result in UB somewhere
916 // FIXME(eddyb) the asserts are probably not needed,
917 // as larger validity ranges would result in missed
918 // optimizations, *not* wrongly assuming the inner
919 // value is valid. e.g. unions enlarge validity ranges,
920 // because the values may be uninitialized.
921 if let Bound
::Included(start
) = start
{
922 // FIXME(eddyb) this might be incorrect - it doesn't
923 // account for wrap-around (end < start) ranges.
924 assert
!(*scalar
.valid_range
.start() <= start
);
925 scalar
.valid_range
= start
..=*scalar
.valid_range
.end();
927 if let Bound
::Included(end
) = end
{
928 // FIXME(eddyb) this might be incorrect - it doesn't
929 // account for wrap-around (end < start) ranges.
930 assert
!(*scalar
.valid_range
.end() >= end
);
931 scalar
.valid_range
= *scalar
.valid_range
.start()..=end
;
934 // Update `largest_niche` if we have introduced a larger niche.
935 let niche
= if def
.repr
.hide_niche() {
938 Niche
::from_scalar(dl
, Size
::ZERO
, scalar
.clone())
940 if let Some(niche
) = niche
{
941 match &st
.largest_niche
{
942 Some(largest_niche
) => {
943 // Replace the existing niche even if they're equal,
944 // because this one is at a lower offset.
945 if largest_niche
.available(dl
) <= niche
.available(dl
) {
946 st
.largest_niche
= Some(niche
);
949 None
=> st
.largest_niche
= Some(niche
),
954 start
== Bound
::Unbounded
&& end
== Bound
::Unbounded
,
955 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
961 return Ok(tcx
.intern_layout(st
));
964 // At this point, we have handled all unions and
965 // structs. (We have also handled univariant enums
966 // that allow representation optimization.)
967 assert
!(def
.is_enum());
969 // The current code for niche-filling relies on variant indices
970 // instead of actual discriminants, so dataful enums with
971 // explicit discriminants (RFC #2363) would misbehave.
972 let no_explicit_discriminants
= def
975 .all(|(i
, v
)| v
.discr
== ty
::VariantDiscr
::Relative(i
.as_u32()));
977 let mut niche_filling_layout
= None
;
979 // Niche-filling enum optimization.
980 if !def
.repr
.inhibit_enum_layout_opt() && no_explicit_discriminants
{
981 let mut dataful_variant
= None
;
982 let mut niche_variants
= VariantIdx
::MAX
..=VariantIdx
::new(0);
984 // Find one non-ZST variant.
985 'variants
: for (v
, fields
) in variants
.iter_enumerated() {
991 if dataful_variant
.is_none() {
992 dataful_variant
= Some(v
);
995 dataful_variant
= None
;
1000 niche_variants
= *niche_variants
.start().min(&v
)..=v
;
1003 if niche_variants
.start() > niche_variants
.end() {
1004 dataful_variant
= None
;
1007 if let Some(i
) = dataful_variant
{
1008 let count
= (niche_variants
.end().as_u32()
1009 - niche_variants
.start().as_u32()
1012 // Find the field with the largest niche
1013 let niche_candidate
= variants
[i
]
1016 .filter_map(|(j
, &field
)| Some((j
, field
.largest_niche
.as_ref()?
)))
1017 .max_by_key(|(_
, niche
)| niche
.available(dl
));
1019 if let Some((field_index
, niche
, (niche_start
, niche_scalar
))) =
1020 niche_candidate
.and_then(|(field_index
, niche
)| {
1021 Some((field_index
, niche
, niche
.reserve(self, count
)?
))
1024 let mut align
= dl
.aggregate_align
;
1028 let mut st
= self.univariant_uninterned(
1032 StructKind
::AlwaysSized
,
1034 st
.variants
= Variants
::Single { index: j }
;
1036 align
= align
.max(st
.align
);
1040 .collect
::<Result
<IndexVec
<VariantIdx
, _
>, _
>>()?
;
1042 let offset
= st
[i
].fields
.offset(field_index
) + niche
.offset
;
1043 let size
= st
[i
].size
;
1045 let abi
= if st
.iter().all(|v
| v
.abi
.is_uninhabited()) {
1049 Abi
::Scalar(_
) => Abi
::Scalar(niche_scalar
.clone()),
1050 Abi
::ScalarPair(ref first
, ref second
) => {
1051 // We need to use scalar_unit to reset the
1052 // valid range to the maximal one for that
1053 // primitive, because only the niche is
1054 // guaranteed to be initialised, not the
1056 if offset
.bytes() == 0 {
1058 niche_scalar
.clone(),
1059 scalar_unit(second
.value
),
1063 scalar_unit(first
.value
),
1064 niche_scalar
.clone(),
1068 _
=> Abi
::Aggregate { sized: true }
,
1073 Niche
::from_scalar(dl
, offset
, niche_scalar
.clone());
1075 niche_filling_layout
= Some(Layout
{
1076 variants
: Variants
::Multiple
{
1078 tag_encoding
: TagEncoding
::Niche
{
1086 fields
: FieldsShape
::Arbitrary
{
1087 offsets
: vec
![offset
],
1088 memory_index
: vec
![0],
1099 let (mut min
, mut max
) = (i128
::MAX
, i128
::MIN
);
1100 let discr_type
= def
.repr
.discr_type();
1101 let bits
= Integer
::from_attr(self, discr_type
).size().bits();
1102 for (i
, discr
) in def
.discriminants(tcx
) {
1103 if variants
[i
].iter().any(|f
| f
.abi
.is_uninhabited()) {
1106 let mut x
= discr
.val
as i128
;
1107 if discr_type
.is_signed() {
1108 // sign extend the raw representation to be an i128
1109 x
= (x
<< (128 - bits
)) >> (128 - bits
);
1118 // We might have no inhabited variants, so pretend there's at least one.
1119 if (min
, max
) == (i128
::MAX
, i128
::MIN
) {
1123 assert
!(min
<= max
, "discriminant range is {}...{}", min
, max
);
1124 let (min_ity
, signed
) = Integer
::repr_discr(tcx
, ty
, &def
.repr
, min
, max
);
1126 let mut align
= dl
.aggregate_align
;
1127 let mut size
= Size
::ZERO
;
1129 // We're interested in the smallest alignment, so start large.
1130 let mut start_align
= Align
::from_bytes(256).unwrap();
1131 assert_eq
!(Integer
::for_align(dl
, start_align
), None
);
1133 // repr(C) on an enum tells us to make a (tag, union) layout,
1134 // so we need to grow the prefix alignment to be at least
1135 // the alignment of the union. (This value is used both for
1136 // determining the alignment of the overall enum, and the
1137 // determining the alignment of the payload after the tag.)
1138 let mut prefix_align
= min_ity
.align(dl
).abi
;
1140 for fields
in &variants
{
1141 for field
in fields
{
1142 prefix_align
= prefix_align
.max(field
.align
.abi
);
1147 // Create the set of structs that represent each variant.
1148 let mut layout_variants
= variants
1150 .map(|(i
, field_layouts
)| {
1151 let mut st
= self.univariant_uninterned(
1155 StructKind
::Prefixed(min_ity
.size(), prefix_align
),
1157 st
.variants
= Variants
::Single { index: i }
;
1158 // Find the first field we can't move later
1159 // to make room for a larger discriminant.
1161 st
.fields
.index_by_increasing_offset().map(|j
| field_layouts
[j
])
1163 if !field
.is_zst() || field
.align
.abi
.bytes() != 1 {
1164 start_align
= start_align
.min(field
.align
.abi
);
1168 size
= cmp
::max(size
, st
.size
);
1169 align
= align
.max(st
.align
);
1172 .collect
::<Result
<IndexVec
<VariantIdx
, _
>, _
>>()?
;
1174 // Align the maximum variant size to the largest alignment.
1175 size
= size
.align_to(align
.abi
);
1177 if size
.bytes() >= dl
.obj_size_bound() {
1178 return Err(LayoutError
::SizeOverflow(ty
));
1181 let typeck_ity
= Integer
::from_attr(dl
, def
.repr
.discr_type());
1182 if typeck_ity
< min_ity
{
1183 // It is a bug if Layout decided on a greater discriminant size than typeck for
1184 // some reason at this point (based on values discriminant can take on). Mostly
1185 // because this discriminant will be loaded, and then stored into variable of
1186 // type calculated by typeck. Consider such case (a bug): typeck decided on
1187 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1188 // discriminant values. That would be a bug, because then, in codegen, in order
1189 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1190 // space necessary to represent would have to be discarded (or layout is wrong
1191 // on thinking it needs 16 bits)
1193 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1197 // However, it is fine to make discr type however large (as an optimisation)
1198 // after this point – we’ll just truncate the value we load in codegen.
1201 // Check to see if we should use a different type for the
1202 // discriminant. We can safely use a type with the same size
1203 // as the alignment of the first field of each variant.
1204 // We increase the size of the discriminant to avoid LLVM copying
1205 // padding when it doesn't need to. This normally causes unaligned
1206 // load/stores and excessive memcpy/memset operations. By using a
1207 // bigger integer size, LLVM can be sure about its contents and
1208 // won't be so conservative.
1210 // Use the initial field alignment
1211 let mut ity
= if def
.repr
.c() || def
.repr
.int
.is_some() {
1214 Integer
::for_align(dl
, start_align
).unwrap_or(min_ity
)
1217 // If the alignment is not larger than the chosen discriminant size,
1218 // don't use the alignment as the final size.
1222 // Patch up the variants' first few fields.
1223 let old_ity_size
= min_ity
.size();
1224 let new_ity_size
= ity
.size();
1225 for variant
in &mut layout_variants
{
1226 match variant
.fields
{
1227 FieldsShape
::Arbitrary { ref mut offsets, .. }
=> {
1229 if *i
<= old_ity_size
{
1230 assert_eq
!(*i
, old_ity_size
);
1234 // We might be making the struct larger.
1235 if variant
.size
<= old_ity_size
{
1236 variant
.size
= new_ity_size
;
1244 let tag_mask
= !0u128 >> (128 - ity
.size().bits());
1246 value
: Int(ity
, signed
),
1247 valid_range
: (min
as u128
& tag_mask
)..=(max
as u128
& tag_mask
),
1249 let mut abi
= Abi
::Aggregate { sized: true }
;
1250 if tag
.value
.size(dl
) == size
{
1251 abi
= Abi
::Scalar(tag
.clone());
1253 // Try to use a ScalarPair for all tagged enums.
1254 let mut common_prim
= None
;
1255 for (field_layouts
, layout_variant
) in iter
::zip(&variants
, &layout_variants
) {
1256 let offsets
= match layout_variant
.fields
{
1257 FieldsShape
::Arbitrary { ref offsets, .. }
=> offsets
,
1261 iter
::zip(field_layouts
, offsets
).filter(|p
| !p
.0.is_zst
());
1262 let (field
, offset
) = match (fields
.next(), fields
.next()) {
1263 (None
, None
) => continue,
1264 (Some(pair
), None
) => pair
,
1270 let prim
= match field
.abi
{
1271 Abi
::Scalar(ref scalar
) => scalar
.value
,
1277 if let Some(pair
) = common_prim
{
1278 // This is pretty conservative. We could go fancier
1279 // by conflating things like i32 and u32, or even
1280 // realising that (u8, u8) could just cohabit with
1282 if pair
!= (prim
, offset
) {
1287 common_prim
= Some((prim
, offset
));
1290 if let Some((prim
, offset
)) = common_prim
{
1291 let pair
= self.scalar_pair(tag
.clone(), scalar_unit(prim
));
1292 let pair_offsets
= match pair
.fields
{
1293 FieldsShape
::Arbitrary { ref offsets, ref memory_index }
=> {
1294 assert_eq
!(memory_index
, &[0, 1]);
1299 if pair_offsets
[0] == Size
::ZERO
1300 && pair_offsets
[1] == *offset
1301 && align
== pair
.align
1302 && size
== pair
.size
1304 // We can use `ScalarPair` only when it matches our
1305 // already computed layout (including `#[repr(C)]`).
1311 if layout_variants
.iter().all(|v
| v
.abi
.is_uninhabited()) {
1312 abi
= Abi
::Uninhabited
;
1315 let largest_niche
= Niche
::from_scalar(dl
, Size
::ZERO
, tag
.clone());
1317 let tagged_layout
= Layout
{
1318 variants
: Variants
::Multiple
{
1320 tag_encoding
: TagEncoding
::Direct
,
1322 variants
: layout_variants
,
1324 fields
: FieldsShape
::Arbitrary
{
1325 offsets
: vec
![Size
::ZERO
],
1326 memory_index
: vec
![0],
1334 let best_layout
= match (tagged_layout
, niche_filling_layout
) {
1335 (tagged_layout
, Some(niche_filling_layout
)) => {
1336 // Pick the smaller layout; otherwise,
1337 // pick the layout with the larger niche; otherwise,
1338 // pick tagged as it has simpler codegen.
1339 cmp
::min_by_key(tagged_layout
, niche_filling_layout
, |layout
| {
1341 layout
.largest_niche
.as_ref().map_or(0, |n
| n
.available(dl
));
1342 (layout
.size
, cmp
::Reverse(niche_size
))
1345 (tagged_layout
, None
) => tagged_layout
,
1348 tcx
.intern_layout(best_layout
)
1351 // Types with no meaningful known layout.
1352 ty
::Projection(_
) | ty
::Opaque(..) => {
1353 let normalized
= tcx
.normalize_erasing_regions(param_env
, ty
);
1354 if ty
== normalized
{
1355 return Err(LayoutError
::Unknown(ty
));
1357 tcx
.layout_raw(param_env
.and(normalized
))?
1360 ty
::Placeholder(..) | ty
::GeneratorWitness(..) | ty
::Infer(_
) => {
1361 bug
!("Layout::compute: unexpected type `{}`", ty
)
1364 ty
::Bound(..) | ty
::Param(_
) | ty
::Error(_
) => {
1365 return Err(LayoutError
::Unknown(ty
));
1371 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1372 #[derive(Clone, Debug, PartialEq)]
1373 enum SavedLocalEligibility
{
1375 Assigned(VariantIdx
),
1376 // FIXME: Use newtype_index so we aren't wasting bytes
1377 Ineligible(Option
<u32>),
1380 // When laying out generators, we divide our saved local fields into two
1381 // categories: overlap-eligible and overlap-ineligible.
1383 // Those fields which are ineligible for overlap go in a "prefix" at the
1384 // beginning of the layout, and always have space reserved for them.
1386 // Overlap-eligible fields are only assigned to one variant, so we lay
1387 // those fields out for each variant and put them right after the
1390 // Finally, in the layout details, we point to the fields from the
1391 // variants they are assigned to. It is possible for some fields to be
1392 // included in multiple variants. No field ever "moves around" in the
1393 // layout; its offset is always the same.
1395 // Also included in the layout are the upvars and the discriminant.
1396 // These are included as fields on the "outer" layout; they are not part
1398 impl<'tcx
> LayoutCx
<'tcx
, TyCtxt
<'tcx
>> {
1399 /// Compute the eligibility and assignment of each local.
1400 fn generator_saved_local_eligibility(
1402 info
: &GeneratorLayout
<'tcx
>,
1403 ) -> (BitSet
<GeneratorSavedLocal
>, IndexVec
<GeneratorSavedLocal
, SavedLocalEligibility
>) {
1404 use SavedLocalEligibility
::*;
1406 let mut assignments
: IndexVec
<GeneratorSavedLocal
, SavedLocalEligibility
> =
1407 IndexVec
::from_elem_n(Unassigned
, info
.field_tys
.len());
1409 // The saved locals not eligible for overlap. These will get
1410 // "promoted" to the prefix of our generator.
1411 let mut ineligible_locals
= BitSet
::new_empty(info
.field_tys
.len());
1413 // Figure out which of our saved locals are fields in only
1414 // one variant. The rest are deemed ineligible for overlap.
1415 for (variant_index
, fields
) in info
.variant_fields
.iter_enumerated() {
1416 for local
in fields
{
1417 match assignments
[*local
] {
1419 assignments
[*local
] = Assigned(variant_index
);
1422 // We've already seen this local at another suspension
1423 // point, so it is no longer a candidate.
1425 "removing local {:?} in >1 variant ({:?}, {:?})",
1430 ineligible_locals
.insert(*local
);
1431 assignments
[*local
] = Ineligible(None
);
1438 // Next, check every pair of eligible locals to see if they
1440 for local_a
in info
.storage_conflicts
.rows() {
1441 let conflicts_a
= info
.storage_conflicts
.count(local_a
);
1442 if ineligible_locals
.contains(local_a
) {
1446 for local_b
in info
.storage_conflicts
.iter(local_a
) {
1447 // local_a and local_b are storage live at the same time, therefore they
1448 // cannot overlap in the generator layout. The only way to guarantee
1449 // this is if they are in the same variant, or one is ineligible
1450 // (which means it is stored in every variant).
1451 if ineligible_locals
.contains(local_b
)
1452 || assignments
[local_a
] == assignments
[local_b
]
1457 // If they conflict, we will choose one to make ineligible.
1458 // This is not always optimal; it's just a greedy heuristic that
1459 // seems to produce good results most of the time.
1460 let conflicts_b
= info
.storage_conflicts
.count(local_b
);
1461 let (remove
, other
) =
1462 if conflicts_a
> conflicts_b { (local_a, local_b) }
else { (local_b, local_a) }
;
1463 ineligible_locals
.insert(remove
);
1464 assignments
[remove
] = Ineligible(None
);
1465 trace
!("removing local {:?} due to conflict with {:?}", remove
, other
);
1469 // Count the number of variants in use. If only one of them, then it is
1470 // impossible to overlap any locals in our layout. In this case it's
1471 // always better to make the remaining locals ineligible, so we can
1472 // lay them out with the other locals in the prefix and eliminate
1473 // unnecessary padding bytes.
1475 let mut used_variants
= BitSet
::new_empty(info
.variant_fields
.len());
1476 for assignment
in &assignments
{
1477 if let Assigned(idx
) = assignment
{
1478 used_variants
.insert(*idx
);
1481 if used_variants
.count() < 2 {
1482 for assignment
in assignments
.iter_mut() {
1483 *assignment
= Ineligible(None
);
1485 ineligible_locals
.insert_all();
1489 // Write down the order of our locals that will be promoted to the prefix.
1491 for (idx
, local
) in ineligible_locals
.iter().enumerate() {
1492 assignments
[local
] = Ineligible(Some(idx
as u32));
1495 debug
!("generator saved local assignments: {:?}", assignments
);
1497 (ineligible_locals
, assignments
)
1500 /// Compute the full generator layout.
1501 fn generator_layout(
1504 def_id
: hir
::def_id
::DefId
,
1505 substs
: SubstsRef
<'tcx
>,
1506 ) -> Result
<&'tcx Layout
, LayoutError
<'tcx
>> {
1507 use SavedLocalEligibility
::*;
1509 let subst_field
= |ty
: Ty
<'tcx
>| ty
.subst(tcx
, substs
);
1511 let info
= match tcx
.generator_layout(def_id
) {
1512 None
=> return Err(LayoutError
::Unknown(ty
)),
1515 let (ineligible_locals
, assignments
) = self.generator_saved_local_eligibility(&info
);
1517 // Build a prefix layout, including "promoting" all ineligible
1518 // locals as part of the prefix. We compute the layout of all of
1519 // these fields at once to get optimal packing.
1520 let tag_index
= substs
.as_generator().prefix_tys().count();
1522 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1523 let max_discr
= (info
.variant_fields
.len() - 1) as u128
;
1524 let discr_int
= Integer
::fit_unsigned(max_discr
);
1525 let discr_int_ty
= discr_int
.to_ty(tcx
, false);
1526 let tag
= Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr }
;
1527 let tag_layout
= self.tcx
.intern_layout(Layout
::scalar(self, tag
.clone()));
1528 let tag_layout
= TyAndLayout { ty: discr_int_ty, layout: tag_layout }
;
1530 let promoted_layouts
= ineligible_locals
1532 .map(|local
| subst_field(info
.field_tys
[local
]))
1533 .map(|ty
| tcx
.mk_maybe_uninit(ty
))
1534 .map(|ty
| self.layout_of(ty
));
1535 let prefix_layouts
= substs
1538 .map(|ty
| self.layout_of(ty
))
1539 .chain(iter
::once(Ok(tag_layout
)))
1540 .chain(promoted_layouts
)
1541 .collect
::<Result
<Vec
<_
>, _
>>()?
;
1542 let prefix
= self.univariant_uninterned(
1545 &ReprOptions
::default(),
1546 StructKind
::AlwaysSized
,
1549 let (prefix_size
, prefix_align
) = (prefix
.size
, prefix
.align
);
1551 // Split the prefix layout into the "outer" fields (upvars and
1552 // discriminant) and the "promoted" fields. Promoted fields will
1553 // get included in each variant that requested them in
1555 debug
!("prefix = {:#?}", prefix
);
1556 let (outer_fields
, promoted_offsets
, promoted_memory_index
) = match prefix
.fields
{
1557 FieldsShape
::Arbitrary { mut offsets, memory_index }
=> {
1558 let mut inverse_memory_index
= invert_mapping(&memory_index
);
1560 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1561 // "outer" and "promoted" fields respectively.
1562 let b_start
= (tag_index
+ 1) as u32;
1563 let offsets_b
= offsets
.split_off(b_start
as usize);
1564 let offsets_a
= offsets
;
1566 // Disentangle the "a" and "b" components of `inverse_memory_index`
1567 // by preserving the order but keeping only one disjoint "half" each.
1568 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1569 let inverse_memory_index_b
: Vec
<_
> =
1570 inverse_memory_index
.iter().filter_map(|&i
| i
.checked_sub(b_start
)).collect();
1571 inverse_memory_index
.retain(|&i
| i
< b_start
);
1572 let inverse_memory_index_a
= inverse_memory_index
;
1574 // Since `inverse_memory_index_{a,b}` each only refer to their
1575 // respective fields, they can be safely inverted
1576 let memory_index_a
= invert_mapping(&inverse_memory_index_a
);
1577 let memory_index_b
= invert_mapping(&inverse_memory_index_b
);
1580 FieldsShape
::Arbitrary { offsets: offsets_a, memory_index: memory_index_a }
;
1581 (outer_fields
, offsets_b
, memory_index_b
)
1586 let mut size
= prefix
.size
;
1587 let mut align
= prefix
.align
;
1591 .map(|(index
, variant_fields
)| {
1592 // Only include overlap-eligible fields when we compute our variant layout.
1593 let variant_only_tys
= variant_fields
1595 .filter(|local
| match assignments
[**local
] {
1596 Unassigned
=> bug
!(),
1597 Assigned(v
) if v
== index
=> true,
1598 Assigned(_
) => bug
!("assignment does not match variant"),
1599 Ineligible(_
) => false,
1601 .map(|local
| subst_field(info
.field_tys
[*local
]));
1603 let mut variant
= self.univariant_uninterned(
1606 .map(|ty
| self.layout_of(ty
))
1607 .collect
::<Result
<Vec
<_
>, _
>>()?
,
1608 &ReprOptions
::default(),
1609 StructKind
::Prefixed(prefix_size
, prefix_align
.abi
),
1611 variant
.variants
= Variants
::Single { index }
;
1613 let (offsets
, memory_index
) = match variant
.fields
{
1614 FieldsShape
::Arbitrary { offsets, memory_index }
=> (offsets
, memory_index
),
1618 // Now, stitch the promoted and variant-only fields back together in
1619 // the order they are mentioned by our GeneratorLayout.
1620 // Because we only use some subset (that can differ between variants)
1621 // of the promoted fields, we can't just pick those elements of the
1622 // `promoted_memory_index` (as we'd end up with gaps).
1623 // So instead, we build an "inverse memory_index", as if all of the
1624 // promoted fields were being used, but leave the elements not in the
1625 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1626 // obtain a valid (bijective) mapping.
1627 const INVALID_FIELD_IDX
: u32 = !0;
1628 let mut combined_inverse_memory_index
=
1629 vec
![INVALID_FIELD_IDX
; promoted_memory_index
.len() + memory_index
.len()];
1630 let mut offsets_and_memory_index
= iter
::zip(offsets
, memory_index
);
1631 let combined_offsets
= variant_fields
1635 let (offset
, memory_index
) = match assignments
[*local
] {
1636 Unassigned
=> bug
!(),
1638 let (offset
, memory_index
) =
1639 offsets_and_memory_index
.next().unwrap();
1640 (offset
, promoted_memory_index
.len() as u32 + memory_index
)
1642 Ineligible(field_idx
) => {
1643 let field_idx
= field_idx
.unwrap() as usize;
1644 (promoted_offsets
[field_idx
], promoted_memory_index
[field_idx
])
1647 combined_inverse_memory_index
[memory_index
as usize] = i
as u32;
1652 // Remove the unused slots and invert the mapping to obtain the
1653 // combined `memory_index` (also see previous comment).
1654 combined_inverse_memory_index
.retain(|&i
| i
!= INVALID_FIELD_IDX
);
1655 let combined_memory_index
= invert_mapping(&combined_inverse_memory_index
);
1657 variant
.fields
= FieldsShape
::Arbitrary
{
1658 offsets
: combined_offsets
,
1659 memory_index
: combined_memory_index
,
1662 size
= size
.max(variant
.size
);
1663 align
= align
.max(variant
.align
);
1666 .collect
::<Result
<IndexVec
<VariantIdx
, _
>, _
>>()?
;
1668 size
= size
.align_to(align
.abi
);
1670 let abi
= if prefix
.abi
.is_uninhabited() || variants
.iter().all(|v
| v
.abi
.is_uninhabited())
1674 Abi
::Aggregate { sized: true }
1677 let layout
= tcx
.intern_layout(Layout
{
1678 variants
: Variants
::Multiple
{
1680 tag_encoding
: TagEncoding
::Direct
,
1681 tag_field
: tag_index
,
1684 fields
: outer_fields
,
1686 largest_niche
: prefix
.largest_niche
,
1690 debug
!("generator layout ({:?}): {:#?}", ty
, layout
);
1694 /// This is invoked by the `layout_raw` query to record the final
1695 /// layout of each type.
1697 fn record_layout_for_printing(&self, layout
: TyAndLayout
<'tcx
>) {
1698 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1699 // for dumping later.
1700 if self.tcx
.sess
.opts
.debugging_opts
.print_type_sizes
{
1701 self.record_layout_for_printing_outlined(layout
)
1705 fn record_layout_for_printing_outlined(&self, layout
: TyAndLayout
<'tcx
>) {
1706 // Ignore layouts that are done with non-empty environments or
1707 // non-monomorphic layouts, as the user only wants to see the stuff
1708 // resulting from the final codegen session.
1709 if layout
.ty
.has_param_types_or_consts() || !self.param_env
.caller_bounds().is_empty() {
1713 // (delay format until we actually need it)
1714 let record
= |kind
, packed
, opt_discr_size
, variants
| {
1715 let type_desc
= format
!("{:?}", layout
.ty
);
1716 self.tcx
.sess
.code_stats
.record_type_size(
1727 let adt_def
= match *layout
.ty
.kind() {
1728 ty
::Adt(ref adt_def
, _
) => {
1729 debug
!("print-type-size t: `{:?}` process adt", layout
.ty
);
1733 ty
::Closure(..) => {
1734 debug
!("print-type-size t: `{:?}` record closure", layout
.ty
);
1735 record(DataTypeKind
::Closure
, false, None
, vec
![]);
1740 debug
!("print-type-size t: `{:?}` skip non-nominal", layout
.ty
);
1745 let adt_kind
= adt_def
.adt_kind();
1746 let adt_packed
= adt_def
.repr
.pack
.is_some();
1748 let build_variant_info
= |n
: Option
<Ident
>, flds
: &[Symbol
], layout
: TyAndLayout
<'tcx
>| {
1749 let mut min_size
= Size
::ZERO
;
1750 let field_info
: Vec
<_
> = flds
1753 .map(|(i
, &name
)| match layout
.field(self, i
) {
1755 bug
!("no layout found for field {}: `{:?}`", name
, err
);
1757 Ok(field_layout
) => {
1758 let offset
= layout
.fields
.offset(i
);
1759 let field_end
= offset
+ field_layout
.size
;
1760 if min_size
< field_end
{
1761 min_size
= field_end
;
1764 name
: name
.to_string(),
1765 offset
: offset
.bytes(),
1766 size
: field_layout
.size
.bytes(),
1767 align
: field_layout
.align
.abi
.bytes(),
1774 name
: n
.map(|n
| n
.to_string()),
1775 kind
: if layout
.is_unsized() { SizeKind::Min }
else { SizeKind::Exact }
,
1776 align
: layout
.align
.abi
.bytes(),
1777 size
: if min_size
.bytes() == 0 { layout.size.bytes() }
else { min_size.bytes() }
,
1782 match layout
.variants
{
1783 Variants
::Single { index }
=> {
1784 debug
!("print-type-size `{:#?}` variant {}", layout
, adt_def
.variants
[index
].ident
);
1785 if !adt_def
.variants
.is_empty() {
1786 let variant_def
= &adt_def
.variants
[index
];
1787 let fields
: Vec
<_
> = variant_def
.fields
.iter().map(|f
| f
.ident
.name
).collect();
1792 vec
![build_variant_info(Some(variant_def
.ident
), &fields
, layout
)],
1795 // (This case arises for *empty* enums; so give it
1797 record(adt_kind
.into(), adt_packed
, None
, vec
![]);
1801 Variants
::Multiple { ref tag, ref tag_encoding, .. }
=> {
1803 "print-type-size `{:#?}` adt general variants def {}",
1805 adt_def
.variants
.len()
1807 let variant_infos
: Vec
<_
> = adt_def
1810 .map(|(i
, variant_def
)| {
1811 let fields
: Vec
<_
> =
1812 variant_def
.fields
.iter().map(|f
| f
.ident
.name
).collect();
1814 Some(variant_def
.ident
),
1816 layout
.for_variant(self, i
),
1823 match tag_encoding
{
1824 TagEncoding
::Direct
=> Some(tag
.value
.size(self)),
1834 /// Type size "skeleton", i.e., the only information determining a type's size.
1835 /// While this is conservative, (aside from constant sizes, only pointers,
1836 /// newtypes thereof and null pointer optimized enums are allowed), it is
1837 /// enough to statically check common use cases of transmute.
1838 #[derive(Copy, Clone, Debug)]
1839 pub enum SizeSkeleton
<'tcx
> {
1840 /// Any statically computable Layout.
1843 /// A potentially-fat pointer.
1845 /// If true, this pointer is never null.
1847 /// The type which determines the unsized metadata, if any,
1848 /// of this pointer. Either a type parameter or a projection
1849 /// depending on one, with regions erased.
1854 impl<'tcx
> SizeSkeleton
<'tcx
> {
1858 param_env
: ty
::ParamEnv
<'tcx
>,
1859 ) -> Result
<SizeSkeleton
<'tcx
>, LayoutError
<'tcx
>> {
1860 debug_assert
!(!ty
.has_infer_types_or_consts());
1862 // First try computing a static layout.
1863 let err
= match tcx
.layout_of(param_env
.and(ty
)) {
1865 return Ok(SizeSkeleton
::Known(layout
.size
));
1871 ty
::Ref(_
, pointee
, _
) | ty
::RawPtr(ty
::TypeAndMut { ty: pointee, .. }
) => {
1872 let non_zero
= !ty
.is_unsafe_ptr();
1873 let tail
= tcx
.struct_tail_erasing_lifetimes(pointee
, param_env
);
1875 ty
::Param(_
) | ty
::Projection(_
) => {
1876 debug_assert
!(tail
.has_param_types_or_consts());
1877 Ok(SizeSkeleton
::Pointer { non_zero, tail: tcx.erase_regions(tail) }
)
1880 "SizeSkeleton::compute({}): layout errored ({}), yet \
1881 tail `{}` is not a type parameter or a projection",
1889 ty
::Adt(def
, substs
) => {
1890 // Only newtypes and enums w/ nullable pointer optimization.
1891 if def
.is_union() || def
.variants
.is_empty() || def
.variants
.len() > 2 {
1895 // Get a zero-sized variant or a pointer newtype.
1896 let zero_or_ptr_variant
= |i
| {
1897 let i
= VariantIdx
::new(i
);
1898 let fields
= def
.variants
[i
]
1901 .map(|field
| SizeSkeleton
::compute(field
.ty(tcx
, substs
), tcx
, param_env
));
1903 for field
in fields
{
1906 SizeSkeleton
::Known(size
) => {
1907 if size
.bytes() > 0 {
1911 SizeSkeleton
::Pointer { .. }
=> {
1922 let v0
= zero_or_ptr_variant(0)?
;
1924 if def
.variants
.len() == 1 {
1925 if let Some(SizeSkeleton
::Pointer { non_zero, tail }
) = v0
{
1926 return Ok(SizeSkeleton
::Pointer
{
1928 || match tcx
.layout_scalar_valid_range(def
.did
) {
1929 (Bound
::Included(start
), Bound
::Unbounded
) => start
> 0,
1930 (Bound
::Included(start
), Bound
::Included(end
)) => {
1931 0 < start
&& start
< end
1942 let v1
= zero_or_ptr_variant(1)?
;
1943 // Nullable pointer enum optimization.
1945 (Some(SizeSkeleton
::Pointer { non_zero: true, tail }
), None
)
1946 | (None
, Some(SizeSkeleton
::Pointer { non_zero: true, tail }
)) => {
1947 Ok(SizeSkeleton
::Pointer { non_zero: false, tail }
)
1953 ty
::Projection(_
) | ty
::Opaque(..) => {
1954 let normalized
= tcx
.normalize_erasing_regions(param_env
, ty
);
1955 if ty
== normalized
{
1958 SizeSkeleton
::compute(normalized
, tcx
, param_env
)
1966 pub fn same_size(self, other
: SizeSkeleton
<'_
>) -> bool
{
1967 match (self, other
) {
1968 (SizeSkeleton
::Known(a
), SizeSkeleton
::Known(b
)) => a
== b
,
1969 (SizeSkeleton
::Pointer { tail: a, .. }
, SizeSkeleton
::Pointer { tail: b, .. }
) => {
1977 pub trait HasTyCtxt
<'tcx
>: HasDataLayout
{
1978 fn tcx(&self) -> TyCtxt
<'tcx
>;
1981 pub trait HasParamEnv
<'tcx
> {
1982 fn param_env(&self) -> ty
::ParamEnv
<'tcx
>;
1985 impl<'tcx
> HasDataLayout
for TyCtxt
<'tcx
> {
1986 fn data_layout(&self) -> &TargetDataLayout
{
1991 impl<'tcx
> HasTyCtxt
<'tcx
> for TyCtxt
<'tcx
> {
1992 fn tcx(&self) -> TyCtxt
<'tcx
> {
1997 impl<'tcx
, C
> HasParamEnv
<'tcx
> for LayoutCx
<'tcx
, C
> {
1998 fn param_env(&self) -> ty
::ParamEnv
<'tcx
> {
2003 impl<'tcx
, T
: HasDataLayout
> HasDataLayout
for LayoutCx
<'tcx
, T
> {
2004 fn data_layout(&self) -> &TargetDataLayout
{
2005 self.tcx
.data_layout()
2009 impl<'tcx
, T
: HasTyCtxt
<'tcx
>> HasTyCtxt
<'tcx
> for LayoutCx
<'tcx
, T
> {
2010 fn tcx(&self) -> TyCtxt
<'tcx
> {
2015 pub type TyAndLayout
<'tcx
> = rustc_target
::abi
::TyAndLayout
<'tcx
, Ty
<'tcx
>>;
2017 impl<'tcx
> LayoutOf
for LayoutCx
<'tcx
, TyCtxt
<'tcx
>> {
2019 type TyAndLayout
= Result
<TyAndLayout
<'tcx
>, LayoutError
<'tcx
>>;
2021 /// Computes the layout of a type. Note that this implicitly
2022 /// executes in "reveal all" mode.
2023 fn layout_of(&self, ty
: Ty
<'tcx
>) -> Self::TyAndLayout
{
2024 let param_env
= self.param_env
.with_reveal_all_normalized(self.tcx
);
2025 let ty
= self.tcx
.normalize_erasing_regions(param_env
, ty
);
2026 let layout
= self.tcx
.layout_raw(param_env
.and(ty
))?
;
2027 let layout
= TyAndLayout { ty, layout }
;
2029 // N.B., this recording is normally disabled; when enabled, it
2030 // can however trigger recursive invocations of `layout_of`.
2031 // Therefore, we execute it *after* the main query has
2032 // completed, to avoid problems around recursive structures
2033 // and the like. (Admittedly, I wasn't able to reproduce a problem
2034 // here, but it seems like the right thing to do. -nmatsakis)
2035 self.record_layout_for_printing(layout
);
2041 impl LayoutOf
for LayoutCx
<'tcx
, ty
::query
::TyCtxtAt
<'tcx
>> {
2043 type TyAndLayout
= Result
<TyAndLayout
<'tcx
>, LayoutError
<'tcx
>>;
2045 /// Computes the layout of a type. Note that this implicitly
2046 /// executes in "reveal all" mode.
2047 fn layout_of(&self, ty
: Ty
<'tcx
>) -> Self::TyAndLayout
{
2048 let param_env
= self.param_env
.with_reveal_all_normalized(*self.tcx
);
2049 let ty
= self.tcx
.normalize_erasing_regions(param_env
, ty
);
2050 let layout
= self.tcx
.layout_raw(param_env
.and(ty
))?
;
2051 let layout
= TyAndLayout { ty, layout }
;
2053 // N.B., this recording is normally disabled; when enabled, it
2054 // can however trigger recursive invocations of `layout_of`.
2055 // Therefore, we execute it *after* the main query has
2056 // completed, to avoid problems around recursive structures
2057 // and the like. (Admittedly, I wasn't able to reproduce a problem
2058 // here, but it seems like the right thing to do. -nmatsakis)
2059 let cx
= LayoutCx { tcx: *self.tcx, param_env: self.param_env }
;
2060 cx
.record_layout_for_printing(layout
);
2066 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2068 /// Computes the layout of a type. Note that this implicitly
2069 /// executes in "reveal all" mode.
2073 param_env_and_ty
: ty
::ParamEnvAnd
<'tcx
, Ty
<'tcx
>>,
2074 ) -> Result
<TyAndLayout
<'tcx
>, LayoutError
<'tcx
>> {
2075 let cx
= LayoutCx { tcx: self, param_env: param_env_and_ty.param_env }
;
2076 cx
.layout_of(param_env_and_ty
.value
)
2080 impl ty
::query
::TyCtxtAt
<'tcx
> {
2081 /// Computes the layout of a type. Note that this implicitly
2082 /// executes in "reveal all" mode.
2086 param_env_and_ty
: ty
::ParamEnvAnd
<'tcx
, Ty
<'tcx
>>,
2087 ) -> Result
<TyAndLayout
<'tcx
>, LayoutError
<'tcx
>> {
2088 let cx
= LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env }
;
2089 cx
.layout_of(param_env_and_ty
.value
)
2093 impl<'tcx
, C
> TyAndLayoutMethods
<'tcx
, C
> for Ty
<'tcx
>
2095 C
: LayoutOf
<Ty
= Ty
<'tcx
>, TyAndLayout
: MaybeResult
<TyAndLayout
<'tcx
>>>
2097 + HasParamEnv
<'tcx
>,
2100 this
: TyAndLayout
<'tcx
>,
2102 variant_index
: VariantIdx
,
2103 ) -> TyAndLayout
<'tcx
> {
2104 let layout
= match this
.variants
{
2105 Variants
::Single { index }
2106 // If all variants but one are uninhabited, the variant layout is the enum layout.
2107 if index
== variant_index
&&
2108 // Don't confuse variants of uninhabited enums with the enum itself.
2109 // For more details see https://github.com/rust-lang/rust/issues/69763.
2110 this
.fields
!= FieldsShape
::Primitive
=>
2115 Variants
::Single { index }
=> {
2116 // Deny calling for_variant more than once for non-Single enums.
2117 if let Ok(original_layout
) = cx
.layout_of(this
.ty
).to_result() {
2118 assert_eq
!(original_layout
.variants
, Variants
::Single { index }
);
2121 let fields
= match this
.ty
.kind() {
2122 ty
::Adt(def
, _
) if def
.variants
.is_empty() =>
2123 bug
!("for_variant called on zero-variant enum"),
2124 ty
::Adt(def
, _
) => def
.variants
[variant_index
].fields
.len(),
2128 tcx
.intern_layout(Layout
{
2129 variants
: Variants
::Single { index: variant_index }
,
2130 fields
: match NonZeroUsize
::new(fields
) {
2131 Some(fields
) => FieldsShape
::Union(fields
),
2132 None
=> FieldsShape
::Arbitrary { offsets: vec![], memory_index: vec![] }
,
2134 abi
: Abi
::Uninhabited
,
2135 largest_niche
: None
,
2136 align
: tcx
.data_layout
.i8_align
,
2141 Variants
::Multiple { ref variants, .. }
=> &variants
[variant_index
],
2144 assert_eq
!(layout
.variants
, Variants
::Single { index: variant_index }
);
2146 TyAndLayout { ty: this.ty, layout }
// NOTE(review): damaged extraction — tokens split across lines and interior lines
// missing (embedded original line numbers jump). Code left byte-identical;
// comments only added.
//
// `field`: resolves field `i` of `this` to either a plain type (handed back to
// `cx.layout_of`) or an already-computed layout. The nested helper
// `ty_and_layout_kind` matches on the type's kind: non-aggregate kinds
// (`GeneratorWitness`, `Dynamic`, …) bug out as "not applicable"; fat pointers
// reuse the pointer type as its own thin data field and rewrite the metadata field
// from the unsized tail (`usize` for slices/str, a 3-`usize` vtable-shaped ref for
// trait objects — see the FIXME about real fn-pointer vtables); arrays/slices
// yield the element type, `str` yields `u8`; closures recurse into the tupled
// upvars; generators pick a saved state type (`Variants::Single`) or the tag
// layout / prefix types (`Variants::Multiple`), with `tag_layout` interning a
// scalar layout for the discriminant; tuples index directly; ADTs index the
// variant's field list, with `Variants::Multiple` at the tag field returning the
// tag layout.
2149 fn field(this
: TyAndLayout
<'tcx
>, cx
: &C
, i
: usize) -> C
::TyAndLayout
{
2150 enum TyMaybeWithLayout
<C
: LayoutOf
> {
2152 TyAndLayout(C
::TyAndLayout
),
2155 fn ty_and_layout_kind
<
2156 C
: LayoutOf
<Ty
= Ty
<'tcx
>, TyAndLayout
: MaybeResult
<TyAndLayout
<'tcx
>>>
2158 + HasParamEnv
<'tcx
>,
2160 this
: TyAndLayout
<'tcx
>,
2164 ) -> TyMaybeWithLayout
<C
> {
// Interns a scalar layout for an enum/generator discriminant ("tag").
2166 let tag_layout
= |tag
: &Scalar
| -> C
::TyAndLayout
{
2167 let layout
= Layout
::scalar(cx
, tag
.clone());
2168 MaybeResult
::from(Ok(TyAndLayout
{
2169 layout
: tcx
.intern_layout(layout
),
2170 ty
: tag
.value
.to_ty(tcx
),
2183 | ty
::GeneratorWitness(..)
2185 | ty
::Dynamic(..) => bug
!("TyAndLayout::field_type({:?}): not applicable", this
),
2187 // Potentially-fat pointers.
2188 ty
::Ref(_
, pointee
, _
) | ty
::RawPtr(ty
::TypeAndMut { ty: pointee, .. }
) => {
2189 assert
!(i
< this
.fields
.count());
2191 // Reuse the fat `*T` type as its own thin pointer data field.
2192 // This provides information about, e.g., DST struct pointees
2193 // (which may have no non-DST form), and will work as long
2194 // as the `Abi` or `FieldsShape` is checked by users.
2196 let nil
= tcx
.mk_unit();
2197 let ptr_ty
= if ty
.is_unsafe_ptr() {
2200 tcx
.mk_mut_ref(tcx
.lifetimes
.re_static
, nil
)
2202 return TyMaybeWithLayout
::TyAndLayout(MaybeResult
::from(
2203 cx
.layout_of(ptr_ty
).to_result().map(|mut ptr_layout
| {
// Metadata field of a fat pointer: depends on the unsized tail.
2210 match tcx
.struct_tail_erasing_lifetimes(pointee
, cx
.param_env()).kind() {
2211 ty
::Slice(_
) | ty
::Str
=> TyMaybeWithLayout
::Ty(tcx
.types
.usize),
2212 ty
::Dynamic(_
, _
) => {
2213 TyMaybeWithLayout
::Ty(tcx
.mk_imm_ref(
2214 tcx
.lifetimes
.re_static
,
2215 tcx
.mk_array(tcx
.types
.usize, 3),
2217 /* FIXME: use actual fn pointers
2218 Warning: naively computing the number of entries in the
2219 vtable by counting the methods on the trait + methods on
2220 all parent traits does not work, because some methods can
2221 be not object safe and thus excluded from the vtable.
2222 Increase this counter if you tried to implement this but
2223 failed to do it without duplicating a lot of code from
2224 other places in the compiler: 2
2226 tcx.mk_array(tcx.types.usize, 3),
2227 tcx.mk_array(Option<fn()>),
2231 _
=> bug
!("TyAndLayout::field_type({:?}): not applicable", this
),
2235 // Arrays and slices.
2236 ty
::Array(element
, _
) | ty
::Slice(element
) => TyMaybeWithLayout
::Ty(element
),
2237 ty
::Str
=> TyMaybeWithLayout
::Ty(tcx
.types
.u8),
2239 // Tuples, generators and closures.
2240 ty
::Closure(_
, ref substs
) => {
2241 ty_and_layout_kind(this
, cx
, i
, substs
.as_closure().tupled_upvars_ty())
2244 ty
::Generator(def_id
, ref substs
, _
) => match this
.variants
{
2245 Variants
::Single { index }
=> TyMaybeWithLayout
::Ty(
2248 .state_tys(def_id
, tcx
)
2249 .nth(index
.as_usize())
2254 Variants
::Multiple { ref tag, tag_field, .. }
=> {
2256 return TyMaybeWithLayout
::TyAndLayout(tag_layout(tag
));
2258 TyMaybeWithLayout
::Ty(substs
.as_generator().prefix_tys().nth(i
).unwrap())
2262 ty
::Tuple(tys
) => TyMaybeWithLayout
::Ty(tys
[i
].expect_ty()),
2265 ty
::Adt(def
, substs
) => {
2266 match this
.variants
{
2267 Variants
::Single { index }
=> {
2268 TyMaybeWithLayout
::Ty(def
.variants
[index
].fields
[i
].ty(tcx
, substs
))
2271 // Discriminant field for enums (where applicable).
2272 Variants
::Multiple { ref tag, .. }
=> {
2274 return TyMaybeWithLayout
::TyAndLayout(tag_layout(tag
));
2281 | ty
::Placeholder(..)
2285 | ty
::Error(_
) => bug
!("TyAndLayout::field_type: unexpected type `{}`", this
.ty
),
// Outer body: lay out the resolved field type, unless the helper already
// produced a full layout (tag / fat-pointer fields), which is returned as-is.
2289 cx
.layout_of(match ty_and_layout_kind(this
, cx
, i
, this
.ty
) {
2290 TyMaybeWithLayout
::Ty(result
) => result
,
2291 TyMaybeWithLayout
::TyAndLayout(result
) => return result
,
// NOTE(review): damaged extraction — tokens split across lines and interior lines
// missing (e.g. originals 2303, 2326-2335, 2343-2349). Code left byte-identical;
// comments only added.
//
// `pointee_info_at`: describes the pointer (if any) located at byte `offset`
// inside `this` — size, alignment, safety kind and address space. Raw pointers and
// fn pointers at offset 0 are handled directly (fn pointers use the target's
// instruction address space); references compute a `PointerKind` — conservative
// when not optimizing (saves Freeze/Unpin queries), otherwise shared refs use
// `is_freeze` and `&mut` uses `is_unpin` (self-referential, !Unpin data must not
// be `noalias`). Otherwise the search descends into the dataful variant's field
// covering `offset` (skipping unions), recursing with a rebased offset. A trailing
// fixup marks offset-0 pointers inside `Box` as `UniqueOwned`.
2295 fn pointee_info_at(this
: TyAndLayout
<'tcx
>, cx
: &C
, offset
: Size
) -> Option
<PointeeInfo
> {
2296 let addr_space_of_ty
= |ty
: Ty
<'tcx
>| {
2297 if ty
.is_fn() { cx.data_layout().instruction_address_space }
else { AddressSpace::DATA }
2300 let pointee_info
= match *this
.ty
.kind() {
2301 ty
::RawPtr(mt
) if offset
.bytes() == 0 => {
2302 cx
.layout_of(mt
.ty
).to_result().ok().map(|layout
| PointeeInfo
{
2304 align
: layout
.align
.abi
,
2306 address_space
: addr_space_of_ty(mt
.ty
),
2309 ty
::FnPtr(fn_sig
) if offset
.bytes() == 0 => {
2310 cx
.layout_of(cx
.tcx().mk_fn_ptr(fn_sig
)).to_result().ok().map(|layout
| {
2313 align
: layout
.align
.abi
,
2315 address_space
: cx
.data_layout().instruction_address_space
,
2319 ty
::Ref(_
, ty
, mt
) if offset
.bytes() == 0 => {
2320 let address_space
= addr_space_of_ty(ty
);
2322 let kind
= if tcx
.sess
.opts
.optimize
== OptLevel
::No
{
2323 // Use conservative pointer kind if not optimizing. This saves us the
2324 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2325 // attributes in LLVM have compile-time cost even in unoptimized builds).
2329 hir
::Mutability
::Not
=> {
2330 if ty
.is_freeze(tcx
.at(DUMMY_SP
), cx
.param_env()) {
2336 hir
::Mutability
::Mut
=> {
2337 // References to self-referential structures should not be considered
2338 // noalias, as another pointer to the structure can be obtained, that
2339 // is not based-on the original reference. We consider all !Unpin
2340 // types to be potentially self-referential here.
2341 if ty
.is_unpin(tcx
.at(DUMMY_SP
), cx
.param_env()) {
2342 PointerKind
::UniqueBorrowed
2350 cx
.layout_of(ty
).to_result().ok().map(|layout
| PointeeInfo
{
2352 align
: layout
.align
.abi
,
// Fallback path: pick the variant whose data may hold a pointer at `offset`.
2359 let mut data_variant
= match this
.variants
{
2360 // Within the discriminant field, only the niche itself is
2361 // always initialized, so we only check for a pointer at its
2364 // If the niche is a pointer, it's either valid (according
2365 // to its type), or null (which the niche field's scalar
2366 // validity range encodes). This allows using
2367 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2368 // this will continue to work as long as we don't start
2369 // using more niches than just null (e.g., the first page of
2370 // the address space, or unaligned pointers).
2371 Variants
::Multiple
{
2372 tag_encoding
: TagEncoding
::Niche { dataful_variant, .. }
,
2375 } if this
.fields
.offset(tag_field
) == offset
=> {
2376 Some(this
.for_variant(cx
, dataful_variant
))
2381 if let Some(variant
) = data_variant
{
2382 // We're not interested in any unions.
2383 if let FieldsShape
::Union(_
) = variant
.fields
{
2384 data_variant
= None
;
2388 let mut result
= None
;
2390 if let Some(variant
) = data_variant
{
2391 let ptr_end
= offset
+ Pointer
.size(cx
);
2392 for i
in 0..variant
.fields
.count() {
2393 let field_start
= variant
.fields
.offset(i
);
2394 if field_start
<= offset
{
2395 let field
= variant
.field(cx
, i
);
2396 result
= field
.to_result().ok().and_then(|field
| {
2397 if ptr_end
<= field_start
+ field
.size
{
2398 // We found the right field, look inside it.
2400 field
.pointee_info_at(cx
, offset
- field_start
);
2406 if result
.is_some() {
2413 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2414 if let Some(ref mut pointee
) = result
{
2415 if let ty
::Adt(def
, _
) = this
.ty
.kind() {
2416 if def
.is_box() && offset
.bytes() == 0 {
2417 pointee
.safe
= Some(PointerKind
::UniqueOwned
);
2427 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
// NOTE(review): damaged extraction — code left byte-identical, comments only added.
//
// Stable hashing for `LayoutError`: hashes the enum discriminant, then the payload
// type for the `Unknown`/`SizeOverflow` variants. (The `match` header and the
// closing braces — originals 2441-2446 — are missing from this extraction.)
2437 impl<'a
, 'tcx
> HashStable
<StableHashingContext
<'a
>> for LayoutError
<'tcx
> {
2438 fn hash_stable(&self, hcx
: &mut StableHashingContext
<'a
>, hasher
: &mut StableHasher
) {
2439 use crate::ty
::layout
::LayoutError
::*;
2440 mem
::discriminant(self).hash_stable(hcx
, hasher
);
2443 Unknown(t
) | SizeOverflow(t
) => t
.hash_stable(hcx
, hasher
),
// NOTE(review): damaged extraction — tokens split across lines and interior lines
// missing (embedded original line numbers jump, e.g. 2456->2459, 2476->2482).
// Code left byte-identical; comments only added.
//
// `Instance::fn_sig_for_fn_abi`: builds the `PolyFnSig` that `FnAbi` computation
// uses. Visible logic: take the revealed-all type of the instance; `FnDef`s get
// their signature normalized (in the def's param-env) and substituted;
// `VtableShim`s rewrite the receiver to `*mut Self`; closures prepend the
// environment type under a fresh `BrEnv` late-bound region; generators build a
// `Pin<&mut Self>` receiver, a `GeneratorState<yield_ty, return_ty>` return type,
// and a normal-unsafety `Rust`-ABI signature over `(env, resume_ty)`; any other
// type is a compiler bug. (A `match ty.kind()` header for the closure/generator
// arms — around original 2481 — is missing from this extraction.)
2448 impl<'tcx
> ty
::Instance
<'tcx
> {
2449 // NOTE(eddyb) this is private to avoid using it from outside of
2450 // `FnAbi::of_instance` - any other uses are either too high-level
2451 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2452 // or should go through `FnAbi` instead, to avoid losing any
2453 // adjustments `FnAbi::of_instance` might be performing.
2454 fn fn_sig_for_fn_abi(&self, tcx
: TyCtxt
<'tcx
>) -> ty
::PolyFnSig
<'tcx
> {
2455 // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2456 let ty
= self.ty(tcx
, ty
::ParamEnv
::reveal_all());
2459 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2460 // parameters unused if they show up in the signature, but not in the `mir::Body`
2461 // (i.e. due to being inside a projection that got normalized, see
2462 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2463 // track of a polymorphization `ParamEnv` to allow normalizing later.
2464 let mut sig
= match *ty
.kind() {
2465 ty
::FnDef(def_id
, substs
) => tcx
2466 .normalize_erasing_regions(tcx
.param_env(def_id
), tcx
.fn_sig(def_id
))
2467 .subst(tcx
, substs
),
2468 _
=> unreachable
!(),
2471 if let ty
::InstanceDef
::VtableShim(..) = self.def
{
2472 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2473 sig
= sig
.map_bound(|mut sig
| {
2474 let mut inputs_and_output
= sig
.inputs_and_output
.to_vec();
2475 inputs_and_output
[0] = tcx
.mk_mut_ptr(inputs_and_output
[0]);
2476 sig
.inputs_and_output
= tcx
.intern_type_list(&inputs_and_output
);
2482 ty
::Closure(def_id
, substs
) => {
2483 let sig
= substs
.as_closure().sig();
2485 let bound_vars
= tcx
.mk_bound_variable_kinds(
2488 .chain(iter
::once(ty
::BoundVariableKind
::Region(ty
::BrEnv
))),
2490 let br
= ty
::BoundRegion
{
2491 var
: ty
::BoundVar
::from_usize(bound_vars
.len() - 1),
2492 kind
: ty
::BoundRegionKind
::BrEnv
,
2494 let env_region
= ty
::ReLateBound(ty
::INNERMOST
, br
);
2495 let env_ty
= tcx
.closure_env_ty(def_id
, substs
, env_region
).unwrap();
2497 let sig
= sig
.skip_binder();
2498 ty
::Binder
::bind_with_vars(
2500 iter
::once(env_ty
).chain(sig
.inputs().iter().cloned()),
2509 ty
::Generator(_
, substs
, _
) => {
2510 let sig
= substs
.as_generator().poly_sig();
2512 let bound_vars
= tcx
.mk_bound_variable_kinds(
2515 .chain(iter
::once(ty
::BoundVariableKind
::Region(ty
::BrEnv
))),
2517 let br
= ty
::BoundRegion
{
2518 var
: ty
::BoundVar
::from_usize(bound_vars
.len() - 1),
2519 kind
: ty
::BoundRegionKind
::BrEnv
,
2521 let env_region
= ty
::ReLateBound(ty
::INNERMOST
, br
);
2522 let env_ty
= tcx
.mk_mut_ref(tcx
.mk_region(env_region
), ty
);
2524 let pin_did
= tcx
.require_lang_item(LangItem
::Pin
, None
);
2525 let pin_adt_ref
= tcx
.adt_def(pin_did
);
2526 let pin_substs
= tcx
.intern_substs(&[env_ty
.into()]);
2527 let env_ty
= tcx
.mk_adt(pin_adt_ref
, pin_substs
);
2529 let sig
= sig
.skip_binder();
2530 let state_did
= tcx
.require_lang_item(LangItem
::GeneratorState
, None
);
2531 let state_adt_ref
= tcx
.adt_def(state_did
);
2532 let state_substs
= tcx
.intern_substs(&[sig
.yield_ty
.into(), sig
.return_ty
.into()]);
2533 let ret_ty
= tcx
.mk_adt(state_adt_ref
, state_substs
);
2534 ty
::Binder
::bind_with_vars(
2536 [env_ty
, sig
.resume_ty
].iter(),
2539 hir
::Unsafety
::Normal
,
2540 rustc_target
::spec
::abi
::Abi
::Rust
,
2545 _
=> bug
!("unexpected type {:?} in Instance::fn_sig", ty
),
// NOTE(review): damaged extraction — tokens split across lines and interior lines
// missing. Code left byte-identical; comments only added.
//
// `FnAbiExt`: extension trait for constructing `FnAbi`s over `Ty<'tcx>`.
// Declares `of_fn_ptr` (indirect calls), `of_instance` (direct/virtual calls),
// a private constructor taking (sig, extra_args, caller_location,
// codegen_fn_attr_flags, make_self_ptr_thin) — its `fn` header line (presumably
// `new_internal`, cf. the impl below; originals 2571-2572) is missing from this
// extraction — and `adjust_for_abi`.
2550 pub trait FnAbiExt
<'tcx
, C
>
2552 C
: LayoutOf
<Ty
= Ty
<'tcx
>, TyAndLayout
= TyAndLayout
<'tcx
>>
2556 + HasParamEnv
<'tcx
>,
2558 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2560 /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2561 /// instead, where the instance is a `InstanceDef::Virtual`.
2562 fn of_fn_ptr(cx
: &C
, sig
: ty
::PolyFnSig
<'tcx
>, extra_args
: &[Ty
<'tcx
>]) -> Self;
2564 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2565 /// direct calls to an `fn`.
2567 /// NB: that includes virtual calls, which are represented by "direct calls"
2568 /// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2569 fn of_instance(cx
: &C
, instance
: ty
::Instance
<'tcx
>, extra_args
: &[Ty
<'tcx
>]) -> Self;
2573 sig
: ty
::PolyFnSig
<'tcx
>,
2574 extra_args
: &[Ty
<'tcx
>],
2575 caller_location
: Option
<Ty
<'tcx
>>,
2576 codegen_fn_attr_flags
: CodegenFnAttrFlags
,
2577 make_self_ptr_thin
: bool
,
2579 fn adjust_for_abi(&mut self, cx
: &C
, abi
: SpecAbi
);
// NOTE(review): damaged extraction — this is a body fragment of a free function
// whose `fn` header (original ~2582) is missing; presumably `fn_can_unwind`
// (cf. the call site at original 2875 in `new_internal` below) — TODO confirm
// upstream. Code left byte-identical; comments only added.
//
// Visible logic: decides whether a function may unwind. panic=abort means nothing
// unwinds; an explicit `UNWIND` attr flag forces unwinding; the allocator
// `RUSTC_ALLOCATOR_NOUNWIND` flag forbids it; otherwise the `Rust` calling
// convention unwinds, foreign conventions consult their per-convention `unwind`
// flag (RFC 2945), and the remaining conventions are treated as non-unwinding.
2583 panic_strategy
: PanicStrategy
,
2584 codegen_fn_attr_flags
: CodegenFnAttrFlags
,
2588 if panic_strategy
!= PanicStrategy
::Unwind
{
2589 // In panic=abort mode we assume nothing can unwind anywhere, so
2590 // optimize based on this!
2592 } else if codegen_fn_attr_flags
.contains(CodegenFnAttrFlags
::UNWIND
) {
2593 // If a specific #[unwind] attribute is present, use that.
2595 } else if codegen_fn_attr_flags
.contains(CodegenFnAttrFlags
::RUSTC_ALLOCATOR_NOUNWIND
) {
2596 // Special attribute for allocator functions, which can't unwind.
2599 if call_conv
== Conv
::Rust
{
2600 // Any Rust method (or `extern "Rust" fn` or `extern
2601 // "rust-call" fn`) is explicitly allowed to unwind
2602 // (unless it has no-unwind attribute, handled above).
2605 // Anything else is either:
2607 // 1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2609 // 2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2611 // In both of these cases, we should refer to the ABI to determine whether or not we
2612 // should unwind. See Rust RFC 2945 for more information on this behavior, here:
2613 // https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2616 C { unwind }
| Stdcall { unwind }
| System { unwind }
| Thiscall { unwind }
=> {
2631 | AvrNonBlockingInterrupt
2632 | CCmseNonSecureCall
2636 | Unadjusted
=> false,
2637 // In the `if` above, we checked for functions with the Rust calling convention.
2638 Rust
| RustCall
=> unreachable
!(),
// NOTE(review): damaged extraction — code left byte-identical, comments only added.
//
// `FnAbiExt` impl for `call::FnAbi<'tcx, Ty<'tcx>>`.
// `of_fn_ptr`: fn pointers are conservatively assumed to always unwind (the
// `UNWIND` flag is passed), with no caller-location type and no thin-self-pointer
// forcing.
2644 impl<'tcx
, C
> FnAbiExt
<'tcx
, C
> for call
::FnAbi
<'tcx
, Ty
<'tcx
>>
2646 C
: LayoutOf
<Ty
= Ty
<'tcx
>, TyAndLayout
= TyAndLayout
<'tcx
>>
2650 + HasParamEnv
<'tcx
>,
2652 fn of_fn_ptr(cx
: &C
, sig
: ty
::PolyFnSig
<'tcx
>, extra_args
: &[Ty
<'tcx
>]) -> Self {
2653 // Assume that fn pointers may always unwind
2654 let codegen_fn_attr_flags
= CodegenFnAttrFlags
::UNWIND
;
2656 call
::FnAbi
::new_internal(cx
, sig
, extra_args
, None
, codegen_fn_attr_flags
, false)
// NOTE(review): damaged extraction — code left byte-identical, comments only added.
//
// `of_instance`: derives the signature via `fn_sig_for_fn_abi`, computes an
// optional `caller_location` type when the instance requires it, reads the
// instance's codegen attribute flags, and forwards to `new_internal`, forcing a
// thin `self` pointer exactly for `InstanceDef::Virtual` instances.
2659 fn of_instance(cx
: &C
, instance
: ty
::Instance
<'tcx
>, extra_args
: &[Ty
<'tcx
>]) -> Self {
2660 let sig
= instance
.fn_sig_for_fn_abi(cx
.tcx());
2662 let caller_location
= if instance
.def
.requires_caller_location(cx
.tcx()) {
2663 Some(cx
.tcx().caller_location_ty())
2668 let attrs
= cx
.tcx().codegen_fn_attrs(instance
.def_id()).flags
;
2670 call
::FnAbi
::new_internal(
2676 matches
!(instance
.def
, ty
::InstanceDef
::Virtual(..)),
// NOTE(review): damaged extraction — this is the body of the main `FnAbi`
// constructor; its `fn` header (presumably `fn new_internal(cx: &C, ...)`,
// originals 2680-2681) and many interior lines are missing. Code left
// byte-identical; comments only added.
//
// Visible steps: debug-log; normalize the signature with all regions/types
// revealed; map the declared ABI to a target `Conv`; for "rust-call", pop the
// trailing tuple argument and splat it into `extra_args`; compute target quirks
// (win x86_64-gnu, {s390x,sparc64,powerpc}-linux-gnu-like) for which zero-sized
// arguments may NOT be ignored; `adjust_for_rust_scalar` applies per-scalar
// attributes (Zext for bools; NonNull from the valid range;
// NoAlias/ReadOnly/NoAliasMutRef and pointee size/align from `pointee_info_at`);
// `arg_of` builds each `ArgAbi` (thin-self-pointer rewrite for virtual calls,
// `PassMode::Ignore` for ZSTs where the target allows); finally assembles the
// `FnAbi` (ret, args incl. caller_location, c_variadic, fixed_count, can_unwind
// via `fn_can_unwind`) and runs `adjust_for_abi`.
2682 sig
: ty
::PolyFnSig
<'tcx
>,
2683 extra_args
: &[Ty
<'tcx
>],
2684 caller_location
: Option
<Ty
<'tcx
>>,
2685 codegen_fn_attr_flags
: CodegenFnAttrFlags
,
2686 force_thin_self_ptr
: bool
,
2688 debug
!("FnAbi::new_internal({:?}, {:?})", sig
, extra_args
);
2690 let sig
= cx
.tcx().normalize_erasing_late_bound_regions(ty
::ParamEnv
::reveal_all(), sig
);
2692 use rustc_target
::spec
::abi
::Abi
::*;
2693 let conv
= match cx
.tcx().sess
.target
.adjust_abi(sig
.abi
) {
2694 RustIntrinsic
| PlatformIntrinsic
| Rust
| RustCall
=> Conv
::Rust
,
2696 // It's the ABI's job to select this, not ours.
2697 System { .. }
=> bug
!("system abi should be selected elsewhere"),
2698 EfiApi
=> bug
!("eficall abi should be selected elsewhere"),
2700 Stdcall { .. }
=> Conv
::X86Stdcall
,
2701 Fastcall
=> Conv
::X86Fastcall
,
2702 Vectorcall
=> Conv
::X86VectorCall
,
2703 Thiscall { .. }
=> Conv
::X86ThisCall
,
2704 C { .. }
=> Conv
::C
,
2705 Unadjusted
=> Conv
::C
,
2706 Win64
=> Conv
::X86_64Win64
,
2707 SysV64
=> Conv
::X86_64SysV
,
2708 Aapcs
=> Conv
::ArmAapcs
,
2709 CCmseNonSecureCall
=> Conv
::CCmseNonSecureCall
,
2710 PtxKernel
=> Conv
::PtxKernel
,
2711 Msp430Interrupt
=> Conv
::Msp430Intr
,
2712 X86Interrupt
=> Conv
::X86Intr
,
2713 AmdGpuKernel
=> Conv
::AmdGpuKernel
,
2714 AvrInterrupt
=> Conv
::AvrInterrupt
,
2715 AvrNonBlockingInterrupt
=> Conv
::AvrNonBlockingInterrupt
,
2718 // These API constants ought to be more specific...
// "rust-call" ABI: the last argument must be a tuple; it is un-tupled into
// the extra (untupled) arguments.
2722 let mut inputs
= sig
.inputs();
2723 let extra_args
= if sig
.abi
== RustCall
{
2724 assert
!(!sig
.c_variadic
&& extra_args
.is_empty());
2726 if let Some(input
) = sig
.inputs().last() {
2727 if let ty
::Tuple(tupled_arguments
) = input
.kind() {
2728 inputs
= &sig
.inputs()[0..sig
.inputs().len() - 1];
2729 tupled_arguments
.iter().map(|k
| k
.expect_ty()).collect()
2732 "argument to function with \"rust-call\" ABI \
2738 "argument to function with \"rust-call\" ABI \
2743 assert
!(sig
.c_variadic
|| extra_args
.is_empty());
// Target quirks: these targets do NOT ignore zero-sized struct arguments.
2747 let target
= &cx
.tcx().sess
.target
;
2748 let target_env_gnu_like
= matches
!(&target
.env
[..], "gnu" | "musl");
2749 let win_x64_gnu
= target
.os
== "windows" && target
.arch
== "x86_64" && target
.env
== "gnu";
2750 let linux_s390x_gnu_like
=
2751 target
.os
== "linux" && target
.arch
== "s390x" && target_env_gnu_like
;
2752 let linux_sparc64_gnu_like
=
2753 target
.os
== "linux" && target
.arch
== "sparc64" && target_env_gnu_like
;
2754 let linux_powerpc_gnu_like
=
2755 target
.os
== "linux" && target
.arch
== "powerpc" && target_env_gnu_like
;
2756 let rust_abi
= matches
!(sig
.abi
, RustIntrinsic
| PlatformIntrinsic
| Rust
| RustCall
);
2758 // Handle safe Rust thin and fat pointers.
2759 let adjust_for_rust_scalar
= |attrs
: &mut ArgAttributes
,
2761 layout
: TyAndLayout
<'tcx
>,
2764 // Booleans are always an i1 that needs to be zero-extended.
2765 if scalar
.is_bool() {
2766 attrs
.ext(ArgExtension
::Zext
);
2770 // Only pointer types handled below.
2771 if scalar
.value
!= Pointer
{
2775 if scalar
.valid_range
.start() < scalar
.valid_range
.end() {
2776 if *scalar
.valid_range
.start() > 0 {
2777 attrs
.set(ArgAttribute
::NonNull
);
2781 if let Some(pointee
) = layout
.pointee_info_at(cx
, offset
) {
2782 if let Some(kind
) = pointee
.safe
{
2783 attrs
.pointee_align
= Some(pointee
.align
);
2785 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
2786 // for the entire duration of the function as they can be deallocated
2787 // at any time. Set their valid size to 0.
2788 attrs
.pointee_size
= match kind
{
2789 PointerKind
::UniqueOwned
=> Size
::ZERO
,
2793 // `Box` pointer parameters never alias because ownership is transferred
2794 // `&mut` pointer parameters never alias other parameters,
2795 // or mutable global data
2797 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2798 // and can be marked as both `readonly` and `noalias`, as
2799 // LLVM's definition of `noalias` is based solely on memory
2800 // dependencies rather than pointer equality
2802 // Due to miscompiles in LLVM < 12, we apply a separate NoAliasMutRef attribute
2803 // for UniqueBorrowed arguments, so that the codegen backend can decide
2804 // whether or not to actually emit the attribute.
2805 let no_alias
= match kind
{
2806 PointerKind
::Shared
| PointerKind
::UniqueBorrowed
=> false,
2807 PointerKind
::UniqueOwned
=> true,
2808 PointerKind
::Frozen
=> !is_return
,
2811 attrs
.set(ArgAttribute
::NoAlias
);
2814 if kind
== PointerKind
::Frozen
&& !is_return
{
2815 attrs
.set(ArgAttribute
::ReadOnly
);
2818 if kind
== PointerKind
::UniqueBorrowed
&& !is_return
{
2819 attrs
.set(ArgAttribute
::NoAliasMutRef
);
// Builds the `ArgAbi` for one input (`arg_idx == Some(i)`) or the return
// place (`arg_idx == None`).
2825 let arg_of
= |ty
: Ty
<'tcx
>, arg_idx
: Option
<usize>| {
2826 let is_return
= arg_idx
.is_none();
2828 let layout
= cx
.layout_of(ty
);
2829 let layout
= if force_thin_self_ptr
&& arg_idx
== Some(0) {
2830 // Don't pass the vtable, it's not an argument of the virtual fn.
2831 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2832 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2833 make_thin_self_ptr(cx
, layout
)
2838 let mut arg
= ArgAbi
::new(cx
, layout
, |layout
, scalar
, offset
| {
2839 let mut attrs
= ArgAttributes
::new();
2840 adjust_for_rust_scalar(&mut attrs
, scalar
, *layout
, offset
, is_return
);
2844 if arg
.layout
.is_zst() {
2845 // For some forsaken reason, x86_64-pc-windows-gnu
2846 // doesn't ignore zero-sized struct arguments.
2847 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2851 && !linux_s390x_gnu_like
2852 && !linux_sparc64_gnu_like
2853 && !linux_powerpc_gnu_like
)
2855 arg
.mode
= PassMode
::Ignore
;
2862 let mut fn_abi
= FnAbi
{
2863 ret
: arg_of(sig
.output(), None
),
2868 .chain(caller_location
)
2870 .map(|(i
, ty
)| arg_of(ty
, Some(i
)))
2872 c_variadic
: sig
.c_variadic
,
2873 fixed_count
: inputs
.len(),
2875 can_unwind
: fn_can_unwind(
2876 cx
.tcx().sess
.panic_strategy(),
2877 codegen_fn_attr_flags
,
2882 fn_abi
.adjust_for_abi(cx
, sig
.abi
);
2883 debug
!("FnAbi::new_internal = {:?}", fn_abi
);
// NOTE(review): damaged extraction — tokens split across lines and interior lines
// missing. Code left byte-identical; comments only added.
//
// `adjust_for_abi`: post-processes the `FnAbi` for the declared ABI. `Unadjusted`
// is left alone; for the Rust ABIs a `fixup` closure (applied to the return place
// and every argument) makes SIMD vectors indirect (except under the platform
// intrinsic ABI, when the target wants SIMD in memory), makes unsized or
// larger-than-2-pointer aggregates indirect, and casts small aggregates to an
// integer register of the same size; every other ABI defers to
// `adjust_for_cabi`, with failure reported via `sess.fatal`.
2887 fn adjust_for_abi(&mut self, cx
: &C
, abi
: SpecAbi
) {
2888 if abi
== SpecAbi
::Unadjusted
{
2892 if abi
== SpecAbi
::Rust
2893 || abi
== SpecAbi
::RustCall
2894 || abi
== SpecAbi
::RustIntrinsic
2895 || abi
== SpecAbi
::PlatformIntrinsic
2897 let fixup
= |arg
: &mut ArgAbi
<'tcx
, Ty
<'tcx
>>| {
2898 if arg
.is_ignore() {
2902 match arg
.layout
.abi
{
2903 Abi
::Aggregate { .. }
=> {}
2905 // This is a fun case! The gist of what this is doing is
2906 // that we want callers and callees to always agree on the
2907 // ABI of how they pass SIMD arguments. If we were to *not*
2908 // make these arguments indirect then they'd be immediates
2909 // in LLVM, which means that they'd used whatever the
2910 // appropriate ABI is for the callee and the caller. That
2911 // means, for example, if the caller doesn't have AVX
2912 // enabled but the callee does, then passing an AVX argument
2913 // across this boundary would cause corrupt data to show up.
2915 // This problem is fixed by unconditionally passing SIMD
2916 // arguments through memory between callers and callees
2917 // which should get them all to agree on ABI regardless of
2918 // target feature sets. Some more information about this
2919 // issue can be found in #44367.
2921 // Note that the platform intrinsic ABI is exempt here as
2922 // that's how we connect up to LLVM and it's unstable
2923 // anyway, we control all calls to it in libstd.
2925 if abi
!= SpecAbi
::PlatformIntrinsic
2926 && cx
.tcx().sess
.target
.simd_types_indirect
=>
2928 arg
.make_indirect();
2935 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
2936 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
2937 let max_by_val_size
= Pointer
.size(cx
) * 2;
2938 let size
= arg
.layout
.size
;
2940 if arg
.layout
.is_unsized() || size
> max_by_val_size
{
2941 arg
.make_indirect();
2943 // We want to pass small aggregates as immediates, but using
2944 // a LLVM aggregate type for this leads to bad optimizations,
2945 // so we pick an appropriately sized integer type instead.
2946 arg
.cast_to(Reg { kind: RegKind::Integer, size }
);
2949 fixup(&mut self.ret
);
2950 for arg
in &mut self.args
{
2956 if let Err(msg
) = self.adjust_for_cabi(cx
, abi
) {
2957 cx
.tcx().sess
.fatal(&msg
);
2962 fn make_thin_self_ptr
<'tcx
, C
>(cx
: &C
, mut layout
: TyAndLayout
<'tcx
>) -> TyAndLayout
<'tcx
>
2964 C
: LayoutOf
<Ty
= Ty
<'tcx
>, TyAndLayout
= TyAndLayout
<'tcx
>>
2966 + HasParamEnv
<'tcx
>,
2968 let fat_pointer_ty
= if layout
.is_unsized() {
2969 // unsized `self` is passed as a pointer to `self`
2970 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2971 cx
.tcx().mk_mut_ptr(layout
.ty
)
2974 Abi
::ScalarPair(..) => (),
2975 _
=> bug
!("receiver type has unsupported layout: {:?}", layout
),
2978 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2979 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2980 // elsewhere in the compiler as a method on a `dyn Trait`.
2981 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2982 // get a built-in pointer type
2983 let mut fat_pointer_layout
= layout
;
2984 'descend_newtypes
: while !fat_pointer_layout
.ty
.is_unsafe_ptr()
2985 && !fat_pointer_layout
.ty
.is_region_ptr()
2987 for i
in 0..fat_pointer_layout
.fields
.count() {
2988 let field_layout
= fat_pointer_layout
.field(cx
, i
);
2990 if !field_layout
.is_zst() {
2991 fat_pointer_layout
= field_layout
;
2992 continue 'descend_newtypes
;
2996 bug
!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout
);
2999 fat_pointer_layout
.ty
3002 // we now have a type like `*mut RcBox<dyn Trait>`
3003 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3004 // this is understood as a special case elsewhere in the compiler
3005 let unit_pointer_ty
= cx
.tcx().mk_mut_ptr(cx
.tcx().mk_unit());
3006 layout
= cx
.layout_of(unit_pointer_ty
);
3007 layout
.ty
= fat_pointer_ty
;