use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
use rustc_middle::ty::layout::{
    IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
};
use rustc_middle::ty::{
    self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable,
};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::Symbol;
use rustc_span::DUMMY_SP;
use rustc_target::abi::*;

use std::cmp::{self, Ordering};
use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;

use rand::{seq::SliceRandom, SeedableRng};
use rand_xoshiro::Xoshiro128StarStar;

use crate::layout_sanity_check::sanity_check_layout;
26 pub fn provide(providers
: &mut ty
::query
::Providers
) {
27 *providers
= ty
::query
::Providers { layout_of, ..*providers }
;
30 #[instrument(skip(tcx, query), level = "debug")]
33 query
: ty
::ParamEnvAnd
<'tcx
, Ty
<'tcx
>>,
34 ) -> Result
<TyAndLayout
<'tcx
>, LayoutError
<'tcx
>> {
35 let (param_env
, ty
) = query
.into_parts();
38 let param_env
= param_env
.with_reveal_all_normalized(tcx
);
39 let unnormalized_ty
= ty
;
41 // FIXME: We might want to have two different versions of `layout_of`:
42 // One that can be called after typecheck has completed and can use
43 // `normalize_erasing_regions` here and another one that can be called
44 // before typecheck has completed and uses `try_normalize_erasing_regions`.
45 let ty
= match tcx
.try_normalize_erasing_regions(param_env
, ty
) {
47 Err(normalization_error
) => {
48 return Err(LayoutError
::NormalizationFailure(ty
, normalization_error
));
52 if ty
!= unnormalized_ty
{
53 // Ensure this layout is also cached for the normalized type.
54 return tcx
.layout_of(param_env
.and(ty
));
57 let cx
= LayoutCx { tcx, param_env }
;
59 let layout
= layout_of_uncached(&cx
, ty
)?
;
60 let layout
= TyAndLayout { ty, layout }
;
62 record_layout_for_printing(&cx
, layout
);
64 sanity_check_layout(&cx
, &layout
);
69 #[derive(Copy, Clone, Debug)]
71 /// A tuple, closure, or univariant which cannot be coerced to unsized.
73 /// A univariant, the last field of which may be coerced to unsized.
75 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
76 Prefixed(Size
, Align
),
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        // `map[i]` tells us where source index `i` landed; record the
        // reverse direction so the result maps destinations back to sources.
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
92 fn scalar_pair
<'tcx
>(cx
: &LayoutCx
<'tcx
, TyCtxt
<'tcx
>>, a
: Scalar
, b
: Scalar
) -> LayoutS
<'tcx
> {
93 let dl
= cx
.data_layout();
94 let b_align
= b
.align(dl
);
95 let align
= a
.align(dl
).max(b_align
).max(dl
.aggregate_align
);
96 let b_offset
= a
.size(dl
).align_to(b_align
.abi
);
97 let size
= (b_offset
+ b
.size(dl
)).align_to(align
.abi
);
99 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
100 // returns the last maximum.
101 let largest_niche
= Niche
::from_scalar(dl
, b_offset
, b
)
103 .chain(Niche
::from_scalar(dl
, Size
::ZERO
, a
))
104 .max_by_key(|niche
| niche
.available(dl
));
107 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
108 fields
: FieldsShape
::Arbitrary
{
109 offsets
: vec
![Size
::ZERO
, b_offset
],
110 memory_index
: vec
![0, 1],
112 abi
: Abi
::ScalarPair(a
, b
),
119 fn univariant_uninterned
<'tcx
>(
120 cx
: &LayoutCx
<'tcx
, TyCtxt
<'tcx
>>,
122 fields
: &[TyAndLayout
<'_
>],
125 ) -> Result
<LayoutS
<'tcx
>, LayoutError
<'tcx
>> {
126 let dl
= cx
.data_layout();
127 let pack
= repr
.pack
;
128 if pack
.is_some() && repr
.align
.is_some() {
129 cx
.tcx
.sess
.delay_span_bug(DUMMY_SP
, "struct cannot be packed and aligned");
130 return Err(LayoutError
::Unknown(ty
));
133 let mut align
= if pack
.is_some() { dl.i8_align }
else { dl.aggregate_align }
;
135 let mut inverse_memory_index
: Vec
<u32> = (0..fields
.len() as u32).collect();
137 let optimize
= !repr
.inhibit_struct_field_reordering_opt();
139 let end
= if let StructKind
::MaybeUnsized
= kind { fields.len() - 1 }
else { fields.len() }
;
140 let optimizing
= &mut inverse_memory_index
[..end
];
141 let field_align
= |f
: &TyAndLayout
<'_
>| {
142 if let Some(pack
) = pack { f.align.abi.min(pack) }
else { f.align.abi }
145 // If `-Z randomize-layout` was enabled for the type definition we can shuffle
146 // the field ordering to try and catch some code making assumptions about layouts
147 // we don't guarantee
148 if repr
.can_randomize_type_layout() {
149 // `ReprOptions.layout_seed` is a deterministic seed that we can use to
150 // randomize field ordering with
151 let mut rng
= Xoshiro128StarStar
::seed_from_u64(repr
.field_shuffle_seed
);
153 // Shuffle the ordering of the fields
154 optimizing
.shuffle(&mut rng
);
156 // Otherwise we just leave things alone and actually optimize the type's fields
159 StructKind
::AlwaysSized
| StructKind
::MaybeUnsized
=> {
160 optimizing
.sort_by_key(|&x
| {
161 // Place ZSTs first to avoid "interesting offsets",
162 // especially with only one or two non-ZST fields.
163 let f
= &fields
[x
as usize];
164 (!f
.is_zst(), cmp
::Reverse(field_align(f
)))
168 StructKind
::Prefixed(..) => {
169 // Sort in ascending alignment so that the layout stays optimal
170 // regardless of the prefix
171 optimizing
.sort_by_key(|&x
| field_align(&fields
[x
as usize]));
175 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
176 // regardless of the status of `-Z randomize-layout`
180 // inverse_memory_index holds field indices by increasing memory offset.
181 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
182 // We now write field offsets to the corresponding offset slot;
183 // field 5 with offset 0 puts 0 in offsets[5].
184 // At the bottom of this function, we invert `inverse_memory_index` to
185 // produce `memory_index` (see `invert_mapping`).
187 let mut sized
= true;
188 let mut offsets
= vec
![Size
::ZERO
; fields
.len()];
189 let mut offset
= Size
::ZERO
;
190 let mut largest_niche
= None
;
191 let mut largest_niche_available
= 0;
193 if let StructKind
::Prefixed(prefix_size
, prefix_align
) = kind
{
195 if let Some(pack
) = pack { prefix_align.min(pack) }
else { prefix_align }
;
196 align
= align
.max(AbiAndPrefAlign
::new(prefix_align
));
197 offset
= prefix_size
.align_to(prefix_align
);
200 for &i
in &inverse_memory_index
{
201 let field
= fields
[i
as usize];
203 cx
.tcx
.sess
.delay_span_bug(
206 "univariant: field #{} of `{}` comes after unsized field",
213 if field
.is_unsized() {
217 // Invariant: offset < dl.obj_size_bound() <= 1<<61
218 let field_align
= if let Some(pack
) = pack
{
219 field
.align
.min(AbiAndPrefAlign
::new(pack
))
223 offset
= offset
.align_to(field_align
.abi
);
224 align
= align
.max(field_align
);
226 debug
!("univariant offset: {:?} field: {:#?}", offset
, field
);
227 offsets
[i
as usize] = offset
;
229 if let Some(mut niche
) = field
.largest_niche
{
230 let available
= niche
.available(dl
);
231 if available
> largest_niche_available
{
232 largest_niche_available
= available
;
233 niche
.offset
+= offset
;
234 largest_niche
= Some(niche
);
238 offset
= offset
.checked_add(field
.size
, dl
).ok_or(LayoutError
::SizeOverflow(ty
))?
;
241 if let Some(repr_align
) = repr
.align
{
242 align
= align
.max(AbiAndPrefAlign
::new(repr_align
));
245 debug
!("univariant min_size: {:?}", offset
);
246 let min_size
= offset
;
248 // As stated above, inverse_memory_index holds field indices by increasing offset.
249 // This makes it an already-sorted view of the offsets vec.
250 // To invert it, consider:
251 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
252 // Field 5 would be the first element, so memory_index is i:
253 // Note: if we didn't optimize, it's already right.
256 if optimize { invert_mapping(&inverse_memory_index) }
else { inverse_memory_index }
;
258 let size
= min_size
.align_to(align
.abi
);
259 let mut abi
= Abi
::Aggregate { sized }
;
261 // Unpack newtype ABIs and find scalar pairs.
262 if sized
&& size
.bytes() > 0 {
263 // All other fields must be ZSTs.
264 let mut non_zst_fields
= fields
.iter().enumerate().filter(|&(_
, f
)| !f
.is_zst());
266 match (non_zst_fields
.next(), non_zst_fields
.next(), non_zst_fields
.next()) {
267 // We have exactly one non-ZST field.
268 (Some((i
, field
)), None
, None
) => {
269 // Field fills the struct and it has a scalar or scalar pair ABI.
270 if offsets
[i
].bytes() == 0 && align
.abi
== field
.align
.abi
&& size
== field
.size
{
272 // For plain scalars, or vectors of them, we can't unpack
273 // newtypes for `#[repr(C)]`, as that affects C ABIs.
274 Abi
::Scalar(_
) | Abi
::Vector { .. }
if optimize
=> {
277 // But scalar pairs are Rust-specific and get
278 // treated as aggregates by C ABIs anyway.
279 Abi
::ScalarPair(..) => {
287 // Two non-ZST fields, and they're both scalars.
288 (Some((i
, a
)), Some((j
, b
)), None
) => {
289 match (a
.abi
, b
.abi
) {
290 (Abi
::Scalar(a
), Abi
::Scalar(b
)) => {
291 // Order by the memory placement, not source order.
292 let ((i
, a
), (j
, b
)) = if offsets
[i
] < offsets
[j
] {
297 let pair
= scalar_pair(cx
, a
, b
);
298 let pair_offsets
= match pair
.fields
{
299 FieldsShape
::Arbitrary { ref offsets, ref memory_index }
=> {
300 assert_eq
!(memory_index
, &[0, 1]);
305 if offsets
[i
] == pair_offsets
[0]
306 && offsets
[j
] == pair_offsets
[1]
307 && align
== pair
.align
310 // We can use `ScalarPair` only when it matches our
311 // already computed layout (including `#[repr(C)]`).
323 if fields
.iter().any(|f
| f
.abi
.is_uninhabited()) {
324 abi
= Abi
::Uninhabited
;
328 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
329 fields
: FieldsShape
::Arbitrary { offsets, memory_index }
,
337 fn layout_of_uncached
<'tcx
>(
338 cx
: &LayoutCx
<'tcx
, TyCtxt
<'tcx
>>,
340 ) -> Result
<Layout
<'tcx
>, LayoutError
<'tcx
>> {
342 let param_env
= cx
.param_env
;
343 let dl
= cx
.data_layout();
344 let scalar_unit
= |value
: Primitive
| {
345 let size
= value
.size(dl
);
346 assert
!(size
.bits() <= 128);
347 Scalar
::Initialized { value, valid_range: WrappingRange::full(size) }
349 let scalar
= |value
: Primitive
| tcx
.intern_layout(LayoutS
::scalar(cx
, scalar_unit(value
)));
351 let univariant
= |fields
: &[TyAndLayout
<'_
>], repr
: &ReprOptions
, kind
| {
352 Ok(tcx
.intern_layout(univariant_uninterned(cx
, ty
, fields
, repr
, kind
)?
))
354 debug_assert
!(!ty
.has_non_region_infer());
356 Ok(match *ty
.kind() {
358 ty
::Bool
=> tcx
.intern_layout(LayoutS
::scalar(
360 Scalar
::Initialized
{
361 value
: Int(I8
, false),
362 valid_range
: WrappingRange { start: 0, end: 1 }
,
365 ty
::Char
=> tcx
.intern_layout(LayoutS
::scalar(
367 Scalar
::Initialized
{
368 value
: Int(I32
, false),
369 valid_range
: WrappingRange { start: 0, end: 0x10FFFF }
,
372 ty
::Int(ity
) => scalar(Int(Integer
::from_int_ty(dl
, ity
), true)),
373 ty
::Uint(ity
) => scalar(Int(Integer
::from_uint_ty(dl
, ity
), false)),
374 ty
::Float(fty
) => scalar(match fty
{
375 ty
::FloatTy
::F32
=> F32
,
376 ty
::FloatTy
::F64
=> F64
,
379 let mut ptr
= scalar_unit(Pointer
);
380 ptr
.valid_range_mut().start
= 1;
381 tcx
.intern_layout(LayoutS
::scalar(cx
, ptr
))
385 ty
::Never
=> tcx
.intern_layout(LayoutS
{
386 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
387 fields
: FieldsShape
::Primitive
,
388 abi
: Abi
::Uninhabited
,
394 // Potentially-wide pointers.
395 ty
::Ref(_
, pointee
, _
) | ty
::RawPtr(ty
::TypeAndMut { ty: pointee, .. }
) => {
396 let mut data_ptr
= scalar_unit(Pointer
);
397 if !ty
.is_unsafe_ptr() {
398 data_ptr
.valid_range_mut().start
= 1;
401 let pointee
= tcx
.normalize_erasing_regions(param_env
, pointee
);
402 if pointee
.is_sized(tcx
, param_env
) {
403 return Ok(tcx
.intern_layout(LayoutS
::scalar(cx
, data_ptr
)));
406 let unsized_part
= tcx
.struct_tail_erasing_lifetimes(pointee
, param_env
);
407 let metadata
= match unsized_part
.kind() {
409 return Ok(tcx
.intern_layout(LayoutS
::scalar(cx
, data_ptr
)));
411 ty
::Slice(_
) | ty
::Str
=> scalar_unit(Int(dl
.ptr_sized_integer(), false)),
413 let mut vtable
= scalar_unit(Pointer
);
414 vtable
.valid_range_mut().start
= 1;
417 _
=> return Err(LayoutError
::Unknown(unsized_part
)),
420 // Effectively a (ptr, meta) tuple.
421 tcx
.intern_layout(scalar_pair(cx
, data_ptr
, metadata
))
424 ty
::Dynamic(_
, _
, ty
::DynStar
) => {
425 let mut data
= scalar_unit(Int(dl
.ptr_sized_integer(), false));
426 data
.valid_range_mut().start
= 0;
427 let mut vtable
= scalar_unit(Pointer
);
428 vtable
.valid_range_mut().start
= 1;
429 tcx
.intern_layout(scalar_pair(cx
, data
, vtable
))
432 // Arrays and slices.
433 ty
::Array(element
, mut count
) => {
434 if count
.has_projections() {
435 count
= tcx
.normalize_erasing_regions(param_env
, count
);
436 if count
.has_projections() {
437 return Err(LayoutError
::Unknown(ty
));
441 let count
= count
.try_eval_usize(tcx
, param_env
).ok_or(LayoutError
::Unknown(ty
))?
;
442 let element
= cx
.layout_of(element
)?
;
443 let size
= element
.size
.checked_mul(count
, dl
).ok_or(LayoutError
::SizeOverflow(ty
))?
;
445 let abi
= if count
!= 0 && tcx
.conservative_is_privately_uninhabited(param_env
.and(ty
))
449 Abi
::Aggregate { sized: true }
452 let largest_niche
= if count
!= 0 { element.largest_niche }
else { None }
;
454 tcx
.intern_layout(LayoutS
{
455 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
456 fields
: FieldsShape
::Array { stride: element.size, count }
,
459 align
: element
.align
,
463 ty
::Slice(element
) => {
464 let element
= cx
.layout_of(element
)?
;
465 tcx
.intern_layout(LayoutS
{
466 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
467 fields
: FieldsShape
::Array { stride: element.size, count: 0 }
,
468 abi
: Abi
::Aggregate { sized: false }
,
470 align
: element
.align
,
474 ty
::Str
=> tcx
.intern_layout(LayoutS
{
475 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
476 fields
: FieldsShape
::Array { stride: Size::from_bytes(1), count: 0 }
,
477 abi
: Abi
::Aggregate { sized: false }
,
484 ty
::FnDef(..) => univariant(&[], &ReprOptions
::default(), StructKind
::AlwaysSized
)?
,
485 ty
::Dynamic(_
, _
, ty
::Dyn
) | ty
::Foreign(..) => {
486 let mut unit
= univariant_uninterned(
490 &ReprOptions
::default(),
491 StructKind
::AlwaysSized
,
494 Abi
::Aggregate { ref mut sized }
=> *sized
= false,
497 tcx
.intern_layout(unit
)
500 ty
::Generator(def_id
, substs
, _
) => generator_layout(cx
, ty
, def_id
, substs
)?
,
502 ty
::Closure(_
, ref substs
) => {
503 let tys
= substs
.as_closure().upvar_tys();
505 &tys
.map(|ty
| cx
.layout_of(ty
)).collect
::<Result
<Vec
<_
>, _
>>()?
,
506 &ReprOptions
::default(),
507 StructKind
::AlwaysSized
,
513 if tys
.len() == 0 { StructKind::AlwaysSized }
else { StructKind::MaybeUnsized }
;
516 &tys
.iter().map(|k
| cx
.layout_of(k
)).collect
::<Result
<Vec
<_
>, _
>>()?
,
517 &ReprOptions
::default(),
522 // SIMD vector types.
523 ty
::Adt(def
, substs
) if def
.repr().simd() => {
524 if !def
.is_struct() {
525 // Should have yielded E0517 by now.
526 tcx
.sess
.delay_span_bug(
528 "#[repr(simd)] was applied to an ADT that is not a struct",
530 return Err(LayoutError
::Unknown(ty
));
533 // Supported SIMD vectors are homogeneous ADTs with at least one field:
535 // * #[repr(simd)] struct S(T, T, T, T);
536 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
537 // * #[repr(simd)] struct S([T; 4])
539 // where T is a primitive scalar (integer/float/pointer).
541 // SIMD vectors with zero fields are not supported.
542 // (should be caught by typeck)
543 if def
.non_enum_variant().fields
.is_empty() {
544 tcx
.sess
.fatal(&format
!("monomorphising SIMD type `{}` of zero length", ty
));
547 // Type of the first ADT field:
548 let f0_ty
= def
.non_enum_variant().fields
[0].ty(tcx
, substs
);
550 // Heterogeneous SIMD vectors are not supported:
551 // (should be caught by typeck)
552 for fi
in &def
.non_enum_variant().fields
{
553 if fi
.ty(tcx
, substs
) != f0_ty
{
554 tcx
.sess
.fatal(&format
!("monomorphising heterogeneous SIMD type `{}`", ty
));
558 // The element type and number of elements of the SIMD vector
559 // are obtained from:
561 // * the element type and length of the single array field, if
562 // the first field is of array type, or
564 // * the homogeneous field type and the number of fields.
565 let (e_ty
, e_len
, is_array
) = if let ty
::Array(e_ty
, _
) = f0_ty
.kind() {
566 // First ADT field is an array:
568 // SIMD vectors with multiple array fields are not supported:
569 // (should be caught by typeck)
570 if def
.non_enum_variant().fields
.len() != 1 {
571 tcx
.sess
.fatal(&format
!(
572 "monomorphising SIMD type `{}` with more than one array field",
577 // Extract the number of elements from the layout of the array field:
578 let FieldsShape
::Array { count, .. }
= cx
.layout_of(f0_ty
)?
.layout
.fields() else {
579 return Err(LayoutError
::Unknown(ty
));
582 (*e_ty
, *count
, true)
584 // First ADT field is not an array:
585 (f0_ty
, def
.non_enum_variant().fields
.len() as _
, false)
588 // SIMD vectors of zero length are not supported.
589 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
592 // Can't be caught in typeck if the array length is generic.
594 tcx
.sess
.fatal(&format
!("monomorphising SIMD type `{}` of zero length", ty
));
595 } else if e_len
> MAX_SIMD_LANES
{
596 tcx
.sess
.fatal(&format
!(
597 "monomorphising SIMD type `{}` of length greater than {}",
602 // Compute the ABI of the element type:
603 let e_ly
= cx
.layout_of(e_ty
)?
;
604 let Abi
::Scalar(e_abi
) = e_ly
.abi
else {
605 // This error isn't caught in typeck, e.g., if
606 // the element type of the vector is generic.
607 tcx
.sess
.fatal(&format
!(
608 "monomorphising SIMD type `{}` with a non-primitive-scalar \
609 (integer/float/pointer) element type `{}`",
614 // Compute the size and alignment of the vector:
615 let size
= e_ly
.size
.checked_mul(e_len
, dl
).ok_or(LayoutError
::SizeOverflow(ty
))?
;
616 let align
= dl
.vector_align(size
);
617 let size
= size
.align_to(align
.abi
);
619 // Compute the placement of the vector fields:
620 let fields
= if is_array
{
621 FieldsShape
::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
623 FieldsShape
::Array { stride: e_ly.size, count: e_len }
626 tcx
.intern_layout(LayoutS
{
627 variants
: Variants
::Single { index: VariantIdx::new(0) }
,
629 abi
: Abi
::Vector { element: e_abi, count: e_len }
,
630 largest_niche
: e_ly
.largest_niche
,
637 ty
::Adt(def
, substs
) => {
638 // Cache the field layouts.
645 .map(|field
| cx
.layout_of(field
.ty(tcx
, substs
)))
646 .collect
::<Result
<Vec
<_
>, _
>>()
648 .collect
::<Result
<IndexVec
<VariantIdx
, _
>, _
>>()?
;
651 if def
.repr().pack
.is_some() && def
.repr().align
.is_some() {
652 cx
.tcx
.sess
.delay_span_bug(
653 tcx
.def_span(def
.did()),
654 "union cannot be packed and aligned",
656 return Err(LayoutError
::Unknown(ty
));
660 if def
.repr().pack
.is_some() { dl.i8_align }
else { dl.aggregate_align }
;
662 if let Some(repr_align
) = def
.repr().align
{
663 align
= align
.max(AbiAndPrefAlign
::new(repr_align
));
666 let optimize
= !def
.repr().inhibit_union_abi_opt();
667 let mut size
= Size
::ZERO
;
668 let mut abi
= Abi
::Aggregate { sized: true }
;
669 let index
= VariantIdx
::new(0);
670 for field
in &variants
[index
] {
671 assert
!(!field
.is_unsized());
672 align
= align
.max(field
.align
);
674 // If all non-ZST fields have the same ABI, forward this ABI
675 if optimize
&& !field
.is_zst() {
676 // Discard valid range information and allow undef
677 let field_abi
= match field
.abi
{
678 Abi
::Scalar(x
) => Abi
::Scalar(x
.to_union()),
679 Abi
::ScalarPair(x
, y
) => Abi
::ScalarPair(x
.to_union(), y
.to_union()),
680 Abi
::Vector { element: x, count }
=> {
681 Abi
::Vector { element: x.to_union(), count }
683 Abi
::Uninhabited
| Abi
::Aggregate { .. }
=> {
684 Abi
::Aggregate { sized: true }
688 if size
== Size
::ZERO
{
689 // first non ZST: initialize 'abi'
691 } else if abi
!= field_abi
{
692 // different fields have different ABI: reset to Aggregate
693 abi
= Abi
::Aggregate { sized: true }
;
697 size
= cmp
::max(size
, field
.size
);
700 if let Some(pack
) = def
.repr().pack
{
701 align
= align
.min(AbiAndPrefAlign
::new(pack
));
704 return Ok(tcx
.intern_layout(LayoutS
{
705 variants
: Variants
::Single { index }
,
706 fields
: FieldsShape
::Union(
707 NonZeroUsize
::new(variants
[index
].len()).ok_or(LayoutError
::Unknown(ty
))?
,
712 size
: size
.align_to(align
.abi
),
716 // A variant is absent if it's uninhabited and only has ZST fields.
717 // Present uninhabited variants only require space for their fields,
718 // but *not* an encoding of the discriminant (e.g., a tag value).
719 // See issue #49298 for more details on the need to leave space
720 // for non-ZST uninhabited data (mostly partial initialization).
721 let absent
= |fields
: &[TyAndLayout
<'_
>]| {
722 let uninhabited
= fields
.iter().any(|f
| f
.abi
.is_uninhabited());
723 let is_zst
= fields
.iter().all(|f
| f
.is_zst());
724 uninhabited
&& is_zst
726 let (present_first
, present_second
) = {
727 let mut present_variants
= variants
729 .filter_map(|(i
, v
)| if absent(v
) { None }
else { Some(i) }
);
730 (present_variants
.next(), present_variants
.next())
732 let present_first
= match present_first
{
733 Some(present_first
) => present_first
,
734 // Uninhabited because it has no variants, or only absent ones.
735 None
if def
.is_enum() => {
736 return Ok(tcx
.layout_of(param_env
.and(tcx
.types
.never
))?
.layout
);
738 // If it's a struct, still compute a layout so that we can still compute the
740 None
=> VariantIdx
::new(0),
743 let is_struct
= !def
.is_enum() ||
744 // Only one variant is present.
745 (present_second
.is_none() &&
746 // Representation optimizations are allowed.
747 !def
.repr().inhibit_enum_layout_opt());
749 // Struct, or univariant enum equivalent to a struct.
750 // (Typechecking will reject discriminant-sizing attrs.)
752 let v
= present_first
;
753 let kind
= if def
.is_enum() || variants
[v
].is_empty() {
754 StructKind
::AlwaysSized
756 let param_env
= tcx
.param_env(def
.did());
757 let last_field
= def
.variant(v
).fields
.last().unwrap();
758 let always_sized
= tcx
.type_of(last_field
.did
).is_sized(tcx
, param_env
);
759 if !always_sized { StructKind::MaybeUnsized }
else { StructKind::AlwaysSized }
762 let mut st
= univariant_uninterned(cx
, ty
, &variants
[v
], &def
.repr(), kind
)?
;
763 st
.variants
= Variants
::Single { index: v }
;
765 if def
.is_unsafe_cell() {
766 let hide_niches
= |scalar
: &mut _
| match scalar
{
767 Scalar
::Initialized { value, valid_range }
=> {
768 *valid_range
= WrappingRange
::full(value
.size(dl
))
770 // Already doesn't have any niches
771 Scalar
::Union { .. }
=> {}
774 Abi
::Uninhabited
=> {}
775 Abi
::Scalar(scalar
) => hide_niches(scalar
),
776 Abi
::ScalarPair(a
, b
) => {
780 Abi
::Vector { element, count: _ }
=> hide_niches(element
),
781 Abi
::Aggregate { sized: _ }
=> {}
783 st
.largest_niche
= None
;
784 return Ok(tcx
.intern_layout(st
));
787 let (start
, end
) = cx
.tcx
.layout_scalar_valid_range(def
.did());
789 Abi
::Scalar(ref mut scalar
) | Abi
::ScalarPair(ref mut scalar
, _
) => {
790 // the asserts ensure that we are not using the
791 // `#[rustc_layout_scalar_valid_range(n)]`
792 // attribute to widen the range of anything as that would probably
793 // result in UB somewhere
794 // FIXME(eddyb) the asserts are probably not needed,
795 // as larger validity ranges would result in missed
796 // optimizations, *not* wrongly assuming the inner
797 // value is valid. e.g. unions enlarge validity ranges,
798 // because the values may be uninitialized.
799 if let Bound
::Included(start
) = start
{
800 // FIXME(eddyb) this might be incorrect - it doesn't
801 // account for wrap-around (end < start) ranges.
802 let valid_range
= scalar
.valid_range_mut();
803 assert
!(valid_range
.start
<= start
);
804 valid_range
.start
= start
;
806 if let Bound
::Included(end
) = end
{
807 // FIXME(eddyb) this might be incorrect - it doesn't
808 // account for wrap-around (end < start) ranges.
809 let valid_range
= scalar
.valid_range_mut();
810 assert
!(valid_range
.end
>= end
);
811 valid_range
.end
= end
;
814 // Update `largest_niche` if we have introduced a larger niche.
815 let niche
= Niche
::from_scalar(dl
, Size
::ZERO
, *scalar
);
816 if let Some(niche
) = niche
{
817 match st
.largest_niche
{
818 Some(largest_niche
) => {
819 // Replace the existing niche even if they're equal,
820 // because this one is at a lower offset.
821 if largest_niche
.available(dl
) <= niche
.available(dl
) {
822 st
.largest_niche
= Some(niche
);
825 None
=> st
.largest_niche
= Some(niche
),
830 start
== Bound
::Unbounded
&& end
== Bound
::Unbounded
,
831 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
837 return Ok(tcx
.intern_layout(st
));
840 // At this point, we have handled all unions and
841 // structs. (We have also handled univariant enums
842 // that allow representation optimization.)
843 assert
!(def
.is_enum());
845 // Until we've decided whether to use the tagged or
846 // niche filling LayoutS, we don't want to intern the
847 // variant layouts, so we can't store them in the
848 // overall LayoutS. Store the overall LayoutS
849 // and the variant LayoutSs here until then.
850 struct TmpLayout
<'tcx
> {
851 layout
: LayoutS
<'tcx
>,
852 variants
: IndexVec
<VariantIdx
, LayoutS
<'tcx
>>,
855 let calculate_niche_filling_layout
=
856 || -> Result
<Option
<TmpLayout
<'tcx
>>, LayoutError
<'tcx
>> {
857 // The current code for niche-filling relies on variant indices
858 // instead of actual discriminants, so enums with
859 // explicit discriminants (RFC #2363) would misbehave.
860 if def
.repr().inhibit_enum_layout_opt()
864 .any(|(i
, v
)| v
.discr
!= ty
::VariantDiscr
::Relative(i
.as_u32()))
869 if variants
.len() < 2 {
873 let mut align
= dl
.aggregate_align
;
874 let mut variant_layouts
= variants
877 let mut st
= univariant_uninterned(
882 StructKind
::AlwaysSized
,
884 st
.variants
= Variants
::Single { index: j }
;
886 align
= align
.max(st
.align
);
890 .collect
::<Result
<IndexVec
<VariantIdx
, _
>, _
>>()?
;
892 let largest_variant_index
= match variant_layouts
894 .max_by_key(|(_i
, layout
)| layout
.size
.bytes())
895 .map(|(i
, _layout
)| i
)
897 None
=> return Ok(None
),
901 let all_indices
= VariantIdx
::new(0)..=VariantIdx
::new(variants
.len() - 1);
902 let needs_disc
= |index
: VariantIdx
| {
903 index
!= largest_variant_index
&& !absent(&variants
[index
])
905 let niche_variants
= all_indices
.clone().find(|v
| needs_disc(*v
)).unwrap()
906 ..=all_indices
.rev().find(|v
| needs_disc(*v
)).unwrap();
908 let count
= niche_variants
.size_hint().1.unwrap
() as u128
;
910 // Find the field with the largest niche
911 let (field_index
, niche
, (niche_start
, niche_scalar
)) = match variants
912 [largest_variant_index
]
915 .filter_map(|(j
, field
)| Some((j
, field
.largest_niche?
)))
916 .max_by_key(|(_
, niche
)| niche
.available(dl
))
917 .and_then(|(j
, niche
)| Some((j
, niche
, niche
.reserve(cx
, count
)?
)))
919 None
=> return Ok(None
),
923 let niche_offset
= niche
.offset
924 + variant_layouts
[largest_variant_index
].fields
.offset(field_index
);
925 let niche_size
= niche
.value
.size(dl
);
926 let size
= variant_layouts
[largest_variant_index
].size
.align_to(align
.abi
);
928 let all_variants_fit
=
929 variant_layouts
.iter_enumerated_mut().all(|(i
, layout
)| {
930 if i
== largest_variant_index
{
934 layout
.largest_niche
= None
;
936 if layout
.size
<= niche_offset
{
937 // This variant will fit before the niche.
941 // Determine if it'll fit after the niche.
942 let this_align
= layout
.align
.abi
;
943 let this_offset
= (niche_offset
+ niche_size
).align_to(this_align
);
945 if this_offset
+ layout
.size
> size
{
949 // It'll fit, but we need to make some adjustments.
950 match layout
.fields
{
951 FieldsShape
::Arbitrary { ref mut offsets, .. }
=> {
952 for (j
, offset
) in offsets
.iter_mut().enumerate() {
953 if !variants
[i
][j
].is_zst() {
954 *offset
+= this_offset
;
959 panic
!("Layout of fields should be Arbitrary for variants")
963 // It can't be a Scalar or ScalarPair because the offset isn't 0.
964 if !layout
.abi
.is_uninhabited() {
965 layout
.abi
= Abi
::Aggregate { sized: true }
;
967 layout
.size
+= this_offset
;
972 if !all_variants_fit
{
976 let largest_niche
= Niche
::from_scalar(dl
, niche_offset
, niche_scalar
);
978 let others_zst
= variant_layouts
980 .all(|(i
, layout
)| i
== largest_variant_index
|| layout
.size
== Size
::ZERO
);
981 let same_size
= size
== variant_layouts
[largest_variant_index
].size
;
982 let same_align
= align
== variant_layouts
[largest_variant_index
].align
;
984 let abi
= if variant_layouts
.iter().all(|v
| v
.abi
.is_uninhabited()) {
986 } else if same_size
&& same_align
&& others_zst
{
987 match variant_layouts
[largest_variant_index
].abi
{
988 // When the total alignment and size match, we can use the
989 // same ABI as the scalar variant with the reserved niche.
990 Abi
::Scalar(_
) => Abi
::Scalar(niche_scalar
),
991 Abi
::ScalarPair(first
, second
) => {
992 // Only the niche is guaranteed to be initialised,
993 // so use union layouts for the other primitive.
994 if niche_offset
== Size
::ZERO
{
995 Abi
::ScalarPair(niche_scalar
, second
.to_union())
997 Abi
::ScalarPair(first
.to_union(), niche_scalar
)
1000 _
=> Abi
::Aggregate { sized: true }
,
1003 Abi
::Aggregate { sized: true }
1006 let layout
= LayoutS
{
1007 variants
: Variants
::Multiple
{
1009 tag_encoding
: TagEncoding
::Niche
{
1010 untagged_variant
: largest_variant_index
,
1015 variants
: IndexVec
::new(),
1017 fields
: FieldsShape
::Arbitrary
{
1018 offsets
: vec
![niche_offset
],
1019 memory_index
: vec
![0],
1027 Ok(Some(TmpLayout { layout, variants: variant_layouts }
))
1030 let niche_filling_layout
= calculate_niche_filling_layout()?
;
1032 let (mut min
, mut max
) = (i128
::MAX
, i128
::MIN
);
1033 let discr_type
= def
.repr().discr_type();
1034 let bits
= Integer
::from_attr(cx
, discr_type
).size().bits();
1035 for (i
, discr
) in def
.discriminants(tcx
) {
1036 if variants
[i
].iter().any(|f
| f
.abi
.is_uninhabited()) {
1039 let mut x
= discr
.val
as i128
;
1040 if discr_type
.is_signed() {
1041 // sign extend the raw representation to be an i128
1042 x
= (x
<< (128 - bits
)) >> (128 - bits
);
1051 // We might have no inhabited variants, so pretend there's at least one.
1052 if (min
, max
) == (i128
::MAX
, i128
::MIN
) {
1056 assert
!(min
<= max
, "discriminant range is {}...{}", min
, max
);
1057 let (min_ity
, signed
) = Integer
::repr_discr(tcx
, ty
, &def
.repr(), min
, max
);
1059 let mut align
= dl
.aggregate_align
;
1060 let mut size
= Size
::ZERO
;
1062 // We're interested in the smallest alignment, so start large.
1063 let mut start_align
= Align
::from_bytes(256).unwrap();
1064 assert_eq
!(Integer
::for_align(dl
, start_align
), None
);
1066 // repr(C) on an enum tells us to make a (tag, union) layout,
1067 // so we need to grow the prefix alignment to be at least
1068 // the alignment of the union. (This value is used both for
1069 // determining the alignment of the overall enum, and the
1070 // determining the alignment of the payload after the tag.)
1071 let mut prefix_align
= min_ity
.align(dl
).abi
;
1073 for fields
in &variants
{
1074 for field
in fields
{
1075 prefix_align
= prefix_align
.max(field
.align
.abi
);
1080 // Create the set of structs that represent each variant.
1081 let mut layout_variants
= variants
1083 .map(|(i
, field_layouts
)| {
1084 let mut st
= univariant_uninterned(
1089 StructKind
::Prefixed(min_ity
.size(), prefix_align
),
1091 st
.variants
= Variants
::Single { index: i }
;
1092 // Find the first field we can't move later
1093 // to make room for a larger discriminant.
1094 for field
in st
.fields
.index_by_increasing_offset().map(|j
| field_layouts
[j
]) {
1095 if !field
.is_zst() || field
.align
.abi
.bytes() != 1 {
1096 start_align
= start_align
.min(field
.align
.abi
);
1100 size
= cmp
::max(size
, st
.size
);
1101 align
= align
.max(st
.align
);
1104 .collect
::<Result
<IndexVec
<VariantIdx
, _
>, _
>>()?
;
1106 // Align the maximum variant size to the largest alignment.
1107 size
= size
.align_to(align
.abi
);
1109 if size
.bytes() >= dl
.obj_size_bound() {
1110 return Err(LayoutError
::SizeOverflow(ty
));
1113 let typeck_ity
= Integer
::from_attr(dl
, def
.repr().discr_type());
1114 if typeck_ity
< min_ity
{
1115 // It is a bug if Layout decided on a greater discriminant size than typeck for
1116 // some reason at this point (based on values discriminant can take on). Mostly
1117 // because this discriminant will be loaded, and then stored into variable of
1118 // type calculated by typeck. Consider such case (a bug): typeck decided on
1119 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1120 // discriminant values. That would be a bug, because then, in codegen, in order
1121 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1122 // space necessary to represent would have to be discarded (or layout is wrong
1123 // on thinking it needs 16 bits)
1125 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1129 // However, it is fine to make discr type however large (as an optimisation)
1130 // after this point – we’ll just truncate the value we load in codegen.
1133 // Check to see if we should use a different type for the
1134 // discriminant. We can safely use a type with the same size
1135 // as the alignment of the first field of each variant.
1136 // We increase the size of the discriminant to avoid LLVM copying
1137 // padding when it doesn't need to. This normally causes unaligned
1138 // load/stores and excessive memcpy/memset operations. By using a
1139 // bigger integer size, LLVM can be sure about its contents and
1140 // won't be so conservative.
1142 // Use the initial field alignment
1143 let mut ity
= if def
.repr().c() || def
.repr().int
.is_some() {
1146 Integer
::for_align(dl
, start_align
).unwrap_or(min_ity
)
1149 // If the alignment is not larger than the chosen discriminant size,
1150 // don't use the alignment as the final size.
1154 // Patch up the variants' first few fields.
1155 let old_ity_size
= min_ity
.size();
1156 let new_ity_size
= ity
.size();
1157 for variant
in &mut layout_variants
{
1158 match variant
.fields
{
1159 FieldsShape
::Arbitrary { ref mut offsets, .. }
=> {
1161 if *i
<= old_ity_size
{
1162 assert_eq
!(*i
, old_ity_size
);
1166 // We might be making the struct larger.
1167 if variant
.size
<= old_ity_size
{
1168 variant
.size
= new_ity_size
;
1176 let tag_mask
= ity
.size().unsigned_int_max();
1177 let tag
= Scalar
::Initialized
{
1178 value
: Int(ity
, signed
),
1179 valid_range
: WrappingRange
{
1180 start
: (min
as u128
& tag_mask
),
1181 end
: (max
as u128
& tag_mask
),
1184 let mut abi
= Abi
::Aggregate { sized: true }
;
1186 if layout_variants
.iter().all(|v
| v
.abi
.is_uninhabited()) {
1187 abi
= Abi
::Uninhabited
;
1188 } else if tag
.size(dl
) == size
{
1189 // Make sure we only use scalar layout when the enum is entirely its
1190 // own tag (i.e. it has no padding nor any non-ZST variant fields).
1191 abi
= Abi
::Scalar(tag
);
1193 // Try to use a ScalarPair for all tagged enums.
1194 let mut common_prim
= None
;
1195 let mut common_prim_initialized_in_all_variants
= true;
1196 for (field_layouts
, layout_variant
) in iter
::zip(&variants
, &layout_variants
) {
1197 let FieldsShape
::Arbitrary { ref offsets, .. }
= layout_variant
.fields
else {
1200 let mut fields
= iter
::zip(field_layouts
, offsets
).filter(|p
| !p
.0.is_zst
());
1201 let (field
, offset
) = match (fields
.next(), fields
.next()) {
1203 common_prim_initialized_in_all_variants
= false;
1206 (Some(pair
), None
) => pair
,
1212 let prim
= match field
.abi
{
1213 Abi
::Scalar(scalar
) => {
1214 common_prim_initialized_in_all_variants
&=
1215 matches
!(scalar
, Scalar
::Initialized { .. }
);
1223 if let Some(pair
) = common_prim
{
1224 // This is pretty conservative. We could go fancier
1225 // by conflating things like i32 and u32, or even
1226 // realising that (u8, u8) could just cohabit with
1228 if pair
!= (prim
, offset
) {
1233 common_prim
= Some((prim
, offset
));
1236 if let Some((prim
, offset
)) = common_prim
{
1237 let prim_scalar
= if common_prim_initialized_in_all_variants
{
1240 // Common prim might be uninit.
1241 Scalar
::Union { value: prim }
1243 let pair
= scalar_pair(cx
, tag
, prim_scalar
);
1244 let pair_offsets
= match pair
.fields
{
1245 FieldsShape
::Arbitrary { ref offsets, ref memory_index }
=> {
1246 assert_eq
!(memory_index
, &[0, 1]);
1251 if pair_offsets
[0] == Size
::ZERO
1252 && pair_offsets
[1] == *offset
1253 && align
== pair
.align
1254 && size
== pair
.size
1256 // We can use `ScalarPair` only when it matches our
1257 // already computed layout (including `#[repr(C)]`).
1263 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
1264 // variants to ensure they are consistent. This is because a downcast is
1265 // semantically a NOP, and thus should not affect layout.
1266 if matches
!(abi
, Abi
::Scalar(..) | Abi
::ScalarPair(..)) {
1267 for variant
in &mut layout_variants
{
1268 // We only do this for variants with fields; the others are not accessed anyway.
1269 // Also do not overwrite any already existing "clever" ABIs.
1270 if variant
.fields
.count() > 0 && matches
!(variant
.abi
, Abi
::Aggregate { .. }
) {
1272 // Also need to bump up the size and alignment, so that the entire value fits in here.
1273 variant
.size
= cmp
::max(variant
.size
, size
);
1274 variant
.align
.abi
= cmp
::max(variant
.align
.abi
, align
.abi
);
1279 let largest_niche
= Niche
::from_scalar(dl
, Size
::ZERO
, tag
);
1281 let tagged_layout
= LayoutS
{
1282 variants
: Variants
::Multiple
{
1284 tag_encoding
: TagEncoding
::Direct
,
1286 variants
: IndexVec
::new(),
1288 fields
: FieldsShape
::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
,
1295 let tagged_layout
= TmpLayout { layout: tagged_layout, variants: layout_variants }
;
1297 let mut best_layout
= match (tagged_layout
, niche_filling_layout
) {
1299 // Pick the smaller layout; otherwise,
1300 // pick the layout with the larger niche; otherwise,
1301 // pick tagged as it has simpler codegen.
1303 let niche_size
= |tmp_l
: &TmpLayout
<'_
>| {
1304 tmp_l
.layout
.largest_niche
.map_or(0, |n
| n
.available(dl
))
1307 tl
.layout
.size
.cmp(&nl
.layout
.size
),
1308 niche_size(&tl
).cmp(&niche_size(&nl
)),
1311 (Equal
, Less
) => nl
,
1318 // Now we can intern the variant layouts and store them in the enum layout.
1319 best_layout
.layout
.variants
= match best_layout
.layout
.variants
{
1320 Variants
::Multiple { tag, tag_encoding, tag_field, .. }
=> Variants
::Multiple
{
1324 variants
: best_layout
1327 .map(|layout
| tcx
.intern_layout(layout
))
1333 tcx
.intern_layout(best_layout
.layout
)
1336 // Types with no meaningful known layout.
1337 ty
::Projection(_
) | ty
::Opaque(..) => {
1338 // NOTE(eddyb) `layout_of` query should've normalized these away,
1339 // if that was possible, so there's no reason to try again here.
1340 return Err(LayoutError
::Unknown(ty
));
1343 ty
::Placeholder(..) | ty
::GeneratorWitness(..) | ty
::Infer(_
) => {
1344 bug
!("Layout::compute: unexpected type `{}`", ty
)
1347 ty
::Bound(..) | ty
::Param(_
) | ty
::Error(_
) => {
1348 return Err(LayoutError
::Unknown(ty
));
1353 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1354 #[derive(Clone, Debug, PartialEq)]
1355 enum SavedLocalEligibility
{
1357 Assigned(VariantIdx
),
1358 // FIXME: Use newtype_index so we aren't wasting bytes
1359 Ineligible(Option
<u32>),
1362 // When laying out generators, we divide our saved local fields into two
1363 // categories: overlap-eligible and overlap-ineligible.
1365 // Those fields which are ineligible for overlap go in a "prefix" at the
1366 // beginning of the layout, and always have space reserved for them.
1368 // Overlap-eligible fields are only assigned to one variant, so we lay
1369 // those fields out for each variant and put them right after the
1372 // Finally, in the layout details, we point to the fields from the
1373 // variants they are assigned to. It is possible for some fields to be
1374 // included in multiple variants. No field ever "moves around" in the
1375 // layout; its offset is always the same.
1377 // Also included in the layout are the upvars and the discriminant.
1378 // These are included as fields on the "outer" layout; they are not part
1381 /// Compute the eligibility and assignment of each local.
1382 fn generator_saved_local_eligibility
<'tcx
>(
1383 info
: &GeneratorLayout
<'tcx
>,
1384 ) -> (BitSet
<GeneratorSavedLocal
>, IndexVec
<GeneratorSavedLocal
, SavedLocalEligibility
>) {
1385 use SavedLocalEligibility
::*;
1387 let mut assignments
: IndexVec
<GeneratorSavedLocal
, SavedLocalEligibility
> =
1388 IndexVec
::from_elem_n(Unassigned
, info
.field_tys
.len());
1390 // The saved locals not eligible for overlap. These will get
1391 // "promoted" to the prefix of our generator.
1392 let mut ineligible_locals
= BitSet
::new_empty(info
.field_tys
.len());
1394 // Figure out which of our saved locals are fields in only
1395 // one variant. The rest are deemed ineligible for overlap.
1396 for (variant_index
, fields
) in info
.variant_fields
.iter_enumerated() {
1397 for local
in fields
{
1398 match assignments
[*local
] {
1400 assignments
[*local
] = Assigned(variant_index
);
1403 // We've already seen this local at another suspension
1404 // point, so it is no longer a candidate.
1406 "removing local {:?} in >1 variant ({:?}, {:?})",
1411 ineligible_locals
.insert(*local
);
1412 assignments
[*local
] = Ineligible(None
);
1419 // Next, check every pair of eligible locals to see if they
1421 for local_a
in info
.storage_conflicts
.rows() {
1422 let conflicts_a
= info
.storage_conflicts
.count(local_a
);
1423 if ineligible_locals
.contains(local_a
) {
1427 for local_b
in info
.storage_conflicts
.iter(local_a
) {
1428 // local_a and local_b are storage live at the same time, therefore they
1429 // cannot overlap in the generator layout. The only way to guarantee
1430 // this is if they are in the same variant, or one is ineligible
1431 // (which means it is stored in every variant).
1432 if ineligible_locals
.contains(local_b
) || assignments
[local_a
] == assignments
[local_b
] {
1436 // If they conflict, we will choose one to make ineligible.
1437 // This is not always optimal; it's just a greedy heuristic that
1438 // seems to produce good results most of the time.
1439 let conflicts_b
= info
.storage_conflicts
.count(local_b
);
1440 let (remove
, other
) =
1441 if conflicts_a
> conflicts_b { (local_a, local_b) }
else { (local_b, local_a) }
;
1442 ineligible_locals
.insert(remove
);
1443 assignments
[remove
] = Ineligible(None
);
1444 trace
!("removing local {:?} due to conflict with {:?}", remove
, other
);
1448 // Count the number of variants in use. If only one of them, then it is
1449 // impossible to overlap any locals in our layout. In this case it's
1450 // always better to make the remaining locals ineligible, so we can
1451 // lay them out with the other locals in the prefix and eliminate
1452 // unnecessary padding bytes.
1454 let mut used_variants
= BitSet
::new_empty(info
.variant_fields
.len());
1455 for assignment
in &assignments
{
1456 if let Assigned(idx
) = assignment
{
1457 used_variants
.insert(*idx
);
1460 if used_variants
.count() < 2 {
1461 for assignment
in assignments
.iter_mut() {
1462 *assignment
= Ineligible(None
);
1464 ineligible_locals
.insert_all();
1468 // Write down the order of our locals that will be promoted to the prefix.
1470 for (idx
, local
) in ineligible_locals
.iter().enumerate() {
1471 assignments
[local
] = Ineligible(Some(idx
as u32));
1474 debug
!("generator saved local assignments: {:?}", assignments
);
1476 (ineligible_locals
, assignments
)
1479 /// Compute the full generator layout.
1480 fn generator_layout
<'tcx
>(
1481 cx
: &LayoutCx
<'tcx
, TyCtxt
<'tcx
>>,
1483 def_id
: hir
::def_id
::DefId
,
1484 substs
: SubstsRef
<'tcx
>,
1485 ) -> Result
<Layout
<'tcx
>, LayoutError
<'tcx
>> {
1486 use SavedLocalEligibility
::*;
1488 let subst_field
= |ty
: Ty
<'tcx
>| EarlyBinder(ty
).subst(tcx
, substs
);
1490 let Some(info
) = tcx
.generator_layout(def_id
) else {
1491 return Err(LayoutError
::Unknown(ty
));
1493 let (ineligible_locals
, assignments
) = generator_saved_local_eligibility(&info
);
1495 // Build a prefix layout, including "promoting" all ineligible
1496 // locals as part of the prefix. We compute the layout of all of
1497 // these fields at once to get optimal packing.
1498 let tag_index
= substs
.as_generator().prefix_tys().count();
1500 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1501 let max_discr
= (info
.variant_fields
.len() - 1) as u128
;
1502 let discr_int
= Integer
::fit_unsigned(max_discr
);
1503 let discr_int_ty
= discr_int
.to_ty(tcx
, false);
1504 let tag
= Scalar
::Initialized
{
1505 value
: Primitive
::Int(discr_int
, false),
1506 valid_range
: WrappingRange { start: 0, end: max_discr }
,
1508 let tag_layout
= cx
.tcx
.intern_layout(LayoutS
::scalar(cx
, tag
));
1509 let tag_layout
= TyAndLayout { ty: discr_int_ty, layout: tag_layout }
;
1511 let promoted_layouts
= ineligible_locals
1513 .map(|local
| subst_field(info
.field_tys
[local
]))
1514 .map(|ty
| tcx
.mk_maybe_uninit(ty
))
1515 .map(|ty
| cx
.layout_of(ty
));
1516 let prefix_layouts
= substs
1519 .map(|ty
| cx
.layout_of(ty
))
1520 .chain(iter
::once(Ok(tag_layout
)))
1521 .chain(promoted_layouts
)
1522 .collect
::<Result
<Vec
<_
>, _
>>()?
;
1523 let prefix
= univariant_uninterned(
1527 &ReprOptions
::default(),
1528 StructKind
::AlwaysSized
,
1531 let (prefix_size
, prefix_align
) = (prefix
.size
, prefix
.align
);
1533 // Split the prefix layout into the "outer" fields (upvars and
1534 // discriminant) and the "promoted" fields. Promoted fields will
1535 // get included in each variant that requested them in
1537 debug
!("prefix = {:#?}", prefix
);
1538 let (outer_fields
, promoted_offsets
, promoted_memory_index
) = match prefix
.fields
{
1539 FieldsShape
::Arbitrary { mut offsets, memory_index }
=> {
1540 let mut inverse_memory_index
= invert_mapping(&memory_index
);
1542 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1543 // "outer" and "promoted" fields respectively.
1544 let b_start
= (tag_index
+ 1) as u32;
1545 let offsets_b
= offsets
.split_off(b_start
as usize);
1546 let offsets_a
= offsets
;
1548 // Disentangle the "a" and "b" components of `inverse_memory_index`
1549 // by preserving the order but keeping only one disjoint "half" each.
1550 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1551 let inverse_memory_index_b
: Vec
<_
> =
1552 inverse_memory_index
.iter().filter_map(|&i
| i
.checked_sub(b_start
)).collect();
1553 inverse_memory_index
.retain(|&i
| i
< b_start
);
1554 let inverse_memory_index_a
= inverse_memory_index
;
1556 // Since `inverse_memory_index_{a,b}` each only refer to their
1557 // respective fields, they can be safely inverted
1558 let memory_index_a
= invert_mapping(&inverse_memory_index_a
);
1559 let memory_index_b
= invert_mapping(&inverse_memory_index_b
);
1562 FieldsShape
::Arbitrary { offsets: offsets_a, memory_index: memory_index_a }
;
1563 (outer_fields
, offsets_b
, memory_index_b
)
1568 let mut size
= prefix
.size
;
1569 let mut align
= prefix
.align
;
1573 .map(|(index
, variant_fields
)| {
1574 // Only include overlap-eligible fields when we compute our variant layout.
1575 let variant_only_tys
= variant_fields
1577 .filter(|local
| match assignments
[**local
] {
1578 Unassigned
=> bug
!(),
1579 Assigned(v
) if v
== index
=> true,
1580 Assigned(_
) => bug
!("assignment does not match variant"),
1581 Ineligible(_
) => false,
1583 .map(|local
| subst_field(info
.field_tys
[*local
]));
1585 let mut variant
= univariant_uninterned(
1588 &variant_only_tys
.map(|ty
| cx
.layout_of(ty
)).collect
::<Result
<Vec
<_
>, _
>>()?
,
1589 &ReprOptions
::default(),
1590 StructKind
::Prefixed(prefix_size
, prefix_align
.abi
),
1592 variant
.variants
= Variants
::Single { index }
;
1594 let FieldsShape
::Arbitrary { offsets, memory_index }
= variant
.fields
else {
1598 // Now, stitch the promoted and variant-only fields back together in
1599 // the order they are mentioned by our GeneratorLayout.
1600 // Because we only use some subset (that can differ between variants)
1601 // of the promoted fields, we can't just pick those elements of the
1602 // `promoted_memory_index` (as we'd end up with gaps).
1603 // So instead, we build an "inverse memory_index", as if all of the
1604 // promoted fields were being used, but leave the elements not in the
1605 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1606 // obtain a valid (bijective) mapping.
1607 const INVALID_FIELD_IDX
: u32 = !0;
1608 let mut combined_inverse_memory_index
=
1609 vec
![INVALID_FIELD_IDX
; promoted_memory_index
.len() + memory_index
.len()];
1610 let mut offsets_and_memory_index
= iter
::zip(offsets
, memory_index
);
1611 let combined_offsets
= variant_fields
1615 let (offset
, memory_index
) = match assignments
[*local
] {
1616 Unassigned
=> bug
!(),
1618 let (offset
, memory_index
) = offsets_and_memory_index
.next().unwrap();
1619 (offset
, promoted_memory_index
.len() as u32 + memory_index
)
1621 Ineligible(field_idx
) => {
1622 let field_idx
= field_idx
.unwrap() as usize;
1623 (promoted_offsets
[field_idx
], promoted_memory_index
[field_idx
])
1626 combined_inverse_memory_index
[memory_index
as usize] = i
as u32;
1631 // Remove the unused slots and invert the mapping to obtain the
1632 // combined `memory_index` (also see previous comment).
1633 combined_inverse_memory_index
.retain(|&i
| i
!= INVALID_FIELD_IDX
);
1634 let combined_memory_index
= invert_mapping(&combined_inverse_memory_index
);
1636 variant
.fields
= FieldsShape
::Arbitrary
{
1637 offsets
: combined_offsets
,
1638 memory_index
: combined_memory_index
,
1641 size
= size
.max(variant
.size
);
1642 align
= align
.max(variant
.align
);
1643 Ok(tcx
.intern_layout(variant
))
1645 .collect
::<Result
<IndexVec
<VariantIdx
, _
>, _
>>()?
;
1647 size
= size
.align_to(align
.abi
);
1649 let abi
= if prefix
.abi
.is_uninhabited() || variants
.iter().all(|v
| v
.abi().is_uninhabited()) {
1652 Abi
::Aggregate { sized: true }
1655 let layout
= tcx
.intern_layout(LayoutS
{
1656 variants
: Variants
::Multiple
{
1658 tag_encoding
: TagEncoding
::Direct
,
1659 tag_field
: tag_index
,
1662 fields
: outer_fields
,
1664 largest_niche
: prefix
.largest_niche
,
1668 debug
!("generator layout ({:?}): {:#?}", ty
, layout
);
1672 /// This is invoked by the `layout_of` query to record the final
1673 /// layout of each type.
1675 fn record_layout_for_printing
<'tcx
>(cx
: &LayoutCx
<'tcx
, TyCtxt
<'tcx
>>, layout
: TyAndLayout
<'tcx
>) {
1676 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1677 // for dumping later.
1678 if cx
.tcx
.sess
.opts
.unstable_opts
.print_type_sizes
{
1679 record_layout_for_printing_outlined(cx
, layout
)
1683 fn record_layout_for_printing_outlined
<'tcx
>(
1684 cx
: &LayoutCx
<'tcx
, TyCtxt
<'tcx
>>,
1685 layout
: TyAndLayout
<'tcx
>,
1687 // Ignore layouts that are done with non-empty environments or
1688 // non-monomorphic layouts, as the user only wants to see the stuff
1689 // resulting from the final codegen session.
1690 if layout
.ty
.has_non_region_param() || !cx
.param_env
.caller_bounds().is_empty() {
1694 // (delay format until we actually need it)
1695 let record
= |kind
, packed
, opt_discr_size
, variants
| {
1696 let type_desc
= format
!("{:?}", layout
.ty
);
1697 cx
.tcx
.sess
.code_stats
.record_type_size(
1708 let adt_def
= match *layout
.ty
.kind() {
1709 ty
::Adt(ref adt_def
, _
) => {
1710 debug
!("print-type-size t: `{:?}` process adt", layout
.ty
);
1714 ty
::Closure(..) => {
1715 debug
!("print-type-size t: `{:?}` record closure", layout
.ty
);
1716 record(DataTypeKind
::Closure
, false, None
, vec
![]);
1721 debug
!("print-type-size t: `{:?}` skip non-nominal", layout
.ty
);
1726 let adt_kind
= adt_def
.adt_kind();
1727 let adt_packed
= adt_def
.repr().pack
.is_some();
1729 let build_variant_info
= |n
: Option
<Symbol
>, flds
: &[Symbol
], layout
: TyAndLayout
<'tcx
>| {
1730 let mut min_size
= Size
::ZERO
;
1731 let field_info
: Vec
<_
> = flds
1735 let field_layout
= layout
.field(cx
, i
);
1736 let offset
= layout
.fields
.offset(i
);
1737 let field_end
= offset
+ field_layout
.size
;
1738 if min_size
< field_end
{
1739 min_size
= field_end
;
1743 offset
: offset
.bytes(),
1744 size
: field_layout
.size
.bytes(),
1745 align
: field_layout
.align
.abi
.bytes(),
1752 kind
: if layout
.is_unsized() { SizeKind::Min }
else { SizeKind::Exact }
,
1753 align
: layout
.align
.abi
.bytes(),
1754 size
: if min_size
.bytes() == 0 { layout.size.bytes() }
else { min_size.bytes() }
,
1759 match layout
.variants
{
1760 Variants
::Single { index }
=> {
1761 if !adt_def
.variants().is_empty() && layout
.fields
!= FieldsShape
::Primitive
{
1762 debug
!("print-type-size `{:#?}` variant {}", layout
, adt_def
.variant(index
).name
);
1763 let variant_def
= &adt_def
.variant(index
);
1764 let fields
: Vec
<_
> = variant_def
.fields
.iter().map(|f
| f
.name
).collect();
1769 vec
![build_variant_info(Some(variant_def
.name
), &fields
, layout
)],
1772 // (This case arises for *empty* enums; so give it
1774 record(adt_kind
.into(), adt_packed
, None
, vec
![]);
1778 Variants
::Multiple { tag, ref tag_encoding, .. }
=> {
1780 "print-type-size `{:#?}` adt general variants def {}",
1782 adt_def
.variants().len()
1784 let variant_infos
: Vec
<_
> = adt_def
1787 .map(|(i
, variant_def
)| {
1788 let fields
: Vec
<_
> = variant_def
.fields
.iter().map(|f
| f
.name
).collect();
1789 build_variant_info(Some(variant_def
.name
), &fields
, layout
.for_variant(cx
, i
))
1795 match tag_encoding
{
1796 TagEncoding
::Direct
=> Some(tag
.size(cx
)),