3 use std
::{borrow::Borrow, cmp, iter, ops::Bound}
;
5 #[cfg(feature = "randomize")]
6 use rand
::{seq::SliceRandom, SeedableRng}
;
7 #[cfg(feature = "randomize")]
8 use rand_xoshiro
::Xoshiro128StarStar
;
// NOTE(review): this file is a garbled extraction — original line numbers are
// fused into the text and statements are split across lines. Comments below
// annotate intent without altering any original token.
//
// Trait implemented by compiler contexts that can compute type layouts.
12 pub trait LayoutCalculator
{
// Handle type that can be borrowed as the target's data-layout description.
13 type TargetDataLayoutRef
: Borrow
<TargetDataLayout
>;
// Report a non-fatal (delayed) internal-compiler-error with the given message;
// used below e.g. for "unsized field in union".
15 fn delay_bug(&self, txt
: String
);
// Returns a borrowable handle to the target data layout used by all
// size/alignment computations in this trait's provided methods.
16 fn current_data_layout(&self) -> Self::TargetDataLayoutRef
;
18 fn scalar_pair(&self, a
: Scalar
, b
: Scalar
) -> LayoutS
{
19 let dl
= self.current_data_layout();
21 let b_align
= b
.align(dl
);
22 let align
= a
.align(dl
).max(b_align
).max(dl
.aggregate_align
);
23 let b_offset
= a
.size(dl
).align_to(b_align
.abi
);
24 let size
= (b_offset
+ b
.size(dl
)).align_to(align
.abi
);
26 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
27 // returns the last maximum.
28 let largest_niche
= Niche
::from_scalar(dl
, b_offset
, b
)
30 .chain(Niche
::from_scalar(dl
, Size
::ZERO
, a
))
31 .max_by_key(|niche
| niche
.available(dl
));
34 variants
: Variants
::Single { index: FIRST_VARIANT }
,
35 fields
: FieldsShape
::Arbitrary
{
36 offsets
: [Size
::ZERO
, b_offset
].into(),
37 memory_index
: [0, 1].into(),
39 abi
: Abi
::ScalarPair(a
, b
),
44 unadjusted_abi_align
: align
.abi
,
// NOTE(review): garbled extraction — the `fn univariant(&self, ...)` header
// and the `repr`/`kind` parameters that precede this span were lost in
// extraction (the body references `repr` and `kind` below). The visible
// text is kept byte-identical; only comments are added.
50 dl
: &TargetDataLayout
,
51 fields
: &IndexSlice
<FieldIdx
, Layout
<'_
>>,
54 ) -> Option
<LayoutS
> {
// First attempt: lay the fields out biasing niches towards the start.
55 let layout
= univariant(self, dl
, fields
, repr
, kind
, NicheBias
::Start
);
56 // Enums prefer niches close to the beginning or the end of the variants so that other (smaller)
57 // data-carrying variants can be packed into the space after/before the niche.
58 // If the default field ordering does not give us a niche at the front then we do a second
59 // run and bias niches to the right and then check which one is closer to one of the struct's
61 if let Some(layout
) = &layout
{
62 // Don't try to calculate an end-biased layout for unsizable structs,
63 // otherwise we could end up with different layouts for
64 // Foo<Type> and Foo<dyn Trait> which would break unsizing
65 if !matches
!(kind
, StructKind
::MaybeUnsized
) {
66 if let Some(niche
) = layout
.largest_niche
{
// Measure where the niche landed in the start-biased layout:
// bytes before it (head) and after it (tail).
67 let head_space
= niche
.offset
.bytes();
68 let niche_length
= niche
.value
.size(dl
).bytes();
69 let tail_space
= layout
.size
.bytes() - head_space
- niche_length
;
71 // This may end up doing redundant work if the niche is already in the last field
72 // (e.g. a trailing bool) and there is tail padding. But it's non-trivial to get
73 // the unpadded size so we try anyway.
74 if fields
.len() > 1 && head_space
!= 0 && tail_space
> 0 {
// Second attempt: same fields, but bias niches towards the end.
75 let alt_layout
= univariant(self, dl
, fields
, repr
, kind
, NicheBias
::End
)
76 .expect("alt layout should always work");
77 let niche
= alt_layout
79 .expect("alt layout should have a niche like the regular one");
80 let alt_head_space
= niche
.offset
.bytes();
81 let alt_niche_len
= niche
.value
.size(dl
).bytes();
83 alt_layout
.size
.bytes() - alt_head_space
- alt_niche_len
;
// Reordering fields must never change the overall size.
85 debug_assert_eq
!(layout
.size
.bytes(), alt_layout
.size
.bytes());
// Prefer the end-biased layout only when its niche sits strictly
// closer to the end than the default layout's niche sits to either edge.
87 let prefer_alt_layout
=
88 alt_head_space
> head_space
&& alt_head_space
> tail_space
;
// Trace output comparing the two candidate layouts (format string
// for a debug/trace call whose surrounding lines were lost).
91 "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
101 layout
.fields
.count(),
103 format_field_niches(&layout
, &fields
, &dl
),
104 format_field_niches(&alt_layout
, &fields
, &dl
),
107 if prefer_alt_layout
{
108 return Some(alt_layout
);
117 fn layout_of_never_type(&self) -> LayoutS
{
118 let dl
= self.current_data_layout();
119 let dl
= dl
.borrow();
121 variants
: Variants
::Single { index: FIRST_VARIANT }
,
122 fields
: FieldsShape
::Primitive
,
123 abi
: Abi
::Uninhabited
,
127 max_repr_align
: None
,
128 unadjusted_abi_align
: dl
.i8_align
.abi
,
// NOTE(review): garbled extraction with many dropped lines (match arms,
// closing braces, debug output). Code below is byte-identical to the
// visible fragments; only comments are added. This computes the layout of
// a struct or enum from its variants' field layouts.
132 fn layout_of_struct_or_enum(
135 variants
: &IndexSlice
<VariantIdx
, IndexVec
<FieldIdx
, Layout
<'_
>>>,
137 is_unsafe_cell
: bool
,
// Caller-imposed validity range (from `rustc_layout_scalar_valid_range_*`).
138 scalar_valid_range
: (Bound
<u128
>, Bound
<u128
>),
// Maps a (min, max) discriminant range to the integer type and signedness
// chosen for the tag.
139 discr_range_of_repr
: impl Fn(i128
, i128
) -> (Integer
, bool
),
140 discriminants
: impl Iterator
<Item
= (VariantIdx
, i128
)>,
141 dont_niche_optimize_enum
: bool
,
143 ) -> Option
<LayoutS
> {
144 let dl
= self.current_data_layout();
145 let dl
= dl
.borrow();
// Helper: a full-range initialized scalar of the given primitive.
147 let scalar_unit
= |value
: Primitive
| {
148 let size
= value
.size(dl
);
149 assert
!(size
.bits() <= 128);
150 Scalar
::Initialized { value, valid_range: WrappingRange::full(size) }
153 // A variant is absent if it's uninhabited and only has ZST fields.
154 // Present uninhabited variants only require space for their fields,
155 // but *not* an encoding of the discriminant (e.g., a tag value).
156 // See issue #49298 for more details on the need to leave space
157 // for non-ZST uninhabited data (mostly partial initialization).
158 let absent
= |fields
: &IndexSlice
<FieldIdx
, Layout
<'_
>>| {
159 let uninhabited
= fields
.iter().any(|f
| f
.abi().is_uninhabited());
160 // We cannot ignore alignment; that might lead us to entirely discard a variant and
161 // produce an enum that is less aligned than it should be!
162 let is_1zst
= fields
.iter().all(|f
| f
.0.is_
1zst
());
163 uninhabited
&& is_1zst
// First two *present* (non-absent) variants, if any.
165 let (present_first
, present_second
) = {
166 let mut present_variants
= variants
168 .filter_map(|(i
, v
)| if absent(v
) { None }
else { Some(i) }
);
169 (present_variants
.next(), present_variants
.next())
171 let present_first
= match present_first
{
172 Some(present_first
) => present_first
,
173 // Uninhabited because it has no variants, or only absent ones.
175 return Some(self.layout_of_never_type());
177 // If it's a struct, still compute a layout so that we can still compute the
179 None
=> FIRST_VARIANT
,
// A type is laid out "as a struct" when it is not an enum, or when it is a
// single-present-variant enum whose repr permits layout optimizations.
182 let is_struct
= !is_enum
||
183 // Only one variant is present.
184 (present_second
.is_none() &&
185 // Representation optimizations are allowed.
186 !repr
.inhibit_enum_layout_opt());
188 // Struct, or univariant enum equivalent to a struct.
189 // (Typechecking will reject discriminant-sizing attrs.)
191 let v
= present_first
;
192 let kind
= if is_enum
|| variants
[v
].is_empty() || always_sized
{
193 StructKind
::AlwaysSized
195 StructKind
::MaybeUnsized
198 let mut st
= self.univariant(dl
, &variants
[v
], repr
, kind
)?
;
199 st
.variants
= Variants
::Single { index: v }
;
// Helper: erase niche information from a scalar by widening its valid
// range to the full range of its primitive (used for UnsafeCell interiors).
202 let hide_niches
= |scalar
: &mut _
| match scalar
{
203 Scalar
::Initialized { value, valid_range }
=> {
204 *valid_range
= WrappingRange
::full(value
.size(dl
))
206 // Already doesn't have any niches
207 Scalar
::Union { .. }
=> {}
210 Abi
::Uninhabited
=> {}
211 Abi
::Scalar(scalar
) => hide_niches(scalar
),
212 Abi
::ScalarPair(a
, b
) => {
216 Abi
::Vector { element, count: _ }
=> hide_niches(element
),
217 Abi
::Aggregate { sized: _ }
=> {}
219 st
.largest_niche
= None
;
// Apply the caller-supplied scalar validity range to a scalar(-pair) abi.
223 let (start
, end
) = scalar_valid_range
;
225 Abi
::Scalar(ref mut scalar
) | Abi
::ScalarPair(ref mut scalar
, _
) => {
226 // Enlarging validity ranges would result in missed
227 // optimizations, *not* wrongly assuming the inner
228 // value is valid. e.g. unions already enlarge validity ranges,
229 // because the values may be uninitialized.
231 // Because of that we only check that the start and end
232 // of the range is representable with this scalar type.
234 let max_value
= scalar
.size(dl
).unsigned_int_max();
235 if let Bound
::Included(start
) = start
{
236 // FIXME(eddyb) this might be incorrect - it doesn't
237 // account for wrap-around (end < start) ranges.
238 assert
!(start
<= max_value
, "{start} > {max_value}");
239 scalar
.valid_range_mut().start
= start
;
241 if let Bound
::Included(end
) = end
{
242 // FIXME(eddyb) this might be incorrect - it doesn't
243 // account for wrap-around (end < start) ranges.
244 assert
!(end
<= max_value
, "{end} > {max_value}");
245 scalar
.valid_range_mut().end
= end
;
248 // Update `largest_niche` if we have introduced a larger niche.
249 let niche
= Niche
::from_scalar(dl
, Size
::ZERO
, *scalar
);
250 if let Some(niche
) = niche
{
251 match st
.largest_niche
{
252 Some(largest_niche
) => {
253 // Replace the existing niche even if they're equal,
254 // because this one is at a lower offset.
255 if largest_niche
.available(dl
) <= niche
.available(dl
) {
256 st
.largest_niche
= Some(niche
);
259 None
=> st
.largest_niche
= Some(niche
),
// A restricted valid range on a non-scalar layout is a bug unless the
// range was unbounded on both ends.
264 start
== Bound
::Unbounded
&& end
== Bound
::Unbounded
,
265 "nonscalar layout for layout_scalar_valid_range type: {st:#?}",
272 // At this point, we have handled all unions and
273 // structs. (We have also handled univariant enums
274 // that allow representation optimization.)
277 // Until we've decided whether to use the tagged or
278 // niche filling LayoutS, we don't want to intern the
279 // variant layouts, so we can't store them in the
280 // overall LayoutS. Store the overall LayoutS
281 // and the variant LayoutSs here until then.
284 variants
: IndexVec
<VariantIdx
, LayoutS
>,
// Candidate 1: niche-filling layout — hide the discriminant inside an
// unused-values niche of the largest variant instead of adding a tag.
287 let calculate_niche_filling_layout
= || -> Option
<TmpLayout
> {
288 if dont_niche_optimize_enum
{
292 if variants
.len() < 2 {
296 let mut align
= dl
.aggregate_align
;
297 let mut max_repr_align
= repr
.align
;
298 let mut unadjusted_abi_align
= align
.abi
;
// Lay out every variant independently (always-sized; enums can't be unsized).
300 let mut variant_layouts
= variants
303 let mut st
= self.univariant(dl
, v
, repr
, StructKind
::AlwaysSized
)?
;
304 st
.variants
= Variants
::Single { index: j }
;
306 align
= align
.max(st
.align
);
307 max_repr_align
= max_repr_align
.max(st
.max_repr_align
);
308 unadjusted_abi_align
= unadjusted_abi_align
.max(st
.unadjusted_abi_align
);
312 .collect
::<Option
<IndexVec
<VariantIdx
, _
>>>()?
;
// The untagged variant is the one with the largest size.
314 let largest_variant_index
= variant_layouts
316 .max_by_key(|(_i
, layout
)| layout
.size
.bytes())
317 .map(|(i
, _layout
)| i
)?
;
319 let all_indices
= variants
.indices();
// A variant needs an encoded discriminant unless it is the untagged
// (largest) variant or absent entirely.
321 |index
: VariantIdx
| index
!= largest_variant_index
&& !absent(&variants
[index
]);
322 let niche_variants
= all_indices
.clone().find(|v
| needs_disc(*v
)).unwrap()
323 ..=all_indices
.rev().find(|v
| needs_disc(*v
)).unwrap();
325 let count
= niche_variants
.size_hint().1.unwrap
() as u128
;
327 // Find the field with the largest niche
328 let (field_index
, niche
, (niche_start
, niche_scalar
)) = variants
[largest_variant_index
]
331 .filter_map(|(j
, field
)| Some((j
, field
.largest_niche()?
)))
332 .max_by_key(|(_
, niche
)| niche
.available(dl
))
333 .and_then(|(j
, niche
)| Some((j
, niche
, niche
.reserve(dl
, count
)?
)))?
;
// Absolute offset of the niche within the enum.
335 niche
.offset
+ variant_layouts
[largest_variant_index
].fields
.offset(field_index
);
336 let niche_size
= niche
.value
.size(dl
);
337 let size
= variant_layouts
[largest_variant_index
].size
.align_to(align
.abi
);
// All other variants must fit either before or after the niche bytes;
// those placed after get their field offsets (and size) shifted.
339 let all_variants_fit
= variant_layouts
.iter_enumerated_mut().all(|(i
, layout
)| {
340 if i
== largest_variant_index
{
344 layout
.largest_niche
= None
;
346 if layout
.size
<= niche_offset
{
347 // This variant will fit before the niche.
351 // Determine if it'll fit after the niche.
352 let this_align
= layout
.align
.abi
;
353 let this_offset
= (niche_offset
+ niche_size
).align_to(this_align
);
355 if this_offset
+ layout
.size
> size
{
359 // It'll fit, but we need to make some adjustments.
360 match layout
.fields
{
361 FieldsShape
::Arbitrary { ref mut offsets, .. }
=> {
362 for offset
in offsets
.iter_mut() {
363 *offset
+= this_offset
;
367 panic
!("Layout of fields should be Arbitrary for variants")
371 // It can't be a Scalar or ScalarPair because the offset isn't 0.
372 if !layout
.abi
.is_uninhabited() {
373 layout
.abi
= Abi
::Aggregate { sized: true }
;
375 layout
.size
+= this_offset
;
380 if !all_variants_fit
{
384 let largest_niche
= Niche
::from_scalar(dl
, niche_offset
, niche_scalar
);
// ABI selection: forward the untagged variant's scalar ABI when every
// other variant is a ZST and sizes/alignments agree.
386 let others_zst
= variant_layouts
388 .all(|(i
, layout
)| i
== largest_variant_index
|| layout
.size
== Size
::ZERO
);
389 let same_size
= size
== variant_layouts
[largest_variant_index
].size
;
390 let same_align
= align
== variant_layouts
[largest_variant_index
].align
;
392 let abi
= if variant_layouts
.iter().all(|v
| v
.abi
.is_uninhabited()) {
394 } else if same_size
&& same_align
&& others_zst
{
395 match variant_layouts
[largest_variant_index
].abi
{
396 // When the total alignment and size match, we can use the
397 // same ABI as the scalar variant with the reserved niche.
398 Abi
::Scalar(_
) => Abi
::Scalar(niche_scalar
),
399 Abi
::ScalarPair(first
, second
) => {
400 // Only the niche is guaranteed to be initialised,
401 // so use union layouts for the other primitive.
402 if niche_offset
== Size
::ZERO
{
403 Abi
::ScalarPair(niche_scalar
, second
.to_union())
405 Abi
::ScalarPair(first
.to_union(), niche_scalar
)
408 _
=> Abi
::Aggregate { sized: true }
,
411 Abi
::Aggregate { sized: true }
// Assemble the niche-filling candidate layout.
414 let layout
= LayoutS
{
415 variants
: Variants
::Multiple
{
417 tag_encoding
: TagEncoding
::Niche
{
418 untagged_variant
: largest_variant_index
,
423 variants
: IndexVec
::new(),
425 fields
: FieldsShape
::Arbitrary
{
426 offsets
: [niche_offset
].into(),
427 memory_index
: [0].into(),
434 unadjusted_abi_align
,
437 Some(TmpLayout { layout, variants: variant_layouts }
)
440 let niche_filling_layout
= calculate_niche_filling_layout();
// Candidate 2: classic tagged layout. First compute the inhabited
// discriminant range (min, max).
442 let (mut min
, mut max
) = (i128
::MAX
, i128
::MIN
);
443 let discr_type
= repr
.discr_type();
444 let bits
= Integer
::from_attr(dl
, discr_type
).size().bits();
445 for (i
, mut val
) in discriminants
{
446 if variants
[i
].iter().any(|f
| f
.abi().is_uninhabited()) {
449 if discr_type
.is_signed() {
450 // sign extend the raw representation to be an i128
451 val
= (val
<< (128 - bits
)) >> (128 - bits
);
460 // We might have no inhabited variants, so pretend there's at least one.
461 if (min
, max
) == (i128
::MAX
, i128
::MIN
) {
465 assert
!(min
<= max
, "discriminant range is {min}...{max}");
466 let (min_ity
, signed
) = discr_range_of_repr(min
, max
); //Integer::repr_discr(tcx, ty, &repr, min, max);
468 let mut align
= dl
.aggregate_align
;
469 let mut max_repr_align
= repr
.align
;
470 let mut unadjusted_abi_align
= align
.abi
;
472 let mut size
= Size
::ZERO
;
474 // We're interested in the smallest alignment, so start large.
475 let mut start_align
= Align
::from_bytes(256).unwrap();
476 assert_eq
!(Integer
::for_align(dl
, start_align
), None
);
478 // repr(C) on an enum tells us to make a (tag, union) layout,
479 // so we need to grow the prefix alignment to be at least
480 // the alignment of the union. (This value is used both for
481 // determining the alignment of the overall enum, and the
482 // determining the alignment of the payload after the tag.)
483 let mut prefix_align
= min_ity
.align(dl
).abi
;
485 for fields
in variants
{
486 for field
in fields
{
487 prefix_align
= prefix_align
.max(field
.align().abi
);
492 // Create the set of structs that represent each variant.
493 let mut layout_variants
= variants
495 .map(|(i
, field_layouts
)| {
496 let mut st
= self.univariant(
500 StructKind
::Prefixed(min_ity
.size(), prefix_align
),
502 st
.variants
= Variants
::Single { index: i }
;
503 // Find the first field we can't move later
504 // to make room for a larger discriminant.
505 for field_idx
in st
.fields
.index_by_increasing_offset() {
506 let field
= &field_layouts
[FieldIdx
::from_usize(field_idx
)];
507 if !field
.0.is_
1zst
() {
508 start_align
= start_align
.min(field
.align().abi
);
512 size
= cmp
::max(size
, st
.size
);
513 align
= align
.max(st
.align
);
514 max_repr_align
= max_repr_align
.max(st
.max_repr_align
);
515 unadjusted_abi_align
= unadjusted_abi_align
.max(st
.unadjusted_abi_align
);
518 .collect
::<Option
<IndexVec
<VariantIdx
, _
>>>()?
;
520 // Align the maximum variant size to the largest alignment.
521 size
= size
.align_to(align
.abi
);
523 if size
.bytes() >= dl
.obj_size_bound() {
527 let typeck_ity
= Integer
::from_attr(dl
, repr
.discr_type());
528 if typeck_ity
< min_ity
{
529 // It is a bug if Layout decided on a greater discriminant size than typeck for
530 // some reason at this point (based on values discriminant can take on). Mostly
531 // because this discriminant will be loaded, and then stored into variable of
532 // type calculated by typeck. Consider such case (a bug): typeck decided on
533 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
534 // discriminant values. That would be a bug, because then, in codegen, in order
535 // to store this 16-bit discriminant into 8-bit sized temporary some of the
536 // space necessary to represent would have to be discarded (or layout is wrong
537 // on thinking it needs 16 bits)
539 "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
541 // However, it is fine to make discr type however large (as an optimisation)
542 // after this point – we’ll just truncate the value we load in codegen.
545 // Check to see if we should use a different type for the
546 // discriminant. We can safely use a type with the same size
547 // as the alignment of the first field of each variant.
548 // We increase the size of the discriminant to avoid LLVM copying
549 // padding when it doesn't need to. This normally causes unaligned
550 // load/stores and excessive memcpy/memset operations. By using a
551 // bigger integer size, LLVM can be sure about its contents and
552 // won't be so conservative.
554 // Use the initial field alignment
555 let mut ity
= if repr
.c() || repr
.int
.is_some() {
558 Integer
::for_align(dl
, start_align
).unwrap_or(min_ity
)
561 // If the alignment is not larger than the chosen discriminant size,
562 // don't use the alignment as the final size.
566 // Patch up the variants' first few fields.
567 let old_ity_size
= min_ity
.size();
568 let new_ity_size
= ity
.size();
569 for variant
in &mut layout_variants
{
570 match variant
.fields
{
571 FieldsShape
::Arbitrary { ref mut offsets, .. }
=> {
573 if *i
<= old_ity_size
{
574 assert_eq
!(*i
, old_ity_size
);
578 // We might be making the struct larger.
579 if variant
.size
<= old_ity_size
{
580 variant
.size
= new_ity_size
;
// Build the tag scalar: the chosen integer with a valid range that
// covers exactly the inhabited discriminants (masked to the tag width).
588 let tag_mask
= ity
.size().unsigned_int_max();
589 let tag
= Scalar
::Initialized
{
590 value
: Int(ity
, signed
),
591 valid_range
: WrappingRange
{
592 start
: (min
as u128
& tag_mask
),
593 end
: (max
as u128
& tag_mask
),
596 let mut abi
= Abi
::Aggregate { sized: true }
;
598 if layout_variants
.iter().all(|v
| v
.abi
.is_uninhabited()) {
599 abi
= Abi
::Uninhabited
;
600 } else if tag
.size(dl
) == size
{
601 // Make sure we only use scalar layout when the enum is entirely its
602 // own tag (i.e. it has no padding nor any non-ZST variant fields).
603 abi
= Abi
::Scalar(tag
);
605 // Try to use a ScalarPair for all tagged enums.
606 // That's possible only if we can find a common primitive type for all variants.
607 let mut common_prim
= None
;
608 let mut common_prim_initialized_in_all_variants
= true;
609 for (field_layouts
, layout_variant
) in iter
::zip(variants
, &layout_variants
) {
610 let FieldsShape
::Arbitrary { ref offsets, .. }
= layout_variant
.fields
else {
613 // We skip *all* ZST here and later check if we are good in terms of alignment.
614 // This lets us handle some cases involving aligned ZST.
615 let mut fields
= iter
::zip(field_layouts
, offsets
).filter(|p
| !p
.0.0.is_zst());
616 let (field
, offset
) = match (fields
.next(), fields
.next()) {
618 common_prim_initialized_in_all_variants
= false;
621 (Some(pair
), None
) => pair
,
627 let prim
= match field
.abi() {
628 Abi
::Scalar(scalar
) => {
629 common_prim_initialized_in_all_variants
&=
630 matches
!(scalar
, Scalar
::Initialized { .. }
);
638 if let Some(pair
) = common_prim
{
639 // This is pretty conservative. We could go fancier
640 // by conflating things like i32 and u32, or even
641 // realising that (u8, u8) could just cohabit with
643 if pair
!= (prim
, offset
) {
648 common_prim
= Some((prim
, offset
));
651 if let Some((prim
, offset
)) = common_prim
{
652 let prim_scalar
= if common_prim_initialized_in_all_variants
{
655 // Common prim might be uninit.
656 Scalar
::Union { value: prim }
658 let pair
= self.scalar_pair(tag
, prim_scalar
);
659 let pair_offsets
= match pair
.fields
{
660 FieldsShape
::Arbitrary { ref offsets, ref memory_index }
=> {
661 assert_eq
!(memory_index
.raw
, [0, 1]);
666 if pair_offsets
[FieldIdx
::from_u32(0)] == Size
::ZERO
667 && pair_offsets
[FieldIdx
::from_u32(1)] == *offset
668 && align
== pair
.align
671 // We can use `ScalarPair` only when it matches our
672 // already computed layout (including `#[repr(C)]`).
678 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
679 // variants to ensure they are consistent. This is because a downcast is
680 // semantically a NOP, and thus should not affect layout.
681 if matches
!(abi
, Abi
::Scalar(..) | Abi
::ScalarPair(..)) {
682 for variant
in &mut layout_variants
{
683 // We only do this for variants with fields; the others are not accessed anyway.
684 // Also do not overwrite any already existing "clever" ABIs.
685 if variant
.fields
.count() > 0 && matches
!(variant
.abi
, Abi
::Aggregate { .. }
) {
687 // Also need to bump up the size and alignment, so that the entire value fits in here.
688 variant
.size
= cmp
::max(variant
.size
, size
);
689 variant
.align
.abi
= cmp
::max(variant
.align
.abi
, align
.abi
);
694 let largest_niche
= Niche
::from_scalar(dl
, Size
::ZERO
, tag
);
// Assemble the tagged candidate layout.
696 let tagged_layout
= LayoutS
{
697 variants
: Variants
::Multiple
{
699 tag_encoding
: TagEncoding
::Direct
,
701 variants
: IndexVec
::new(),
703 fields
: FieldsShape
::Arbitrary
{
704 offsets
: [Size
::ZERO
].into(),
705 memory_index
: [0].into(),
712 unadjusted_abi_align
,
715 let tagged_layout
= TmpLayout { layout: tagged_layout, variants: layout_variants }
;
// Choose between the tagged and niche-filling candidates.
717 let mut best_layout
= match (tagged_layout
, niche_filling_layout
) {
719 // Pick the smaller layout; otherwise,
720 // pick the layout with the larger niche; otherwise,
721 // pick tagged as it has simpler codegen.
722 use cmp
::Ordering
::*;
724 |tmp_l
: &TmpLayout
| tmp_l
.layout
.largest_niche
.map_or(0, |n
| n
.available(dl
));
725 match (tl
.layout
.size
.cmp(&nl
.layout
.size
), niche_size(&tl
).cmp(&niche_size(&nl
))) {
734 // Now we can intern the variant layouts and store them in the enum layout.
735 best_layout
.layout
.variants
= match best_layout
.layout
.variants
{
736 Variants
::Multiple { tag, tag_encoding, tag_field, .. }
=> {
737 Variants
::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
741 Some(best_layout
.layout
)
// NOTE(review): garbled extraction — the `fn layout_of_union(&self, repr, ...)`
// header preceding this span was lost (the body references `repr`). Visible
// text is kept byte-identical; only comments are added. Computes the layout
// of a union: all fields at offset 0, size = max field size (padded to align).
747 variants
: &IndexSlice
<VariantIdx
, IndexVec
<FieldIdx
, Layout
<'_
>>>,
748 ) -> Option
<LayoutS
> {
749 let dl
= self.current_data_layout();
750 let dl
= dl
.borrow();
// repr(packed) unions start from byte alignment instead of aggregate align.
751 let mut align
= if repr
.pack
.is_some() { dl.i8_align }
else { dl.aggregate_align }
;
752 let mut max_repr_align
= repr
.align
;
754 // If all the non-ZST fields have the same ABI and union ABI optimizations aren't
755 // disabled, we can use that common ABI for the union as a whole.
757 let mut common_non_zst_abi_and_align
= if repr
.inhibit_union_abi_opt() {
764 let mut size
= Size
::ZERO
;
// A union has a single variant holding all its fields.
765 let only_variant
= &variants
[FIRST_VARIANT
];
766 for field
in only_variant
{
767 if field
.0.is_unsized
() {
768 self.delay_bug("unsized field in union".to_string());
// Accumulate alignment and take the max field size (fields all overlap).
771 align
= align
.max(field
.align());
772 max_repr_align
= max_repr_align
.max(field
.max_repr_align());
773 size
= cmp
::max(size
, field
.size());
775 if field
.0.is_zst
() {
776 // Nothing more to do for ZST fields
780 if let Ok(common
) = common_non_zst_abi_and_align
{
781 // Discard valid range information and allow undef
782 let field_abi
= field
.abi().to_union();
784 if let Some((common_abi
, common_align
)) = common
{
785 if common_abi
!= field_abi
{
786 // Different fields have different ABI: disable opt
787 common_non_zst_abi_and_align
= Err(AbiMismatch
);
789 // Fields with the same non-Aggregate ABI should also
790 // have the same alignment
791 if !matches
!(common_abi
, Abi
::Aggregate { .. }
) {
795 "non-Aggregate field with matching ABI but differing alignment"
800 // First non-ZST field: record its ABI and alignment
801 common_non_zst_abi_and_align
= Ok(Some((field_abi
, field
.align().abi
)));
// repr(packed(N)) caps the alignment at N.
806 if let Some(pack
) = repr
.pack
{
807 align
= align
.min(AbiAndPrefAlign
::new(pack
));
809 // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
810 // See documentation on `LayoutS::unadjusted_abi_align`.
811 let unadjusted_abi_align
= align
.abi
;
812 if let Some(repr_align
) = repr
.align
{
813 align
= align
.max(AbiAndPrefAlign
::new(repr_align
));
815 // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate.
818 // If all non-ZST fields have the same ABI, we may forward that ABI
819 // for the union as a whole, unless otherwise inhibited.
820 let abi
= match common_non_zst_abi_and_align
{
821 Err(AbiMismatch
) | Ok(None
) => Abi
::Aggregate { sized: true }
,
822 Ok(Some((abi
, _
))) => {
823 if abi
.inherent_align(dl
).map(|a
| a
.abi
) != Some(align
.abi
) {
824 // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
825 Abi
::Aggregate { sized: true }
833 variants
: Variants
::Single { index: FIRST_VARIANT }
,
// Union fields count must be non-zero for FieldsShape::Union.
834 fields
: FieldsShape
::Union(NonZeroUsize
::new(only_variant
.len())?
),
838 size
: size
.align_to(align
.abi
),
840 unadjusted_abi_align
,
845 /// Determines towards which end of a struct layout optimizations will try to place the best niches.
852 this
: &(impl LayoutCalculator
+ ?Sized
),
853 dl
: &TargetDataLayout
,
854 fields
: &IndexSlice
<FieldIdx
, Layout
<'_
>>,
857 niche_bias
: NicheBias
,
858 ) -> Option
<LayoutS
> {
859 let pack
= repr
.pack
;
860 let mut align
= if pack
.is_some() { dl.i8_align }
else { dl.aggregate_align }
;
861 let mut max_repr_align
= repr
.align
;
862 let mut inverse_memory_index
: IndexVec
<u32, FieldIdx
> = fields
.indices().collect();
863 let optimize
= !repr
.inhibit_struct_field_reordering_opt();
864 if optimize
&& fields
.len() > 1 {
865 let end
= if let StructKind
::MaybeUnsized
= kind { fields.len() - 1 }
else { fields.len() }
;
866 let optimizing
= &mut inverse_memory_index
.raw
[..end
];
867 let fields_excluding_tail
= &fields
.raw
[..end
];
869 // If `-Z randomize-layout` was enabled for the type definition we can shuffle
870 // the field ordering to try and catch some code making assumptions about layouts
871 // we don't guarantee
872 if repr
.can_randomize_type_layout() && cfg
!(feature
= "randomize") {
873 #[cfg(feature = "randomize")]
875 // `ReprOptions.layout_seed` is a deterministic seed that we can use to
876 // randomize field ordering with
877 let mut rng
= Xoshiro128StarStar
::seed_from_u64(repr
.field_shuffle_seed
.as_u64());
879 // Shuffle the ordering of the fields
880 optimizing
.shuffle(&mut rng
);
882 // Otherwise we just leave things alone and actually optimize the type's fields
884 // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
885 // not depend on the layout of the tail.
886 let max_field_align
=
887 fields_excluding_tail
.iter().map(|f
| f
.align().abi
.bytes()).max().unwrap_or(1);
888 let largest_niche_size
= fields_excluding_tail
890 .filter_map(|f
| f
.largest_niche())
891 .map(|n
| n
.available(dl
))
895 // Calculates a sort key to group fields by their alignment or possibly some size-derived
897 let alignment_group_key
= |layout
: Layout
<'_
>| {
898 if let Some(pack
) = pack
{
899 // return the packed alignment in bytes
900 layout
.align().abi
.min(pack
).bytes()
902 // returns log2(effective-align).
903 // This is ok since `pack` applies to all fields equally.
904 // The calculation assumes that size is an integer multiple of align, except for ZSTs.
906 let align
= layout
.align().abi
.bytes();
907 let size
= layout
.size().bytes();
908 let niche_size
= layout
.largest_niche().map(|n
| n
.available(dl
)).unwrap_or(0);
909 // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
910 let size_as_align
= align
.max(size
).trailing_zeros();
911 let size_as_align
= if largest_niche_size
> 0 {
913 // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the array
914 // to the front in the first case (for aligned loads) but keep the bool in front
915 // in the second case for its niches.
916 NicheBias
::Start
=> max_field_align
.trailing_zeros().min(size_as_align
),
917 // When moving niches towards the end of the struct then for
918 // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
919 // in the align-1 group because its bool can be moved closer to the end.
920 NicheBias
::End
if niche_size
== largest_niche_size
=> {
921 align
.trailing_zeros()
923 NicheBias
::End
=> size_as_align
,
933 StructKind
::AlwaysSized
| StructKind
::MaybeUnsized
=> {
934 // Currently `LayoutS` only exposes a single niche so sorting is usually sufficient
935 // to get one niche into the preferred position. If it ever supported multiple niches
936 // then a more advanced pick-and-pack approach could provide better results.
937 // But even for the single-niche cache it's not optimal. E.g. for
938 // A(u32, (bool, u8), u16) it would be possible to move the bool to the front
939 // but it would require packing the tuple together with the u16 to build a 4-byte
940 // group so that the u32 can be placed after it without padding. This kind
941 // of packing can't be achieved by sorting.
942 optimizing
.sort_by_key(|&x
| {
944 let field_size
= f
.size().bytes();
945 let niche_size
= f
.largest_niche().map_or(0, |n
| n
.available(dl
));
946 let niche_size_key
= match niche_bias
{
948 NicheBias
::Start
=> !niche_size
,
950 NicheBias
::End
=> niche_size
,
952 let inner_niche_offset_key
= match niche_bias
{
953 NicheBias
::Start
=> f
.largest_niche().map_or(0, |n
| n
.offset
.bytes()),
954 NicheBias
::End
=> f
.largest_niche().map_or(0, |n
| {
955 !(field_size
- n
.value
.size(dl
).bytes() - n
.offset
.bytes())
960 // Then place largest alignments first.
961 cmp
::Reverse(alignment_group_key(f
)),
962 // Then prioritize niche placement within alignment group according to
963 // `niche_bias_start`.
965 // Then among fields with equally-sized niches prefer the ones
966 // closer to the start/end of the field.
967 inner_niche_offset_key
,
972 StructKind
::Prefixed(..) => {
973 // Sort in ascending alignment so that the layout stays optimal
974 // regardless of the prefix.
975 // And put the largest niche in an alignment group at the end
976 // so it can be used as discriminant in jagged enums
977 optimizing
.sort_by_key(|&x
| {
979 let niche_size
= f
.largest_niche().map_or(0, |n
| n
.available(dl
));
980 (alignment_group_key(f
), niche_size
)
985 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
986 // regardless of the status of `-Z randomize-layout`
989 // inverse_memory_index holds field indices by increasing memory offset.
990 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
991 // We now write field offsets to the corresponding offset slot;
992 // field 5 with offset 0 puts 0 in offsets[5].
993 // At the bottom of this function, we invert `inverse_memory_index` to
994 // produce `memory_index` (see `invert_mapping`).
995 let mut sized
= true;
996 let mut offsets
= IndexVec
::from_elem(Size
::ZERO
, &fields
);
997 let mut offset
= Size
::ZERO
;
998 let mut largest_niche
= None
;
999 let mut largest_niche_available
= 0;
1000 if let StructKind
::Prefixed(prefix_size
, prefix_align
) = kind
{
1002 if let Some(pack
) = pack { prefix_align.min(pack) }
else { prefix_align }
;
1003 align
= align
.max(AbiAndPrefAlign
::new(prefix_align
));
1004 offset
= prefix_size
.align_to(prefix_align
);
1006 for &i
in &inverse_memory_index
{
1007 let field
= &fields
[i
];
1009 this
.delay_bug(format
!(
1010 "univariant: field #{} comes after unsized field",
1015 if field
.0.is_unsized
() {
1019 // Invariant: offset < dl.obj_size_bound() <= 1<<61
1020 let field_align
= if let Some(pack
) = pack
{
1021 field
.align().min(AbiAndPrefAlign
::new(pack
))
1025 offset
= offset
.align_to(field_align
.abi
);
1026 align
= align
.max(field_align
);
1027 max_repr_align
= max_repr_align
.max(field
.max_repr_align());
1029 debug
!("univariant offset: {:?} field: {:#?}", offset
, field
);
1030 offsets
[i
] = offset
;
1032 if let Some(mut niche
) = field
.largest_niche() {
1033 let available
= niche
.available(dl
);
1034 // Pick up larger niches.
1035 let prefer_new_niche
= match niche_bias
{
1036 NicheBias
::Start
=> available
> largest_niche_available
,
1037 // if there are several niches of the same size then pick the last one
1038 NicheBias
::End
=> available
>= largest_niche_available
,
1040 if prefer_new_niche
{
1041 largest_niche_available
= available
;
1042 niche
.offset
+= offset
;
1043 largest_niche
= Some(niche
);
1047 offset
= offset
.checked_add(field
.size(), dl
)?
;
1050 // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
1051 // See documentation on `LayoutS::unadjusted_abi_align`.
1052 let unadjusted_abi_align
= align
.abi
;
1053 if let Some(repr_align
) = repr
.align
{
1054 align
= align
.max(AbiAndPrefAlign
::new(repr_align
));
1056 // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate.
1059 debug
!("univariant min_size: {:?}", offset
);
1060 let min_size
= offset
;
1061 // As stated above, inverse_memory_index holds field indices by increasing offset.
1062 // This makes it an already-sorted view of the offsets vec.
1063 // To invert it, consider:
1064 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
1065 // Field 5 would be the first element, so memory_index is i:
1066 // Note: if we didn't optimize, it's already right.
1067 let memory_index
= if optimize
{
1068 inverse_memory_index
.invert_bijective_mapping()
1070 debug_assert
!(inverse_memory_index
.iter().copied().eq(fields
.indices()));
1071 inverse_memory_index
.into_iter().map(FieldIdx
::as_u32
).collect()
1073 let size
= min_size
.align_to(align
.abi
);
1074 let mut layout_of_single_non_zst_field
= None
;
1075 let mut abi
= Abi
::Aggregate { sized }
;
1076 // Try to make this a Scalar/ScalarPair.
1077 if sized
&& size
.bytes() > 0 {
1078 // We skip *all* ZST here and later check if we are good in terms of alignment.
1079 // This lets us handle some cases involving aligned ZST.
1080 let mut non_zst_fields
= fields
.iter_enumerated().filter(|&(_
, f
)| !f
.0.is_zst
());
1082 match (non_zst_fields
.next(), non_zst_fields
.next(), non_zst_fields
.next()) {
1083 // We have exactly one non-ZST field.
1084 (Some((i
, field
)), None
, None
) => {
1085 layout_of_single_non_zst_field
= Some(field
);
1087 // Field fills the struct and it has a scalar or scalar pair ABI.
1088 if offsets
[i
].bytes() == 0 && align
.abi
== field
.align().abi
&& size
== field
.size()
1091 // For plain scalars, or vectors of them, we can't unpack
1092 // newtypes for `#[repr(C)]`, as that affects C ABIs.
1093 Abi
::Scalar(_
) | Abi
::Vector { .. }
if optimize
=> {
1096 // But scalar pairs are Rust-specific and get
1097 // treated as aggregates by C ABIs anyway.
1098 Abi
::ScalarPair(..) => {
1106 // Two non-ZST fields, and they're both scalars.
1107 (Some((i
, a
)), Some((j
, b
)), None
) => {
1108 match (a
.abi(), b
.abi()) {
1109 (Abi
::Scalar(a
), Abi
::Scalar(b
)) => {
1110 // Order by the memory placement, not source order.
1111 let ((i
, a
), (j
, b
)) = if offsets
[i
] < offsets
[j
] {
1116 let pair
= this
.scalar_pair(a
, b
);
1117 let pair_offsets
= match pair
.fields
{
1118 FieldsShape
::Arbitrary { ref offsets, ref memory_index }
=> {
1119 assert_eq
!(memory_index
.raw
, [0, 1]);
1124 if offsets
[i
] == pair_offsets
[FieldIdx
::from_usize(0)]
1125 && offsets
[j
] == pair_offsets
[FieldIdx
::from_usize(1)]
1126 && align
== pair
.align
1127 && size
== pair
.size
1129 // We can use `ScalarPair` only when it matches our
1130 // already computed layout (including `#[repr(C)]`).
1141 if fields
.iter().any(|f
| f
.abi().is_uninhabited()) {
1142 abi
= Abi
::Uninhabited
;
1145 let unadjusted_abi_align
= if repr
.transparent() {
1146 match layout_of_single_non_zst_field
{
1147 Some(l
) => l
.unadjusted_abi_align(),
1149 // `repr(transparent)` with all ZST fields.
1154 unadjusted_abi_align
1158 variants
: Variants
::Single { index: FIRST_VARIANT }
,
1159 fields
: FieldsShape
::Arbitrary { offsets, memory_index }
,
1165 unadjusted_abi_align
,
1169 fn format_field_niches(
1171 fields
: &IndexSlice
<FieldIdx
, Layout
<'_
>>,
1172 dl
: &TargetDataLayout
,
1174 let mut s
= String
::new();
1175 for i
in layout
.fields
.index_by_increasing_offset() {
1176 let offset
= layout
.fields
.offset(i
);
1177 let f
= fields
[i
.into()];
1178 write
!(s
, "[o{}a{}s{}", offset
.bytes(), f
.align().abi
.bytes(), f
.size().bytes()).unwrap();
1179 if let Some(n
) = f
.largest_niche() {
1184 n
.available(dl
).ilog2(),
1185 n
.value
.size(dl
).bytes()
1189 write
!(s
, "] ").unwrap();