compiler/rustc_abi/src/layout.rs (rustc 1.74.1+dfsg1)
use super::*;
use std::fmt::Write;
use std::{borrow::Borrow, cmp, iter, ops::Bound};

#[cfg(feature = "randomize")]
use rand::{seq::SliceRandom, SeedableRng};
#[cfg(feature = "randomize")]
use rand_xoshiro::Xoshiro128StarStar;

use tracing::debug;

pub trait LayoutCalculator {
    type TargetDataLayoutRef: Borrow<TargetDataLayout>;

    fn delay_bug(&self, txt: String);
    fn current_data_layout(&self) -> Self::TargetDataLayoutRef;

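    /// Computes the layout of a pair of scalars laid out like a two-field struct:
    /// `a` at offset 0, `b` at the next offset aligned for `b`, plus trailing
    /// padding up to the pair's alignment. E.g. for `(u8, u32)` on a target where
    /// `u32` is 4-byte aligned, `b_offset` is 4 and `size` is 8.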
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS {
        let dl = self.current_data_layout();
        let dl = dl.borrow();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iterate on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: FIRST_VARIANT },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO, b_offset].into(),
                memory_index: [0, 1].into(),
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
            max_repr_align: None,
            unadjusted_abi_align: align.abi,
        }
    }

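    /// Computes a univariant (struct-like) layout. The default run biases the
    /// largest niche towards the start of the struct; if that leaves the niche
    /// in the middle, a second end-biased run is tried and kept when it places
    /// the niche closer to one of the struct's edges.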
    fn univariant(
        &self,
        dl: &TargetDataLayout,
        fields: &IndexSlice<FieldIdx, Layout<'_>>,
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Option<LayoutS> {
        let layout = univariant(self, dl, fields, repr, kind, NicheBias::Start);
        // Enums prefer niches close to the beginning or the end of the variants so that other
        // (smaller) data-carrying variants can be packed into the space after/before the niche.
        // If the default field ordering does not give us a niche at the front then we do a second
        // run and bias niches to the right and then check which one is closer to one of the
        // struct's edges.
        if let Some(layout) = &layout {
            // Don't try to calculate an end-biased layout for unsizable structs,
            // otherwise we could end up with different layouts for
            // Foo<Type> and Foo<dyn Trait> which would break unsizing.
            if !matches!(kind, StructKind::MaybeUnsized) {
                if let Some(niche) = layout.largest_niche {
                    let head_space = niche.offset.bytes();
                    let niche_length = niche.value.size(dl).bytes();
                    let tail_space = layout.size.bytes() - head_space - niche_length;

                    // This may end up doing redundant work if the niche is already in the last
                    // field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
                    // to get the unpadded size so we try anyway.
                    if fields.len() > 1 && head_space != 0 && tail_space > 0 {
                        let alt_layout = univariant(self, dl, fields, repr, kind, NicheBias::End)
                            .expect("alt layout should always work");
                        let niche = alt_layout
                            .largest_niche
                            .expect("alt layout should have a niche like the regular one");
                        let alt_head_space = niche.offset.bytes();
                        let alt_niche_len = niche.value.size(dl).bytes();
                        let alt_tail_space =
                            alt_layout.size.bytes() - alt_head_space - alt_niche_len;

                        debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());

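                        // Keep the end-biased layout only when its niche sits deeper into the
                        // struct than the default niche's distance from either edge.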
                        let prefer_alt_layout =
                            alt_head_space > head_space && alt_head_space > tail_space;

                        debug!(
                            "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
                             layout: {}\n\
                             alt_layout: {}\n",
                            layout.size.bytes(),
                            head_space,
                            niche_length,
                            tail_space,
                            alt_head_space,
                            alt_niche_len,
                            alt_tail_space,
                            layout.fields.count(),
                            prefer_alt_layout,
                            format_field_niches(&layout, &fields, &dl),
                            format_field_niches(&alt_layout, &fields, &dl),
                        );

                        if prefer_alt_layout {
                            return Some(alt_layout);
                        }
                    }
                }
            }
        }
        layout
    }

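    /// The layout of the never type `!`: zero-sized, uninhabited, byte-aligned.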
    fn layout_of_never_type(&self) -> LayoutS {
        let dl = self.current_data_layout();
        let dl = dl.borrow();
        LayoutS {
            variants: Variants::Single { index: FIRST_VARIANT },
            fields: FieldsShape::Primitive,
            abi: Abi::Uninhabited,
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
            max_repr_align: None,
            unadjusted_abi_align: dl.i8_align.abi,
        }
    }

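    /// Computes the layout of a struct or an enum. Structs (and univariant
    /// enums that permit layout optimization) go through the univariant path;
    /// for multi-variant enums both a tagged layout and, when possible, a
    /// niche-filling layout are computed, and the better of the two is kept.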
    fn layout_of_struct_or_enum(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
        is_enum: bool,
        is_unsafe_cell: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
        dont_niche_optimize_enum: bool,
        always_sized: bool,
    ) -> Option<LayoutS> {
        let dl = self.current_data_layout();
        let dl = dl.borrow();

        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
        };

        // A variant is absent if it's uninhabited and only has ZST fields.
        // Present uninhabited variants only require space for their fields,
        // but *not* an encoding of the discriminant (e.g., a tag value).
        // See issue #49298 for more details on the need to leave space
        // for non-ZST uninhabited data (mostly partial initialization).
        let absent = |fields: &IndexSlice<FieldIdx, Layout<'_>>| {
            let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited());
            // We cannot ignore alignment; that might lead us to entirely discard a variant and
            // produce an enum that is less aligned than it should be!
            let is_1zst = fields.iter().all(|f| f.0.is_1zst());
            uninhabited && is_1zst
        };
        let (present_first, present_second) = {
            let mut present_variants = variants
                .iter_enumerated()
                .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
            (present_variants.next(), present_variants.next())
        };
        let present_first = match present_first {
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
                return Some(self.layout_of_never_type());
            }
            // If it's a struct, still compute a layout so that we can still compute the
            // field offsets.
            None => FIRST_VARIANT,
        };

        let is_struct = !is_enum ||
            // Only one variant is present.
            (present_second.is_none() &&
            // Representation optimizations are allowed.
            !repr.inhibit_enum_layout_opt());
        if is_struct {
            // Struct, or univariant enum equivalent to a struct.
            // (Typechecking will reject discriminant-sizing attrs.)

            let v = present_first;
            let kind = if is_enum || variants[v].is_empty() || always_sized {
                StructKind::AlwaysSized
            } else {
                StructKind::MaybeUnsized
            };

            let mut st = self.univariant(dl, &variants[v], repr, kind)?;
            st.variants = Variants::Single { index: v };

            if is_unsafe_cell {
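                // The contents of an `UnsafeCell` may be mutated through shared
                // references, so no niche may be assumed to hold: widen every
                // scalar's valid range to the full range of its size.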
                let hide_niches = |scalar: &mut _| match scalar {
                    Scalar::Initialized { value, valid_range } => {
                        *valid_range = WrappingRange::full(value.size(dl))
                    }
                    // Already doesn't have any niches
                    Scalar::Union { .. } => {}
                };
                match &mut st.abi {
                    Abi::Uninhabited => {}
                    Abi::Scalar(scalar) => hide_niches(scalar),
                    Abi::ScalarPair(a, b) => {
                        hide_niches(a);
                        hide_niches(b);
                    }
                    Abi::Vector { element, count: _ } => hide_niches(element),
                    Abi::Aggregate { sized: _ } => {}
                }
                st.largest_niche = None;
                return Some(st);
            }

            let (start, end) = scalar_valid_range;
            match st.abi {
                Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                    // Enlarging validity ranges would result in missed
                    // optimizations, *not* wrongly assuming the inner
                    // value is valid. E.g. unions already enlarge validity ranges,
                    // because the values may be uninitialized.
                    //
                    // Because of that we only check that the start and end
                    // of the range are representable with this scalar type.

                    let max_value = scalar.size(dl).unsigned_int_max();
                    if let Bound::Included(start) = start {
                        // FIXME(eddyb) this might be incorrect - it doesn't
                        // account for wrap-around (end < start) ranges.
                        assert!(start <= max_value, "{start} > {max_value}");
                        scalar.valid_range_mut().start = start;
                    }
                    if let Bound::Included(end) = end {
                        // FIXME(eddyb) this might be incorrect - it doesn't
                        // account for wrap-around (end < start) ranges.
                        assert!(end <= max_value, "{end} > {max_value}");
                        scalar.valid_range_mut().end = end;
                    }

                    // Update `largest_niche` if we have introduced a larger niche.
                    let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                    if let Some(niche) = niche {
                        match st.largest_niche {
                            Some(largest_niche) => {
                                // Replace the existing niche even if they're equal,
                                // because this one is at a lower offset.
                                if largest_niche.available(dl) <= niche.available(dl) {
                                    st.largest_niche = Some(niche);
                                }
                            }
                            None => st.largest_niche = Some(niche),
                        }
                    }
                }
                _ => assert!(
                    start == Bound::Unbounded && end == Bound::Unbounded,
                    "nonscalar layout for layout_scalar_valid_range type: {st:#?}",
                ),
            }

            return Some(st);
        }

        // At this point, we have handled all unions and
        // structs. (We have also handled univariant enums
        // that allow representation optimization.)
        assert!(is_enum);

        // Until we've decided whether to use the tagged or
        // niche-filling LayoutS, we don't want to intern the
        // variant layouts, so we can't store them in the
        // overall LayoutS. Store the overall LayoutS
        // and the variant LayoutSs here until then.
        struct TmpLayout {
            layout: LayoutS,
            variants: IndexVec<VariantIdx, LayoutS>,
        }

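        // A niche-filling layout hides the discriminant in spare bit-patterns
        // (a niche) of a field of the largest variant, instead of adding a
        // separate tag field; `Option<&T>` using the null pointer for `None`
        // is the classic example.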
        let calculate_niche_filling_layout = || -> Option<TmpLayout> {
            if dont_niche_optimize_enum {
                return None;
            }

            if variants.len() < 2 {
                return None;
            }

            let mut align = dl.aggregate_align;
            let mut max_repr_align = repr.align;
            let mut unadjusted_abi_align = align.abi;

            let mut variant_layouts = variants
                .iter_enumerated()
                .map(|(j, v)| {
                    let mut st = self.univariant(dl, v, repr, StructKind::AlwaysSized)?;
                    st.variants = Variants::Single { index: j };

                    align = align.max(st.align);
                    max_repr_align = max_repr_align.max(st.max_repr_align);
                    unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);

                    Some(st)
                })
                .collect::<Option<IndexVec<VariantIdx, _>>>()?;

            let largest_variant_index = variant_layouts
                .iter_enumerated()
                .max_by_key(|(_i, layout)| layout.size.bytes())
                .map(|(i, _layout)| i)?;

            let all_indices = variants.indices();
            let needs_disc =
                |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

            let count = niche_variants.size_hint().1.unwrap() as u128;

            // Find the field with the largest niche
            let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
                .iter()
                .enumerate()
                .filter_map(|(j, field)| Some((j, field.largest_niche()?)))
                .max_by_key(|(_, niche)| niche.available(dl))
                .and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
            let niche_offset =
                niche.offset + variant_layouts[largest_variant_index].fields.offset(field_index);
            let niche_size = niche.value.size(dl);
            let size = variant_layouts[largest_variant_index].size.align_to(align.abi);

            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                if i == largest_variant_index {
                    return true;
                }

                layout.largest_niche = None;

                if layout.size <= niche_offset {
                    // This variant will fit before the niche.
                    return true;
                }

                // Determine if it'll fit after the niche.
                let this_align = layout.align.abi;
                let this_offset = (niche_offset + niche_size).align_to(this_align);

                if this_offset + layout.size > size {
                    return false;
                }

                // It'll fit, but we need to make some adjustments.
                match layout.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for offset in offsets.iter_mut() {
                            *offset += this_offset;
                        }
                    }
                    _ => {
                        panic!("Layout of fields should be Arbitrary for variants")
                    }
                }

                // It can't be a Scalar or ScalarPair because the offset isn't 0.
                if !layout.abi.is_uninhabited() {
                    layout.abi = Abi::Aggregate { sized: true };
                }
                layout.size += this_offset;

                true
            });

            if !all_variants_fit {
                return None;
            }

            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

            let others_zst = variant_layouts
                .iter_enumerated()
                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
            let same_size = size == variant_layouts[largest_variant_index].size;
            let same_align = align == variant_layouts[largest_variant_index].align;

            let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
                Abi::Uninhabited
            } else if same_size && same_align && others_zst {
                match variant_layouts[largest_variant_index].abi {
                    // When the total alignment and size match, we can use the
                    // same ABI as the scalar variant with the reserved niche.
                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                    Abi::ScalarPair(first, second) => {
                        // Only the niche is guaranteed to be initialised,
                        // so use union layouts for the other primitive.
                        if niche_offset == Size::ZERO {
                            Abi::ScalarPair(niche_scalar, second.to_union())
                        } else {
                            Abi::ScalarPair(first.to_union(), niche_scalar)
                        }
                    }
                    _ => Abi::Aggregate { sized: true },
                }
            } else {
                Abi::Aggregate { sized: true }
            };

            let layout = LayoutS {
                variants: Variants::Multiple {
                    tag: niche_scalar,
                    tag_encoding: TagEncoding::Niche {
                        untagged_variant: largest_variant_index,
                        niche_variants,
                        niche_start,
                    },
                    tag_field: 0,
                    variants: IndexVec::new(),
                },
                fields: FieldsShape::Arbitrary {
                    offsets: [niche_offset].into(),
                    memory_index: [0].into(),
                },
                abi,
                largest_niche,
                size,
                align,
                max_repr_align,
                unadjusted_abi_align,
            };

            Some(TmpLayout { layout, variants: variant_layouts })
        };

        let niche_filling_layout = calculate_niche_filling_layout();

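        // Compute the tagged layout: first find the smallest integer type that
        // can hold the discriminants of every inhabited variant.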
        let (mut min, mut max) = (i128::MAX, i128::MIN);
        let discr_type = repr.discr_type();
        let bits = Integer::from_attr(dl, discr_type).size().bits();
        for (i, mut val) in discriminants {
            if variants[i].iter().any(|f| f.abi().is_uninhabited()) {
                continue;
            }
            if discr_type.is_signed() {
                // sign extend the raw representation to be an i128
                val = (val << (128 - bits)) >> (128 - bits);
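                // E.g. for an i8 discriminant of -1, the raw low byte is 0xff;
                // `(0xff << 120) >> 120` arithmetically shifts back to -1i128.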
            }
            if val < min {
                min = val;
            }
            if val > max {
                max = val;
            }
        }
        // We might have no inhabited variants, so pretend there's at least one.
        if (min, max) == (i128::MAX, i128::MIN) {
            min = 0;
            max = 0;
        }
        assert!(min <= max, "discriminant range is {min}...{max}");
        let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);

        let mut align = dl.aggregate_align;
        let mut max_repr_align = repr.align;
        let mut unadjusted_abi_align = align.abi;

        let mut size = Size::ZERO;

        // We're interested in the smallest alignment, so start large.
        let mut start_align = Align::from_bytes(256).unwrap();
        assert_eq!(Integer::for_align(dl, start_align), None);

        // repr(C) on an enum tells us to make a (tag, union) layout,
        // so we need to grow the prefix alignment to be at least
        // the alignment of the union. (This value is used both for
        // determining the alignment of the overall enum, and for
        // determining the alignment of the payload after the tag.)
        let mut prefix_align = min_ity.align(dl).abi;
        if repr.c() {
            for fields in variants {
                for field in fields {
                    prefix_align = prefix_align.max(field.align().abi);
                }
            }
        }

        // Create the set of structs that represent each variant.
        let mut layout_variants = variants
            .iter_enumerated()
            .map(|(i, field_layouts)| {
                let mut st = self.univariant(
                    dl,
                    field_layouts,
                    repr,
                    StructKind::Prefixed(min_ity.size(), prefix_align),
                )?;
                st.variants = Variants::Single { index: i };
                // Find the first field we can't move later
                // to make room for a larger discriminant.
                for field_idx in st.fields.index_by_increasing_offset() {
                    let field = &field_layouts[FieldIdx::from_usize(field_idx)];
                    if !field.0.is_1zst() {
                        start_align = start_align.min(field.align().abi);
                        break;
                    }
                }
                size = cmp::max(size, st.size);
                align = align.max(st.align);
                max_repr_align = max_repr_align.max(st.max_repr_align);
                unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align);
                Some(st)
            })
            .collect::<Option<IndexVec<VariantIdx, _>>>()?;

        // Align the maximum variant size to the largest alignment.
        size = size.align_to(align.abi);

        if size.bytes() >= dl.obj_size_bound() {
            return None;
        }

        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
        if typeck_ity < min_ity {
            // It is a bug if Layout decided on a greater discriminant size than typeck for
            // some reason at this point (based on the values the discriminant can take on).
            // Mostly because this discriminant will be loaded, and then stored into a variable
            // of the type calculated by typeck. Consider such a case (a bug): typeck decided
            // on a byte-sized discriminant, but layout thinks we need 16 bits to store all
            // discriminant values. Then, in codegen, in order to store this 16-bit discriminant
            // into an 8-bit sized temporary, some of the space necessary to represent it would
            // have to be discarded (or layout is wrong in thinking it needs 16 bits).
            panic!(
                "layout decided on a larger discriminant type ({min_ity:?}) than typeck ({typeck_ity:?})"
            );
            // However, it is fine to make the discriminant type larger (as an optimisation)
            // after this point - we'll just truncate the value we load in codegen.
        }

        // Check to see if we should use a different type for the
        // discriminant. We can safely use a type with the same size
        // as the alignment of the first field of each variant.
        // We increase the size of the discriminant to avoid LLVM copying
        // padding when it doesn't need to. Copying that padding normally causes
        // unaligned load/stores and excessive memcpy/memset operations. By using a
        // bigger integer size, LLVM can be sure about its contents and
        // won't be so conservative.

        // Use the initial field alignment
        let mut ity = if repr.c() || repr.int.is_some() {
            min_ity
        } else {
            Integer::for_align(dl, start_align).unwrap_or(min_ity)
        };

        // If the alignment is not larger than the chosen discriminant size,
        // don't use the alignment as the final size.
        if ity <= min_ity {
            ity = min_ity;
        } else {
            // Patch up the variants' first few fields.
            let old_ity_size = min_ity.size();
            let new_ity_size = ity.size();
            for variant in &mut layout_variants {
                match variant.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for i in offsets {
                            if *i <= old_ity_size {
                                assert_eq!(*i, old_ity_size);
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.size <= old_ity_size {
                            variant.size = new_ity_size;
                        }
                    }
                    _ => panic!(),
                }
            }
        }

        let tag_mask = ity.size().unsigned_int_max();
        let tag = Scalar::Initialized {
            value: Int(ity, signed),
            valid_range: WrappingRange {
                start: (min as u128 & tag_mask),
                end: (max as u128 & tag_mask),
            },
        };
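        // E.g. with a signed 8-bit tag and discriminants -1..=1, `tag_mask` is
        // 0xff and the tag's valid range becomes the wrapping range 0xff..=0x01.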
        let mut abi = Abi::Aggregate { sized: true };

        if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        } else if tag.size(dl) == size {
            // Make sure we only use scalar layout when the enum is entirely its
            // own tag (i.e. it has no padding nor any non-ZST variant fields).
            abi = Abi::Scalar(tag);
        } else {
            // Try to use a ScalarPair for all tagged enums.
            // That's possible only if we can find a common primitive type for all variants.
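            // E.g. `enum E { A(u32), B(u32) }` can become ScalarPair(tag, u32),
            // since every inhabited variant stores a single u32 at the same
            // offset after the tag.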
            let mut common_prim = None;
            let mut common_prim_initialized_in_all_variants = true;
            for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                    panic!();
                };
                // We skip *all* ZSTs here and later check if we are good in terms of alignment.
                // This lets us handle some cases involving aligned ZSTs.
                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.0.is_zst());
                let (field, offset) = match (fields.next(), fields.next()) {
                    (None, None) => {
                        common_prim_initialized_in_all_variants = false;
                        continue;
                    }
                    (Some(pair), None) => pair,
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                let prim = match field.abi() {
                    Abi::Scalar(scalar) => {
                        common_prim_initialized_in_all_variants &=
                            matches!(scalar, Scalar::Initialized { .. });
                        scalar.primitive()
                    }
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                if let Some(pair) = common_prim {
                    // This is pretty conservative. We could go fancier
                    // by conflating things like i32 and u32, or even
                    // realising that (u8, u8) could just cohabit with
                    // u16 or even u32.
                    if pair != (prim, offset) {
                        common_prim = None;
                        break;
                    }
                } else {
                    common_prim = Some((prim, offset));
                }
            }
            if let Some((prim, offset)) = common_prim {
                let prim_scalar = if common_prim_initialized_in_all_variants {
                    scalar_unit(prim)
                } else {
                    // Common prim might be uninit.
                    Scalar::Union { value: prim }
                };
                let pair = self.scalar_pair(tag, prim_scalar);
                let pair_offsets = match pair.fields {
                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                        assert_eq!(memory_index.raw, [0, 1]);
                        offsets
                    }
                    _ => panic!(),
                };
                if pair_offsets[FieldIdx::from_u32(0)] == Size::ZERO
                    && pair_offsets[FieldIdx::from_u32(1)] == *offset
                    && align == pair.align
                    && size == pair.size
                {
                    // We can use `ScalarPair` only when it matches our
                    // already computed layout (including `#[repr(C)]`).
                    abi = pair.abi;
                }
            }
        }

        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
        // variants to ensure they are consistent. This is because a downcast is
        // semantically a NOP, and thus should not affect layout.
        if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
            for variant in &mut layout_variants {
                // We only do this for variants with fields; the others are not accessed anyway.
                // Also do not overwrite any already existing "clever" ABIs.
                if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
                    variant.abi = abi;
                    // Also need to bump up the size and alignment, so that the entire value
                    // fits in here.
                    variant.size = cmp::max(variant.size, size);
                    variant.align.abi = cmp::max(variant.align.abi, align.abi);
                }
            }
        }

        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);

        let tagged_layout = LayoutS {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: 0,
                variants: IndexVec::new(),
            },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO].into(),
                memory_index: [0].into(),
            },
            largest_niche,
            abi,
            align,
            size,
            max_repr_align,
            unadjusted_abi_align,
        };

        let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };

        let mut best_layout = match (tagged_layout, niche_filling_layout) {
            (tl, Some(nl)) => {
                // Pick the smaller layout; otherwise,
                // pick the layout with the larger niche; otherwise,
                // pick tagged as it has simpler codegen.
                use cmp::Ordering::*;
                let niche_size =
                    |tmp_l: &TmpLayout| tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl));
                match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
                    (Greater, _) => nl,
                    (Equal, Less) => nl,
                    _ => tl,
                }
            }
            (tl, None) => tl,
        };

        // Now we can intern the variant layouts and store them in the enum layout.
        best_layout.layout.variants = match best_layout.layout.variants {
            Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
                Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
            }
            _ => panic!(),
        };
        Some(best_layout.layout)
    }

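    /// Computes the layout of a union: the size is the largest field size, the
    /// alignment the largest field alignment (adjusted by `repr(packed)` and
    /// `repr(align)`), and when all non-ZST fields share one ABI, that ABI may
    /// be forwarded to the union as a whole.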
    fn layout_of_union(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
    ) -> Option<LayoutS> {
        let dl = self.current_data_layout();
        let dl = dl.borrow();
        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
        let mut max_repr_align = repr.align;

        // If all the non-ZST fields have the same ABI and union ABI optimizations aren't
        // disabled, we can use that common ABI for the union as a whole.
        struct AbiMismatch;
        let mut common_non_zst_abi_and_align = if repr.inhibit_union_abi_opt() {
            // Can't optimize
            Err(AbiMismatch)
        } else {
            Ok(None)
        };

        let mut size = Size::ZERO;
        let only_variant = &variants[FIRST_VARIANT];
        for field in only_variant {
            if field.0.is_unsized() {
                self.delay_bug("unsized field in union".to_string());
            }

            align = align.max(field.align());
            max_repr_align = max_repr_align.max(field.max_repr_align());
            size = cmp::max(size, field.size());

            if field.0.is_zst() {
                // Nothing more to do for ZST fields
                continue;
            }

            if let Ok(common) = common_non_zst_abi_and_align {
                // Discard valid range information and allow undef
                let field_abi = field.abi().to_union();

                if let Some((common_abi, common_align)) = common {
                    if common_abi != field_abi {
                        // Different fields have different ABI: disable opt
                        common_non_zst_abi_and_align = Err(AbiMismatch);
                    } else {
                        // Fields with the same non-Aggregate ABI should also
                        // have the same alignment
                        if !matches!(common_abi, Abi::Aggregate { .. }) {
                            assert_eq!(
                                common_align,
                                field.align().abi,
                                "non-Aggregate field with matching ABI but differing alignment"
                            );
                        }
                    }
                } else {
                    // First non-ZST field: record its ABI and alignment
                    common_non_zst_abi_and_align = Ok(Some((field_abi, field.align().abi)));
                }
            }
        }

        if let Some(pack) = repr.pack {
            align = align.min(AbiAndPrefAlign::new(pack));
        }
        // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
        // See documentation on `LayoutS::unadjusted_abi_align`.
        let unadjusted_abi_align = align.abi;
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }
        // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate.
        let align = align;

        // If all non-ZST fields have the same ABI, we may forward that ABI
        // for the union as a whole, unless otherwise inhibited.
        let abi = match common_non_zst_abi_and_align {
            Err(AbiMismatch) | Ok(None) => Abi::Aggregate { sized: true },
            Ok(Some((abi, _))) => {
                if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) {
                    // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
                    Abi::Aggregate { sized: true }
                } else {
                    abi
                }
            }
        };

        Some(LayoutS {
            variants: Variants::Single { index: FIRST_VARIANT },
            fields: FieldsShape::Union(NonZeroUsize::new(only_variant.len())?),
            abi,
            largest_niche: None,
            align,
            size: size.align_to(align.abi),
            max_repr_align,
            unadjusted_abi_align,
        })
    }
}

/// Determines towards which end of a struct layout optimizations will try to place the best niches.
enum NicheBias {
    Start,
    End,
}

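/// Lays out the fields of a single variant, reordering them when permitted to
/// minimize padding and to steer the largest niche towards the end selected by
/// `niche_bias`.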
fn univariant(
    this: &(impl LayoutCalculator + ?Sized),
    dl: &TargetDataLayout,
    fields: &IndexSlice<FieldIdx, Layout<'_>>,
    repr: &ReprOptions,
    kind: StructKind,
    niche_bias: NicheBias,
) -> Option<LayoutS> {
    let pack = repr.pack;
    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
    let mut max_repr_align = repr.align;
    let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
    let optimize = !repr.inhibit_struct_field_reordering_opt();
    if optimize && fields.len() > 1 {
        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut inverse_memory_index.raw[..end];
        let fields_excluding_tail = &fields.raw[..end];

        // If `-Z randomize-layout` was enabled for the type definition we can shuffle
        // the field ordering to try and catch some code making assumptions about layouts
        // we don't guarantee
        if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
            #[cfg(feature = "randomize")]
            {
                // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to
                // randomize field ordering with
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed.as_u64());

                // Shuffle the ordering of the fields
                optimizing.shuffle(&mut rng);
            }
            // Otherwise we just leave things alone and actually optimize the type's fields
        } else {
            // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
            // not depend on the layout of the tail.
            let max_field_align =
                fields_excluding_tail.iter().map(|f| f.align().abi.bytes()).max().unwrap_or(1);
            let largest_niche_size = fields_excluding_tail
                .iter()
                .filter_map(|f| f.largest_niche())
                .map(|n| n.available(dl))
                .max()
                .unwrap_or(0);

            // Calculates a sort key to group fields by their alignment or possibly some
            // size-derived pseudo-alignment.
            let alignment_group_key = |layout: Layout<'_>| {
                if let Some(pack) = pack {
                    // Return the packed alignment in bytes.
                    layout.align().abi.min(pack).bytes()
                } else {
                    // Returns log2(effective-align).
                    // This is ok since `pack` applies to all fields equally.
                    // The calculation assumes that size is an integer multiple of align,
                    // except for ZSTs.
                    let align = layout.align().abi.bytes();
                    let size = layout.size().bytes();
                    let niche_size = layout.largest_niche().map(|n| n.available(dl)).unwrap_or(0);
                    // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
                    let size_as_align = align.max(size).trailing_zeros();
                    let size_as_align = if largest_niche_size > 0 {
                        match niche_bias {
                            // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
                            // array to the front in the first case (for aligned loads) but keep
                            // the bool in front in the second case for its niches.
                            NicheBias::Start => max_field_align.trailing_zeros().min(size_as_align),
                            // When moving niches towards the end of the struct then for
                            // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first
                            // tuple in the align-1 group because its bool can be moved closer
                            // to the end.
                            NicheBias::End if niche_size == largest_niche_size => {
                                align.trailing_zeros()
                            }
                            NicheBias::End => size_as_align,
                        }
                    } else {
                        size_as_align
                    };
                    size_as_align as u64
                }
            };

            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    // Currently `LayoutS` only exposes a single niche so sorting is usually
                    // sufficient to get one niche into the preferred position. If it ever
                    // supported multiple niches then a more advanced pick-and-pack approach could
                    // provide better results. But even for the single-niche case it's not
                    // optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to move the
                    // bool to the front but it would require packing the tuple together with the
                    // u16 to build a 4-byte group so that the u32 can be placed after it without
                    // padding. This kind of packing can't be achieved by sorting.
                    optimizing.sort_by_key(|&x| {
                        let f = fields[x];
                        let field_size = f.size().bytes();
                        let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
                        let niche_size_key = match niche_bias {
                            // large niche first
                            NicheBias::Start => !niche_size,
                            // large niche last
                            NicheBias::End => niche_size,
                        };
                        let inner_niche_offset_key = match niche_bias {
                            NicheBias::Start => f.largest_niche().map_or(0, |n| n.offset.bytes()),
                            NicheBias::End => f.largest_niche().map_or(0, |n| {
                                !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
                            }),
                        };

                        (
                            // Place the largest alignments first.
                            cmp::Reverse(alignment_group_key(f)),
                            // Then prioritize niche placement within the alignment group
                            // according to `niche_bias`.
                            niche_size_key,
                            // Then among fields with equally-sized niches prefer the ones
                            // closer to the start/end of the field.
                            inner_niche_offset_key,
                        )
                    });
                }

                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix.
                    // And put the largest niche in an alignment group at the end
                    // so it can be used as a discriminant in jagged enums.
                    optimizing.sort_by_key(|&x| {
                        let f = fields[x];
                        let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
                        (alignment_group_key(f), niche_size)
                    });
                }
            }

            // FIXME(Kixiron): We can always shuffle fields within a given alignment class
            // regardless of the status of `-Z randomize-layout`.
        }
    }
    // inverse_memory_index holds field indices by increasing memory offset.
    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
    // We now write field offsets to the corresponding offset slot;
    // field 5 with offset 0 puts 0 in offsets[5].
    // At the bottom of this function, we invert `inverse_memory_index` to
    // produce `memory_index` (see `invert_bijective_mapping`).
    let mut sized = true;
    let mut offsets = IndexVec::from_elem(Size::ZERO, &fields);
    let mut offset = Size::ZERO;
    let mut largest_niche = None;
    let mut largest_niche_available = 0;
    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
        let prefix_align =
            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
        align = align.max(AbiAndPrefAlign::new(prefix_align));
        offset = prefix_size.align_to(prefix_align);
    }
    for &i in &inverse_memory_index {
        let field = &fields[i];
        if !sized {
            this.delay_bug(format!(
                "univariant: field #{} comes after unsized field",
                offsets.len(),
            ));
        }

        if field.0.is_unsized() {
            sized = false;
        }

        // Invariant: offset < dl.obj_size_bound() <= 1<<61
        let field_align = if let Some(pack) = pack {
            field.align().min(AbiAndPrefAlign::new(pack))
        } else {
            field.align()
        };
        offset = offset.align_to(field_align.abi);
        align = align.max(field_align);
        max_repr_align = max_repr_align.max(field.max_repr_align());

        debug!("univariant offset: {:?} field: {:#?}", offset, field);
        offsets[i] = offset;

        if let Some(mut niche) = field.largest_niche() {
            let available = niche.available(dl);
            // Pick up larger niches.
            let prefer_new_niche = match niche_bias {
                NicheBias::Start => available > largest_niche_available,
                // if there are several niches of the same size then pick the last one
                NicheBias::End => available >= largest_niche_available,
            };
            if prefer_new_niche {
                largest_niche_available = available;
                niche.offset += offset;
                largest_niche = Some(niche);
            }
        }

        offset = offset.checked_add(field.size(), dl)?;
    }

    // The unadjusted ABI alignment does not include repr(align), but does include repr(pack).
    // See documentation on `LayoutS::unadjusted_abi_align`.
    let unadjusted_abi_align = align.abi;
    if let Some(repr_align) = repr.align {
        align = align.max(AbiAndPrefAlign::new(repr_align));
    }
    // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate.
    let align = align;

    debug!("univariant min_size: {:?}", offset);
    let min_size = offset;
    // As stated above, inverse_memory_index holds field indices by increasing offset.
    // This makes it an already-sorted view of the offsets vec.
    // To invert it, consider:
    // if field 5 has offset 0, inverse_memory_index[0] is 5 and memory_index[5] should be 0.
    // Field 5 is the first element in memory order, so its memory_index is 0.
    // Note: if we didn't optimize, it's already right.
    let memory_index = if optimize {
        inverse_memory_index.invert_bijective_mapping()
    } else {
        debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
        inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
    };
    let size = min_size.align_to(align.abi);
    let mut layout_of_single_non_zst_field = None;
    let mut abi = Abi::Aggregate { sized };
    // Try to make this a Scalar/ScalarPair.
    if sized && size.bytes() > 0 {
        // We skip *all* ZSTs here and later check if we are good in terms of alignment.
        // This lets us handle some cases involving aligned ZSTs.
        let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());

        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
            // We have exactly one non-ZST field.
            (Some((i, field)), None, None) => {
                layout_of_single_non_zst_field = Some(field);

                // Field fills the struct and it has a scalar or scalar pair ABI.
                if offsets[i].bytes() == 0 && align.abi == field.align().abi && size == field.size()
                {
                    match field.abi() {
                        // For plain scalars, or vectors of them, we can't unpack
                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
                        Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                            abi = field.abi();
                        }
                        // But scalar pairs are Rust-specific and get
                        // treated as aggregates by C ABIs anyway.
                        Abi::ScalarPair(..) => {
                            abi = field.abi();
                        }
                        _ => {}
                    }
                }
            }

            // Two non-ZST fields, and they're both scalars.
            (Some((i, a)), Some((j, b)), None) => {
                match (a.abi(), b.abi()) {
                    (Abi::Scalar(a), Abi::Scalar(b)) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = this.scalar_pair(a, b);
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index.raw, [0, 1]);
                                offsets
                            }
                            _ => panic!(),
                        };
                        if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
                            && offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                    _ => {}
                }
            }

            _ => {}
        }
    }
    if fields.iter().any(|f| f.abi().is_uninhabited()) {
        abi = Abi::Uninhabited;
    }

    let unadjusted_abi_align = if repr.transparent() {
        match layout_of_single_non_zst_field {
            Some(l) => l.unadjusted_abi_align(),
            None => {
                // `repr(transparent)` with all ZST fields.
                align.abi
            }
        }
    } else {
        unadjusted_abi_align
    };

    Some(LayoutS {
        variants: Variants::Single { index: FIRST_VARIANT },
        fields: FieldsShape::Arbitrary { offsets, memory_index },
        abi,
        largest_niche,
        align,
        size,
        max_repr_align,
        unadjusted_abi_align,
    })
}

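/// Renders the per-field layout summary used in the `debug!` output above:
/// one `[o<offset>a<align>s<size>]` entry per field in memory order, extended
/// with ` n<niche offset>b<log2 of available niche values>s<niche size>` when
/// the field has a niche.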
fn format_field_niches(
    layout: &LayoutS,
    fields: &IndexSlice<FieldIdx, Layout<'_>>,
    dl: &TargetDataLayout,
) -> String {
    let mut s = String::new();
    for i in layout.fields.index_by_increasing_offset() {
        let offset = layout.fields.offset(i);
        let f = fields[i.into()];
        write!(s, "[o{}a{}s{}", offset.bytes(), f.align().abi.bytes(), f.size().bytes()).unwrap();
        if let Some(n) = f.largest_niche() {
            write!(
                s,
                " n{}b{}s{}",
                n.offset.bytes(),
                n.available(dl).ilog2(),
                n.value.size(dl).bytes()
            )
            .unwrap();
        }
        write!(s, "] ").unwrap();
    }
    s
}