// src/librustc/ty/layout.rs (upstream version 1.43.0)
use crate::session::{self, DataTypeKind};
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast::ast::{self, Ident, IntTy, UintTy};
use rustc_attr as attr;
use rustc_span::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::ops::Bound;

use crate::ich::StableHashingContext;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};

use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, Conv, FnAbi, PassMode, Reg, RegKind,
};
pub use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec};

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    /// N.B.: u128 values above i128::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
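    ///
    /// For example (illustrative, not from the original source): with no
    /// `#[repr]` hint, a discriminant range of `0..=200` yields `(I8, false)`
    /// since 200 fits in a `u8`, while `-1..=128` yields `(I16, true)` because
    /// 128 does not fit in an `i8`.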
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
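        // e.g. (illustrative) for min = -1, max = 100: `min as u128` is
        // `u128::MAX`, so unsigned_fit is I128, but signed_fit is I8 and,
        // since min < 0, the signed fit is what gets used below.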

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                     discriminant range of enum `{}",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
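
// For example (illustrative): for `&[u8]`, field `FAT_PTR_ADDR` is the data
// pointer and field `FAT_PTR_EXTRA` is the `usize` length; for `&dyn Trait`,
// they are the data pointer and the vtable pointer.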

#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{:?}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldPlacement::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
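// For example (illustrative): if `map = [2, 0, 1]` (source field 0 is 3rd in
// memory, field 1 is 1st, field 2 is 2nd), then `invert_mapping(&map)` is
// `[1, 2, 0]` (the 1st slot in memory holds field 1, and so on).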
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutDetails {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);
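        // e.g. (illustrative) for `a` = a `u8` scalar and `b` = a `u32` scalar
        // on a target with 4-byte-aligned `u32`: b_offset is 4 and size is 8.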

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<LayoutDetails, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let mut optimize = !repr.inhibit_struct_field_reordering_opt();
        if let StructKind::Prefixed(_, align) = kind {
            optimize &= align.bytes() == 1;
        }
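        // (Illustrative) When reordering is allowed, a struct with fields
        // `(u8, u32, u16)` can be laid out as `u32, u16, u8` (offsets 0, 4, 6,
        // size 8) instead of the declaration order's 12 bytes on a typical
        // target; `#[repr(C)]` inhibits this.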

        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index;
        if optimize {
            memory_index = invert_mapping(&inverse_memory_index);
        } else {
            memory_index = inverse_memory_index;
        }

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
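        // e.g. (illustrative) `struct Wrapper(u64);` can reuse the `Abi::Scalar`
        // of its only field, and a struct whose only two non-ZST fields are
        // scalars may become `Abi::ScalarPair`.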
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs, and we need them to all start at 0.
            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                    // We have exactly one non-ZST field.
                    (Some((i, field)), None, None) => {
                        // Field fills the struct and it has a scalar or scalar pair ABI.
                        if offsets[i].bytes() == 0
                            && align.abi == field.align.abi
                            && size == field.size
                        {
                            match field.abi {
                                // For plain scalars, or vectors of them, we can't unpack
                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                    abi = field.abi.clone();
                                }
                                // But scalar pairs are Rust-specific and get
                                // treated as aggregates by C ABIs anyway.
                                Abi::ScalarPair(..) => {
                                    abi = field.abi.clone();
                                }
                                _ => {}
                            }
                        }
                    }

                    // Two non-ZST fields, and they're both scalars.
                    (
                        Some((
                            i,
                            &TyLayout {
                                details: &LayoutDetails { abi: Abi::Scalar(ref a), .. },
                                ..
                            },
                        )),
                        Some((
                            j,
                            &TyLayout {
                                details: &LayoutDetails { abi: Abi::Scalar(ref b), .. },
                                ..
                            },
                        )),
                        None,
                    ) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = self.scalar_pair(a.clone(), b.clone());
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if offsets[i] == pair_offsets[0]
                            && offsets[j] == pair_offsets[1]
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }

                    _ => {}
                }
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
        let scalar =
            |value: Primitive| tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match ty.kind {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(LayoutDetails::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(LayoutDetails::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Union(0),
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(def_id, ref substs) => {
                let tys = substs.as_closure().upvar_tys(def_id, tcx);
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx);
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with \
                             a non-machine element type `{}`",
                            ty, element.ty
                        ));
                    }
                };
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array { stride: element.size, count },
                    abi: Abi::Vector { element: scalar, count },
                    largest_niche: element.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
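                        // e.g. (illustrative) `union U { a: u32, b: u32 }` keeps a
                        // scalar ABI, while mixing `u32` and `f32` fields resets
                        // the ABI to `Abi::Aggregate` below.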
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    present_first @ Some(_) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // if it's a struct, still compute a layout so that we can still compute the
                    // field offsets
                    None => Some(VariantIdx::new(0)),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                // Niche-filling enum optimization.
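                // For example (illustrative): `Option<&T>` has one dataful variant
                // (`Some`) and one niche variant (`None`); `None` is encoded as the
                // pointer's forbidden value 0, so the enum needs no separate tag
                // and stays pointer-sized.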
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;
                        // FIXME(#62691) use the largest niche across all fields,
                        // not just the first one.
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match &field.largest_niche {
                                Some(niche) => niche,
                                _ => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };

                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };

                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::Multiple {
                                    discr: niche_scalar,
                                    discr_kind: DiscriminantKind::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    discr_index: 0,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            }));
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if layout decided on a greater discriminant size than
                    // typeck did at this point (based on the values the discriminant can
                    // take on), because this discriminant will be loaded and then stored
                    // into a variable of the type calculated by typeck. Consider such a
                    // case (a bug): typeck decided on a byte-sized discriminant, but
                    // layout thinks we need 16 bits to store all discriminant values.
                    // Then, in codegen, storing this 16-bit discriminant into an 8-bit
                    // sized temporary would have to discard some of the space necessary
                    // to represent it (or layout is wrong in thinking it needs 16 bits).
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
                    // However, it is fine to make the discr type however large (as an
                    // optimisation) after this point – we’ll just truncate the value we
                    // load in codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!(),
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields =
                            field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if pair_offsets[0] == Size::ZERO
                            && pair_offsets[1] == *offset
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Multiple {
                        discr: tag,
                        discr_kind: DiscriminantKind::Tag,
                        discr_index: 0,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0],
                    },
                    largest_niche,
                    abi,
                    align,
                    size,
                })
            }

            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Bound(..)
            | ty::Placeholder(..)
            | ty::UnnormalizedProjection(..)
            | ty::GeneratorWitness(..)
            | ty::Infer(_) => bug!("LayoutDetails::compute: unexpected type `{}`", ty),

            ty::Param(_) | ty::Error => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
}

/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    Unassigned,
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    Ineligible(Option<u32>),
}

// When laying out generators, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.
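//
// For example (illustrative): a local kept live across only one suspension
// point is overlap-eligible and is laid out inside that variant, where it may
// share an offset with locals of other variants; a local live across two
// suspension points is ineligible and gets promoted to the prefix.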
1288impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1289 /// Compute the eligibility and assignment of each local.
dfeec247
XL
1290 fn generator_saved_local_eligibility(
1291 &self,
1292 info: &GeneratorLayout<'tcx>,
1293 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
dc9dc135
XL
1294 use SavedLocalEligibility::*;
1295
1296 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1297 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1298
1299 // The saved locals not eligible for overlap. These will get
1300 // "promoted" to the prefix of our generator.
1301 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1302
1303 // Figure out which of our saved locals are fields in only
1304 // one variant. The rest are deemed ineligible for overlap.
1305 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1306 for local in fields {
1307 match assignments[*local] {
1308 Unassigned => {
1309 assignments[*local] = Assigned(variant_index);
1310 }
1311 Assigned(idx) => {
1312 // We've already seen this local at another suspension
1313 // point, so it is no longer a candidate.
dfeec247
XL
1314 trace!(
1315 "removing local {:?} in >1 variant ({:?}, {:?})",
1316 local,
1317 variant_index,
1318 idx
1319 );
dc9dc135
XL
1320 ineligible_locals.insert(*local);
1321 assignments[*local] = Ineligible(None);
1322 }
dfeec247 1323 Ineligible(_) => {}
dc9dc135
XL
1324 }
1325 }
1326 }
1327
1328 // Next, check every pair of eligible locals to see if they
1329 // conflict.
1330 for local_a in info.storage_conflicts.rows() {
1331 let conflicts_a = info.storage_conflicts.count(local_a);
1332 if ineligible_locals.contains(local_a) {
1333 continue;
1334 }
1335
1336 for local_b in info.storage_conflicts.iter(local_a) {
1337 // local_a and local_b are storage live at the same time, therefore they
1338 // cannot overlap in the generator layout. The only way to guarantee
1339 // this is if they are in the same variant, or one is ineligible
1340 // (which means it is stored in every variant).
dfeec247
XL
1341 if ineligible_locals.contains(local_b)
1342 || assignments[local_a] == assignments[local_b]
dc9dc135
XL
1343 {
1344 continue;
1345 }
1346
1347 // If they conflict, we will choose one to make ineligible.
1348 // This is not always optimal; it's just a greedy heuristic that
1349 // seems to produce good results most of the time.
1350 let conflicts_b = info.storage_conflicts.count(local_b);
dfeec247
XL
1351 let (remove, other) =
1352 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
dc9dc135
XL
1353 ineligible_locals.insert(remove);
1354 assignments[remove] = Ineligible(None);
1355 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1356 }
1357 }
1358
416331ca
XL
1359 // Count the number of variants in use. If only one of them, then it is
1360 // impossible to overlap any locals in our layout. In this case it's
1361 // always better to make the remaining locals ineligible, so we can
1362 // lay them out with the other locals in the prefix and eliminate
1363 // unnecessary padding bytes.
1364 {
1365 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1366 for assignment in &assignments {
1367 match assignment {
dfeec247
XL
1368 Assigned(idx) => {
1369 used_variants.insert(*idx);
1370 }
416331ca
XL
1371 _ => {}
1372 }
1373 }
1374 if used_variants.count() < 2 {
1375 for assignment in assignments.iter_mut() {
1376 *assignment = Ineligible(None);
1377 }
1378 ineligible_locals.insert_all();
1379 }
1380 }
1381
dc9dc135
XL
1382 // Write down the order of our locals that will be promoted to the prefix.
1383 {
74b04a01
XL
1384 for (idx, local) in ineligible_locals.iter().enumerate() {
1385 assignments[local] = Ineligible(Some(idx as u32));
dc9dc135
XL
1386 }
1387 }
1388 debug!("generator saved local assignments: {:?}", assignments);
1389
1390 (ineligible_locals, assignments)
1391 }
1392
1393 /// Compute the full generator layout.
1394 fn generator_layout(
1395 &self,
1396 ty: Ty<'tcx>,
1397 def_id: hir::def_id::DefId,
e74abb32 1398 substs: SubstsRef<'tcx>,
dc9dc135
XL
1399 ) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
1400 use SavedLocalEligibility::*;
1401 let tcx = self.tcx;
1402
dfeec247 1403 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
dc9dc135
XL
1404
1405 let info = tcx.generator_layout(def_id);
1406 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1407
1408 // Build a prefix layout, including "promoting" all ineligible
1409 // locals as part of the prefix. We compute the layout of all of
1410 // these fields at once to get optimal packing.
e74abb32 1411 let discr_index = substs.as_generator().prefix_tys(def_id, tcx).count();
416331ca 1412 // FIXME(eddyb) set the correct vaidity range for the discriminant.
e74abb32 1413 let discr_layout = self.layout_of(substs.as_generator().discr_ty(tcx))?;
416331ca
XL
1414 let discr = match &discr_layout.abi {
1415 Abi::Scalar(s) => s.clone(),
1416 _ => bug!(),
1417 };
dfeec247
XL
1418 let promoted_layouts = ineligible_locals
1419 .iter()
416331ca
XL
1420 .map(|local| subst_field(info.field_tys[local]))
1421 .map(|ty| tcx.mk_maybe_uninit(ty))
1422 .map(|ty| self.layout_of(ty));
dfeec247
XL
1423 let prefix_layouts = substs
1424 .as_generator()
1425 .prefix_tys(def_id, tcx)
416331ca
XL
1426 .map(|ty| self.layout_of(ty))
1427 .chain(iter::once(Ok(discr_layout)))
1428 .chain(promoted_layouts)
1429 .collect::<Result<Vec<_>, _>>()?;
dc9dc135
XL
1430 let prefix = self.univariant_uninterned(
1431 ty,
416331ca 1432 &prefix_layouts,
dc9dc135 1433 &ReprOptions::default(),
416331ca
XL
1434 StructKind::AlwaysSized,
1435 )?;
1436
dc9dc135
XL
1437 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1438
1439 // Split the prefix layout into the "outer" fields (upvars and
1440 // discriminant) and the "promoted" fields. Promoted fields will
1441 // get included in each variant that requested them in
1442 // GeneratorLayout.
1443 debug!("prefix = {:#?}", prefix);
1444 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1445 FieldPlacement::Arbitrary { mut offsets, memory_index } => {
1446 let mut inverse_memory_index = invert_mapping(&memory_index);
1447
1448 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1449 // "outer" and "promoted" fields respectively.
1450 let b_start = (discr_index + 1) as u32;
1451 let offsets_b = offsets.split_off(b_start as usize);
1452 let offsets_a = offsets;
1453
1454 // Disentangle the "a" and "b" components of `inverse_memory_index`
1455 // by preserving the order but keeping only one disjoint "half" each.
1456 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1457 let inverse_memory_index_b: Vec<_> =
1458 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1459 inverse_memory_index.retain(|&i| i < b_start);
1460 let inverse_memory_index_a = inverse_memory_index;
1461
1462 // Since `inverse_memory_index_{a,b}` each only refer to their
1463 // respective fields, they can be safely inverted
1464 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1465 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1466
dfeec247
XL
1467 let outer_fields =
1468 FieldPlacement::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
dc9dc135
XL
1469 (outer_fields, offsets_b, memory_index_b)
1470 }
1471 _ => bug!(),
1472 };

        let mut size = prefix.size;
        let mut align = prefix.align;
        let variants = info
            .variant_fields
            .iter_enumerated()
            .map(|(index, variant_fields)| {
                // Only include overlap-eligible fields when we compute our variant layout.
                let variant_only_tys = variant_fields
                    .iter()
                    .filter(|local| match assignments[**local] {
                        Unassigned => bug!(),
                        Assigned(v) if v == index => true,
                        Assigned(_) => bug!("assignment does not match variant"),
                        Ineligible(_) => false,
                    })
                    .map(|local| subst_field(info.field_tys[*local]));

                let mut variant = self.univariant_uninterned(
                    ty,
                    &variant_only_tys
                        .map(|ty| self.layout_of(ty))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::Prefixed(prefix_size, prefix_align.abi),
                )?;
                variant.variants = Variants::Single { index };

                let (offsets, memory_index) = match variant.fields {
                    FieldPlacement::Arbitrary { offsets, memory_index } => (offsets, memory_index),
                    _ => bug!(),
                };

                // Now, stitch the promoted and variant-only fields back together in
                // the order they are mentioned by our GeneratorLayout.
                // Because we only use some subset (that can differ between variants)
                // of the promoted fields, we can't just pick those elements of the
                // `promoted_memory_index` (as we'd end up with gaps).
                // So instead, we build an "inverse memory_index", as if all of the
                // promoted fields were being used, but leave the elements not in the
                // subset as `INVALID_FIELD_IDX`, which we can filter out later to
                // obtain a valid (bijective) mapping.
                const INVALID_FIELD_IDX: u32 = !0;
                let mut combined_inverse_memory_index =
                    vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
                let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
                let combined_offsets = variant_fields
                    .iter()
                    .enumerate()
                    .map(|(i, local)| {
                        let (offset, memory_index) = match assignments[*local] {
                            Unassigned => bug!(),
                            Assigned(_) => {
                                let (offset, memory_index) =
                                    offsets_and_memory_index.next().unwrap();
                                (offset, promoted_memory_index.len() as u32 + memory_index)
                            }
                            Ineligible(field_idx) => {
                                let field_idx = field_idx.unwrap() as usize;
                                (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                            }
                        };
                        combined_inverse_memory_index[memory_index as usize] = i as u32;
                        offset
                    })
                    .collect();

                // Remove the unused slots and invert the mapping to obtain the
                // combined `memory_index` (also see previous comment).
                combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
                let combined_memory_index = invert_mapping(&combined_inverse_memory_index);

                variant.fields = FieldPlacement::Arbitrary {
                    offsets: combined_offsets,
                    memory_index: combined_memory_index,
                };

                size = size.max(variant.size);
                align = align.max(variant.align);
                Ok(variant)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        size = size.align_to(align.abi);

        let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
        {
            Abi::Uninhabited
        } else {
            Abi::Aggregate { sized: true }
        };

        let layout = tcx.intern_layout(LayoutDetails {
            variants: Variants::Multiple {
                discr,
                discr_kind: DiscriminantKind::Tag,
                discr_index,
                variants,
            },
            fields: outer_fields,
            abi,
            largest_niche: prefix.largest_niche,
            size,
            align,
        });
        debug!("generator layout ({:?}): {:#?}", ty, layout);
        Ok(layout)
    }

    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline(always)]
    fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, maybe record layouts
        // for dumping later.
        if self.tcx.sess.opts.debugging_opts.print_type_sizes {
            self.record_layout_for_printing_outlined(layout)
        }
    }

    fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
        // Ignore non-monomorphic layouts, and layouts computed with non-empty
        // parameter environments, as the user only wants to see what results
        // from the final codegen session.
        if layout.ty.has_param_types() || !self.param_env.caller_bounds.is_empty() {
            return;
        }

        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.record_type_size(
                kind,
                type_desc,
                layout.align.abi,
                layout.size,
                packed,
                opt_discr_size,
                variants,
            );
        };

        let adt_def = match layout.ty.kind {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.pack.is_some();

        let build_variant_info = |n: Option<Ident>, flds: &[ast::Name], layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds
                .iter()
                .enumerate()
                .map(|(i, &name)| match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                })
                .collect();

            session::VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi.bytes(),
                size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
                fields: field_info,
            }
        };

        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(
                        adt_kind.into(),
                        adt_packed,
                        None,
                        vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
                    );
                } else {
                    // (This case arises for *empty* enums, so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::Multiple { ref discr, ref discr_kind, .. } => {
                debug!(
                    "print-type-size `{:#?}` adt general variants def {}",
                    layout.ty,
                    adt_def.variants.len()
                );
                let variant_infos: Vec<_> = adt_def
                    .variants
                    .iter_enumerated()
                    .map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(
                            Some(variant_def.ident),
                            &fields,
                            layout.for_variant(self, i),
                        )
                    })
                    .collect();
                record(
                    adt_kind.into(),
                    adt_packed,
                    match discr_kind {
                        DiscriminantKind::Tag => Some(discr.value.size(self)),
                        _ => None,
                    },
                    variant_infos,
                );
            }
        }
    }
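
    // With `-Zprint-type-sizes` enabled, the entries recorded above are
    // printed when the session ends; the output is roughly of this shape
    // (illustrative, not exact):
    //
    //     print-type-size type: `Option<u32>`: 8 bytes, alignment: 4 bytes
    //     print-type-size     discriminant: 4 bytes
    //     print-type-size     variant `Some`: 4 bytes
    //     print-type-size         field `.0`: 4 bytes
    //     print-type-size     variant `None`: 0 bytes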
}

/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof, and null-pointer-optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>,
    },
}
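
// For example, `mem::transmute::<&T, *const T>(r)` inside a generic function
// cannot be checked by computing concrete layouts (the size of `&T` depends
// on whether `T: Sized`), but both sides reduce to
// `SizeSkeleton::Pointer { tail: T }` with equal tails, so the transmute can
// still be accepted.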

impl<'tcx> SizeSkeleton<'tcx> {
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types_or_consts());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err,
        };

        match ty.kind {
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                match tail.kind {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types());
                        Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(&tail) })
                    }
                    _ => bug!(
                        "SizeSkeleton::compute({}): layout errored ({}), yet \
                         tail `{}` is not a type parameter or a projection",
                        ty,
                        err,
                        tail
                    ),
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i]
                        .fields
                        .iter()
                        .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer { .. } => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };
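
                // E.g. for `Option<&T>` with `T` generic: the `None` variant
                // has no fields, so `zero_or_ptr_variant(0)` yields `Ok(None)`,
                // while `Some` holds exactly one non-null pointer, yielding
                // `Ok(Some(SizeSkeleton::Pointer { non_zero: true, .. }))`;
                // the nullable-pointer match below then produces
                // `SizeSkeleton::Pointer { non_zero: false, tail: T }`.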

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero
                                || match tcx.layout_scalar_valid_range(def.did) {
                                    (Bound::Included(start), Bound::Unbounded) => start > 0,
                                    (Bound::Included(start), Bound::Included(end)) => {
                                        0 < start && start < end
                                    }
                                    _ => false,
                                },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
                    | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer { non_zero: false, tail })
                    }
                    _ => Err(err),
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err),
        }
    }

    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
                a == b
            }
            _ => false,
        }
    }
}

pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}

pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}

impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self
    }
}

impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.tcx()
    }
}

pub trait MaybeResult<T> {
    type Error;

    fn from(x: Result<T, Self::Error>) -> Self;
    fn to_result(self) -> Result<T, Self::Error>;
}

impl<T> MaybeResult<T> for T {
    type Error = !;

    fn from(x: Result<T, Self::Error>) -> Self {
        let Ok(x) = x;
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        Ok(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    fn from(x: Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        self
    }
}
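
// `MaybeResult` lets generic layout code be written once and used both where
// layout computation is infallible (`TyLayout` itself, with `Error = !`) and
// where it can fail (`Result<TyLayout, LayoutError>`). A minimal sketch of
// the pattern, with a stand-in type `L` (not part of this module):
//
//     fn check<R: MaybeResult<L>>(r: R) -> R {
//         R::from(r.to_result().map(|l| l))
//     }
//
// With `R = L`, the `let Ok(x) = x;` in the impl above is irrefutable because
// the error type is `!`; with `R = Result<L, E>`, the same code simply
// threads the error through.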

pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;

impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout { ty, details };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}

impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout { ty, details };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}

// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(
        self,
        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    ) -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
        cx.layout_of(param_env_and_ty.value)
    }
}
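
// Typical call site (sketch): given a fully monomorphic `ty: Ty<'tcx>`,
//
//     let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(ty))?;
//     let (size, align) = (layout.size, layout.align.abi);
//
// i.e. callers pair the type with a `ParamEnv` and never construct a
// `LayoutCx` themselves.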

impl ty::query::TyCtxtAt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(
        self,
        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    ) -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl<'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout: MaybeResult<TyLayout<'tcx>>>
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.kind {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!(),
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO,
                })
            }

            Variants::Multiple { ref variants, .. } => &variants[variant_index],
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout { ty: this.ty, details }
    }

    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        let discr_layout = |discr: &Scalar| -> C::TyLayout {
            let layout = LayoutDetails::scalar(cx, discr.clone());
            MaybeResult::from(Ok(TyLayout {
                details: tcx.intern_layout(layout),
                ty: discr.value.to_ty(tcx),
            }))
        };

        cx.layout_of(match this.ty.kind {
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::FnPtr(_)
            | ty::Never
            | ty::FnDef(..)
            | ty::GeneratorWitness(..)
            | ty::Foreign(..)
            | ty::Dynamic(..) => bug!("TyLayout::field_type({:?}): not applicable", this),

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat `*T` type as its own thin pointer data field.
                // This provides information about, e.g., DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                    };
                    return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(
                        |mut ptr_layout| {
                            ptr_layout.ty = this.ty;
                            ptr_layout
                        },
                    ));
                }

                match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind {
                    ty::Slice(_) | ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.usize, 3))
                        /* FIXME: use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods may
                        not be object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this),
                }
            }

            // Arrays and slices.
            ty::Array(element, _) | ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.as_closure().upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => match this.variants {
                Variants::Single { index } => substs
                    .as_generator()
                    .state_tys(def_id, tcx)
                    .nth(index.as_usize())
                    .unwrap()
                    .nth(i)
                    .unwrap(),
                Variants::Multiple { ref discr, discr_index, .. } => {
                    if i == discr_index {
                        return discr_layout(discr);
                    }
                    substs.as_generator().prefix_tys(def_id, tcx).nth(i).unwrap()
                }
            },

            ty::Tuple(tys) => tys[i].expect_ty(),

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => this.ty.simd_type(tcx),

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => def.variants[index].fields[i].ty(tcx, substs),

                    // Discriminant field for enums (where applicable).
                    Variants::Multiple { ref discr, .. } => {
                        assert_eq!(i, 0);
                        return discr_layout(discr);
                    }
                }
            }

            ty::Projection(_)
            | ty::UnnormalizedProjection(..)
            | ty::Bound(..)
            | ty::Placeholder(..)
            | ty::Opaque(..)
            | ty::Param(_)
            | ty::Infer(_)
            | ty::Error => bug!("TyLayout::field_type: unexpected type `{}`", this.ty),
        })
    }
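
    // E.g. for `&[u8]`, `field(.., 0)` is the data pointer (reusing the fat
    // pointer's own type, per the comment above) and `field(.., 1)` is the
    // `usize` length; for `&dyn Trait`, field 1 is the vtable placeholder
    // type; for a tagged enum, `field(.., 0)` is the discriminant scalar.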

    fn pointee_info_at(this: TyLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
        match this.ty.kind {
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: None,
                })
            }

            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let tcx = cx.tcx();
                let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
                let kind = match mt {
                    hir::Mutability::Not => {
                        if is_freeze {
                            PointerKind::Frozen
                        } else {
                            PointerKind::Shared
                        }
                    }
                    hir::Mutability::Mut => {
                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
                        // panic=abort mode. That was deemed right, as prior versions had many bugs
                        // in conjunction with unwinding, but later versions didn’t seem to have
                        // said issues. See issue #31681.
                        //
                        // Alas, later on we encountered a case where noalias would generate wrong
                        // code altogether even with recent versions of LLVM in *safe* code with no
                        // unwinding involved. See #54462.
                        //
                        // For now, do not enable mutable_noalias by default at all, while the
                        // issue is being figured out.
                        let mutable_noalias =
                            tcx.sess.opts.debugging_opts.mutable_noalias.unwrap_or(false);
                        if mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };

                cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: Some(kind),
                })
            }

            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes). This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        discr_kind: DiscriminantKind::Niche { dataful_variant, .. },
                        discr_index,
                        ..
                    } if this.fields.offset(discr_index) == offset => {
                        Some(this.for_variant(cx, dataful_variant))
                    }
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldPlacement::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok().and_then(|field| {
                                if ptr_end <= field_start + field.size {
                                    // We found the right field, look inside it.
                                    field.pointee_info_at(cx, offset - field_start)
                                } else {
                                    None
                                }
                            });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.kind {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        }
    }
}
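
// E.g. calling `pointee_info_at` on `Option<&u8>` at offset 0: the offset
// coincides with the niche discriminant, so the code descends into the
// dataful (`Some`) variant and reports the pointer's size and alignment with
// `safe: Some(PointerKind::Frozen)`; this is what ultimately justifies
// `dereferenceable_or_null` on such parameters (see the niche comment above).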

impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
        use crate::ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
        }
    }
}

impl<'tcx> ty::Instance<'tcx> {
    // NOTE(eddyb) this is private to avoid using it from outside of
    // `FnAbi::of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
    // or should go through `FnAbi` instead, to avoid losing any
    // adjustments `FnAbi::of_instance` might be performing.
    fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
        let ty = self.monomorphic_ty(tcx);
        match ty.kind {
            ty::FnDef(..) |
            // Shims currently have type FnPtr. Not sure this should remain.
            ty::FnPtr(_) => {
                let mut sig = ty.fn_sig(tcx);
                if let ty::InstanceDef::VtableShim(..) = self.def {
                    // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
                    sig = sig.map_bound(|mut sig| {
                        let mut inputs_and_output = sig.inputs_and_output.to_vec();
                        inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                        sig
                    });
                }
                sig
            }
            ty::Closure(def_id, substs) => {
                let sig = substs.as_closure().sig(def_id, tcx);

                let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
                sig.map_bound(|sig| tcx.mk_fn_sig(
                    iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
                    sig.output(),
                    sig.c_variadic,
                    sig.unsafety,
                    sig.abi
                ))
            }
            ty::Generator(def_id, substs, _) => {
                let sig = substs.as_generator().poly_sig(def_id, tcx);

                let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

                let pin_did = tcx.lang_items().pin_type().unwrap();
                let pin_adt_ref = tcx.adt_def(pin_did);
                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

                sig.map_bound(|sig| {
                    let state_did = tcx.lang_items().gen_state().unwrap();
                    let state_adt_ref = tcx.adt_def(state_did);
                    let state_substs = tcx.intern_substs(&[
                        sig.yield_ty.into(),
                        sig.return_ty.into(),
                    ]);
                    let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);

                    tcx.mk_fn_sig(
                        [env_ty, sig.resume_ty].iter(),
                        &ret_ty,
                        false,
                        hir::Unsafety::Normal,
                        rustc_target::spec::abi::Abi::Rust
                    )
                })
            }
            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty)
        }
    }
}
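
// E.g. for a generator `G` with resume type `R`, yield type `Y` and return
// type `T`, the signature built above is
// `fn(Pin<&mut G>, R) -> GeneratorState<Y, T>`; for a closure, the untupled
// environment type is simply prepended to the closure's own inputs.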

pub trait FnAbiExt<'tcx, C>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
    ///
    /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
    /// instead, where the instance is an `InstanceDef::Virtual`.
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
    /// direct calls to an `fn`.
    ///
    /// NB: that includes virtual calls, which are represented by "direct calls"
    /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
    ) -> Self;
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
}

impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        call::FnAbi::new_internal(cx, sig, extra_args, None, |ty, _| ArgAbi::new(cx.layout_of(ty)))
    }

    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        let sig = instance.fn_sig_for_fn_abi(cx.tcx());

        let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
            Some(cx.tcx().caller_location_ty())
        } else {
            None
        };

        call::FnAbi::new_internal(cx, sig, extra_args, caller_location, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait`, because this is special-cased elsewhere in codegen.
            if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // unsized `self` is passed as a pointer to `self`
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx().mk_mut_ptr(layout.ty)
                } else {
                    match layout.abi {
                        Abi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout),
                    }

                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
                    // elsewhere in the compiler as a method on a `dyn Trait`.
                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until
                    // we get a built-in pointer type.
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes;
                            }
                        }

                        bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
                    }

                    fat_pointer_layout.ty
                };

                // we now have a type like `*mut RcBox<dyn Trait>`
                // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
                // this is understood as a special case elsewhere in the compiler
                let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgAbi::new(layout)
        })
    }

    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);

        let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);

        use rustc_target::spec::abi::Abi::*;
        let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),
            EfiApi => bug!("eficall abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.c_variadic && extra_args.is_empty());

            if let Some(input) = sig.inputs().last() {
                if let ty::Tuple(tupled_arguments) = input.kind {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.iter().map(|k| k.expect_ty()).collect()
                } else {
                    bug!(
                        "argument to function with \"rust-call\" ABI \
                         is not a tuple"
                    );
                }
            } else {
                bug!(
                    "argument to function with \"rust-call\" ABI \
                     is not a tuple"
                );
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args.to_vec()
        };
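
        // E.g. a closure invoked as `extern "rust-call" fn(self, args: (A, B))`
        // is flattened here, so the final `FnAbi` sees the argument list
        // `[self, A, B]` rather than a by-value tuple.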

        let target = &cx.tcx().sess.target.target;
        let target_env_gnu_like = matches!(&target.target_env[..], "gnu" | "musl");
        let win_x64_gnu =
            target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
        let linux_s390x_gnu_like =
            target.target_os == "linux" && target.arch == "s390x" && target_env_gnu_like;
        let linux_sparc64_gnu_like =
            target.target_os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
        let linux_powerpc_gnu_like =
            target.target_os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false,
        };

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &Scalar,
                                      layout: TyLayout<'tcx>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types are handled below.
            if scalar.value != Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
                    // for the entire duration of the function, as they can be deallocated
                    // at any time. Set their valid size to 0.
                    attrs.pointee_size = match kind {
                        PointerKind::UniqueOwned => Size::ZERO,
                        _ => pointee.size,
                    };

                    // `Box` pointer parameters never alias because ownership is transferred;
                    // `&mut` pointer parameters never alias other parameters
                    // or mutable global data.
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality.
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };
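
        // E.g. for a `&i32` argument: the scalar's valid range excludes null,
        // so it gets `NonNull`; the pointee is `Freeze`, so the
        // `PointerKind::Frozen` case above additionally yields `NoAlias` and
        // `ReadOnly` (neither is applied to return values).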

        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
                if is_return
                    || rust_abi
                    || (!win_x64_gnu
                        && !linux_s390x_gnu_like
                        && !linux_sparc64_gnu_like
                        && !linux_powerpc_gnu_like)
                {
                    arg.mode = PassMode::Ignore;
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
                    adjust_for_rust_scalar(
                        &mut b_attrs,
                        b,
                        arg.layout,
                        a.value.size(cx).align_to(b.value.align(cx).abi),
                        false,
                    );
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
                }
            }

            arg
        };

        let mut fn_abi = FnAbi {
            ret: arg_of(sig.output(), None),
            args: inputs
                .iter()
                .cloned()
                .chain(extra_args)
                .chain(caller_location)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect(),
            c_variadic: sig.c_variadic,
            fixed_count: inputs.len(),
            conv,
        };
        fn_abi.adjust_for_abi(cx, sig.abi);
        fn_abi
    }

    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
        if abi == SpecAbi::Unadjusted {
            return;
        }

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
        {
            let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() {
                    return;
                }

                match arg.layout.abi {
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway; we control all calls to it in libstd.
                    Abi::Vector { .. }
                        if abi != SpecAbi::PlatformIntrinsic
                            && cx.tcx().sess.target.target.options.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return;
                    }

                    _ => return,
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg { kind: RegKind::Integer, size });
                }
            };
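            // E.g. on a 64-bit target, a `(u8, u16)` argument (size 4) is
            // cast to a single `i32`-sized integer register by the `Reg`
            // branch above, while an aggregate larger than a pointer, or an
            // unsized one, is passed indirectly.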
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.tcx().sess.fatal(&msg);
        }
    }
}