]> git.proxmox.com Git - rustc.git/blame - compiler/rustc_middle/src/ty/layout.rs
New upstream version 1.50.0+dfsg1
[rustc.git] / compiler / rustc_middle / src / ty / layout.rs
CommitLineData
ba9703b0
XL
1use crate::ich::StableHashingContext;
2use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
3use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
4use crate::ty::subst::Subst;
dfeec247 5use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
54a0048b 6
3dfed10e 7use rustc_ast::{self as ast, IntTy, UintTy};
74b04a01 8use rustc_attr as attr;
e74abb32 9use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
dfeec247 10use rustc_hir as hir;
3dfed10e 11use rustc_hir::lang_items::LangItem;
dfeec247
XL
12use rustc_index::bit_set::BitSet;
13use rustc_index::vec::{Idx, IndexVec};
ba9703b0 14use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
f9f354fc 15use rustc_span::symbol::{Ident, Symbol};
ba9703b0 16use rustc_span::DUMMY_SP;
48663c56 17use rustc_target::abi::call::{
fc512014 18 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
48663c56 19};
ba9703b0
XL
20use rustc_target::abi::*;
21use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};
22
23use std::cmp;
24use std::fmt;
25use std::iter;
26use std::mem;
27use std::num::NonZeroUsize;
28use std::ops::Bound;
48663c56 29
/// Extension methods on `rustc_target::abi::Integer` that need access to
/// `rustc_middle`'s type context (`TyCtxt`) and so cannot live in `rustc_target`.
pub trait IntegerExt {
    /// Maps this integer size plus a signedness flag to the corresponding
    /// Rust primitive integer type (`u8`/`i8` through `u128`/`i128`).
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;

    /// Gets the `Integer` matching an `attr::IntType` from a `#[repr(...)]`
    /// attribute, resolving `isize`/`usize` via the target's data layout.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;

    /// Finds the appropriate `Integer` type and signedness for the given
    /// discriminant range `[min, max]` of an enum, taking its `#[repr]`
    /// attribute into account.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}
41
83c7162d 42impl IntegerExt for Integer {
dc9dc135 43 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
9e0c209e 44 match (*self, signed) {
9e0c209e
SL
45 (I8, false) => tcx.types.u8,
46 (I16, false) => tcx.types.u16,
47 (I32, false) => tcx.types.u32,
48 (I64, false) => tcx.types.u64,
32a655c1 49 (I128, false) => tcx.types.u128,
9e0c209e
SL
50 (I8, true) => tcx.types.i8,
51 (I16, true) => tcx.types.i16,
52 (I32, true) => tcx.types.i32,
53 (I64, true) => tcx.types.i64,
32a655c1 54 (I128, true) => tcx.types.i128,
9e0c209e
SL
55 }
56 }
57
9fa01778 58 /// Gets the Integer type from an attr::IntType.
a1dfa0c6 59 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
cc61c64b
XL
60 let dl = cx.data_layout();
61
54a0048b
SL
62 match ity {
63 attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
64 attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
65 attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
66 attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
32a655c1 67 attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
2c00a5a8 68 attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
54a0048b
SL
69 dl.ptr_sized_integer()
70 }
71 }
72 }
73
9fa01778 74 /// Finds the appropriate Integer type and signedness for the given
f9f354fc
XL
75 /// signed discriminant range and `#[repr]` attribute.
76 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
54a0048b 77 /// that shouldn't affect anything, other than maybe debuginfo.
dc9dc135
XL
78 fn repr_discr<'tcx>(
79 tcx: TyCtxt<'tcx>,
80 ty: Ty<'tcx>,
81 repr: &ReprOptions,
82 min: i128,
83 max: i128,
84 ) -> (Integer, bool) {
54a0048b
SL
85 // Theoretically, negative values could be larger in unsigned representation
86 // than the unsigned representation of the signed minimum. However, if there
ff7c6d11
XL
87 // are any negative values, the only valid unsigned representation is u128
88 // which can fit all i128 values, so the result remains unaffected.
89 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
54a0048b
SL
90 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
91
476ff2be
SL
92 let mut min_from_extern = None;
93 let min_default = I8;
94
8bb4bdeb 95 if let Some(ity) = repr.int {
a1dfa0c6 96 let discr = Integer::from_attr(&tcx, ity);
8bb4bdeb
XL
97 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
98 if discr < fit {
dfeec247
XL
99 bug!(
100 "Integer::repr_discr: `#[repr]` hint too small for \
101 discriminant range of enum `{}",
102 ty
103 )
8bb4bdeb
XL
104 }
105 return (discr, ity.is_signed());
106 }
107
cc61c64b 108 if repr.c() {
29967ef6 109 match &tcx.sess.target.arch[..] {
8bb4bdeb
XL
110 // WARNING: the ARM EABI has two variants; the one corresponding
111 // to `at_least == I32` appears to be used on Linux and NetBSD,
112 // but some systems may use the variant corresponding to no
0bf4aa26 113 // lower bound. However, we don't run on those yet...?
8bb4bdeb
XL
114 "arm" => min_from_extern = Some(I32),
115 _ => min_from_extern = Some(I32),
54a0048b 116 }
476ff2be
SL
117 }
118
119 let at_least = min_from_extern.unwrap_or(min_default);
54a0048b
SL
120
121 // If there are no negative values, we can use the unsigned fit.
122 if min >= 0 {
123 (cmp::max(unsigned_fit, at_least), false)
124 } else {
125 (cmp::max(signed_fit, at_least), true)
126 }
127 }
128}
129
/// Extension methods on `rustc_target::abi::Primitive` that need `TyCtxt`
/// and so cannot live in `rustc_target`.
pub trait PrimitiveExt {
    /// Returns the Rust type that corresponds to this primitive
    /// (integers, floats, or a raw pointer for `Pointer`).
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;

    /// Returns an *integer* type matching this primitive; panics on floats.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}
134
83c7162d 135impl PrimitiveExt for Primitive {
dc9dc135 136 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
ff7c6d11
XL
137 match *self {
138 Int(i, signed) => i.to_ty(tcx, signed),
60c5eb7d
XL
139 F32 => tcx.types.f32,
140 F64 => tcx.types.f64,
b7449926 141 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
ff7c6d11
XL
142 }
143 }
e1599b0c
XL
144
145 /// Return an *integer* type matching this primitive.
146 /// Useful in particular when dealing with enum discriminants.
147 fn to_int_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
148 match *self {
149 Int(i, signed) => i.to_ty(tcx, signed),
150 Pointer => tcx.types.usize,
60c5eb7d 151 F32 | F64 => bug!("floats do not have an int type"),
e1599b0c
XL
152 }
153 }
54a0048b
SL
154}
155
ff7c6d11
XL
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
///
/// NOTE(review): presumably used as the field index of the data component
/// in the (ptr, meta) scalar pair built by `scalar_pair` — confirm at call sites.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
476ff2be 167
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    /// The layout of the type could not be computed — e.g. it still contains
    /// unresolved projections after normalization, or its tail is not a
    /// known unsized kind (see `layout_raw_uncached`).
    Unknown(Ty<'tcx>),
    /// A size computation (`checked_mul`/`checked_add` against the data
    /// layout) overflowed the target's object-size bound.
    SizeOverflow(Ty<'tcx>),
}
173
174impl<'tcx> fmt::Display for LayoutError<'tcx> {
0bf4aa26 175 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
54a0048b 176 match *self {
1b1a35ee 177 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
54a0048b 178 LayoutError::SizeOverflow(ty) => {
fc512014 179 write!(f, "values of the type `{}` are too big for the current architecture", ty)
54a0048b
SL
180 }
181 }
182 }
183}
184
dc9dc135
XL
/// Query provider for `layout_raw`: computes the `Layout` of the type in
/// `query`, guarding against unbounded recursion.
///
/// The current recursion depth is carried in the thread-local `ImplicitCtxt`
/// (`layout_depth`); exceeding the session's recursion limit aborts with a
/// fatal "overflow representing the type" error instead of overflowing the
/// native stack on infinitely-sized types.
fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth for the
        // duration of the nested computation.
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}
212
f035d41b 213pub fn provide(providers: &mut ty::query::Providers) {
dfeec247 214 *providers = ty::query::Providers { layout_raw, ..*providers };
ff7c6d11
XL
215}
216
2c00a5a8
XL
/// Context for layout computations: bundles a (possibly wrapped) `TyCtxt`
/// with the parameter environment that types are normalized against.
pub struct LayoutCx<'tcx, C> {
    /// The type context (generic so callers can wrap `TyCtxt`).
    pub tcx: C,
    /// Environment used for normalization and `is_sized` checks.
    pub param_env: ty::ParamEnv<'tcx>,
}
221
dc9dc135
XL
/// How a univariant aggregate's fields may be laid out; controls field
/// reordering and whether the last field must stay last (see
/// `univariant_uninterned`).
#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}
231
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for (source, &target) in map.iter().enumerate() {
        inverse[target as usize] = source as u32;
    }
    inverse
}
ff7c6d11 244
dc9dc135 245impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
ba9703b0 246 fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
dc9dc135
XL
247 let dl = self.data_layout();
248 let b_align = b.value.align(dl);
249 let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
250 let b_offset = a.value.size(dl).align_to(b_align.abi);
251 let size = (b_offset + b.value.size(dl)).align_to(align.abi);
416331ca
XL
252
253 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
254 // returns the last maximum.
255 let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
256 .into_iter()
257 .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
258 .max_by_key(|niche| niche.available(dl));
259
ba9703b0 260 Layout {
dc9dc135 261 variants: Variants::Single { index: VariantIdx::new(0) },
ba9703b0 262 fields: FieldsShape::Arbitrary {
dc9dc135 263 offsets: vec![Size::ZERO, b_offset],
dfeec247 264 memory_index: vec![0, 1],
dc9dc135
XL
265 },
266 abi: Abi::ScalarPair(a, b),
416331ca 267 largest_niche,
dc9dc135 268 align,
dfeec247 269 size,
ff7c6d11 270 }
dc9dc135 271 }
0bf4aa26 272
dfeec247
XL
273 fn univariant_uninterned(
274 &self,
275 ty: Ty<'tcx>,
ba9703b0 276 fields: &[TyAndLayout<'_>],
dfeec247
XL
277 repr: &ReprOptions,
278 kind: StructKind,
ba9703b0 279 ) -> Result<Layout, LayoutError<'tcx>> {
dc9dc135 280 let dl = self.data_layout();
e1599b0c
XL
281 let pack = repr.pack;
282 if pack.is_some() && repr.align.is_some() {
dc9dc135
XL
283 bug!("struct cannot be packed and aligned");
284 }
ff7c6d11 285
dfeec247 286 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
ff7c6d11 287
dc9dc135 288 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
ff7c6d11 289
ba9703b0 290 let optimize = !repr.inhibit_struct_field_reordering_opt();
dc9dc135 291 if optimize {
dfeec247
XL
292 let end =
293 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
dc9dc135 294 let optimizing = &mut inverse_memory_index[..end];
ba9703b0 295 let field_align = |f: &TyAndLayout<'_>| {
e1599b0c 296 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
dc9dc135
XL
297 };
298 match kind {
dfeec247 299 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
dc9dc135
XL
300 optimizing.sort_by_key(|&x| {
301 // Place ZSTs first to avoid "interesting offsets",
302 // especially with only one or two non-ZST fields.
303 let f = &fields[x as usize];
304 (!f.is_zst(), cmp::Reverse(field_align(f)))
305 });
306 }
307 StructKind::Prefixed(..) => {
ba9703b0
XL
308 // Sort in ascending alignment so that the layout stay optimal
309 // regardless of the prefix
dc9dc135 310 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
ea8adc8c 311 }
ff7c6d11 312 }
dc9dc135 313 }
ea8adc8c 314
dc9dc135
XL
315 // inverse_memory_index holds field indices by increasing memory offset.
316 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
317 // We now write field offsets to the corresponding offset slot;
318 // field 5 with offset 0 puts 0 in offsets[5].
319 // At the bottom of this function, we invert `inverse_memory_index` to
320 // produce `memory_index` (see `invert_mapping`).
ff7c6d11 321
ba9703b0
XL
322 let mut sized = true;
323 let mut offsets = vec![Size::ZERO; fields.len()];
dc9dc135 324 let mut offset = Size::ZERO;
416331ca
XL
325 let mut largest_niche = None;
326 let mut largest_niche_available = 0;
ff7c6d11 327
dc9dc135 328 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
dfeec247
XL
329 let prefix_align =
330 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
dc9dc135
XL
331 align = align.max(AbiAndPrefAlign::new(prefix_align));
332 offset = prefix_size.align_to(prefix_align);
333 }
ff7c6d11 334
dc9dc135
XL
335 for &i in &inverse_memory_index {
336 let field = fields[i as usize];
337 if !sized {
dfeec247 338 bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
dc9dc135 339 }
ff7c6d11 340
dc9dc135
XL
341 if field.is_unsized() {
342 sized = false;
343 }
ff7c6d11 344
dc9dc135 345 // Invariant: offset < dl.obj_size_bound() <= 1<<61
e1599b0c 346 let field_align = if let Some(pack) = pack {
dc9dc135
XL
347 field.align.min(AbiAndPrefAlign::new(pack))
348 } else {
349 field.align
350 };
351 offset = offset.align_to(field_align.abi);
352 align = align.max(field_align);
ff7c6d11 353
dc9dc135
XL
354 debug!("univariant offset: {:?} field: {:#?}", offset, field);
355 offsets[i as usize] = offset;
ff7c6d11 356
74b04a01
XL
357 if !repr.hide_niche() {
358 if let Some(mut niche) = field.largest_niche.clone() {
359 let available = niche.available(dl);
360 if available > largest_niche_available {
361 largest_niche_available = available;
362 niche.offset += offset;
363 largest_niche = Some(niche);
364 }
416331ca
XL
365 }
366 }
367
dfeec247 368 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
dc9dc135 369 }
ff7c6d11 370
e1599b0c
XL
371 if let Some(repr_align) = repr.align {
372 align = align.max(AbiAndPrefAlign::new(repr_align));
dc9dc135 373 }
ff7c6d11 374
dc9dc135
XL
375 debug!("univariant min_size: {:?}", offset);
376 let min_size = offset;
ff7c6d11 377
dc9dc135
XL
378 // As stated above, inverse_memory_index holds field indices by increasing offset.
379 // This makes it an already-sorted view of the offsets vec.
380 // To invert it, consider:
381 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
382 // Field 5 would be the first element, so memory_index is i:
383 // Note: if we didn't optimize, it's already right.
ff7c6d11 384
ba9703b0
XL
385 let memory_index =
386 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
dc9dc135
XL
387
388 let size = min_size.align_to(align.abi);
389 let mut abi = Abi::Aggregate { sized };
390
391 // Unpack newtype ABIs and find scalar pairs.
392 if sized && size.bytes() > 0 {
1b1a35ee
XL
393 // All other fields must be ZSTs.
394 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
395
396 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
397 // We have exactly one non-ZST field.
398 (Some((i, field)), None, None) => {
399 // Field fills the struct and it has a scalar or scalar pair ABI.
400 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
401 {
402 match field.abi {
403 // For plain scalars, or vectors of them, we can't unpack
404 // newtypes for `#[repr(C)]`, as that affects C ABIs.
405 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
406 abi = field.abi.clone();
ff7c6d11 407 }
1b1a35ee
XL
408 // But scalar pairs are Rust-specific and get
409 // treated as aggregates by C ABIs anyway.
410 Abi::ScalarPair(..) => {
411 abi = field.abi.clone();
412 }
413 _ => {}
ff7c6d11 414 }
dc9dc135 415 }
1b1a35ee 416 }
ff7c6d11 417
1b1a35ee
XL
418 // Two non-ZST fields, and they're both scalars.
419 (
420 Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
421 Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
422 None,
423 ) => {
424 // Order by the memory placement, not source order.
425 let ((i, a), (j, b)) =
426 if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
427 let pair = self.scalar_pair(a.clone(), b.clone());
428 let pair_offsets = match pair.fields {
429 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
430 assert_eq!(memory_index, &[0, 1]);
431 offsets
ff7c6d11 432 }
1b1a35ee
XL
433 _ => bug!(),
434 };
435 if offsets[i] == pair_offsets[0]
436 && offsets[j] == pair_offsets[1]
437 && align == pair.align
438 && size == pair.size
439 {
440 // We can use `ScalarPair` only when it matches our
441 // already computed layout (including `#[repr(C)]`).
442 abi = pair.abi;
ff7c6d11
XL
443 }
444 }
1b1a35ee
XL
445
446 _ => {}
ff7c6d11 447 }
dc9dc135 448 }
ff7c6d11 449
dc9dc135
XL
450 if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
451 abi = Abi::Uninhabited;
452 }
83c7162d 453
ba9703b0 454 Ok(Layout {
dc9dc135 455 variants: Variants::Single { index: VariantIdx::new(0) },
ba9703b0 456 fields: FieldsShape::Arbitrary { offsets, memory_index },
dc9dc135 457 abi,
416331ca 458 largest_niche,
dc9dc135 459 align,
dfeec247 460 size,
dc9dc135
XL
461 })
462 }
463
ba9703b0 464 fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
dc9dc135
XL
465 let tcx = self.tcx;
466 let param_env = self.param_env;
467 let dl = self.data_layout();
468 let scalar_unit = |value: Primitive| {
469 let bits = value.size(dl).bits();
470 assert!(bits <= 128);
dfeec247 471 Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
ff7c6d11 472 };
ba9703b0 473 let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
dc9dc135 474
ba9703b0 475 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
dc9dc135 476 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
ff7c6d11 477 };
74b04a01 478 debug_assert!(!ty.has_infer_types_or_consts());
ff7c6d11 479
1b1a35ee 480 Ok(match *ty.kind() {
ff7c6d11 481 // Basic scalars.
ba9703b0 482 ty::Bool => tcx.intern_layout(Layout::scalar(
dfeec247
XL
483 self,
484 Scalar { value: Int(I8, false), valid_range: 0..=1 },
485 )),
ba9703b0 486 ty::Char => tcx.intern_layout(Layout::scalar(
dfeec247
XL
487 self,
488 Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
489 )),
490 ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
491 ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
60c5eb7d
XL
492 ty::Float(fty) => scalar(match fty {
493 ast::FloatTy::F32 => F32,
494 ast::FloatTy::F64 => F64,
495 }),
b7449926 496 ty::FnPtr(_) => {
ff7c6d11 497 let mut ptr = scalar_unit(Pointer);
83c7162d 498 ptr.valid_range = 1..=*ptr.valid_range.end();
ba9703b0 499 tcx.intern_layout(Layout::scalar(self, ptr))
ff7c6d11
XL
500 }
501
502 // The never type.
ba9703b0 503 ty::Never => tcx.intern_layout(Layout {
dfeec247 504 variants: Variants::Single { index: VariantIdx::new(0) },
ba9703b0 505 fields: FieldsShape::Primitive,
dfeec247
XL
506 abi: Abi::Uninhabited,
507 largest_niche: None,
508 align: dl.i8_align,
509 size: Size::ZERO,
510 }),
ff7c6d11 511
f035d41b 512 // Potentially-wide pointers.
dfeec247 513 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
ff7c6d11
XL
514 let mut data_ptr = scalar_unit(Pointer);
515 if !ty.is_unsafe_ptr() {
83c7162d 516 data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
ff7c6d11
XL
517 }
518
0531ce1d
XL
519 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
520 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
ba9703b0 521 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
54a0048b 522 }
ff7c6d11 523
416331ca 524 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1b1a35ee 525 let metadata = match unsized_part.kind() {
b7449926 526 ty::Foreign(..) => {
ba9703b0 527 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
ff7c6d11 528 }
dfeec247 529 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
b7449926 530 ty::Dynamic(..) => {
ff7c6d11 531 let mut vtable = scalar_unit(Pointer);
83c7162d 532 vtable.valid_range = 1..=*vtable.valid_range.end();
ff7c6d11
XL
533 vtable
534 }
dfeec247 535 _ => return Err(LayoutError::Unknown(unsized_part)),
ff7c6d11
XL
536 };
537
538 // Effectively a (ptr, meta) tuple.
dc9dc135 539 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
ff7c6d11
XL
540 }
541
542 // Arrays and slices.
b7449926 543 ty::Array(element, mut count) => {
ff7c6d11 544 if count.has_projections() {
0531ce1d 545 count = tcx.normalize_erasing_regions(param_env, count);
ff7c6d11
XL
546 if count.has_projections() {
547 return Err(LayoutError::Unknown(ty));
548 }
549 }
550
416331ca 551 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
2c00a5a8 552 let element = self.layout_of(element)?;
dfeec247
XL
553 let size =
554 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
ff7c6d11 555
0731742a
XL
556 let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
557 Abi::Uninhabited
558 } else {
559 Abi::Aggregate { sized: true }
560 };
561
dfeec247 562 let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
416331ca 563
ba9703b0 564 tcx.intern_layout(Layout {
a1dfa0c6 565 variants: Variants::Single { index: VariantIdx::new(0) },
ba9703b0 566 fields: FieldsShape::Array { stride: element.size, count },
0731742a 567 abi,
416331ca 568 largest_niche,
ff7c6d11 569 align: element.align,
dfeec247 570 size,
ff7c6d11
XL
571 })
572 }
b7449926 573 ty::Slice(element) => {
2c00a5a8 574 let element = self.layout_of(element)?;
ba9703b0 575 tcx.intern_layout(Layout {
a1dfa0c6 576 variants: Variants::Single { index: VariantIdx::new(0) },
ba9703b0 577 fields: FieldsShape::Array { stride: element.size, count: 0 },
ff7c6d11 578 abi: Abi::Aggregate { sized: false },
416331ca 579 largest_niche: None,
ff7c6d11 580 align: element.align,
dfeec247 581 size: Size::ZERO,
ff7c6d11 582 })
54a0048b 583 }
ba9703b0 584 ty::Str => tcx.intern_layout(Layout {
dfeec247 585 variants: Variants::Single { index: VariantIdx::new(0) },
ba9703b0 586 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
dfeec247
XL
587 abi: Abi::Aggregate { sized: false },
588 largest_niche: None,
589 align: dl.i8_align,
590 size: Size::ZERO,
591 }),
54a0048b
SL
592
593 // Odd unit types.
dfeec247 594 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
b7449926 595 ty::Dynamic(..) | ty::Foreign(..) => {
dfeec247
XL
596 let mut unit = self.univariant_uninterned(
597 ty,
598 &[],
599 &ReprOptions::default(),
600 StructKind::AlwaysSized,
601 )?;
ff7c6d11
XL
602 match unit.abi {
603 Abi::Aggregate { ref mut sized } => *sized = false,
dfeec247 604 _ => bug!(),
ff7c6d11
XL
605 }
606 tcx.intern_layout(unit)
54a0048b
SL
607 }
608
e74abb32 609 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
ea8adc8c 610
ba9703b0
XL
611 ty::Closure(_, ref substs) => {
612 let tys = substs.as_closure().upvar_tys();
dfeec247
XL
613 univariant(
614 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
8bb4bdeb 615 &ReprOptions::default(),
dfeec247
XL
616 StructKind::AlwaysSized,
617 )?
476ff2be
SL
618 }
619
b7449926 620 ty::Tuple(tys) => {
dfeec247
XL
621 let kind =
622 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
041b39d2 623
dfeec247
XL
624 univariant(
625 &tys.iter()
626 .map(|k| self.layout_of(k.expect_ty()))
627 .collect::<Result<Vec<_>, _>>()?,
628 &ReprOptions::default(),
629 kind,
630 )?
54a0048b
SL
631 }
632
9e0c209e 633 // SIMD vector types.
fc512014
XL
634 ty::Adt(def, substs) if def.repr.simd() => {
635 // Supported SIMD vectors are homogeneous ADTs with at least one field:
636 //
637 // * #[repr(simd)] struct S(T, T, T, T);
638 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
639 // * #[repr(simd)] struct S([T; 4])
640 //
641 // where T is a primitive scalar (integer/float/pointer).
642
643 // SIMD vectors with zero fields are not supported.
644 // (should be caught by typeck)
645 if def.non_enum_variant().fields.is_empty() {
646 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
647 }
648
649 // Type of the first ADT field:
650 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
651
652 // Heterogeneous SIMD vectors are not supported:
653 // (should be caught by typeck)
654 for fi in &def.non_enum_variant().fields {
655 if fi.ty(tcx, substs) != f0_ty {
656 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
657 }
658 }
659
660 // The element type and number of elements of the SIMD vector
661 // are obtained from:
662 //
663 // * the element type and length of the single array field, if
664 // the first field is of array type, or
665 //
666 // * the homogenous field type and the number of fields.
667 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
668 // First ADT field is an array:
669
670 // SIMD vectors with multiple array fields are not supported:
671 // (should be caught by typeck)
672 if def.non_enum_variant().fields.len() != 1 {
dfeec247 673 tcx.sess.fatal(&format!(
fc512014
XL
674 "monomorphising SIMD type `{}` with more than one array field",
675 ty
dfeec247 676 ));
54a0048b 677 }
fc512014
XL
678
679 // Extract the number of elements from the layout of the array field:
680 let len = if let Ok(TyAndLayout {
681 layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
682 ..
683 }) = self.layout_of(f0_ty)
684 {
685 count
686 } else {
687 return Err(LayoutError::Unknown(ty));
688 };
689
690 (*e_ty, *len, true)
691 } else {
692 // First ADT field is not an array:
693 (f0_ty, def.non_enum_variant().fields.len() as _, false)
ff7c6d11 694 };
fc512014
XL
695
696 // SIMD vectors of zero length are not supported.
697 //
698 // Can't be caught in typeck if the array length is generic.
699 if e_len == 0 {
700 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
701 }
702
703 // Compute the ABI of the element type:
704 let e_ly = self.layout_of(e_ty)?;
705 let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
706 scalar.clone()
707 } else {
708 // This error isn't caught in typeck, e.g., if
709 // the element type of the vector is generic.
710 tcx.sess.fatal(&format!(
711 "monomorphising SIMD type `{}` with a non-primitive-scalar \
712 (integer/float/pointer) element type `{}`",
713 ty, e_ty
714 ))
715 };
716
717 // Compute the size and alignment of the vector:
718 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
ff7c6d11 719 let align = dl.vector_align(size);
a1dfa0c6 720 let size = size.align_to(align.abi);
ff7c6d11 721
fc512014
XL
722 // Compute the placement of the vector fields:
723 let fields = if is_array {
724 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
725 } else {
726 FieldsShape::Array { stride: e_ly.size, count: e_len }
727 };
728
ba9703b0 729 tcx.intern_layout(Layout {
a1dfa0c6 730 variants: Variants::Single { index: VariantIdx::new(0) },
fc512014
XL
731 fields,
732 abi: Abi::Vector { element: e_abi, count: e_len },
733 largest_niche: e_ly.largest_niche.clone(),
ff7c6d11
XL
734 size,
735 align,
736 })
54a0048b 737 }
9e0c209e
SL
738
739 // ADTs.
b7449926 740 ty::Adt(def, substs) => {
ff7c6d11 741 // Cache the field layouts.
dfeec247
XL
742 let variants = def
743 .variants
744 .iter()
745 .map(|v| {
746 v.fields
747 .iter()
748 .map(|field| self.layout_of(field.ty(tcx, substs)))
749 .collect::<Result<Vec<_>, _>>()
750 })
751 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
54a0048b 752
ff7c6d11 753 if def.is_union() {
e1599b0c
XL
754 if def.repr.pack.is_some() && def.repr.align.is_some() {
755 bug!("union cannot be packed and aligned");
ff7c6d11
XL
756 }
757
dfeec247
XL
758 let mut align =
759 if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
54a0048b 760
e1599b0c
XL
761 if let Some(repr_align) = def.repr.align {
762 align = align.max(AbiAndPrefAlign::new(repr_align));
54a0048b
SL
763 }
764
a1dfa0c6 765 let optimize = !def.repr.inhibit_union_abi_opt();
94b46f34 766 let mut size = Size::ZERO;
a1dfa0c6
XL
767 let mut abi = Abi::Aggregate { sized: true };
768 let index = VariantIdx::new(0);
769 for field in &variants[index] {
ff7c6d11 770 assert!(!field.is_unsized());
e1599b0c 771 align = align.max(field.align);
a1dfa0c6
XL
772
773 // If all non-ZST fields have the same ABI, forward this ABI
774 if optimize && !field.is_zst() {
775 // Normalize scalar_unit to the maximal valid range
776 let field_abi = match &field.abi {
777 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
778 Abi::ScalarPair(x, y) => {
dfeec247 779 Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
a1dfa0c6
XL
780 }
781 Abi::Vector { element: x, count } => {
dfeec247
XL
782 Abi::Vector { element: scalar_unit(x.value), count: *count }
783 }
784 Abi::Uninhabited | Abi::Aggregate { .. } => {
785 Abi::Aggregate { sized: true }
a1dfa0c6 786 }
a1dfa0c6
XL
787 };
788
789 if size == Size::ZERO {
790 // first non ZST: initialize 'abi'
791 abi = field_abi;
dfeec247 792 } else if abi != field_abi {
a1dfa0c6
XL
793 // different fields have different ABI: reset to Aggregate
794 abi = Abi::Aggregate { sized: true };
795 }
ff7c6d11 796 }
a1dfa0c6 797
ff7c6d11
XL
798 size = cmp::max(size, field.size);
799 }
800
e1599b0c
XL
801 if let Some(pack) = def.repr.pack {
802 align = align.min(AbiAndPrefAlign::new(pack));
803 }
804
ba9703b0 805 return Ok(tcx.intern_layout(Layout {
a1dfa0c6 806 variants: Variants::Single { index },
ba9703b0
XL
807 fields: FieldsShape::Union(
808 NonZeroUsize::new(variants[index].len())
809 .ok_or(LayoutError::Unknown(ty))?,
810 ),
a1dfa0c6 811 abi,
416331ca 812 largest_niche: None,
ff7c6d11 813 align,
dfeec247 814 size: size.align_to(align.abi),
ff7c6d11
XL
815 }));
816 }
817
83c7162d
XL
818 // A variant is absent if it's uninhabited and only has ZST fields.
819 // Present uninhabited variants only require space for their fields,
0731742a 820 // but *not* an encoding of the discriminant (e.g., a tag value).
83c7162d
XL
821 // See issue #49298 for more details on the need to leave space
822 // for non-ZST uninhabited data (mostly partial initialization).
ba9703b0 823 let absent = |fields: &[TyAndLayout<'_>]| {
0bf4aa26 824 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
83c7162d
XL
825 let is_zst = fields.iter().all(|f| f.is_zst());
826 uninhabited && is_zst
827 };
828 let (present_first, present_second) = {
dfeec247
XL
829 let mut present_variants = variants
830 .iter_enumerated()
831 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
83c7162d 832 (present_variants.next(), present_variants.next())
ff7c6d11 833 };
e74abb32 834 let present_first = match present_first {
3dfed10e 835 Some(present_first) => present_first,
83c7162d 836 // Uninhabited because it has no variants, or only absent ones.
e74abb32 837 None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
ba9703b0
XL
838 // If it's a struct, still compute a layout so that we can still compute the
839 // field offsets.
3dfed10e 840 None => VariantIdx::new(0),
e74abb32 841 };
54a0048b 842
ff7c6d11 843 let is_struct = !def.is_enum() ||
83c7162d
XL
844 // Only one variant is present.
845 (present_second.is_none() &&
ff7c6d11 846 // Representation optimizations are allowed.
0bf4aa26 847 !def.repr.inhibit_enum_layout_opt());
ff7c6d11
XL
848 if is_struct {
849 // Struct, or univariant enum equivalent to a struct.
9e0c209e
SL
850 // (Typechecking will reject discriminant-sizing attrs.)
851
3dfed10e 852 let v = present_first;
74b04a01 853 let kind = if def.is_enum() || variants[v].is_empty() {
ff7c6d11 854 StructKind::AlwaysSized
476ff2be 855 } else {
7cac9316 856 let param_env = tcx.param_env(def.did);
ff7c6d11 857 let last_field = def.variants[v].fields.last().unwrap();
dfeec247
XL
858 let always_sized =
859 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
860 if !always_sized {
861 StructKind::MaybeUnsized
862 } else {
863 StructKind::AlwaysSized
864 }
9e0c209e 865 };
9e0c209e 866
dc9dc135 867 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
ff7c6d11 868 st.variants = Variants::Single { index: v };
b7449926
XL
869 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
870 match st.abi {
dfeec247 871 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
b7449926
XL
872 // the asserts ensure that we are not using the
873 // `#[rustc_layout_scalar_valid_range(n)]`
874 // attribute to widen the range of anything as that would probably
875 // result in UB somewhere
416331ca
XL
876 // FIXME(eddyb) the asserts are probably not needed,
877 // as larger validity ranges would result in missed
878 // optimizations, *not* wrongly assuming the inner
879 // value is valid. e.g. unions enlarge validity ranges,
880 // because the values may be uninitialized.
b7449926 881 if let Bound::Included(start) = start {
416331ca
XL
882 // FIXME(eddyb) this might be incorrect - it doesn't
883 // account for wrap-around (end < start) ranges.
b7449926
XL
884 assert!(*scalar.valid_range.start() <= start);
885 scalar.valid_range = start..=*scalar.valid_range.end();
886 }
887 if let Bound::Included(end) = end {
416331ca
XL
888 // FIXME(eddyb) this might be incorrect - it doesn't
889 // account for wrap-around (end < start) ranges.
b7449926
XL
890 assert!(*scalar.valid_range.end() >= end);
891 scalar.valid_range = *scalar.valid_range.start()..=end;
ff7c6d11 892 }
416331ca
XL
893
894 // Update `largest_niche` if we have introduced a larger niche.
74b04a01
XL
895 let niche = if def.repr.hide_niche() {
896 None
897 } else {
898 Niche::from_scalar(dl, Size::ZERO, scalar.clone())
899 };
416331ca
XL
900 if let Some(niche) = niche {
901 match &st.largest_niche {
902 Some(largest_niche) => {
903 // Replace the existing niche even if they're equal,
904 // because this one is at a lower offset.
905 if largest_niche.available(dl) <= niche.available(dl) {
906 st.largest_niche = Some(niche);
907 }
908 }
909 None => st.largest_niche = Some(niche),
910 }
911 }
ff7c6d11 912 }
b7449926
XL
913 _ => assert!(
914 start == Bound::Unbounded && end == Bound::Unbounded,
915 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
916 def,
917 st,
918 ),
54a0048b 919 }
416331ca 920
ff7c6d11 921 return Ok(tcx.intern_layout(st));
54a0048b
SL
922 }
923
74b04a01
XL
924 // At this point, we have handled all unions and
925 // structs. (We have also handled univariant enums
926 // that allow representation optimization.)
927 assert!(def.is_enum());
928
83c7162d
XL
929 // The current code for niche-filling relies on variant indices
930 // instead of actual discriminants, so dataful enums with
931 // explicit discriminants (RFC #2363) would misbehave.
dfeec247
XL
932 let no_explicit_discriminants = def
933 .variants
934 .iter_enumerated()
a1dfa0c6 935 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
ff7c6d11 936
3dfed10e
XL
937 let mut niche_filling_layout = None;
938
ff7c6d11
XL
939 // Niche-filling enum optimization.
940 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
941 let mut dataful_variant = None;
a1dfa0c6 942 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
ff7c6d11
XL
943
944 // Find one non-ZST variant.
a1dfa0c6 945 'variants: for (v, fields) in variants.iter_enumerated() {
83c7162d
XL
946 if absent(fields) {
947 continue 'variants;
948 }
ff7c6d11 949 for f in fields {
ff7c6d11
XL
950 if !f.is_zst() {
951 if dataful_variant.is_none() {
952 dataful_variant = Some(v);
953 continue 'variants;
954 } else {
955 dataful_variant = None;
956 break 'variants;
957 }
958 }
54a0048b 959 }
83c7162d 960 niche_variants = *niche_variants.start().min(&v)..=v;
ff7c6d11
XL
961 }
962
83c7162d 963 if niche_variants.start() > niche_variants.end() {
ff7c6d11
XL
964 dataful_variant = None;
965 }
966
967 if let Some(i) = dataful_variant {
dfeec247
XL
968 let count = (niche_variants.end().as_u32()
969 - niche_variants.start().as_u32()
970 + 1) as u128;
94b46f34 971
ba9703b0
XL
972 // Find the field with the largest niche
973 let niche_candidate = variants[i]
974 .iter()
975 .enumerate()
976 .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
977 .max_by_key(|(_, niche)| niche.available(dl));
978
979 if let Some((field_index, niche, (niche_start, niche_scalar))) =
980 niche_candidate.and_then(|(field_index, niche)| {
981 Some((field_index, niche, niche.reserve(self, count)?))
982 })
983 {
ff7c6d11 984 let mut align = dl.aggregate_align;
dfeec247
XL
985 let st = variants
986 .iter_enumerated()
987 .map(|(j, v)| {
988 let mut st = self.univariant_uninterned(
989 ty,
990 v,
991 &def.repr,
992 StructKind::AlwaysSized,
993 )?;
994 st.variants = Variants::Single { index: j };
995
996 align = align.max(st.align);
997
998 Ok(st)
999 })
1000 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
ff7c6d11 1001
94b46f34 1002 let offset = st[i].fields.offset(field_index) + niche.offset;
ff7c6d11
XL
1003 let size = st[i].size;
1004
ba9703b0
XL
1005 let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
1006 Abi::Uninhabited
1007 } else {
1008 match st[i].abi {
1009 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
1010 Abi::ScalarPair(ref first, ref second) => {
1011 // We need to use scalar_unit to reset the
1012 // valid range to the maximal one for that
1013 // primitive, because only the niche is
1014 // guaranteed to be initialised, not the
1015 // other primitive.
1016 if offset.bytes() == 0 {
1017 Abi::ScalarPair(
1018 niche_scalar.clone(),
1019 scalar_unit(second.value),
1020 )
1021 } else {
1022 Abi::ScalarPair(
1023 scalar_unit(first.value),
1024 niche_scalar.clone(),
1025 )
1026 }
0531ce1d 1027 }
ba9703b0 1028 _ => Abi::Aggregate { sized: true },
0531ce1d 1029 }
c30ab7b3 1030 };
ff7c6d11 1031
416331ca
XL
1032 let largest_niche =
1033 Niche::from_scalar(dl, offset, niche_scalar.clone());
1034
3dfed10e 1035 niche_filling_layout = Some(Layout {
532ac7d7 1036 variants: Variants::Multiple {
f035d41b
XL
1037 tag: niche_scalar,
1038 tag_encoding: TagEncoding::Niche {
532ac7d7
XL
1039 dataful_variant: i,
1040 niche_variants,
1041 niche_start,
1042 },
f035d41b 1043 tag_field: 0,
ff7c6d11
XL
1044 variants: st,
1045 },
ba9703b0 1046 fields: FieldsShape::Arbitrary {
ff7c6d11 1047 offsets: vec![offset],
dfeec247 1048 memory_index: vec![0],
ff7c6d11
XL
1049 },
1050 abi,
416331ca 1051 largest_niche,
ff7c6d11
XL
1052 size,
1053 align,
3dfed10e 1054 });
54a0048b 1055 }
ff7c6d11
XL
1056 }
1057 }
54a0048b 1058
74b04a01 1059 let (mut min, mut max) = (i128::MAX, i128::MIN);
0531ce1d 1060 let discr_type = def.repr.discr_type();
a1dfa0c6
XL
1061 let bits = Integer::from_attr(self, discr_type).size().bits();
1062 for (i, discr) in def.discriminants(tcx) {
0bf4aa26 1063 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
ff7c6d11 1064 continue;
54a0048b 1065 }
0531ce1d
XL
1066 let mut x = discr.val as i128;
1067 if discr_type.is_signed() {
1068 // sign extend the raw representation to be an i128
1069 x = (x << (128 - bits)) >> (128 - bits);
1070 }
dfeec247
XL
1071 if x < min {
1072 min = x;
1073 }
1074 if x > max {
1075 max = x;
1076 }
54a0048b 1077 }
83c7162d 1078 // We might have no inhabited variants, so pretend there's at least one.
74b04a01 1079 if (min, max) == (i128::MAX, i128::MIN) {
83c7162d
XL
1080 min = 0;
1081 max = 0;
1082 }
ff7c6d11
XL
1083 assert!(min <= max, "discriminant range is {}...{}", min, max);
1084 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
54a0048b 1085
54a0048b 1086 let mut align = dl.aggregate_align;
94b46f34 1087 let mut size = Size::ZERO;
54a0048b
SL
1088
1089 // We're interested in the smallest alignment, so start large.
a1dfa0c6
XL
1090 let mut start_align = Align::from_bytes(256).unwrap();
1091 assert_eq!(Integer::for_align(dl, start_align), None);
ff7c6d11
XL
1092
1093 // repr(C) on an enum tells us to make a (tag, union) layout,
1094 // so we need to grow the prefix alignment to be at least
1095 // the alignment of the union. (This value is used both for
1096 // determining the alignment of the overall enum, and the
1097 // determining the alignment of the payload after the tag.)
a1dfa0c6 1098 let mut prefix_align = min_ity.align(dl).abi;
ff7c6d11
XL
1099 if def.repr.c() {
1100 for fields in &variants {
1101 for field in fields {
a1dfa0c6 1102 prefix_align = prefix_align.max(field.align.abi);
ff7c6d11
XL
1103 }
1104 }
1105 }
54a0048b 1106
ff7c6d11 1107 // Create the set of structs that represent each variant.
dfeec247
XL
1108 let mut layout_variants = variants
1109 .iter_enumerated()
1110 .map(|(i, field_layouts)| {
1111 let mut st = self.univariant_uninterned(
1112 ty,
1113 &field_layouts,
1114 &def.repr,
1115 StructKind::Prefixed(min_ity.size(), prefix_align),
1116 )?;
1117 st.variants = Variants::Single { index: i };
1118 // Find the first field we can't move later
1119 // to make room for a larger discriminant.
1120 for field in
1121 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1122 {
1123 if !field.is_zst() || field.align.abi.bytes() != 1 {
1124 start_align = start_align.min(field.align.abi);
1125 break;
1126 }
54a0048b 1127 }
dfeec247
XL
1128 size = cmp::max(size, st.size);
1129 align = align.max(st.align);
1130 Ok(st)
1131 })
1132 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
54a0048b
SL
1133
1134 // Align the maximum variant size to the largest alignment.
a1dfa0c6 1135 size = size.align_to(align.abi);
54a0048b
SL
1136
1137 if size.bytes() >= dl.obj_size_bound() {
1138 return Err(LayoutError::SizeOverflow(ty));
1139 }
1140
8bb4bdeb
XL
1141 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1142 if typeck_ity < min_ity {
1143 // It is a bug if Layout decided on a greater discriminant size than typeck for
1144 // some reason at this point (based on values discriminant can take on). Mostly
1145 // because this discriminant will be loaded, and then stored into variable of
1146 // type calculated by typeck. Consider such case (a bug): typeck decided on
1147 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
94b46f34 1148 // discriminant values. That would be a bug, because then, in codegen, in order
8bb4bdeb
XL
1149 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1150 // space necessary to represent would have to be discarded (or layout is wrong
1151 // on thinking it needs 16 bits)
dfeec247
XL
1152 bug!(
1153 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1154 min_ity,
1155 typeck_ity
1156 );
8bb4bdeb 1157 // However, it is fine to make discr type however large (as an optimisation)
94b46f34 1158 // after this point – we’ll just truncate the value we load in codegen.
8bb4bdeb
XL
1159 }
1160
54a0048b
SL
1161 // Check to see if we should use a different type for the
1162 // discriminant. We can safely use a type with the same size
1163 // as the alignment of the first field of each variant.
1164 // We increase the size of the discriminant to avoid LLVM copying
1165 // padding when it doesn't need to. This normally causes unaligned
1166 // load/stores and excessive memcpy/memset operations. By using a
83c7162d 1167 // bigger integer size, LLVM can be sure about its contents and
54a0048b
SL
1168 // won't be so conservative.
1169
1170 // Use the initial field alignment
83c7162d
XL
1171 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1172 min_ity
1173 } else {
a1dfa0c6 1174 Integer::for_align(dl, start_align).unwrap_or(min_ity)
83c7162d 1175 };
54a0048b
SL
1176
1177 // If the alignment is not larger than the chosen discriminant size,
1178 // don't use the alignment as the final size.
1179 if ity <= min_ity {
1180 ity = min_ity;
1181 } else {
1182 // Patch up the variants' first few fields.
ff7c6d11
XL
1183 let old_ity_size = min_ity.size();
1184 let new_ity_size = ity.size();
83c7162d 1185 for variant in &mut layout_variants {
ff7c6d11 1186 match variant.fields {
ba9703b0 1187 FieldsShape::Arbitrary { ref mut offsets, .. } => {
ff7c6d11
XL
1188 for i in offsets {
1189 if *i <= old_ity_size {
1190 assert_eq!(*i, old_ity_size);
1191 *i = new_ity_size;
1192 }
1193 }
1194 // We might be making the struct larger.
1195 if variant.size <= old_ity_size {
1196 variant.size = new_ity_size;
1197 }
1198 }
dfeec247 1199 _ => bug!(),
c30ab7b3 1200 }
54a0048b
SL
1201 }
1202 }
1203
0531ce1d
XL
1204 let tag_mask = !0u128 >> (128 - ity.size().bits());
1205 let tag = Scalar {
ff7c6d11 1206 value: Int(ity, signed),
0531ce1d 1207 valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
ff7c6d11 1208 };
83c7162d
XL
1209 let mut abi = Abi::Aggregate { sized: true };
1210 if tag.value.size(dl) == size {
1211 abi = Abi::Scalar(tag.clone());
8faf50e0
XL
1212 } else {
1213 // Try to use a ScalarPair for all tagged enums.
83c7162d
XL
1214 let mut common_prim = None;
1215 for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1216 let offsets = match layout_variant.fields {
ba9703b0 1217 FieldsShape::Arbitrary { ref offsets, .. } => offsets,
83c7162d
XL
1218 _ => bug!(),
1219 };
dfeec247
XL
1220 let mut fields =
1221 field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
83c7162d
XL
1222 let (field, offset) = match (fields.next(), fields.next()) {
1223 (None, None) => continue,
1224 (Some(pair), None) => pair,
1225 _ => {
1226 common_prim = None;
1227 break;
1228 }
1229 };
ba9703b0 1230 let prim = match field.abi {
83c7162d
XL
1231 Abi::Scalar(ref scalar) => scalar.value,
1232 _ => {
1233 common_prim = None;
1234 break;
1235 }
1236 };
1237 if let Some(pair) = common_prim {
1238 // This is pretty conservative. We could go fancier
1239 // by conflating things like i32 and u32, or even
1240 // realising that (u8, u8) could just cohabit with
1241 // u16 or even u32.
1242 if pair != (prim, offset) {
1243 common_prim = None;
1244 break;
1245 }
1246 } else {
1247 common_prim = Some((prim, offset));
1248 }
1249 }
1250 if let Some((prim, offset)) = common_prim {
dc9dc135 1251 let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
83c7162d 1252 let pair_offsets = match pair.fields {
ba9703b0 1253 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
83c7162d
XL
1254 assert_eq!(memory_index, &[0, 1]);
1255 offsets
1256 }
dfeec247 1257 _ => bug!(),
83c7162d 1258 };
dfeec247
XL
1259 if pair_offsets[0] == Size::ZERO
1260 && pair_offsets[1] == *offset
1261 && align == pair.align
1262 && size == pair.size
1263 {
83c7162d
XL
1264 // We can use `ScalarPair` only when it matches our
1265 // already computed layout (including `#[repr(C)]`).
1266 abi = pair.abi;
1267 }
1268 }
1269 }
1270
0bf4aa26 1271 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
83c7162d
XL
1272 abi = Abi::Uninhabited;
1273 }
1274
416331ca
XL
1275 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1276
3dfed10e 1277 let tagged_layout = Layout {
532ac7d7 1278 variants: Variants::Multiple {
f035d41b
XL
1279 tag,
1280 tag_encoding: TagEncoding::Direct,
1281 tag_field: 0,
83c7162d 1282 variants: layout_variants,
ff7c6d11 1283 },
ba9703b0 1284 fields: FieldsShape::Arbitrary {
94b46f34 1285 offsets: vec![Size::ZERO],
dfeec247 1286 memory_index: vec![0],
ff7c6d11 1287 },
416331ca 1288 largest_niche,
ff7c6d11 1289 abi,
041b39d2 1290 align,
dfeec247 1291 size,
3dfed10e
XL
1292 };
1293
1294 let best_layout = match (tagged_layout, niche_filling_layout) {
1295 (tagged_layout, Some(niche_filling_layout)) => {
1296 // Pick the smaller layout; otherwise,
1297 // pick the layout with the larger niche; otherwise,
1298 // pick tagged as it has simpler codegen.
1299 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1300 let niche_size =
1301 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1302 (layout.size, cmp::Reverse(niche_size))
1303 })
1304 }
1305 (tagged_layout, None) => tagged_layout,
1306 };
1307
1308 tcx.intern_layout(best_layout)
54a0048b
SL
1309 }
1310
1311 // Types with no meaningful known layout.
b7449926 1312 ty::Projection(_) | ty::Opaque(..) => {
0531ce1d 1313 let normalized = tcx.normalize_erasing_regions(param_env, ty);
5bcae85e
SL
1314 if ty == normalized {
1315 return Err(LayoutError::Unknown(ty));
1316 }
ff7c6d11 1317 tcx.layout_raw(param_env.and(normalized))?
5bcae85e 1318 }
a1dfa0c6 1319
1b1a35ee 1320 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
f9f354fc
XL
1321 bug!("Layout::compute: unexpected type `{}`", ty)
1322 }
a1dfa0c6 1323
1b1a35ee 1324 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
8faf50e0
XL
1325 return Err(LayoutError::Unknown(ty));
1326 }
ff7c6d11 1327 })
cc61c64b 1328 }
dc9dc135
XL
1329}
1330
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    // Initial state for every saved local; not yet seen in any variant.
    Unassigned,
    // The local is stored in exactly this one variant, so its storage may
    // overlap with locals belonging to other variants.
    Assigned(VariantIdx),
    // The local must live in the generator's prefix (shared by all variants).
    // Once promotion order is decided, `Some(i)` records its index among the
    // promoted prefix fields; `None` means "ineligible, position not yet set".
    // FIXME: Use newtype_index so we aren't wasting bytes
    Ineligible(Option<u32>),
}
1339
1340// When laying out generators, we divide our saved local fields into two
1341// categories: overlap-eligible and overlap-ineligible.
1342//
1343// Those fields which are ineligible for overlap go in a "prefix" at the
1344// beginning of the layout, and always have space reserved for them.
1345//
1346// Overlap-eligible fields are only assigned to one variant, so we lay
1347// those fields out for each variant and put them right after the
1348// prefix.
1349//
1350// Finally, in the layout details, we point to the fields from the
1351// variants they are assigned to. It is possible for some fields to be
1352// included in multiple variants. No field ever "moves around" in the
1353// layout; its offset is always the same.
1354//
1355// Also included in the layout are the upvars and the discriminant.
1356// These are included as fields on the "outer" layout; they are not part
1357// of any variant.
1358impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Compute the eligibility and assignment of each local.
    ///
    /// Returns the set of overlap-ineligible locals (these get "promoted" to
    /// the generator's prefix) together with a `SavedLocalEligibility` entry
    /// for every saved local.
    fn generator_saved_local_eligibility(
        &self,
        info: &GeneratorLayout<'tcx>,
    ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
        use SavedLocalEligibility::*;

        // Every local starts out unassigned.
        let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
            IndexVec::from_elem_n(Unassigned, info.field_tys.len());

        // The saved locals not eligible for overlap. These will get
        // "promoted" to the prefix of our generator.
        let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

        // Figure out which of our saved locals are fields in only
        // one variant. The rest are deemed ineligible for overlap.
        for (variant_index, fields) in info.variant_fields.iter_enumerated() {
            for local in fields {
                match assignments[*local] {
                    Unassigned => {
                        // First sighting: tentatively assign to this variant.
                        assignments[*local] = Assigned(variant_index);
                    }
                    Assigned(idx) => {
                        // We've already seen this local at another suspension
                        // point, so it is no longer a candidate.
                        trace!(
                            "removing local {:?} in >1 variant ({:?}, {:?})",
                            local,
                            variant_index,
                            idx
                        );
                        ineligible_locals.insert(*local);
                        assignments[*local] = Ineligible(None);
                    }
                    Ineligible(_) => {}
                }
            }
        }

        // Next, check every pair of eligible locals to see if they
        // conflict.
        for local_a in info.storage_conflicts.rows() {
            // Hoisted out of the inner loop: the conflict count for `local_a`
            // is used to break ties for every `local_b` below.
            let conflicts_a = info.storage_conflicts.count(local_a);
            if ineligible_locals.contains(local_a) {
                continue;
            }

            for local_b in info.storage_conflicts.iter(local_a) {
                // local_a and local_b are storage live at the same time, therefore they
                // cannot overlap in the generator layout. The only way to guarantee
                // this is if they are in the same variant, or one is ineligible
                // (which means it is stored in every variant).
                if ineligible_locals.contains(local_b)
                    || assignments[local_a] == assignments[local_b]
                {
                    continue;
                }

                // If they conflict, we will choose one to make ineligible.
                // This is not always optimal; it's just a greedy heuristic that
                // seems to produce good results most of the time.
                // NOTE(review): the outcome depends on row iteration order,
                // since `assignments` is mutated while scanning.
                let conflicts_b = info.storage_conflicts.count(local_b);
                let (remove, other) =
                    if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
                ineligible_locals.insert(remove);
                assignments[remove] = Ineligible(None);
                trace!("removing local {:?} due to conflict with {:?}", remove, other);
            }
        }

        // Count the number of variants in use. If only one of them, then it is
        // impossible to overlap any locals in our layout. In this case it's
        // always better to make the remaining locals ineligible, so we can
        // lay them out with the other locals in the prefix and eliminate
        // unnecessary padding bytes.
        {
            let mut used_variants = BitSet::new_empty(info.variant_fields.len());
            for assignment in &assignments {
                if let Assigned(idx) = assignment {
                    used_variants.insert(*idx);
                }
            }
            if used_variants.count() < 2 {
                for assignment in assignments.iter_mut() {
                    *assignment = Ineligible(None);
                }
                ineligible_locals.insert_all();
            }
        }

        // Write down the order of our locals that will be promoted to the prefix.
        // (BitSet iteration is in index order, so promotion order is deterministic.)
        {
            for (idx, local) in ineligible_locals.iter().enumerate() {
                assignments[local] = Ineligible(Some(idx as u32));
            }
        }
        debug!("generator saved local assignments: {:?}", assignments);

        (ineligible_locals, assignments)
    }
1459
1460 /// Compute the full generator layout.
1461 fn generator_layout(
1462 &self,
1463 ty: Ty<'tcx>,
1464 def_id: hir::def_id::DefId,
e74abb32 1465 substs: SubstsRef<'tcx>,
ba9703b0 1466 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
dc9dc135
XL
1467 use SavedLocalEligibility::*;
1468 let tcx = self.tcx;
1469
dfeec247 1470 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
dc9dc135
XL
1471
1472 let info = tcx.generator_layout(def_id);
1473 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1474
1475 // Build a prefix layout, including "promoting" all ineligible
1476 // locals as part of the prefix. We compute the layout of all of
1477 // these fields at once to get optimal packing.
f035d41b 1478 let tag_index = substs.as_generator().prefix_tys().count();
ba9703b0
XL
1479
1480 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1481 let max_discr = (info.variant_fields.len() - 1) as u128;
1482 let discr_int = Integer::fit_unsigned(max_discr);
1483 let discr_int_ty = discr_int.to_ty(tcx, false);
f035d41b
XL
1484 let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1485 let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1486 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
ba9703b0 1487
dfeec247
XL
1488 let promoted_layouts = ineligible_locals
1489 .iter()
416331ca
XL
1490 .map(|local| subst_field(info.field_tys[local]))
1491 .map(|ty| tcx.mk_maybe_uninit(ty))
1492 .map(|ty| self.layout_of(ty));
dfeec247
XL
1493 let prefix_layouts = substs
1494 .as_generator()
ba9703b0 1495 .prefix_tys()
416331ca 1496 .map(|ty| self.layout_of(ty))
f035d41b 1497 .chain(iter::once(Ok(tag_layout)))
416331ca
XL
1498 .chain(promoted_layouts)
1499 .collect::<Result<Vec<_>, _>>()?;
dc9dc135
XL
1500 let prefix = self.univariant_uninterned(
1501 ty,
416331ca 1502 &prefix_layouts,
dc9dc135 1503 &ReprOptions::default(),
416331ca
XL
1504 StructKind::AlwaysSized,
1505 )?;
1506
dc9dc135
XL
1507 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1508
1509 // Split the prefix layout into the "outer" fields (upvars and
1510 // discriminant) and the "promoted" fields. Promoted fields will
1511 // get included in each variant that requested them in
1512 // GeneratorLayout.
1513 debug!("prefix = {:#?}", prefix);
1514 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
ba9703b0 1515 FieldsShape::Arbitrary { mut offsets, memory_index } => {
dc9dc135
XL
1516 let mut inverse_memory_index = invert_mapping(&memory_index);
1517
1518 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1519 // "outer" and "promoted" fields respectively.
f035d41b 1520 let b_start = (tag_index + 1) as u32;
dc9dc135
XL
1521 let offsets_b = offsets.split_off(b_start as usize);
1522 let offsets_a = offsets;
1523
1524 // Disentangle the "a" and "b" components of `inverse_memory_index`
1525 // by preserving the order but keeping only one disjoint "half" each.
1526 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1527 let inverse_memory_index_b: Vec<_> =
1528 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1529 inverse_memory_index.retain(|&i| i < b_start);
1530 let inverse_memory_index_a = inverse_memory_index;
1531
1532 // Since `inverse_memory_index_{a,b}` each only refer to their
1533 // respective fields, they can be safely inverted
1534 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1535 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1536
dfeec247 1537 let outer_fields =
ba9703b0 1538 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
dc9dc135
XL
1539 (outer_fields, offsets_b, memory_index_b)
1540 }
1541 _ => bug!(),
1542 };
1543
1544 let mut size = prefix.size;
1545 let mut align = prefix.align;
dfeec247
XL
1546 let variants = info
1547 .variant_fields
1548 .iter_enumerated()
1549 .map(|(index, variant_fields)| {
1550 // Only include overlap-eligible fields when we compute our variant layout.
1551 let variant_only_tys = variant_fields
1552 .iter()
1553 .filter(|local| match assignments[**local] {
dc9dc135
XL
1554 Unassigned => bug!(),
1555 Assigned(v) if v == index => true,
1556 Assigned(_) => bug!("assignment does not match variant"),
1557 Ineligible(_) => false,
dfeec247
XL
1558 })
1559 .map(|local| subst_field(info.field_tys[*local]));
dc9dc135 1560
dfeec247
XL
1561 let mut variant = self.univariant_uninterned(
1562 ty,
1563 &variant_only_tys
1564 .map(|ty| self.layout_of(ty))
1565 .collect::<Result<Vec<_>, _>>()?,
1566 &ReprOptions::default(),
1567 StructKind::Prefixed(prefix_size, prefix_align.abi),
1568 )?;
1569 variant.variants = Variants::Single { index };
1570
1571 let (offsets, memory_index) = match variant.fields {
ba9703b0 1572 FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
dfeec247 1573 _ => bug!(),
dc9dc135 1574 };
dc9dc135 1575
dfeec247
XL
1576 // Now, stitch the promoted and variant-only fields back together in
1577 // the order they are mentioned by our GeneratorLayout.
1578 // Because we only use some subset (that can differ between variants)
1579 // of the promoted fields, we can't just pick those elements of the
1580 // `promoted_memory_index` (as we'd end up with gaps).
1581 // So instead, we build an "inverse memory_index", as if all of the
1582 // promoted fields were being used, but leave the elements not in the
1583 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1584 // obtain a valid (bijective) mapping.
1585 const INVALID_FIELD_IDX: u32 = !0;
1586 let mut combined_inverse_memory_index =
1587 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1588 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1589 let combined_offsets = variant_fields
1590 .iter()
1591 .enumerate()
1592 .map(|(i, local)| {
1593 let (offset, memory_index) = match assignments[*local] {
1594 Unassigned => bug!(),
1595 Assigned(_) => {
1596 let (offset, memory_index) =
1597 offsets_and_memory_index.next().unwrap();
1598 (offset, promoted_memory_index.len() as u32 + memory_index)
1599 }
1600 Ineligible(field_idx) => {
1601 let field_idx = field_idx.unwrap() as usize;
1602 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1603 }
1604 };
1605 combined_inverse_memory_index[memory_index as usize] = i as u32;
1606 offset
1607 })
1608 .collect();
1609
1610 // Remove the unused slots and invert the mapping to obtain the
1611 // combined `memory_index` (also see previous comment).
1612 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1613 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1614
ba9703b0 1615 variant.fields = FieldsShape::Arbitrary {
dfeec247
XL
1616 offsets: combined_offsets,
1617 memory_index: combined_memory_index,
1618 };
1619
1620 size = size.max(variant.size);
1621 align = align.max(variant.align);
1622 Ok(variant)
1623 })
1624 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
dc9dc135 1625
416331ca
XL
1626 size = size.align_to(align.abi);
1627
dfeec247
XL
1628 let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1629 {
dc9dc135
XL
1630 Abi::Uninhabited
1631 } else {
1632 Abi::Aggregate { sized: true }
1633 };
dc9dc135 1634
ba9703b0 1635 let layout = tcx.intern_layout(Layout {
dc9dc135 1636 variants: Variants::Multiple {
f035d41b
XL
1637 tag: tag,
1638 tag_encoding: TagEncoding::Direct,
1639 tag_field: tag_index,
dc9dc135
XL
1640 variants,
1641 },
1642 fields: outer_fields,
1643 abi,
416331ca 1644 largest_niche: prefix.largest_niche,
dc9dc135
XL
1645 size,
1646 align,
1647 });
1648 debug!("generator layout ({:?}): {:#?}", ty, layout);
1649 Ok(layout)
1650 }
7cac9316
XL
1651
1652 /// This is invoked by the `layout_raw` query to record the final
1653 /// layout of each type.
532ac7d7 1654 #[inline(always)]
ba9703b0 1655 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
532ac7d7
XL
1656 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1657 // for dumping later.
1658 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1659 self.record_layout_for_printing_outlined(layout)
1660 }
1661 }
1662
    /// Slow path of `record_layout_for_printing`: translates `layout` into
    /// the session's `code_stats` records (used by `-Zprint-type-sizes`).
    /// Only nominal types (ADTs) and closures are recorded; everything else
    /// is skipped.
    fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
        // Ignore layouts that are done with non-empty environments or
        // non-monomorphic layouts, as the user only wants to see the stuff
        // resulting from the final codegen session.
        if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
            return;
        }

        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.record_type_size(
                kind,
                type_desc,
                layout.align.abi,
                layout.size,
                packed,
                opt_discr_size,
                variants,
            );
        };

        let adt_def = match *layout.ty.kind() {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                // Closures are recorded without per-field breakdown.
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.pack.is_some();

        // Builds the `VariantInfo` for one variant, walking its fields via
        // `layout.field(..)`. `min_size` ends up as the highest field end
        // offset seen, i.e. the variant's occupied size.
        let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds
                .iter()
                .enumerate()
                .map(|(i, &name)| match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                })
                .collect();

            VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
                align: layout.align.abi.bytes(),
                // A zero `min_size` means no fields contributed; fall back to
                // the layout's own size.
                size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
                fields: field_info,
            }
        };

        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(
                        adt_kind.into(),
                        adt_packed,
                        None,
                        vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
                    );
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::Multiple { ref tag, ref tag_encoding, .. } => {
                debug!(
                    "print-type-size `{:#?}` adt general variants def {}",
                    layout.ty,
                    adt_def.variants.len()
                );
                let variant_infos: Vec<_> = adt_def
                    .variants
                    .iter_enumerated()
                    .map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(
                            Some(variant_def.ident),
                            &fields,
                            layout.for_variant(self, i),
                        )
                    })
                    .collect();
                record(
                    adt_kind.into(),
                    adt_packed,
                    // Only a directly-encoded tag occupies separate space worth
                    // reporting; niche encodings reuse a field's storage.
                    match tag_encoding {
                        TagEncoding::Direct => Some(tag.value.size(self)),
                        _ => None,
                    },
                    variant_infos,
                );
            }
        }
    }
54a0048b
SL
1790}
1791
/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>,
    },
}
1811
dc9dc135
XL
impl<'tcx> SizeSkeleton<'tcx> {
    /// Computes the size skeleton of `ty`. Tries a full `layout_of` first;
    /// when that errors (e.g. the type still mentions type parameters), falls
    /// back to recognizing the restricted shapes described on `SizeSkeleton`:
    /// pointers, newtypes around pointers, and two-variant enums eligible for
    /// the nullable-pointer optimization. Any other shape re-raises the
    /// original layout error.
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types_or_consts());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err,
        };

        match *ty.kind() {
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                match tail.kind() {
                    ty::Param(_) | ty::Projection(_) => {
                        // Layout can only have failed because the tail is still
                        // generic; record it (region-erased) as the metadata
                        // determinant.
                        debug_assert!(tail.has_param_types_or_consts());
                        Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
                    }
                    _ => bug!(
                        "SizeSkeleton::compute({}): layout errored ({}), yet \
                          tail `{}` is not a type parameter or a projection",
                        ty,
                        err,
                        tail
                    ),
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                // Returns `Ok(Some(ptr_skeleton))` when variant `i` is exactly
                // one pointer plus zero-sized fields, `Ok(None)` when it is
                // entirely zero-sized, and `Err` otherwise.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i]
                        .fields
                        .iter()
                        .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer { .. } => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A `rustc_layout_scalar_valid_range` attribute on
                            // the newtype can also exclude zero.
                            non_zero: non_zero
                                || match tcx.layout_scalar_valid_range(def.did) {
                                    (Bound::Included(start), Bound::Unbounded) => start > 0,
                                    (Bound::Included(start), Bound::Included(end)) => {
                                        0 < start && start < end
                                    }
                                    _ => false,
                                },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
                    | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        // The niche (null) is now a valid value of the enum, so
                        // the resulting pointer skeleton is no longer non-zero.
                        Ok(SizeSkeleton::Pointer { non_zero: false, tail })
                    }
                    _ => Err(err),
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    // Normalization made no progress; give up.
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err),
        }
    }

    /// Whether two skeletons are guaranteed to describe equal sizes:
    /// identical known sizes, or pointers with the same metadata-determining
    /// tail type.
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
                a == b
            }
            _ => false,
        }
    }
}
cc61c64b 1934
/// Extension of `HasDataLayout` for contexts that can also provide a `TyCtxt`.
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}
1938
48663c56
XL
/// Access to the `ParamEnv` a context performs its queries under.
pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}
1942
dc9dc135 1943impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
ff7c6d11
XL
1944 fn data_layout(&self) -> &TargetDataLayout {
1945 &self.data_layout
1946 }
cc61c64b
XL
1947}
1948
dc9dc135
XL
impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        // `TyCtxt` is `Copy` (the deref-copy below relies on that).
        *self
    }
}
1954
48663c56
XL
impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}
1960
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    // Delegate to the wrapped context.
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}
1966
dc9dc135
XL
impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    // Delegate to the wrapped context.
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.tcx()
    }
}
1972
/// Shorthand for `rustc_target`'s generic `TyAndLayout` instantiated with this crate's `Ty`.
pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
cc61c64b 1974
impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        // Normalize first so `layout_raw` sees a canonical type.
        let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let layout = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyAndLayout { ty, layout };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}
1998
impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        // Normalize first so `layout_raw` sees a canonical type.
        let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let layout = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyAndLayout { ty, layout };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        // Rebuild a plain `LayoutCx` (dropping the `at` span) for recording.
        let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}
cc61c64b 2023
// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(
        self,
        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
        // Wrap `self` in a `LayoutCx` and delegate to its `LayoutOf` impl.
        let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
        cx.layout_of(param_env_and_ty.value)
    }
}
2037
impl ty::query::TyCtxtAt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(
        self,
        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
        // Preserve the span carried by `TyCtxtAt` when building the context.
        let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
        cx.layout_of(param_env_and_ty.value)
    }
}
2050
/// Navigation of a `TyAndLayout` into its variants and fields, plus pointee
/// metadata lookup. These are the hooks `rustc_target`'s `TyAndLayout` calls
/// back into for Rust types.
impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    /// Projects `this` to the layout of one of its variants.
    fn for_variant(
        this: TyAndLayout<'tcx>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'tcx> {
        let layout = match this.variants {
            Variants::Single { index }
                // If all variants but one are uninhabited, the variant layout is the enum layout.
                if index == variant_index &&
                // Don't confuse variants of uninhabited enums with the enum itself.
                // For more details see https://github.com/rust-lang/rust/issues/69763.
                this.fields != FieldsShape::Primitive =>
            {
                this.layout
            }

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(original_layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.kind() {
                    ty::Adt(def, _) if def.variants.is_empty() =>
                        bug!("for_variant called on zero-variant enum"),
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!(),
                };
                let tcx = cx.tcx();
                // Synthesize an uninhabited, zero-sized layout for the
                // requested (absent) variant.
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: variant_index },
                    fields: match NonZeroUsize::new(fields) {
                        Some(fields) => FieldsShape::Union(fields),
                        None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
                    },
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO,
                })
            }

            Variants::Multiple { ref variants, .. } => &variants[variant_index],
        };

        assert_eq!(layout.variants, Variants::Single { index: variant_index });

        TyAndLayout { ty: this.ty, layout }
    }

    /// Computes the layout of field `i` of `this`.
    fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
        // Either a type whose layout still has to be computed, or an
        // already-complete layout (used for synthesized fields such as tags
        // and thin-pointer stand-ins).
        enum TyMaybeWithLayout<C: LayoutOf> {
            Ty(C::Ty),
            TyAndLayout(C::TyAndLayout),
        }

        fn ty_and_layout_kind<
            C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
                + HasTyCtxt<'tcx>
                + HasParamEnv<'tcx>,
        >(
            this: TyAndLayout<'tcx>,
            cx: &C,
            i: usize,
            ty: C::Ty,
        ) -> TyMaybeWithLayout<C> {
            let tcx = cx.tcx();
            // Builds a scalar layout for a discriminant tag on the fly.
            let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
                let layout = Layout::scalar(cx, tag.clone());
                MaybeResult::from(Ok(TyAndLayout {
                    layout: tcx.intern_layout(layout),
                    ty: tag.value.to_ty(tcx),
                }))
            };

            match *ty.kind() {
                ty::Bool
                | ty::Char
                | ty::Int(_)
                | ty::Uint(_)
                | ty::Float(_)
                | ty::FnPtr(_)
                | ty::Never
                | ty::FnDef(..)
                | ty::GeneratorWitness(..)
                | ty::Foreign(..)
                | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),

                // Potentially-fat pointers.
                ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                    assert!(i < this.fields.count());

                    // Reuse the fat `*T` type as its own thin pointer data field.
                    // This provides information about, e.g., DST struct pointees
                    // (which may have no non-DST form), and will work as long
                    // as the `Abi` or `FieldsShape` is checked by users.
                    if i == 0 {
                        let nil = tcx.mk_unit();
                        let ptr_ty = if ty.is_unsafe_ptr() {
                            tcx.mk_mut_ptr(nil)
                        } else {
                            tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                        };
                        return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
                            cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
                                ptr_layout.ty = ty;
                                ptr_layout
                            }),
                        ));
                    }

                    // Field 1 is the unsized metadata, determined by the tail.
                    match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
                        ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
                        ty::Dynamic(_, _) => {
                            TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
                                tcx.lifetimes.re_static,
                                tcx.mk_array(tcx.types.usize, 3),
                            ))
                            /* FIXME: use actual fn pointers
                            Warning: naively computing the number of entries in the
                            vtable by counting the methods on the trait + methods on
                            all parent traits does not work, because some methods can
                            be not object safe and thus excluded from the vtable.
                            Increase this counter if you tried to implement this but
                            failed to do it without duplicating a lot of code from
                            other places in the compiler: 2
                            tcx.mk_tup(&[
                                tcx.mk_array(tcx.types.usize, 3),
                                tcx.mk_array(Option<fn()>),
                            ])
                            */
                        }
                        _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
                    }
                }

                // Arrays and slices.
                ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
                ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),

                // Tuples, generators and closures.
                ty::Closure(_, ref substs) => {
                    // A closure's fields are its captured upvars.
                    ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
                }

                ty::Generator(def_id, ref substs, _) => match this.variants {
                    Variants::Single { index } => TyMaybeWithLayout::Ty(
                        substs
                            .as_generator()
                            .state_tys(def_id, tcx)
                            .nth(index.as_usize())
                            .unwrap()
                            .nth(i)
                            .unwrap(),
                    ),
                    Variants::Multiple { ref tag, tag_field, .. } => {
                        if i == tag_field {
                            return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                        }
                        TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
                    }
                },

                ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),

                // ADTs.
                ty::Adt(def, substs) => {
                    match this.variants {
                        Variants::Single { index } => {
                            TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
                        }

                        // Discriminant field for enums (where applicable).
                        Variants::Multiple { ref tag, .. } => {
                            assert_eq!(i, 0);
                            return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                        }
                    }
                }

                ty::Projection(_)
                | ty::Bound(..)
                | ty::Placeholder(..)
                | ty::Opaque(..)
                | ty::Param(_)
                | ty::Infer(_)
                | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
            }
        }

        cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
            TyMaybeWithLayout::Ty(result) => result,
            TyMaybeWithLayout::TyAndLayout(result) => return result,
        })
    }

    /// If a pointer is located at `offset` inside `this`, returns information
    /// about it (size, alignment, safety, address space); `None` otherwise.
    fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
        // Function pointers live in the target's instruction address space;
        // everything else in the data address space.
        let addr_space_of_ty = |ty: Ty<'tcx>| {
            if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
        };

        let pointee_info = match *this.ty.kind() {
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: None,
                    address_space: addr_space_of_ty(mt.ty),
                })
            }
            ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
                cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
                    PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: None,
                        address_space: cx.data_layout().instruction_address_space,
                    }
                })
            }
            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let address_space = addr_space_of_ty(ty);
                let tcx = cx.tcx();
                let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
                let kind = match mt {
                    hir::Mutability::Not => {
                        if is_freeze {
                            PointerKind::Frozen
                        } else {
                            PointerKind::Shared
                        }
                    }
                    hir::Mutability::Mut => {
                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
                        // panic=abort mode. That was deemed right, as prior versions had many bugs
                        // in conjunction with unwinding, but later versions didn’t seem to have
                        // said issues. See issue #31681.
                        //
                        // Alas, later on we encountered a case where noalias would generate wrong
                        // code altogether even with recent versions of LLVM in *safe* code with no
                        // unwinding involved. See #54462.
                        //
                        // For now, do not enable mutable_noalias by default at all, while the
                        // issue is being figured out.
                        if tcx.sess.opts.debugging_opts.mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };

                cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: Some(kind),
                    address_space,
                })
            }

            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes). This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        tag_encoding: TagEncoding::Niche { dataful_variant, .. },
                        tag_field,
                        ..
                    } if this.fields.offset(tag_field) == offset => {
                        Some(this.for_variant(cx, dataful_variant))
                    }
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldsShape::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    // Scan fields for one whose span covers a full pointer
                    // starting at `offset`, then recurse into it.
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok().and_then(|field| {
                                if ptr_end <= field_start + field.size {
                                    // We found the right field, look inside it.
                                    let field_info =
                                        field.pointee_info_at(cx, offset - field_start);
                                    field_info
                                } else {
                                    None
                                }
                            });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.kind() {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        };

        debug!(
            "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
            offset,
            this.ty.kind(),
            pointee_info
        );

        pointee_info
    }
}
ff7c6d11 2395
impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
        use crate::ty::layout::LayoutError::*;
        // Hash the discriminant first so distinct variants never collide.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
        }
    }
}
2406
60c5eb7d
XL
impl<'tcx> ty::Instance<'tcx> {
    // NOTE(eddyb) this is private to avoid using it from outside of
    // `FnAbi::of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
    // or should go through `FnAbi` instead, to avoid losing any
    // adjustments `FnAbi::of_instance` might be performing.
    //
    // Produces the ABI-relevant signature of this instance: for plain
    // `FnDef`s the (possibly vtable-shim-adjusted) declared signature, for
    // closures and generators a synthesized signature with the environment
    // prepended as the first argument.
    fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
        // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
        let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
        match *ty.kind() {
            ty::FnDef(..) => {
                // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
                // parameters unused if they show up in the signature, but not in the `mir::Body`
                // (i.e. due to being inside a projection that got normalized, see
                // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
                // track of a polymorphization `ParamEnv` to allow normalizing later.
                let mut sig = match *ty.kind() {
                    ty::FnDef(def_id, substs) => tcx
                        .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
                        .subst(tcx, substs),
                    _ => unreachable!(),
                };

                if let ty::InstanceDef::VtableShim(..) = self.def {
                    // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
                    sig = sig.map_bound(|mut sig| {
                        let mut inputs_and_output = sig.inputs_and_output.to_vec();
                        inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                        sig
                    });
                }
                sig
            }
            ty::Closure(def_id, substs) => {
                let sig = substs.as_closure().sig();

                // Prepend the closure environment as the first argument.
                let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
                sig.map_bound(|sig| {
                    tcx.mk_fn_sig(
                        iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
                        sig.output(),
                        sig.c_variadic,
                        sig.unsafety,
                        sig.abi,
                    )
                })
            }
            ty::Generator(_, substs, _) => {
                let sig = substs.as_generator().poly_sig();

                // The generator is resumed through `Pin<&mut Self>` under a
                // fresh late-bound environment region.
                let br = ty::BoundRegion { kind: ty::BrEnv };
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

                let pin_did = tcx.require_lang_item(LangItem::Pin, None);
                let pin_adt_ref = tcx.adt_def(pin_did);
                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

                sig.map_bound(|sig| {
                    // Resuming returns `GeneratorState<Yield, Return>`.
                    let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
                    let state_adt_ref = tcx.adt_def(state_did);
                    let state_substs =
                        tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
                    let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);

                    tcx.mk_fn_sig(
                        [env_ty, sig.resume_ty].iter(),
                        &ret_ty,
                        false,
                        hir::Unsafety::Normal,
                        rustc_target::spec::abi::Abi::Rust,
                    )
                })
            }
            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
        }
    }
}
2487
/// Extension trait adding the Rust-specific `FnAbi` constructors.
pub trait FnAbiExt<'tcx, C>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
    ///
    /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
    /// instead, where the instance is a `InstanceDef::Virtual`.
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
    /// direct calls to an `fn`.
    ///
    /// NB: that includes virtual calls, which are represented by "direct calls"
    /// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    /// Shared implementation behind `of_fn_ptr` and `of_instance`;
    /// `mk_arg_type` lets each caller customize how argument layouts become
    /// `ArgAbi`s (the `Option<usize>` is the argument index, `None` for the
    /// return type).
    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        codegen_fn_attr_flags: CodegenFnAttrFlags,
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
    ) -> Self;
    /// Adjusts `self` in place for the given source-level ABI.
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
}
2519
ba9703b0
XL
2520fn fn_can_unwind(
2521 panic_strategy: PanicStrategy,
2522 codegen_fn_attr_flags: CodegenFnAttrFlags,
2523 call_conv: Conv,
2524) -> bool {
2525 if panic_strategy != PanicStrategy::Unwind {
2526 // In panic=abort mode we assume nothing can unwind anywhere, so
2527 // optimize based on this!
2528 false
2529 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2530 // If a specific #[unwind] attribute is present, use that.
2531 true
2532 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2533 // Special attribute for allocator functions, which can't unwind.
2534 false
2535 } else {
2536 if call_conv == Conv::Rust {
2537 // Any Rust method (or `extern "Rust" fn` or `extern
2538 // "rust-call" fn`) is explicitly allowed to unwind
2539 // (unless it has no-unwind attribute, handled above).
2540 true
2541 } else {
2542 // Anything else is either:
2543 //
2544 // 1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2545 //
2546 // 2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2547 //
2548 // Foreign items (case 1) are assumed to not unwind; it is
2549 // UB otherwise. (At least for now; see also
2550 // rust-lang/rust#63909 and Rust RFC 2753.)
2551 //
2552 // Items defined in Rust with non-Rust ABIs (case 2) are also
2553 // not supposed to unwind. Whether this should be enforced
2554 // (versus stating it is UB) and *how* it would be enforced
2555 // is currently under discussion; see rust-lang/rust#58794.
2556 //
2557 // In either case, we mark item as explicitly nounwind.
2558 false
2559 }
2560 }
2561}
2562
impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    /// Computes the `FnAbi` for a bare function pointer (no `Instance` known).
    ///
    /// Since nothing is known about the eventual callee, fn pointers are
    /// conservatively assumed to be able to unwind (`CodegenFnAttrFlags::UNWIND`
    /// is passed down), and no `#[track_caller]` location argument is appended.
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        // Assume that fn pointers may always unwind
        let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;

        call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
            ArgAbi::new(cx.layout_of(ty))
        })
    }

    /// Computes the `FnAbi` for a specific (monomorphic) `Instance`.
    ///
    /// Unlike `of_fn_ptr`, this consults the instance's codegen attribute
    /// flags (for unwinding), appends the hidden caller-location argument when
    /// `#[track_caller]` requires it, and special-cases the receiver of
    /// virtual (`InstanceDef::Virtual`) calls to be passed as a thin pointer.
    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        let sig = instance.fn_sig_for_fn_abi(cx.tcx());

        // `#[track_caller]` functions receive a hidden trailing
        // `caller_location_ty()` argument.
        let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
            Some(cx.tcx().caller_location_ty())
        } else {
            None
        };

        let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;

        call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
            if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // unsized `self` is passed as a pointer to `self`
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx().mk_mut_ptr(layout.ty)
                } else {
                    // A sized receiver must have a fat-pointer-like
                    // `ScalarPair` layout (data pointer + vtable pointer).
                    match layout.abi {
                        Abi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout),
                    }

                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
                    // elsewhere in the compiler as a method on a `dyn Trait`.
                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
                    // get a built-in pointer type
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        // Descend into the first non-ZST field; the actual
                        // pointer must live (transitively) inside it.
                        for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes;
                            }
                        }

                        bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
                    }

                    fat_pointer_layout.ty
                };

                // we now have a type like `*mut RcBox<dyn Trait>`
                // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
                // this is understood as a special case elsewhere in the compiler
                let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgAbi::new(layout)
        })
    }

    /// Shared implementation backing `of_fn_ptr` and `of_instance`.
    ///
    /// Normalizes the (late-bound) signature, maps the Rust-level ABI to a
    /// low-level calling convention (`Conv`), untuples the trailing argument
    /// of `"rust-call"` functions, appends `extra_args` (for C-variadic
    /// calls) and the optional `caller_location` argument, derives per-scalar
    /// argument attributes (zext/nonnull/noalias/readonly/dereferenceable)
    /// for the Rust ABIs, and finally runs `adjust_for_abi`.
    ///
    /// `mk_arg_type` constructs the initial `ArgAbi` for each type; it is
    /// called with `None` as the index for the return place.
    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        codegen_fn_attr_flags: CodegenFnAttrFlags,
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);

        // ABI computation happens post-monomorphization, so late-bound
        // regions can be erased and types normalized in `reveal_all`.
        let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);

        use rustc_target::spec::abi::Abi::*;
        let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),
            EfiApi => bug!("eficall abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,
            AvrInterrupt => Conv::AvrInterrupt,
            AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            // `"rust-call"` functions take their "real" arguments packed into
            // a trailing tuple; untuple it into individual arguments here.
            assert!(!sig.c_variadic && extra_args.is_empty());

            if let Some(input) = sig.inputs().last() {
                if let ty::Tuple(tupled_arguments) = input.kind() {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.iter().map(|k| k.expect_ty()).collect()
                } else {
                    bug!(
                        "argument to function with \"rust-call\" ABI \
                         is not a tuple"
                    );
                }
            } else {
                bug!(
                    "argument to function with \"rust-call\" ABI \
                     is not a tuple"
                );
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args.to_vec()
        };

        // Target quirks for zero-sized arguments, consumed by `arg_of` below.
        let target = &cx.tcx().sess.target;
        let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
        let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
        let linux_s390x_gnu_like =
            target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
        let linux_sparc64_gnu_like =
            target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
        let linux_powerpc_gnu_like =
            target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
        let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &Scalar,
                                      layout: TyAndLayout<'tcx>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.ext(ArgExtension::Zext);
                return;
            }

            // Only pointer types handled below.
            if scalar.value != Pointer {
                return;
            }

            // Non-wrapping valid ranges that exclude zero imply a non-null
            // pointer (wrapping ranges, start > end, are skipped here).
            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` (`UniqueOwned`) are not necessarily dereferenceable
                    // for the entire duration of the function as they can be deallocated
                    // at any time. Set their valid size to 0.
                    attrs.pointee_size = match kind {
                        PointerKind::UniqueOwned => Size::ZERO,
                        _ => pointee.size,
                    };

                    // `Box` pointer parameters never alias because ownership is transferred
                    // `&mut` pointer parameters never alias other parameters,
                    // or mutable global data
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };

        // Builds the `ArgAbi` for one argument (or, with `arg_idx == None`,
        // the return place), applying the ZST-ignore quirks and the Rust
        // scalar/scalar-pair attribute adjustments above.
        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
                if is_return
                    || rust_abi
                    || (!win_x64_gnu
                        && !linux_s390x_gnu_like
                        && !linux_sparc64_gnu_like
                        && !linux_powerpc_gnu_like)
                {
                    arg.mode = PassMode::Ignore;
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
                    adjust_for_rust_scalar(
                        &mut b_attrs,
                        b,
                        arg.layout,
                        // The second scalar starts after the first, aligned up
                        // to its own ABI alignment.
                        a.value.size(cx).align_to(b.value.align(cx).abi),
                        false,
                    );
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
                }
            }

            arg
        };

        let mut fn_abi = FnAbi {
            ret: arg_of(sig.output(), None),
            // Argument order: declared inputs, then C-variadic extras, then
            // the hidden `#[track_caller]` location (if any).
            args: inputs
                .iter()
                .cloned()
                .chain(extra_args)
                .chain(caller_location)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect(),
            c_variadic: sig.c_variadic,
            fixed_count: inputs.len(),
            conv,
            can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
        };
        fn_abi.adjust_for_abi(cx, sig.abi);
        debug!("FnAbi::new_internal = {:?}", fn_abi);
        fn_abi
    }

    /// Applies ABI-specific adjustments to the computed passing modes.
    ///
    /// For the Rust ABIs (`Rust`, `RustCall`, `RustIntrinsic`,
    /// `PlatformIntrinsic`): SIMD vectors are forced indirect on targets with
    /// `simd_types_indirect` (except for `PlatformIntrinsic`); aggregates up
    /// to two pointers in size are cast to an integer register, larger or
    /// unsized ones are made indirect. `SpecAbi::Unadjusted` is left alone
    /// entirely; every other ABI defers to the target C ABI code
    /// (`adjust_for_cabi`), aborting compilation on error.
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
        if abi == SpecAbi::Unadjusted {
            return;
        }

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
        {
            let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() {
                    return;
                }

                match arg.layout.abi {
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd used whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                    Abi::Vector { .. }
                        if abi != SpecAbi::PlatformIntrinsic
                            && cx.tcx().sess.target.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return;
                    }

                    _ => return,
                }

                // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
                // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
                let max_by_val_size = Pointer.size(cx) * 2;
                let size = arg.layout.size;

                if arg.layout.is_unsized() || size > max_by_val_size {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // a LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg { kind: RegKind::Integer, size });
                }
            };
            // The Rust-ABI fixup applies to the return place and every argument.
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.tcx().sess.fatal(&msg);
        }
    }
}