// src/librustc_middle/ty/layout.rs (upstream version 1.46.0~beta.2)
use crate::ich::StableHashingContext;
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};

use rustc_ast::ast::{self, IntTy, UintTy};
use rustc_attr as attr;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::lang_items::{GeneratorStateLangItem, PinTypeLangItem};
use rustc_index::bit_set::BitSet;
use rustc_index::vec::{Idx, IndexVec};
use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::DUMMY_SP;
use rustc_target::abi::call::{
    ArgAbi, ArgAttribute, ArgAttributes, Conv, FnAbi, PassMode, Reg, RegKind,
};
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::num::NonZeroUsize;
use std::ops::Bound;

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
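    ///
    /// For example (an illustration, not in the original source): with
    /// `min = -1`, `max = 100`, and no `#[repr]` hint, both bounds fit in
    /// `I8`, and since `min` is negative the result is `(I8, true)`.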
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!(
                    "Integer::repr_discr: `#[repr]` hint too small for \
                     discriminant range of enum `{}`",
                    ty
                )
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
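
// Illustration (not part of the original source): for `&[u8]`, field
// `FAT_PTR_ADDR` is the data pointer and field `FAT_PTR_EXTRA` is the
// length; for `&dyn Trait`, `FAT_PTR_EXTRA` is the vtable pointer instead.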

#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>),
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => write!(f, "the type `{:?}` has an unknown layout", ty),
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx Layout, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
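// For example (an illustration): `invert_mapping(&[2, 0, 1])` returns
// `[1, 2, 0]`; source field 0 sits at memory position 2, so memory slot 2
// maps back to field 0.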
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
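    // A worked example for `scalar_pair` below (illustration, not in the
    // original source): on a 64-bit target, pairing an `Int(I8, _)` scalar
    // with a `Pointer` scalar gives `b_offset = 8` (the byte is padded up
    // to the pointer's alignment) and `size = 16`.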
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }

    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays
                    // optimal regardless of the prefix.
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }
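        // Illustrative example (not in the original source): for
        // `struct S { a: u8, b: u32, c: u16 }`, the sort above picks memory
        // order (b, c, a), giving offsets 0, 4, 6 and an 8-byte size, where
        // source order would need 12 bytes due to padding after `a` and `c`.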

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche.clone() {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs, and we need them to all start at 0.
            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                    // We have exactly one non-ZST field.
                    (Some((i, field)), None, None) => {
                        // Field fills the struct and it has a scalar or scalar pair ABI.
                        if offsets[i].bytes() == 0
                            && align.abi == field.align.abi
                            && size == field.size
                        {
                            match field.abi {
                                // For plain scalars, or vectors of them, we can't unpack
                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                    abi = field.abi.clone();
                                }
                                // But scalar pairs are Rust-specific and get
                                // treated as aggregates by C ABIs anyway.
                                Abi::ScalarPair(..) => {
                                    abi = field.abi.clone();
                                }
                                _ => {}
                            }
                        }
                    }

                    // Two non-ZST fields, and they're both scalars.
                    (
                        Some((
                            i,
                            &TyAndLayout {
                                layout: &Layout { abi: Abi::Scalar(ref a), .. }, ..
                            },
                        )),
                        Some((
                            j,
                            &TyAndLayout {
                                layout: &Layout { abi: Abi::Scalar(ref b), .. }, ..
                            },
                        )),
                        None,
                    ) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = self.scalar_pair(a.clone(), b.clone());
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if offsets[i] == pair_offsets[0]
                            && offsets[j] == pair_offsets[1]
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }

                    _ => {}
                }
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
        };
        let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));

        let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types_or_consts());

        Ok(match ty.kind {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I8, false), valid_range: 0..=1 },
            )),
            ty::Char => tcx.intern_layout(Layout::scalar(
                self,
                Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
            )),
            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(Layout::scalar(self, ptr))
            }

            // The never type.
            ty::Never => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Primitive,
                abi: Abi::Uninhabited,
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Potentially-wide pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part)),
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi,
                    largest_niche,
                    align: element.align,
                    size,
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count: 0 },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO,
                })
            }
            ty::Str => tcx.intern_layout(Layout {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: dl.i8_align,
                size: Size::ZERO,
            }),

            // Odd unit types.
            ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(
                    ty,
                    &[],
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!(),
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(_, ref substs) => {
                let tys = substs.as_closure().upvar_tys();
                univariant(
                    &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized,
                )?
            }

            ty::Tuple(tys) => {
                let kind =
                    if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

                univariant(
                    &tys.iter()
                        .map(|k| self.layout_of(k.expect_ty()))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    kind,
                )?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx);
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!(
                            "monomorphising SIMD type `{}` with \
                             a non-machine element type `{}`",
                            ty, element.ty
                        ));
                    }
                };
                let size =
                    element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldsShape::Array { stride: element.size, count },
                    abi: Abi::Vector { element: scalar, count },
                    largest_niche: element.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def
                    .variants
                    .iter()
                    .map(|v| {
                        v.fields
                            .iter()
                            .map(|field| self.layout_of(field.ty(tcx, substs)))
                            .collect::<Result<Vec<_>, _>>()
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align =
                        if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector { element: scalar_unit(x.value), count: *count }
                                }
                                Abi::Uninhabited | Abi::Aggregate { .. } => {
                                    Abi::Aggregate { sized: true }
                                }
                            };

                            if size == Size::ZERO {
                                // first non-ZST field: initialize `abi`
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABIs: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }
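                    // E.g. (an illustration): `union U { a: u32, b: u32 }`
                    // keeps `Abi::Scalar`, while `union V { a: u32, b: f32 }`
                    // falls back to `Abi::Aggregate` because the scalar
                    // kinds differ.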

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(Layout {
                        variants: Variants::Single { index },
                        fields: FieldsShape::Union(
                            NonZeroUsize::new(variants[index].len())
                                .ok_or(LayoutError::Unknown(ty))?,
                        ),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi),
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
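                // E.g. (an illustration): in `enum E { A(u32, !), B }`,
                // variant `A` is uninhabited but its `u32` field is not a
                // ZST, so `A` is still "present" and gets space reserved.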
                let absent = |fields: &[TyAndLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants
                        .iter_enumerated()
                        .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    present_first @ Some(_) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can still compute the
                    // field offsets.
                    None => Some(VariantIdx::new(0)),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].is_empty() {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized =
                            tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized {
                            StructKind::MaybeUnsized
                        } else {
                            StructKind::AlwaysSized
                        }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = if def.repr.hide_niche() {
                                None
                            } else {
                                Niche::from_scalar(dl, Size::ZERO, scalar.clone())
                            };
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }

                // At this point, we have handled all unions and
                // structs. (We have also handled univariant enums
                // that allow representation optimization.)
                assert!(def.is_enum());

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def
                    .variants
                    .iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                // Niche-filling enum optimization.
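                // E.g. (an illustration): `Option<&T>` needs no separate
                // tag, because the all-zeros bit pattern is invalid for
                // `&T` and can encode `None`, leaving `Option<&T>`
                // pointer-sized.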
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end().as_u32()
                            - niche_variants.start().as_u32()
                            + 1) as u128;

                        // Find the field with the largest niche
                        let niche_candidate = variants[i]
                            .iter()
                            .enumerate()
                            .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
                            .max_by_key(|(_, niche)| niche.available(dl));

                        if let Some((field_index, niche, (niche_start, niche_scalar))) =
                            niche_candidate.and_then(|(field_index, niche)| {
                                Some((field_index, niche, niche.reserve(self, count)?))
                            })
                        {
                            let mut align = dl.aggregate_align;
                            let st = variants
                                .iter_enumerated()
                                .map(|(j, v)| {
                                    let mut st = self.univariant_uninterned(
                                        ty,
                                        v,
                                        &def.repr,
                                        StructKind::AlwaysSized,
                                    )?;
                                    st.variants = Variants::Single { index: j };

                                    align = align.max(st.align);

                                    Ok(st)
                                })
                                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
                                Abi::Uninhabited
                            } else {
                                match st[i].abi {
                                    Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                    Abi::ScalarPair(ref first, ref second) => {
                                        // We need to use scalar_unit to reset the
                                        // valid range to the maximal one for that
                                        // primitive, because only the niche is
                                        // guaranteed to be initialised, not the
                                        // other primitive.
                                        if offset.bytes() == 0 {
                                            Abi::ScalarPair(
                                                niche_scalar.clone(),
                                                scalar_unit(second.value),
                                            )
                                        } else {
                                            Abi::ScalarPair(
                                                scalar_unit(first.value),
                                                niche_scalar.clone(),
                                            )
                                        }
                                    }
                                    _ => Abi::Aggregate { sized: true },
                                }
                            };

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            return Ok(tcx.intern_layout(Layout {
                                variants: Variants::Multiple {
                                    tag: niche_scalar,
                                    tag_encoding: TagEncoding::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    tag_field: 0,
                                    variants: st,
                                },
                                fields: FieldsShape::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0],
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            }));
                        }
                    }
                }

                let (mut min, mut max) = (i128::MAX, i128::MIN);
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min {
                        min = x;
                    }
                    if x > max {
                        max = x;
                    }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::MAX, i128::MIN) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants
                    .iter_enumerated()
                    .map(|(i, field_layouts)| {
                        let mut st = self.univariant_uninterned(
                            ty,
                            &field_layouts,
                            &def.repr,
                            StructKind::Prefixed(min_ity.size(), prefix_align),
                        )?;
                        st.variants = Variants::Single { index: i };
                        // Find the first field we can't move later
                        // to make room for a larger discriminant.
                        for field in
                            st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
                        {
                            if !field.is_zst() || field.align.abi.bytes() != 1 {
                                start_align = start_align.min(field.align.abi);
                                break;
                            }
                        }
                        size = cmp::max(size, st.size);
                        align = align.max(st.align);
                        Ok(st)
                    })
                    .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck for
                    // some reason at this point (based on values discriminant can take on). Mostly
                    // because this discriminant will be loaded, and then stored into variable of
                    // type calculated by typeck. Consider such case (a bug): typeck decided on
                    // byte-sized discriminant, but layout thinks we need 16 bits to store all
                    // discriminant values. That would be a bug, because then, in codegen, in order
                    // to store this 16-bit discriminant into 8-bit sized temporary some of the
                    // space necessary to represent would have to be discarded (or layout is wrong
                    // on thinking it needs 16 bits)
                    bug!(
                        "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                        min_ity,
                        typeck_ity
                    );
                    // However, it is fine to make discr type however large (as an optimisation)
                    // after this point – we’ll just truncate the value we load in codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldsShape::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!(),
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
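                    // E.g. (an illustration): `enum E { A(u32), B(u32) }`
                    // can use ScalarPair(tag, u32), since every variant has
                    // a single non-ZST scalar field of the same primitive
                    // at the same offset.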
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldsShape::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields =
                            field_layouts.iter().zip(offsets).filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!(),
                        };
                        if pair_offsets[0] == Size::ZERO
                            && pair_offsets[1] == *offset
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());

                tcx.intern_layout(Layout {
                    variants: Variants::Multiple {
                        tag,
                        tag_encoding: TagEncoding::Direct,
                        tag_field: 0,
                        variants: layout_variants,
                    },
                    fields: FieldsShape::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0],
                    },
                    largest_niche,
                    abi,
                    align,
                    size,
                })
            }

            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Bound(..) | ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
                bug!("Layout::compute: unexpected type `{}`", ty)
            }

            ty::Param(_) | ty::Error(_) => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
}

/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    Unassigned,
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    Ineligible(Option<u32>),
}

// When laying out generators, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.
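//
// A sketch of the resulting shape (an illustration, not from the original
// source):
//
//   [ upvars... | tag | promoted (ineligible) locals | variant overlap area ]
//
// where the overlap area is reused by the locals assigned to exactly one
// variant.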
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Compute the eligibility and assignment of each local.
    fn generator_saved_local_eligibility(
        &self,
        info: &GeneratorLayout<'tcx>,
    ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
        use SavedLocalEligibility::*;

        let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
            IndexVec::from_elem_n(Unassigned, info.field_tys.len());

        // The saved locals not eligible for overlap. These will get
        // "promoted" to the prefix of our generator.
        let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

        // Figure out which of our saved locals are fields in only
        // one variant. The rest are deemed ineligible for overlap.
        for (variant_index, fields) in info.variant_fields.iter_enumerated() {
            for local in fields {
                match assignments[*local] {
                    Unassigned => {
                        assignments[*local] = Assigned(variant_index);
                    }
                    Assigned(idx) => {
                        // We've already seen this local at another suspension
                        // point, so it is no longer a candidate.
                        trace!(
                            "removing local {:?} in >1 variant ({:?}, {:?})",
                            local,
                            variant_index,
                            idx
                        );
                        ineligible_locals.insert(*local);
                        assignments[*local] = Ineligible(None);
                    }
                    Ineligible(_) => {}
                }
            }
        }

        // Next, check every pair of eligible locals to see if they
        // conflict.
        for local_a in info.storage_conflicts.rows() {
            let conflicts_a = info.storage_conflicts.count(local_a);
            if ineligible_locals.contains(local_a) {
                continue;
            }

            for local_b in info.storage_conflicts.iter(local_a) {
                // local_a and local_b are storage live at the same time, therefore they
                // cannot overlap in the generator layout. The only way to guarantee
                // this is if they are in the same variant, or one is ineligible
                // (which means it is stored in every variant).
                if ineligible_locals.contains(local_b)
                    || assignments[local_a] == assignments[local_b]
                {
                    continue;
                }

                // If they conflict, we will choose one to make ineligible.
                // This is not always optimal; it's just a greedy heuristic that
                // seems to produce good results most of the time.
                let conflicts_b = info.storage_conflicts.count(local_b);
                let (remove, other) =
                    if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
                ineligible_locals.insert(remove);
                assignments[remove] = Ineligible(None);
                trace!("removing local {:?} due to conflict with {:?}", remove, other);
            }
        }

        // Count the number of variants in use. If only one of them, then it is
        // impossible to overlap any locals in our layout. In this case it's
        // always better to make the remaining locals ineligible, so we can
        // lay them out with the other locals in the prefix and eliminate
        // unnecessary padding bytes.
        {
            let mut used_variants = BitSet::new_empty(info.variant_fields.len());
            for assignment in &assignments {
                if let Assigned(idx) = assignment {
                    used_variants.insert(*idx);
                }
            }
            if used_variants.count() < 2 {
                for assignment in assignments.iter_mut() {
                    *assignment = Ineligible(None);
                }
                ineligible_locals.insert_all();
            }
        }

        // Write down the order of our locals that will be promoted to the prefix.
        {
            for (idx, local) in ineligible_locals.iter().enumerate() {
                assignments[local] = Ineligible(Some(idx as u32));
            }
        }
        debug!("generator saved local assignments: {:?}", assignments);

        (ineligible_locals, assignments)
    }

    /// Compute the full generator layout.
    fn generator_layout(
        &self,
        ty: Ty<'tcx>,
        def_id: hir::def_id::DefId,
        substs: SubstsRef<'tcx>,
    ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        use SavedLocalEligibility::*;
        let tcx = self.tcx;

        let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);

        let info = tcx.generator_layout(def_id);
        let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);

        // Build a prefix layout, including "promoting" all ineligible
        // locals as part of the prefix. We compute the layout of all of
        // these fields at once to get optimal packing.
        let tag_index = substs.as_generator().prefix_tys().count();

        // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
        let max_discr = (info.variant_fields.len() - 1) as u128;
        let discr_int = Integer::fit_unsigned(max_discr);
        let discr_int_ty = discr_int.to_ty(tcx, false);
        let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
        let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
        let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };

        let promoted_layouts = ineligible_locals
            .iter()
            .map(|local| subst_field(info.field_tys[local]))
            .map(|ty| tcx.mk_maybe_uninit(ty))
            .map(|ty| self.layout_of(ty));
        let prefix_layouts = substs
            .as_generator()
            .prefix_tys()
            .map(|ty| self.layout_of(ty))
            .chain(iter::once(Ok(tag_layout)))
            .chain(promoted_layouts)
            .collect::<Result<Vec<_>, _>>()?;
        let prefix = self.univariant_uninterned(
            ty,
            &prefix_layouts,
            &ReprOptions::default(),
            StructKind::AlwaysSized,
        )?;

        let (prefix_size, prefix_align) = (prefix.size, prefix.align);

        // Split the prefix layout into the "outer" fields (upvars and
        // discriminant) and the "promoted" fields. Promoted fields will
        // get included in each variant that requested them in
        // GeneratorLayout.
        debug!("prefix = {:#?}", prefix);
        let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
            FieldsShape::Arbitrary { mut offsets, memory_index } => {
                let mut inverse_memory_index = invert_mapping(&memory_index);

                // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
                // "outer" and "promoted" fields respectively.
                let b_start = (tag_index + 1) as u32;
                let offsets_b = offsets.split_off(b_start as usize);
                let offsets_a = offsets;

                // Disentangle the "a" and "b" components of `inverse_memory_index`
                // by preserving the order but keeping only one disjoint "half" each.
                // FIXME(eddyb) build a better abstraction for permutations, if possible.
                let inverse_memory_index_b: Vec<_> =
                    inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
                inverse_memory_index.retain(|&i| i < b_start);
                let inverse_memory_index_a = inverse_memory_index;
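
                // E.g. (an illustration, not in the original source): with
                // `inverse_memory_index = [1, 3, 0, 2]` and `b_start = 2`,
                // the "b" half keeps entries 3 and 2, rebased to `[1, 0]`,
                // and the "a" half keeps `[1, 0]`; each remains ordered by
                // increasing memory offset.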
1455
1456 // Since `inverse_memory_index_{a,b}` each only refer to their
1457 // respective fields, they can be safely inverted
1458 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1459 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1460
dfeec247 1461 let outer_fields =
ba9703b0 1462 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
dc9dc135
XL
1463 (outer_fields, offsets_b, memory_index_b)
1464 }
1465 _ => bug!(),
1466 };

        let mut size = prefix.size;
        let mut align = prefix.align;
        let variants = info
            .variant_fields
            .iter_enumerated()
            .map(|(index, variant_fields)| {
                // Only include overlap-eligible fields when we compute our variant layout.
                let variant_only_tys = variant_fields
                    .iter()
                    .filter(|local| match assignments[**local] {
                        Unassigned => bug!(),
                        Assigned(v) if v == index => true,
                        Assigned(_) => bug!("assignment does not match variant"),
                        Ineligible(_) => false,
                    })
                    .map(|local| subst_field(info.field_tys[*local]));

                let mut variant = self.univariant_uninterned(
                    ty,
                    &variant_only_tys
                        .map(|ty| self.layout_of(ty))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::Prefixed(prefix_size, prefix_align.abi),
                )?;
                variant.variants = Variants::Single { index };

                let (offsets, memory_index) = match variant.fields {
                    FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
                    _ => bug!(),
                };

                // Now, stitch the promoted and variant-only fields back together in
                // the order they are mentioned by our GeneratorLayout.
                // Because we only use some subset (that can differ between variants)
                // of the promoted fields, we can't just pick those elements of the
                // `promoted_memory_index` (as we'd end up with gaps).
                // So instead, we build an "inverse memory_index", as if all of the
                // promoted fields were being used, but leave the elements not in the
                // subset as `INVALID_FIELD_IDX`, which we can filter out later to
                // obtain a valid (bijective) mapping.
                const INVALID_FIELD_IDX: u32 = !0;
                let mut combined_inverse_memory_index =
                    vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
                let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
                let combined_offsets = variant_fields
                    .iter()
                    .enumerate()
                    .map(|(i, local)| {
                        let (offset, memory_index) = match assignments[*local] {
                            Unassigned => bug!(),
                            Assigned(_) => {
                                let (offset, memory_index) =
                                    offsets_and_memory_index.next().unwrap();
                                (offset, promoted_memory_index.len() as u32 + memory_index)
                            }
                            Ineligible(field_idx) => {
                                let field_idx = field_idx.unwrap() as usize;
                                (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                            }
                        };
                        combined_inverse_memory_index[memory_index as usize] = i as u32;
                        offset
                    })
                    .collect();

                // Remove the unused slots and invert the mapping to obtain the
                // combined `memory_index` (also see previous comment).
                combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
                let combined_memory_index = invert_mapping(&combined_inverse_memory_index);

                variant.fields = FieldsShape::Arbitrary {
                    offsets: combined_offsets,
                    memory_index: combined_memory_index,
                };

                size = size.max(variant.size);
                align = align.max(variant.align);
                Ok(variant)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
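
        // Illustrative sketch of the stitching above (made-up shapes, not
        // from the source): if there are three promoted slots but a variant
        // uses only promoted fields {0, 2} plus one variant-only field, the
        // combined inverse index starts as `[!0, !0, !0, !0]`, the used
        // slots are written during the map, the `retain()` pass drops the
        // leftover `!0` entries, and the final `invert_mapping()` produces
        // a dense, bijective `memory_index` covering exactly the fields that
        // variant mentions.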

        size = size.align_to(align.abi);

        let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
        {
            Abi::Uninhabited
        } else {
            Abi::Aggregate { sized: true }
        };

        let layout = tcx.intern_layout(Layout {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: tag_index,
                variants,
            },
            fields: outer_fields,
            abi,
            largest_niche: prefix.largest_niche,
            size,
            align,
        });
        debug!("generator layout ({:?}): {:#?}", ty, layout);
        Ok(layout)
    }

    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline(always)]
    fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, maybe record layouts
        // for dumping later.
        if self.tcx.sess.opts.debugging_opts.print_type_sizes {
            self.record_layout_for_printing_outlined(layout)
        }
    }

    fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
        // Ignore layouts that are done with non-empty environments or
        // non-monomorphic layouts, as the user only wants to see the stuff
        // resulting from the final codegen session.
        if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
            return;
        }

        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.record_type_size(
                kind,
                type_desc,
                layout.align.abi,
                layout.size,
                packed,
                opt_discr_size,
                variants,
            );
        };

        let adt_def = match layout.ty.kind {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.pack.is_some();

        let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds
                .iter()
                .enumerate()
                .map(|(i, &name)| match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                })
                .collect();

            VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
                align: layout.align.abi.bytes(),
                size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
                fields: field_info,
            }
        };

        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(
                        adt_kind.into(),
                        adt_packed,
                        None,
                        vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
                    );
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::Multiple { ref tag, ref tag_encoding, .. } => {
                debug!(
                    "print-type-size `{:#?}` adt general variants def {}",
                    layout.ty,
                    adt_def.variants.len()
                );
                let variant_infos: Vec<_> = adt_def
                    .variants
                    .iter_enumerated()
                    .map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(
                            Some(variant_def.ident),
                            &fields,
                            layout.for_variant(self, i),
                        )
                    })
                    .collect();
                record(
                    adt_kind.into(),
                    adt_packed,
                    match tag_encoding {
                        TagEncoding::Direct => Some(tag.value.size(self)),
                        _ => None,
                    },
                    variant_infos,
                );
            }
        }
    }
}

/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>,
    },
}
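
// Illustrative examples (not part of the original source): for a concrete
// type like `u64`, `SizeSkeleton::compute` returns
// `Ok(SizeSkeleton::Known(Size::from_bytes(8)))` on typical targets, while
// for `&T` with `T` a type parameter (whose full layout cannot be computed)
// it returns `Ok(SizeSkeleton::Pointer { non_zero: true, tail: T })`.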

impl<'tcx> SizeSkeleton<'tcx> {
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types_or_consts());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err,
        };

        match ty.kind {
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                match tail.kind {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types_or_consts());
                        Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(&tail) })
                    }
                    _ => bug!(
                        "SizeSkeleton::compute({}): layout errored ({}), yet \
                         tail `{}` is not a type parameter or a projection",
                        ty,
                        err,
                        tail
                    ),
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i]
                        .fields
                        .iter()
                        .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer { .. } => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero
                                || match tcx.layout_scalar_valid_range(def.did) {
                                    (Bound::Included(start), Bound::Unbounded) => start > 0,
                                    (Bound::Included(start), Bound::Included(end)) => {
                                        0 < start && start < end
                                    }
                                    _ => false,
                                },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
                    | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer { non_zero: false, tail })
                    }
                    _ => Err(err),
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err),
        }
    }

    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
                a == b
            }
            _ => false,
        }
    }
}
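
// Illustrative use of `same_size` (not from the source): the skeletons of
// `&T` and `Option<&T>` for a type parameter `T` are both
// `Pointer { tail: T, .. }` (the latter via the nullable-pointer case
// above), so they compare equal and a `transmute` between them can be
// accepted without knowing `T`'s full layout.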

pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}

pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}

impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self
    }
}

impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.tcx()
    }
}

pub type TyAndLayout<'tcx> = ::rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;

impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let layout = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyAndLayout { ty, layout };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}

impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let layout = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyAndLayout { ty, layout };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}

// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(
        self,
        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl ty::query::TyCtxtAt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(
        self,
        param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
    ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
        cx.layout_of(param_env_and_ty.value)
    }
}
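
// Illustrative caller's-eye view (assuming some `tcx: TyCtxt<'tcx>`,
// `param_env`, and `ty` in scope): these helpers let users write
// `let layout = tcx.layout_of(param_env.and(ty))?;` and then read
// `layout.size` and `layout.align`, without constructing a `LayoutCx`
// themselves.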

impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn for_variant(
        this: TyAndLayout<'tcx>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'tcx> {
        let layout = match this.variants {
            Variants::Single { index }
                // If all variants but one are uninhabited, the variant layout is the enum layout.
                if index == variant_index &&
                // Don't confuse variants of uninhabited enums with the enum itself.
                // For more details see https://github.com/rust-lang/rust/issues/69763.
                this.fields != FieldsShape::Primitive =>
            {
                this.layout
            }

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(original_layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.kind {
                    ty::Adt(def, _) if def.variants.is_empty() =>
                        bug!("for_variant called on zero-variant enum"),
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!(),
                };
                let tcx = cx.tcx();
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: variant_index },
                    fields: match NonZeroUsize::new(fields) {
                        Some(fields) => FieldsShape::Union(fields),
                        None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
                    },
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO,
                })
            }

            Variants::Multiple { ref variants, .. } => &variants[variant_index],
        };

        assert_eq!(layout.variants, Variants::Single { index: variant_index });

        TyAndLayout { ty: this.ty, layout }
    }

    fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
        let tcx = cx.tcx();
        let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
            let layout = Layout::scalar(cx, tag.clone());
            MaybeResult::from(Ok(TyAndLayout {
                layout: tcx.intern_layout(layout),
                ty: tag.value.to_ty(tcx),
            }))
        };

        cx.layout_of(match this.ty.kind {
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::FnPtr(_)
            | ty::Never
            | ty::FnDef(..)
            | ty::GeneratorWitness(..)
            | ty::Foreign(..)
            | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat `*T` type as its own thin pointer data field.
                // This provides information about, e.g., DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldsShape` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                    };
                    return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(
                        |mut ptr_layout| {
                            ptr_layout.ty = this.ty;
                            ptr_layout
                        },
                    ));
                }

                match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind {
                    ty::Slice(_) | ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.usize, 3))
                        /* FIXME: use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
                }
            }

            // Arrays and slices.
            ty::Array(element, _) | ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(_, ref substs) => substs.as_closure().upvar_tys().nth(i).unwrap(),

            ty::Generator(def_id, ref substs, _) => match this.variants {
                Variants::Single { index } => substs
                    .as_generator()
                    .state_tys(def_id, tcx)
                    .nth(index.as_usize())
                    .unwrap()
                    .nth(i)
                    .unwrap(),
                Variants::Multiple { ref tag, tag_field, .. } => {
                    if i == tag_field {
                        return tag_layout(tag);
                    }
                    substs.as_generator().prefix_tys().nth(i).unwrap()
                }
            },

            ty::Tuple(tys) => tys[i].expect_ty(),

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => this.ty.simd_type(tcx),

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => def.variants[index].fields[i].ty(tcx, substs),

                    // Discriminant field for enums (where applicable).
                    Variants::Multiple { ref tag, .. } => {
                        assert_eq!(i, 0);
                        return tag_layout(tag);
                    }
                }
            }

            ty::Projection(_)
            | ty::Bound(..)
            | ty::Placeholder(..)
            | ty::Opaque(..)
            | ty::Param(_)
            | ty::Infer(_)
            | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
        })
    }
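
    // Illustrative example for `field` (not from the source): if `this.ty`
    // is `&[u8]`, then `field(this, cx, 0)` yields the thin data pointer
    // (a `&'static mut ()` layout re-typed as `&[u8]`, per the fat-pointer
    // case above) and `field(this, cx, 1)` yields the `usize` length
    // metadata.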

    fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
        match this.ty.kind {
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: None,
                })
            }

            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let tcx = cx.tcx();
                let is_freeze = ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env());
                let kind = match mt {
                    hir::Mutability::Not => {
                        if is_freeze {
                            PointerKind::Frozen
                        } else {
                            PointerKind::Shared
                        }
                    }
                    hir::Mutability::Mut => {
                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
                        // panic=abort mode. That was deemed right, as prior versions had many bugs
                        // in conjunction with unwinding, but later versions didn’t seem to have
                        // said issues. See issue #31681.
                        //
                        // Alas, later on we encountered a case where noalias would generate wrong
                        // code altogether even with recent versions of LLVM in *safe* code with no
                        // unwinding involved. See #54462.
                        //
                        // For now, do not enable mutable_noalias by default at all, while the
                        // issue is being figured out.
                        if tcx.sess.opts.debugging_opts.mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };

                cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: Some(kind),
                })
            }

            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes). This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        tag_encoding: TagEncoding::Niche { dataful_variant, .. },
                        tag_field,
                        ..
                    } if this.fields.offset(tag_field) == offset => {
                        Some(this.for_variant(cx, dataful_variant))
                    }
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldsShape::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok().and_then(|field| {
                                if ptr_end <= field_start + field.size {
                                    // We found the right field, look inside it.
                                    field.pointee_info_at(cx, offset - field_start)
                                } else {
                                    None
                                }
                            });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.kind {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        }
    }
}
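
// Illustrative example for `pointee_info_at` (not from the source): for
// `Option<&u32>` the niche-encoded tag sits at offset 0, so a query at
// `Size::ZERO` descends into the dataful `Some` variant and reports the
// size and alignment of `u32` with `safe == Some(PointerKind::Frozen)`,
// which is what permits `dereferenceable_or_null` on such a parameter.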

impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
        use crate::ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
        }
    }
}

impl<'tcx> ty::Instance<'tcx> {
    // NOTE(eddyb) this is private to avoid using it from outside of
    // `FnAbi::of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
    // or should go through `FnAbi` instead, to avoid losing any
    // adjustments `FnAbi::of_instance` might be performing.
    fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
        let ty = self.monomorphic_ty(tcx);
        match ty.kind {
            ty::FnDef(..) |
            // Shims currently have type FnPtr. Not sure this should remain.
            ty::FnPtr(_) => {
                let mut sig = ty.fn_sig(tcx);
                if let ty::InstanceDef::VtableShim(..) = self.def {
                    // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
                    sig = sig.map_bound(|mut sig| {
                        let mut inputs_and_output = sig.inputs_and_output.to_vec();
                        inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                        sig
                    });
                }
                sig
            }
            ty::Closure(def_id, substs) => {
                let sig = substs.as_closure().sig();

                let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
                sig.map_bound(|sig| tcx.mk_fn_sig(
                    iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
                    sig.output(),
                    sig.c_variadic,
                    sig.unsafety,
                    sig.abi
                ))
            }
            ty::Generator(_, substs, _) => {
                let sig = substs.as_generator().poly_sig();

                let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

                let pin_did = tcx.require_lang_item(PinTypeLangItem, None);
                let pin_adt_ref = tcx.adt_def(pin_did);
                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

                sig.map_bound(|sig| {
                    let state_did = tcx.require_lang_item(GeneratorStateLangItem, None);
                    let state_adt_ref = tcx.adt_def(state_did);
                    let state_substs = tcx.intern_substs(&[
                        sig.yield_ty.into(),
                        sig.return_ty.into(),
                    ]);
                    let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);

                    tcx.mk_fn_sig(
                        [env_ty, sig.resume_ty].iter(),
                        &ret_ty,
                        false,
                        hir::Unsafety::Normal,
                        rustc_target::spec::abi::Abi::Rust
                    )
                })
            }
            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty)
        }
    }
}
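
// Sketch of the signature produced for generators above (illustrative): a
// generator `G` yielding `Y` and returning `R` gets the ABI-level signature
// `fn(Pin<&mut G>, Resume) -> GeneratorState<Y, R>`, where `Resume` is the
// `sig.resume_ty` of the generator.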

pub trait FnAbiExt<'tcx, C>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
    ///
    /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
    /// instead, where the instance is an `InstanceDef::Virtual`.
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
    /// direct calls to an `fn`.
    ///
    /// NB: that includes virtual calls, which are represented by "direct calls"
    /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        codegen_fn_attr_flags: CodegenFnAttrFlags,
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
    ) -> Self;
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
}
2378
ba9703b0
XL
2379fn fn_can_unwind(
2380 panic_strategy: PanicStrategy,
2381 codegen_fn_attr_flags: CodegenFnAttrFlags,
2382 call_conv: Conv,
2383) -> bool {
2384 if panic_strategy != PanicStrategy::Unwind {
2385 // In panic=abort mode we assume nothing can unwind anywhere, so
2386 // optimize based on this!
2387 false
2388 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2389 // If a specific #[unwind] attribute is present, use that.
2390 true
2391 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2392 // Special attribute for allocator functions, which can't unwind.
2393 false
2394 } else {
2395 if call_conv == Conv::Rust {
2396 // Any Rust method (or `extern "Rust" fn` or `extern
2397 // "rust-call" fn`) is explicitly allowed to unwind
2398 // (unless it has no-unwind attribute, handled above).
2399 true
2400 } else {
2401 // Anything else is either:
2402 //
2403 // 1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2404 //
2405 // 2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2406 //
2407 // Foreign items (case 1) are assumed to not unwind; it is
2408 // UB otherwise. (At least for now; see also
2409 // rust-lang/rust#63909 and Rust RFC 2753.)
2410 //
2411 // Items defined in Rust with non-Rust ABIs (case 2) are also
2412 // not supposed to unwind. Whether this should be enforced
2413 // (versus stating it is UB) and *how* it would be enforced
2414 // is currently under discussion; see rust-lang/rust#58794.
2415 //
2416 // In either case, we mark item as explicitly nounwind.
2417 false
2418 }
2419 }
2420}
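
// For example (illustrative): under `-C panic=abort` every function is
// treated as nounwind; with `-C panic=unwind`, a plain `extern "Rust" fn`
// with no attributes reports `can_unwind == true`, while an
// `extern "C" fn` defined in Rust reports `can_unwind == false`.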

impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        // Assume that fn pointers may always unwind.
        let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;

        call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, |ty, _| {
            ArgAbi::new(cx.layout_of(ty))
        })
    }

    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        let sig = instance.fn_sig_for_fn_abi(cx.tcx());

        let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
            Some(cx.tcx().caller_location_ty())
        } else {
            None
        };

        let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;

        call::FnAbi::new_internal(cx, sig, extra_args, caller_location, attrs, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen.
            if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // unsized `self` is passed as a pointer to `self`
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx().mk_mut_ptr(layout.ty)
                } else {
                    match layout.abi {
                        Abi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout),
                    }

                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
                    // elsewhere in the compiler as a method on a `dyn Trait`.
                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until
                    // we get a built-in pointer type.
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes;
                            }
                        }

                        bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
                    }

                    fat_pointer_layout.ty
                };

                // we now have a type like `*mut RcBox<dyn Trait>`
                // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
                // this is understood as a special case elsewhere in the compiler
                let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgAbi::new(layout)
        })
    }
2501 fn new_internal(
2502 cx: &C,
60c5eb7d 2503 sig: ty::PolyFnSig<'tcx>,
48663c56 2504 extra_args: &[Ty<'tcx>],
60c5eb7d 2505 caller_location: Option<Ty<'tcx>>,
ba9703b0 2506 codegen_fn_attr_flags: CodegenFnAttrFlags,
60c5eb7d 2507 mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
48663c56 2508 ) -> Self {
60c5eb7d
XL
2509 debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2510
dfeec247 2511 let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
48663c56
XL
2512
2513 use rustc_target::spec::abi::Abi::*;
2514 let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
60c5eb7d 2515 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
48663c56
XL
2516
2517 // It's the ABI's job to select this, not ours.
2518 System => bug!("system abi should be selected elsewhere"),
e74abb32 2519 EfiApi => bug!("eficall abi should be selected elsewhere"),
48663c56
XL
2520
2521 Stdcall => Conv::X86Stdcall,
2522 Fastcall => Conv::X86Fastcall,
2523 Vectorcall => Conv::X86VectorCall,
2524 Thiscall => Conv::X86ThisCall,
2525 C => Conv::C,
2526 Unadjusted => Conv::C,
2527 Win64 => Conv::X86_64Win64,
2528 SysV64 => Conv::X86_64SysV,
2529 Aapcs => Conv::ArmAapcs,
2530 PtxKernel => Conv::PtxKernel,
2531 Msp430Interrupt => Conv::Msp430Intr,
2532 X86Interrupt => Conv::X86Intr,
2533 AmdGpuKernel => Conv::AmdGpuKernel,
f035d41b
XL
2534 AvrInterrupt => Conv::AvrInterrupt,
2535 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
48663c56
XL
2536
2537 // These API constants ought to be more specific...
2538 Cdecl => Conv::C,
2539 };
2540
2541 let mut inputs = sig.inputs();
2542 let extra_args = if sig.abi == RustCall {
2543 assert!(!sig.c_variadic && extra_args.is_empty());
2544
dfeec247
XL
2545 if let Some(input) = sig.inputs().last() {
2546 if let ty::Tuple(tupled_arguments) = input.kind {
48663c56
XL
2547 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2548 tupled_arguments.iter().map(|k| k.expect_ty()).collect()
dfeec247 2549 } else {
48663c56
XL
2550 bug!(
2551 "argument to function with \"rust-call\" ABI \
dfeec247 2552 is not a tuple"
48663c56
XL
2553 );
2554 }
dfeec247
XL
2555 } else {
2556 bug!(
2557 "argument to function with \"rust-call\" ABI \
2558 is not a tuple"
2559 );
48663c56
XL
2560 }
2561 } else {
2562 assert!(sig.c_variadic || extra_args.is_empty());
2563 extra_args.to_vec()
2564 };
2565
2566 let target = &cx.tcx().sess.target.target;
74b04a01 2567 let target_env_gnu_like = matches!(&target.target_env[..], "gnu" | "musl");
48663c56
XL
2568 let win_x64_gnu =
2569 target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
74b04a01
XL
2570 let linux_s390x_gnu_like =
2571 target.target_os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2572 let linux_sparc64_gnu_like =
2573 target.target_os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2574 let linux_powerpc_gnu_like =
2575 target.target_os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
48663c56
XL
2576 let rust_abi = match sig.abi {
2577 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
2578 _ => false,
2579 };
2580
2581 // Handle safe Rust thin and fat pointers.
2582 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2583 scalar: &Scalar,
ba9703b0 2584 layout: TyAndLayout<'tcx>,
48663c56
XL
2585 offset: Size,
2586 is_return: bool| {
2587 // Booleans are always an i1 that needs to be zero-extended.
2588 if scalar.is_bool() {
2589 attrs.set(ArgAttribute::ZExt);
2590 return;
2591 }
2592
2593 // Only pointer types handled below.
2594 if scalar.value != Pointer {
2595 return;
2596 }
2597
2598 if scalar.valid_range.start() < scalar.valid_range.end() {
2599 if *scalar.valid_range.start() > 0 {
2600 attrs.set(ArgAttribute::NonNull);
2601 }
2602 }
2603
2604 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2605 if let Some(kind) = pointee.safe {
48663c56
XL
2606 attrs.pointee_align = Some(pointee.align);
2607
74b04a01 2608 // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable
60c5eb7d 2609 // for the entire duration of the function as they can be deallocated
f9f354fc 2610 // at any time. Set their valid size to 0.
60c5eb7d
XL
2611 attrs.pointee_size = match kind {
2612 PointerKind::UniqueOwned => Size::ZERO,
dfeec247 2613 _ => pointee.size,
60c5eb7d
XL
2614 };
2615
48663c56
XL
2616 // `Box` pointer parameters never alias because ownership is transferred
2617 // `&mut` pointer parameters never alias other parameters,
2618 // or mutable global data
2619 //
2620 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2621 // and can be marked as both `readonly` and `noalias`, as
2622 // LLVM's definition of `noalias` is based solely on memory
2623 // dependencies rather than pointer equality
2624 let no_alias = match kind {
2625 PointerKind::Shared => false,
2626 PointerKind::UniqueOwned => true,
2627 PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2628 };
2629 if no_alias {
2630 attrs.set(ArgAttribute::NoAlias);
2631 }
2632
2633 if kind == PointerKind::Frozen && !is_return {
2634 attrs.set(ArgAttribute::ReadOnly);
2635 }
2636 }
2637 }
2638 };
2639
48663c56
XL
2640 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2641 let is_return = arg_idx.is_none();
2642 let mut arg = mk_arg_type(ty, arg_idx);
2643 if arg.layout.is_zst() {
2644 // For some forsaken reason, x86_64-pc-windows-gnu
2645 // doesn't ignore zero-sized struct arguments.
74b04a01
XL
2646 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2647 if is_return
2648 || rust_abi
2649 || (!win_x64_gnu
2650 && !linux_s390x_gnu_like
2651 && !linux_sparc64_gnu_like
2652 && !linux_powerpc_gnu_like)
2653 {
e74abb32 2654 arg.mode = PassMode::Ignore;
48663c56
XL
2655 }
2656 }
2657
2658 // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2659 if !is_return && rust_abi {
2660 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2661 let mut a_attrs = ArgAttributes::new();
2662 let mut b_attrs = ArgAttributes::new();
2663 adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
2664 adjust_for_rust_scalar(
2665 &mut b_attrs,
2666 b,
2667 arg.layout,
2668 a.value.size(cx).align_to(b.value.align(cx).abi),
2669 false,
2670 );
2671 arg.mode = PassMode::Pair(a_attrs, b_attrs);
2672 return arg;
2673 }
2674 }
2675
2676 if let Abi::Scalar(ref scalar) = arg.layout.abi {
2677 if let PassMode::Direct(ref mut attrs) = arg.mode {
2678 adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
2679 }
2680 }
2681
2682 arg
2683 };
2684
60c5eb7d 2685 let mut fn_abi = FnAbi {
48663c56
XL
2686 ret: arg_of(sig.output(), None),
2687 args: inputs
2688 .iter()
2689 .cloned()
2690 .chain(extra_args)
60c5eb7d 2691 .chain(caller_location)
48663c56
XL
2692 .enumerate()
2693 .map(|(i, ty)| arg_of(ty, Some(i)))
2694 .collect(),
2695 c_variadic: sig.c_variadic,
74b04a01 2696 fixed_count: inputs.len(),
48663c56 2697 conv,
ba9703b0 2698 can_unwind: fn_can_unwind(cx.tcx().sess.panic_strategy(), codegen_fn_attr_flags, conv),
48663c56 2699 };
60c5eb7d
XL
2700 fn_abi.adjust_for_abi(cx, sig.abi);
2701 fn_abi
48663c56
XL
2702 }
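
    // Illustrative consequences of `arg_of` above (made-up signature, not
    // from the source): for `fn f(x: (), y: u8)` under the Rust ABI, `x` is
    // a ZST and gets `PassMode::Ignore` while `y` stays a direct scalar;
    // a `&str` argument is an `Abi::ScalarPair` and is passed as
    // `PassMode::Pair(..)` with `NonNull` set on its data-pointer half.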

    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
        if abi == SpecAbi::Unadjusted {
            return;
        }

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
        {
            let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() {
                    return;
                }

                match arg.layout.abi {
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                    Abi::Vector { .. }
                        if abi != SpecAbi::PlatformIntrinsic
                            && cx.tcx().sess.target.target.options.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return;
                    }

                    _ => return,
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg { kind: RegKind::Integer, size });
                }
            };
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.tcx().sess.fatal(&msg);
        }
    }
}