use crate::session::{self, DataTypeKind};
use crate::ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, subst::SubstsRef};

use syntax::ast::{self, Ident, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::i128;
use std::iter;
use std::mem;
use std::ops::Bound;

use crate::hir;
use crate::ich::StableHashingContext;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::subst::Subst;
use rustc_index::bit_set::BitSet;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_index::vec::{IndexVec, Idx};

pub use rustc_target::abi::*;
use rustc_target::spec::{HasTargetSpec, abi::Abi as SpecAbi};
use rustc_target::abi::call::{
    ArgAttribute, ArgAttributes, ArgAbi, Conv, FnAbi, PassMode, Reg, RegKind
};

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }

    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Pointer => tcx.types.usize,
            F32 | F64 => bug!("floats do not have an int type"),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers {
        layout_raw,
        ..*providers
    };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldPlacement::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
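
// Worked example (illustrative values): if the optimizer produced
// `memory_index = [2, 0, 1]` (source field 0 is third in memory, field 1
// first, field 2 second), then `invert_mapping(&[2, 0, 1])` yields
// `[1, 2, 0]`: memory slot 0 holds source field 1, slot 1 holds field 2,
// and slot 2 holds field 0. Inverting twice returns the original mapping.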

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutDetails {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
            .max_by_key(|niche| niche.available(dl));

        LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1]
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size
        }
    }
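
    // Worked example (illustrative, assuming a target where `u32` is 4-byte
    // aligned): for `a = u8` and `b = u32`, `b_offset` is 1 rounded up to 4,
    // so the pair occupies bytes 0 and 4..8, with `size = 8` and the whole
    // pair aligned to 4.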

    fn univariant_uninterned(&self,
                             ty: Ty<'tcx>,
                             fields: &[TyLayout<'_>],
                             repr: &ReprOptions,
                             kind: StructKind) -> Result<LayoutDetails, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        if pack.is_some() && repr.align.is_some() {
            bug!("struct cannot be packed and aligned");
        }

        let mut align = if pack.is_some() {
            dl.i8_align
        } else {
            dl.aggregate_align
        };

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let mut optimize = !repr.inhibit_struct_field_reordering_opt();
        if let StructKind::Prefixed(_, align) = kind {
            optimize &= align.bytes() == 1;
        }

        if optimize {
            let end = if let StructKind::MaybeUnsized = kind {
                fields.len() - 1
            } else {
                fields.len()
            };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized |
                StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }
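
        // Worked example (illustrative): for a default-repr struct with
        // fields `(u8, u32, u16)`, sorting by decreasing alignment yields
        // memory order `u32, u16, u8` (offsets 0, 4, 6; size 8), whereas
        // source order would need offsets 0, 4, 8 and size 12 on a target
        // with 4-byte-aligned `u32`.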
ea8adc8c 330
dc9dc135
XL
331 // inverse_memory_index holds field indices by increasing memory offset.
332 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
333 // We now write field offsets to the corresponding offset slot;
334 // field 5 with offset 0 puts 0 in offsets[5].
335 // At the bottom of this function, we invert `inverse_memory_index` to
336 // produce `memory_index` (see `invert_mapping`).
ff7c6d11 337
ff7c6d11 338
dc9dc135 339 let mut offset = Size::ZERO;
416331ca
XL
340 let mut largest_niche = None;
341 let mut largest_niche_available = 0;
ff7c6d11 342
dc9dc135 343 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
e1599b0c 344 let prefix_align = if let Some(pack) = pack {
dc9dc135
XL
345 prefix_align.min(pack)
346 } else {
347 prefix_align
348 };
349 align = align.max(AbiAndPrefAlign::new(prefix_align));
350 offset = prefix_size.align_to(prefix_align);
351 }
ff7c6d11 352
dc9dc135
XL
353 for &i in &inverse_memory_index {
354 let field = fields[i as usize];
355 if !sized {
356 bug!("univariant: field #{} of `{}` comes after unsized field",
357 offsets.len(), ty);
358 }
ff7c6d11 359
dc9dc135
XL
360 if field.is_unsized() {
361 sized = false;
362 }
ff7c6d11 363
dc9dc135 364 // Invariant: offset < dl.obj_size_bound() <= 1<<61
e1599b0c 365 let field_align = if let Some(pack) = pack {
dc9dc135
XL
366 field.align.min(AbiAndPrefAlign::new(pack))
367 } else {
368 field.align
369 };
370 offset = offset.align_to(field_align.abi);
371 align = align.max(field_align);
ff7c6d11 372
dc9dc135
XL
373 debug!("univariant offset: {:?} field: {:#?}", offset, field);
374 offsets[i as usize] = offset;
ff7c6d11 375
416331ca
XL
376 if let Some(mut niche) = field.largest_niche.clone() {
377 let available = niche.available(dl);
378 if available > largest_niche_available {
379 largest_niche_available = available;
380 niche.offset += offset;
381 largest_niche = Some(niche);
382 }
383 }
384
dc9dc135
XL
385 offset = offset.checked_add(field.size, dl)
386 .ok_or(LayoutError::SizeOverflow(ty))?;
387 }
ff7c6d11 388
e1599b0c
XL
389 if let Some(repr_align) = repr.align {
390 align = align.max(AbiAndPrefAlign::new(repr_align));
dc9dc135 391 }
ff7c6d11 392
dc9dc135
XL
393 debug!("univariant min_size: {:?}", offset);
394 let min_size = offset;
ff7c6d11 395
dc9dc135
XL
396 // As stated above, inverse_memory_index holds field indices by increasing offset.
397 // This makes it an already-sorted view of the offsets vec.
398 // To invert it, consider:
399 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
400 // Field 5 would be the first element, so memory_index is i:
401 // Note: if we didn't optimize, it's already right.
ff7c6d11 402
dc9dc135
XL
403 let memory_index;
404 if optimize {
405 memory_index = invert_mapping(&inverse_memory_index);
406 } else {
407 memory_index = inverse_memory_index;
408 }
409
410 let size = min_size.align_to(align.abi);
411 let mut abi = Abi::Aggregate { sized };
412
413 // Unpack newtype ABIs and find scalar pairs.
414 if sized && size.bytes() > 0 {
415 // All other fields must be ZSTs, and we need them to all start at 0.
416 let mut zst_offsets =
417 offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
418 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
419 let mut non_zst_fields =
420 fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
421
422 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
423 // We have exactly one non-ZST field.
424 (Some((i, field)), None, None) => {
425 // Field fills the struct and it has a scalar or scalar pair ABI.
426 if offsets[i].bytes() == 0 &&
427 align.abi == field.align.abi &&
428 size == field.size {
429 match field.abi {
430 // For plain scalars, or vectors of them, we can't unpack
431 // newtypes for `#[repr(C)]`, as that affects C ABIs.
432 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
433 abi = field.abi.clone();
ff7c6d11 434 }
dc9dc135
XL
435 // But scalar pairs are Rust-specific and get
436 // treated as aggregates by C ABIs anyway.
437 Abi::ScalarPair(..) => {
438 abi = field.abi.clone();
439 }
440 _ => {}
ff7c6d11
XL
441 }
442 }
dc9dc135 443 }
ff7c6d11 444
dc9dc135
XL
445 // Two non-ZST fields, and they're both scalars.
446 (Some((i, &TyLayout {
447 details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
448 })), Some((j, &TyLayout {
449 details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
450 })), None) => {
451 // Order by the memory placement, not source order.
452 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
453 ((i, a), (j, b))
454 } else {
455 ((j, b), (i, a))
456 };
457 let pair = self.scalar_pair(a.clone(), b.clone());
458 let pair_offsets = match pair.fields {
459 FieldPlacement::Arbitrary {
460 ref offsets,
461 ref memory_index
462 } => {
463 assert_eq!(memory_index, &[0, 1]);
464 offsets
ff7c6d11 465 }
dc9dc135
XL
466 _ => bug!()
467 };
468 if offsets[i] == pair_offsets[0] &&
469 offsets[j] == pair_offsets[1] &&
470 align == pair.align &&
471 size == pair.size {
472 // We can use `ScalarPair` only when it matches our
473 // already computed layout (including `#[repr(C)]`).
474 abi = pair.abi;
ff7c6d11 475 }
ff7c6d11 476 }
dc9dc135
XL
477
478 _ => {}
ff7c6d11
XL
479 }
480 }
dc9dc135 481 }
ff7c6d11 482
dc9dc135
XL
483 if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
484 abi = Abi::Uninhabited;
485 }
83c7162d 486
dc9dc135
XL
487 Ok(LayoutDetails {
488 variants: Variants::Single { index: VariantIdx::new(0) },
489 fields: FieldPlacement::Arbitrary {
490 offsets,
491 memory_index
492 },
493 abi,
416331ca 494 largest_niche,
dc9dc135
XL
495 align,
496 size
497 })
498 }
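
    // Worked example of the "unpack newtype ABIs" step above (illustrative):
    // a struct such as `struct Wrapper(u64);` has a single non-ZST field at
    // offset 0 that fills the struct, so it inherits `Abi::Scalar` from its
    // field and can be passed in a register rather than by aggregate rules.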

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar {
                value,
                valid_range: 0..=(!0 >> (128 - bits))
            }
        };
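        // For example (illustrative): `scalar_unit(Int(I8, false))` produces
        // a scalar with the maximal valid range 0..=255; specific types then
        // narrow it, as `bool` does below with 0..=1.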
        let scalar = |value: Primitive| {
            tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
        };

        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types());

        Ok(match ty.kind {
            // Basic scalars.
            ty::Bool => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I8, false),
                    valid_range: 0..=1
                }))
            }
            ty::Char => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I32, false),
                    valid_range: 0..=0x10FFFF
                }))
            }
            ty::Int(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
            }
            ty::Uint(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
            }
            ty::Float(fty) => scalar(match fty {
                ast::FloatTy::F32 => F32,
                ast::FloatTy::F64 => F64,
            }),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::Never => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Union(0),
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                let metadata = match unsized_part.kind {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }
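
            // Illustrative summary of the pointer cases above: `&u8` is a
            // single scalar whose valid_range starts at 1 (non-null), which
            // is what lets `Option<&u8>` use 0 as its `None` niche; `&[u8]`
            // becomes a (data pointer, usize length) scalar pair; `*const u8`
            // keeps the full range since raw pointers may be null.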

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                let largest_niche = if count != 0 {
                    element.largest_niche.clone()
                } else {
                    None
                };

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi,
                    largest_niche,
                    align: element.align,
                    size
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: element.align,
                    size: Size::ZERO
                })
            }
            ty::Str => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: Size::from_bytes(1),
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    largest_niche: None,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Odd unit types.
            ty::FnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(ty, &[], &ReprOptions::default(),
                                                          StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,

            ty::Closure(def_id, ref substs) => {
                let tys = substs.as_closure().upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                           &ReprOptions::default(),
                           StructKind::AlwaysSized)?
            }

            ty::Tuple(tys) => {
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|k| {
                    self.layout_of(k.expect_ty())
                }).collect::<Result<Vec<_>, _>>()?, &ReprOptions::default(), kind)?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx);
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    largest_niche: element.largest_niche.clone(),
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    if def.repr.pack.is_some() && def.repr.align.is_some() {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align = if def.repr.pack.is_some() {
                        dl.i8_align
                    } else {
                        dl.aggregate_align
                    };

                    if let Some(repr_align) = def.repr.align {
                        align = align.max(AbiAndPrefAlign::new(repr_align));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());
                        align = align.max(field.align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(
                                        scalar_unit(x.value),
                                        scalar_unit(y.value),
                                    )
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector {
                                        element: scalar_unit(x.value),
                                        count: *count,
                                    }
                                }
                                Abi::Uninhabited |
                                Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    if let Some(pack) = def.repr.pack {
                        align = align.min(AbiAndPrefAlign::new(pack));
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        largest_niche: None,
                        align,
                        size: size.align_to(align.abi)
                    }));
                }
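
                // Illustrative example of the union ABI forwarding above: in
                // `union U { a: u32, b: u32 }` every non-ZST field normalizes
                // to the same full-range `Scalar(I32)`, so `U` itself gets a
                // scalar ABI; mixing in an `f32` field (a different primitive)
                // resets it to `Aggregate`.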

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
                        if absent(v) {
                            None
                        } else {
                            Some(i)
                        }
                    });
                    (present_variants.next(), present_variants.next())
                };
                let present_first = match present_first {
                    present_first @ Some(_) => present_first,
                    // Uninhabited because it has no variants, or only absent ones.
                    None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
                    // If it's a struct, still compute a layout so that we can
                    // still compute the field offsets.
                    None => Some(VariantIdx::new(0)),
                };

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                     // Representation optimizations are allowed.
                     !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].len() == 0 {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized = tcx.type_of(last_field.did)
                                              .is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized { StructKind::MaybeUnsized }
                        else { StructKind::AlwaysSized }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) |
                        Abi::ScalarPair(ref mut scalar, _) => {
                            // The asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything, as that
                            // would probably result in UB somewhere.
                            // FIXME(eddyb) the asserts are probably not needed,
                            // as larger validity ranges would result in missed
                            // optimizations, *not* wrongly assuming the inner
                            // value is valid. e.g. unions enlarge validity ranges,
                            // because the values may be uninitialized.
                            if let Bound::Included(start) = start {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                // FIXME(eddyb) this might be incorrect - it doesn't
                                // account for wrap-around (end < start) ranges.
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }

                            // Update `largest_niche` if we have introduced a larger niche.
                            let niche = Niche::from_scalar(dl, Size::ZERO, scalar.clone());
                            if let Some(niche) = niche {
                                match &st.largest_niche {
                                    Some(largest_niche) => {
                                        // Replace the existing niche even if they're equal,
                                        // because this one is at a lower offset.
                                        if largest_niche.available(dl) <= niche.available(dl) {
                                            st.largest_niche = Some(niche);
                                        }
                                    }
                                    None => st.largest_niche = Some(niche),
                                }
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }

                    return Ok(tcx.intern_layout(st));
                }
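
                // Illustrative example of the valid-range narrowing above:
                // `core::num::NonZeroU32` carries
                // `#[rustc_layout_scalar_valid_range_start(1)]`, so its scalar
                // range becomes 1..=u32::MAX, creating the niche that lets
                // `Option<NonZeroU32>` use 0 for `None` and stay 4 bytes.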

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def.variants.iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (
                            niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
                        ) as u128;
                        // FIXME(#62691) use the largest niche across all fields,
                        // not just the first one.
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match &field.largest_niche {
                                Some(niche) => niche,
                                _ => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };

                            let mut align = dl.aggregate_align;
                            let st = variants.iter_enumerated().map(|(j, v)| {
                                let mut st = self.univariant_uninterned(ty, v,
                                    &def.repr, StructKind::AlwaysSized)?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };

                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }

                            let largest_niche =
                                Niche::from_scalar(dl, offset, niche_scalar.clone());

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::Multiple {
                                    discr: niche_scalar,
                                    discr_kind: DiscriminantKind::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    discr_index: 0,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0]
                                },
                                abi,
                                largest_niche,
                                size,
                                align,
                            }));
                        }
                    }
                }
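
                // Illustrative example of niche-filling: `Option<&u8>` has one
                // dataful variant (`Some`) whose field's valid range excludes
                // 0, so the single niche value 0 encodes `None` and the whole
                // enum stays pointer-sized, with no separate tag.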

                let (mut min, mut max) = (i128::max_value(), i128::min_value());
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min { min = x; }
                    if x > max { max = x; }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::max_value(), i128::min_value()) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and the
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
                    let mut st = self.univariant_uninterned(ty, &field_layouts,
                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi.bytes() != 1 {
                            start_align = start_align.min(field.align.abi);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck for
                    // some reason at this point (based on values discriminant can take on). Mostly
                    // because this discriminant will be loaded, and then stored into variable of
                    // type calculated by typeck. Consider such case (a bug): typeck decided on
                    // byte-sized discriminant, but layout thinks we need a 16-bit to store all
                    // discriminant values. That would be a bug, because then, in codegen, in order
                    // to store this 16-bit discriminant into 8-bit sized temporary some of the
                    // space necessary to represent would have to be discarded (or layout is wrong
                    // on thinking it needs 16 bits)
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make discr type however large (as an optimisation)
                    // after this point – we’ll just truncate the value we load in codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!()
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
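                // Note on the masking above (illustrative): for a signed
                // `i8`-sized tag with `min = -1` and `max = 1`, the mask is
                // 0xff and the stored range becomes the wrap-around range
                // 255..=1, i.e. the bit patterns 0xff, 0x00, and 0x01.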
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields = field_layouts
                            .iter()
                            .zip(offsets)
                            .filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if pair_offsets[0] == Size::ZERO &&
                            pair_offsets[1] == *offset &&
                            align == pair.align &&
                            size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Multiple {
                        discr: tag,
                        discr_kind: DiscriminantKind::Tag,
                        discr_index: 0,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0]
                    },
                    largest_niche,
                    abi,
                    align,
                    size
                })
            }

            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Bound(..) |
            ty::Placeholder(..) |
            ty::UnnormalizedProjection(..) |
            ty::GeneratorWitness(..) |
            ty::Infer(_) => {
                bug!("LayoutDetails::compute: unexpected type `{}`", ty)
            }

            ty::Param(_) | ty::Error => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
}

/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    Unassigned,
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    Ineligible(Option<u32>),
}

// When laying out generators, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.
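//
// Illustrative example: in a generator with two yield points, a local that
// is live only across the first yield is overlap-eligible and stored in
// that variant alone, so a local live only across the second yield can
// reuse the same bytes; a local live across both yields is ineligible and
// gets a dedicated slot in the prefix.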
1308impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1309 /// Compute the eligibility and assignment of each local.
1310 fn generator_saved_local_eligibility(&self, info: &GeneratorLayout<'tcx>)
1311 -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1312 use SavedLocalEligibility::*;
1313
1314 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1315 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1316
1317 // The saved locals not eligible for overlap. These will get
1318 // "promoted" to the prefix of our generator.
1319 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1320
1321 // Figure out which of our saved locals are fields in only
1322 // one variant. The rest are deemed ineligible for overlap.
1323 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1324 for local in fields {
1325 match assignments[*local] {
1326 Unassigned => {
1327 assignments[*local] = Assigned(variant_index);
1328 }
1329 Assigned(idx) => {
1330 // We've already seen this local at another suspension
1331 // point, so it is no longer a candidate.
1332 trace!("removing local {:?} in >1 variant ({:?}, {:?})",
1333 local, variant_index, idx);
1334 ineligible_locals.insert(*local);
1335 assignments[*local] = Ineligible(None);
1336 }
1337 Ineligible(_) => {},
1338 }
1339 }
1340 }
1341
1342 // Next, check every pair of eligible locals to see if they
1343 // conflict.
1344 for local_a in info.storage_conflicts.rows() {
1345 let conflicts_a = info.storage_conflicts.count(local_a);
1346 if ineligible_locals.contains(local_a) {
1347 continue;
1348 }
1349
1350 for local_b in info.storage_conflicts.iter(local_a) {
1351 // local_a and local_b are storage live at the same time, therefore they
1352 // cannot overlap in the generator layout. The only way to guarantee
1353 // this is if they are in the same variant, or one is ineligible
1354 // (which means it is stored in every variant).
1355 if ineligible_locals.contains(local_b) ||
1356 assignments[local_a] == assignments[local_b]
1357 {
1358 continue;
1359 }
1360
1361 // If they conflict, we will choose one to make ineligible.
1362 // This is not always optimal; it's just a greedy heuristic that
1363 // seems to produce good results most of the time.
1364 let conflicts_b = info.storage_conflicts.count(local_b);
1365 let (remove, other) = if conflicts_a > conflicts_b {
1366 (local_a, local_b)
1367 } else {
1368 (local_b, local_a)
1369 };
1370 ineligible_locals.insert(remove);
1371 assignments[remove] = Ineligible(None);
1372 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1373 }
1374 }
1375
416331ca
XL
1376 // Count the number of variants in use. If only one of them, then it is
1377 // impossible to overlap any locals in our layout. In this case it's
1378 // always better to make the remaining locals ineligible, so we can
1379 // lay them out with the other locals in the prefix and eliminate
1380 // unnecessary padding bytes.
1381 {
1382 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1383 for assignment in &assignments {
1384 match assignment {
1385 Assigned(idx) => { used_variants.insert(*idx); }
1386 _ => {}
1387 }
1388 }
1389 if used_variants.count() < 2 {
1390 for assignment in assignments.iter_mut() {
1391 *assignment = Ineligible(None);
1392 }
1393 ineligible_locals.insert_all();
1394 }
1395 }
1396
dc9dc135
XL
1397 // Write down the order of our locals that will be promoted to the prefix.
1398 {
1399 let mut idx = 0u32;
1400 for local in ineligible_locals.iter() {
1401 assignments[local] = Ineligible(Some(idx));
1402 idx += 1;
1403 }
1404 }
1405 debug!("generator saved local assignments: {:?}", assignments);
1406
1407 (ineligible_locals, assignments)
1408 }
1409
1410 /// Compute the full generator layout.
1411 fn generator_layout(
1412 &self,
1413 ty: Ty<'tcx>,
1414 def_id: hir::def_id::DefId,
e74abb32 1415 substs: SubstsRef<'tcx>,
dc9dc135
XL
1416 ) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
1417 use SavedLocalEligibility::*;
1418 let tcx = self.tcx;
1419
e74abb32 1420 let subst_field = |ty: Ty<'tcx>| { ty.subst(tcx, substs) };
dc9dc135
XL
1421
1422 let info = tcx.generator_layout(def_id);
1423 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1424
1425 // Build a prefix layout, including "promoting" all ineligible
1426 // locals as part of the prefix. We compute the layout of all of
1427 // these fields at once to get optimal packing.
e74abb32 1428 let discr_index = substs.as_generator().prefix_tys(def_id, tcx).count();
416331ca 1429 // FIXME(eddyb) set the correct vaidity range for the discriminant.
e74abb32 1430 let discr_layout = self.layout_of(substs.as_generator().discr_ty(tcx))?;
416331ca
XL
1431 let discr = match &discr_layout.abi {
1432 Abi::Scalar(s) => s.clone(),
1433 _ => bug!(),
1434 };
1435 let promoted_layouts = ineligible_locals.iter()
1436 .map(|local| subst_field(info.field_tys[local]))
1437 .map(|ty| tcx.mk_maybe_uninit(ty))
1438 .map(|ty| self.layout_of(ty));
e74abb32 1439 let prefix_layouts = substs.as_generator().prefix_tys(def_id, tcx)
416331ca
XL
1440 .map(|ty| self.layout_of(ty))
1441 .chain(iter::once(Ok(discr_layout)))
1442 .chain(promoted_layouts)
1443 .collect::<Result<Vec<_>, _>>()?;
dc9dc135
XL
1444 let prefix = self.univariant_uninterned(
1445 ty,
416331ca 1446 &prefix_layouts,
dc9dc135 1447 &ReprOptions::default(),
416331ca
XL
1448 StructKind::AlwaysSized,
1449 )?;
1450
dc9dc135
XL
1451 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1452
1453 // Split the prefix layout into the "outer" fields (upvars and
1454 // discriminant) and the "promoted" fields. Promoted fields will
1455 // get included in each variant that requested them in
1456 // GeneratorLayout.
1457 debug!("prefix = {:#?}", prefix);
1458 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1459 FieldPlacement::Arbitrary { mut offsets, memory_index } => {
1460 let mut inverse_memory_index = invert_mapping(&memory_index);
1461
1462 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1463 // "outer" and "promoted" fields respectively.
1464 let b_start = (discr_index + 1) as u32;
1465 let offsets_b = offsets.split_off(b_start as usize);
1466 let offsets_a = offsets;
1467
1468 // Disentangle the "a" and "b" components of `inverse_memory_index`
1469 // by preserving the order but keeping only one disjoint "half" each.
1470 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1471 let inverse_memory_index_b: Vec<_> =
1472 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1473 inverse_memory_index.retain(|&i| i < b_start);
1474 let inverse_memory_index_a = inverse_memory_index;
1475
1476 // Since `inverse_memory_index_{a,b}` each only refer to their
1477 // respective fields, they can be safely inverted
1478 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1479 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1480
1481 let outer_fields = FieldPlacement::Arbitrary {
1482 offsets: offsets_a,
1483 memory_index: memory_index_a,
1484 };
1485 (outer_fields, offsets_b, memory_index_b)
1486 }
1487 _ => bug!(),
1488 };

        let mut size = prefix.size;
        let mut align = prefix.align;
        let variants = info.variant_fields.iter_enumerated().map(|(index, variant_fields)| {
            // Only include overlap-eligible fields when we compute our variant layout.
            let variant_only_tys = variant_fields
                .iter()
                .filter(|local| {
                    match assignments[**local] {
                        Unassigned => bug!(),
                        Assigned(v) if v == index => true,
                        Assigned(_) => bug!("assignment does not match variant"),
                        Ineligible(_) => false,
                    }
                })
                .map(|local| subst_field(info.field_tys[*local]));

            let mut variant = self.univariant_uninterned(
                ty,
                &variant_only_tys
                    .map(|ty| self.layout_of(ty))
                    .collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                StructKind::Prefixed(prefix_size, prefix_align.abi))?;
            variant.variants = Variants::Single { index };

            let (offsets, memory_index) = match variant.fields {
                FieldPlacement::Arbitrary { offsets, memory_index } => {
                    (offsets, memory_index)
                }
                _ => bug!(),
            };

            // Now, stitch the promoted and variant-only fields back together in
            // the order they are mentioned by our GeneratorLayout.
            // Because we only use some subset (that can differ between variants)
            // of the promoted fields, we can't just pick those elements of the
            // `promoted_memory_index` (as we'd end up with gaps).
            // So instead, we build an "inverse memory_index", as if all of the
            // promoted fields were being used, but leave the elements not in the
            // subset as `INVALID_FIELD_IDX`, which we can filter out later to
            // obtain a valid (bijective) mapping.
            const INVALID_FIELD_IDX: u32 = !0;
            let mut combined_inverse_memory_index =
                vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
            let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
            let combined_offsets = variant_fields.iter().enumerate().map(|(i, local)| {
                let (offset, memory_index) = match assignments[*local] {
                    Unassigned => bug!(),
                    Assigned(_) => {
                        let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
                        (offset, promoted_memory_index.len() as u32 + memory_index)
                    }
                    Ineligible(field_idx) => {
                        let field_idx = field_idx.unwrap() as usize;
                        (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                    }
                };
                combined_inverse_memory_index[memory_index as usize] = i as u32;
                offset
            }).collect();

            // Remove the unused slots and invert the mapping to obtain the
            // combined `memory_index` (also see previous comment).
            combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
            let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
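            // A worked example with made-up numbers: with two promoted fields
            // (of which only the second is used here) and one variant-only
            // field, the combined inverse starts as `[!0, !0, !0]` and might
            // be filled in as `[!0, 0, 1]`; `retain` then shrinks it to
            // `[0, 1]`, which inverts to a valid `memory_index` of `[0, 1]`.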

            variant.fields = FieldPlacement::Arbitrary {
                offsets: combined_offsets,
                memory_index: combined_memory_index,
            };

            size = size.max(variant.size);
            align = align.max(variant.align);
            Ok(variant)
        }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        size = size.align_to(align.abi);

        let abi = if prefix.abi.is_uninhabited() ||
                     variants.iter().all(|v| v.abi.is_uninhabited()) {
            Abi::Uninhabited
        } else {
            Abi::Aggregate { sized: true }
        };

        let layout = tcx.intern_layout(LayoutDetails {
            variants: Variants::Multiple {
                discr,
                discr_kind: DiscriminantKind::Tag,
                discr_index,
                variants,
            },
            fields: outer_fields,
            abi,
            largest_niche: prefix.largest_niche,
            size,
            align,
        });
        debug!("generator layout ({:?}): {:#?}", ty, layout);
        Ok(layout)
    }
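    // As a rough sketch of the result: for a generator with a single variable
    // kept live across one yield point, the layout has the shape
    //
    //     Variants::Multiple {
    //         discr_kind: DiscriminantKind::Tag,
    //         discr_index: /* position of the tag in the prefix */,
    //         variants: [/* Unresumed, Returned, Panicked, Suspend0 */],
    //         ..
    //     }
    //
    // where the tag and any promoted (overlap-ineligible) fields live in the
    // shared prefix and each variant layout adds only its own fields.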

    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline(always)]
    fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, maybe record layouts
        // for dumping later.
        if self.tcx.sess.opts.debugging_opts.print_type_sizes {
            self.record_layout_for_printing_outlined(layout)
        }
    }

    fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
        // Ignore layouts computed with non-empty parameter environments, and
        // non-monomorphic layouts, as the user only wants to see the layouts
        // that result from the final codegen session.
        if layout.ty.has_param_types() ||
           !self.param_env.caller_bounds.is_empty()
        {
            return;
        }

        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.record_type_size(kind,
                                                      type_desc,
                                                      layout.align.abi,
                                                      layout.size,
                                                      packed,
                                                      opt_discr_size,
                                                      variants);
        };

        let adt_def = match layout.ty.kind {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.pack.is_some();

        let build_variant_info = |n: Option<Ident>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi.bytes(),
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };

        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].ident);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(adt_kind.into(),
                           adt_packed,
                           None,
                           vec![build_variant_info(Some(variant_def.ident),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums, so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::Multiple { ref discr, ref discr_kind, .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                let variant_infos: Vec<_> =
                    adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(Some(variant_def.ident),
                                           &fields,
                                           layout.for_variant(self, i))
                    })
                    .collect();
                record(adt_kind.into(), adt_packed, match discr_kind {
                    DiscriminantKind::Tag => Some(discr.value.size(self)),
                    _ => None
                }, variant_infos);
            }
        }
    }
}

/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof, and null-pointer-optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
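// An informal illustration: for an unsized type parameter `T: ?Sized`, both
// `&T` and `Option<&T>` reduce to `SizeSkeleton::Pointer { tail: T, .. }`
// (with `non_zero` being `true` and `false` respectively), so a `transmute`
// between them can be accepted as size-preserving even though neither layout
// is statically known.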

impl<'tcx> SizeSkeleton<'tcx> {
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.kind {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                match tail.kind {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}

pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}

pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}

impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self
    }
}

impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.tcx()
    }
}

pub trait MaybeResult<T> {
    type Error;

    fn from(x: Result<T, Self::Error>) -> Self;
    fn to_result(self) -> Result<T, Self::Error>;
}

impl<T> MaybeResult<T> for T {
    type Error = !;

    fn from(x: Result<T, Self::Error>) -> Self {
        let Ok(x) = x;
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        Ok(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    fn from(x: Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        self
    }
}
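// Informally: `MaybeResult` lets the same code serve both infallible contexts,
// where `C::TyLayout = TyLayout<'tcx>` (the error type `!` above makes
// `to_result` irrefutable), and fallible ones, where
// `C::TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>`; callers such as
// `pointee_info_at` below can uniformly write
// `cx.layout_of(ty).to_result().ok()` against either.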

pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;

impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}

impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}

// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self,
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl ty::query::TyCtxtAt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
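// Typical use from elsewhere in the compiler (a sketch, with hypothetical
// `tcx`, `param_env`, and a monomorphic `ty` in scope):
//
//     let layout = tcx.layout_of(param_env.and(ty))?;
//     let (size, align) = (layout.size, layout.align.abi);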

impl<'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout: MaybeResult<TyLayout<'tcx>>>
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.kind {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::Multiple { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        let discr_layout = |discr: &Scalar| -> C::TyLayout {
            let layout = LayoutDetails::scalar(cx, discr.clone());
            MaybeResult::from(Ok(TyLayout {
                details: tcx.intern_layout(layout),
                ty: discr.value.to_ty(tcx),
            }))
        };

        cx.layout_of(match this.ty.kind {
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat `*T` type as its own thin pointer data field.
                // This provides information about, e.g., DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                    };
                    return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    }));
                }

                match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.lifetimes.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME: use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods may
                        not be object safe and are thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.as_closure().upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                match this.variants {
                    Variants::Single { index } => {
                        substs.as_generator().state_tys(def_id, tcx)
                            .nth(index.as_usize()).unwrap()
                            .nth(i).unwrap()
                    }
                    Variants::Multiple { ref discr, discr_index, .. } => {
                        if i == discr_index {
                            return discr_layout(discr);
                        }
                        substs.as_generator().prefix_tys(def_id, tcx).nth(i).unwrap()
                    }
                }
            }

            ty::Tuple(tys) => tys[i].expect_ty(),

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Multiple { ref discr, .. } => {
                        assert_eq!(i, 0);
                        return discr_layout(discr);
                    }
                }
            }

            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Placeholder(..) | ty::Opaque(..) | ty::Param(_) | ty::Infer(_) |
            ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }

    fn pointee_info_at(
        this: TyLayout<'tcx>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo> {
        match this.ty.kind {
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                cx.layout_of(mt.ty).to_result().ok()
                    .map(|layout| PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: None,
                    })
            }

            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let tcx = cx.tcx();
                let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
                let kind = match mt {
                    hir::Mutability::Immutable => if is_freeze {
                        PointerKind::Frozen
                    } else {
                        PointerKind::Shared
                    },
                    hir::Mutability::Mutable => {
                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
                        // panic=abort mode. That was deemed right, as prior versions had many bugs
                        // in conjunction with unwinding, but later versions didn’t seem to have
                        // said issues. See issue #31681.
                        //
                        // Alas, later on we encountered a case where noalias would generate wrong
                        // code altogether even with recent versions of LLVM in *safe* code with no
                        // unwinding involved. See #54462.
                        //
                        // For now, do not enable mutable_noalias by default at all, while the
                        // issue is being figured out.
                        let mutable_noalias = tcx.sess.opts.debugging_opts.mutable_noalias
                            .unwrap_or(false);
                        if mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };
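                // An illustrative (not exhaustive) mapping: `&i32` yields
                // `Frozen`, `&Cell<i32>` yields `Shared` (its pointee is not
                // `Freeze`), and `&mut T` yields `Shared` unless
                // `-Zmutable-noalias=yes` is passed, in which case it yields
                // `UniqueBorrowed`.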

                cx.layout_of(ty).to_result().ok()
                    .map(|layout| PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: Some(kind),
                    })
            }

            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes). This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        discr_kind: DiscriminantKind::Niche {
                            dataful_variant,
                            ..
                        },
                        discr_index,
                        ..
                    } if this.fields.offset(discr_index) == offset =>
                        Some(this.for_variant(cx, dataful_variant)),
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldPlacement::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok()
                                .and_then(|field| {
                                    if ptr_end <= field_start + field.size {
                                        // We found the right field, look inside it.
                                        field.pointee_info_at(cx, offset - field_start)
                                    } else {
                                        None
                                    }
                                });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.kind {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        }
    }
}

impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
    fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
        use crate::ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}

impl<'tcx> ty::Instance<'tcx> {
    // NOTE(eddyb) this is private to avoid using it from outside of
    // `FnAbi::of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
    // or should go through `FnAbi` instead, to avoid losing any
    // adjustments `FnAbi::of_instance` might be performing.
    fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
        let ty = self.ty(tcx);
        match ty.kind {
            ty::FnDef(..) |
            // Shims currently have type FnPtr. Not sure this should remain.
            ty::FnPtr(_) => {
                let mut sig = ty.fn_sig(tcx);
                if let ty::InstanceDef::VtableShim(..) = self.def {
                    // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
                    sig = sig.map_bound(|mut sig| {
                        let mut inputs_and_output = sig.inputs_and_output.to_vec();
                        inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                        sig
                    });
                }
                sig
            }
            ty::Closure(def_id, substs) => {
                let sig = substs.as_closure().sig(def_id, tcx);

                let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
                sig.map_bound(|sig| tcx.mk_fn_sig(
                    iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
                    sig.output(),
                    sig.c_variadic,
                    sig.unsafety,
                    sig.abi
                ))
            }
            ty::Generator(def_id, substs, _) => {
                let sig = substs.as_generator().poly_sig(def_id, tcx);

                let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

                let pin_did = tcx.lang_items().pin_type().unwrap();
                let pin_adt_ref = tcx.adt_def(pin_did);
                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

                sig.map_bound(|sig| {
                    let state_did = tcx.lang_items().gen_state().unwrap();
                    let state_adt_ref = tcx.adt_def(state_did);
                    let state_substs = tcx.intern_substs(&[
                        sig.yield_ty.into(),
                        sig.return_ty.into(),
                    ]);
                    let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);

                    tcx.mk_fn_sig(iter::once(env_ty),
                                  ret_ty,
                                  false,
                                  hir::Unsafety::Normal,
                                  rustc_target::spec::abi::Abi::Rust
                    )
                })
            }
            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty)
        }
    }
}
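// A sketch of the shapes this produces (illustrative only): a closure of type
// `FnMut(u8) -> u8` is lowered roughly to
// `extern "rust-call" fn(&mut {closure}, (u8,)) -> u8`, while a generator
// yielding `Y` and returning `R` becomes
// `fn(Pin<&mut {generator}>) -> GeneratorState<Y, R>`.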

pub trait FnAbiExt<'tcx, C>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
    ///
    /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
    /// instead, where the instance is an `InstanceDef::Virtual`.
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
    /// direct calls to an `fn`.
    ///
    /// NB: that includes virtual calls, which are represented by "direct calls"
    /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;

    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
    ) -> Self;
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
}

impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        call::FnAbi::new_internal(cx, sig, extra_args, None, |ty, _| ArgAbi::new(cx.layout_of(ty)))
    }

    fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        let sig = instance.fn_sig_for_fn_abi(cx.tcx());

        let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
            Some(cx.tcx().caller_location_ty())
        } else {
            None
        };

        call::FnAbi::new_internal(cx, sig, extra_args, caller_location, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen.
            if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // Unsized `self` is passed as a pointer to `self`.
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx().mk_mut_ptr(layout.ty)
                } else {
                    match layout.abi {
                        Abi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout),
                    }

                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
                    // elsewhere in the compiler as a method on a `dyn Trait`.
                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until
                    // we get a built-in pointer type.
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes;
                            }
                        }

                        bug!(
                            "receiver has no non-zero-sized fields {:?}",
                            fat_pointer_layout
                        );
                    }

                    fat_pointer_layout.ty
                };

                // We now have a type like `*mut RcBox<dyn Trait>`;
                // change its layout to that of `*mut ()`, a thin pointer, but keep the same type.
                // This is understood as a special case elsewhere in the compiler.
                let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgAbi::new(layout)
        })
    }

    fn new_internal(
        cx: &C,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);

        let sig = cx
            .tcx()
            .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);

        use rustc_target::spec::abi::Abi::*;
        let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),
            EfiApi => bug!("eficall abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.c_variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().kind {
                ty::Tuple(tupled_arguments) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.iter().map(|k| k.expect_ty()).collect()
                }
                _ => {
                    bug!(
                        "argument to function with \"rust-call\" ABI \
                         is not a tuple"
                    );
                }
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args.to_vec()
        };
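        // For illustration: a hypothetical "rust-call" signature such as
        // `extern "rust-call" fn(&mut F, (u8, u16))` ends up with
        // `inputs = [&mut F]` and `extra_args = [u8, u16]`, i.e. the trailing
        // tuple is flattened into individual ABI-level arguments.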

        let target = &cx.tcx().sess.target.target;
        let win_x64_gnu =
            target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
        let linux_s390x =
            target.target_os == "linux" && target.arch == "s390x" && target.target_env == "gnu";
        let linux_sparc64 =
            target.target_os == "linux" && target.arch == "sparc64" && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false,
        };

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &Scalar,
                                      layout: TyLayout<'tcx>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types are handled below.
            if scalar.value != Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
                    // for the entire duration of the function, as they can be deallocated
                    // at any time. Set their valid size to 0.
                    attrs.pointee_size = match kind {
                        PointerKind::UniqueOwned => Size::ZERO,
                        _ => pointee.size
                    };

                    // `Box` pointer parameters never alias because ownership is transferred.
                    // `&mut` pointer parameters never alias other parameters,
                    // or mutable global data.
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality.
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };
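        // A rough example of the result (assuming typical layouts): a `&u64`
        // argument would come out `NonNull` + `NoAlias` + `ReadOnly` with
        // `pointee_size = 8`, while a `Box<u64>` would get `NoAlias` with
        // `pointee_size = 0`, per the deallocation caveat above.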

        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for s390x-unknown-linux-gnu
                // and sparc64-unknown-linux-gnu.
                if is_return || rust_abi || (!win_x64_gnu && !linux_s390x && !linux_sparc64) {
                    arg.mode = PassMode::Ignore;
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
                    adjust_for_rust_scalar(
                        &mut b_attrs,
                        b,
                        arg.layout,
                        a.value.size(cx).align_to(b.value.align(cx).abi),
                        false,
                    );
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
                }
            }

            arg
        };

        let mut fn_abi = FnAbi {
            ret: arg_of(sig.output(), None),
            args: inputs
                .iter()
                .cloned()
                .chain(extra_args)
                .chain(caller_location)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect(),
            c_variadic: sig.c_variadic,
            conv,
        };
        fn_abi.adjust_for_abi(cx, sig.abi);
        fn_abi
    }

    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
        if abi == SpecAbi::Unadjusted {
            return;
        }

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
        {
            let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() {
                    return;
                }

                match arg.layout.abi {
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                    Abi::Vector { .. }
                        if abi != SpecAbi::PlatformIntrinsic
                            && cx.tcx().sess.target.target.options.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return;
                    }

                    _ => return,
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg {
                        kind: RegKind::Integer,
                        size,
                    });
                }
            };
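            // For instance (a sketch; exact behavior depends on the target):
            // an 8-byte `struct Pair(u32, u32)` with `Abi::Aggregate` fits in
            // `Pointer.size(cx)` on a 64-bit target and is cast to a single
            // integer register of its size, while a 16-byte aggregate exceeds
            // it and is made indirect instead.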
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.tcx().sess.fatal(&msg);
        }
    }
}