use crate::session::{self, DataTypeKind};
use crate::ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};

use syntax::ast::{self, Ident, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::i128;
use std::iter;
use std::mem;
use std::ops::Bound;

use crate::hir;
use crate::ich::StableHashingContext;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::GeneratorSubsts;
use crate::ty::subst::Subst;
use rustc_data_structures::bit_set::BitSet;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
                                           StableHasherResult};

pub use rustc_target::abi::*;
use rustc_target::spec::{HasTargetSpec, abi::Abi as SpecAbi};
use rustc_target::abi::call::{
    ArgAttribute, ArgAttributes, ArgType, Conv, FnType, IgnoreMode, PassMode, Reg, RegKind
};

pub trait IntegerExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    /// N.B.: u128 values above i128::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Float(FloatTy::F32) => tcx.types.f32,
            Float(FloatTy::F64) => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
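
// For example (illustrative, not from the original source): a `&[u8]` fat
// pointer laid out as a `ScalarPair` has its data pointer in field
// `FAT_PTR_ADDR` (0) and its length in field `FAT_PTR_EXTRA` (1).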

#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers {
        layout_raw,
        ..*providers
    };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldPlacement::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for i in 0..map.len() {
        inverse[map[i] as usize] = i as u32;
    }
    inverse
}
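
// Worked example (illustrative only): for `map = [2, 0, 1]` we have
// `map[0] = 2`, so `inverse[2] = 0`; `map[1] = 0`, so `inverse[0] = 1`;
// and `map[2] = 1`, so `inverse[1] = 2`. Hence
// `invert_mapping(&[2, 0, 1]) == vec![1, 2, 0]`, and inverting twice
// returns the original permutation.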

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutDetails {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);
        LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1]
            },
            abi: Abi::ScalarPair(a, b),
            align,
            size
        }
    }
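
    // Numeric sketch (illustrative only): pairing an `i8` scalar with an
    // `i32` scalar on a target where `i32` is 4-byte aligned yields
    // `b_offset = Size(1).align_to(4) = 4` and
    // `size = (4 + 4).align_to(4) = 8`, i.e. three bytes of padding
    // between the two scalars.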

    fn univariant_uninterned(&self,
                             ty: Ty<'tcx>,
                             fields: &[TyLayout<'_>],
                             repr: &ReprOptions,
                             kind: StructKind) -> Result<LayoutDetails, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let packed = repr.packed();
        if packed && repr.align > 0 {
            bug!("struct cannot be packed and aligned");
        }

        let pack = Align::from_bytes(repr.pack as u64).unwrap();

        let mut align = if packed {
            dl.i8_align
        } else {
            dl.aggregate_align
        };

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        let mut optimize = !repr.inhibit_struct_field_reordering_opt();
        if let StructKind::Prefixed(_, align) = kind {
            optimize &= align.bytes() == 1;
        }

        if optimize {
            let end = if let StructKind::MaybeUnsized = kind {
                fields.len() - 1
            } else {
                fields.len()
            };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyLayout<'_>| {
                if packed { f.align.abi.min(pack) } else { f.align.abi }
            };
            match kind {
                StructKind::AlwaysSized |
                StructKind::MaybeUnsized => {
                    optimizing.sort_by_key(|&x| {
                        // Place ZSTs first to avoid "interesting offsets",
                        // especially with only one or two non-ZST fields.
                        let f = &fields[x as usize];
                        (!f.is_zst(), cmp::Reverse(field_align(f)))
                    });
                }
                StructKind::Prefixed(..) => {
                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                }
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
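        //
        // Concrete example (illustrative only): for fields `(u8, u32, u8)`,
        // the sort above yields `inverse_memory_index = [1, 0, 2]` (the
        // `u32` goes first, then the two `u8`s in source order), so memory
        // order is field 1, field 0, field 2.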

        let mut offset = Size::ZERO;

        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align = if packed {
                prefix_align.min(pack)
            } else {
                prefix_align
            };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            if !sized {
                bug!("univariant: field #{} of `{}` comes after unsized field",
                     offsets.len(), ty);
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if packed {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            offset = offset.checked_add(field.size, dl)
                .ok_or(LayoutError::SizeOverflow(ty))?;
        }

        if repr.align > 0 {
            let repr_align = repr.align as u64;
            align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
            debug!("univariant repr_align: {:?}", repr_align);
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index;
        if optimize {
            memory_index = invert_mapping(&inverse_memory_index);
        } else {
            memory_index = inverse_memory_index;
        }

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs, and we need them to all start at 0.
            let mut zst_offsets =
                offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                let mut non_zst_fields =
                    fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                    // We have exactly one non-ZST field.
                    (Some((i, field)), None, None) => {
                        // Field fills the struct and it has a scalar or scalar pair ABI.
                        if offsets[i].bytes() == 0 &&
                           align.abi == field.align.abi &&
                           size == field.size {
                            match field.abi {
                                // For plain scalars, or vectors of them, we can't unpack
                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                    abi = field.abi.clone();
                                }
                                // But scalar pairs are Rust-specific and get
                                // treated as aggregates by C ABIs anyway.
                                Abi::ScalarPair(..) => {
                                    abi = field.abi.clone();
                                }
                                _ => {}
                            }
                        }
                    }

                    // Two non-ZST fields, and they're both scalars.
                    (Some((i, &TyLayout {
                        details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
                    })), Some((j, &TyLayout {
                        details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
                    })), None) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = self.scalar_pair(a.clone(), b.clone());
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if offsets[i] == pair_offsets[0] &&
                           offsets[j] == pair_offsets[1] &&
                           align == pair.align &&
                           size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }

                    _ => {}
                }
            }
        }

        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Arbitrary {
                offsets,
                memory_index
            },
            abi,
            align,
            size
        })
    }

    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar {
                value,
                valid_range: 0..=(!0 >> (128 - bits))
            }
        };
        let scalar = |value: Primitive| {
            tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
        };

        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types());

        Ok(match ty.sty {
            // Basic scalars.
            ty::Bool => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I8, false),
                    valid_range: 0..=1
                }))
            }
            ty::Char => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I32, false),
                    valid_range: 0..=0x10FFFF
                }))
            }
            ty::Int(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
            }
            ty::Uint(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
            }
            ty::Float(fty) => scalar(Float(fty)),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::Never => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Union(0),
                    abi: Abi::Uninhabited,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail(pointee);
                let metadata = match unsized_part.sty {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.assert_usize(tcx).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi,
                    align: element.align,
                    size
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: element.align,
                    size: Size::ZERO
                })
            }
            ty::Str => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: Size::from_bytes(1),
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Odd unit types.
            ty::FnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(ty, &[], &ReprOptions::default(),
                                                          StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }

            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, &substs)?,

            ty::Closure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Tuple(tys) => {
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|k| {
                    self.layout_of(k.expect_ty())
                }).collect::<Result<Vec<_>, _>>()?, &ReprOptions::default(), kind)?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx) as u64;
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    let packed = def.repr.packed();
                    if packed && def.repr.align > 0 {
                        bug!("Union cannot be packed and aligned");
                    }

                    let pack = Align::from_bytes(def.repr.pack as u64).unwrap();

                    let mut align = if packed {
                        dl.i8_align
                    } else {
                        dl.aggregate_align
                    };

                    if def.repr.align > 0 {
                        let repr_align = def.repr.align as u64;
                        align = align.max(
                            AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());

                        let field_align = if packed {
                            field.align.min(AbiAndPrefAlign::new(pack))
                        } else {
                            field.align
                        };
                        align = align.max(field_align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(
                                        scalar_unit(x.value),
                                        scalar_unit(y.value),
                                    )
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector {
                                        element: scalar_unit(x.value),
                                        count: *count,
                                    }
                                }
                                Abi::Uninhabited |
                                Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }
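
                        // Example (illustrative only): in `union U { a: u32, b: char }`,
                        // both fields normalize to a full-range `Scalar(Int(I32, false))`,
                        // so the scalar ABI is forwarded; in `union V { a: u32, b: f32 }`
                        // the primitives differ, so `V` stays `Aggregate`.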

                        size = cmp::max(size, field.size);
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        align,
                        size: size.align_to(align.abi)
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
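                // For instance (illustrative only): given `enum Void {}`,
                // in `enum E { A(u32), B(Void) }` variant `B` is uninhabited
                // and all of its fields are ZSTs, so `B` is absent and `E`
                // is laid out like a struct containing a single `u32`.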
                let (present_first, present_second) = {
                    let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
                        if absent(v) {
                            None
                        } else {
                            Some(i)
                        }
                    });
                    (present_variants.next(), present_variants.next())
                };
                if present_first.is_none() {
                    // Uninhabited because it has no variants, or only absent ones.
                    return tcx.layout_raw(param_env.and(tcx.types.never));
                }

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                     // Representation optimizations are allowed.
                     !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].len() == 0 {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized = tcx.type_of(last_field.did)
                                              .is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized { StructKind::MaybeUnsized }
                        else { StructKind::AlwaysSized }
                    };

                    let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) |
                        Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            if let Bound::Included(start) = start {
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }
                    return Ok(tcx.intern_layout(st));
                }

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def.variants.iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                // Niche-filling enum optimization.
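                // Canonical example (illustrative only): `Option<&T>` has one
                // dataful variant (`Some`) holding a non-null pointer, so the
                // niche value 0 encodes `None` and the enum is pointer-sized,
                // with no separate tag.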
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (
                            niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
                        ) as u128;
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match self.find_niche(field)? {
                                Some(niche) => niche,
                                _ => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };

                            let mut align = dl.aggregate_align;
                            let st = variants.iter_enumerated().map(|(j, v)| {
                                let mut st = self.univariant_uninterned(ty, v,
                                    &def.repr, StructKind::AlwaysSized)?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };

                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::Multiple {
                                    discr: niche_scalar,
                                    discr_kind: DiscriminantKind::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    discr_index: 0,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0]
                                },
                                abi,
                                size,
                                align,
                            }));
                        }
                    }
                }

                let (mut min, mut max) = (i128::max_value(), i128::min_value());
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min { min = x; }
                    if x > max { max = x; }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::max_value(), i128::min_value()) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
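                // E.g. (illustrative only): discriminants 0..=200 with no
                // `#[repr]` attribute fit in `(I8, false)`, while -1..=1
                // needs `(I8, true)`.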

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
                    let mut st = self.univariant_uninterned(ty, &field_layouts,
                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi.bytes() != 1 {
                            start_align = start_align.min(field.align.abi);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck for
                    // some reason at this point (based on values discriminant can take on). Mostly
                    // because this discriminant will be loaded, and then stored into variable of
                    // type calculated by typeck. Consider such a case (a bug): typeck decided on
                    // byte-sized discriminant, but layout thinks we need 16 bits to store all
                    // discriminant values. That would be a bug, because then, in codegen, in order
                    // to store this 16-bit discriminant into an 8-bit sized temporary some of the
                    // space necessary to represent it would have to be discarded (or layout is
                    // wrong on thinking it needs 16 bits).
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make discr type however large (as an optimisation)
                    // after this point – we’ll just truncate the value we load in codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!()
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
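                // Note (illustrative only): the range may wrap. For
                // discriminants -1..=0 with `ity == I8`, `tag_mask == 0xff`,
                // so `valid_range` becomes `0xff..=0x00`, which consumers
                // interpret as a wrapping (two's complement) range.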
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields = field_layouts
                            .iter()
                            .zip(offsets)
                            .filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if pair_offsets[0] == Size::ZERO &&
                           pair_offsets[1] == *offset &&
                           align == pair.align &&
                           size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Multiple {
                        discr: tag,
                        discr_kind: DiscriminantKind::Tag,
                        discr_index: 0,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0]
                    },
                    abi,
                    align,
                    size
                })
            }

            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Bound(..) |
            ty::Placeholder(..) |
            ty::UnnormalizedProjection(..) |
            ty::GeneratorWitness(..) |
            ty::Infer(_) => {
                bug!("LayoutDetails::compute: unexpected type `{}`", ty)
            }

            ty::Param(_) | ty::Error => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
}

/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    Unassigned,
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    Ineligible(Option<u32>),
}

// When laying out generators, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.
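//
// Sketch of the resulting layout (illustrative only):
//
//     [ upvars | discriminant | promoted (ineligible) locals | per-variant fields ]
//     `----------------------- prefix ----------------------'
//
// A local live across two suspension points appears in two variants, so it
// is ineligible and lands in the prefix; a local live across only one
// suspension point can share its slot with locals from other variants.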
1235impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1236 /// Compute the eligibility and assignment of each local.
1237 fn generator_saved_local_eligibility(&self, info: &GeneratorLayout<'tcx>)
1238 -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1239 use SavedLocalEligibility::*;
1240
1241 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1242 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1243
1244 // The saved locals not eligible for overlap. These will get
1245 // "promoted" to the prefix of our generator.
1246 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1247
1248 // Figure out which of our saved locals are fields in only
1249 // one variant. The rest are deemed ineligible for overlap.
1250 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1251 for local in fields {
1252 match assignments[*local] {
1253 Unassigned => {
1254 assignments[*local] = Assigned(variant_index);
1255 }
1256 Assigned(idx) => {
1257 // We've already seen this local at another suspension
1258 // point, so it is no longer a candidate.
1259 trace!("removing local {:?} in >1 variant ({:?}, {:?})",
1260 local, variant_index, idx);
1261 ineligible_locals.insert(*local);
1262 assignments[*local] = Ineligible(None);
1263 }
1264 Ineligible(_) => {},
1265 }
1266 }
1267 }
1268
1269 // Next, check every pair of eligible locals to see if they
1270 // conflict.
1271 for local_a in info.storage_conflicts.rows() {
1272 let conflicts_a = info.storage_conflicts.count(local_a);
1273 if ineligible_locals.contains(local_a) {
1274 continue;
1275 }
1276
1277 for local_b in info.storage_conflicts.iter(local_a) {
1278 // local_a and local_b are storage live at the same time, therefore they
1279 // cannot overlap in the generator layout. The only way to guarantee
1280 // this is if they are in the same variant, or one is ineligible
1281 // (which means it is stored in every variant).
1282 if ineligible_locals.contains(local_b) ||
1283 assignments[local_a] == assignments[local_b]
1284 {
1285 continue;
1286 }
1287
1288 // If they conflict, we will choose one to make ineligible.
1289 // This is not always optimal; it's just a greedy heuristic that
1290 // seems to produce good results most of the time.
1291 let conflicts_b = info.storage_conflicts.count(local_b);
1292 let (remove, other) = if conflicts_a > conflicts_b {
1293 (local_a, local_b)
1294 } else {
1295 (local_b, local_a)
1296 };
1297 ineligible_locals.insert(remove);
1298 assignments[remove] = Ineligible(None);
1299 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1300 }
1301 }
1302
1303 // Write down the order of our locals that will be promoted to the prefix.
1304 {
1305 let mut idx = 0u32;
1306 for local in ineligible_locals.iter() {
1307 assignments[local] = Ineligible(Some(idx));
1308 idx += 1;
1309 }
1310 }
1311 debug!("generator saved local assignments: {:?}", assignments);
1312
1313 (ineligible_locals, assignments)
1314 }
1315
1316 /// Compute the full generator layout.
1317 fn generator_layout(
1318 &self,
1319 ty: Ty<'tcx>,
1320 def_id: hir::def_id::DefId,
1321 substs: &GeneratorSubsts<'tcx>,
1322 ) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
1323 use SavedLocalEligibility::*;
1324 let tcx = self.tcx;
1325
1326 let subst_field = |ty: Ty<'tcx>| { ty.subst(tcx, substs.substs) };
1327
1328 let info = tcx.generator_layout(def_id);
1329 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1330
1331 // Build a prefix layout, including "promoting" all ineligible
1332 // locals as part of the prefix. We compute the layout of all of
1333 // these fields at once to get optimal packing.
1334 let discr_index = substs.prefix_tys(def_id, tcx).count();
1335 let promoted_tys =
1336 ineligible_locals.iter().map(|local| subst_field(info.field_tys[local]));
1337 let prefix_tys = substs.prefix_tys(def_id, tcx)
1338 .chain(iter::once(substs.discr_ty(tcx)))
1339 .chain(promoted_tys);
1340 let prefix = self.univariant_uninterned(
1341 ty,
1342 &prefix_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
1343 &ReprOptions::default(),
1344 StructKind::AlwaysSized)?;
1345 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1346
1347 // Split the prefix layout into the "outer" fields (upvars and
1348 // discriminant) and the "promoted" fields. Promoted fields will
1349 // get included in each variant that requested them in
1350 // GeneratorLayout.
1351 debug!("prefix = {:#?}", prefix);
1352 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1353 FieldPlacement::Arbitrary { mut offsets, memory_index } => {
1354 let mut inverse_memory_index = invert_mapping(&memory_index);
1355
1356 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1357 // "outer" and "promoted" fields respectively.
1358 let b_start = (discr_index + 1) as u32;
1359 let offsets_b = offsets.split_off(b_start as usize);
1360 let offsets_a = offsets;
1361
1362 // Disentangle the "a" and "b" components of `inverse_memory_index`
1363 // by preserving the order but keeping only one disjoint "half" each.
1364 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1365 let inverse_memory_index_b: Vec<_> =
1366 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1367 inverse_memory_index.retain(|&i| i < b_start);
1368 let inverse_memory_index_a = inverse_memory_index;
1369
1370 // Since `inverse_memory_index_{a,b}` each only refer to their
1371 // respective fields, they can be safely inverted
1372 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1373 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1374
1375 let outer_fields = FieldPlacement::Arbitrary {
1376 offsets: offsets_a,
1377 memory_index: memory_index_a,
1378 };
1379 (outer_fields, offsets_b, memory_index_b)
1380 }
1381 _ => bug!(),
1382 };
1383
1384 let mut size = prefix.size;
1385 let mut align = prefix.align;
1386 let variants = info.variant_fields.iter_enumerated().map(|(index, variant_fields)| {
1387 // Only include overlap-eligible fields when we compute our variant layout.
1388 let variant_only_tys = variant_fields
1389 .iter()
1390 .filter(|local| {
1391 match assignments[**local] {
1392 Unassigned => bug!(),
1393 Assigned(v) if v == index => true,
1394 Assigned(_) => bug!("assignment does not match variant"),
1395 Ineligible(_) => false,
1396 }
1397 })
1398 .map(|local| subst_field(info.field_tys[*local]));
1399
1400 let mut variant = self.univariant_uninterned(
1401 ty,
1402 &variant_only_tys
1403 .map(|ty| self.layout_of(ty))
1404 .collect::<Result<Vec<_>, _>>()?,
1405 &ReprOptions::default(),
1406 StructKind::Prefixed(prefix_size, prefix_align.abi))?;
1407 variant.variants = Variants::Single { index };
1408
1409 let (offsets, memory_index) = match variant.fields {
1410 FieldPlacement::Arbitrary { offsets, memory_index } => {
1411 (offsets, memory_index)
1412 }
1413 _ => bug!(),
1414 };
1415
1416 // Now, stitch the promoted and variant-only fields back together in
1417 // the order they are mentioned by our GeneratorLayout.
1418 // Because we only use some subset (that can differ between variants)
1419 // of the promoted fields, we can't just pick those elements of the
1420 // `promoted_memory_index` (as we'd end up with gaps).
1421 // So instead, we build an "inverse memory_index", as if all of the
1422 // promoted fields were being used, but leave the elements not in the
1423 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1424 // obtain a valid (bijective) mapping.
1425 const INVALID_FIELD_IDX: u32 = !0;
1426 let mut combined_inverse_memory_index =
1427 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1428 let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index);
1429 let combined_offsets = variant_fields.iter().enumerate().map(|(i, local)| {
1430 let (offset, memory_index) = match assignments[*local] {
1431 Unassigned => bug!(),
1432 Assigned(_) => {
1433 let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
1434 (offset, promoted_memory_index.len() as u32 + memory_index)
1435 }
1436 Ineligible(field_idx) => {
1437 let field_idx = field_idx.unwrap() as usize;
1438 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1439 }
1440 };
1441 combined_inverse_memory_index[memory_index as usize] = i as u32;
1442 offset
1443 }).collect();
1444
1445 // Remove the unused slots and invert the mapping to obtain the
1446 // combined `memory_index` (also see previous comment).
1447 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1448 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1449
1450 variant.fields = FieldPlacement::Arbitrary {
1451 offsets: combined_offsets,
1452 memory_index: combined_memory_index,
1453 };
1454
1455 size = size.max(variant.size);
1456 align = align.max(variant.align);
1457 Ok(variant)
1458 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1459
1460 let abi = if prefix.abi.is_uninhabited() ||
1461 variants.iter().all(|v| v.abi.is_uninhabited()) {
1462 Abi::Uninhabited
1463 } else {
1464 Abi::Aggregate { sized: true }
1465 };
1466 let discr = match &self.layout_of(substs.discr_ty(tcx))?.abi {
1467 Abi::Scalar(s) => s.clone(),
1468 _ => bug!(),
1469 };
1470
1471 let layout = tcx.intern_layout(LayoutDetails {
1472 variants: Variants::Multiple {
1473 discr,
1474 discr_kind: DiscriminantKind::Tag,
1475 discr_index,
1476 variants,
1477 },
1478 fields: outer_fields,
1479 abi,
1480 size,
1481 align,
1482 });
1483 debug!("generator layout ({:?}): {:#?}", ty, layout);
1484 Ok(layout)
1485 }
7cac9316
XL
1486
1487 /// This is invoked by the `layout_raw` query to record the final
1488 /// layout of each type.
532ac7d7 1489 #[inline(always)]
a1dfa0c6 1490 fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
532ac7d7
XL
1491 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1492 // for dumping later.
1493 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1494 self.record_layout_for_printing_outlined(layout)
1495 }
1496 }
1497
1498 fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
1499 // Ignore layouts that are done with non-empty environments or
1500 // non-monomorphic layouts, as the user only wants to see the stuff
1501 // resulting from the final codegen session.
7cac9316 1502 if
2c00a5a8
XL
1503 layout.ty.has_param_types() ||
1504 layout.ty.has_self_ty() ||
1505 !self.param_env.caller_bounds.is_empty()
7cac9316
XL
1506 {
1507 return;
1508 }
1509
7cac9316 1510 // (delay format until we actually need it)
83c7162d 1511 let record = |kind, packed, opt_discr_size, variants| {
2c00a5a8
XL
1512 let type_desc = format!("{:?}", layout.ty);
1513 self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1514 type_desc,
a1dfa0c6 1515 layout.align.abi,
2c00a5a8 1516 layout.size,
83c7162d 1517 packed,
2c00a5a8
XL
1518 opt_discr_size,
1519 variants);
7cac9316
XL
1520 };
1521
2c00a5a8 1522 let adt_def = match layout.ty.sty {
b7449926 1523 ty::Adt(ref adt_def, _) => {
2c00a5a8 1524 debug!("print-type-size t: `{:?}` process adt", layout.ty);
ff7c6d11 1525 adt_def
7cac9316
XL
1526 }
1527
b7449926 1528 ty::Closure(..) => {
2c00a5a8 1529 debug!("print-type-size t: `{:?}` record closure", layout.ty);
83c7162d 1530 record(DataTypeKind::Closure, false, None, vec![]);
7cac9316
XL
1531 return;
1532 }
1533
1534 _ => {
2c00a5a8 1535 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
7cac9316
XL
1536 return;
1537 }
1538 };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.packed();

        let build_variant_info = |n: Option<Ident>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi.bytes(),
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };

        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].ident);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(adt_kind.into(),
                           adt_packed,
                           None,
                           vec![build_variant_info(Some(variant_def.ident),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::Multiple { ref discr, ref discr_kind, .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                let variant_infos: Vec<_> =
                    adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(Some(variant_def.ident),
                                           &fields,
                                           layout.for_variant(self, i))
                    })
                    .collect();
                record(adt_kind.into(), adt_packed, match discr_kind {
                    DiscriminantKind::Tag => Some(discr.value.size(self)),
                    _ => None
                }, variant_infos);
            }
        }
    }
}

/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof, and null-pointer-optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
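
// Illustrative note (not from the original source): for a type parameter
// `T: ?Sized`, both `&'a T` and `Option<&'a T>` compute to
// `SizeSkeleton::Pointer { tail: T, .. }` even though neither has a
// statically known layout; `same_size` below compares only the tails, which
// is what lets a generic `transmute` between the two be accepted.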

impl<'tcx> SizeSkeleton<'tcx> {
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}

pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}

pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}

impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.global_tcx()
    }
}

impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.tcx()
    }
}

pub trait MaybeResult<T> {
    type Error;

    fn from(x: Result<T, Self::Error>) -> Self;
    fn to_result(self) -> Result<T, Self::Error>;
}

impl<T> MaybeResult<T> for T {
    type Error = !;

    fn from(x: Result<T, Self::Error>) -> Self {
        let Ok(x) = x;
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        Ok(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    fn from(x: Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        self
    }
}

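// `MaybeResult` abstracts over the two shapes `C::TyLayout` takes: a plain
// `TyLayout<'tcx>` (infallible, `Error = !`) and
// `Result<TyLayout<'tcx>, LayoutError<'tcx>>` (fallible). This lets the
// traversal code in `TyLayoutMethods` below be written once for both.
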
pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;

impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}

impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}

// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx(),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl ty::query::TyCtxtAt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx().at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
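
// Illustrative call site (not from this file): most consumers go through
// the inherent method above, e.g.
//
//     let layout = tcx.layout_of(param_env.and(ty))?;
//     let (size, align) = (layout.size.bytes(), layout.align.abi.bytes());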

impl<'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
where
    C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
    C::TyLayout: MaybeResult<TyLayout<'tcx>>,
    C: HasParamEnv<'tcx>,
{
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::Multiple { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

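    // Illustrative note (not from the original source): for `Option<u32>`,
    // `for_variant` with the `Some` index returns the layout of that variant
    // alone; asking a `Variants::Single` layout for any *other* variant
    // yields the zero-sized, uninhabited layout built above, since the
    // requested variant is necessarily uninhabited in that case.
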
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        let discr_layout = |discr: &Scalar| -> C::TyLayout {
            let layout = LayoutDetails::scalar(cx, discr.clone());
            MaybeResult::from(Ok(TyLayout {
                details: tcx.intern_layout(layout),
                ty: discr.value.to_ty(tcx),
            }))
        };

        cx.layout_of(match this.ty.sty {
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g., DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                    };
                    return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    }));
                }

                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.lifetimes.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME: use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                match this.variants {
                    Variants::Single { index } => {
                        substs.state_tys(def_id, tcx)
                            .nth(index.as_usize()).unwrap()
                            .nth(i).unwrap()
                    }
                    Variants::Multiple { ref discr, discr_index, .. } => {
                        if i == discr_index {
                            return discr_layout(discr);
                        }
                        substs.prefix_tys(def_id, tcx).nth(i).unwrap()
                    }
                }
            }

            ty::Tuple(tys) => tys[i].expect_ty(),

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Multiple { ref discr, .. } => {
                        assert_eq!(i, 0);
                        return discr_layout(discr);
                    }
                }
            }

            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Placeholder(..) | ty::Opaque(..) | ty::Param(_) | ty::Infer(_) |
            ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
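
    // Illustrative note (not from the original source): for `&[u8]`, field 0
    // is the data pointer (reported with the fat pointer's own type, as the
    // comment above explains) and field 1 is the `usize` length; for
    // `&dyn Trait`, field 1 is the vtable pointer, approximated above as
    // `&'static [usize; 3]`.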

    fn pointee_info_at(
        this: TyLayout<'tcx>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo> {
        match this.ty.sty {
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                cx.layout_of(mt.ty).to_result().ok()
                    .map(|layout| PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: None,
                    })
            }

            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let tcx = cx.tcx();
                let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
                let kind = match mt {
                    hir::MutImmutable => if is_freeze {
                        PointerKind::Frozen
                    } else {
                        PointerKind::Shared
                    },
                    hir::MutMutable => {
                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
                        // panic=abort mode. That was deemed right, as prior versions had many bugs
                        // in conjunction with unwinding, but later versions didn’t seem to have
                        // said issues. See issue #31681.
                        //
                        // Alas, later on we encountered a case where noalias would generate wrong
                        // code altogether even with recent versions of LLVM in *safe* code with no
                        // unwinding involved. See #54462.
                        //
                        // For now, do not enable mutable_noalias by default at all, while the
                        // issue is being figured out.
                        let mutable_noalias = tcx.sess.opts.debugging_opts.mutable_noalias
                            .unwrap_or(false);
                        if mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };

                cx.layout_of(ty).to_result().ok()
                    .map(|layout| PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: Some(kind),
                    })
            }

            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes). This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        discr_kind: DiscriminantKind::Niche {
                            dataful_variant,
                            ..
                        },
                        discr_index,
                        ..
                    } if this.fields.offset(discr_index) == offset =>
                        Some(this.for_variant(cx, dataful_variant)),
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldPlacement::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok()
                                .and_then(|field| {
                                    if ptr_end <= field_start + field.size {
                                        // We found the right field, look inside it.
                                        field.pointee_info_at(cx, offset - field_start)
                                    } else {
                                        None
                                    }
                                });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.sty {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        }
    }
}
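
// Illustrative note (not from the original source): on a typical 64-bit
// target, `pointee_info_at` for `&'a u32` at offset 0 yields size 4,
// align 4 and `safe: Some(PointerKind::Frozen)`; `adjust_for_rust_scalar`
// further down turns that into `dereferenceable`/`noalias`/`readonly`
// argument attributes.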

struct Niche {
    offset: Size,
    scalar: Scalar,
    available: u128,
}

impl Niche {
    fn reserve<'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
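
// Worked example (illustrative, not from the original source): `bool` is an
// 8-bit scalar with `valid_range` 0..=1, so a niche over it has
// `available == 254`. `reserve(cx, 1)` returns `start == 2` and widens the
// valid range to 0..=2; this is how `Option<bool>` stays one byte, with
// `None` encoded as the value 2.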

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            let available = if v.start() <= v.end() {
                v.start() + (max_value - v.end())
            } else {
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };

        // Local variables that live across yields are stored
        // in the generator type as fields. These may be uninitialized,
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iterate on `b` and then `a` because
                // `max_by_key` returns the last maximum.
                let niche = iter::once(
                    (b, a.value.size(self).align_to(b.value.align(self).abi))
                )
                .chain(iter::once((a, Size::ZERO)))
                .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { count: original_64_bit_count, .. } = layout.fields {
            // rust-lang/rust#57038: avoid ICE within FieldPlacement::count when count too big
            if original_64_bit_count > usize::max_value() as u64 {
                return Err(LayoutError::SizeOverflow(layout.ty));
            }
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
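
// Illustrative note (not from the original source): for
// `struct Wrapper(u32, char)`, `find_niche` recurses into the fields and
// returns a `Niche` for the `char` scalar (valid range 0..=0x10FFFF within
// 32 bits, leaving roughly 2^32 - 0x110000 spare values) at that field's
// offset, so `Option<Wrapper>` needs no extra discriminant byte.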

impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::Variants::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Multiple {
                ref discr,
                ref discr_kind,
                discr_index,
                ref variants,
            } => {
                discr.hash_stable(hcx, hasher);
                discr_kind.hash_stable(hcx, hasher);
                discr_index.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for DiscriminantKind {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::DiscriminantKind::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Tag => {}
            Niche {
                dataful_variant,
                ref niche_variants,
                niche_start,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'a>,
        hasher: &mut StableHasher<W>,
    ) {
        self.as_u32().hash_stable(hcx, hasher)
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let Scalar { value, ref valid_range } = *self;
        value.hash_stable(hcx, hasher);
        valid_range.start().hash_stable(hcx, hasher);
        valid_range.end().hash_stable(hcx, hasher);
    }
}

impl_stable_hash_for!(struct crate::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});

impl_stable_hash_for!(enum crate::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum crate::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});

impl_stable_hash_for!(struct crate::ty::layout::AbiAndPrefAlign {
    abi,
    pref
});

impl<'tcx> HashStable<StableHashingContext<'tcx>> for Align {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'tcx>,
        hasher: &mut StableHasher<W>,
    ) {
        self.bytes().hash_stable(hcx, hasher);
    }
}

impl<'tcx> HashStable<StableHashingContext<'tcx>> for Size {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'tcx>,
        hasher: &mut StableHasher<W>,
    ) {
        self.bytes().hash_stable(hcx, hasher);
    }
}

impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}

pub trait FnTypeExt<'tcx, C>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn of_instance(cx: &C, instance: &ty::Instance<'tcx>) -> Self;
    fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
    fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
    fn new_internal(
        cx: &C,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self;
    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
}

impl<'tcx, C> FnTypeExt<'tcx, C> for call::FnType<'tcx, Ty<'tcx>>
where
    C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
        + HasDataLayout
        + HasTargetSpec
        + HasTyCtxt<'tcx>
        + HasParamEnv<'tcx>,
{
    fn of_instance(cx: &C, instance: &ty::Instance<'tcx>) -> Self {
        let sig = instance.fn_sig(cx.tcx());
        let sig = cx
            .tcx()
            .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        call::FnType::new(cx, sig, &[])
    }

    fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        call::FnType::new_internal(cx, sig, extra_args, |ty, _| ArgType::new(cx.layout_of(ty)))
    }

    fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
        FnTypeExt::new_internal(cx, sig, extra_args, |ty, arg_idx| {
            let mut layout = cx.layout_of(ty);
            // Don't pass the vtable, it's not an argument of the virtual fn.
            // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
            // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
            if arg_idx == Some(0) {
                let fat_pointer_ty = if layout.is_unsized() {
                    // unsized `self` is passed as a pointer to `self`
                    // FIXME (mikeyhew) change this to use &own if it is ever added to the language
                    cx.tcx().mk_mut_ptr(layout.ty)
                } else {
                    match layout.abi {
                        Abi::ScalarPair(..) => (),
                        _ => bug!("receiver type has unsupported layout: {:?}", layout),
                    }

                    // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
                    // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
                    // elsewhere in the compiler as a method on a `dyn Trait`.
                    // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
                    // get a built-in pointer type
                    let mut fat_pointer_layout = layout;
                    'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
                        && !fat_pointer_layout.ty.is_region_ptr()
                    {
                        'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
                            let field_layout = fat_pointer_layout.field(cx, i);

                            if !field_layout.is_zst() {
                                fat_pointer_layout = field_layout;
                                continue 'descend_newtypes;
                            }
                        }

                        bug!(
                            "receiver has no non-zero-sized fields {:?}",
                            fat_pointer_layout
                        );
                    }

                    fat_pointer_layout.ty
                };

                // we now have a type like `*mut RcBox<dyn Trait>`
                // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
                // this is understood as a special case elsewhere in the compiler
                let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
                layout = cx.layout_of(unit_pointer_ty);
                layout.ty = fat_pointer_ty;
            }
            ArgType::new(layout)
        })
    }

    fn new_internal(
        cx: &C,
        sig: ty::FnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
    ) -> Self {
        debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);

        use rustc_target::spec::abi::Abi::*;
        let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::C,

            // It's the ABI's job to select this, not ours.
            System => bug!("system abi should be selected elsewhere"),

            Stdcall => Conv::X86Stdcall,
            Fastcall => Conv::X86Fastcall,
            Vectorcall => Conv::X86VectorCall,
            Thiscall => Conv::X86ThisCall,
            C => Conv::C,
            Unadjusted => Conv::C,
            Win64 => Conv::X86_64Win64,
            SysV64 => Conv::X86_64SysV,
            Aapcs => Conv::ArmAapcs,
            PtxKernel => Conv::PtxKernel,
            Msp430Interrupt => Conv::Msp430Intr,
            X86Interrupt => Conv::X86Intr,
            AmdGpuKernel => Conv::AmdGpuKernel,

            // These API constants ought to be more specific...
            Cdecl => Conv::C,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.c_variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().sty {
                ty::Tuple(tupled_arguments) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.iter().map(|k| k.expect_ty()).collect()
                }
                _ => {
                    bug!(
                        "argument to function with \"rust-call\" ABI \
                         is not a tuple"
                    );
                }
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args.to_vec()
        };

        let target = &cx.tcx().sess.target.target;
        let win_x64_gnu =
            target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
        let linux_s390x =
            target.target_os == "linux" && target.arch == "s390x" && target.target_env == "gnu";
        let linux_sparc64 =
            target.target_os == "linux" && target.arch == "sparc64" && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false,
        };

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: &Scalar,
                                      layout: TyLayout<'tcx>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.set(ArgAttribute::ZExt);
                return;
            }

            // Only pointer types are handled below.
            if scalar.value != Pointer {
                return;
            }

            if scalar.valid_range.start() < scalar.valid_range.end() {
                if *scalar.valid_range.start() > 0 {
                    attrs.set(ArgAttribute::NonNull);
                }
            }

            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_size = pointee.size;
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` pointer parameters never alias because ownership
                    // is transferred; `&mut` pointer parameters never alias
                    // other parameters or mutable global data.
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality.
                    let no_alias = match kind {
                        PointerKind::Shared => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }
                }
            }
        };

        // Store the index of the last argument. This is useful for working with
        // C-compatible variadic arguments.
        let last_arg_idx = if sig.inputs().is_empty() {
            None
        } else {
            Some(sig.inputs().len() - 1)
        };

        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
            let is_return = arg_idx.is_none();
            let mut arg = mk_arg_type(ty, arg_idx);
            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for s390x-unknown-linux-gnu
                // and sparc64-unknown-linux-gnu.
                if is_return || rust_abi || (!win_x64_gnu && !linux_s390x && !linux_sparc64) {
                    arg.mode = PassMode::Ignore(IgnoreMode::Zst);
                }
            }

            // If this is a C-variadic function, this is not the return value,
            // and there are one or more fixed arguments; ensure that the
            // `VaListImpl` is ignored as an argument.
            if sig.c_variadic {
                match (last_arg_idx, arg_idx) {
                    (Some(last_idx), Some(cur_idx)) if last_idx == cur_idx => {
                        let va_list_did = match cx.tcx().lang_items().va_list() {
                            Some(did) => did,
                            None => bug!("`va_list` lang item required for C-variadic functions"),
                        };
                        match ty.sty {
                            ty::Adt(def, _) if def.did == va_list_did => {
                                // This is the "spoofed" `VaListImpl`. Set the
                                // argument's mode so that it will be ignored.
                                arg.mode = PassMode::Ignore(IgnoreMode::CVarArgs);
                            }
                            _ => (),
                        }
                    }
                    _ => {}
                }
            }

            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
            if !is_return && rust_abi {
                if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
                    let mut a_attrs = ArgAttributes::new();
                    let mut b_attrs = ArgAttributes::new();
                    adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
                    adjust_for_rust_scalar(
                        &mut b_attrs,
                        b,
                        arg.layout,
                        a.value.size(cx).align_to(b.value.align(cx).abi),
                        false,
                    );
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;
                }
            }

            if let Abi::Scalar(ref scalar) = arg.layout.abi {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
                }
            }

            arg
        };

        let mut fn_ty = FnType {
            ret: arg_of(sig.output(), None),
            args: inputs
                .iter()
                .cloned()
                .chain(extra_args)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect(),
            c_variadic: sig.c_variadic,
            conv,
        };
        fn_ty.adjust_for_abi(cx, sig.abi);
        fn_ty
    }
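
    // Illustrative note (not from the original source): for an argument of
    // type `&u32` under the Rust ABI, `arg_of` produces a direct scalar
    // whose attributes end up with `NonNull`, `NoAlias` and `ReadOnly` set
    // and a 4-byte pointee size/alignment, via `adjust_for_rust_scalar`
    // above.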

    fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
        if abi == SpecAbi::Unadjusted {
            return;
        }

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
        {
            let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() {
                    return;
                }

                match arg.layout.abi {
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd use whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees,
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                    Abi::Vector { .. }
                        if abi != SpecAbi::PlatformIntrinsic
                            && cx.tcx().sess.target.target.options.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return;
                    }

                    _ => return,
                }

                let size = arg.layout.size;
                if arg.layout.is_unsized() || size > Pointer.size(cx) {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg {
                        kind: RegKind::Integer,
                        size,
                    });
                }
            };
            fixup(&mut self.ret);
            for arg in &mut self.args {
                fixup(arg);
            }
            if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
                attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        if let Err(msg) = self.adjust_for_cabi(cx, abi) {
            cx.tcx().sess.fatal(&msg);
        }
    }
}
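
// Illustrative note (not from the original source): under the Rust ABI,
// `adjust_for_abi` casts a small aggregate such as a 4-byte
// `struct Rgba(u8, u8, u8, u8)` to a single 4-byte integer register, while
// an aggregate larger than a pointer (say, 64 bytes) is made indirect and
// passed by hidden reference.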