compiler/rustc_middle/src/ty/layout.rs (rustc.git, upstream version 1.53.0+dfsg1)
1 // ignore-tidy-filelength
2 use crate::ich::StableHashingContext;
3 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
4 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
5 use crate::ty::subst::Subst;
6 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
7
8 use rustc_ast as ast;
9 use rustc_attr as attr;
10 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
11 use rustc_hir as hir;
12 use rustc_hir::lang_items::LangItem;
13 use rustc_index::bit_set::BitSet;
14 use rustc_index::vec::{Idx, IndexVec};
15 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
16 use rustc_span::symbol::{Ident, Symbol};
17 use rustc_span::DUMMY_SP;
18 use rustc_target::abi::call::{
19 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
20 };
21 use rustc_target::abi::*;
22 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy};
23
24 use std::cmp;
25 use std::fmt;
26 use std::iter;
27 use std::mem;
28 use std::num::NonZeroUsize;
29 use std::ops::Bound;
30
31 pub trait IntegerExt {
32 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
33 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
34 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
35 fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
36 fn repr_discr<'tcx>(
37 tcx: TyCtxt<'tcx>,
38 ty: Ty<'tcx>,
39 repr: &ReprOptions,
40 min: i128,
41 max: i128,
42 ) -> (Integer, bool);
43 }
44
45 impl IntegerExt for Integer {
46 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
47 match (*self, signed) {
48 (I8, false) => tcx.types.u8,
49 (I16, false) => tcx.types.u16,
50 (I32, false) => tcx.types.u32,
51 (I64, false) => tcx.types.u64,
52 (I128, false) => tcx.types.u128,
53 (I8, true) => tcx.types.i8,
54 (I16, true) => tcx.types.i16,
55 (I32, true) => tcx.types.i32,
56 (I64, true) => tcx.types.i64,
57 (I128, true) => tcx.types.i128,
58 }
59 }
60
61 /// Gets the Integer type from an attr::IntType.
62 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
63 let dl = cx.data_layout();
64
65 match ity {
66 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
67 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
68 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
69 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
70 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
71 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
72 dl.ptr_sized_integer()
73 }
74 }
75 }
76
77 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
78 match ity {
79 ty::IntTy::I8 => I8,
80 ty::IntTy::I16 => I16,
81 ty::IntTy::I32 => I32,
82 ty::IntTy::I64 => I64,
83 ty::IntTy::I128 => I128,
84 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
85 }
86 }
87 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
88 match ity {
89 ty::UintTy::U8 => I8,
90 ty::UintTy::U16 => I16,
91 ty::UintTy::U32 => I32,
92 ty::UintTy::U64 => I64,
93 ty::UintTy::U128 => I128,
94 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
95 }
96 }
97
98 /// Finds the appropriate Integer type and signedness for the given
99 /// signed discriminant range and `#[repr]` attribute.
100 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
101 /// that shouldn't affect anything, other than maybe debuginfo.
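/// (Illustrative example, not from the upstream docs: with no `#[repr]` attribute,
/// a discriminant range of `-1..=100` fits in one signed byte, so this returns
/// `(I8, true)`.)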
102 fn repr_discr<'tcx>(
103 tcx: TyCtxt<'tcx>,
104 ty: Ty<'tcx>,
105 repr: &ReprOptions,
106 min: i128,
107 max: i128,
108 ) -> (Integer, bool) {
109 // Theoretically, negative values could be larger in unsigned representation
110 // than the unsigned representation of the signed minimum. However, if there
111 // are any negative values, the only valid unsigned representation is u128
112 // which can fit all i128 values, so the result remains unaffected.
113 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
114 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
115
116 let mut min_from_extern = None;
117 let min_default = I8;
118
119 if let Some(ity) = repr.int {
120 let discr = Integer::from_attr(&tcx, ity);
121 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
122 if discr < fit {
123 bug!(
124 "Integer::repr_discr: `#[repr]` hint too small for \
125 discriminant range of enum `{}`",
126 ty
127 )
128 }
129 return (discr, ity.is_signed());
130 }
131
132 if repr.c() {
133 match &tcx.sess.target.arch[..] {
134 "hexagon" => min_from_extern = Some(I8),
135 // WARNING: the ARM EABI has two variants; the one corresponding
136 // to `at_least == I32` appears to be used on Linux and NetBSD,
137 // but some systems may use the variant corresponding to no
138 // lower bound. However, we don't run on those yet...?
139 "arm" => min_from_extern = Some(I32),
140 _ => min_from_extern = Some(I32),
141 }
142 }
143
144 let at_least = min_from_extern.unwrap_or(min_default);
145
146 // If there are no negative values, we can use the unsigned fit.
147 if min >= 0 {
148 (cmp::max(unsigned_fit, at_least), false)
149 } else {
150 (cmp::max(signed_fit, at_least), true)
151 }
152 }
153 }
154
155 pub trait PrimitiveExt {
156 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
157 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
158 }
159
160 impl PrimitiveExt for Primitive {
161 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
162 match *self {
163 Int(i, signed) => i.to_ty(tcx, signed),
164 F32 => tcx.types.f32,
165 F64 => tcx.types.f64,
166 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
167 }
168 }
169
170 /// Return an *integer* type matching this primitive.
171 /// Useful in particular when dealing with enum discriminants.
172 fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
173 match *self {
174 Int(i, signed) => i.to_ty(tcx, signed),
175 Pointer => tcx.types.usize,
176 F32 | F64 => bug!("floats do not have an int type"),
177 }
178 }
179 }
180
181 /// The first half of a fat pointer.
182 ///
183 /// - For a trait object, this is the address of the box.
184 /// - For a slice, this is the base address.
185 pub const FAT_PTR_ADDR: usize = 0;
186
187 /// The second half of a fat pointer.
188 ///
189 /// - For a trait object, this is the address of the vtable.
190 /// - For a slice, this is the length.
191 pub const FAT_PTR_EXTRA: usize = 1;
192
193 /// The maximum supported number of lanes in a SIMD vector.
194 ///
195 /// This value is selected based on backend support:
196 /// * LLVM does not appear to have a vector width limit.
197 /// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
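/// (Added note: a 4-bit base-2 log caps the lane count at 2^15 = 32768, which is `1 << 0xF`.)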
198 pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
199
200 #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable)]
201 pub enum LayoutError<'tcx> {
202 Unknown(Ty<'tcx>),
203 SizeOverflow(Ty<'tcx>),
204 }
205
206 impl<'tcx> fmt::Display for LayoutError<'tcx> {
207 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
208 match *self {
209 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
210 LayoutError::SizeOverflow(ty) => {
211 write!(f, "values of the type `{}` are too big for the current architecture", ty)
212 }
213 }
214 }
215 }
216
217 fn layout_raw<'tcx>(
218 tcx: TyCtxt<'tcx>,
219 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
220 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
221 ty::tls::with_related_context(tcx, move |icx| {
222 let (param_env, ty) = query.into_parts();
223
224 if !tcx.sess.recursion_limit().value_within_limit(icx.layout_depth) {
225 tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
226 }
227
228 // Update the ImplicitCtxt to increase the layout_depth
229 let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
230
231 ty::tls::enter_context(&icx, |_| {
232 let cx = LayoutCx { tcx, param_env };
233 let layout = cx.layout_raw_uncached(ty);
234 // Type-level uninhabitedness should always imply ABI uninhabitedness.
235 if let Ok(layout) = layout {
236 if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
237 assert!(layout.abi.is_uninhabited());
238 }
239 }
240 layout
241 })
242 })
243 }
244
245 pub fn provide(providers: &mut ty::query::Providers) {
246 *providers = ty::query::Providers { layout_raw, ..*providers };
247 }
248
249 pub struct LayoutCx<'tcx, C> {
250 pub tcx: C,
251 pub param_env: ty::ParamEnv<'tcx>,
252 }
253
254 #[derive(Copy, Clone, Debug)]
255 enum StructKind {
256 /// A tuple, closure, or univariant which cannot be coerced to unsized.
257 AlwaysSized,
258 /// A univariant, the last field of which may be coerced to unsized.
259 MaybeUnsized,
260 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
261 Prefixed(Size, Align),
262 }
263
264 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
265 // This is used to go between `memory_index` (source field order to memory order)
266 // and `inverse_memory_index` (memory order to source field order).
267 // See also `FieldsShape::Arbitrary::memory_index` for more details.
268 // FIXME(eddyb) build a better abstraction for permutations, if possible.
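// Illustrative example (not in the upstream comments): if `map == [2, 0, 1]`,
// i.e. source field 0 is placed at memory position 2, then
// `invert_mapping(&map) == [1, 2, 0]`, mapping each memory position back to
// the source field stored there.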
269 fn invert_mapping(map: &[u32]) -> Vec<u32> {
270 let mut inverse = vec![0; map.len()];
271 for i in 0..map.len() {
272 inverse[map[i] as usize] = i as u32;
273 }
274 inverse
275 }
276
277 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
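// Illustrative sketch of what `scalar_pair` computes, for hypothetical inputs:
// pairing a `u8` scalar with a `u32` scalar rounds `b_offset` from 1 byte up to
// the 4-byte alignment of `b`, giving field offsets [0, 4] and size 8.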
278 fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
279 let dl = self.data_layout();
280 let b_align = b.value.align(dl);
281 let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
282 let b_offset = a.value.size(dl).align_to(b_align.abi);
283 let size = (b_offset + b.value.size(dl)).align_to(align.abi);
284
285 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
286 // returns the last maximum.
287 let largest_niche = Niche::from_scalar(dl, b_offset, b.clone())
288 .into_iter()
289 .chain(Niche::from_scalar(dl, Size::ZERO, a.clone()))
290 .max_by_key(|niche| niche.available(dl));
291
292 Layout {
293 variants: Variants::Single { index: VariantIdx::new(0) },
294 fields: FieldsShape::Arbitrary {
295 offsets: vec![Size::ZERO, b_offset],
296 memory_index: vec![0, 1],
297 },
298 abi: Abi::ScalarPair(a, b),
299 largest_niche,
300 align,
301 size,
302 }
303 }
304
305 fn univariant_uninterned(
306 &self,
307 ty: Ty<'tcx>,
308 fields: &[TyAndLayout<'_>],
309 repr: &ReprOptions,
310 kind: StructKind,
311 ) -> Result<Layout, LayoutError<'tcx>> {
312 let dl = self.data_layout();
313 let pack = repr.pack;
314 if pack.is_some() && repr.align.is_some() {
315 bug!("struct cannot be packed and aligned");
316 }
317
318 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
319
320 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
321
322 let optimize = !repr.inhibit_struct_field_reordering_opt();
323 if optimize {
324 let end =
325 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
326 let optimizing = &mut inverse_memory_index[..end];
327 let field_align = |f: &TyAndLayout<'_>| {
328 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
329 };
330 match kind {
331 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
332 optimizing.sort_by_key(|&x| {
333 // Place ZSTs first to avoid "interesting offsets",
334 // especially with only one or two non-ZST fields.
335 let f = &fields[x as usize];
336 (!f.is_zst(), cmp::Reverse(field_align(f)))
337 });
338 }
339 StructKind::Prefixed(..) => {
340 // Sort in ascending alignment so that the layout stays optimal
341 // regardless of the prefix.
342 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
343 }
344 }
345 }
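// Illustrative note (hypothetical example, not from the upstream comments):
// for `struct S(u8, u32, u8)` with the default repr, the sort above yields the
// memory order (u32, u8, u8), so the struct needs 8 bytes instead of the 12
// bytes required by declaration order.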
346
347 // inverse_memory_index holds field indices by increasing memory offset.
348 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
349 // We now write field offsets to the corresponding offset slot;
350 // field 5 with offset 0 puts 0 in offsets[5].
351 // At the bottom of this function, we invert `inverse_memory_index` to
352 // produce `memory_index` (see `invert_mapping`).
353
354 let mut sized = true;
355 let mut offsets = vec![Size::ZERO; fields.len()];
356 let mut offset = Size::ZERO;
357 let mut largest_niche = None;
358 let mut largest_niche_available = 0;
359
360 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
361 let prefix_align =
362 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
363 align = align.max(AbiAndPrefAlign::new(prefix_align));
364 offset = prefix_size.align_to(prefix_align);
365 }
366
367 for &i in &inverse_memory_index {
368 let field = fields[i as usize];
369 if !sized {
370 bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty);
371 }
372
373 if field.is_unsized() {
374 sized = false;
375 }
376
377 // Invariant: offset < dl.obj_size_bound() <= 1<<61
378 let field_align = if let Some(pack) = pack {
379 field.align.min(AbiAndPrefAlign::new(pack))
380 } else {
381 field.align
382 };
383 offset = offset.align_to(field_align.abi);
384 align = align.max(field_align);
385
386 debug!("univariant offset: {:?} field: {:#?}", offset, field);
387 offsets[i as usize] = offset;
388
389 if !repr.hide_niche() {
390 if let Some(mut niche) = field.largest_niche.clone() {
391 let available = niche.available(dl);
392 if available > largest_niche_available {
393 largest_niche_available = available;
394 niche.offset += offset;
395 largest_niche = Some(niche);
396 }
397 }
398 }
399
400 offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
401 }
402
403 if let Some(repr_align) = repr.align {
404 align = align.max(AbiAndPrefAlign::new(repr_align));
405 }
406
407 debug!("univariant min_size: {:?}", offset);
408 let min_size = offset;
409
410 // As stated above, inverse_memory_index holds field indices by increasing offset.
411 // This makes it an already-sorted view of the offsets vec.
412 // To invert it, consider:
413 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
414 // Field 5 would be the first element, so memory_index is i:
415 // Note: if we didn't optimize, it's already right.
416
417 let memory_index =
418 if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
419
420 let size = min_size.align_to(align.abi);
421 let mut abi = Abi::Aggregate { sized };
422
423 // Unpack newtype ABIs and find scalar pairs.
424 if sized && size.bytes() > 0 {
425 // All other fields must be ZSTs.
426 let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
427
428 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
429 // We have exactly one non-ZST field.
430 (Some((i, field)), None, None) => {
431 // Field fills the struct and it has a scalar or scalar pair ABI.
432 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
433 {
434 match field.abi {
435 // For plain scalars, or vectors of them, we can't unpack
436 // newtypes for `#[repr(C)]`, as that affects C ABIs.
437 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
438 abi = field.abi.clone();
439 }
440 // But scalar pairs are Rust-specific and get
441 // treated as aggregates by C ABIs anyway.
442 Abi::ScalarPair(..) => {
443 abi = field.abi.clone();
444 }
445 _ => {}
446 }
447 }
448 }
449
450 // Two non-ZST fields, and they're both scalars.
451 (
452 Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
453 Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
454 None,
455 ) => {
456 // Order by the memory placement, not source order.
457 let ((i, a), (j, b)) =
458 if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
459 let pair = self.scalar_pair(a.clone(), b.clone());
460 let pair_offsets = match pair.fields {
461 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
462 assert_eq!(memory_index, &[0, 1]);
463 offsets
464 }
465 _ => bug!(),
466 };
467 if offsets[i] == pair_offsets[0]
468 && offsets[j] == pair_offsets[1]
469 && align == pair.align
470 && size == pair.size
471 {
472 // We can use `ScalarPair` only when it matches our
473 // already computed layout (including `#[repr(C)]`).
474 abi = pair.abi;
475 }
476 }
477
478 _ => {}
479 }
480 }
481
482 if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
483 abi = Abi::Uninhabited;
484 }
485
486 Ok(Layout {
487 variants: Variants::Single { index: VariantIdx::new(0) },
488 fields: FieldsShape::Arbitrary { offsets, memory_index },
489 abi,
490 largest_niche,
491 align,
492 size,
493 })
494 }
495
496 fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
497 let tcx = self.tcx;
498 let param_env = self.param_env;
499 let dl = self.data_layout();
500 let scalar_unit = |value: Primitive| {
501 let bits = value.size(dl).bits();
502 assert!(bits <= 128);
503 Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) }
504 };
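// For instance, `scalar_unit(Int(I8, false))` is a `u8`-shaped scalar whose
// valid range is the full `0..=255` (illustrative note, not an upstream comment).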
505 let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
506
507 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
508 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
509 };
510 debug_assert!(!ty.has_infer_types_or_consts());
511
512 Ok(match *ty.kind() {
513 // Basic scalars.
514 ty::Bool => tcx.intern_layout(Layout::scalar(
515 self,
516 Scalar { value: Int(I8, false), valid_range: 0..=1 },
517 )),
518 ty::Char => tcx.intern_layout(Layout::scalar(
519 self,
520 Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF },
521 )),
522 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
523 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
524 ty::Float(fty) => scalar(match fty {
525 ty::FloatTy::F32 => F32,
526 ty::FloatTy::F64 => F64,
527 }),
528 ty::FnPtr(_) => {
529 let mut ptr = scalar_unit(Pointer);
530 ptr.valid_range = 1..=*ptr.valid_range.end();
531 tcx.intern_layout(Layout::scalar(self, ptr))
532 }
533
534 // The never type.
535 ty::Never => tcx.intern_layout(Layout {
536 variants: Variants::Single { index: VariantIdx::new(0) },
537 fields: FieldsShape::Primitive,
538 abi: Abi::Uninhabited,
539 largest_niche: None,
540 align: dl.i8_align,
541 size: Size::ZERO,
542 }),
543
544 // Potentially-wide pointers.
545 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
546 let mut data_ptr = scalar_unit(Pointer);
547 if !ty.is_unsafe_ptr() {
548 data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
549 }
550
551 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
552 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
553 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
554 }
555
556 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
557 let metadata = match unsized_part.kind() {
558 ty::Foreign(..) => {
559 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
560 }
561 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
562 ty::Dynamic(..) => {
563 let mut vtable = scalar_unit(Pointer);
564 vtable.valid_range = 1..=*vtable.valid_range.end();
565 vtable
566 }
567 _ => return Err(LayoutError::Unknown(unsized_part)),
568 };
569
570 // Effectively a (ptr, meta) tuple.
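// (Added example: `&[u8]` becomes (non-null data pointer, usize length), while
// `&dyn Trait` becomes (non-null data pointer, non-null vtable pointer).)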
571 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
572 }
573
574 // Arrays and slices.
575 ty::Array(element, mut count) => {
576 if count.has_projections() {
577 count = tcx.normalize_erasing_regions(param_env, count);
578 if count.has_projections() {
579 return Err(LayoutError::Unknown(ty));
580 }
581 }
582
583 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
584 let element = self.layout_of(element)?;
585 let size =
586 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
587
588 let abi =
589 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
590 Abi::Uninhabited
591 } else {
592 Abi::Aggregate { sized: true }
593 };
594
595 let largest_niche = if count != 0 { element.largest_niche.clone() } else { None };
596
597 tcx.intern_layout(Layout {
598 variants: Variants::Single { index: VariantIdx::new(0) },
599 fields: FieldsShape::Array { stride: element.size, count },
600 abi,
601 largest_niche,
602 align: element.align,
603 size,
604 })
605 }
606 ty::Slice(element) => {
607 let element = self.layout_of(element)?;
608 tcx.intern_layout(Layout {
609 variants: Variants::Single { index: VariantIdx::new(0) },
610 fields: FieldsShape::Array { stride: element.size, count: 0 },
611 abi: Abi::Aggregate { sized: false },
612 largest_niche: None,
613 align: element.align,
614 size: Size::ZERO,
615 })
616 }
617 ty::Str => tcx.intern_layout(Layout {
618 variants: Variants::Single { index: VariantIdx::new(0) },
619 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
620 abi: Abi::Aggregate { sized: false },
621 largest_niche: None,
622 align: dl.i8_align,
623 size: Size::ZERO,
624 }),
625
626 // Odd unit types.
627 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
628 ty::Dynamic(..) | ty::Foreign(..) => {
629 let mut unit = self.univariant_uninterned(
630 ty,
631 &[],
632 &ReprOptions::default(),
633 StructKind::AlwaysSized,
634 )?;
635 match unit.abi {
636 Abi::Aggregate { ref mut sized } => *sized = false,
637 _ => bug!(),
638 }
639 tcx.intern_layout(unit)
640 }
641
642 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
643
644 ty::Closure(_, ref substs) => {
645 let tys = substs.as_closure().upvar_tys();
646 univariant(
647 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
648 &ReprOptions::default(),
649 StructKind::AlwaysSized,
650 )?
651 }
652
653 ty::Tuple(tys) => {
654 let kind =
655 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
656
657 univariant(
658 &tys.iter()
659 .map(|k| self.layout_of(k.expect_ty()))
660 .collect::<Result<Vec<_>, _>>()?,
661 &ReprOptions::default(),
662 kind,
663 )?
664 }
665
666 // SIMD vector types.
667 ty::Adt(def, substs) if def.repr.simd() => {
668 // Supported SIMD vectors are homogeneous ADTs with at least one field:
669 //
670 // * #[repr(simd)] struct S(T, T, T, T);
671 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
672 // * #[repr(simd)] struct S([T; 4])
673 //
674 // where T is a primitive scalar (integer/float/pointer).
675
676 // SIMD vectors with zero fields are not supported.
677 // (should be caught by typeck)
678 if def.non_enum_variant().fields.is_empty() {
679 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
680 }
681
682 // Type of the first ADT field:
683 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
684
685 // Heterogeneous SIMD vectors are not supported:
686 // (should be caught by typeck)
687 for fi in &def.non_enum_variant().fields {
688 if fi.ty(tcx, substs) != f0_ty {
689 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
690 }
691 }
692
693 // The element type and number of elements of the SIMD vector
694 // are obtained from:
695 //
696 // * the element type and length of the single array field, if
697 // the first field is of array type, or
698 //
699 // * the homogeneous field type and the number of fields.
700 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
701 // First ADT field is an array:
702
703 // SIMD vectors with multiple array fields are not supported:
704 // (should be caught by typeck)
705 if def.non_enum_variant().fields.len() != 1 {
706 tcx.sess.fatal(&format!(
707 "monomorphising SIMD type `{}` with more than one array field",
708 ty
709 ));
710 }
711
712 // Extract the number of elements from the layout of the array field:
713 let len = if let Ok(TyAndLayout {
714 layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
715 ..
716 }) = self.layout_of(f0_ty)
717 {
718 count
719 } else {
720 return Err(LayoutError::Unknown(ty));
721 };
722
723 (*e_ty, *len, true)
724 } else {
725 // First ADT field is not an array:
726 (f0_ty, def.non_enum_variant().fields.len() as _, false)
727 };
728
729 // SIMD vectors of zero length are not supported.
730 // Additionally, lengths are capped at 2^15 (`MAX_SIMD_LANES`), a fixed maximum
731 // backends must support.
732 //
733 // Can't be caught in typeck if the array length is generic.
734 if e_len == 0 {
735 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
736 } else if e_len > MAX_SIMD_LANES {
737 tcx.sess.fatal(&format!(
738 "monomorphising SIMD type `{}` of length greater than {}",
739 ty, MAX_SIMD_LANES,
740 ));
741 }
742
743 // Compute the ABI of the element type:
744 let e_ly = self.layout_of(e_ty)?;
745 let e_abi = if let Abi::Scalar(ref scalar) = e_ly.abi {
746 scalar.clone()
747 } else {
748 // This error isn't caught in typeck, e.g., if
749 // the element type of the vector is generic.
750 tcx.sess.fatal(&format!(
751 "monomorphising SIMD type `{}` with a non-primitive-scalar \
752 (integer/float/pointer) element type `{}`",
753 ty, e_ty
754 ))
755 };
756
757 // Compute the size and alignment of the vector:
758 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
759 let align = dl.vector_align(size);
760 let size = size.align_to(align.abi);
761
762 // Compute the placement of the vector fields:
763 let fields = if is_array {
764 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
765 } else {
766 FieldsShape::Array { stride: e_ly.size, count: e_len }
767 };
768
769 tcx.intern_layout(Layout {
770 variants: Variants::Single { index: VariantIdx::new(0) },
771 fields,
772 abi: Abi::Vector { element: e_abi, count: e_len },
773 largest_niche: e_ly.largest_niche.clone(),
774 size,
775 align,
776 })
777 }
778
779 // ADTs.
780 ty::Adt(def, substs) => {
781 // Cache the field layouts.
782 let variants = def
783 .variants
784 .iter()
785 .map(|v| {
786 v.fields
787 .iter()
788 .map(|field| self.layout_of(field.ty(tcx, substs)))
789 .collect::<Result<Vec<_>, _>>()
790 })
791 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
792
793 if def.is_union() {
794 if def.repr.pack.is_some() && def.repr.align.is_some() {
795 bug!("union cannot be packed and aligned");
796 }
797
798 let mut align =
799 if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
800
801 if let Some(repr_align) = def.repr.align {
802 align = align.max(AbiAndPrefAlign::new(repr_align));
803 }
804
805 let optimize = !def.repr.inhibit_union_abi_opt();
806 let mut size = Size::ZERO;
807 let mut abi = Abi::Aggregate { sized: true };
808 let index = VariantIdx::new(0);
809 for field in &variants[index] {
810 assert!(!field.is_unsized());
811 align = align.max(field.align);
812
813 // If all non-ZST fields have the same ABI, forward this ABI
814 if optimize && !field.is_zst() {
815 // Normalize scalar_unit to the maximal valid range
816 let field_abi = match &field.abi {
817 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
818 Abi::ScalarPair(x, y) => {
819 Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
820 }
821 Abi::Vector { element: x, count } => {
822 Abi::Vector { element: scalar_unit(x.value), count: *count }
823 }
824 Abi::Uninhabited | Abi::Aggregate { .. } => {
825 Abi::Aggregate { sized: true }
826 }
827 };
828
829 if size == Size::ZERO {
830 // first non ZST: initialize 'abi'
831 abi = field_abi;
832 } else if abi != field_abi {
833 // different fields have different ABI: reset to Aggregate
834 abi = Abi::Aggregate { sized: true };
835 }
836 }
837
838 size = cmp::max(size, field.size);
839 }
840
841 if let Some(pack) = def.repr.pack {
842 align = align.min(AbiAndPrefAlign::new(pack));
843 }
844
845 return Ok(tcx.intern_layout(Layout {
846 variants: Variants::Single { index },
847 fields: FieldsShape::Union(
848 NonZeroUsize::new(variants[index].len())
849 .ok_or(LayoutError::Unknown(ty))?,
850 ),
851 abi,
852 largest_niche: None,
853 align,
854 size: size.align_to(align.abi),
855 }));
856 }
857
858 // A variant is absent if it's uninhabited and only has ZST fields.
859 // Present uninhabited variants only require space for their fields,
860 // but *not* an encoding of the discriminant (e.g., a tag value).
861 // See issue #49298 for more details on the need to leave space
862 // for non-ZST uninhabited data (mostly partial initialization).
863 let absent = |fields: &[TyAndLayout<'_>]| {
864 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
865 let is_zst = fields.iter().all(|f| f.is_zst());
866 uninhabited && is_zst
867 };
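// (Illustrative note: a variant holding only `!` is absent, while a variant
// holding `(u32, !)` is uninhabited but still gets space reserved for its `u32`.)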
868 let (present_first, present_second) = {
869 let mut present_variants = variants
870 .iter_enumerated()
871 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
872 (present_variants.next(), present_variants.next())
873 };
874 let present_first = match present_first {
875 Some(present_first) => present_first,
876 // Uninhabited because it has no variants, or only absent ones.
877 None if def.is_enum() => return tcx.layout_raw(param_env.and(tcx.types.never)),
878 // If it's a struct, still compute a layout so that we can still compute the
879 // field offsets.
880 None => VariantIdx::new(0),
881 };
882
883 let is_struct = !def.is_enum() ||
884 // Only one variant is present.
885 (present_second.is_none() &&
886 // Representation optimizations are allowed.
887 !def.repr.inhibit_enum_layout_opt());
888 if is_struct {
889 // Struct, or univariant enum equivalent to a struct.
890 // (Typechecking will reject discriminant-sizing attrs.)
891
892 let v = present_first;
893 let kind = if def.is_enum() || variants[v].is_empty() {
894 StructKind::AlwaysSized
895 } else {
896 let param_env = tcx.param_env(def.did);
897 let last_field = def.variants[v].fields.last().unwrap();
898 let always_sized =
899 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
900 if !always_sized {
901 StructKind::MaybeUnsized
902 } else {
903 StructKind::AlwaysSized
904 }
905 };
906
907 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
908 st.variants = Variants::Single { index: v };
909 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
910 match st.abi {
911 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
912 // the asserts ensure that we are not using the
913 // `#[rustc_layout_scalar_valid_range(n)]`
914 // attribute to widen the range of anything as that would probably
915 // result in UB somewhere
916 // FIXME(eddyb) the asserts are probably not needed,
917 // as larger validity ranges would result in missed
918 // optimizations, *not* wrongly assuming the inner
919 // value is valid. e.g. unions enlarge validity ranges,
920 // because the values may be uninitialized.
921 if let Bound::Included(start) = start {
922 // FIXME(eddyb) this might be incorrect - it doesn't
923 // account for wrap-around (end < start) ranges.
924 assert!(*scalar.valid_range.start() <= start);
925 scalar.valid_range = start..=*scalar.valid_range.end();
926 }
927 if let Bound::Included(end) = end {
928 // FIXME(eddyb) this might be incorrect - it doesn't
929 // account for wrap-around (end < start) ranges.
930 assert!(*scalar.valid_range.end() >= end);
931 scalar.valid_range = *scalar.valid_range.start()..=end;
932 }
933
934 // Update `largest_niche` if we have introduced a larger niche.
935 let niche = if def.repr.hide_niche() {
936 None
937 } else {
938 Niche::from_scalar(dl, Size::ZERO, scalar.clone())
939 };
940 if let Some(niche) = niche {
941 match &st.largest_niche {
942 Some(largest_niche) => {
943 // Replace the existing niche even if they're equal,
944 // because this one is at a lower offset.
945 if largest_niche.available(dl) <= niche.available(dl) {
946 st.largest_niche = Some(niche);
947 }
948 }
949 None => st.largest_niche = Some(niche),
950 }
951 }
952 }
953 _ => assert!(
954 start == Bound::Unbounded && end == Bound::Unbounded,
955 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
956 def,
957 st,
958 ),
959 }
960
961 return Ok(tcx.intern_layout(st));
962 }
963
964 // At this point, we have handled all unions and
965 // structs. (We have also handled univariant enums
966 // that allow representation optimization.)
967 assert!(def.is_enum());
968
969 // The current code for niche-filling relies on variant indices
970 // instead of actual discriminants, so dataful enums with
971 // explicit discriminants (RFC #2363) would misbehave.
972 let no_explicit_discriminants = def
973 .variants
974 .iter_enumerated()
975 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
976
977 let mut niche_filling_layout = None;
978
979 // Niche-filling enum optimization.
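// (Illustrative example, not part of the upstream comments: for `Option<&u8>`,
// the dataful variant is `Some`, whose reference field forbids the value 0;
// `None` is encoded as that forbidden value, so the whole enum stays
// pointer-sized with no separate tag.)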
980 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
981 let mut dataful_variant = None;
982 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
983
984 // Find one non-ZST variant.
985 'variants: for (v, fields) in variants.iter_enumerated() {
986 if absent(fields) {
987 continue 'variants;
988 }
989 for f in fields {
990 if !f.is_zst() {
991 if dataful_variant.is_none() {
992 dataful_variant = Some(v);
993 continue 'variants;
994 } else {
995 dataful_variant = None;
996 break 'variants;
997 }
998 }
999 }
1000 niche_variants = *niche_variants.start().min(&v)..=v;
1001 }
1002
1003 if niche_variants.start() > niche_variants.end() {
1004 dataful_variant = None;
1005 }
1006
1007 if let Some(i) = dataful_variant {
1008 let count = (niche_variants.end().as_u32()
1009 - niche_variants.start().as_u32()
1010 + 1) as u128;
1011
1012 // Find the field with the largest niche
1013 let niche_candidate = variants[i]
1014 .iter()
1015 .enumerate()
1016 .filter_map(|(j, &field)| Some((j, field.largest_niche.as_ref()?)))
1017 .max_by_key(|(_, niche)| niche.available(dl));
1018
1019 if let Some((field_index, niche, (niche_start, niche_scalar))) =
1020 niche_candidate.and_then(|(field_index, niche)| {
1021 Some((field_index, niche, niche.reserve(self, count)?))
1022 })
1023 {
1024 let mut align = dl.aggregate_align;
1025 let st = variants
1026 .iter_enumerated()
1027 .map(|(j, v)| {
1028 let mut st = self.univariant_uninterned(
1029 ty,
1030 v,
1031 &def.repr,
1032 StructKind::AlwaysSized,
1033 )?;
1034 st.variants = Variants::Single { index: j };
1035
1036 align = align.max(st.align);
1037
1038 Ok(st)
1039 })
1040 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1041
1042 let offset = st[i].fields.offset(field_index) + niche.offset;
1043 let size = st[i].size;
1044
1045 let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
1046 Abi::Uninhabited
1047 } else {
1048 match st[i].abi {
1049 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
1050 Abi::ScalarPair(ref first, ref second) => {
1051 // We need to use scalar_unit to reset the
1052 // valid range to the maximal one for that
1053 // primitive, because only the niche is
1054 // guaranteed to be initialised, not the
1055 // other primitive.
1056 if offset.bytes() == 0 {
1057 Abi::ScalarPair(
1058 niche_scalar.clone(),
1059 scalar_unit(second.value),
1060 )
1061 } else {
1062 Abi::ScalarPair(
1063 scalar_unit(first.value),
1064 niche_scalar.clone(),
1065 )
1066 }
1067 }
1068 _ => Abi::Aggregate { sized: true },
1069 }
1070 };
1071
1072 let largest_niche =
1073 Niche::from_scalar(dl, offset, niche_scalar.clone());
1074
1075 niche_filling_layout = Some(Layout {
1076 variants: Variants::Multiple {
1077 tag: niche_scalar,
1078 tag_encoding: TagEncoding::Niche {
1079 dataful_variant: i,
1080 niche_variants,
1081 niche_start,
1082 },
1083 tag_field: 0,
1084 variants: st,
1085 },
1086 fields: FieldsShape::Arbitrary {
1087 offsets: vec![offset],
1088 memory_index: vec![0],
1089 },
1090 abi,
1091 largest_niche,
1092 size,
1093 align,
1094 });
1095 }
1096 }
1097 }
1098
1099 let (mut min, mut max) = (i128::MAX, i128::MIN);
1100 let discr_type = def.repr.discr_type();
1101 let bits = Integer::from_attr(self, discr_type).size().bits();
1102 for (i, discr) in def.discriminants(tcx) {
1103 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1104 continue;
1105 }
1106 let mut x = discr.val as i128;
1107 if discr_type.is_signed() {
1108 // sign extend the raw representation to be an i128
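// (e.g. with an `i8` repr, a raw value of 0xFF becomes -1 after the shift
// left / arithmetic shift right by 120 bits below)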
1109 x = (x << (128 - bits)) >> (128 - bits);
1110 }
1111 if x < min {
1112 min = x;
1113 }
1114 if x > max {
1115 max = x;
1116 }
1117 }
1118 // We might have no inhabited variants, so pretend there's at least one.
1119 if (min, max) == (i128::MAX, i128::MIN) {
1120 min = 0;
1121 max = 0;
1122 }
1123 assert!(min <= max, "discriminant range is {}...{}", min, max);
1124 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1125
1126 let mut align = dl.aggregate_align;
1127 let mut size = Size::ZERO;
1128
1129 // We're interested in the smallest alignment, so start large.
1130 let mut start_align = Align::from_bytes(256).unwrap();
1131 assert_eq!(Integer::for_align(dl, start_align), None);
1132
1133 // repr(C) on an enum tells us to make a (tag, union) layout,
1134 // so we need to grow the prefix alignment to be at least
1135 // the alignment of the union. (This value is used both for
1136 // determining the alignment of the overall enum, and for
1137 // determining the alignment of the payload after the tag.)
1138 let mut prefix_align = min_ity.align(dl).abi;
1139 if def.repr.c() {
1140 for fields in &variants {
1141 for field in fields {
1142 prefix_align = prefix_align.max(field.align.abi);
1143 }
1144 }
1145 }
1146
1147 // Create the set of structs that represent each variant.
1148 let mut layout_variants = variants
1149 .iter_enumerated()
1150 .map(|(i, field_layouts)| {
1151 let mut st = self.univariant_uninterned(
1152 ty,
1153 &field_layouts,
1154 &def.repr,
1155 StructKind::Prefixed(min_ity.size(), prefix_align),
1156 )?;
1157 st.variants = Variants::Single { index: i };
1158 // Find the first field we can't move later
1159 // to make room for a larger discriminant.
1160 for field in
1161 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1162 {
1163 if !field.is_zst() || field.align.abi.bytes() != 1 {
1164 start_align = start_align.min(field.align.abi);
1165 break;
1166 }
1167 }
1168 size = cmp::max(size, st.size);
1169 align = align.max(st.align);
1170 Ok(st)
1171 })
1172 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1173
1174 // Align the maximum variant size to the largest alignment.
1175 size = size.align_to(align.abi);
1176
1177 if size.bytes() >= dl.obj_size_bound() {
1178 return Err(LayoutError::SizeOverflow(ty));
1179 }
1180
1181 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1182 if typeck_ity < min_ity {
1183 // It is a bug if Layout decided on a greater discriminant size than typeck for
1184 // some reason at this point (based on the values the discriminant can take on).
1185 // Mostly because this discriminant will be loaded, and then stored into a
1186 // variable of the type calculated by typeck. Consider such a case (a bug):
1187 // typeck decided on a byte-sized discriminant, but layout thinks we need 16 bits
1188 // to store all discriminant values. That would be a bug, because then, in
1189 // codegen, in order to store this 16-bit discriminant into an 8-bit sized
1190 // temporary, some of the space necessary to represent it would have to be
1191 // discarded (or layout would be wrong in thinking it needs 16 bits).
1192 bug!(
1193 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1194 min_ity,
1195 typeck_ity
1196 );
1197 // However, it is fine to make discr type however large (as an optimisation)
1198 // after this point – we’ll just truncate the value we load in codegen.
1199 }
1200
1201 // Check to see if we should use a different type for the
1202 // discriminant. We can safely use a type with the same size
1203 // as the alignment of the first field of each variant.
1204 // We increase the size of the discriminant to avoid LLVM copying
1205 // padding when it doesn't need to. This normally causes unaligned
1206 // load/stores and excessive memcpy/memset operations. By using a
1207 // bigger integer size, LLVM can be sure about its contents and
1208 // won't be so conservative.
1209
1210 // Use the initial field alignment
1211 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1212 min_ity
1213 } else {
1214 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1215 };
1216
1217 // If the alignment is not larger than the chosen discriminant size,
1218 // don't use the alignment as the final size.
1219 if ity <= min_ity {
1220 ity = min_ity;
1221 } else {
1222 // Patch up the variants' first few fields.
1223 let old_ity_size = min_ity.size();
1224 let new_ity_size = ity.size();
1225 for variant in &mut layout_variants {
1226 match variant.fields {
1227 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1228 for i in offsets {
1229 if *i <= old_ity_size {
1230 assert_eq!(*i, old_ity_size);
1231 *i = new_ity_size;
1232 }
1233 }
1234 // We might be making the struct larger.
1235 if variant.size <= old_ity_size {
1236 variant.size = new_ity_size;
1237 }
1238 }
1239 _ => bug!(),
1240 }
1241 }
1242 }
1243
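// (Added note: for an `I8` tag, `tag_mask` below is 0xFF, so `min`/`max` are
// truncated to the tag's width when building its valid range.)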
1244 let tag_mask = !0u128 >> (128 - ity.size().bits());
1245 let tag = Scalar {
1246 value: Int(ity, signed),
1247 valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1248 };
1249 let mut abi = Abi::Aggregate { sized: true };
1250 if tag.value.size(dl) == size {
1251 abi = Abi::Scalar(tag.clone());
1252 } else {
1253 // Try to use a ScalarPair for all tagged enums.
1254 let mut common_prim = None;
1255 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1256 let offsets = match layout_variant.fields {
1257 FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1258 _ => bug!(),
1259 };
1260 let mut fields =
1261 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1262 let (field, offset) = match (fields.next(), fields.next()) {
1263 (None, None) => continue,
1264 (Some(pair), None) => pair,
1265 _ => {
1266 common_prim = None;
1267 break;
1268 }
1269 };
1270 let prim = match field.abi {
1271 Abi::Scalar(ref scalar) => scalar.value,
1272 _ => {
1273 common_prim = None;
1274 break;
1275 }
1276 };
1277 if let Some(pair) = common_prim {
1278 // This is pretty conservative. We could go fancier
1279 // by conflating things like i32 and u32, or even
1280 // realising that (u8, u8) could just cohabit with
1281 // u16 or even u32.
1282 if pair != (prim, offset) {
1283 common_prim = None;
1284 break;
1285 }
1286 } else {
1287 common_prim = Some((prim, offset));
1288 }
1289 }
1290 if let Some((prim, offset)) = common_prim {
1291 let pair = self.scalar_pair(tag.clone(), scalar_unit(prim));
1292 let pair_offsets = match pair.fields {
1293 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1294 assert_eq!(memory_index, &[0, 1]);
1295 offsets
1296 }
1297 _ => bug!(),
1298 };
1299 if pair_offsets[0] == Size::ZERO
1300 && pair_offsets[1] == *offset
1301 && align == pair.align
1302 && size == pair.size
1303 {
1304 // We can use `ScalarPair` only when it matches our
1305 // already computed layout (including `#[repr(C)]`).
1306 abi = pair.abi;
1307 }
1308 }
1309 }
1310
1311 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1312 abi = Abi::Uninhabited;
1313 }
1314
1315 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag.clone());
1316
1317 let tagged_layout = Layout {
1318 variants: Variants::Multiple {
1319 tag,
1320 tag_encoding: TagEncoding::Direct,
1321 tag_field: 0,
1322 variants: layout_variants,
1323 },
1324 fields: FieldsShape::Arbitrary {
1325 offsets: vec![Size::ZERO],
1326 memory_index: vec![0],
1327 },
1328 largest_niche,
1329 abi,
1330 align,
1331 size,
1332 };
1333
1334 let best_layout = match (tagged_layout, niche_filling_layout) {
1335 (tagged_layout, Some(niche_filling_layout)) => {
1336 // Pick the smaller layout; otherwise,
1337 // pick the layout with the larger niche; otherwise,
1338 // pick tagged as it has simpler codegen.
1339 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1340 let niche_size =
1341 layout.largest_niche.as_ref().map_or(0, |n| n.available(dl));
1342 (layout.size, cmp::Reverse(niche_size))
1343 })
1344 }
1345 (tagged_layout, None) => tagged_layout,
1346 };
1347
1348 tcx.intern_layout(best_layout)
1349 }
1350
1351 // Types with no meaningful known layout.
1352 ty::Projection(_) | ty::Opaque(..) => {
1353 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1354 if ty == normalized {
1355 return Err(LayoutError::Unknown(ty));
1356 }
1357 tcx.layout_raw(param_env.and(normalized))?
1358 }
1359
1360 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1361 bug!("Layout::compute: unexpected type `{}`", ty)
1362 }
1363
1364 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1365 return Err(LayoutError::Unknown(ty));
1366 }
1367 })
1368 }
1369 }
1370
1371 /// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
1372 #[derive(Clone, Debug, PartialEq)]
1373 enum SavedLocalEligibility {
1374 Unassigned,
1375 Assigned(VariantIdx),
1376 // FIXME: Use newtype_index so we aren't wasting bytes
1377 Ineligible(Option<u32>),
1378 }
1379
1380 // When laying out generators, we divide our saved local fields into two
1381 // categories: overlap-eligible and overlap-ineligible.
1382 //
1383 // Those fields which are ineligible for overlap go in a "prefix" at the
1384 // beginning of the layout, and always have space reserved for them.
1385 //
1386 // Overlap-eligible fields are only assigned to one variant, so we lay
1387 // those fields out for each variant and put them right after the
1388 // prefix.
1389 //
1390 // Finally, in the layout details, we point to the fields from the
1391 // variants they are assigned to. It is possible for some fields to be
1392 // included in multiple variants. No field ever "moves around" in the
1393 // layout; its offset is always the same.
1394 //
1395 // Also included in the layout are the upvars and the discriminant.
1396 // These are included as fields on the "outer" layout; they are not part
1397 // of any variant.
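// Illustrative example (not in the upstream comments): a local that is live
// across two different yield points shows up in two variants, is marked
// ineligible below, and is therefore stored once in the shared prefix rather
// than overlapped with other locals.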
1398 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
1399 /// Compute the eligibility and assignment of each local.
1400 fn generator_saved_local_eligibility(
1401 &self,
1402 info: &GeneratorLayout<'tcx>,
1403 ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
1404 use SavedLocalEligibility::*;
1405
1406 let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
1407 IndexVec::from_elem_n(Unassigned, info.field_tys.len());
1408
1409 // The saved locals not eligible for overlap. These will get
1410 // "promoted" to the prefix of our generator.
1411 let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
1412
1413 // Figure out which of our saved locals are fields in only
1414 // one variant. The rest are deemed ineligible for overlap.
1415 for (variant_index, fields) in info.variant_fields.iter_enumerated() {
1416 for local in fields {
1417 match assignments[*local] {
1418 Unassigned => {
1419 assignments[*local] = Assigned(variant_index);
1420 }
1421 Assigned(idx) => {
1422 // We've already seen this local at another suspension
1423 // point, so it is no longer a candidate.
1424 trace!(
1425 "removing local {:?} in >1 variant ({:?}, {:?})",
1426 local,
1427 variant_index,
1428 idx
1429 );
1430 ineligible_locals.insert(*local);
1431 assignments[*local] = Ineligible(None);
1432 }
1433 Ineligible(_) => {}
1434 }
1435 }
1436 }
1437
1438 // Next, check every pair of eligible locals to see if they
1439 // conflict.
1440 for local_a in info.storage_conflicts.rows() {
1441 let conflicts_a = info.storage_conflicts.count(local_a);
1442 if ineligible_locals.contains(local_a) {
1443 continue;
1444 }
1445
1446 for local_b in info.storage_conflicts.iter(local_a) {
1447 // local_a and local_b are storage live at the same time, therefore they
1448 // cannot overlap in the generator layout. The only way to guarantee
1449 // this is if they are in the same variant, or one is ineligible
1450 // (which means it is stored in every variant).
1451 if ineligible_locals.contains(local_b)
1452 || assignments[local_a] == assignments[local_b]
1453 {
1454 continue;
1455 }
1456
1457 // If they conflict, we will choose one to make ineligible.
1458 // This is not always optimal; it's just a greedy heuristic that
1459 // seems to produce good results most of the time.
1460 let conflicts_b = info.storage_conflicts.count(local_b);
1461 let (remove, other) =
1462 if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
1463 ineligible_locals.insert(remove);
1464 assignments[remove] = Ineligible(None);
1465 trace!("removing local {:?} due to conflict with {:?}", remove, other);
1466 }
1467 }
1468
1469 // Count the number of variants in use. If only one of them, then it is
1470 // impossible to overlap any locals in our layout. In this case it's
1471 // always better to make the remaining locals ineligible, so we can
1472 // lay them out with the other locals in the prefix and eliminate
1473 // unnecessary padding bytes.
1474 {
1475 let mut used_variants = BitSet::new_empty(info.variant_fields.len());
1476 for assignment in &assignments {
1477 if let Assigned(idx) = assignment {
1478 used_variants.insert(*idx);
1479 }
1480 }
1481 if used_variants.count() < 2 {
1482 for assignment in assignments.iter_mut() {
1483 *assignment = Ineligible(None);
1484 }
1485 ineligible_locals.insert_all();
1486 }
1487 }
1488
1489 // Write down the order of our locals that will be promoted to the prefix.
1490 {
1491 for (idx, local) in ineligible_locals.iter().enumerate() {
1492 assignments[local] = Ineligible(Some(idx as u32));
1493 }
1494 }
1495 debug!("generator saved local assignments: {:?}", assignments);
1496
1497 (ineligible_locals, assignments)
1498 }
1499
1500 /// Compute the full generator layout.
1501 fn generator_layout(
1502 &self,
1503 ty: Ty<'tcx>,
1504 def_id: hir::def_id::DefId,
1505 substs: SubstsRef<'tcx>,
1506 ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
1507 use SavedLocalEligibility::*;
1508 let tcx = self.tcx;
1509 let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
1510
1511 let info = match tcx.generator_layout(def_id) {
1512 None => return Err(LayoutError::Unknown(ty)),
1513 Some(info) => info,
1514 };
1515 let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
1516
1517 // Build a prefix layout, including "promoting" all ineligible
1518 // locals as part of the prefix. We compute the layout of all of
1519 // these fields at once to get optimal packing.
1520 let tag_index = substs.as_generator().prefix_tys().count();
1521
1522 // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
1523 let max_discr = (info.variant_fields.len() - 1) as u128;
1524 let discr_int = Integer::fit_unsigned(max_discr);
1525 let discr_int_ty = discr_int.to_ty(tcx, false);
1526 let tag = Scalar { value: Primitive::Int(discr_int, false), valid_range: 0..=max_discr };
1527 let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag.clone()));
1528 let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
1529
1530 let promoted_layouts = ineligible_locals
1531 .iter()
1532 .map(|local| subst_field(info.field_tys[local]))
1533 .map(|ty| tcx.mk_maybe_uninit(ty))
1534 .map(|ty| self.layout_of(ty));
1535 let prefix_layouts = substs
1536 .as_generator()
1537 .prefix_tys()
1538 .map(|ty| self.layout_of(ty))
1539 .chain(iter::once(Ok(tag_layout)))
1540 .chain(promoted_layouts)
1541 .collect::<Result<Vec<_>, _>>()?;
1542 let prefix = self.univariant_uninterned(
1543 ty,
1544 &prefix_layouts,
1545 &ReprOptions::default(),
1546 StructKind::AlwaysSized,
1547 )?;
1548
1549 let (prefix_size, prefix_align) = (prefix.size, prefix.align);
1550
1551 // Split the prefix layout into the "outer" fields (upvars and
1552 // discriminant) and the "promoted" fields. Promoted fields will
1553 // get included in each variant that requested them in
1554 // GeneratorLayout.
1555 debug!("prefix = {:#?}", prefix);
1556 let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
1557 FieldsShape::Arbitrary { mut offsets, memory_index } => {
1558 let mut inverse_memory_index = invert_mapping(&memory_index);
1559
1560 // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
1561 // "outer" and "promoted" fields respectively.
1562 let b_start = (tag_index + 1) as u32;
1563 let offsets_b = offsets.split_off(b_start as usize);
1564 let offsets_a = offsets;
1565
1566 // Disentangle the "a" and "b" components of `inverse_memory_index`
1567 // by preserving the order but keeping only one disjoint "half" each.
1568 // FIXME(eddyb) build a better abstraction for permutations, if possible.
1569 let inverse_memory_index_b: Vec<_> =
1570 inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
1571 inverse_memory_index.retain(|&i| i < b_start);
1572 let inverse_memory_index_a = inverse_memory_index;
1573
1574 // Since `inverse_memory_index_{a,b}` each only refer to their
1575 // respective fields, they can be safely inverted
1576 let memory_index_a = invert_mapping(&inverse_memory_index_a);
1577 let memory_index_b = invert_mapping(&inverse_memory_index_b);
1578
1579 let outer_fields =
1580 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
1581 (outer_fields, offsets_b, memory_index_b)
1582 }
1583 _ => bug!(),
1584 };
1585
1586 let mut size = prefix.size;
1587 let mut align = prefix.align;
1588 let variants = info
1589 .variant_fields
1590 .iter_enumerated()
1591 .map(|(index, variant_fields)| {
1592 // Only include overlap-eligible fields when we compute our variant layout.
1593 let variant_only_tys = variant_fields
1594 .iter()
1595 .filter(|local| match assignments[**local] {
1596 Unassigned => bug!(),
1597 Assigned(v) if v == index => true,
1598 Assigned(_) => bug!("assignment does not match variant"),
1599 Ineligible(_) => false,
1600 })
1601 .map(|local| subst_field(info.field_tys[*local]));
1602
1603 let mut variant = self.univariant_uninterned(
1604 ty,
1605 &variant_only_tys
1606 .map(|ty| self.layout_of(ty))
1607 .collect::<Result<Vec<_>, _>>()?,
1608 &ReprOptions::default(),
1609 StructKind::Prefixed(prefix_size, prefix_align.abi),
1610 )?;
1611 variant.variants = Variants::Single { index };
1612
1613 let (offsets, memory_index) = match variant.fields {
1614 FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
1615 _ => bug!(),
1616 };
1617
1618 // Now, stitch the promoted and variant-only fields back together in
1619 // the order they are mentioned by our GeneratorLayout.
1620 // Because we only use some subset (that can differ between variants)
1621 // of the promoted fields, we can't just pick those elements of the
1622 // `promoted_memory_index` (as we'd end up with gaps).
1623 // So instead, we build an "inverse memory_index", as if all of the
1624 // promoted fields were being used, but leave the elements not in the
1625 // subset as `INVALID_FIELD_IDX`, which we can filter out later to
1626 // obtain a valid (bijective) mapping.
1627 const INVALID_FIELD_IDX: u32 = !0;
1628 let mut combined_inverse_memory_index =
1629 vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
1630 let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
1631 let combined_offsets = variant_fields
1632 .iter()
1633 .enumerate()
1634 .map(|(i, local)| {
1635 let (offset, memory_index) = match assignments[*local] {
1636 Unassigned => bug!(),
1637 Assigned(_) => {
1638 let (offset, memory_index) =
1639 offsets_and_memory_index.next().unwrap();
1640 (offset, promoted_memory_index.len() as u32 + memory_index)
1641 }
1642 Ineligible(field_idx) => {
1643 let field_idx = field_idx.unwrap() as usize;
1644 (promoted_offsets[field_idx], promoted_memory_index[field_idx])
1645 }
1646 };
1647 combined_inverse_memory_index[memory_index as usize] = i as u32;
1648 offset
1649 })
1650 .collect();
1651
1652 // Remove the unused slots and invert the mapping to obtain the
1653 // combined `memory_index` (also see previous comment).
1654 combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
1655 let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
1656
1657 variant.fields = FieldsShape::Arbitrary {
1658 offsets: combined_offsets,
1659 memory_index: combined_memory_index,
1660 };
1661
1662 size = size.max(variant.size);
1663 align = align.max(variant.align);
1664 Ok(variant)
1665 })
1666 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1667
1668 size = size.align_to(align.abi);
1669
1670 let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
1671 {
1672 Abi::Uninhabited
1673 } else {
1674 Abi::Aggregate { sized: true }
1675 };
1676
1677 let layout = tcx.intern_layout(Layout {
1678 variants: Variants::Multiple {
1679 tag,
1680 tag_encoding: TagEncoding::Direct,
1681 tag_field: tag_index,
1682 variants,
1683 },
1684 fields: outer_fields,
1685 abi,
1686 largest_niche: prefix.largest_niche,
1687 size,
1688 align,
1689 });
1690 debug!("generator layout ({:?}): {:#?}", ty, layout);
1691 Ok(layout)
1692 }
1693
1694 /// This is invoked by the `layout_raw` query to record the final
1695 /// layout of each type.
1696 #[inline(always)]
1697 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1698 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1699 // for dumping later.
1700 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1701 self.record_layout_for_printing_outlined(layout)
1702 }
1703 }
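// For reference (illustrative output only; the exact formatting may change
// between releases), compiling with `-Zprint-type-sizes` emits lines such as:
//
//     print-type-size type: `std::option::Option<u32>`: 8 bytes, alignment: 4 bytes
//     print-type-size     discriminant: 4 bytes
//     print-type-size     variant `Some`: 4 bytes
//     print-type-size         field `.0`: 4 bytes
//     print-type-size     variant `None`: 0 bytes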
1704
1705 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1706 // Ignore layouts computed with non-empty parameter environments or for
1707 // non-monomorphic types, as the user only wants to see the types
1708 // resulting from the final codegen session.
1709 if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
1710 return;
1711 }
1712
1713 // (delay format until we actually need it)
1714 let record = |kind, packed, opt_discr_size, variants| {
1715 let type_desc = format!("{:?}", layout.ty);
1716 self.tcx.sess.code_stats.record_type_size(
1717 kind,
1718 type_desc,
1719 layout.align.abi,
1720 layout.size,
1721 packed,
1722 opt_discr_size,
1723 variants,
1724 );
1725 };
1726
1727 let adt_def = match *layout.ty.kind() {
1728 ty::Adt(ref adt_def, _) => {
1729 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1730 adt_def
1731 }
1732
1733 ty::Closure(..) => {
1734 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1735 record(DataTypeKind::Closure, false, None, vec![]);
1736 return;
1737 }
1738
1739 _ => {
1740 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1741 return;
1742 }
1743 };
1744
1745 let adt_kind = adt_def.adt_kind();
1746 let adt_packed = adt_def.repr.pack.is_some();
1747
1748 let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1749 let mut min_size = Size::ZERO;
1750 let field_info: Vec<_> = flds
1751 .iter()
1752 .enumerate()
1753 .map(|(i, &name)| match layout.field(self, i) {
1754 Err(err) => {
1755 bug!("no layout found for field {}: `{:?}`", name, err);
1756 }
1757 Ok(field_layout) => {
1758 let offset = layout.fields.offset(i);
1759 let field_end = offset + field_layout.size;
1760 if min_size < field_end {
1761 min_size = field_end;
1762 }
1763 FieldInfo {
1764 name: name.to_string(),
1765 offset: offset.bytes(),
1766 size: field_layout.size.bytes(),
1767 align: field_layout.align.abi.bytes(),
1768 }
1769 }
1770 })
1771 .collect();
1772
1773 VariantInfo {
1774 name: n.map(|n| n.to_string()),
1775 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1776 align: layout.align.abi.bytes(),
1777 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1778 fields: field_info,
1779 }
1780 };
1781
1782 match layout.variants {
1783 Variants::Single { index } => {
1784 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variants[index].ident);
1785 if !adt_def.variants.is_empty() {
1786 let variant_def = &adt_def.variants[index];
1787 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1788 record(
1789 adt_kind.into(),
1790 adt_packed,
1791 None,
1792 vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1793 );
1794 } else {
1795 // (This case arises for *empty* enums, so give it
1796 // zero variants.)
1797 record(adt_kind.into(), adt_packed, None, vec![]);
1798 }
1799 }
1800
1801 Variants::Multiple { ref tag, ref tag_encoding, .. } => {
1802 debug!(
1803 "print-type-size `{:#?}` adt general variants def {}",
1804 layout.ty,
1805 adt_def.variants.len()
1806 );
1807 let variant_infos: Vec<_> = adt_def
1808 .variants
1809 .iter_enumerated()
1810 .map(|(i, variant_def)| {
1811 let fields: Vec<_> =
1812 variant_def.fields.iter().map(|f| f.ident.name).collect();
1813 build_variant_info(
1814 Some(variant_def.ident),
1815 &fields,
1816 layout.for_variant(self, i),
1817 )
1818 })
1819 .collect();
1820 record(
1821 adt_kind.into(),
1822 adt_packed,
1823 match tag_encoding {
1824 TagEncoding::Direct => Some(tag.value.size(self)),
1825 _ => None,
1826 },
1827 variant_infos,
1828 );
1829 }
1830 }
1831 }
1832 }
1833
1834 /// Type size "skeleton", i.e., the only information determining a type's size.
1835 /// While this is conservative (aside from constant sizes, only pointers,
1836 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1837 /// enough to statically check common use cases of `transmute`.
1838 #[derive(Copy, Clone, Debug)]
1839 pub enum SizeSkeleton<'tcx> {
1840 /// Any statically computable Layout.
1841 Known(Size),
1842
1843 /// A potentially-fat pointer.
1844 Pointer {
1845 /// If true, this pointer is never null.
1846 non_zero: bool,
1847 /// The type which determines the unsized metadata, if any,
1848 /// of this pointer. Either a type parameter or a projection
1849 /// depending on one, with regions erased.
1850 tail: Ty<'tcx>,
1851 },
1852 }
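// Illustrative case this enables (a sketch, not code from this module): the
// size check for `transmute` can accept
//
//     unsafe fn cast<T>(x: &T) -> Option<&T> { std::mem::transmute(x) }
//
// even though neither layout is statically known, because both sides reduce to
// `SizeSkeleton::Pointer { tail: T, .. }` and `same_size` only compares tails.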
1853
1854 impl<'tcx> SizeSkeleton<'tcx> {
1855 pub fn compute(
1856 ty: Ty<'tcx>,
1857 tcx: TyCtxt<'tcx>,
1858 param_env: ty::ParamEnv<'tcx>,
1859 ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1860 debug_assert!(!ty.has_infer_types_or_consts());
1861
1862 // First try computing a static layout.
1863 let err = match tcx.layout_of(param_env.and(ty)) {
1864 Ok(layout) => {
1865 return Ok(SizeSkeleton::Known(layout.size));
1866 }
1867 Err(err) => err,
1868 };
1869
1870 match *ty.kind() {
1871 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1872 let non_zero = !ty.is_unsafe_ptr();
1873 let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
1874 match tail.kind() {
1875 ty::Param(_) | ty::Projection(_) => {
1876 debug_assert!(tail.has_param_types_or_consts());
1877 Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
1878 }
1879 _ => bug!(
1880 "SizeSkeleton::compute({}): layout errored ({}), yet \
1881 tail `{}` is not a type parameter or a projection",
1882 ty,
1883 err,
1884 tail
1885 ),
1886 }
1887 }
1888
1889 ty::Adt(def, substs) => {
1890 // Only newtypes and enums with the nullable pointer optimization are allowed.
1891 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1892 return Err(err);
1893 }
1894
1895 // Get a zero-sized variant or a pointer newtype.
1896 let zero_or_ptr_variant = |i| {
1897 let i = VariantIdx::new(i);
1898 let fields = def.variants[i]
1899 .fields
1900 .iter()
1901 .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
1902 let mut ptr = None;
1903 for field in fields {
1904 let field = field?;
1905 match field {
1906 SizeSkeleton::Known(size) => {
1907 if size.bytes() > 0 {
1908 return Err(err);
1909 }
1910 }
1911 SizeSkeleton::Pointer { .. } => {
1912 if ptr.is_some() {
1913 return Err(err);
1914 }
1915 ptr = Some(field);
1916 }
1917 }
1918 }
1919 Ok(ptr)
1920 };
1921
1922 let v0 = zero_or_ptr_variant(0)?;
1923 // Newtype.
1924 if def.variants.len() == 1 {
1925 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1926 return Ok(SizeSkeleton::Pointer {
1927 non_zero: non_zero
1928 || match tcx.layout_scalar_valid_range(def.did) {
1929 (Bound::Included(start), Bound::Unbounded) => start > 0,
1930 (Bound::Included(start), Bound::Included(end)) => {
1931 0 < start && start < end
1932 }
1933 _ => false,
1934 },
1935 tail,
1936 });
1937 } else {
1938 return Err(err);
1939 }
1940 }
1941
1942 let v1 = zero_or_ptr_variant(1)?;
1943 // Nullable pointer enum optimization.
1944 match (v0, v1) {
1945 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
1946 | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1947 Ok(SizeSkeleton::Pointer { non_zero: false, tail })
1948 }
1949 _ => Err(err),
1950 }
1951 }
1952
1953 ty::Projection(_) | ty::Opaque(..) => {
1954 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1955 if ty == normalized {
1956 Err(err)
1957 } else {
1958 SizeSkeleton::compute(normalized, tcx, param_env)
1959 }
1960 }
1961
1962 _ => Err(err),
1963 }
1964 }
1965
1966 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1967 match (self, other) {
1968 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1969 (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
1970 a == b
1971 }
1972 _ => false,
1973 }
1974 }
1975 }
1976
1977 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1978 fn tcx(&self) -> TyCtxt<'tcx>;
1979 }
1980
1981 pub trait HasParamEnv<'tcx> {
1982 fn param_env(&self) -> ty::ParamEnv<'tcx>;
1983 }
1984
1985 impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
1986 fn data_layout(&self) -> &TargetDataLayout {
1987 &self.data_layout
1988 }
1989 }
1990
1991 impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
1992 fn tcx(&self) -> TyCtxt<'tcx> {
1993 *self
1994 }
1995 }
1996
1997 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1998 fn param_env(&self) -> ty::ParamEnv<'tcx> {
1999 self.param_env
2000 }
2001 }
2002
2003 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
2004 fn data_layout(&self) -> &TargetDataLayout {
2005 self.tcx.data_layout()
2006 }
2007 }
2008
2009 impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
2010 fn tcx(&self) -> TyCtxt<'tcx> {
2011 self.tcx.tcx()
2012 }
2013 }
2014
2015 pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2016
2017 impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
2018 type Ty = Ty<'tcx>;
2019 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2020
2021 /// Computes the layout of a type. Note that this implicitly
2022 /// executes in "reveal all" mode.
2023 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2024 let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
2025 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2026 let layout = self.tcx.layout_raw(param_env.and(ty))?;
2027 let layout = TyAndLayout { ty, layout };
2028
2029 // N.B., this recording is normally disabled; when enabled, it
2030 // can however trigger recursive invocations of `layout_of`.
2031 // Therefore, we execute it *after* the main query has
2032 // completed, to avoid problems around recursive structures
2033 // and the like. (Admittedly, I wasn't able to reproduce a problem
2034 // here, but it seems like the right thing to do. -nmatsakis)
2035 self.record_layout_for_printing(layout);
2036
2037 Ok(layout)
2038 }
2039 }
2040
2041 impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
2042 type Ty = Ty<'tcx>;
2043 type TyAndLayout = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
2044
2045 /// Computes the layout of a type. Note that this implicitly
2046 /// executes in "reveal all" mode.
2047 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
2048 let param_env = self.param_env.with_reveal_all_normalized(*self.tcx);
2049 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
2050 let layout = self.tcx.layout_raw(param_env.and(ty))?;
2051 let layout = TyAndLayout { ty, layout };
2052
2053 // N.B., this recording is normally disabled; when enabled, it
2054 // can however trigger recursive invocations of `layout_of`.
2055 // Therefore, we execute it *after* the main query has
2056 // completed, to avoid problems around recursive structures
2057 // and the like. (Admittedly, I wasn't able to reproduce a problem
2058 // here, but it seems like the right thing to do. -nmatsakis)
2059 let cx = LayoutCx { tcx: *self.tcx, param_env: self.param_env };
2060 cx.record_layout_for_printing(layout);
2061
2062 Ok(layout)
2063 }
2064 }
2065
2066 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
2067 impl TyCtxt<'tcx> {
2068 /// Computes the layout of a type. Note that this implicitly
2069 /// executes in "reveal all" mode.
2070 #[inline]
2071 pub fn layout_of(
2072 self,
2073 param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2074 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2075 let cx = LayoutCx { tcx: self, param_env: param_env_and_ty.param_env };
2076 cx.layout_of(param_env_and_ty.value)
2077 }
2078 }
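// Illustrative call site (a sketch; `tcx` and `some_ty` stand in for values a
// real caller would already have):
//
//     let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(some_ty))?;
//     debug!("size: {:?}, align: {:?}", layout.size, layout.align.abi);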
2079
2080 impl ty::query::TyCtxtAt<'tcx> {
2081 /// Computes the layout of a type. Note that this implicitly
2082 /// executes in "reveal all" mode.
2083 #[inline]
2084 pub fn layout_of(
2085 self,
2086 param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
2087 ) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
2088 let cx = LayoutCx { tcx: self.at(self.span), param_env: param_env_and_ty.param_env };
2089 cx.layout_of(param_env_and_ty.value)
2090 }
2091 }
2092
2093 impl<'tcx, C> TyAndLayoutMethods<'tcx, C> for Ty<'tcx>
2094 where
2095 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2096 + HasTyCtxt<'tcx>
2097 + HasParamEnv<'tcx>,
2098 {
2099 fn for_variant(
2100 this: TyAndLayout<'tcx>,
2101 cx: &C,
2102 variant_index: VariantIdx,
2103 ) -> TyAndLayout<'tcx> {
2104 let layout = match this.variants {
2105 Variants::Single { index }
2106 // If all variants but one are uninhabited, the variant layout is the enum layout.
2107 if index == variant_index &&
2108 // Don't confuse variants of uninhabited enums with the enum itself.
2109 // For more details see https://github.com/rust-lang/rust/issues/69763.
2110 this.fields != FieldsShape::Primitive =>
2111 {
2112 this.layout
2113 }
2114
2115 Variants::Single { index } => {
2116 // Deny calling for_variant more than once for non-Single enums.
2117 if let Ok(original_layout) = cx.layout_of(this.ty).to_result() {
2118 assert_eq!(original_layout.variants, Variants::Single { index });
2119 }
2120
2121 let fields = match this.ty.kind() {
2122 ty::Adt(def, _) if def.variants.is_empty() =>
2123 bug!("for_variant called on zero-variant enum"),
2124 ty::Adt(def, _) => def.variants[variant_index].fields.len(),
2125 _ => bug!(),
2126 };
2127 let tcx = cx.tcx();
2128 tcx.intern_layout(Layout {
2129 variants: Variants::Single { index: variant_index },
2130 fields: match NonZeroUsize::new(fields) {
2131 Some(fields) => FieldsShape::Union(fields),
2132 None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
2133 },
2134 abi: Abi::Uninhabited,
2135 largest_niche: None,
2136 align: tcx.data_layout.i8_align,
2137 size: Size::ZERO,
2138 })
2139 }
2140
2141 Variants::Multiple { ref variants, .. } => &variants[variant_index],
2142 };
2143
2144 assert_eq!(layout.variants, Variants::Single { index: variant_index });
2145
2146 TyAndLayout { ty: this.ty, layout }
2147 }
2148
2149 fn field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> C::TyAndLayout {
2150 enum TyMaybeWithLayout<C: LayoutOf> {
2151 Ty(C::Ty),
2152 TyAndLayout(C::TyAndLayout),
2153 }
2154
2155 fn ty_and_layout_kind<
2156 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout: MaybeResult<TyAndLayout<'tcx>>>
2157 + HasTyCtxt<'tcx>
2158 + HasParamEnv<'tcx>,
2159 >(
2160 this: TyAndLayout<'tcx>,
2161 cx: &C,
2162 i: usize,
2163 ty: C::Ty,
2164 ) -> TyMaybeWithLayout<C> {
2165 let tcx = cx.tcx();
2166 let tag_layout = |tag: &Scalar| -> C::TyAndLayout {
2167 let layout = Layout::scalar(cx, tag.clone());
2168 MaybeResult::from(Ok(TyAndLayout {
2169 layout: tcx.intern_layout(layout),
2170 ty: tag.value.to_ty(tcx),
2171 }))
2172 };
2173
2174 match *ty.kind() {
2175 ty::Bool
2176 | ty::Char
2177 | ty::Int(_)
2178 | ty::Uint(_)
2179 | ty::Float(_)
2180 | ty::FnPtr(_)
2181 | ty::Never
2182 | ty::FnDef(..)
2183 | ty::GeneratorWitness(..)
2184 | ty::Foreign(..)
2185 | ty::Dynamic(..) => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2186
2187 // Potentially-fat pointers.
2188 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2189 assert!(i < this.fields.count());
2190
2191 // Reuse the fat `*T` type as its own thin pointer data field.
2192 // This provides information about, e.g., DST struct pointees
2193 // (which may have no non-DST form), and will work as long
2194 // as the `Abi` or `FieldsShape` is checked by users.
2195 if i == 0 {
2196 let nil = tcx.mk_unit();
2197 let ptr_ty = if ty.is_unsafe_ptr() {
2198 tcx.mk_mut_ptr(nil)
2199 } else {
2200 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
2201 };
2202 return TyMaybeWithLayout::TyAndLayout(MaybeResult::from(
2203 cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
2204 ptr_layout.ty = ty;
2205 ptr_layout
2206 }),
2207 ));
2208 }
2209
2210 match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
2211 ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
2212 ty::Dynamic(_, _) => {
2213 TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
2214 tcx.lifetimes.re_static,
2215 tcx.mk_array(tcx.types.usize, 3),
2216 ))
2217 /* FIXME: use actual fn pointers
2218 Warning: naively computing the number of entries in the
2219 vtable by counting the methods on the trait + methods on
2220 all parent traits does not work, because some methods can
2221 be not object safe and thus excluded from the vtable.
2222 Increase this counter if you tried to implement this but
2223 failed to do it without duplicating a lot of code from
2224 other places in the compiler: 2
2225 tcx.mk_tup(&[
2226 tcx.mk_array(tcx.types.usize, 3),
2227 tcx.mk_array(Option<fn()>),
2228 ])
2229 */
2230 }
2231 _ => bug!("TyAndLayout::field_type({:?}): not applicable", this),
2232 }
2233 }
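// For example (illustrative): `&[u8]` is handled here as two fields. Field 0
// reuses the type `&[u8]` itself but with the layout of a thin `&'static mut ()`,
// and field 1 is `usize` (the slice length). For `&dyn Trait`, field 1 is
// currently modeled as `&'static [usize; 3]` (see the FIXME above) rather than
// a real vtable type.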
2234
2235 // Arrays and slices.
2236 ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
2237 ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
2238
2239 // Tuples, generators and closures.
2240 ty::Closure(_, ref substs) => {
2241 ty_and_layout_kind(this, cx, i, substs.as_closure().tupled_upvars_ty())
2242 }
2243
2244 ty::Generator(def_id, ref substs, _) => match this.variants {
2245 Variants::Single { index } => TyMaybeWithLayout::Ty(
2246 substs
2247 .as_generator()
2248 .state_tys(def_id, tcx)
2249 .nth(index.as_usize())
2250 .unwrap()
2251 .nth(i)
2252 .unwrap(),
2253 ),
2254 Variants::Multiple { ref tag, tag_field, .. } => {
2255 if i == tag_field {
2256 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2257 }
2258 TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
2259 }
2260 },
2261
2262 ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),
2263
2264 // ADTs.
2265 ty::Adt(def, substs) => {
2266 match this.variants {
2267 Variants::Single { index } => {
2268 TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
2269 }
2270
2271 // Discriminant field for enums (where applicable).
2272 Variants::Multiple { ref tag, .. } => {
2273 assert_eq!(i, 0);
2274 return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
2275 }
2276 }
2277 }
2278
2279 ty::Projection(_)
2280 | ty::Bound(..)
2281 | ty::Placeholder(..)
2282 | ty::Opaque(..)
2283 | ty::Param(_)
2284 | ty::Infer(_)
2285 | ty::Error(_) => bug!("TyAndLayout::field_type: unexpected type `{}`", this.ty),
2286 }
2287 }
2288
2289 cx.layout_of(match ty_and_layout_kind(this, cx, i, this.ty) {
2290 TyMaybeWithLayout::Ty(result) => result,
2291 TyMaybeWithLayout::TyAndLayout(result) => return result,
2292 })
2293 }
2294
2295 fn pointee_info_at(this: TyAndLayout<'tcx>, cx: &C, offset: Size) -> Option<PointeeInfo> {
2296 let addr_space_of_ty = |ty: Ty<'tcx>| {
2297 if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
2298 };
2299
2300 let pointee_info = match *this.ty.kind() {
2301 ty::RawPtr(mt) if offset.bytes() == 0 => {
2302 cx.layout_of(mt.ty).to_result().ok().map(|layout| PointeeInfo {
2303 size: layout.size,
2304 align: layout.align.abi,
2305 safe: None,
2306 address_space: addr_space_of_ty(mt.ty),
2307 })
2308 }
2309 ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
2310 cx.layout_of(cx.tcx().mk_fn_ptr(fn_sig)).to_result().ok().map(|layout| {
2311 PointeeInfo {
2312 size: layout.size,
2313 align: layout.align.abi,
2314 safe: None,
2315 address_space: cx.data_layout().instruction_address_space,
2316 }
2317 })
2318 }
2319 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
2320 let address_space = addr_space_of_ty(ty);
2321 let tcx = cx.tcx();
2322 let kind = if tcx.sess.opts.optimize == OptLevel::No {
2323 // Use conservative pointer kind if not optimizing. This saves us the
2324 // Freeze/Unpin queries, and can save time in the codegen backend (noalias
2325 // attributes in LLVM have compile-time cost even in unoptimized builds).
2326 PointerKind::Shared
2327 } else {
2328 match mt {
2329 hir::Mutability::Not => {
2330 if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
2331 PointerKind::Frozen
2332 } else {
2333 PointerKind::Shared
2334 }
2335 }
2336 hir::Mutability::Mut => {
2337 // References to self-referential structures should not be considered
2338 // noalias, as another pointer to the structure, one that is not based
2339 // on the original reference, can be obtained. We consider all !Unpin
2340 // types to be potentially self-referential here.
2341 if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
2342 PointerKind::UniqueBorrowed
2343 } else {
2344 PointerKind::Shared
2345 }
2346 }
2347 }
2348 };
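// Illustrative outcomes of the match above (in an optimized build):
//   `&i32`                         -> PointerKind::Frozen         (Freeze pointee)
//   `&Cell<i32>`                   -> PointerKind::Shared         (!Freeze pointee)
//   `&mut String`                  -> PointerKind::UniqueBorrowed (Unpin pointee)
//   `&mut` to a `!Unpin` generator -> PointerKind::Shared         (conservative)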
2349
2350 cx.layout_of(ty).to_result().ok().map(|layout| PointeeInfo {
2351 size: layout.size,
2352 align: layout.align.abi,
2353 safe: Some(kind),
2354 address_space,
2355 })
2356 }
2357
2358 _ => {
2359 let mut data_variant = match this.variants {
2360 // Within the discriminant field, only the niche itself is
2361 // always initialized, so we only check for a pointer at its
2362 // offset.
2363 //
2364 // If the niche is a pointer, it's either valid (according
2365 // to its type), or null (which the niche field's scalar
2366 // validity range encodes). This allows using
2367 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
2368 // this will continue to work as long as we don't start
2369 // using more niches than just null (e.g., the first page of
2370 // the address space, or unaligned pointers).
2371 Variants::Multiple {
2372 tag_encoding: TagEncoding::Niche { dataful_variant, .. },
2373 tag_field,
2374 ..
2375 } if this.fields.offset(tag_field) == offset => {
2376 Some(this.for_variant(cx, dataful_variant))
2377 }
2378 _ => Some(this),
2379 };
2380
2381 if let Some(variant) = data_variant {
2382 // We're not interested in any unions.
2383 if let FieldsShape::Union(_) = variant.fields {
2384 data_variant = None;
2385 }
2386 }
2387
2388 let mut result = None;
2389
2390 if let Some(variant) = data_variant {
2391 let ptr_end = offset + Pointer.size(cx);
2392 for i in 0..variant.fields.count() {
2393 let field_start = variant.fields.offset(i);
2394 if field_start <= offset {
2395 let field = variant.field(cx, i);
2396 result = field.to_result().ok().and_then(|field| {
2397 if ptr_end <= field_start + field.size {
2398 // We found the right field, look inside it.
2399 let field_info =
2400 field.pointee_info_at(cx, offset - field_start);
2401 field_info
2402 } else {
2403 None
2404 }
2405 });
2406 if result.is_some() {
2407 break;
2408 }
2409 }
2410 }
2411 }
2412
2413 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
2414 if let Some(ref mut pointee) = result {
2415 if let ty::Adt(def, _) = this.ty.kind() {
2416 if def.is_box() && offset.bytes() == 0 {
2417 pointee.safe = Some(PointerKind::UniqueOwned);
2418 }
2419 }
2420 }
2421
2422 result
2423 }
2424 };
2425
2426 debug!(
2427 "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
2428 offset,
2429 this.ty.kind(),
2430 pointee_info
2431 );
2432
2433 pointee_info
2434 }
2435 }
2436
2437 impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
2438 fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
2439 use crate::ty::layout::LayoutError::*;
2440 mem::discriminant(self).hash_stable(hcx, hasher);
2441
2442 match *self {
2443 Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher),
2444 }
2445 }
2446 }
2447
2448 impl<'tcx> ty::Instance<'tcx> {
2449 // NOTE(eddyb) this is private to avoid using it from outside of
2450 // `FnAbi::of_instance` - any other uses are either too high-level
2451 // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
2452 // or should go through `FnAbi` instead, to avoid losing any
2453 // adjustments `FnAbi::of_instance` might be performing.
2454 fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
2455 // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
2456 let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
2457 match *ty.kind() {
2458 ty::FnDef(..) => {
2459 // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
2460 // parameters unused if they show up in the signature, but not in the `mir::Body`
2461 // (i.e. due to being inside a projection that got normalized, see
2462 // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
2463 // track of a polymorphization `ParamEnv` to allow normalizing later.
2464 let mut sig = match *ty.kind() {
2465 ty::FnDef(def_id, substs) => tcx
2466 .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
2467 .subst(tcx, substs),
2468 _ => unreachable!(),
2469 };
2470
2471 if let ty::InstanceDef::VtableShim(..) = self.def {
2472 // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
2473 sig = sig.map_bound(|mut sig| {
2474 let mut inputs_and_output = sig.inputs_and_output.to_vec();
2475 inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
2476 sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
2477 sig
2478 });
2479 }
2480 sig
2481 }
2482 ty::Closure(def_id, substs) => {
2483 let sig = substs.as_closure().sig();
2484
2485 let bound_vars = tcx.mk_bound_variable_kinds(
2486 sig.bound_vars()
2487 .iter()
2488 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2489 );
2490 let br = ty::BoundRegion {
2491 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2492 kind: ty::BoundRegionKind::BrEnv,
2493 };
2494 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2495 let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
2496
2497 let sig = sig.skip_binder();
2498 ty::Binder::bind_with_vars(
2499 tcx.mk_fn_sig(
2500 iter::once(env_ty).chain(sig.inputs().iter().cloned()),
2501 sig.output(),
2502 sig.c_variadic,
2503 sig.unsafety,
2504 sig.abi,
2505 ),
2506 bound_vars,
2507 )
2508 }
2509 ty::Generator(_, substs, _) => {
2510 let sig = substs.as_generator().poly_sig();
2511
2512 let bound_vars = tcx.mk_bound_variable_kinds(
2513 sig.bound_vars()
2514 .iter()
2515 .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
2516 );
2517 let br = ty::BoundRegion {
2518 var: ty::BoundVar::from_usize(bound_vars.len() - 1),
2519 kind: ty::BoundRegionKind::BrEnv,
2520 };
2521 let env_region = ty::ReLateBound(ty::INNERMOST, br);
2522 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
2523
2524 let pin_did = tcx.require_lang_item(LangItem::Pin, None);
2525 let pin_adt_ref = tcx.adt_def(pin_did);
2526 let pin_substs = tcx.intern_substs(&[env_ty.into()]);
2527 let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
2528
2529 let sig = sig.skip_binder();
2530 let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
2531 let state_adt_ref = tcx.adt_def(state_did);
2532 let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
2533 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
2534 ty::Binder::bind_with_vars(
2535 tcx.mk_fn_sig(
2536 [env_ty, sig.resume_ty].iter(),
2537 &ret_ty,
2538 false,
2539 hir::Unsafety::Normal,
2540 rustc_target::spec::abi::Abi::Rust,
2541 ),
2542 bound_vars,
2543 )
2544 }
2545 _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
2546 }
2547 }
2548 }
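// Schematic summary of the signatures built above (regions and binders elided;
// not literal compiler output):
//   fn items:   the normalized `fn_sig`, with `self` rewritten to `*mut Self` for vtable shims
//   closures:   fn(env, (A, ...)) -> R  with the rust-call ABI, where `env` is
//               `&C`, `&mut C`, or `C` depending on the closure kind
//   generators: fn(Pin<&mut G>, Resume) -> GeneratorState<Yield, Return>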
2549
2550 pub trait FnAbiExt<'tcx, C>
2551 where
2552 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2553 + HasDataLayout
2554 + HasTargetSpec
2555 + HasTyCtxt<'tcx>
2556 + HasParamEnv<'tcx>,
2557 {
2558 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2559 ///
2560 /// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
2561 /// instead, where the instance is a `InstanceDef::Virtual`.
2562 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2563
2564 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2565 /// direct calls to an `fn`.
2566 ///
2567 /// NB: that includes virtual calls, which are represented by "direct calls"
2568 /// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2569 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2570
2571 fn new_internal(
2572 cx: &C,
2573 sig: ty::PolyFnSig<'tcx>,
2574 extra_args: &[Ty<'tcx>],
2575 caller_location: Option<Ty<'tcx>>,
2576 codegen_fn_attr_flags: CodegenFnAttrFlags,
2577 make_self_ptr_thin: bool,
2578 ) -> Self;
2579 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2580 }
2581
2582 fn fn_can_unwind(
2583 panic_strategy: PanicStrategy,
2584 codegen_fn_attr_flags: CodegenFnAttrFlags,
2585 call_conv: Conv,
2586 abi: SpecAbi,
2587 ) -> bool {
2588 if panic_strategy != PanicStrategy::Unwind {
2589 // In panic=abort mode we assume nothing can unwind anywhere, so
2590 // optimize based on this!
2591 false
2592 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::UNWIND) {
2593 // If a specific #[unwind] attribute is present, use that.
2594 true
2595 } else if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) {
2596 // Special attribute for allocator functions, which can't unwind.
2597 false
2598 } else {
2599 if call_conv == Conv::Rust {
2600 // Any Rust method (or `extern "Rust" fn` or `extern
2601 // "rust-call" fn`) is explicitly allowed to unwind
2602 // (unless it has no-unwind attribute, handled above).
2603 true
2604 } else {
2605 // Anything else is either:
2606 //
2607 // 1. A foreign item using a non-Rust ABI (like `extern "C" { fn foo(); }`), or
2608 //
2609 // 2. A Rust item using a non-Rust ABI (like `extern "C" fn foo() { ... }`).
2610 //
2611 // In both of these cases, we should refer to the ABI to determine whether or not we
2612 // should unwind. See Rust RFC 2945 for more information on this behavior, here:
2613 // https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
2614 use SpecAbi::*;
2615 match abi {
2616 C { unwind } | Stdcall { unwind } | System { unwind } | Thiscall { unwind } => {
2617 unwind
2618 }
2619 Cdecl
2620 | Fastcall
2621 | Vectorcall
2622 | Aapcs
2623 | Win64
2624 | SysV64
2625 | PtxKernel
2626 | Msp430Interrupt
2627 | X86Interrupt
2628 | AmdGpuKernel
2629 | EfiApi
2630 | AvrInterrupt
2631 | AvrNonBlockingInterrupt
2632 | CCmseNonSecureCall
2633 | Wasm
2634 | RustIntrinsic
2635 | PlatformIntrinsic
2636 | Unadjusted => false,
2637 // In the `if` above, we checked for functions with the Rust calling convention.
2638 Rust | RustCall => unreachable!(),
2639 }
2640 }
2641 }
2642 }
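// A few concrete instances of the rules above (illustrative assertions, not
// actual tests in this module):
//
//     assert!(!fn_can_unwind(PanicStrategy::Abort, CodegenFnAttrFlags::UNWIND,
//                            Conv::Rust, SpecAbi::Rust));
//     assert!(fn_can_unwind(PanicStrategy::Unwind, CodegenFnAttrFlags::empty(),
//                           Conv::C, SpecAbi::C { unwind: true }));
//     assert!(!fn_can_unwind(PanicStrategy::Unwind, CodegenFnAttrFlags::empty(),
//                            Conv::C, SpecAbi::C { unwind: false }));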
2643
2644 impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
2645 where
2646 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2647 + HasDataLayout
2648 + HasTargetSpec
2649 + HasTyCtxt<'tcx>
2650 + HasParamEnv<'tcx>,
2651 {
2652 fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2653 // Assume that fn pointers may always unwind
2654 let codegen_fn_attr_flags = CodegenFnAttrFlags::UNWIND;
2655
2656 call::FnAbi::new_internal(cx, sig, extra_args, None, codegen_fn_attr_flags, false)
2657 }
2658
2659 fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2660 let sig = instance.fn_sig_for_fn_abi(cx.tcx());
2661
2662 let caller_location = if instance.def.requires_caller_location(cx.tcx()) {
2663 Some(cx.tcx().caller_location_ty())
2664 } else {
2665 None
2666 };
2667
2668 let attrs = cx.tcx().codegen_fn_attrs(instance.def_id()).flags;
2669
2670 call::FnAbi::new_internal(
2671 cx,
2672 sig,
2673 extra_args,
2674 caller_location,
2675 attrs,
2676 matches!(instance.def, ty::InstanceDef::Virtual(..)),
2677 )
2678 }
2679
2680 fn new_internal(
2681 cx: &C,
2682 sig: ty::PolyFnSig<'tcx>,
2683 extra_args: &[Ty<'tcx>],
2684 caller_location: Option<Ty<'tcx>>,
2685 codegen_fn_attr_flags: CodegenFnAttrFlags,
2686 force_thin_self_ptr: bool,
2687 ) -> Self {
2688 debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
2689
2690 let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
2691
2692 use rustc_target::spec::abi::Abi::*;
2693 let conv = match cx.tcx().sess.target.adjust_abi(sig.abi) {
2694 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2695
2696 // It's the ABI's job to select this, not ours.
2697 System { .. } => bug!("system abi should be selected elsewhere"),
2698 EfiApi => bug!("eficall abi should be selected elsewhere"),
2699
2700 Stdcall { .. } => Conv::X86Stdcall,
2701 Fastcall => Conv::X86Fastcall,
2702 Vectorcall => Conv::X86VectorCall,
2703 Thiscall { .. } => Conv::X86ThisCall,
2704 C { .. } => Conv::C,
2705 Unadjusted => Conv::C,
2706 Win64 => Conv::X86_64Win64,
2707 SysV64 => Conv::X86_64SysV,
2708 Aapcs => Conv::ArmAapcs,
2709 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2710 PtxKernel => Conv::PtxKernel,
2711 Msp430Interrupt => Conv::Msp430Intr,
2712 X86Interrupt => Conv::X86Intr,
2713 AmdGpuKernel => Conv::AmdGpuKernel,
2714 AvrInterrupt => Conv::AvrInterrupt,
2715 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2716 Wasm => Conv::C,
2717
2718 // These API constants ought to be more specific...
2719 Cdecl => Conv::C,
2720 };
2721
2722 let mut inputs = sig.inputs();
2723 let extra_args = if sig.abi == RustCall {
2724 assert!(!sig.c_variadic && extra_args.is_empty());
2725
2726 if let Some(input) = sig.inputs().last() {
2727 if let ty::Tuple(tupled_arguments) = input.kind() {
2728 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2729 tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2730 } else {
2731 bug!(
2732 "argument to function with \"rust-call\" ABI \
2733 is not a tuple"
2734 );
2735 }
2736 } else {
2737 bug!(
2738 "argument to function with \"rust-call\" ABI \
2739 is not a tuple"
2740 );
2741 }
2742 } else {
2743 assert!(sig.c_variadic || extra_args.is_empty());
2744 extra_args.to_vec()
2745 };
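// For example (illustrative): a call through `FnMut::call_mut` has the signature
// `extern "rust-call" fn(&mut Self, (A, B)) -> R`; here `inputs` becomes
// `[&mut Self]` and `extra_args` becomes `[A, B]`, so the ABI sees the untupled
// argument list `(&mut Self, A, B)`.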
2746
2747 let target = &cx.tcx().sess.target;
2748 let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl");
2749 let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
2750 let linux_s390x_gnu_like =
2751 target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
2752 let linux_sparc64_gnu_like =
2753 target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
2754 let linux_powerpc_gnu_like =
2755 target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
2756 let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
2757
2758 // Handle safe Rust thin and fat pointers.
2759 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2760 scalar: &Scalar,
2761 layout: TyAndLayout<'tcx>,
2762 offset: Size,
2763 is_return: bool| {
2764 // Booleans are always an i1 that needs to be zero-extended.
2765 if scalar.is_bool() {
2766 attrs.ext(ArgExtension::Zext);
2767 return;
2768 }
2769
2770 // Only pointer types handled below.
2771 if scalar.value != Pointer {
2772 return;
2773 }
2774
2775 if scalar.valid_range.start() < scalar.valid_range.end() {
2776 if *scalar.valid_range.start() > 0 {
2777 attrs.set(ArgAttribute::NonNull);
2778 }
2779 }
2780
2781 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2782 if let Some(kind) = pointee.safe {
2783 attrs.pointee_align = Some(pointee.align);
2784
2785 // `Box` pointers (`UniqueOwned`) are not necessarily dereferenceable
2786 // for the entire duration of the function, as they can be deallocated
2787 // at any time. Set their valid size to 0.
2788 attrs.pointee_size = match kind {
2789 PointerKind::UniqueOwned => Size::ZERO,
2790 _ => pointee.size,
2791 };
2792
2793 // `Box` pointer parameters never alias because ownership is transferred.
2794 // `&mut` pointer parameters never alias other parameters
2795 // or mutable global data.
2796 //
2797 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2798 // and can be marked as both `readonly` and `noalias`, as
2799 // LLVM's definition of `noalias` is based solely on memory
2800 // dependencies rather than pointer equality
2801 //
2802 // Due to miscompiles in LLVM < 12, we apply a separate NoAliasMutRef attribute
2803 // for UniqueBorrowed arguments, so that the codegen backend can decide
2804 // whether or not to actually emit the attribute.
2805 let no_alias = match kind {
2806 PointerKind::Shared | PointerKind::UniqueBorrowed => false,
2807 PointerKind::UniqueOwned => true,
2808 PointerKind::Frozen => !is_return,
2809 };
2810 if no_alias {
2811 attrs.set(ArgAttribute::NoAlias);
2812 }
2813
2814 if kind == PointerKind::Frozen && !is_return {
2815 attrs.set(ArgAttribute::ReadOnly);
2816 }
2817
2818 if kind == PointerKind::UniqueBorrowed && !is_return {
2819 attrs.set(ArgAttribute::NoAliasMutRef);
2820 }
2821 }
2822 }
2823 };
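// Illustrative attribute outcomes of the closure above (assuming an optimized
// build, so `pointee_info_at` reports precise pointer kinds):
//   argument `&i32`        -> NonNull + NoAlias + ReadOnly, pointee_size = 4
//   argument `&mut String` -> NonNull + NoAliasMutRef,      pointee_size = size_of::<String>()
//   argument `Box<u8>`     -> NonNull + NoAlias,            pointee_size = 0 (may be deallocated)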
2824
2825 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2826 let is_return = arg_idx.is_none();
2827
2828 let layout = cx.layout_of(ty);
2829 let layout = if force_thin_self_ptr && arg_idx == Some(0) {
2830 // Don't pass the vtable, it's not an argument of the virtual fn.
2831 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2832 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2833 make_thin_self_ptr(cx, layout)
2834 } else {
2835 layout
2836 };
2837
2838 let mut arg = ArgAbi::new(cx, layout, |layout, scalar, offset| {
2839 let mut attrs = ArgAttributes::new();
2840 adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
2841 attrs
2842 });
2843
2844 if arg.layout.is_zst() {
2845 // For some forsaken reason, x86_64-pc-windows-gnu
2846 // doesn't ignore zero-sized struct arguments.
2847 // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl}.
2848 if is_return
2849 || rust_abi
2850 || (!win_x64_gnu
2851 && !linux_s390x_gnu_like
2852 && !linux_sparc64_gnu_like
2853 && !linux_powerpc_gnu_like)
2854 {
2855 arg.mode = PassMode::Ignore;
2856 }
2857 }
2858
2859 arg
2860 };
2861
2862 let mut fn_abi = FnAbi {
2863 ret: arg_of(sig.output(), None),
2864 args: inputs
2865 .iter()
2866 .cloned()
2867 .chain(extra_args)
2868 .chain(caller_location)
2869 .enumerate()
2870 .map(|(i, ty)| arg_of(ty, Some(i)))
2871 .collect(),
2872 c_variadic: sig.c_variadic,
2873 fixed_count: inputs.len(),
2874 conv,
2875 can_unwind: fn_can_unwind(
2876 cx.tcx().sess.panic_strategy(),
2877 codegen_fn_attr_flags,
2878 conv,
2879 sig.abi,
2880 ),
2881 };
2882 fn_abi.adjust_for_abi(cx, sig.abi);
2883 debug!("FnAbi::new_internal = {:?}", fn_abi);
2884 fn_abi
2885 }
2886
2887 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2888 if abi == SpecAbi::Unadjusted {
2889 return;
2890 }
2891
2892 if abi == SpecAbi::Rust
2893 || abi == SpecAbi::RustCall
2894 || abi == SpecAbi::RustIntrinsic
2895 || abi == SpecAbi::PlatformIntrinsic
2896 {
2897 let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
2898 if arg.is_ignore() {
2899 return;
2900 }
2901
2902 match arg.layout.abi {
2903 Abi::Aggregate { .. } => {}
2904
2905 // This is a fun case! The gist of what this is doing is
2906 // that we want callers and callees to always agree on the
2907 // ABI of how they pass SIMD arguments. If we were to *not*
2908 // make these arguments indirect then they'd be immediates
2909 // in LLVM, which means that they'd use whatever the
2910 // appropriate ABI is for the callee and the caller. That
2911 // means, for example, if the caller doesn't have AVX
2912 // enabled but the callee does, then passing an AVX argument
2913 // across this boundary would cause corrupt data to show up.
2914 //
2915 // This problem is fixed by unconditionally passing SIMD
2916 // arguments through memory between callers and callees
2917 // which should get them all to agree on ABI regardless of
2918 // target feature sets. Some more information about this
2919 // issue can be found in #44367.
2920 //
2921 // Note that the platform intrinsic ABI is exempt here as
2922 // that's how we connect up to LLVM and it's unstable
2923 // anyway; we control all calls to it in libstd.
2924 Abi::Vector { .. }
2925 if abi != SpecAbi::PlatformIntrinsic
2926 && cx.tcx().sess.target.simd_types_indirect =>
2927 {
2928 arg.make_indirect();
2929 return;
2930 }
2931
2932 _ => return,
2933 }
2934
2935 // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
2936 // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
2937 let max_by_val_size = Pointer.size(cx) * 2;
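// For example (illustrative, 64-bit target): `[u64; 2]` (16 bytes, `Abi::Aggregate`)
// is cast to a 16-byte integer `Reg` and passed by value, while `[u8; 24]` exceeds
// `max_by_val_size` and is made indirect.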
2938 let size = arg.layout.size;
2939
2940 if arg.layout.is_unsized() || size > max_by_val_size {
2941 arg.make_indirect();
2942 } else {
2943 // We want to pass small aggregates as immediates, but using
2944 // a LLVM aggregate type for this leads to bad optimizations,
2945 // so we pick an appropriately sized integer type instead.
2946 arg.cast_to(Reg { kind: RegKind::Integer, size });
2947 }
2948 };
2949 fixup(&mut self.ret);
2950 for arg in &mut self.args {
2951 fixup(arg);
2952 }
2953 return;
2954 }
2955
2956 if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2957 cx.tcx().sess.fatal(&msg);
2958 }
2959 }
2960 }
2961
2962 fn make_thin_self_ptr<'tcx, C>(cx: &C, mut layout: TyAndLayout<'tcx>) -> TyAndLayout<'tcx>
2963 where
2964 C: LayoutOf<Ty = Ty<'tcx>, TyAndLayout = TyAndLayout<'tcx>>
2965 + HasTyCtxt<'tcx>
2966 + HasParamEnv<'tcx>,
2967 {
2968 let fat_pointer_ty = if layout.is_unsized() {
2969 // unsized `self` is passed as a pointer to `self`
2970 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2971 cx.tcx().mk_mut_ptr(layout.ty)
2972 } else {
2973 match layout.abi {
2974 Abi::ScalarPair(..) => (),
2975 _ => bug!("receiver type has unsupported layout: {:?}", layout),
2976 }
2977
2978 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2979 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2980 // elsewhere in the compiler as a method on a `dyn Trait`.
2981 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2982 // get a built-in pointer type (see the schematic note after this function).
2983 let mut fat_pointer_layout = layout;
2984 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2985 && !fat_pointer_layout.ty.is_region_ptr()
2986 {
2987 for i in 0..fat_pointer_layout.fields.count() {
2988 let field_layout = fat_pointer_layout.field(cx, i);
2989
2990 if !field_layout.is_zst() {
2991 fat_pointer_layout = field_layout;
2992 continue 'descend_newtypes;
2993 }
2994 }
2995
2996 bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
2997 }
2998
2999 fat_pointer_layout.ty
3000 };
3001
3002 // we now have a type like `*mut RcBox<dyn Trait>`
3003 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
3004 // this is understood as a special case elsewhere in the compiler
3005 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
3006 layout = cx.layout_of(unit_pointer_ty);
3007 layout.ty = fat_pointer_ty;
3008 layout
3009 }
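// Schematic effect of the above (illustrative): for a `self: Rc<dyn Trait>`
// receiver in a virtual call, the newtype-unwrapping loop bottoms out at the
// built-in pointer inside `Rc` (conceptually `*mut RcBox<dyn Trait>`); the
// returned `TyAndLayout` keeps that fat-pointer type but carries the layout of
// `*mut ()`, i.e. a thin pointer, which codegen special-cases later.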