// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

pub use self::Integer::*;
pub use self::Primitive::*;

use session::{self, DataTypeKind, Session};
use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, ReprFlags};

use syntax::ast::{self, FloatTy, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::i128;
use std::iter;
use std::mem;
use std::ops::{Add, Sub, Mul, AddAssign, Deref, RangeInclusive};

use ich::StableHashingContext;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
                                           StableHasherResult};

/// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: Align,
    pub i8_align: Align,
    pub i16_align: Align,
    pub i32_align: Align,
    pub i64_align: Align,
    pub i128_align: Align,
    pub f32_align: Align,
    pub f64_align: Align,
    pub pointer_size: Size,
    pub pointer_align: Align,
    pub aggregate_align: Align,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, Align)>
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: Align::from_bits(8, 8).unwrap(),
            i8_align: Align::from_bits(8, 8).unwrap(),
            i16_align: Align::from_bits(16, 16).unwrap(),
            i32_align: Align::from_bits(32, 32).unwrap(),
            i64_align: Align::from_bits(32, 64).unwrap(),
            i128_align: Align::from_bits(32, 64).unwrap(),
            f32_align: Align::from_bits(32, 32).unwrap(),
            f64_align: Align::from_bits(64, 64).unwrap(),
            pointer_size: Size::from_bits(64),
            pointer_align: Align::from_bits(64, 64).unwrap(),
            aggregate_align: Align::from_bits(0, 64).unwrap(),
            vector_align: vec![
                (Size::from_bits(64), Align::from_bits(64, 64).unwrap()),
                (Size::from_bits(128), Align::from_bits(128, 128).unwrap())
            ]
        }
    }
}
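
// Hedged sketch (not part of the original source): how `parse` below
// tokenizes an LLVM "data-layout" string. Specs are separated by `-`
// and fields within a spec by `:`; the example string is illustrative.
#[cfg(test)]
#[test]
fn data_layout_tokenization_sketch() {
    let example = "e-i64:64-p:32:32";
    let specs: Vec<Vec<&str>> = example.split("-")
        .map(|spec| spec.split(":").collect())
        .collect();
    assert_eq!(specs[0], ["e"]);             // little-endian
    assert_eq!(specs[1], ["i64", "64"]);     // i64 aligned to 64 bits
    assert_eq!(specs[2], ["p", "32", "32"]); // 32-bit pointers
}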

impl TargetDataLayout {
    pub fn parse(sess: &Session) -> TargetDataLayout {
        // Parse a bit count from a string.
        let parse_bits = |s: &str, kind: &str, cause: &str| {
            s.parse::<u64>().unwrap_or_else(|err| {
                sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
                                  kind, s, cause, err));
                0
            })
        };

        // Parse a size string.
        let size = |s: &str, cause: &str| {
            Size::from_bits(parse_bits(s, "size", cause))
        };

        // Parse an alignment string.
        let align = |s: &[&str], cause: &str| {
            if s.is_empty() {
                sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause));
            }
            let abi = parse_bits(s[0], "alignment", cause);
            let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause));
            Align::from_bits(abi, pref).unwrap_or_else(|err| {
                sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}",
                                  cause, err));
                Align::from_bits(8, 8).unwrap()
            })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in sess.target.target.data_layout.split("-") {
            match &spec.split(":").collect::<Vec<_>>()[..] {
                &["e"] => dl.endian = Endian::Little,
                &["E"] => dl.endian = Endian::Big,
                &["a", ref a..] => dl.aggregate_align = align(a, "a"),
                &["f32", ref a..] => dl.f32_align = align(a, "f32"),
                &["f64", ref a..] => dl.f64_align = align(a, "f64"),
                &[p @ "p", s, ref a..] | &[p @ "p0", s, ref a..] => {
                    dl.pointer_size = size(s, p);
                    dl.pointer_align = align(a, p);
                }
                &[s, ref a..] if s.starts_with("i") => {
                    let bits = match s[1..].parse::<u64>() {
                        Ok(bits) => bits,
                        Err(_) => {
                            size(&s[1..], "i"); // For the user error.
                            continue;
                        }
                    };
                    let a = align(a, s);
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // The default alignment for i128 is decided by taking the
                        // alignment of the largest-sized i{64...128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                &[s, ref a..] if s.starts_with("v") => {
                    let v_size = size(&s[1..], "v");
                    let a = align(a, s);
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Perform consistency checks against the Target information.
        let endian_str = match dl.endian {
            Endian::Little => "little",
            Endian::Big => "big"
        };
        if endian_str != sess.target.target.target_endian {
            sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
                               architecture is {}-endian, while \"target-endian\" is `{}`",
                              endian_str, sess.target.target.target_endian));
        }

        if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width {
            sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
                               pointers are {}-bit, while \"target-pointer-width\" is `{}`",
                              dl.pointer_size.bits(), sess.target.target.target_pointer_width));
        }

        dl
    }

    /// Returns the exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => bug!("obj_size_bound: unknown pointer bit size {}", bits)
        }
    }

    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
        }
    }

    pub fn vector_align(&self, vec_size: Size) -> Align {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        let align = vec_size.bytes().next_power_of_two();
        Align::from_bytes(align, align).unwrap()
    }
}

pub trait HasDataLayout: Copy {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl<'a> HasDataLayout for &'a TargetDataLayout {
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

/// Endianness of the target, which must match cfg(target_endian).
#[derive(Copy, Clone)]
pub enum Endian {
    Little,
    Big
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct Size {
    raw: u64
}

impl Size {
    pub fn from_bits(bits: u64) -> Size {
        // Avoid potential overflow from `bits + 7`.
        Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
    }

    pub fn from_bytes(bytes: u64) -> Size {
        if bytes >= (1 << 61) {
            bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes)
        }
        Size {
            raw: bytes
        }
    }

    pub fn bytes(self) -> u64 {
        self.raw
    }

    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    pub fn abi_align(self, align: Align) -> Size {
        let mask = align.abi() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    pub fn is_abi_aligned(self, align: Align) -> bool {
        let mask = align.abi() - 1;
        self.bytes() & mask == 0
    }

    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> {
        let dl = cx.data_layout();

        // Each Size is less than dl.obj_size_bound(), so the sum is
        // also less than 1 << 62 (and therefore can't overflow).
        let bytes = self.bytes() + offset.bytes();

        if bytes < dl.obj_size_bound() {
            Some(Size::from_bytes(bytes))
        } else {
            None
        }
    }

    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> {
        let dl = cx.data_layout();

        match self.bytes().checked_mul(count) {
            Some(bytes) if bytes < dl.obj_size_bound() => {
                Some(Size::from_bytes(bytes))
            }
            _ => None
        }
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid these during layout computation; return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    fn add(self, other: Size) -> Size {
        // Each Size is less than 1 << 61, so the sum is
        // less than 1 << 62 (and therefore can't overflow).
        Size::from_bytes(self.bytes() + other.bytes())
    }
}

impl Sub for Size {
    type Output = Size;
    fn sub(self, other: Size) -> Size {
        // Each Size is less than 1 << 61, so an underflow
        // would result in a value larger than 1 << 61,
        // which Size::from_bytes will catch for us.
        Size::from_bytes(self.bytes() - other.bytes())
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => {
                bug!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count)
            }
        }
    }
}

impl AddAssign for Size {
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}
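
// Hedged sketch: spot-check the bit/byte rounding and alignment
// arithmetic above on a few hand-computed values.
#[cfg(test)]
#[test]
fn size_arithmetic_sketch() {
    assert_eq!(Size::from_bits(1).bytes(), 1);  // partial bytes round up
    assert_eq!(Size::from_bits(16).bytes(), 2);
    let a4 = Align::from_bytes(4, 4).unwrap();
    assert_eq!(Size::from_bytes(5).abi_align(a4).bytes(), 8);
    assert!(Size::from_bytes(8).is_abi_aligned(a4));
}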

/// Alignment of a type in bytes, both ABI-mandated and preferred.
/// Each field is a power of two, giving the alignment a maximum value
/// of 2<sup>(2<sup>8</sup> - 1)</sup>, which is limited by LLVM to an i32,
/// with a maximum capacity of 2<sup>31</sup> - 1 or 2147483647.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Align {
    abi: u8,
    pref: u8,
}

impl Align {
    pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
        Align::from_bytes(Size::from_bits(abi).bytes(),
                          Size::from_bits(pref).bytes())
    }

    pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
        let log2 = |align: u64| {
            // Treat an alignment of 0 bytes like 1-byte alignment.
            if align == 0 {
                return Ok(0);
            }

            let mut bytes = align;
            let mut pow: u8 = 0;
            while (bytes & 1) == 0 {
                pow += 1;
                bytes >>= 1;
            }
            if bytes != 1 {
                Err(format!("`{}` is not a power of 2", align))
            } else if pow > 30 {
                Err(format!("`{}` is too large", align))
            } else {
                Ok(pow)
            }
        };

        Ok(Align {
            abi: log2(abi)?,
            pref: log2(pref)?,
        })
    }

    pub fn abi(self) -> u64 {
        1 << self.abi
    }

    pub fn pref(self) -> u64 {
        1 << self.pref
    }

    pub fn abi_bits(self) -> u64 {
        self.abi() * 8
    }

    pub fn pref_bits(self) -> u64 {
        self.pref() * 8
    }

    pub fn min(self, other: Align) -> Align {
        Align {
            abi: cmp::min(self.abi, other.abi),
            pref: cmp::min(self.pref, other.pref),
        }
    }

    pub fn max(self, other: Align) -> Align {
        Align {
            abi: cmp::max(self.abi, other.abi),
            pref: cmp::max(self.pref, other.pref),
        }
    }
}
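
// Hedged sketch: `Align` stores the log2 of the byte values, so only
// powers of two round-trip; anything else is rejected with an error.
#[cfg(test)]
#[test]
fn align_log2_encoding_sketch() {
    let a = Align::from_bytes(16, 32).unwrap();
    assert_eq!(a.abi(), 16);
    assert_eq!(a.pref_bits(), 256);
    assert!(Align::from_bytes(24, 24).is_err()); // 24 is not a power of 2
}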

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl<'a, 'tcx> Integer {
    pub fn size(&self) -> Size {
        match *self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
        let dl = cx.data_layout();

        match *self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Find the smallest Integer type which can represent the signed value.
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000...0x7fff_ffff_ffff_ffff => I64,
            _ => I128
        }
    }

    /// Find the smallest Integer type which can represent the unsigned value.
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0...0x0000_0000_0000_00ff => I8,
            0...0x0000_0000_0000_ffff => I16,
            0...0x0000_0000_ffff_ffff => I32,
            0...0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Find the smallest integer with the given alignment.
    pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        let wanted = align.abi();
        for &candidate in &[I8, I16, I32, I64, I128] {
            if wanted == candidate.align(dl).abi() && wanted == candidate.size().bytes() {
                return Some(candidate);
            }
        }
        None
    }

    /// Find the largest integer with the given alignment or less.
    pub fn approximate_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Integer {
        let dl = cx.data_layout();

        let wanted = align.abi();
        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for &candidate in &[I64, I32, I16] {
            if wanted >= candidate.align(dl).abi() && wanted >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    /// Get the Integer type from an attr::IntType.
    pub fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Find the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    /// N.B.: u128 values above i128::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                  ty: Ty<'tcx>,
                  repr: &ReprOptions,
                  min: i128,
                  max: i128)
                  -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128,
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
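
// Hedged sketch: the `fit_*` helpers above pick the narrowest integer
// for a discriminant range; `repr_discr` then widens the result as
// `#[repr]` and the target ABI demand.
#[cfg(test)]
#[test]
fn integer_fit_sketch() {
    assert_eq!(Integer::fit_signed(-128), I8);
    assert_eq!(Integer::fit_signed(128), I16);   // 128 overflows i8
    assert_eq!(Integer::fit_unsigned(255), I8);
    assert_eq!(Integer::fit_unsigned(256), I16);
}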

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer
}

impl<'a, 'tcx> Primitive {
    pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            Pointer => dl.pointer_size
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align
        }
    }

    pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            F32 => tcx.types.f32,
            F64 => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_nil()),
        }
    }
}
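
// Hedged sketch: fixed-width primitives have target-independent sizes;
// only `Pointer` consults the data layout (64-bit in the `Default`
// layout above).
#[cfg(test)]
#[test]
fn primitive_size_sketch() {
    let dl = TargetDataLayout::default();
    assert_eq!(F32.size(&dl).bits(), 32);
    assert_eq!(Int(I16, true).size(&dl).bits(), 16);
    assert_eq!(Pointer.size(&dl).bits(), 64);
}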

/// Information about one scalar component of a Rust type.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct Scalar {
    pub value: Primitive,

    /// Inclusive wrap-around range of valid values, that is, if
    /// min > max, it represents min..=u128::MAX followed by 0..=max.
    // FIXME(eddyb) always use the shortest range, e.g. by finding
    // the largest space between two consecutive valid values and
    // taking everything else as the (shortest) valid range.
    pub valid_range: RangeInclusive<u128>,
}

impl Scalar {
    pub fn is_bool(&self) -> bool {
        if let Int(I8, _) = self.value {
            self.valid_range == (0..=1)
        } else {
            false
        }
    }
}
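
// Hedged sketch: membership in a wrap-around `valid_range` (when
// start > end, the range wraps through u128::MAX, as documented on
// `Scalar` above). The `contains` helper is illustrative only and
// not part of the original source.
#[cfg(test)]
#[test]
fn wrap_around_valid_range_sketch() {
    fn contains(range: &RangeInclusive<u128>, x: u128) -> bool {
        if range.start <= range.end {
            range.start <= x && x <= range.end
        } else {
            x >= range.start || x <= range.end
        }
    }
    assert!(contains(&(0..=1), 1));      // bool-like scalar
    assert!(!contains(&(0..=1), 2));
    assert!(contains(&(2..=0), !0u128)); // wrapped: excludes only 1
    assert!(!contains(&(2..=0), 1));
}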

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Debug)]
pub enum FieldPlacement {
    /// All fields start at no offset. The `usize` is the field count.
    Union(usize),

    /// Array/vector-like placement, with all fields of identical types.
    Array {
        stride: Size,
        count: u64
    },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not necessarily go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: Vec<Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how fields were permuted.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: Vec<u32>
    }
}

impl FieldPlacement {
    pub fn count(&self) -> usize {
        match *self {
            FieldPlacement::Union(count) => count,
            FieldPlacement::Array { count, .. } => {
                let usize_count = count as usize;
                assert_eq!(usize_count as u64, count);
                usize_count
            }
            FieldPlacement::Arbitrary { ref offsets, .. } => offsets.len()
        }
    }

    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldPlacement::Union(_) => Size::from_bytes(0),
            FieldPlacement::Array { stride, count } => {
                let i = i as u64;
                assert!(i < count);
                stride * i
            }
            FieldPlacement::Arbitrary { ref offsets, .. } => offsets[i]
        }
    }

    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldPlacement::Union(_) |
            FieldPlacement::Array { .. } => i,
            FieldPlacement::Arbitrary { ref memory_index, .. } => {
                let r = memory_index[i];
                assert_eq!(r as usize as u32, r);
                r as usize
            }
        }
    }

    /// Get source indices of the fields by increasing offsets.
    #[inline]
    pub fn index_by_increasing_offset<'a>(&'a self) -> impl iter::Iterator<Item=usize>+'a {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldPlacement::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for i in 0..self.count() {
                    inverse_small[memory_index[i] as usize] = i as u8;
                }
            } else {
                inverse_big = vec![0; self.count()];
                for i in 0..self.count() {
                    inverse_big[memory_index[i] as usize] = i as u32;
                }
            }
        }

        (0..self.count()).map(move |i| {
            match *self {
                FieldPlacement::Union(_) |
                FieldPlacement::Array { .. } => i,
                FieldPlacement::Arbitrary { .. } => {
                    if use_small { inverse_small[i] as usize }
                    else { inverse_big[i] as usize }
                }
            }
        })
    }
}
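
// Hedged sketch: `memory_index` maps source field order to memory
// order, and `index_by_increasing_offset` computes the inverse
// permutation. The values here are made up for illustration.
#[cfg(test)]
#[test]
fn field_placement_inversion_sketch() {
    // Source fields 0, 1, 2 live at offsets 4, 0, 8 respectively,
    // so their memory positions are 1, 0, 2.
    let fields = FieldPlacement::Arbitrary {
        offsets: vec![Size::from_bytes(4), Size::from_bytes(0), Size::from_bytes(8)],
        memory_index: vec![1, 0, 2],
    };
    let by_offset: Vec<usize> = fields.index_by_increasing_offset().collect();
    assert_eq!(by_offset, [1, 0, 2]); // field 1 first, then 0, then 2
}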

/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64
    },
    Aggregate {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    }
}

impl Abi {
    /// Returns true if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        match *self {
            Abi::Uninhabited |
            Abi::Scalar(_) |
            Abi::ScalarPair(..) |
            Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => !sized
        }
    }
}

#[derive(PartialEq, Eq, Hash, Debug)]
pub enum Variants {
    /// Single-variant enums, structs/tuples, unions, and all non-ADTs.
    Single {
        index: usize
    },

    /// General-case enums: for each case there is a struct, and they all have
    /// space reserved for the discriminant, and their first field starts
    /// at a non-0 offset, after where the discriminant would go.
    Tagged {
        discr: Scalar,
        variants: Vec<LayoutDetails>,
    },

    /// Multiple cases distinguished by a niche (values invalid for a type):
    /// the variant `dataful_variant` contains a niche at an arbitrary
    /// offset (field 0 of the enum), which for a variant with discriminant
    /// `d` is set to `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    NicheFilling {
        dataful_variant: usize,
        niche_variants: RangeInclusive<usize>,
        niche: Scalar,
        niche_start: u128,
        variants: Vec<LayoutDetails>,
    }
}
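
// Hedged sketch of the `NicheFilling` discriminant encoding described
// above: variant `d` is stored in the niche field as
// `(d - niche_variants.start).wrapping_add(niche_start)`. The values
// here are illustrative, resembling `Option<&T>`.
#[cfg(test)]
#[test]
fn niche_encoding_sketch() {
    // `None` (variant 0) is the only niched variant and the pointer's
    // niche starts at 0, so `None` is encoded as the null pointer value.
    let (niche_variants_start, niche_start) = (0usize, 0u128);
    let encode = |d: usize| ((d - niche_variants_start) as u128).wrapping_add(niche_start);
    assert_eq!(encode(0), 0);
}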

#[derive(Copy, Clone, Debug)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

#[derive(PartialEq, Eq, Hash, Debug)]
pub struct LayoutDetails {
    pub variants: Variants,
    pub fields: FieldPlacement,
    pub abi: Abi,
    pub align: Align,
    pub size: Size
}

impl LayoutDetails {
    fn scalar<C: HasDataLayout>(cx: C, scalar: Scalar) -> Self {
        let size = scalar.value.size(cx);
        let align = scalar.value.align(cx);
        LayoutDetails {
            variants: Variants::Single { index: 0 },
            fields: FieldPlacement::Union(0),
            abi: Abi::Scalar(scalar),
            size,
            align,
        }
    }

    fn uninhabited(field_count: usize) -> Self {
        let align = Align::from_bytes(1, 1).unwrap();
        LayoutDetails {
            variants: Variants::Single { index: 0 },
            fields: FieldPlacement::Union(field_count),
            abi: Abi::Uninhabited,
            align,
            size: Size::from_bytes(0)
        }
    }
}

fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    let (param_env, ty) = query.into_parts();

    let rec_limit = tcx.sess.recursion_limit.get();
    let depth = tcx.layout_depth.get();
    if depth > rec_limit {
        tcx.sess.fatal(
            &format!("overflow representing the type `{}`", ty));
    }

    tcx.layout_depth.set(depth + 1);
    let cx = LayoutCx { tcx, param_env };
    let layout = cx.layout_raw_uncached(ty);
    tcx.layout_depth.set(depth);

    layout
}

pub fn provide(providers: &mut ty::maps::Providers) {
    *providers = ty::maps::Providers {
        layout_raw,
        ..*providers
    };
}

#[derive(Copy, Clone)]
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>
}

impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    fn layout_raw_uncached(self, ty: Ty<'tcx>)
                           -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar {
                value,
                valid_range: 0..=(!0 >> (128 - bits))
            }
        };
        let scalar = |value: Primitive| {
            tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
        };
        let scalar_pair = |a: Scalar, b: Scalar| {
            let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
            let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
            let size = (b_offset + b.value.size(dl)).abi_align(align);
            LayoutDetails {
                variants: Variants::Single { index: 0 },
                fields: FieldPlacement::Arbitrary {
                    offsets: vec![Size::from_bytes(0), b_offset],
                    memory_index: vec![0, 1]
                },
                abi: Abi::ScalarPair(a, b),
                align,
                size
            }
        };

        #[derive(Copy, Clone, Debug)]
        enum StructKind {
            /// A tuple, closure, or univariant which cannot be coerced to unsized.
            AlwaysSized,
            /// A univariant, the last field of which may be coerced to unsized.
            MaybeUnsized,
            /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
            Prefixed(Size, Align),
        }
        let univariant_uninterned = |fields: &[TyLayout], repr: &ReprOptions, kind| {
            let packed = repr.packed();
            if packed && repr.align > 0 {
                bug!("struct cannot be packed and aligned");
            }

            let mut align = if packed {
                dl.i8_align
            } else {
                dl.aggregate_align
            };

            let mut sized = true;
            let mut offsets = vec![Size::from_bytes(0); fields.len()];
            let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

            // Anything with repr(C) or repr(packed) doesn't optimize.
            let mut optimize = (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty();
            if let StructKind::Prefixed(_, align) = kind {
                optimize &= align.abi() == 1;
            }

            if optimize {
                let end = if let StructKind::MaybeUnsized = kind {
                    fields.len() - 1
                } else {
                    fields.len()
                };
                let optimizing = &mut inverse_memory_index[..end];
                match kind {
                    StructKind::AlwaysSized |
                    StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(f.align.abi()))
                        })
                    }
                    StructKind::Prefixed(..) => {
                        optimizing.sort_by_key(|&x| fields[x as usize].align.abi());
                    }
                }
            }

            // inverse_memory_index holds field indices by increasing memory offset.
            // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
            // We now write field offsets to the corresponding offset slot;
            // field 5 with offset 0 puts 0 in offsets[5].
            // At the bottom of this function, we use inverse_memory_index to produce memory_index.

            let mut offset = Size::from_bytes(0);

            if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
                if !packed {
                    align = align.max(prefix_align);
                }
                offset = prefix_size.abi_align(prefix_align);
            }

            for &i in &inverse_memory_index {
                let field = fields[i as usize];
                if !sized {
                    bug!("univariant: field #{} of `{}` comes after unsized field",
                         offsets.len(), ty);
                }

                if field.abi == Abi::Uninhabited {
                    return Ok(LayoutDetails::uninhabited(fields.len()));
                }

                if field.is_unsized() {
                    sized = false;
                }

                // Invariant: offset < dl.obj_size_bound() <= 1 << 61
                if !packed {
                    offset = offset.abi_align(field.align);
                    align = align.max(field.align);
                }

                debug!("univariant offset: {:?} field: {:#?}", offset, field);
                offsets[i as usize] = offset;

                offset = offset.checked_add(field.size, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
            }

            if repr.align > 0 {
                let repr_align = repr.align as u64;
                align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
                debug!("univariant repr_align: {:?}", repr_align);
            }

            debug!("univariant min_size: {:?}", offset);
            let min_size = offset;

            // As stated above, inverse_memory_index holds field indices by increasing offset.
            // This makes it an already-sorted view of the offsets vec.
            // To invert it, consider:
            // If field 5 has offset 0, the first element of inverse_memory_index is 5,
            // and memory_index[5] should be 0, i.e. memory_index maps each index back to i.
            // Note: if we didn't optimize, it's already right.

            let mut memory_index;
            if optimize {
                memory_index = vec![0; inverse_memory_index.len()];

                for i in 0..inverse_memory_index.len() {
                    memory_index[inverse_memory_index[i] as usize] = i as u32;
                }
            } else {
                memory_index = inverse_memory_index;
            }

            let size = min_size.abi_align(align);
            let mut abi = Abi::Aggregate { sized };

            // Unpack newtype ABIs and find scalar pairs.
            if sized && size.bytes() > 0 {
                // All other fields must be ZSTs, and we need them to all start at 0.
                let mut zst_offsets =
                    offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
                if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                    let mut non_zst_fields =
                        fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                    match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                        // We have exactly one non-ZST field.
                        (Some((i, field)), None, None) => {
                            // Field fills the struct and it has a scalar or scalar pair ABI.
                            if offsets[i].bytes() == 0 &&
                               align.abi() == field.align.abi() &&
                               size == field.size {
                                match field.abi {
                                    // For plain scalars, or vectors of them, we can't unpack
                                    // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                    Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                        abi = field.abi.clone();
                                    }
                                    // But scalar pairs are Rust-specific and get
                                    // treated as aggregates by C ABIs anyway.
                                    Abi::ScalarPair(..) => {
                                        abi = field.abi.clone();
                                    }
                                    _ => {}
                                }
                            }
                        }

                        // Two non-ZST fields, and they're both scalars.
                        (Some((i, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
                        })), Some((j, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
                        })), None) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = scalar_pair(a.clone(), b.clone());
                            let pair_offsets = match pair.fields {
                                FieldPlacement::Arbitrary {
                                    ref offsets,
                                    ref memory_index
                                } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!()
                            };
                            if offsets[i] == pair_offsets[0] &&
                               offsets[j] == pair_offsets[1] &&
                               align == pair.align &&
                               size == pair.size {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }

                        _ => {}
                    }
                }
            }

            Ok(LayoutDetails {
                variants: Variants::Single { index: 0 },
                fields: FieldPlacement::Arbitrary {
                    offsets,
                    memory_index
                },
                abi,
                align,
                size
            })
        };
        let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
        };
        assert!(!ty.has_infer_types());

        Ok(match ty.sty {
            // Basic scalars.
            ty::TyBool => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I8, false),
                    valid_range: 0..=1
                }))
            }
            ty::TyChar => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I32, false),
                    valid_range: 0..=0x10FFFF
                }))
            }
            ty::TyInt(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
            }
            ty::TyUint(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
            }
            ty::TyFloat(FloatTy::F32) => scalar(F32),
            ty::TyFloat(FloatTy::F64) => scalar(F64),
            ty::TyFnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range.start = 1;
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::TyNever => {
                tcx.intern_layout(LayoutDetails::uninhabited(0))
            }

            // Potentially-fat pointers.
            ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range.start = 1;
                }

                let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env);
                if pointee.is_sized(tcx, param_env, DUMMY_SP) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail(pointee);
                let metadata = match unsized_part.sty {
                    ty::TyForeign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::TySlice(_) | ty::TyStr => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::TyDynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range.start = 1;
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::TyArray(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_associated_type_in_env(&count, param_env);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let element = self.layout_of(element)?;
                let count = count.val.to_const_int().unwrap().to_u64().unwrap();
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: 0 },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Aggregate { sized: true },
                    align: element.align,
                    size
                })
            }
            ty::TySlice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: 0 },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: element.align,
                    size: Size::from_bytes(0)
                })
            }
            ty::TyStr => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: 0 },
                    fields: FieldPlacement::Array {
                        stride: Size::from_bytes(1),
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: dl.i8_align,
                    size: Size::from_bytes(0)
                })
            }

            // Odd unit types.
            ty::TyFnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            ty::TyDynamic(..) | ty::TyForeign(..) => {
                let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
                                                     StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }

            // Tuples, generators and closures.
            ty::TyGenerator(def_id, ref substs, _) => {
                let tys = substs.field_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                           &ReprOptions::default(),
                           StructKind::AlwaysSized)?
            }

            ty::TyClosure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                           &ReprOptions::default(),
                           StructKind::AlwaysSized)?
            }

            ty::TyTuple(tys, _) => {
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                           &ReprOptions::default(), kind)?
            }

            // SIMD vector types.
            ty::TyAdt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx) as u64;
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.abi_align(align);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: 0 },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    size,
                    align,
                })
            }

            // ADTs.
            ty::TyAdt(def, substs) => {
                // Cache the field layouts.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<Vec<_>, _>>()?;

                if def.is_union() {
                    let packed = def.repr.packed();
                    if packed && def.repr.align > 0 {
                        bug!("union cannot be packed and aligned");
                    }

                    let mut align = if def.repr.packed() {
                        dl.i8_align
                    } else {
                        dl.aggregate_align
                    };

                    if def.repr.align > 0 {
                        let repr_align = def.repr.align as u64;
                        align = align.max(
                            Align::from_bytes(repr_align, repr_align).unwrap());
                    }

                    let mut size = Size::from_bytes(0);
                    for field in &variants[0] {
                        assert!(!field.is_unsized());

                        if !packed {
                            align = align.max(field.align);
                        }
                        size = cmp::max(size, field.size);
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index: 0 },
                        fields: FieldPlacement::Union(variants[0].len()),
                        abi: Abi::Aggregate { sized: true },
                        align,
                        size: size.abi_align(align)
                    }));
                }

                let (inh_first, inh_second) = {
                    let mut inh_variants = (0..variants.len()).filter(|&v| {
                        variants[v].iter().all(|f| f.abi != Abi::Uninhabited)
                    });
                    (inh_variants.next(), inh_variants.next())
                };
                if inh_first.is_none() {
                    // Uninhabited because it has no variants, or only uninhabited ones.
                    return Ok(tcx.intern_layout(LayoutDetails::uninhabited(0)));
                }

                let is_struct = !def.is_enum() ||
                    // Only one variant is inhabited.
                    (inh_second.is_none() &&
                     // Representation optimizations are allowed.
                     !def.repr.inhibit_enum_layout_opt() &&
                     // The inhabited variant either has data ...
                     (!variants[inh_first.unwrap()].is_empty() ||
                      // ... or there are other, uninhabited, variants.
                      variants.len() > 1));
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = inh_first.unwrap();
                    let kind = if def.is_enum() || variants[v].len() == 0 {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized = tcx.type_of(last_field.did)
                            .is_sized(tcx, param_env, DUMMY_SP);
                        if !always_sized { StructKind::MaybeUnsized }
                        else { StructKind::AlwaysSized }
                    };

                    let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    // Exclude 0 from the range of a newtype ABI NonZero<T>.
                    if Some(def.did) == self.tcx.lang_items().non_zero() {
                        match st.abi {
                            Abi::Scalar(ref mut scalar) |
                            Abi::ScalarPair(ref mut scalar, _) => {
                                if scalar.valid_range.start == 0 {
                                    scalar.valid_range.start = 1;
                                }
                            }
                            _ => {}
                        }
                    }
                    return Ok(tcx.intern_layout(st));
                }

                let no_explicit_discriminants = def.variants.iter().enumerate()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));

                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = usize::max_value()..=0;

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter().enumerate() {
                        for f in fields {
                            if f.abi == Abi::Uninhabited {
                                continue 'variants;
                            }
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        if niche_variants.start > v {
                            niche_variants.start = v;
                        }
                        niche_variants.end = v;
                    }

                    if niche_variants.start > niche_variants.end {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end - niche_variants.start + 1) as u128;
                        for (field_index, field) in variants[i].iter().enumerate() {
                            let (offset, niche, niche_start) =
                                match field.find_niche(self, count)? {
                                    Some(niche) => niche,
                                    None => continue
                                };
                            let mut align = dl.aggregate_align;
                            let st = variants.iter().enumerate().map(|(j, v)| {
                                let mut st = univariant_uninterned(v,
                                    &def.repr, StructKind::AlwaysSized)?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            }).collect::<Result<Vec<_>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + offset;
                            let size = st[i].size;

                            let abi = if offset.bytes() == 0 && niche.value.size(dl) == size {
                                Abi::Scalar(niche.clone())
                            } else {
                                Abi::Aggregate { sized: true }
                            };

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::NicheFilling {
                                    dataful_variant: i,
                                    niche_variants,
                                    niche,
                                    niche_start,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0]
                                },
                                abi,
                                size,
                                align,
                            }));
                        }
                    }
                }

                let (mut min, mut max) = (i128::max_value(), i128::min_value());
                for (i, discr) in def.discriminants(tcx).enumerate() {
                    if variants[i].iter().any(|f| f.abi == Abi::Uninhabited) {
                        continue;
                    }
                    let x = discr.to_u128_unchecked() as i128;
                    if x < min { min = x; }
                    if x > max { max = x; }
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::from_bytes(0);

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256, 256).unwrap();
                assert_eq!(Integer::for_abi_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl);
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut variants = variants.into_iter().enumerate().map(|(i, field_layouts)| {
                    let mut st = univariant_uninterned(&field_layouts,
                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi() != 1 {
                            start_align = start_align.min(field.align);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<Vec<_>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.abi_align(align);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than
                    // typeck at this point (based on the values the discriminant can take
                    // on), mostly because this discriminant will be loaded and then stored
                    // into a variable of the type calculated by typeck. Consider such a
                    // case (a bug): typeck decided on a byte-sized discriminant, but layout
                    // thinks we need 16 bits to store all the discriminant values. Then, in
                    // trans, storing this 16-bit discriminant into an 8-bit sized temporary
                    // would have to discard some of the space necessary to represent it
                    // (or layout is wrong in thinking it needs 16 bits).
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make the discriminant type however large (as an
                    // optimisation) after this point; we'll just truncate the value we load
                    // in trans.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment.
                let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity);

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut variants {
                        if variant.abi == Abi::Uninhabited {
                            continue;
                        }
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!()
                        }
                    }
                }

                let discr = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128)..=(max as u128)
                };
                let abi = if discr.value.size(dl) == size {
                    Abi::Scalar(discr.clone())
                } else {
                    Abi::Aggregate { sized: true }
                };
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Tagged {
                        discr,
                        variants
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::from_bytes(0)],
                        memory_index: vec![0]
                    },
                    abi,
                    align,
                    size
                })
            }

            // Types with no meaningful known layout.
            ty::TyProjection(_) | ty::TyAnon(..) => {
                let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }
            ty::TyParam(_) => {
                return Err(LayoutError::Unknown(ty));
            }
            ty::TyGeneratorWitness(..) | ty::TyInfer(_) | ty::TyError => {
                bug!("LayoutDetails::compute: unexpected type `{}`", ty)
            }
        })
    }

    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline]
    fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, record layouts for
        // dumping later. Ignore layouts computed in non-empty parameter
        // environments, and non-monomorphic layouts, as the user only wants
        // to see the results of the final trans session.
        if
            !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
            layout.ty.has_param_types() ||
            layout.ty.has_self_ty() ||
            !self.param_env.caller_bounds.is_empty()
        {
            return;
        }

        self.record_layout_for_printing_outlined(layout)
    }

    fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
        // (delay format until we actually need it)
        let record = |kind, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                                   type_desc,
                                                                   layout.align,
                                                                   layout.size,
                                                                   opt_discr_size,
                                                                   variants);
        };

        let adt_def = match layout.ty.sty {
            ty::TyAdt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::TyClosure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();

        let build_variant_info = |n: Option<ast::Name>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::from_bytes(0);
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi(),
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };

        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].name);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.name).collect();
                    record(adt_kind.into(),
                           None,
                           vec![build_variant_info(Some(variant_def.name),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), None, vec![]);
                }
            }

            Variants::NicheFilling { .. } |
            Variants::Tagged { .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                let variant_infos: Vec<_> =
                    adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.name).collect();
                        build_variant_info(Some(variant_def.name),
                                           &fields,
                                           layout.for_variant(self, i))
                    })
                    .collect();
                record(adt_kind.into(), match layout.variants {
                    Variants::Tagged { ref discr, .. } => Some(discr.value.size(self)),
                    _ => None
                }, variant_infos);
            }
        }
    }
}
1828
1829 /// Type size "skeleton", i.e. the only information determining a type's size.
1830 /// While this is conservative, (aside from constant sizes, only pointers,
1831 /// newtypes thereof and null pointer optimized enums are allowed), it is
1832 /// enough to statically check common usecases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
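
// For illustration (these are consequences of `compute` below, not extra
// rules): `u64` has the skeleton `Known(Size::from_bytes(8))`, while
// `&'a T` with `T` a type parameter cannot get a static layout and instead
// becomes `Pointer { non_zero: true, tail: T }`. Two such skeletons compare
// equal under `same_size` whenever their tails match, even though the
// concrete size is unknown until `T` is substituted.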

impl<'a, 'tcx> SizeSkeleton<'tcx> {
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::TyParam(_) | ty::TyProjection(_) => {
                        assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::TyAdt(def, substs) => {
                // Only newtypes and enums with the nullable-pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i: usize| {
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero ||
                                Some(def.did) == tcx.lang_items().non_zero(),
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::TyProjection(_) | ty::TyAnon(..) => {
                let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    pub fn same_size(self, other: SizeSkeleton) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
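
// A minimal sketch of the intended consumer (the helper below is
// hypothetical; rustc's actual transmute size check lives elsewhere in the
// compiler): a transmute between two generic types can be accepted when
// both sides reduce to skeletons of provably equal size.
//
//     fn transmute_sizes_match<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
//                                        param_env: ty::ParamEnv<'tcx>,
//                                        src: Ty<'tcx>,
//                                        dst: Ty<'tcx>) -> bool {
//         match (SizeSkeleton::compute(src, tcx, param_env),
//                SizeSkeleton::compute(dst, tcx, param_env)) {
//             (Ok(a), Ok(b)) => a.same_size(b),
//             // If either side has no skeleton, the sizes can't be compared.
//             _ => false,
//         }
//     }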

/// The details of the layout of a type, alongside the type itself.
/// Provides various type traversal APIs (e.g. recursing into fields).
///
/// Note that the details are NOT guaranteed to always be identical
/// to those obtained from `layout_of(ty)`, as we need to produce
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e. discriminants) and fat pointers.
#[derive(Copy, Clone, Debug)]
pub struct TyLayout<'tcx> {
    pub ty: Ty<'tcx>,
    details: &'tcx LayoutDetails
}

impl<'tcx> Deref for TyLayout<'tcx> {
    type Target = &'tcx LayoutDetails;
    fn deref(&self) -> &&'tcx LayoutDetails {
        &self.details
    }
}

pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}

impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.global_tcx()
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.tcx.tcx()
    }
}
/// Abstracts over `T` and `Result<T, E>`, so that code generic over a
/// layout context works whether that context's `layout_of` is infallible
/// (returning `TyLayout` directly) or fallible (returning `Result`).
pub trait MaybeResult<T> {
    fn from_ok(x: T) -> Self;
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}

impl<T> MaybeResult<T> for T {
    fn from_ok(x: T) -> Self {
        x
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        f(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    fn from_ok(x: T) -> Self {
        Ok(x)
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        self.map(f)
    }
}
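
// A small sketch of why this trait exists (the function is hypothetical,
// but the pattern is exactly the one `TyLayout::for_variant` and
// `TyLayout::field` use below): one body serves both a context whose
// `TyLayout` is plain and one whose `TyLayout` is a `Result`.
//
//     fn zst_or_field<'tcx, C>(layout: TyLayout<'tcx>, cx: C, i: usize)
//                              -> C::TyLayout
//         where C: LayoutOf<Ty<'tcx>> + HasTyCtxt<'tcx>,
//               C::TyLayout: MaybeResult<TyLayout<'tcx>>
//     {
//         if layout.fields.count() == 0 {
//             // An already-built layout is injected with `from_ok`...
//             MaybeResult::from_ok(layout)
//         } else {
//             // ...while `map_same` post-processes a possibly-fallible one:
//             // it is `Result::map` in the fallible case and a plain
//             // closure call in the infallible one.
//             layout.field(cx, i).map_same(|field| field)
//         }
//     }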

pub trait LayoutOf<T> {
    type TyLayout;

    fn layout_of(self, ty: T) -> Self::TyLayout;
}

impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.reveal_all();
        let ty = self.tcx.normalize_associated_type_in_env(&ty, param_env);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it can,
        // however, trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}

impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for LayoutCx<'tcx, ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>> {
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
        // `param_env` is already fully revealed here, so the further
        // `reveal_all()` calls the original code made were redundant.
        let param_env = self.param_env.reveal_all();
        let ty = self.tcx.normalize_associated_type_in_env(&ty, param_env);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it can,
        // however, trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}

// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self,
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl<'a, 'tcx> ty::maps::TyCtxtAt<'a, 'tcx, 'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self,
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
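
// Typical call site, as seen from elsewhere in the compiler (a sketch; `ty`
// and `param_env` stand for values the caller already has):
//
//     let layout = tcx.layout_of(param_env.and(ty))?;
//     let (size, align) = layout.size_and_align();
//
// The `ParamEnvAnd` wrapper keeps the type and its parameter environment
// together, matching the key shape of the underlying `layout_raw` query.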

impl<'a, 'tcx> TyLayout<'tcx> {
    pub fn for_variant<C>(&self, cx: C, variant_index: usize) -> Self
        where C: LayoutOf<Ty<'tcx>> + HasTyCtxt<'tcx>,
              C::TyLayout: MaybeResult<TyLayout<'tcx>>
    {
        let details = match self.variants {
            Variants::Single { index } if index == variant_index => self.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                cx.layout_of(self.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                let fields = match self.ty.sty {
                    ty::TyAdt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let mut details = LayoutDetails::uninhabited(fields);
                details.variants = Variants::Single { index: variant_index };
                cx.tcx().intern_layout(details)
            }

            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: self.ty,
            details
        }
    }
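
    // Sketch of the calling pattern (mirroring what
    // `record_layout_for_printing` does above when sizing each enum
    // variant; `cx` is any context satisfying the bounds on `for_variant`):
    //
    //     for i in 0..n_variants {
    //         let variant_layout = layout.for_variant(cx, i);
    //         // `variant_layout.fields` now describes variant i's fields,
    //         // placed within the enum's overall size and alignment.
    //     }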

    pub fn field<C>(&self, cx: C, i: usize) -> C::TyLayout
        where C: LayoutOf<Ty<'tcx>> + HasTyCtxt<'tcx>,
              C::TyLayout: MaybeResult<TyLayout<'tcx>>
    {
        let tcx = cx.tcx();
        cx.layout_of(match self.ty.sty {
            ty::TyBool |
            ty::TyChar |
            ty::TyInt(_) |
            ty::TyUint(_) |
            ty::TyFloat(_) |
            ty::TyFnPtr(_) |
            ty::TyNever |
            ty::TyFnDef(..) |
            ty::TyGeneratorWitness(..) |
            ty::TyForeign(..) |
            ty::TyDynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", self)
            }

            // Potentially-fat pointers.
            ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < 2);

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_nil();
                    let ptr_ty = if self.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = self.ty;
                        ptr_layout
                    });
                }

                match tcx.struct_tail(pointee).sty {
                    ty::TySlice(_) |
                    ty::TyStr => tcx.types.usize,
                    ty::TyDynamic(..) => {
                        // FIXME(eddyb) use a usize/fn() array with
                        // the correct number of vtable slots.
                        tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil())
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", self)
                }
            }

            // Arrays and slices.
            ty::TyArray(element, _) |
            ty::TySlice(element) => element,
            ty::TyStr => tcx.types.u8,

            // Tuples, generators and closures.
            ty::TyClosure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::TyGenerator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::TyTuple(tys, _) => tys[i],

            // SIMD vector types.
            ty::TyAdt(def, ..) if def.repr.simd() => {
                self.ty.simd_type(tcx)
            }

            // ADTs.
            ty::TyAdt(def, substs) => {
                match self.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        let layout = LayoutDetails::scalar(tcx, discr.clone());
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
            ty::TyInfer(_) | ty::TyError => {
                bug!("TyLayout::field_type: unexpected type `{}`", self.ty)
            }
        })
    }
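
    // Sketch of walking a layout's fields together with their offsets (a
    // hypothetical loop; `field` yields each field's own layout, while
    // `self.fields` holds the placement information):
    //
    //     for i in 0..layout.fields.count() {
    //         let field = layout.field(cx, i)?;
    //         let offset = layout.fields.offset(i);
    //         // `field.size` and `field.align` describe the field itself;
    //         // `offset` positions it within the enclosing layout.
    //     }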

    /// Returns true if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

    /// Returns true if the type is a ZST and not unsized.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Uninhabited => true,
            Abi::Scalar(_) |
            Abi::ScalarPair(..) |
            Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0
        }
    }

    pub fn size_and_align(&self) -> (Size, Align) {
        (self.size, self.align)
    }

    /// Finds the offset of a niche leaf field with at least `count`
    /// consecutive invalid values, starting from the given type and
    /// recursing through aggregates.
    /// The returned tuple is `(offset, scalar, niche_start)`.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche<C>(&self, cx: C, count: u128)
                     -> Result<Option<(Size, Scalar, u128)>, LayoutError<'tcx>>
        where C: LayoutOf<Ty<'tcx>, TyLayout = Result<Self, LayoutError<'tcx>>> +
                 HasTyCtxt<'tcx>
    {
        let scalar_component = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(cx).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            let niches = if v.start <= v.end {
                v.start + (max_value - v.end)
            } else {
                v.start - v.end - 1
            };
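
            // Worked examples (illustrative, for an 8-bit scalar, so
            // max_value == 255):
            // - `bool` has valid_range 0..=1, giving
            //   niches = 0 + (255 - 1) = 254 invalid values above `true`;
            // - a NonZero-style scalar has valid_range 1..=255, giving
            //   niches = 1 + (255 - 255) = 1 (just the value 0);
            // - a wrapping valid_range 254..=2 (start > end) leaves
            //   niches = 254 - 2 - 1 = 251 invalid values, i.e. 3..=253.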

            // Give up if we can't fit `count` consecutive niches.
            if count > niches {
                return None;
            }

            let niche_start = v.end.wrapping_add(1) & max_value;
            let niche_end = v.end.wrapping_add(count) & max_value;
            Some((offset, Scalar {
                value,
                valid_range: v.start..=niche_end
            }, niche_start))
        };

        // Local variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized,
        // so we don't look for niches there.
        if let ty::TyGenerator(..) = self.ty.sty {
            return Ok(None);
        }

        match self.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_component(scalar, Size::from_bytes(0)));
            }
            Abi::ScalarPair(ref a, ref b) => {
                return Ok(scalar_component(a, Size::from_bytes(0)).or_else(|| {
                    scalar_component(b, a.value.size(cx).abi_align(b.value.align(cx)))
                }));
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_component(element, Size::from_bytes(0)));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero; let's recurse and find out.
        if let FieldPlacement::Union(_) = self.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant); other unions are unsafe to inspect.
            if let Variants::Single { .. } = self.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = self.fields {
            if self.fields.count() > 0 {
                return self.field(cx, 0)?.find_niche(cx, count);
            }
        }
        for i in 0..self.fields.count() {
            let r = self.field(cx, i)?.find_niche(cx, count)?;
            if let Some((offset, scalar, niche_value)) = r {
                let offset = self.fields.offset(i) + offset;
                return Ok(Some((offset, scalar, niche_value)));
            }
        }
        Ok(None)
    }
}
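
// How this drives the niche-filling optimization, by way of example: for
// `Option<&T>` (with `T` sized and concrete), the payload `&T` is a `Scalar`
// whose valid_range excludes 0. `find_niche` with `count == 1` therefore
// reports offset 0 with a niche starting at the value 0, and the enum layout
// code can encode `None` as the all-zero bit pattern, making `Option<&T>`
// the same size as `&T`.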

impl<'gcx> HashStable<StableHashingContext<'gcx>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref discr,
                ref variants,
            } => {
                discr.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                niche_variants: RangeInclusive { start, end },
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                start.hash_stable(hcx, hasher);
                end.hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'gcx> HashStable<StableHashingContext<'gcx>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'gcx> HashStable<StableHashingContext<'gcx>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'gcx> HashStable<StableHashingContext<'gcx>> for Scalar {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        let Scalar { value, valid_range: RangeInclusive { start, end } } = *self;
        value.hash_stable(hcx, hasher);
        start.hash_stable(hcx, hasher);
        end.hash_stable(hcx, hasher);
    }
}

impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});

impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    F32,
    F64,
    Pointer
});

impl_stable_hash_for!(struct ::ty::layout::Align {
    abi,
    pref
});

impl_stable_hash_for!(struct ::ty::layout::Size {
    raw
});
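
// `impl_stable_hash_for!` avoids writing impls like the manual ones above by
// hand: for a struct it expands to roughly the following (a sketch of the
// expansion for `Align`; the exact expansion is defined by this crate's
// macro collection):
//
//     impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
//         fn hash_stable<W: StableHasherResult>(&self,
//                                               hcx: &mut StableHashingContext<'gcx>,
//                                               hasher: &mut StableHasher<W>) {
//             let Align { ref abi, ref pref } = *self;
//             abi.hash_stable(hcx, hasher);
//             pref.hash_stable(hcx, hasher);
//         }
//     }
//
// The `enum` form additionally hashes the discriminant first, as the manual
// impls in this file do.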

impl<'gcx> HashStable<StableHashingContext<'gcx>> for LayoutError<'gcx> {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}