// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

pub use self::Integer::*;
pub use self::Layout::*;
pub use self::Primitive::*;

use session::{self, DataTypeKind, Session};
use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, ReprFlags};

use syntax::ast::{self, FloatTy, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::i64;
use std::iter;
use std::ops::Deref;

/// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: Align,
    pub i8_align: Align,
    pub i16_align: Align,
    pub i32_align: Align,
    pub i64_align: Align,
    pub i128_align: Align,
    pub f32_align: Align,
    pub f64_align: Align,
    pub pointer_size: Size,
    pub pointer_align: Align,
    pub aggregate_align: Align,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, Align)>
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: Align::from_bits(8, 8).unwrap(),
            i8_align: Align::from_bits(8, 8).unwrap(),
            i16_align: Align::from_bits(16, 16).unwrap(),
            i32_align: Align::from_bits(32, 32).unwrap(),
            i64_align: Align::from_bits(32, 64).unwrap(),
            i128_align: Align::from_bits(32, 64).unwrap(),
            f32_align: Align::from_bits(32, 32).unwrap(),
            f64_align: Align::from_bits(64, 64).unwrap(),
            pointer_size: Size::from_bits(64),
            pointer_align: Align::from_bits(64, 64).unwrap(),
            aggregate_align: Align::from_bits(0, 64).unwrap(),
            vector_align: vec![
                (Size::from_bits(64), Align::from_bits(64, 64).unwrap()),
                (Size::from_bits(128), Align::from_bits(128, 128).unwrap())
            ]
        }
    }
}

impl TargetDataLayout {
    pub fn parse(sess: &Session) -> TargetDataLayout {
        // Parse a bit count from a string.
        let parse_bits = |s: &str, kind: &str, cause: &str| {
            s.parse::<u64>().unwrap_or_else(|err| {
                sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
                                  kind, s, cause, err));
                0
            })
        };

        // Parse a size string.
        let size = |s: &str, cause: &str| {
            Size::from_bits(parse_bits(s, "size", cause))
        };

        // Parse an alignment string.
        let align = |s: &[&str], cause: &str| {
            if s.is_empty() {
                sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause));
            }
            let abi = parse_bits(s[0], "alignment", cause);
            let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause));
            Align::from_bits(abi, pref).unwrap_or_else(|err| {
                sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}",
                                  cause, err));
                Align::from_bits(8, 8).unwrap()
            })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in sess.target.target.data_layout.split("-") {
            match &spec.split(":").collect::<Vec<_>>()[..] {
                &["e"] => dl.endian = Endian::Little,
                &["E"] => dl.endian = Endian::Big,
                &["a", ref a..] => dl.aggregate_align = align(a, "a"),
                &["f32", ref a..] => dl.f32_align = align(a, "f32"),
                &["f64", ref a..] => dl.f64_align = align(a, "f64"),
                &[p @ "p", s, ref a..] | &[p @ "p0", s, ref a..] => {
                    dl.pointer_size = size(s, p);
                    dl.pointer_align = align(a, p);
                }
                &[s, ref a..] if s.starts_with("i") => {
                    let bits = match s[1..].parse::<u64>() {
                        Ok(bits) => bits,
                        Err(_) => {
                            size(&s[1..], "i"); // For the user error.
                            continue;
                        }
                    };
                    let a = align(a, s);
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64...128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                &[s, ref a..] if s.starts_with("v") => {
                    let v_size = size(&s[1..], "v");
                    let a = align(a, s);
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Perform consistency checks against the Target information.
        let endian_str = match dl.endian {
            Endian::Little => "little",
            Endian::Big => "big"
        };
        if endian_str != sess.target.target.target_endian {
            sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
                               architecture is {}-endian, while \"target-endian\" is `{}`",
                              endian_str, sess.target.target.target_endian));
        }

        if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width {
            sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
                               pointers are {}-bit, while \"target-pointer-width\" is `{}`",
                              dl.pointer_size.bits(), sess.target.target.target_pointer_width));
        }

        dl
    }
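
    // Illustrative sketch (editorial note, not part of the original source):
    // a typical x86_64-style "data-layout" string such as
    // "e-m:e-i64:64-f80:128-n8:16:32:64-S128" is split on `-` above, so "e"
    // selects little-endian and "i64:64" sets `i64_align` (and, being the
    // largest i{64..128} spec present, also `i128_align`) to 64 bits, while
    // unrecognized specs such as "m:e" or "n8:16:32:64" fall into the
    // `_ => {}` arm and are ignored.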

    /// Returns the exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object, plus one byte past the end, while still allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => bug!("obj_size_bound: unknown pointer bit size {}", bits)
        }
    }

    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
        }
    }
}
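
// Illustrative sketch (editorial note, not part of the original source): on a
// target whose `pointer_size` is 64 bits, `dl.obj_size_bound()` above is
// `1 << 47` bytes and `dl.ptr_sized_integer()` is `I64`; a 16-bit target gets
// `1 << 15` and `I16` respectively.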

pub trait HasDataLayout: Copy {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl<'a> HasDataLayout for &'a TargetDataLayout {
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

impl<'a, 'tcx> HasDataLayout for TyCtxt<'a, 'tcx, 'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

/// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone)]
pub enum Endian {
    Little,
    Big
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct Size {
    raw: u64
}

impl Size {
    pub fn from_bits(bits: u64) -> Size {
        Size::from_bytes((bits + 7) / 8)
    }

    pub fn from_bytes(bytes: u64) -> Size {
        if bytes >= (1 << 61) {
            bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes)
        }
        Size {
            raw: bytes
        }
    }

    pub fn bytes(self) -> u64 {
        self.raw
    }

    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    pub fn abi_align(self, align: Align) -> Size {
        let mask = align.abi() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> {
        let dl = cx.data_layout();

        // Each Size is less than dl.obj_size_bound(), so the sum is
        // also less than 1 << 62 (and therefore can't overflow).
        let bytes = self.bytes() + offset.bytes();

        if bytes < dl.obj_size_bound() {
            Some(Size::from_bytes(bytes))
        } else {
            None
        }
    }

    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> {
        let dl = cx.data_layout();

        // Each Size is less than dl.obj_size_bound(), but the product can
        // still overflow u64, so use a checked multiplication.
        match self.bytes().checked_mul(count) {
            Some(bytes) if bytes < dl.obj_size_bound() => {
                Some(Size::from_bytes(bytes))
            }
            _ => None
        }
    }
}
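
// Illustrative sketch (editorial note, not part of the original source):
// `Size::from_bits(12)` rounds up to 2 bytes, and
// `Size::from_bytes(13).abi_align(Align::from_bytes(8, 8).unwrap())` yields
// 16 bytes, since `(13 + 7) & !7 == 16`.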

/// Alignment of a type in bytes, both ABI-mandated and preferred.
/// Since alignments are always powers of 2, we can pack both in one byte,
/// giving each a nibble (4 bits) for a maximum alignment of 2<sup>15</sup> = 32768.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Align {
    raw: u8
}

impl Align {
    pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
        Align::from_bytes((abi + 7) / 8, (pref + 7) / 8)
    }

    pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
        let pack = |align: u64| {
            // Treat an alignment of 0 bytes like 1-byte alignment.
            if align == 0 {
                return Ok(0);
            }

            let mut bytes = align;
            let mut pow: u8 = 0;
            while (bytes & 1) == 0 {
                pow += 1;
                bytes >>= 1;
            }
            if bytes != 1 {
                Err(format!("`{}` is not a power of 2", align))
            } else if pow > 0x0f {
                Err(format!("`{}` is too large", align))
            } else {
                Ok(pow)
            }
        };

        Ok(Align {
            raw: pack(abi)? | (pack(pref)? << 4)
        })
    }

    pub fn abi(self) -> u64 {
        1 << (self.raw & 0xf)
    }

    pub fn pref(self) -> u64 {
        1 << (self.raw >> 4)
    }

    pub fn min(self, other: Align) -> Align {
        let abi = cmp::min(self.raw & 0x0f, other.raw & 0x0f);
        let pref = cmp::min(self.raw & 0xf0, other.raw & 0xf0);
        Align {
            raw: abi | pref
        }
    }

    pub fn max(self, other: Align) -> Align {
        let abi = cmp::max(self.raw & 0x0f, other.raw & 0x0f);
        let pref = cmp::max(self.raw & 0xf0, other.raw & 0xf0);
        Align {
            raw: abi | pref
        }
    }
}
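
// Illustrative sketch (editorial note, not part of the original source): with
// the nibble packing above, `Align::from_bytes(4, 8).unwrap()` stores the ABI
// exponent 2 in the low nibble and the preferred exponent 3 in the high
// nibble (`raw == 0b0011_0010`), so `abi()` returns 4 and `pref()` returns 8.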

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum Integer {
    I1,
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    pub fn size(&self) -> Size {
        match *self {
            I1 => Size::from_bits(1),
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
        let dl = cx.data_layout();

        match *self {
            I1 => dl.i1_align,
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    pub fn to_ty<'a, 'tcx>(&self, tcx: &ty::TyCtxt<'a, 'tcx, 'tcx>,
                           signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I1, false) => tcx.types.u8,
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I1, true) => tcx.types.i8,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Find the smallest Integer type which can represent the signed value.
    pub fn fit_signed(x: i64) -> Integer {
        match x {
            -0x0000_0000_0000_0001...0x0000_0000_0000_0000 => I1,
            -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000...0x7fff_ffff_ffff_ffff => I64,
            _ => I128
        }
    }

    /// Find the smallest Integer type which can represent the unsigned value.
    pub fn fit_unsigned(x: u64) -> Integer {
        match x {
            0...0x0000_0000_0000_0001 => I1,
            0...0x0000_0000_0000_00ff => I8,
            0...0x0000_0000_0000_ffff => I16,
            0...0x0000_0000_ffff_ffff => I32,
            0...0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }
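
    // Illustrative sketch (editorial note, not part of the original source):
    // `Integer::fit_signed(-128)` is `I8` while `Integer::fit_signed(128)`
    // needs `I16`; `Integer::fit_unsigned(255)` is `I8` and
    // `Integer::fit_unsigned(256)` is `I16`.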

    /// Find the smallest integer with the given alignment.
    pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        let wanted = align.abi();
        for &candidate in &[I8, I16, I32, I64] {
            let ty = Int(candidate);
            if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() {
                return Some(candidate);
            }
        }
        None
    }

    /// Get the Integer type from an attr::IntType.
    pub fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Is) | attr::UnsignedInt(UintTy::Us) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Find the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    /// N.B.: u64 values above i64::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i64,
                            max: i64)
                            -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u64
        // which can fit all i64 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
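
// Illustrative sketch (editorial note, not part of the original source): for
// an enum whose discriminants span 0..=200 and that has no `#[repr]`
// attribute, `repr_discr` returns `(I8, false)`; with `#[repr(C)]` the result
// is widened to at least `(I32, false)`, and with `#[repr(i16)]` the hint
// itself is returned as `(I16, true)`.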

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum Primitive {
    Int(Integer),
    F32,
    F64,
    Pointer
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(I1) | Int(I8) => Size::from_bits(8),
            Int(I16) => Size::from_bits(16),
            Int(I32) | F32 => Size::from_bits(32),
            Int(I64) | F64 => Size::from_bits(64),
            Int(I128) => Size::from_bits(128),
            Pointer => dl.pointer_size
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
        let dl = cx.data_layout();

        match self {
            Int(I1) => dl.i1_align,
            Int(I8) => dl.i8_align,
            Int(I16) => dl.i16_align,
            Int(I32) => dl.i32_align,
            Int(I64) => dl.i64_align,
            Int(I128) => dl.i128_align,
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align
        }
    }
}

/// Path through fields of nested structures.
// FIXME(eddyb) use small vector optimization for the common case.
pub type FieldPath = Vec<u32>;

/// A structure, a product type in ADT terms.
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct Struct {
    /// Maximum alignment of fields and repr alignment.
    pub align: Align,

    /// Primitive alignment of fields without repr alignment.
    pub primitive_align: Align,

    /// If true, no alignment padding is used.
    pub packed: bool,

    /// If true, the size is exact, otherwise it's only a lower bound.
    pub sized: bool,

    /// Offsets for the first byte of each field, ordered to match the source definition order.
    /// This vector does not go in increasing order.
    /// FIXME(eddyb) use small vector optimization for the common case.
    pub offsets: Vec<Size>,

    /// Maps source order field indices to memory order indices, depending on how fields were permuted.
    /// FIXME (camlorn) also consider small vector optimization here.
    pub memory_index: Vec<u32>,

    pub min_size: Size,
}

// Info required to optimize struct layout.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
enum StructKind {
    // A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSizedUnivariant,
    // A univariant, the last field of which may be coerced to unsized.
    MaybeUnsizedUnivariant,
    // A univariant, but part of an enum.
    EnumVariant,
}

impl<'a, 'tcx> Struct {
    fn new(dl: &TargetDataLayout,
           fields: &Vec<&'a Layout>,
           repr: &ReprOptions,
           kind: StructKind,
           scapegoat: Ty<'tcx>)
           -> Result<Struct, LayoutError<'tcx>> {
        if repr.packed() && repr.align > 0 {
            bug!("Struct cannot be packed and aligned");
        }

        let align = if repr.packed() {
            dl.i8_align
        } else {
            dl.aggregate_align
        };

        let mut ret = Struct {
            align: align,
            primitive_align: align,
            packed: repr.packed(),
            sized: true,
            offsets: vec![],
            memory_index: vec![],
            min_size: Size::from_bytes(0),
        };

        // Anything with repr(C) or repr(packed) doesn't optimize.
        // Neither do 1-member and 2-member structs.
        // In addition, code in trans assumes that 2-element structs can become pairs.
        // It's easier to just short-circuit here.
        let can_optimize = (fields.len() > 2 || StructKind::EnumVariant == kind)
            && (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty();

        let (optimize, sort_ascending) = match kind {
            StructKind::AlwaysSizedUnivariant => (can_optimize, false),
            StructKind::MaybeUnsizedUnivariant => (can_optimize, false),
            StructKind::EnumVariant => {
                assert!(fields.len() >= 1, "Enum variants must have discriminants.");
                (can_optimize && fields[0].size(dl).bytes() == 1, true)
            }
        };

        ret.offsets = vec![Size::from_bytes(0); fields.len()];
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        if optimize {
            let start = if let StructKind::EnumVariant = kind { 1 } else { 0 };
            let end = if let StructKind::MaybeUnsizedUnivariant = kind {
                fields.len() - 1
            } else {
                fields.len()
            };
            if end > start {
                let optimizing = &mut inverse_memory_index[start..end];
                if sort_ascending {
                    optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi());
                } else {
                    optimizing.sort_by(|&a, &b| {
                        let a = fields[a as usize].align(dl).abi();
                        let b = fields[b as usize].align(dl).abi();
                        b.cmp(&a)
                    });
                }
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we use inverse_memory_index to produce memory_index.

        if let StructKind::EnumVariant = kind {
            assert_eq!(inverse_memory_index[0], 0,
                       "Enum variant discriminants must have the lowest offset.");
        }

        let mut offset = Size::from_bytes(0);

        for i in inverse_memory_index.iter() {
            let field = fields[*i as usize];
            if !ret.sized {
                bug!("Struct::new: field #{} of `{}` comes after unsized field",
                     ret.offsets.len(), scapegoat);
            }

            if field.is_unsized() {
                ret.sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            if !ret.packed {
                let align = field.align(dl);
                let primitive_align = field.primitive_align(dl);
                ret.align = ret.align.max(align);
                ret.primitive_align = ret.primitive_align.max(primitive_align);
                offset = offset.abi_align(align);
            }

            debug!("Struct::new offset: {:?} field: {:?} {:?}", offset, field, field.size(dl));
            ret.offsets[*i as usize] = offset;

            offset = offset.checked_add(field.size(dl), dl)
                           .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
        }

        if repr.align > 0 {
            let repr_align = repr.align as u64;
            ret.align = ret.align.max(Align::from_bytes(repr_align, repr_align).unwrap());
            debug!("Struct::new repr_align: {:?}", repr_align);
        }

        debug!("Struct::new min_size: {:?}", offset);
        ret.min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        if optimize {
            ret.memory_index = vec![0; inverse_memory_index.len()];

            for i in 0..inverse_memory_index.len() {
                ret.memory_index[inverse_memory_index[i] as usize] = i as u32;
            }
        } else {
            ret.memory_index = inverse_memory_index;
        }

        Ok(ret)
    }

    /// Get the size with trailing alignment padding.
    pub fn stride(&self) -> Size {
        self.min_size.abi_align(self.align)
    }

    /// Determine whether a structure would be zero-sized, given its fields.
    fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
                              -> Result<bool, LayoutError<'tcx>>
    where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
        for field in fields {
            let field = field?;
            if field.is_unsized() || field.size(dl).bytes() > 0 {
                return Ok(false);
            }
        }
        Ok(true)
    }

    /// Get indices of the tys that made this struct by increasing offset.
    #[inline]
    pub fn field_index_by_increasing_offset<'b>(&'b self) -> impl iter::Iterator<Item=usize>+'b {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.memory_index.len() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if use_small {
            for i in 0..self.memory_index.len() {
                inverse_small[self.memory_index[i] as usize] = i as u8;
            }
        } else {
            inverse_big = vec![0; self.memory_index.len()];
            for i in 0..self.memory_index.len() {
                inverse_big[self.memory_index[i] as usize] = i as u32;
            }
        }

        (0..self.memory_index.len()).map(move |i| {
            if use_small { inverse_small[i] as usize }
            else { inverse_big[i] as usize }
        })
    }
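
    // Illustrative sketch (editorial note, not part of the original source):
    // for a struct whose fields were reordered so that
    // `memory_index == [2, 0, 1]` (source field 0 is third in memory), the
    // loops above build the inverse permutation `[1, 2, 0]`, so
    // `field_index_by_increasing_offset()` yields source indices 1, 2, 0 in
    // order of increasing offset.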

    /// Find the path leading to a non-zero leaf field, starting from
    /// the given type and recursing through aggregates.
    /// The tuple is `(path, source_path)`,
    /// where `path` is in memory order and `source_path` in source order.
    // FIXME(eddyb) track value ranges and traverse already optimized enums.
    fn non_zero_field_in_type(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                              param_env: ty::ParamEnv<'tcx>,
                              ty: Ty<'tcx>)
                              -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'tcx>> {
        match (ty.layout(tcx, param_env)?, &ty.sty) {
            (&Scalar { non_zero: true, .. }, _) |
            (&CEnum { non_zero: true, .. }, _) => Ok(Some((vec![], vec![]))),
            (&FatPointer { non_zero: true, .. }, _) => {
                Ok(Some((vec![FAT_PTR_ADDR as u32], vec![FAT_PTR_ADDR as u32])))
            }

            // Is this the NonZero lang item wrapping a pointer or integer type?
            (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => {
                let fields = &def.struct_variant().fields;
                assert_eq!(fields.len(), 1);
                match *fields[0].ty(tcx, substs).layout(tcx, param_env)? {
                    // FIXME(eddyb) also allow floating-point types here.
                    Scalar { value: Int(_), non_zero: false } |
                    Scalar { value: Pointer, non_zero: false } => {
                        Ok(Some((vec![0], vec![0])))
                    }
                    FatPointer { non_zero: false, .. } => {
                        let tmp = vec![FAT_PTR_ADDR as u32, 0];
                        Ok(Some((tmp.clone(), tmp)))
                    }
                    _ => Ok(None)
                }
            }

            // Perhaps one of the fields of this struct is non-zero
            // let's recurse and find out
            (&Univariant { ref variant, .. }, &ty::TyAdt(def, substs)) if def.is_struct() => {
                Struct::non_zero_field_paths(
                    tcx,
                    param_env,
                    def.struct_variant().fields.iter().map(|field| {
                        field.ty(tcx, substs)
                    }),
                    Some(&variant.memory_index[..]))
            }

            // Perhaps one of the upvars of this closure is non-zero
            (&Univariant { ref variant, .. }, &ty::TyClosure(def, substs)) => {
                let upvar_tys = substs.upvar_tys(def, tcx);
                Struct::non_zero_field_paths(
                    tcx,
                    param_env,
                    upvar_tys,
                    Some(&variant.memory_index[..]))
            }
            // Can we use one of the fields in this tuple?
            (&Univariant { ref variant, .. }, &ty::TyTuple(tys, _)) => {
                Struct::non_zero_field_paths(
                    tcx,
                    param_env,
                    tys.iter().cloned(),
                    Some(&variant.memory_index[..]))
            }

            // Is this a fixed-size array of something non-zero
            // with at least one element?
            (_, &ty::TyArray(ety, d)) if d > 0 => {
                Struct::non_zero_field_paths(
                    tcx,
                    param_env,
                    Some(ety).into_iter(),
                    None)
            }

            (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => {
                let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
                if ty == normalized {
                    return Ok(None);
                }
                return Struct::non_zero_field_in_type(tcx, param_env, normalized);
            }

            // Anything else is not a non-zero type.
            _ => Ok(None)
        }
    }

    /// Find the path leading to a non-zero leaf field, starting from
    /// the given set of fields and recursing through aggregates.
    /// Returns Some((path, source_path)) on success.
    /// `path` is translated to memory order. `source_path` is not.
    fn non_zero_field_paths<I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                               param_env: ty::ParamEnv<'tcx>,
                               fields: I,
                               permutation: Option<&[u32]>)
                               -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'tcx>>
    where I: Iterator<Item=Ty<'tcx>> {
        for (i, ty) in fields.enumerate() {
            let r = Struct::non_zero_field_in_type(tcx, param_env, ty)?;
            if let Some((mut path, mut source_path)) = r {
                source_path.push(i as u32);
                let index = if let Some(p) = permutation {
                    p[i] as usize
                } else {
                    i
                };
                path.push(index as u32);
                return Ok(Some((path, source_path)));
            }
        }
        Ok(None)
    }

    pub fn over_align(&self) -> Option<u32> {
        let align = self.align.abi();
        let primitive_align = self.primitive_align.abi();
        if align > primitive_align {
            Some(align as u32)
        } else {
            None
        }
    }
}

/// An untagged union.
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct Union {
    pub align: Align,
    pub primitive_align: Align,

    pub min_size: Size,

    /// If true, no alignment padding is used.
    pub packed: bool,
}

impl<'a, 'tcx> Union {
    fn new(dl: &TargetDataLayout, packed: bool) -> Union {
        let align = if packed { dl.i8_align } else { dl.aggregate_align };
        Union {
            align: align,
            primitive_align: align,
            min_size: Size::from_bytes(0),
            packed: packed,
        }
    }

    /// Extend the Union with more fields.
    fn extend<I>(&mut self, dl: &TargetDataLayout,
                 fields: I,
                 scapegoat: Ty<'tcx>)
                 -> Result<(), LayoutError<'tcx>>
    where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
        for (index, field) in fields.enumerate() {
            let field = field?;
            if field.is_unsized() {
                bug!("Union::extend: field #{} of `{}` is unsized",
                     index, scapegoat);
            }

            debug!("Union::extend field: {:?} {:?}", field, field.size(dl));

            if !self.packed {
                self.align = self.align.max(field.align(dl));
                self.primitive_align = self.primitive_align.max(field.primitive_align(dl));
            }
            self.min_size = cmp::max(self.min_size, field.size(dl));
        }

        debug!("Union::extend min-size: {:?}", self.min_size);

        Ok(())
    }

    /// Get the size with trailing alignment padding.
    pub fn stride(&self) -> Size {
        self.min_size.abi_align(self.align)
    }

    pub fn over_align(&self) -> Option<u32> {
        let align = self.align.abi();
        let primitive_align = self.primitive_align.abi();
        if align > primitive_align {
            Some(align as u32)
        } else {
            None
        }
    }
}
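
// Illustrative sketch (editorial note, not part of the original source): for
// `union U { a: u8, b: u32 }` on a typical 32- or 64-bit target, `extend`
// leaves `min_size` at the largest field size (4 bytes) and `align` at the
// largest field alignment (4), so `stride()` is 4 bytes; a `#[repr(packed)]`
// union would keep 1-byte alignment instead.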

/// The first half of a fat pointer.
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// Type layout, from which size and alignment can be cheaply computed.
/// For ADTs, it also includes field placement and enum optimizations.
/// NOTE: Because Layout is interned, redundant information should be
/// kept to a minimum, e.g. it includes no sub-component Ty or Layout.
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum Layout {
    /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr.
    Scalar {
        value: Primitive,
        // If true, the value cannot represent a bit pattern of all zeroes.
        non_zero: bool
    },

    /// SIMD vectors, from structs marked with #[repr(simd)].
    Vector {
        element: Primitive,
        count: u64
    },

    /// TyArray, TySlice or TyStr.
    Array {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
        align: Align,
        primitive_align: Align,
        element_size: Size,
        count: u64
    },

    /// TyRawPtr or TyRef with a !Sized pointee.
    FatPointer {
        metadata: Primitive,
        // If true, the pointer cannot be null.
        non_zero: bool
    },

    // Remaining variants are all ADTs such as structs, enums or tuples.

    /// C-like enums; basically an integer.
    CEnum {
        discr: Integer,
        signed: bool,
        non_zero: bool,
        // Inclusive discriminant range.
        // If min > max, it represents min...u64::MAX followed by 0...max.
        // FIXME(eddyb) always use the shortest range, e.g. by finding
        // the largest space between two consecutive discriminants and
        // taking everything else as the (shortest) discriminant range.
        min: u64,
        max: u64
    },

    /// Single-case enums, and structs/tuples.
    Univariant {
        variant: Struct,
        // If true, the structure is NonZero.
        // FIXME(eddyb) use a newtype Layout kind for this.
        non_zero: bool
    },

    /// Untagged unions.
    UntaggedUnion {
        variants: Union,
    },

    /// General-case enums: for each case there is a struct, and they
    /// all start with a field for the discriminant.
    General {
        discr: Integer,
        variants: Vec<Struct>,
        size: Size,
        align: Align,
        primitive_align: Align,
    },

    /// Two cases distinguished by a nullable pointer: the case with discriminant
    /// `nndiscr` must have a single field which is known to be nonnull due to its type.
    /// The other case is known to be zero sized. Hence we represent the enum
    /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
    /// otherwise it indicates the other case.
    ///
    /// For example, `std::option::Option` instantiated at a safe pointer type
    /// is represented such that `None` is a null pointer and `Some` is the
    /// identity function.
    RawNullablePointer {
        nndiscr: u64,
        value: Primitive
    },

    /// Two cases distinguished by a nullable pointer: the case with discriminant
    /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
    /// field is known to be nonnull due to its type; if that field is null, then
    /// it represents the other case, which is known to be zero sized.
    StructWrappedNullablePointer {
        nndiscr: u64,
        nonnull: Struct,
        // N.B. There is a 0 at the start, for LLVM GEP through a pointer.
        discrfield: FieldPath,
        // Like discrfield, but in source order. For debuginfo.
        discrfield_source: FieldPath
    }
}
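
// Illustrative sketch (editorial note, not part of the original source): on a
// 64-bit target, `Option<&u8>` is laid out as
// `RawNullablePointer { nndiscr: 1, value: Pointer }`, occupying a single
// pointer with `None` encoded as the all-zero bit pattern, while something
// like `Option<(usize, &u8)>` becomes a `StructWrappedNullablePointer` whose
// `discrfield` points at the non-null reference inside the tuple.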

#[derive(Copy, Clone, Debug)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

impl<'a, 'tcx> Layout {
    pub fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            param_env: ty::ParamEnv<'tcx>,
                            ty: Ty<'tcx>)
                            -> Result<&'tcx Layout, LayoutError<'tcx>> {
        let success = |layout| Ok(tcx.intern_layout(layout));
        let dl = &tcx.data_layout;
        assert!(!ty.has_infer_types());

        let ptr_layout = |pointee: Ty<'tcx>| {
            let non_zero = !ty.is_unsafe_ptr();
            let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env);
            if pointee.is_sized(tcx, param_env, DUMMY_SP) {
                Ok(Scalar { value: Pointer, non_zero: non_zero })
            } else {
                let unsized_part = tcx.struct_tail(pointee);
                let meta = match unsized_part.sty {
                    ty::TySlice(_) | ty::TyStr => {
                        Int(dl.ptr_sized_integer())
                    }
                    ty::TyDynamic(..) => Pointer,
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };
                Ok(FatPointer { metadata: meta, non_zero: non_zero })
            }
        };
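
        // Illustrative sketch (editorial note, not part of the original
        // source): `ptr_layout` maps `&u8` to
        // `Scalar { value: Pointer, non_zero: true }`, `*const [u8]` to
        // `FatPointer { metadata: Int(ptr-sized), non_zero: false }` (the
        // metadata is the slice length), and `&Trait` to
        // `FatPointer { metadata: Pointer, non_zero: true }` (the vtable
        // pointer).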

        let layout = match ty.sty {
            // Basic scalars.
            ty::TyBool => Scalar { value: Int(I1), non_zero: false },
            ty::TyChar => Scalar { value: Int(I32), non_zero: false },
            ty::TyInt(ity) => {
                Scalar {
                    value: Int(Integer::from_attr(dl, attr::SignedInt(ity))),
                    non_zero: false
                }
            }
            ty::TyUint(ity) => {
                Scalar {
                    value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))),
                    non_zero: false
                }
            }
            ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false },
            ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false },
            ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true },

            // The never type.
            ty::TyNever => Univariant {
                variant: Struct::new(dl, &vec![], &ReprOptions::default(),
                                     StructKind::AlwaysSizedUnivariant, ty)?,
                non_zero: false
            },

            // Potentially-fat pointers.
            ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                ptr_layout(pointee)?
            }
            ty::TyAdt(def, _) if def.is_box() => {
                ptr_layout(ty.boxed_ty())?
            }

            // Arrays and slices.
            ty::TyArray(element, count) => {
                let element = element.layout(tcx, param_env)?;
                let element_size = element.size(dl);
                // FIXME(eddyb) Don't use host `usize` for array lengths.
                let usize_count: usize = count;
                let count = usize_count as u64;
                if element_size.checked_mul(count, dl).is_none() {
                    return Err(LayoutError::SizeOverflow(ty));
                }
                Array {
                    sized: true,
                    align: element.align(dl),
                    primitive_align: element.primitive_align(dl),
                    element_size: element_size,
                    count: count
                }
            }
            ty::TySlice(element) => {
                let element = element.layout(tcx, param_env)?;
                Array {
                    sized: false,
                    align: element.align(dl),
                    primitive_align: element.primitive_align(dl),
                    element_size: element.size(dl),
                    count: 0
                }
            }
            ty::TyStr => {
                Array {
                    sized: false,
                    align: dl.i8_align,
                    primitive_align: dl.i8_align,
                    element_size: Size::from_bytes(1),
                    count: 0
                }
            }

            // Odd unit types.
            ty::TyFnDef(..) => {
                Univariant {
                    variant: Struct::new(dl, &vec![], &ReprOptions::default(),
                                         StructKind::AlwaysSizedUnivariant, ty)?,
                    non_zero: false
                }
            }
            ty::TyDynamic(..) => {
                let mut unit = Struct::new(dl, &vec![], &ReprOptions::default(),
                                           StructKind::AlwaysSizedUnivariant, ty)?;
                unit.sized = false;
                Univariant { variant: unit, non_zero: false }
            }

            // Tuples and closures.
            ty::TyClosure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                let st = Struct::new(dl,
                    &tys.map(|ty| ty.layout(tcx, param_env))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSizedUnivariant, ty)?;
                Univariant { variant: st, non_zero: false }
            }

            ty::TyTuple(tys, _) => {
                // FIXME(camlorn): if we ever allow unsized tuples, this needs to be checked.
                // See the univariant case below to learn how.
                let st = Struct::new(dl,
                    &tys.iter().map(|ty| ty.layout(tcx, param_env))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?;
                Univariant { variant: st, non_zero: false }
            }

            // SIMD vector types.
            ty::TyAdt(def, ..) if def.repr.simd() => {
                let element = ty.simd_type(tcx);
                match *element.layout(tcx, param_env)? {
                    Scalar { value, .. } => {
                        return success(Vector {
                            element: value,
                            count: ty.simd_size(tcx) as u64
                        });
                    }
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element));
                    }
                }
            }

            // ADTs.
            ty::TyAdt(def, substs) => {
                if def.variants.is_empty() {
                    // Uninhabitable; represent as unit
                    // (Typechecking will reject discriminant-sizing attrs.)

                    return success(Univariant {
                        variant: Struct::new(dl, &vec![],
                                             &def.repr, StructKind::AlwaysSizedUnivariant, ty)?,
                        non_zero: false
                    });
                }

                if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) {
                    // All bodies empty -> intlike
                    let (mut min, mut max, mut non_zero) = (i64::max_value(),
                                                            i64::min_value(),
                                                            true);
                    for discr in def.discriminants(tcx) {
                        let x = discr.to_u128_unchecked() as i64;
                        if x == 0 { non_zero = false; }
                        if x < min { min = x; }
                        if x > max { max = x; }
                    }

                    // FIXME: should handle i128? signed-value based impl is weird and hard to
                    // grok.
                    let (discr, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
                    return success(CEnum {
                        discr: discr,
                        signed: signed,
                        non_zero: non_zero,
                        // FIXME: should be u128?
                        min: min as u64,
                        max: max as u64
                    });
                }

                if !def.is_enum() || (def.variants.len() == 1 &&
                                      !def.repr.inhibit_enum_layout_opt()) {
                    // Struct, or union, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let kind = if def.is_enum() || def.variants[0].fields.len() == 0 {
                        StructKind::AlwaysSizedUnivariant
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let fields = &def.variants[0].fields;
                        let last_field = &fields[fields.len()-1];
                        let always_sized = tcx.type_of(last_field.did)
                                              .is_sized(tcx, param_env, DUMMY_SP);
                        if !always_sized { StructKind::MaybeUnsizedUnivariant }
                        else { StructKind::AlwaysSizedUnivariant }
                    };

                    let fields = def.variants[0].fields.iter().map(|field| {
                        field.ty(tcx, substs).layout(tcx, param_env)
                    }).collect::<Result<Vec<_>, _>>()?;
                    let layout = if def.is_union() {
                        let mut un = Union::new(dl, def.repr.packed());
                        un.extend(dl, fields.iter().map(|&f| Ok(f)), ty)?;
                        UntaggedUnion { variants: un }
                    } else {
                        let st = Struct::new(dl, &fields, &def.repr,
                                             kind, ty)?;
                        let non_zero = Some(def.did) == tcx.lang_items.non_zero();
                        Univariant { variant: st, non_zero: non_zero }
                    };
                    return success(layout);
                }

                // Since there's at least one
                // non-empty body, explicit discriminants should have
                // been rejected by a checker before this point.
                for (i, v) in def.variants.iter().enumerate() {
                    if v.discr != ty::VariantDiscr::Relative(i) {
                        bug!("non-C-like enum {} with specified discriminants",
                             tcx.item_path_str(def.did));
                    }
                }

                // Cache the substituted and normalized variant field types.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| field.ty(tcx, substs)).collect::<Vec<_>>()
                }).collect::<Vec<_>>();

                if variants.len() == 2 && !def.repr.inhibit_enum_layout_opt() {
                    // Nullable pointer optimization
                    for discr in 0..2 {
                        let other_fields = variants[1 - discr].iter().map(|ty| {
                            ty.layout(tcx, param_env)
                        });
                        if !Struct::would_be_zero_sized(dl, other_fields)? {
                            continue;
                        }
                        let paths = Struct::non_zero_field_paths(tcx,
                                                                 param_env,
                                                                 variants[discr].iter().cloned(),
                                                                 None)?;
                        let (mut path, mut path_source) = if let Some(p) = paths { p }
                            else { continue };

                        // FIXME(eddyb) should take advantage of a newtype.
                        if path == &[0] && variants[discr].len() == 1 {
                            let value = match *variants[discr][0].layout(tcx, param_env)? {
                                Scalar { value, .. } => value,
                                CEnum { discr, .. } => Int(discr),
                                _ => bug!("Layout::compute: `{}`'s non-zero \
                                           `{}` field not scalar?!",
                                          ty, variants[discr][0])
                            };
                            return success(RawNullablePointer {
                                nndiscr: discr as u64,
                                value: value,
                            });
                        }

                        let st = Struct::new(dl,
                            &variants[discr].iter().map(|ty| ty.layout(tcx, param_env))
                                .collect::<Result<Vec<_>, _>>()?,
                            &def.repr, StructKind::AlwaysSizedUnivariant, ty)?;

                        // We have to fix the last element of path here.
                        let mut i = *path.last().unwrap();
                        i = st.memory_index[i as usize];
                        *path.last_mut().unwrap() = i;
                        path.push(0); // For GEP through a pointer.
                        path.reverse();
                        path_source.push(0);
                        path_source.reverse();

                        return success(StructWrappedNullablePointer {
                            nndiscr: discr as u64,
                            nonnull: st,
                            discrfield: path,
                            discrfield_source: path_source
                        });
                    }
                }
                // The general case.
                let discr_max = (variants.len() - 1) as i64;
                assert!(discr_max >= 0);
                let (min_ity, _) = Integer::repr_discr(tcx, ty, &def.repr, 0, discr_max);
                let mut align = dl.aggregate_align;
                let mut primitive_align = dl.aggregate_align;
                let mut size = Size::from_bytes(0);

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256, 256).unwrap();

                // Create the set of structs that represent each variant
                // Use the minimum integer type we figured out above
                let discr = Scalar { value: Int(min_ity), non_zero: false };
                let mut variants = variants.into_iter().map(|fields| {
                    let mut fields = fields.into_iter().map(|field| {
                        field.layout(tcx, param_env)
                    }).collect::<Result<Vec<_>, _>>()?;
                    fields.insert(0, &discr);
                    let st = Struct::new(dl,
                        &fields,
                        &def.repr, StructKind::EnumVariant, ty)?;
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    // It is important to skip the first field.
                    for i in st.field_index_by_increasing_offset().skip(1) {
                        let field = fields[i];
                        let field_align = field.align(dl);
                        if field.size(dl).bytes() != 0 || field_align.abi() != 1 {
                            start_align = start_align.min(field_align);
                            break;
                        }
                    }
                    size = cmp::max(size, st.min_size);
                    align = align.max(st.align);
                    primitive_align = primitive_align.max(st.primitive_align);
                    Ok(st)
                }).collect::<Result<Vec<_>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.abi_align(align);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck for
                    // some reason at this point (based on values discriminant can take on). Mostly
                    // because this discriminant will be loaded, and then stored into variable of
                    // type calculated by typeck. Consider such case (a bug): typeck decided on
                    // byte-sized discriminant, but layout thinks we need a 16-bit to store all
                    // discriminant values. That would be a bug, because then, in trans, in order
                    // to store this 16-bit discriminant into 8-bit sized temporary some of the
                    // space necessary to represent would have to be discarded (or layout is wrong
                    // on thinking it needs 16 bits)
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make discr type however large (as an optimisation)
                    // after this point – we’ll just truncate the value we load in trans.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity);

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = Int(min_ity).size(dl);
                    let new_ity_size = Int(ity).size(dl);
                    for variant in &mut variants {
                        for i in variant.offsets.iter_mut() {
                            // The first field is the discriminant, at offset 0.
                            // These aren't in order, and we need to skip it.
                            if *i <= old_ity_size && *i > Size::from_bytes(0) {
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.min_size <= old_ity_size {
                            variant.min_size = new_ity_size;
                        }
                    }
                }

                General {
                    discr: ity,
                    variants: variants,
                    size: size,
                    align: align,
                    primitive_align: primitive_align
                }
            }

            // Types with no meaningful known layout.
            ty::TyProjection(_) | ty::TyAnon(..) => {
                let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                return normalized.layout(tcx, param_env);
            }
            ty::TyParam(_) => {
                return Err(LayoutError::Unknown(ty));
            }
            ty::TyInfer(_) | ty::TyError => {
                bug!("Layout::compute: unexpected type `{}`", ty)
            }
        };

        success(layout)
    }

    /// Returns true if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        match *self {
            Scalar {..} | Vector {..} | FatPointer {..} |
            CEnum {..} | UntaggedUnion {..} | General {..} |
            RawNullablePointer {..} |
            StructWrappedNullablePointer {..} => false,

            Array { sized, .. } |
            Univariant { variant: Struct { sized, .. }, .. } => !sized
        }
    }

    pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
        let dl = cx.data_layout();

        match *self {
            Scalar { value, .. } | RawNullablePointer { value, .. } => {
                value.size(dl)
            }

            Vector { element, count } => {
                let element_size = element.size(dl);
                let vec_size = match element_size.checked_mul(count, dl) {
                    Some(size) => size,
                    None => bug!("Layout::size({:?}): {} * {} overflowed",
                                 self, element_size.bytes(), count)
                };
                vec_size.abi_align(self.align(dl))
            }

            Array { element_size, count, .. } => {
                match element_size.checked_mul(count, dl) {
                    Some(size) => size,
                    None => bug!("Layout::size({:?}): {} * {} overflowed",
                                 self, element_size.bytes(), count)
                }
            }

            FatPointer { metadata, .. } => {
                // Effectively a (ptr, meta) tuple.
                Pointer.size(dl).abi_align(metadata.align(dl))
                       .checked_add(metadata.size(dl), dl).unwrap()
                       .abi_align(self.align(dl))
            }

            CEnum { discr, .. } => Int(discr).size(dl),
            General { size, .. } => size,
            UntaggedUnion { ref variants } => variants.stride(),

            Univariant { ref variant, .. } |
            StructWrappedNullablePointer { nonnull: ref variant, .. } => {
                variant.stride()
            }
        }
    }

    pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
        let dl = cx.data_layout();

        match *self {
            Scalar { value, .. } | RawNullablePointer { value, .. } => {
                value.align(dl)
            }

            Vector { element, count } => {
                let elem_size = element.size(dl);
                let vec_size = match elem_size.checked_mul(count, dl) {
                    Some(size) => size,
                    None => bug!("Layout::align({:?}): {} * {} overflowed",
                                 self, elem_size.bytes(), count)
                };
                for &(size, align) in &dl.vector_align {
                    if size == vec_size {
                        return align;
                    }
                }
                // Default to natural alignment, which is what LLVM does.
                // That is, use the size, rounded up to a power of 2.
                let align = vec_size.bytes().next_power_of_two();
                Align::from_bytes(align, align).unwrap()
            }

            FatPointer { metadata, .. } => {
                // Effectively a (ptr, meta) tuple.
                Pointer.align(dl).max(metadata.align(dl))
            }

            CEnum { discr, .. } => Int(discr).align(dl),
            Array { align, .. } | General { align, .. } => align,
            UntaggedUnion { ref variants } => variants.align,

            Univariant { ref variant, .. } |
            StructWrappedNullablePointer { nonnull: ref variant, .. } => {
                variant.align
            }
        }
    }

    /// Returns alignment before repr alignment is applied
    pub fn primitive_align(&self, dl: &TargetDataLayout) -> Align {
        match *self {
            Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align,
            Univariant { ref variant, .. } |
            StructWrappedNullablePointer { nonnull: ref variant, .. } => {
                variant.primitive_align
            },

            _ => self.align(dl)
        }
    }

    /// Returns repr alignment if it is greater than the primitive alignment.
    pub fn over_align(&self, dl: &TargetDataLayout) -> Option<u32> {
        let align = self.align(dl);
        let primitive_align = self.primitive_align(dl);
        if align.abi() > primitive_align.abi() {
            Some(align.abi() as u32)
        } else {
            None
        }
    }
1638
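// Editorial sketch (not part of the original source), assuming the
// `#[repr(align(N))]` attribute (unstable at this point in time):
//
//     // #[repr(align(16))]
//     // struct Aligned(u8);
//
// `align()` reports the attribute-forced 16-byte alignment, while
// `primitive_align()` still reports the 1-byte alignment of the `u8` field,
// so `over_align()` returns `Some(16)`. Without the attribute both calls
// agree and the result is `None`.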
1639 pub fn field_offset<C: HasDataLayout>(&self,
1640 cx: C,
1641 i: usize,
1642 variant_index: Option<usize>)
1643 -> Size {
1644 let dl = cx.data_layout();
1645
1646 match *self {
1647 Scalar { .. } |
1648 CEnum { .. } |
1649 UntaggedUnion { .. } |
1650 RawNullablePointer { .. } => {
1651 Size::from_bytes(0)
1652 }
1653
1654 Vector { element, count } => {
1655 let element_size = element.size(dl);
1656 let i = i as u64;
1657 assert!(i < count);
1658 Size::from_bytes(element_size.bytes() * i) // offset of the i-th element, not the total size
1659 }
1660
1661 Array { element_size, count, .. } => {
1662 let i = i as u64;
1663 assert!(i < count);
1664 Size::from_bytes(element_size.bytes() * i) // offset of the i-th element, not the total size
1665 }
1666
1667 FatPointer { metadata, .. } => {
1668 // Effectively a (ptr, meta) tuple.
1669 assert!(i < 2);
1670 if i == 0 {
1671 Size::from_bytes(0)
1672 } else {
1673 Pointer.size(dl).abi_align(metadata.align(dl))
1674 }
1675 }
1676
1677 Univariant { ref variant, .. } => variant.offsets[i],
1678
1679 General { ref variants, .. } => {
1680 let v = variant_index.expect("variant index required");
1681 variants[v].offsets[i + 1]
1682 }
1683
1684 StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
1685 if Some(nndiscr as usize) == variant_index {
1686 nonnull.offsets[i]
1687 } else {
1688 Size::from_bytes(0)
1689 }
1690 }
1691 }
1692 }
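// Editorial note (not part of the original source): in the General arm of
// `field_offset` above, `offsets[0]` of each variant's `Struct` holds the
// discriminant, so field `i` of the selected variant lives at
// `offsets[i + 1]`. Hypothetical sketch:
//
//     // enum E { A(u8, u32), B }              // tagged ("General") layout
//     // layout.field_offset(cx, 0, Some(0))   // offset of the u8, after the tag
//     // layout.field_offset(cx, 1, Some(0))   // offset of the u32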
1693
1694 /// This is invoked by the `layout_raw` query to record the final
1695 /// layout of each type.
1696 #[inline]
1697 pub fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1698 ty: Ty<'tcx>,
1699 param_env: ty::ParamEnv<'tcx>,
1700 layout: &Layout) {
1701 // If we are running with `-Zprint-type-sizes`, record layouts for
1702 // dumping later. Ignore layouts computed with non-empty parameter
1703 // environments or for non-monomorphic types, as the user only wants
1704 // to see the layouts that result from the final trans session.
1705 if
1706 !tcx.sess.opts.debugging_opts.print_type_sizes ||
1707 ty.has_param_types() ||
1708 ty.has_self_ty() ||
1709 !param_env.caller_bounds.is_empty()
1710 {
1711 return;
1712 }
1713
1714 Self::record_layout_for_printing_outlined(tcx, ty, param_env, layout)
1715 }
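// Editorial note (not part of the original source): `-Z print-type-sizes` is
// a nightly-only debugging flag; a typical invocation is
//
//     rustc -Z print-type-sizes foo.rs
//
// which dumps one `print-type-size` line per recorded type, variant and
// field once compilation finishes.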
1716
1717 fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1718 ty: Ty<'tcx>,
1719 param_env: ty::ParamEnv<'tcx>,
1720 layout: &Layout) {
1721 // (delay format until we actually need it)
1722 let record = |kind, opt_discr_size, variants| {
1723 let type_desc = format!("{:?}", ty);
1724 let overall_size = layout.size(tcx);
1725 let align = layout.align(tcx);
1726 tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1727 type_desc,
1728 align,
1729 overall_size,
1730 opt_discr_size,
1731 variants);
1732 };
1733
1734 let (adt_def, substs) = match ty.sty {
1735 ty::TyAdt(ref adt_def, substs) => {
1736 debug!("print-type-size t: `{:?}` process adt", ty);
1737 (adt_def, substs)
1738 }
1739
1740 ty::TyClosure(..) => {
1741 debug!("print-type-size t: `{:?}` record closure", ty);
1742 record(DataTypeKind::Closure, None, vec![]);
1743 return;
1744 }
1745
1746 _ => {
1747 debug!("print-type-size t: `{:?}` skip non-nominal", ty);
1748 return;
1749 }
1750 };
1751
1752 let adt_kind = adt_def.adt_kind();
1753
1754 let build_field_info = |(field_name, field_ty): (ast::Name, Ty<'tcx>), offset: &Size| {
1755 let layout = field_ty.layout(tcx, param_env);
1756 match layout {
1757 Err(_) => bug!("no layout found for field {} type: `{:?}`", field_name, field_ty),
1758 Ok(field_layout) => {
1759 session::FieldInfo {
1760 name: field_name.to_string(),
1761 offset: offset.bytes(),
1762 size: field_layout.size(tcx).bytes(),
1763 align: field_layout.align(tcx).abi(),
1764 }
1765 }
1766 }
1767 };
1768
1769 let build_primitive_info = |name: ast::Name, value: &Primitive| {
1770 session::VariantInfo {
1771 name: Some(name.to_string()),
1772 kind: session::SizeKind::Exact,
1773 align: value.align(tcx).abi(),
1774 size: value.size(tcx).bytes(),
1775 fields: vec![],
1776 }
1777 };
1778
1779 enum Fields<'a> {
1780 WithDiscrim(&'a Struct),
1781 NoDiscrim(&'a Struct),
1782 }
1783
1784 let build_variant_info = |n: Option<ast::Name>,
1785 flds: &[(ast::Name, Ty<'tcx>)],
1786 layout: Fields| {
1787 let (s, field_offsets) = match layout {
1788 Fields::WithDiscrim(s) => (s, &s.offsets[1..]),
1789 Fields::NoDiscrim(s) => (s, &s.offsets[0..]),
1790 };
1791 let field_info: Vec<_> =
1792 flds.iter()
1793 .zip(field_offsets.iter())
1794 .map(|(&field_name_ty, offset)| build_field_info(field_name_ty, offset))
1795 .collect();
1796
1797 session::VariantInfo {
1798 name: n.map(|n|n.to_string()),
1799 kind: if s.sized {
1800 session::SizeKind::Exact
1801 } else {
1802 session::SizeKind::Min
1803 },
1804 align: s.align.abi(),
1805 size: s.min_size.bytes(),
1806 fields: field_info,
1807 }
1808 };
1809
1810 match *layout {
1811 Layout::StructWrappedNullablePointer { nonnull: ref variant_layout,
1812 nndiscr,
1813 discrfield: _,
1814 discrfield_source: _ } => {
1815 debug!("print-type-size t: `{:?}` adt struct-wrapped nullable nndiscr {} is {:?}",
1816 ty, nndiscr, variant_layout);
1817 let variant_def = &adt_def.variants[nndiscr as usize];
1818 let fields: Vec<_> =
1819 variant_def.fields.iter()
1820 .map(|field_def| (field_def.name, field_def.ty(tcx, substs)))
1821 .collect();
1822 record(adt_kind.into(),
1823 None,
1824 vec![build_variant_info(Some(variant_def.name),
1825 &fields,
1826 Fields::NoDiscrim(variant_layout))]);
1827 }
1828 Layout::RawNullablePointer { nndiscr, value } => {
1829 debug!("print-type-size t: `{:?}` adt raw nullable nndiscr {} is {:?}",
1830 ty, nndiscr, value);
1831 let variant_def = &adt_def.variants[nndiscr as usize];
1832 record(adt_kind.into(), None,
1833 vec![build_primitive_info(variant_def.name, &value)]);
1834 }
1835 Layout::Univariant { variant: ref variant_layout, non_zero: _ } => {
1836 let variant_names = || {
1837 adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::<Vec<_>>()
1838 };
1839 debug!("print-type-size t: `{:?}` adt univariant {:?} variants: {:?}",
1840 ty, variant_layout, variant_names());
1841 assert!(adt_def.variants.len() <= 1,
1842 "univariant with variants {:?}", variant_names());
1843 if adt_def.variants.len() == 1 {
1844 let variant_def = &adt_def.variants[0];
1845 let fields: Vec<_> =
1846 variant_def.fields.iter()
1847 .map(|f| (f.name, f.ty(tcx, substs)))
1848 .collect();
1849 record(adt_kind.into(),
1850 None,
1851 vec![build_variant_info(Some(variant_def.name),
1852 &fields,
1853 Fields::NoDiscrim(variant_layout))]);
1854 } else {
1855 // (This case arises for *empty* enums; so give it
1856 // zero variants.)
1857 record(adt_kind.into(), None, vec![]);
1858 }
1859 }
1860
1861 Layout::General { ref variants, discr, .. } => {
1862 debug!("print-type-size t: `{:?}` adt general variants def {} layouts {} {:?}",
1863 ty, adt_def.variants.len(), variants.len(), variants);
1864 let variant_infos: Vec<_> =
1865 adt_def.variants.iter()
1866 .zip(variants.iter())
1867 .map(|(variant_def, variant_layout)| {
1868 let fields: Vec<_> =
1869 variant_def.fields
1870 .iter()
1871 .map(|f| (f.name, f.ty(tcx, substs)))
1872 .collect();
1873 build_variant_info(Some(variant_def.name),
1874 &fields,
1875 Fields::WithDiscrim(variant_layout))
1876 })
1877 .collect();
1878 record(adt_kind.into(), Some(discr.size()), variant_infos);
1879 }
1880
1881 Layout::UntaggedUnion { ref variants } => {
1882 debug!("print-type-size t: `{:?}` adt union variants {:?}",
1883 ty, variants);
1884 // layout does not currently store info about each
1885 // variant...
1886 record(adt_kind.into(), None, Vec::new());
1887 }
1888
1889 Layout::CEnum { discr, .. } => {
1890 debug!("print-type-size t: `{:?}` adt c-like enum", ty);
1891 let variant_infos: Vec<_> =
1892 adt_def.variants.iter()
1893 .map(|variant_def| {
1894 build_primitive_info(variant_def.name,
1895 &Primitive::Int(discr))
1896 })
1897 .collect();
1898 record(adt_kind.into(), Some(discr.size()), variant_infos);
1899 }
1900
1901 // The remaining cases provide little size information that is
1902 // interesting (i.e. adjustable via representation tweaks) beyond the total size.
1903 Layout::Scalar { .. } |
1904 Layout::Vector { .. } |
1905 Layout::Array { .. } |
1906 Layout::FatPointer { .. } => {
1907 debug!("print-type-size t: `{:?}` adt other", ty);
1908 record(adt_kind.into(), None, Vec::new())
1909 }
1910 }
1911 }
1912}
1913
1914/// Type size "skeleton", i.e. the only information determining a type's size.
1915 /// While this is conservative (aside from constant sizes, only pointers,
1916 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1917 /// enough to statically check common use cases of transmute.
1918#[derive(Copy, Clone, Debug)]
1919pub enum SizeSkeleton<'tcx> {
1920 /// Any statically computable Layout.
1921 Known(Size),
1922
1923 /// A potentially-fat pointer.
1924 Pointer {
1925 // If true, this pointer is never null.
1926 non_zero: bool,
1927 // The type which determines the unsized metadata, if any,
1928 // of this pointer. Either a type parameter or a projection
1929 // that depends on one, with regions erased.
1930 tail: Ty<'tcx>
1931 }
1932}
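// Editorial example (not part of the original source): `SizeSkeleton::compute`
// below maps e.g. `u64` to `Known(Size::from_bytes(8))`, while `&T` (for a
// type parameter `T: ?Sized`) maps to `Pointer { non_zero: true, tail: T }`.
// The latter's exact size is unknown (it depends on `T`'s metadata), but two
// skeletons with the same `tail` are still known to have equal sizes.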
1933
1934impl<'a, 'tcx> SizeSkeleton<'tcx> {
1935 pub fn compute(ty: Ty<'tcx>,
1936 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1937 param_env: ty::ParamEnv<'tcx>)
1938 -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1939 assert!(!ty.has_infer_types());
1940
1941 // First try computing a static layout.
1942 let err = match ty.layout(tcx, param_env) {
1943 Ok(layout) => {
1944 return Ok(SizeSkeleton::Known(layout.size(tcx)));
1945 }
1946 Err(err) => err
1947 };
1948
1949 let ptr_skeleton = |pointee: Ty<'tcx>| {
1950 let non_zero = !ty.is_unsafe_ptr();
1951 let tail = tcx.struct_tail(pointee);
1952 match tail.sty {
1953 ty::TyParam(_) | ty::TyProjection(_) => {
1954 assert!(tail.has_param_types() || tail.has_self_ty());
1955 Ok(SizeSkeleton::Pointer {
1956 non_zero: non_zero,
1957 tail: tcx.erase_regions(&tail)
1958 })
1959 }
1960 _ => {
1961 bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1962 tail `{}` is not a type parameter or a projection",
1963 ty, err, tail)
1964 }
1965 }
1966 };
1967
1968 match ty.sty {
1969 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1970 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1971 ptr_skeleton(pointee)
1972 }
1973 ty::TyAdt(def, _) if def.is_box() => {
1974 ptr_skeleton(ty.boxed_ty())
1975 }
1976
1977 ty::TyAdt(def, substs) => {
1978 // Only newtypes and enums w/ nullable pointer optimization.
1979 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1980 return Err(err);
1981 }
1982
1983 // Get a zero-sized variant or a pointer newtype.
1984 let zero_or_ptr_variant = |i: usize| {
1985 let fields = def.variants[i].fields.iter().map(|field| {
1986 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1987 });
1988 let mut ptr = None;
1989 for field in fields {
1990 let field = field?;
1991 match field {
1992 SizeSkeleton::Known(size) => {
1993 if size.bytes() > 0 {
1994 return Err(err);
1995 }
1996 }
1997 SizeSkeleton::Pointer {..} => {
1998 if ptr.is_some() {
1999 return Err(err);
2000 }
2001 ptr = Some(field);
2002 }
2003 }
2004 }
2005 Ok(ptr)
2006 };
2007
2008 let v0 = zero_or_ptr_variant(0)?;
2009 // Newtype.
2010 if def.variants.len() == 1 {
2011 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
2012 return Ok(SizeSkeleton::Pointer {
2013 non_zero: non_zero ||
2014 Some(def.did) == tcx.lang_items.non_zero(),
2015 tail: tail
2016 });
2017 } else {
2018 return Err(err);
2019 }
2020 }
2021
2022 let v1 = zero_or_ptr_variant(1)?;
2023 // Nullable pointer enum optimization.
2024 match (v0, v1) {
2025 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
2026 (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
2027 Ok(SizeSkeleton::Pointer {
2028 non_zero: false,
2029 tail: tail
2030 })
2031 }
2032 _ => Err(err)
2033 }
2034 }
2035
2036 ty::TyProjection(_) | ty::TyAnon(..) => {
2037 let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
2038 if ty == normalized {
2039 Err(err)
2040 } else {
2041 SizeSkeleton::compute(normalized, tcx, param_env)
2042 }
2043 }
2044
2045 _ => Err(err)
2046 }
2047 }
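// Editorial example (not part of the original source): for `Option<&T>` with
// `T` a type parameter, variant 0 (`None`) has no fields and variant 1
// (`Some`) is a single non-null pointer, so the `(v0, v1)` match above yields
// `SizeSkeleton::Pointer { non_zero: false, tail: T }`: the enum is known to
// be exactly pointer-sized even though `T` itself is not known.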
2048
2049 pub fn same_size(self, other: SizeSkeleton) -> bool {
2050 match (self, other) {
2051 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
2052 (SizeSkeleton::Pointer { tail: a, .. },
2053 SizeSkeleton::Pointer { tail: b, .. }) => a == b,
2054 _ => false
2055 }
2056 }
2057}
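// Editorial note (not part of the original source): `same_size` is what makes
// the skeleton useful for the static transmute size check: two `Known` sizes
// must be equal, and two `Pointer` skeletons count as equal-sized when they
// share the same unsized `tail` (e.g. `&T` and `Option<&T>` for the same
// parameter `T`), even though neither size is numerically known.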
2058
2059/// A pair of a type and its layout. Implements various
2060/// type traversal APIs (e.g. recursing into fields).
2061#[derive(Copy, Clone, Debug)]
2062pub struct TyLayout<'tcx> {
2063 pub ty: Ty<'tcx>,
2064 pub layout: &'tcx Layout,
2065 pub variant_index: Option<usize>,
2066}
2067
2068impl<'tcx> Deref for TyLayout<'tcx> {
2069 type Target = Layout;
2070 fn deref(&self) -> &Layout {
2071 self.layout
2072 }
2073}
2074
2075pub trait LayoutTyper<'tcx>: HasDataLayout {
2076 type TyLayout;
2077
2078 fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
2079 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout;
2080 fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx>;
2081}
2082
2083/// Combines a tcx with the parameter environment so that you can
2084 /// perform layout computations.
2085#[derive(Copy, Clone)]
2086pub struct LayoutCx<'a, 'tcx: 'a> {
2087 tcx: TyCtxt<'a, 'tcx, 'tcx>,
2088 param_env: ty::ParamEnv<'tcx>,
2089}
2090
2091impl<'a, 'tcx> LayoutCx<'a, 'tcx> {
2092 pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
2093 LayoutCx { tcx, param_env }
2094 }
2095}
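// Hypothetical usage sketch (editorial, not part of the original source),
// assuming `tcx`, `param_env` and a monomorphic `ty` are already in scope:
//
//     // let cx = LayoutCx::new(tcx, param_env);
//     // let ty_layout = cx.layout_of(ty)?;   // Result<TyLayout, LayoutError>
//     // let size = ty_layout.size(cx);       // TyLayout derefs to Layout
//
// The point is that a `LayoutCx` bundles the `tcx` with the `ParamEnv`
// required by `Ty::layout`.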
2096
2097 impl<'a, 'tcx> HasDataLayout for LayoutCx<'a, 'tcx> {
2098 fn data_layout(&self) -> &TargetDataLayout {
2099 &self.tcx.data_layout
2100 }
2101}
2102
2103impl<'a, 'tcx> LayoutTyper<'tcx> for LayoutCx<'a, 'tcx> {
2104 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
2105
2106 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
2107 self.tcx
2108 }
2109
2110 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
2111 let ty = self.normalize_projections(ty);
2112
2113 Ok(TyLayout {
2114 ty: ty,
2115 layout: ty.layout(self.tcx, self.param_env)?,
2116 variant_index: None
2117 })
2118 }
2119
2120 fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> {
2121 self.tcx.normalize_associated_type_in_env(&ty, self.param_env)
2122 }
2123}
2124
2125impl<'a, 'tcx> TyLayout<'tcx> {
2126 pub fn for_variant(&self, variant_index: usize) -> Self {
2127 TyLayout {
2128 variant_index: Some(variant_index),
2129 ..*self
2130 }
2131 }
2132
2133 pub fn field_offset<C: HasDataLayout>(&self, cx: C, i: usize) -> Size {
2134 self.layout.field_offset(cx, i, self.variant_index)
2135 }
2136
2137 pub fn field_count(&self) -> usize {
2138 // Handle enum/union through the type rather than Layout.
2139 if let ty::TyAdt(def, _) = self.ty.sty {
2140 let v = self.variant_index.unwrap_or(0);
2141 if def.variants.is_empty() {
2142 assert_eq!(v, 0);
2143 return 0;
2144 } else {
2145 return def.variants[v].fields.len();
2146 }
2147 }
2148
2149 match *self.layout {
2150 Scalar { .. } => {
2151 bug!("TyLayout::field_count({:?}): not applicable", self)
2152 }
2153
2154 // Handled above (the TyAdt case).
2155 CEnum { .. } |
2156 General { .. } |
2157 UntaggedUnion { .. } |
2158 RawNullablePointer { .. } |
2159 StructWrappedNullablePointer { .. } => bug!(),
2160
2161 FatPointer { .. } => 2,
2162
2163 Vector { count, .. } |
2164 Array { count, .. } => {
2165 let usize_count = count as usize;
2166 assert_eq!(usize_count as u64, count);
2167 usize_count
2168 }
2169
2170 Univariant { ref variant, .. } => variant.offsets.len(),
2171 }
2172 }
2173
2174 pub fn field_type<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> Ty<'tcx> {
2175 let tcx = cx.tcx();
2176
2177 let ptr_field_type = |pointee: Ty<'tcx>| {
2178 let slice = |element: Ty<'tcx>| {
2179 assert!(i < 2);
2180 if i == 0 {
2181 tcx.mk_mut_ptr(element)
2182 } else {
2183 tcx.types.usize
2184 }
2185 };
2186 match tcx.struct_tail(pointee).sty {
2187 ty::TySlice(element) => slice(element),
2188 ty::TyStr => slice(tcx.types.u8),
2189 ty::TyDynamic(..) => tcx.mk_mut_ptr(tcx.mk_nil()),
2190 _ => bug!("TyLayout::field_type({:?}): not applicable", self)
2191 }
2192 };
2193
2194 match self.ty.sty {
2195 ty::TyBool |
2196 ty::TyChar |
2197 ty::TyInt(_) |
2198 ty::TyUint(_) |
2199 ty::TyFloat(_) |
2200 ty::TyFnPtr(_) |
2201 ty::TyNever |
2202 ty::TyFnDef(..) |
2203 ty::TyDynamic(..) => {
2204 bug!("TyLayout::field_type({:?}): not applicable", self)
2205 }
2206
2207 // Potentially-fat pointers.
2208 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
2209 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
2210 ptr_field_type(pointee)
2211 }
2212 ty::TyAdt(def, _) if def.is_box() => {
2213 ptr_field_type(self.ty.boxed_ty())
2214 }
2215
2216 // Arrays and slices.
2217 ty::TyArray(element, _) |
2218 ty::TySlice(element) => element,
2219 ty::TyStr => tcx.types.u8,
2220
2221 // Tuples and closures.
2222 ty::TyClosure(def_id, ref substs) => {
2223 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
2224 }
2225
2226 ty::TyTuple(tys, _) => tys[i],
2227
2228 // SIMD vector types.
2229 ty::TyAdt(def, ..) if def.repr.simd() => {
2230 self.ty.simd_type(tcx)
2231 }
2232
2233 // ADTs.
2234 ty::TyAdt(def, substs) => {
2235 def.variants[self.variant_index.unwrap_or(0)].fields[i].ty(tcx, substs)
2236 }
2237
2238 ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
2239 ty::TyInfer(_) | ty::TyError => {
2240 bug!("TyLayout::field_type: unexpected type `{}`", self.ty)
2241 }
2242 }
2243 }
2244
2245 pub fn field<C: LayoutTyper<'tcx>>(&self,
2246 cx: C,
2247 i: usize)
2248 -> C::TyLayout {
2249 cx.layout_of(cx.normalize_projections(self.field_type(cx, i)))
2250 }
2251}
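// Hypothetical traversal sketch (editorial, not part of the original source),
// assuming `cx` is a `LayoutCx` (so `C::TyLayout` is a `Result`) and `ty` is
// a monomorphic type:
//
//     // let layout = cx.layout_of(ty)?;
//     // for i in 0..layout.field_count() {
//     //     let field = layout.field(cx, i)?;        // layout of the i-th field
//     //     let offset = layout.field_offset(cx, i); // its offset within `ty`
//     //     // ... recurse into `field` as needed ...
//     // }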