// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

pub use self::Integer::*;
pub use self::Layout::*;
pub use self::Primitive::*;

use infer::InferCtxt;
use session::Session;
use traits;
use ty::{self, Ty, TyCtxt, TypeFoldable};

use syntax::ast::{FloatTy, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::i64;
use std::iter;

/// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: Align,
    pub i8_align: Align,
    pub i16_align: Align,
    pub i32_align: Align,
    pub i64_align: Align,
    pub f32_align: Align,
    pub f64_align: Align,
    pub pointer_size: Size,
    pub pointer_align: Align,
    pub aggregate_align: Align,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, Align)>
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: Align::from_bits(8, 8).unwrap(),
            i8_align: Align::from_bits(8, 8).unwrap(),
            i16_align: Align::from_bits(16, 16).unwrap(),
            i32_align: Align::from_bits(32, 32).unwrap(),
            i64_align: Align::from_bits(32, 64).unwrap(),
            f32_align: Align::from_bits(32, 32).unwrap(),
            f64_align: Align::from_bits(64, 64).unwrap(),
            pointer_size: Size::from_bits(64),
            pointer_align: Align::from_bits(64, 64).unwrap(),
            aggregate_align: Align::from_bits(0, 64).unwrap(),
            vector_align: vec![
                (Size::from_bits(64), Align::from_bits(64, 64).unwrap()),
                (Size::from_bits(128), Align::from_bits(128, 128).unwrap())
            ]
        }
    }
}

impl TargetDataLayout {
    pub fn parse(sess: &Session) -> TargetDataLayout {
        // Parse a bit count from a string.
        let parse_bits = |s: &str, kind: &str, cause: &str| {
            s.parse::<u64>().unwrap_or_else(|err| {
                sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
                                  kind, s, cause, err));
                0
            })
        };

        // Parse a size string.
        let size = |s: &str, cause: &str| {
            Size::from_bits(parse_bits(s, "size", cause))
        };

        // Parse an alignment string.
        let align = |s: &[&str], cause: &str| {
            if s.is_empty() {
                sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause));
            }
            let abi = parse_bits(s[0], "alignment", cause);
            let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause));
            Align::from_bits(abi, pref).unwrap_or_else(|err| {
                sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}",
                                  cause, err));
                Align::from_bits(8, 8).unwrap()
            })
        };

        let mut dl = TargetDataLayout::default();
        for spec in sess.target.target.data_layout.split("-") {
            match &spec.split(":").collect::<Vec<_>>()[..] {
                &["e"] => dl.endian = Endian::Little,
                &["E"] => dl.endian = Endian::Big,
                &["a", ref a..] => dl.aggregate_align = align(a, "a"),
                &["f32", ref a..] => dl.f32_align = align(a, "f32"),
                &["f64", ref a..] => dl.f64_align = align(a, "f64"),
                &[p @ "p", s, ref a..] | &[p @ "p0", s, ref a..] => {
                    dl.pointer_size = size(s, p);
                    dl.pointer_align = align(a, p);
                }
                &[s, ref a..] if s.starts_with("i") => {
                    let ty_align = match s[1..].parse::<u64>() {
                        Ok(1) => &mut dl.i8_align,
                        Ok(8) => &mut dl.i8_align,
                        Ok(16) => &mut dl.i16_align,
                        Ok(32) => &mut dl.i32_align,
                        Ok(64) => &mut dl.i64_align,
                        Ok(_) => continue,
                        Err(_) => {
                            size(&s[1..], "i"); // For the user error.
                            continue;
                        }
                    };
                    *ty_align = align(a, s);
                }
                &[s, ref a..] if s.starts_with("v") => {
                    let v_size = size(&s[1..], "v");
                    let a = align(a, s);
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Perform consistency checks against the Target information.
        let endian_str = match dl.endian {
            Endian::Little => "little",
            Endian::Big => "big"
        };
        if endian_str != sess.target.target.target_endian {
            sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
                               architecture is {}-endian, while \"target-endian\" is `{}`",
                              endian_str, sess.target.target.target_endian));
        }

        if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width {
            sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
                               pointers are {}-bit, while \"target-pointer-width\" is `{}`",
                              dl.pointer_size.bits(), sess.target.target.target_pointer_width));
        }

        dl
    }

    /// Return the exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => bug!("obj_size_bound: unknown pointer bit size {}", bits)
        }
    }

    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
        }
    }
}

/// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone)]
pub enum Endian {
    Little,
    Big
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct Size {
    raw: u64
}

impl Size {
    pub fn from_bits(bits: u64) -> Size {
        Size::from_bytes((bits + 7) / 8)
    }

    pub fn from_bytes(bytes: u64) -> Size {
        if bytes >= (1 << 61) {
            bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes)
        }
        Size {
            raw: bytes
        }
    }

    pub fn bytes(self) -> u64 {
        self.raw
    }

    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    pub fn abi_align(self, align: Align) -> Size {
        let mask = align.abi() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    pub fn checked_add(self, offset: Size, dl: &TargetDataLayout) -> Option<Size> {
        // Each Size is less than dl.obj_size_bound(), so the sum is
        // also less than 1 << 62 (and therefore can't overflow).
        let bytes = self.bytes() + offset.bytes();

        if bytes < dl.obj_size_bound() {
            Some(Size::from_bytes(bytes))
        } else {
            None
        }
    }

    pub fn checked_mul(self, count: u64, dl: &TargetDataLayout) -> Option<Size> {
        // Unlike addition, the product of a Size and an arbitrary count can
        // overflow u64, so check the multiplication itself as well as the
        // object size bound.
        match self.bytes().checked_mul(count) {
            Some(bytes) if bytes < dl.obj_size_bound() => {
                Some(Size::from_bytes(bytes))
            }
            _ => None
        }
    }
}
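
// Illustrative addition, not part of the original file: a sketch of the
// `Size` arithmetic above (bit rounding, ABI alignment, checked addition
// against the object size bound). Module and test names are ours.
#[cfg(test)]
mod size_demo {
    use super::*;

    #[test]
    fn rounding_and_checked_arithmetic() {
        let dl = TargetDataLayout::default();
        // 12 bits round up to 2 whole bytes.
        assert_eq!(Size::from_bits(12).bytes(), 2);
        // 9 bytes aligned to a 4-byte ABI boundary is 12 bytes.
        let four = Align::from_bytes(4, 4).unwrap();
        assert_eq!(Size::from_bytes(9).abi_align(four).bytes(), 12);
        // Addition fails once the sum reaches the object size bound.
        let max = Size::from_bytes(dl.obj_size_bound() - 1);
        assert!(max.checked_add(Size::from_bytes(1), &dl).is_none());
    }
}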

/// Alignment of a type in bytes, both ABI-mandated and preferred.
/// Since alignments are always powers of 2, we can pack both in one byte,
/// giving each a nibble (4 bits) for a maximum alignment of 2^15 = 32768.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Align {
    raw: u8
}

impl Align {
    pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
        Align::from_bytes((abi + 7) / 8, (pref + 7) / 8)
    }

    pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
        let pack = |align: u64| {
            // Treat an alignment of 0 bytes like 1-byte alignment.
            if align == 0 {
                return Ok(0);
            }

            let mut bytes = align;
            let mut pow: u8 = 0;
            while (bytes & 1) == 0 {
                pow += 1;
                bytes >>= 1;
            }
            if bytes != 1 {
                Err(format!("`{}` is not a power of 2", align))
            } else if pow > 0x0f {
                Err(format!("`{}` is too large", align))
            } else {
                Ok(pow)
            }
        };

        Ok(Align {
            raw: pack(abi)? | (pack(pref)? << 4)
        })
    }

    pub fn abi(self) -> u64 {
        1 << (self.raw & 0xf)
    }

    pub fn pref(self) -> u64 {
        1 << (self.raw >> 4)
    }

    pub fn min(self, other: Align) -> Align {
        let abi = cmp::min(self.raw & 0x0f, other.raw & 0x0f);
        let pref = cmp::min(self.raw & 0xf0, other.raw & 0xf0);
        Align {
            raw: abi | pref
        }
    }

    pub fn max(self, other: Align) -> Align {
        let abi = cmp::max(self.raw & 0x0f, other.raw & 0x0f);
        let pref = cmp::max(self.raw & 0xf0, other.raw & 0xf0);
        Align {
            raw: abi | pref
        }
    }
}
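
// Illustrative addition, not part of the original file: the nibble packing
// described above, made concrete. Module and test names are ours.
#[cfg(test)]
mod align_demo {
    use super::*;

    #[test]
    fn abi_and_pref_are_packed_as_nibbles() {
        // A 32-bit ABI alignment with a 64-bit preferred alignment: both
        // power-of-2 exponents (2 and 3) fit in one byte.
        let a = Align::from_bits(32, 64).unwrap();
        assert_eq!(a.abi(), 4);
        assert_eq!(a.pref(), 8);
        // Non-powers-of-2 are rejected.
        assert!(Align::from_bytes(3, 3).is_err());
    }
}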

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum Integer {
    I1,
    I8,
    I16,
    I32,
    I64
}

impl Integer {
    pub fn size(&self) -> Size {
        match *self {
            I1 => Size::from_bits(1),
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
        }
    }

    pub fn align(&self, dl: &TargetDataLayout) -> Align {
        match *self {
            I1 => dl.i1_align,
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
        }
    }

    pub fn to_ty<'a, 'tcx>(&self, tcx: &ty::TyCtxt<'a, 'tcx, 'tcx>,
                           signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I1, false) => tcx.types.u8,
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I1, true) => tcx.types.i8,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
        }
    }

    /// Find the smallest Integer type which can represent the signed value.
    pub fn fit_signed(x: i64) -> Integer {
        match x {
            -0x0000_0001...0x0000_0000 => I1,
            -0x0000_0080...0x0000_007f => I8,
            -0x0000_8000...0x0000_7fff => I16,
            -0x8000_0000...0x7fff_ffff => I32,
            _ => I64
        }
    }

    /// Find the smallest Integer type which can represent the unsigned value.
    pub fn fit_unsigned(x: u64) -> Integer {
        match x {
            0...0x0000_0001 => I1,
            0...0x0000_00ff => I8,
            0...0x0000_ffff => I16,
            0...0xffff_ffff => I32,
            _ => I64
        }
    }

    /// Find the smallest integer with the given alignment.
    pub fn for_abi_align(dl: &TargetDataLayout, align: Align) -> Option<Integer> {
        let wanted = align.abi();
        for &candidate in &[I8, I16, I32, I64] {
            let ty = Int(candidate);
            if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() {
                return Some(candidate);
            }
        }
        None
    }

    /// Get the Integer type from an attr::IntType.
    pub fn from_attr(dl: &TargetDataLayout, ity: attr::IntType) -> Integer {
        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::Is) | attr::UnsignedInt(UintTy::Us) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Find the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    /// N.B.: u64 values above i64::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr(tcx: TyCtxt, ty: Ty, hints: &[attr::ReprAttr], min: i64, max: i64)
                  -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u64
        // which can fit all i64 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        for &r in hints.iter() {
            match r {
                attr::ReprInt(ity) => {
                    let discr = Integer::from_attr(&tcx.data_layout, ity);
                    let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
                    if discr < fit {
                        bug!("Integer::repr_discr: `#[repr]` hint too small for \
                              discriminant range of enum `{}`", ty)
                    }
                    return (discr, ity.is_signed());
                }
                attr::ReprExtern => {
                    match &tcx.sess.target.target.arch[..] {
                        // WARNING: the ARM EABI has two variants; the one corresponding
                        // to `at_least == I32` appears to be used on Linux and NetBSD,
                        // but some systems may use the variant corresponding to no
                        // lower bound. However, we don't run on those yet...?
                        "arm" => min_from_extern = Some(I32),
                        _ => min_from_extern = Some(I32),
                    }
                }
                attr::ReprAny => {},
                attr::ReprPacked => {
                    bug!("Integer::repr_discr: found #[repr(packed)] on enum `{}`", ty);
                }
                attr::ReprSimd => {
                    bug!("Integer::repr_discr: found #[repr(simd)] on enum `{}`", ty);
                }
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
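
// Illustrative addition, not part of the original file: how `fit_signed` and
// `fit_unsigned` drive discriminant sizing. The second test mirrors the tail
// of `repr_discr` for a hypothetical discriminant range; names are ours.
#[cfg(test)]
mod integer_fit_demo {
    use super::*;
    use std::cmp;

    #[test]
    fn smallest_fitting_integer() {
        assert_eq!(Integer::fit_unsigned(255), I8);
        assert_eq!(Integer::fit_unsigned(256), I16);
        assert_eq!(Integer::fit_signed(127), I8);
        assert_eq!(Integer::fit_signed(-129), I16);
    }

    #[test]
    fn negative_ranges_force_a_signed_discr() {
        // For a range of -1..=100, `-1 as u64` is u64::MAX, so the unsigned
        // fit degenerates to I64, while the signed fit (which `repr_discr`
        // uses when min < 0) stays at I8.
        let (min, max) = (-1i64, 100i64);
        assert_eq!(Integer::fit_unsigned(cmp::max(min as u64, max as u64)), I64);
        assert_eq!(cmp::max(Integer::fit_signed(min), Integer::fit_signed(max)), I8);
    }
}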

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum Primitive {
    Int(Integer),
    F32,
    F64,
    Pointer
}

impl Primitive {
    pub fn size(self, dl: &TargetDataLayout) -> Size {
        match self {
            Int(I1) | Int(I8) => Size::from_bits(8),
            Int(I16) => Size::from_bits(16),
            Int(I32) | F32 => Size::from_bits(32),
            Int(I64) | F64 => Size::from_bits(64),
            Pointer => dl.pointer_size
        }
    }

    pub fn align(self, dl: &TargetDataLayout) -> Align {
        match self {
            Int(I1) => dl.i1_align,
            Int(I8) => dl.i8_align,
            Int(I16) => dl.i16_align,
            Int(I32) => dl.i32_align,
            Int(I64) => dl.i64_align,
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align
        }
    }
}
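
// Illustrative addition, not part of the original file: `Primitive` sizes
// and alignments under the default data layout. Module name is ours.
#[cfg(test)]
mod primitive_demo {
    use super::*;

    #[test]
    fn i1_still_occupies_a_whole_byte() {
        let dl = TargetDataLayout::default();
        // I1 is a single bit as an LLVM value, but it is stored in a byte.
        assert_eq!(Int(I1).size(&dl).bytes(), 1);
        assert_eq!(Pointer.size(&dl).bytes(), 8);
        assert_eq!(F64.align(&dl).abi(), 8);
    }
}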

/// Path through fields of nested structures.
// FIXME(eddyb) use small vector optimization for the common case.
pub type FieldPath = Vec<u32>;

/// A structure, a product type in ADT terms.
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct Struct {
    pub align: Align,

    /// If true, no alignment padding is used.
    pub packed: bool,

    /// If true, the size is exact, otherwise it's only a lower bound.
    pub sized: bool,

    /// Offsets for the first byte of each field, ordered to match the source definition order.
    /// This vector is not necessarily in increasing order.
    /// FIXME(eddyb) use small vector optimization for the common case.
    pub offsets: Vec<Size>,

    /// Maps source order field indices to memory order indices,
    /// depending on how fields were permuted.
    /// FIXME (camlorn) also consider small vector optimization here.
    pub memory_index: Vec<u32>,

    pub min_size: Size,
}

// Info required to optimize struct layout.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
enum StructKind {
    // A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSizedUnivariant,
    // A univariant, the last field of which may be coerced to unsized.
    MaybeUnsizedUnivariant,
    // A univariant, but part of an enum.
    EnumVariant,
}

impl<'a, 'gcx, 'tcx> Struct {
    // FIXME(camlorn): reprs need a better representation to deal with multiple reprs on one type.
    fn new(dl: &TargetDataLayout, fields: &Vec<&'a Layout>,
           reprs: &[attr::ReprAttr], kind: StructKind,
           scapegoat: Ty<'gcx>) -> Result<Struct, LayoutError<'gcx>> {
        let packed = reprs.contains(&attr::ReprPacked);
        let mut ret = Struct {
            align: if packed { dl.i8_align } else { dl.aggregate_align },
            packed: packed,
            sized: true,
            offsets: vec![],
            memory_index: vec![],
            min_size: Size::from_bytes(0),
        };

        // Anything with ReprExtern or ReprPacked doesn't optimize.
        // Neither do 1-member and 2-member structs.
        // In addition, code in trans assumes that 2-element structs can become pairs.
        // It's easier to just short-circuit here.
        let mut can_optimize = fields.len() > 2 || StructKind::EnumVariant == kind;
        if can_optimize {
            // This exhaustive match forces whoever adds a new repr to update this function.
            // Otherwise, things can silently break.
            // Note the inversion: the closure returns true to stop optimizing.
            can_optimize = !reprs.iter().any(|r| {
                match *r {
                    attr::ReprAny | attr::ReprInt(_) => false,
                    attr::ReprExtern | attr::ReprPacked => true,
                    attr::ReprSimd => bug!("Simd vectors should be represented as layout::Vector")
                }
            });
        }

        // Disable field reordering until we can decide what to do.
        // The odd pattern here avoids a warning about the value never being read.
        if can_optimize { can_optimize = false }

        let (optimize, sort_ascending) = match kind {
            StructKind::AlwaysSizedUnivariant => (can_optimize, false),
            StructKind::MaybeUnsizedUnivariant => (can_optimize, false),
            StructKind::EnumVariant => {
                assert!(fields.len() >= 1, "Enum variants must have discriminants.");
                (can_optimize && fields[0].size(dl).bytes() == 1, true)
            }
        };

        ret.offsets = vec![Size::from_bytes(0); fields.len()];
        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        if optimize {
            let start = if let StructKind::EnumVariant = kind { 1 } else { 0 };
            let end = if let StructKind::MaybeUnsizedUnivariant = kind {
                fields.len() - 1
            } else {
                fields.len()
            };
            if end > start {
                let optimizing = &mut inverse_memory_index[start..end];
                if sort_ascending {
                    optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi());
                } else {
                    optimizing.sort_by(|&a, &b| {
                        let a = fields[a as usize].align(dl).abi();
                        let b = fields[b as usize].align(dl).abi();
                        b.cmp(&a)
                    });
                }
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we use inverse_memory_index to produce memory_index.

        if let StructKind::EnumVariant = kind {
            assert_eq!(inverse_memory_index[0], 0,
                       "Enum variant discriminants must have the lowest offset.");
        }

        let mut offset = Size::from_bytes(0);

        for i in inverse_memory_index.iter() {
            let field = fields[*i as usize];
            if !ret.sized {
                bug!("Struct::new: field #{} of `{}` comes after unsized field",
                     ret.offsets.len(), scapegoat);
            }

            if field.is_unsized() {
                ret.sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            if !ret.packed {
                let align = field.align(dl);
                ret.align = ret.align.max(align);
                offset = offset.abi_align(align);
            }

            debug!("Struct::new offset: {:?} field: {:?} {:?}", offset, field, field.size(dl));
            ret.offsets[*i as usize] = offset;

            offset = offset.checked_add(field.size(dl), dl)
                           .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
        }

        debug!("Struct::new min_size: {:?}", offset);
        ret.min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        if optimize {
            ret.memory_index = vec![0; inverse_memory_index.len()];

            for i in 0..inverse_memory_index.len() {
                ret.memory_index[inverse_memory_index[i] as usize] = i as u32;
            }
        } else {
            ret.memory_index = inverse_memory_index;
        }

        Ok(ret)
    }

    /// Get the size with trailing alignment padding.
    pub fn stride(&self) -> Size {
        self.min_size.abi_align(self.align)
    }

    /// Determine whether a structure would be zero-sized, given its fields.
    pub fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
                                  -> Result<bool, LayoutError<'gcx>>
        where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
        for field in fields {
            let field = field?;
            if field.is_unsized() || field.size(dl).bytes() > 0 {
                return Ok(false);
            }
        }
        Ok(true)
    }

    /// Get indices of the tys that made this struct by increasing offset.
    #[inline]
    pub fn field_index_by_increasing_offset<'b>(&'b self) -> impl iter::Iterator<Item=usize>+'b {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.memory_index.len() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if use_small {
            for i in 0..self.memory_index.len() {
                inverse_small[self.memory_index[i] as usize] = i as u8;
            }
        } else {
            inverse_big = vec![0; self.memory_index.len()];
            for i in 0..self.memory_index.len() {
                inverse_big[self.memory_index[i] as usize] = i as u32;
            }
        }

        (0..self.memory_index.len()).map(move |i| {
            if use_small { inverse_small[i] as usize }
            else { inverse_big[i] as usize }
        })
    }

    /// Find the path leading to a non-zero leaf field, starting from
    /// the given type and recursing through aggregates.
    /// The tuple is `(path, source_path)`,
    /// where `path` is in memory order and `source_path` in source order.
    // FIXME(eddyb) track value ranges and traverse already optimized enums.
    fn non_zero_field_in_type(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
                              ty: Ty<'gcx>)
                              -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'gcx>> {
        let tcx = infcx.tcx.global_tcx();
        match (ty.layout(infcx)?, &ty.sty) {
            (&Scalar { non_zero: true, .. }, _) |
            (&CEnum { non_zero: true, .. }, _) => Ok(Some((vec![], vec![]))),
            (&FatPointer { non_zero: true, .. }, _) => {
                Ok(Some((vec![FAT_PTR_ADDR as u32], vec![FAT_PTR_ADDR as u32])))
            }

            // Is this the NonZero lang item wrapping a pointer or integer type?
            (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => {
                let fields = &def.struct_variant().fields;
                assert_eq!(fields.len(), 1);
                match *fields[0].ty(tcx, substs).layout(infcx)? {
                    // FIXME(eddyb) also allow floating-point types here.
                    Scalar { value: Int(_), non_zero: false } |
                    Scalar { value: Pointer, non_zero: false } => {
                        Ok(Some((vec![0], vec![0])))
                    }
                    FatPointer { non_zero: false, .. } => {
                        let tmp = vec![FAT_PTR_ADDR as u32, 0];
                        Ok(Some((tmp.clone(), tmp)))
                    }
                    _ => Ok(None)
                }
            }

            // Perhaps one of the fields of this struct is non-zero
            // let's recurse and find out
            (&Univariant { ref variant, .. }, &ty::TyAdt(def, substs)) if def.is_struct() => {
                Struct::non_zero_field_paths(
                    infcx,
                    def.struct_variant().fields.iter().map(|field| {
                        field.ty(tcx, substs)
                    }),
                    Some(&variant.memory_index[..]))
            }

            // Perhaps one of the upvars of this closure is non-zero
            (&Univariant { ref variant, .. }, &ty::TyClosure(def, substs)) => {
                let upvar_tys = substs.upvar_tys(def, tcx);
                Struct::non_zero_field_paths(infcx, upvar_tys,
                                             Some(&variant.memory_index[..]))
            }
            // Can we use one of the fields in this tuple?
            (&Univariant { ref variant, .. }, &ty::TyTuple(tys)) => {
                Struct::non_zero_field_paths(infcx, tys.iter().cloned(),
                                             Some(&variant.memory_index[..]))
            }

            // Is this a fixed-size array of something non-zero
            // with at least one element?
            (_, &ty::TyArray(ety, d)) if d > 0 => {
                Struct::non_zero_field_paths(infcx, Some(ety).into_iter(), None)
            }

            (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => {
                let normalized = normalize_associated_type(infcx, ty);
                if ty == normalized {
                    return Ok(None);
                }
                return Struct::non_zero_field_in_type(infcx, normalized);
            }

            // Anything else is not a non-zero type.
            _ => Ok(None)
        }
    }

    /// Find the path leading to a non-zero leaf field, starting from
    /// the given set of fields and recursing through aggregates.
    /// Returns Some((path, source_path)) on success.
    /// `path` is translated to memory order. `source_path` is not.
    fn non_zero_field_paths<I>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
                               fields: I,
                               permutation: Option<&[u32]>)
                               -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'gcx>>
        where I: Iterator<Item=Ty<'gcx>> {
        for (i, ty) in fields.enumerate() {
            if let Some((mut path, mut source_path)) = Struct::non_zero_field_in_type(infcx, ty)? {
                source_path.push(i as u32);
                let index = if let Some(p) = permutation {
                    p[i] as usize
                } else {
                    i
                };
                path.push(index as u32);
                return Ok(Some((path, source_path)));
            }
        }
        Ok(None)
    }
}
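
// Illustrative addition, not part of the original file: the inversion between
// `memory_index` and `field_index_by_increasing_offset`, on a hand-built
// `Struct` with a hypothetical permutation. Module name is ours.
#[cfg(test)]
mod memory_index_demo {
    use super::*;

    #[test]
    fn iterates_source_indices_by_increasing_offset() {
        // Source field 0 lives third in memory, field 1 first, field 2 second.
        let st = Struct {
            align: Align::from_bytes(1, 1).unwrap(),
            packed: false,
            sized: true,
            offsets: vec![Size::from_bytes(0); 3],
            memory_index: vec![2, 0, 1],
            min_size: Size::from_bytes(0),
        };
        let order: Vec<usize> = st.field_index_by_increasing_offset().collect();
        assert_eq!(order, vec![1, 2, 0]);
    }
}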

/// An untagged union.
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct Union {
    pub align: Align,

    pub min_size: Size,

    /// If true, no alignment padding is used.
    pub packed: bool,
}

impl<'a, 'gcx, 'tcx> Union {
    pub fn new(dl: &TargetDataLayout, packed: bool) -> Union {
        Union {
            align: if packed { dl.i8_align } else { dl.aggregate_align },
            min_size: Size::from_bytes(0),
            packed: packed,
        }
    }

    /// Extend the Union with more fields.
    pub fn extend<I>(&mut self, dl: &TargetDataLayout,
                     fields: I,
                     scapegoat: Ty<'gcx>)
                     -> Result<(), LayoutError<'gcx>>
        where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
        for (index, field) in fields.enumerate() {
            let field = field?;
            if field.is_unsized() {
                bug!("Union::extend: field #{} of `{}` is unsized",
                     index, scapegoat);
            }

            debug!("Union::extend field: {:?} {:?}", field, field.size(dl));

            if !self.packed {
                self.align = self.align.max(field.align(dl));
            }
            self.min_size = cmp::max(self.min_size, field.size(dl));
        }

        debug!("Union::extend min-size: {:?}", self.min_size);

        Ok(())
    }

    /// Get the size with trailing alignment padding.
    pub fn stride(&self) -> Size {
        self.min_size.abi_align(self.align)
    }
}
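
// Illustrative addition, not part of the original file: `Union::stride`
// rounding `min_size` up to the union's alignment, on a hand-built value
// (e.g. a 4-byte-aligned union whose largest field needs 6 bytes).
#[cfg(test)]
mod union_demo {
    use super::*;

    #[test]
    fn stride_rounds_min_size_up_to_align() {
        let un = Union {
            align: Align::from_bytes(4, 4).unwrap(),
            min_size: Size::from_bytes(6),
            packed: false,
        };
        assert_eq!(un.stride().bytes(), 8);
    }
}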

/// The first half of a fat pointer.
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// Type layout, from which size and alignment can be cheaply computed.
/// For ADTs, it also includes field placement and enum optimizations.
/// NOTE: Because Layout is interned, redundant information should be
/// kept to a minimum, e.g. it includes no sub-component Ty or Layout.
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum Layout {
    /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr.
    Scalar {
        value: Primitive,
        // If true, the value cannot represent a bit pattern of all zeroes.
        non_zero: bool
    },

    /// SIMD vectors, from structs marked with #[repr(simd)].
    Vector {
        element: Primitive,
        count: u64
    },

    /// TyArray, TySlice or TyStr.
    Array {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
        align: Align,
        size: Size
    },

    /// TyRawPtr or TyRef with a !Sized pointee.
    FatPointer {
        metadata: Primitive,
        // If true, the pointer cannot be null.
        non_zero: bool
    },

    // Remaining variants are all ADTs such as structs, enums or tuples.

    /// C-like enums; basically an integer.
    CEnum {
        discr: Integer,
        signed: bool,
        non_zero: bool,
        // Inclusive discriminant range.
        // If min > max, it represents min...u64::MAX followed by 0...max.
        // FIXME(eddyb) always use the shortest range, e.g. by finding
        // the largest space between two consecutive discriminants and
        // taking everything else as the (shortest) discriminant range.
        min: u64,
        max: u64
    },

    /// Single-case enums, and structs/tuples.
    Univariant {
        variant: Struct,
        // If true, the structure is NonZero.
        // FIXME(eddyb) use a newtype Layout kind for this.
        non_zero: bool
    },

    /// Untagged unions.
    UntaggedUnion {
        variants: Union,
    },

    /// General-case enums: for each case there is a struct, and they
    /// all start with a field for the discriminant.
    General {
        discr: Integer,
        variants: Vec<Struct>,
        size: Size,
        align: Align
    },

    /// Two cases distinguished by a nullable pointer: the case with discriminant
    /// `nndiscr` must have a single field which is known to be nonnull due to its type.
    /// The other case is known to be zero sized. Hence we represent the enum
    /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
    /// otherwise it indicates the other case.
    ///
    /// For example, `std::option::Option` instantiated at a safe pointer type
    /// is represented such that `None` is a null pointer and `Some` is the
    /// identity function.
    RawNullablePointer {
        nndiscr: u64,
        value: Primitive
    },

    /// Two cases distinguished by a nullable pointer: the case with discriminant
    /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
    /// field is known to be nonnull due to its type; if that field is null, then
    /// it represents the other case, which is known to be zero sized.
    StructWrappedNullablePointer {
        nndiscr: u64,
        nonnull: Struct,
        // N.B. There is a 0 at the start, for LLVM GEP through a pointer.
        discrfield: FieldPath,
        // Like discrfield, but in source order. For debuginfo.
        discrfield_source: FieldPath
    }
}
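
// Illustrative addition, not part of the original file: the nullable-pointer
// optimization described for `RawNullablePointer` is observable with plain
// std types, independently of the compiler internals above.
#[cfg(test)]
mod nullable_pointer_demo {
    #[test]
    fn option_of_ref_is_pointer_sized() {
        use std::mem::size_of;
        // `None::<&u32>` is encoded as the null pointer, so `Option<&u32>`
        // needs no separate discriminant field.
        assert_eq!(size_of::<Option<&u32>>(), size_of::<&u32>());
    }
}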

#[derive(Copy, Clone, Debug)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

/// Helper function for normalizing associated types in an inference context.
fn normalize_associated_type<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
                                             ty: Ty<'gcx>)
                                             -> Ty<'gcx> {
    if !ty.has_projection_types() {
        return ty;
    }

    let mut selcx = traits::SelectionContext::new(infcx);
    let cause = traits::ObligationCause::dummy();
    let traits::Normalized { value: result, obligations } =
        traits::normalize(&mut selcx, cause, &ty);

    let mut fulfill_cx = traits::FulfillmentContext::new();

    for obligation in obligations {
        fulfill_cx.register_predicate_obligation(infcx, obligation);
    }

    infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result)
}

impl<'a, 'gcx, 'tcx> Layout {
    pub fn compute_uncached(ty: Ty<'gcx>,
                            infcx: &InferCtxt<'a, 'gcx, 'tcx>)
                            -> Result<&'gcx Layout, LayoutError<'gcx>> {
        let tcx = infcx.tcx.global_tcx();
        let success = |layout| Ok(tcx.intern_layout(layout));
        let dl = &tcx.data_layout;
        assert!(!ty.has_infer_types());

        let layout = match ty.sty {
            // Basic scalars.
            ty::TyBool => Scalar { value: Int(I1), non_zero: false },
            ty::TyChar => Scalar { value: Int(I32), non_zero: false },
            ty::TyInt(ity) => {
                Scalar {
                    value: Int(Integer::from_attr(dl, attr::SignedInt(ity))),
                    non_zero: false
                }
            }
            ty::TyUint(ity) => {
                Scalar {
                    value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))),
                    non_zero: false
                }
            }
            ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false },
            ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false },
            ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true },

            // The never type.
            ty::TyNever => Univariant {
                variant: Struct::new(dl, &vec![], &[],
                                     StructKind::AlwaysSizedUnivariant, ty)?,
                non_zero: false
            },

            // Potentially-fat pointers.
            ty::TyBox(pointee) |
            ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let pointee = normalize_associated_type(infcx, pointee);
                if pointee.is_sized(tcx, &infcx.parameter_environment, DUMMY_SP) {
                    Scalar { value: Pointer, non_zero: non_zero }
                } else {
                    let unsized_part = tcx.struct_tail(pointee);
                    let meta = match unsized_part.sty {
                        ty::TySlice(_) | ty::TyStr => {
                            Int(dl.ptr_sized_integer())
                        }
                        ty::TyDynamic(..) => Pointer,
                        _ => return Err(LayoutError::Unknown(unsized_part))
                    };
                    FatPointer { metadata: meta, non_zero: non_zero }
                }
            }

            // Arrays and slices.
            ty::TyArray(element, count) => {
                let element = element.layout(infcx)?;
                Array {
                    sized: true,
                    align: element.align(dl),
                    size: element.size(dl).checked_mul(count as u64, dl)
                                 .map_or(Err(LayoutError::SizeOverflow(ty)), Ok)?
                }
            }
            ty::TySlice(element) => {
                Array {
                    sized: false,
                    align: element.layout(infcx)?.align(dl),
                    size: Size::from_bytes(0)
                }
            }
            ty::TyStr => {
                Array {
                    sized: false,
                    align: dl.i8_align,
                    size: Size::from_bytes(0)
                }
            }

            // Odd unit types.
            ty::TyFnDef(..) => {
                Univariant {
                    variant: Struct::new(dl, &vec![], &[],
                                         StructKind::AlwaysSizedUnivariant, ty)?,
                    non_zero: false
                }
            }
            ty::TyDynamic(..) => {
                let mut unit = Struct::new(dl, &vec![], &[],
                                           StructKind::AlwaysSizedUnivariant, ty)?;
                unit.sized = false;
                Univariant { variant: unit, non_zero: false }
            }

            // Tuples and closures.
            ty::TyClosure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                let st = Struct::new(dl,
                    &tys.map(|ty| ty.layout(infcx))
                        .collect::<Result<Vec<_>, _>>()?,
                    &[],
                    StructKind::AlwaysSizedUnivariant, ty)?;
                Univariant { variant: st, non_zero: false }
            }

            ty::TyTuple(tys) => {
                // FIXME(camlorn): if we ever allow unsized tuples, this needs to be checked.
                // See the univariant case below to learn how.
                let st = Struct::new(dl,
                    &tys.iter().map(|ty| ty.layout(infcx))
                        .collect::<Result<Vec<_>, _>>()?,
                    &[], StructKind::AlwaysSizedUnivariant, ty)?;
                Univariant { variant: st, non_zero: false }
            }

            // SIMD vector types.
            ty::TyAdt(def, ..) if def.is_simd() => {
                let element = ty.simd_type(tcx);
                match *element.layout(infcx)? {
                    Scalar { value, .. } => {
                        return success(Vector {
                            element: value,
                            count: ty.simd_size(tcx) as u64
                        });
                    }
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element));
                    }
                }
            }

            // ADTs.
            ty::TyAdt(def, substs) => {
                let hints = &tcx.lookup_repr_hints(def.did)[..];

                if def.variants.is_empty() {
                    // Uninhabitable; represent as unit.
                    // (Typechecking will reject discriminant-sizing attrs.)
                    assert_eq!(hints.len(), 0);

                    return success(Univariant {
                        variant: Struct::new(dl, &vec![], &hints[..],
                                             StructKind::AlwaysSizedUnivariant, ty)?,
                        non_zero: false
                    });
                }

                if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) {
                    // All bodies empty -> intlike.
                    let (mut min, mut max, mut non_zero) = (i64::MAX, i64::MIN, true);
                    for v in &def.variants {
                        let x = v.disr_val.to_u64_unchecked() as i64;
                        if x == 0 { non_zero = false; }
                        if x < min { min = x; }
                        if x > max { max = x; }
                    }

                    let (discr, signed) = Integer::repr_discr(tcx, ty, &hints[..], min, max);
                    return success(CEnum {
                        discr: discr,
                        signed: signed,
                        non_zero: non_zero,
                        min: min as u64,
                        max: max as u64
                    });
                }

                if !def.is_enum() || def.variants.len() == 1 && hints.is_empty() {
                    // Struct, or union, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let kind = if def.is_enum() || def.variants[0].fields.len() == 0 {
                        StructKind::AlwaysSizedUnivariant
                    } else {
                        use middle::region::ROOT_CODE_EXTENT;
                        let param_env = tcx.construct_parameter_environment(DUMMY_SP,
                            def.did, ROOT_CODE_EXTENT);
                        let fields = &def.variants[0].fields;
                        let last_field = &fields[fields.len() - 1];
                        let always_sized = last_field.ty(tcx, param_env.free_substs)
                                                     .is_sized(tcx, &param_env, DUMMY_SP);
                        if !always_sized { StructKind::MaybeUnsizedUnivariant }
                        else { StructKind::AlwaysSizedUnivariant }
                    };

                    let fields = def.variants[0].fields.iter().map(|field| {
                        field.ty(tcx, substs).layout(infcx)
                    }).collect::<Result<Vec<_>, _>>()?;
                    let packed = tcx.lookup_packed(def.did);
                    let layout = if def.is_union() {
                        let mut un = Union::new(dl, packed);
                        un.extend(dl, fields.iter().map(|&f| Ok(f)), ty)?;
                        UntaggedUnion { variants: un }
                    } else {
                        let st = Struct::new(dl, &fields, &hints[..],
                                             kind, ty)?;
                        let non_zero = Some(def.did) == tcx.lang_items.non_zero();
                        Univariant { variant: st, non_zero: non_zero }
                    };
                    return success(layout);
                }

                // Since there's at least one non-empty body, explicit
                // discriminants should have been rejected by a checker
                // before this point.
                for (i, v) in def.variants.iter().enumerate() {
                    if i as u64 != v.disr_val.to_u64_unchecked() {
                        bug!("non-C-like enum {} with specified discriminants",
                             tcx.item_path_str(def.did));
                    }
                }

                // Cache the substituted and normalized variant field types.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| field.ty(tcx, substs)).collect::<Vec<_>>()
                }).collect::<Vec<_>>();

                if variants.len() == 2 && hints.is_empty() {
                    // Nullable pointer optimization.
                    for discr in 0..2 {
                        let other_fields = variants[1 - discr].iter().map(|ty| {
                            ty.layout(infcx)
                        });
                        if !Struct::would_be_zero_sized(dl, other_fields)? {
                            continue;
                        }
                        let paths = Struct::non_zero_field_paths(infcx,
                            variants[discr].iter().cloned(),
                            None)?;
                        let (mut path, mut path_source) = if let Some(p) = paths { p }
                          else { continue };

                        // FIXME(eddyb) should take advantage of a newtype.
                        if path == &[0] && variants[discr].len() == 1 {
                            let value = match *variants[discr][0].layout(infcx)? {
                                Scalar { value, .. } => value,
                                CEnum { discr, .. } => Int(discr),
                                _ => bug!("Layout::compute: `{}`'s non-zero \
                                           `{}` field not scalar?!",
                                          ty, variants[discr][0])
                            };
                            return success(RawNullablePointer {
                                nndiscr: discr as u64,
                                value: value,
                            });
                        }

                        let st = Struct::new(dl,
                            &variants[discr].iter().map(|ty| ty.layout(infcx))
                                .collect::<Result<Vec<_>, _>>()?,
                            &hints[..], StructKind::AlwaysSizedUnivariant, ty)?;

                        // We have to fix the last element of path here.
                        let mut i = *path.last().unwrap();
                        i = st.memory_index[i as usize];
                        *path.last_mut().unwrap() = i;
                        path.push(0); // For GEP through a pointer.
                        path.reverse();
                        path_source.push(0);
                        path_source.reverse();

                        return success(StructWrappedNullablePointer {
                            nndiscr: discr as u64,
                            nonnull: st,
                            discrfield: path,
                            discrfield_source: path_source
                        });
                    }
                }

                // The general case.
                let discr_max = (variants.len() - 1) as i64;
                assert!(discr_max >= 0);
                let (min_ity, _) = Integer::repr_discr(tcx, ty, &hints[..], 0, discr_max);

                let mut align = dl.aggregate_align;
                let mut size = Size::from_bytes(0);

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256, 256).unwrap();

                // Create the set of structs that represent each variant,
                // using the minimum integer type we figured out above.
                let discr = Scalar { value: Int(min_ity), non_zero: false };
                let mut variants = variants.into_iter().map(|fields| {
                    let mut fields = fields.into_iter().map(|field| {
                        field.layout(infcx)
                    }).collect::<Result<Vec<_>, _>>()?;
                    fields.insert(0, &discr);
                    let st = Struct::new(dl,
                        &fields,
                        &hints[..], StructKind::EnumVariant, ty)?;
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    // It is important to skip the first field.
                    for i in st.field_index_by_increasing_offset().skip(1) {
                        let field = fields[i];
                        let field_align = field.align(dl);
                        if field.size(dl).bytes() != 0 || field_align.abi() != 1 {
                            start_align = start_align.min(field_align);
                            break;
                        }
                    }
                    size = cmp::max(size, st.min_size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<Vec<_>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.abi_align(align);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment.
                let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity);

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = Int(min_ity).size(dl);
                    let new_ity_size = Int(ity).size(dl);
                    for variant in &mut variants {
                        for i in variant.offsets.iter_mut() {
                            // The first field is the discriminant, at offset 0.
                            // The offsets aren't sorted, so we skip the
                            // discriminant by checking for a zero offset here.
                            if *i <= old_ity_size && *i > Size::from_bytes(0) {
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.min_size <= old_ity_size {
                            variant.min_size = new_ity_size;
                        }
                    }
                }

                General {
                    discr: ity,
                    variants: variants,
                    size: size,
                    align: align
                }
            }

            // Types with no meaningful known layout.
            ty::TyProjection(_) | ty::TyAnon(..) => {
                let normalized = normalize_associated_type(infcx, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                return normalized.layout(infcx);
            }
            ty::TyParam(_) => {
                return Err(LayoutError::Unknown(ty));
            }
            ty::TyInfer(_) | ty::TyError => {
                bug!("Layout::compute: unexpected type `{}`", ty)
            }
        };

        success(layout)
    }

    /// Returns true if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        match *self {
            Scalar {..} | Vector {..} | FatPointer {..} |
            CEnum {..} | UntaggedUnion {..} | General {..} |
            RawNullablePointer {..} |
            StructWrappedNullablePointer {..} => false,

            Array { sized, .. } |
            Univariant { variant: Struct { sized, .. }, .. } => !sized
        }
    }

    pub fn size(&self, dl: &TargetDataLayout) -> Size {
        match *self {
            Scalar { value, .. } | RawNullablePointer { value, .. } => {
                value.size(dl)
            }

            Vector { element, count } => {
                let elem_size = element.size(dl);
                let vec_size = match elem_size.checked_mul(count, dl) {
                    Some(size) => size,
                    None => bug!("Layout::size({:?}): {} * {} overflowed",
                                 self, elem_size.bytes(), count)
                };
                vec_size.abi_align(self.align(dl))
            }

            FatPointer { metadata, .. } => {
                // Effectively a (ptr, meta) tuple.
                Pointer.size(dl).abi_align(metadata.align(dl))
                       .checked_add(metadata.size(dl), dl).unwrap()
                       .abi_align(self.align(dl))
            }

            CEnum { discr, .. } => Int(discr).size(dl),
            Array { size, .. } | General { size, .. } => size,
            UntaggedUnion { ref variants } => variants.stride(),

            Univariant { ref variant, .. } |
            StructWrappedNullablePointer { nonnull: ref variant, .. } => {
                variant.stride()
            }
        }
    }

    pub fn align(&self, dl: &TargetDataLayout) -> Align {
        match *self {
            Scalar { value, .. } | RawNullablePointer { value, .. } => {
                value.align(dl)
            }

            Vector { element, count } => {
                let elem_size = element.size(dl);
                let vec_size = match elem_size.checked_mul(count, dl) {
                    Some(size) => size,
                    None => bug!("Layout::align({:?}): {} * {} overflowed",
                                 self, elem_size.bytes(), count)
                };
                for &(size, align) in &dl.vector_align {
                    if size == vec_size {
                        return align;
                    }
                }
                // Default to natural alignment, which is what LLVM does.
                // That is, use the size, rounded up to a power of 2.
                let align = vec_size.bytes().next_power_of_two();
                Align::from_bytes(align, align).unwrap()
            }

            FatPointer { metadata, .. } => {
                // Effectively a (ptr, meta) tuple.
                Pointer.align(dl).max(metadata.align(dl))
            }

            CEnum { discr, .. } => Int(discr).align(dl),
            Array { align, .. } | General { align, .. } => align,
            UntaggedUnion { ref variants } => variants.align,

            Univariant { ref variant, .. } |
            StructWrappedNullablePointer { nonnull: ref variant, .. } => {
                variant.align
            }
        }
    }
}
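
// Illustrative addition, not part of the original file: `Layout::size` on two
// hand-built layouts under the default data layout. The slice-pointer layout
// below is our own assumption, matching the fat pointer case above.
#[cfg(test)]
mod layout_size_demo {
    use super::*;

    #[test]
    fn scalar_and_fat_pointer_sizes() {
        let dl = TargetDataLayout::default();
        let scalar = Scalar { value: Int(I32), non_zero: false };
        assert_eq!(scalar.size(&dl).bytes(), 4);
        assert!(!scalar.is_unsized());
        // A slice pointer: data pointer plus a pointer-sized length.
        let fat = FatPointer { metadata: Int(dl.ptr_sized_integer()), non_zero: true };
        assert_eq!(fat.size(&dl).bytes(), 2 * dl.pointer_size.bytes());
    }
}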

/// Type size "skeleton", i.e. the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common usecases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        // If true, this pointer is never null.
        non_zero: bool,
        // The type which determines the unsized metadata, if any,
        // of this pointer. Either a type parameter or a projection
        // depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}

impl<'a, 'gcx, 'tcx> SizeSkeleton<'gcx> {
    pub fn compute(ty: Ty<'gcx>, infcx: &InferCtxt<'a, 'gcx, 'tcx>)
                   -> Result<SizeSkeleton<'gcx>, LayoutError<'gcx>> {
        let tcx = infcx.tcx.global_tcx();
        assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match ty.layout(infcx) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size(&tcx.data_layout)));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::TyBox(pointee) |
            ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::TyParam(_) | ty::TyProjection(_) => {
                        assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::TyAdt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i: usize| {
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), infcx)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero ||
                                      Some(def.did) == tcx.lang_items.non_zero(),
                            tail: tail
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail: tail
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::TyProjection(_) | ty::TyAnon(..) => {
                let normalized = normalize_associated_type(infcx, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, infcx)
                }
            }

            _ => Err(err)
        }
    }

    pub fn same_size(self, other: SizeSkeleton) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
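
// Illustrative addition, not part of the original file: `same_size` compares
// `Known` skeletons by value, which is what lets transmute checking accept
// two statically-sized types of equal size. Module name is ours.
#[cfg(test)]
mod size_skeleton_demo {
    use super::*;

    #[test]
    fn known_sizes_compare_by_value() {
        let a: SizeSkeleton<'static> = SizeSkeleton::Known(Size::from_bytes(8));
        let b: SizeSkeleton<'static> = SizeSkeleton::Known(Size::from_bytes(8));
        let c: SizeSkeleton<'static> = SizeSkeleton::Known(Size::from_bytes(4));
        assert!(a.same_size(b));
        assert!(!a.same_size(c));
    }
}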