]> git.proxmox.com Git - rustc.git/blob - src/librustc/ty/layout.rs
Imported Upstream version 1.9.0+dfsg1
[rustc.git] / src / librustc / ty / layout.rs
1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 pub use self::Integer::*;
12 pub use self::Layout::*;
13 pub use self::Primitive::*;
14
15 use infer::{InferCtxt, drain_fulfillment_cx_or_panic};
16 use session::Session;
17 use traits;
18 use ty::{self, Ty, TyCtxt, TypeFoldable};
19
20 use syntax::ast::{FloatTy, IntTy, UintTy};
21 use syntax::attr;
22 use syntax::codemap::DUMMY_SP;
23
24 use std::cmp;
25 use std::fmt;
26 use std::i64;
27
/// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
    // Byte order of the target; checked against "target-endian" in `parse`.
    pub endian: Endian,
    // ABI/preferred alignment pairs for each primitive integer width.
    pub i1_align: Align,
    pub i8_align: Align,
    pub i16_align: Align,
    pub i32_align: Align,
    pub i64_align: Align,
    // Alignments for the floating-point primitives.
    pub f32_align: Align,
    pub f64_align: Align,
    // Pointer width and alignment; checked against "target-pointer-width".
    pub pointer_size: Size,
    pub pointer_align: Align,
    // Default alignment for aggregates (structs, enums, arrays).
    pub aggregate_align: Align,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, Align)>
}
46
impl Default for TargetDataLayout {
    /// Baseline layout used before any "data-layout" spec entries are
    /// applied: big-endian, 64-bit pointers, and per-width alignments.
    /// NOTE(review): these appear to mirror LLVM's defaults for an
    /// unspecified data layout (e.g. i64 with ABI align 32, preferred 64)
    /// — confirm against the LLVM LangRef if changing any value.
    fn default() -> TargetDataLayout {
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: Align::from_bits(8, 8).unwrap(),
            i8_align: Align::from_bits(8, 8).unwrap(),
            i16_align: Align::from_bits(16, 16).unwrap(),
            i32_align: Align::from_bits(32, 32).unwrap(),
            i64_align: Align::from_bits(32, 64).unwrap(),
            f32_align: Align::from_bits(32, 32).unwrap(),
            f64_align: Align::from_bits(64, 64).unwrap(),
            pointer_size: Size::from_bits(64),
            pointer_align: Align::from_bits(64, 64).unwrap(),
            // ABI alignment 0 is treated as 1 byte by `Align::from_bits`.
            aggregate_align: Align::from_bits(0, 64).unwrap(),
            vector_align: vec![
                (Size::from_bits(64), Align::from_bits(64, 64).unwrap()),
                (Size::from_bits(128), Align::from_bits(128, 128).unwrap())
            ]
        }
    }
}
68
impl TargetDataLayout {
    /// Parse the target's LLVM "data-layout" string into a
    /// `TargetDataLayout`, starting from `Default` and overriding each
    /// component mentioned in the spec. Malformed entries are reported via
    /// `sess.err` and substituted with harmless fallback values; parsing
    /// continues so that all errors are reported in one pass.
    pub fn parse(sess: &Session) -> TargetDataLayout {
        // Parse a bit count from a string.
        let parse_bits = |s: &str, kind: &str, cause: &str| {
            s.parse::<u64>().unwrap_or_else(|err| {
                sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
                                  kind, s, cause, err));
                // Fallback so parsing can continue after reporting.
                0
            })
        };

        // Parse a size string.
        let size = |s: &str, cause: &str| {
            Size::from_bits(parse_bits(s, "size", cause))
        };

        // Parse an alignment string: `abi[:pref]`, both in bits.
        let align = |s: &[&str], cause: &str| {
            if s.is_empty() {
                sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause));
            }
            // NOTE(review): `sess.err` does not abort, so if `s` is empty the
            // `s[0]` below panics before the error is emitted — confirm
            // whether malformed built-in target specs can reach this.
            let abi = parse_bits(s[0], "alignment", cause);
            // Preferred alignment defaults to the ABI alignment when absent.
            let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause));
            Align::from_bits(abi, pref).unwrap_or_else(|err| {
                sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}",
                                  cause, err));
                Align::from_bits(8, 8).unwrap()
            })
        };

        let mut dl = TargetDataLayout::default();
        // Spec entries are `-`-separated; each entry's fields are `:`-separated.
        // (The `a..` "rest" slice-pattern syntax below is era-specific.)
        for spec in sess.target.target.data_layout.split("-") {
            match &spec.split(":").collect::<Vec<_>>()[..] {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                ["a", a..] => dl.aggregate_align = align(a, "a"),
                ["f32", a..] => dl.f32_align = align(a, "f32"),
                ["f64", a..] => dl.f64_align = align(a, "f64"),
                // Only address space 0 ("p" or "p0") is handled.
                [p @ "p", s, a..] | [p @ "p0", s, a..] => {
                    dl.pointer_size = size(s, p);
                    dl.pointer_align = align(a, p);
                }
                [s, a..] if s.starts_with("i") => {
                    let ty_align = match s[1..].parse::<u64>() {
                        // i1 shares the i8 alignment slot here; dl.i1_align is
                        // only set by `Default`.
                        Ok(1) => &mut dl.i8_align,
                        Ok(8) => &mut dl.i8_align,
                        Ok(16) => &mut dl.i16_align,
                        Ok(32) => &mut dl.i32_align,
                        Ok(64) => &mut dl.i64_align,
                        // Other integer widths are silently ignored.
                        Ok(_) => continue,
                        Err(_) => {
                            size(&s[1..], "i"); // For the user error.
                            continue;
                        }
                    };
                    *ty_align = align(a, s);
                }
                [s, a..] if s.starts_with("v") => {
                    let v_size = size(&s[1..], "v");
                    let a = align(a, s);
                    // Update an existing vector-alignment entry in place...
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Perform consistency checks against the Target information.
        let endian_str = match dl.endian {
            Endian::Little => "little",
            Endian::Big => "big"
        };
        if endian_str != sess.target.target.target_endian {
            sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
                               architecture is {}-endian, while \"target-endian\" is `{}`",
                              endian_str, sess.target.target.target_endian));
        }

        if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width {
            sess.err(&format!("inconsistent target specification: \"data-layout\" claims \
                               pointers are {}-bit, while \"target-pointer-width\" is `{}`",
                              dl.pointer_size.bits(), sess.target.target.target_pointer_width));
        }

        dl
    }

    /// Return exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            32 => 1 << 31,
            64 => 1 << 47,
            bits => bug!("obj_size_bound: unknown pointer bit size {}", bits)
        }
    }

    /// The integer type with the same width as a pointer on this target.
    /// Used e.g. for slice lengths and `isize`/`usize` discriminants.
    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            32 => I32,
            64 => I64,
            bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
        }
    }
}
187
/// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone)]
pub enum Endian {
    /// Least-significant byte first.
    Little,
    /// Most-significant byte first.
    Big
}
194
/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct Size {
    // Always below 1 << 61 so the size in *bits* fits in a u64
    // (enforced by `Size::from_bytes`).
    raw: u64
}
200
201 impl Size {
202 pub fn from_bits(bits: u64) -> Size {
203 Size::from_bytes((bits + 7) / 8)
204 }
205
206 pub fn from_bytes(bytes: u64) -> Size {
207 if bytes >= (1 << 61) {
208 bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes)
209 }
210 Size {
211 raw: bytes
212 }
213 }
214
215 pub fn bytes(self) -> u64 {
216 self.raw
217 }
218
219 pub fn bits(self) -> u64 {
220 self.bytes() * 8
221 }
222
223 pub fn abi_align(self, align: Align) -> Size {
224 let mask = align.abi() - 1;
225 Size::from_bytes((self.bytes() + mask) & !mask)
226 }
227
228 pub fn checked_add(self, offset: Size, dl: &TargetDataLayout) -> Option<Size> {
229 // Each Size is less than dl.obj_size_bound(), so the sum is
230 // also less than 1 << 62 (and therefore can't overflow).
231 let bytes = self.bytes() + offset.bytes();
232
233 if bytes < dl.obj_size_bound() {
234 Some(Size::from_bytes(bytes))
235 } else {
236 None
237 }
238 }
239
240 pub fn checked_mul(self, count: u64, dl: &TargetDataLayout) -> Option<Size> {
241 // Each Size is less than dl.obj_size_bound(), so the sum is
242 // also less than 1 << 62 (and therefore can't overflow).
243 match self.bytes().checked_mul(count) {
244 Some(bytes) if bytes < dl.obj_size_bound() => {
245 Some(Size::from_bytes(bytes))
246 }
247 _ => None
248 }
249 }
250 }
251
/// Alignment of a type in bytes, both ABI-mandated and preferred.
/// Since alignments are always powers of 2, we can pack both in one byte,
/// giving each a nibble (4 bits) for a maximum alignment of 2^15 = 32768.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Align {
    raw: u8
}

impl Align {
    /// Build an `Align` from ABI and preferred alignments given in bits,
    /// rounding partial bytes up.
    pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
        Align::from_bytes((abi + 7) / 8, (pref + 7) / 8)
    }

    /// Build an `Align` from ABI and preferred alignments given in bytes.
    /// Each must be a power of two no larger than 2^15; zero is accepted
    /// and treated as 1-byte alignment.
    pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
        // Encode a single alignment as its base-2 logarithm (one nibble).
        fn pack(align: u64) -> Result<u8, String> {
            // Treat an alignment of 0 bytes like 1-byte alignment.
            if align == 0 {
                return Ok(0);
            }
            let pow = align.trailing_zeros();
            if align != (1u64 << pow) {
                Err(format!("`{}` is not a power of 2", align))
            } else if pow > 0x0f {
                Err(format!("`{}` is too large", align))
            } else {
                Ok(pow as u8)
            }
        }

        // ABI alignment in the low nibble, preferred in the high nibble.
        let lo = pack(abi)?;
        let hi = pack(pref)?;
        Ok(Align { raw: lo | (hi << 4) })
    }

    /// The ABI-mandated alignment, in bytes.
    pub fn abi(self) -> u64 {
        1 << (self.raw & 0xf)
    }

    /// The preferred alignment, in bytes.
    pub fn pref(self) -> u64 {
        1 << (self.raw >> 4)
    }

    /// Component-wise minimum of both alignments.
    pub fn min(self, other: Align) -> Align {
        // The nibbles can be compared while still shifted into place,
        // since min/max of the exponents matches min/max of the raw bits.
        Align {
            raw: cmp::min(self.raw & 0x0f, other.raw & 0x0f)
               | cmp::min(self.raw & 0xf0, other.raw & 0xf0)
        }
    }

    /// Component-wise maximum of both alignments.
    pub fn max(self, other: Align) -> Align {
        Align {
            raw: cmp::max(self.raw & 0x0f, other.raw & 0x0f)
               | cmp::max(self.raw & 0xf0, other.raw & 0xf0)
        }
    }
}
316
/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum Integer {
    /// 1-bit integer (bool); stored and aligned as a byte.
    I1,
    I8,
    I16,
    I32,
    I64
}
326
impl Integer {
    /// Find the smallest Integer type which can represent the signed value.
    // NOTE: `...` is the era's inclusive-range pattern syntax (later `..=`).
    pub fn fit_signed(x: i64) -> Integer {
        match x {
            -0x0000_0001...0x0000_0000 => I1,
            -0x0000_0080...0x0000_007f => I8,
            -0x0000_8000...0x0000_7fff => I16,
            -0x8000_0000...0x7fff_ffff => I32,
            _ => I64
        }
    }

    /// Find the smallest Integer type which can represent the unsigned value.
    // The ranges overlap deliberately: match arms are tried in order, so
    // the first (smallest) range containing `x` wins.
    pub fn fit_unsigned(x: u64) -> Integer {
        match x {
            0...0x0000_0001 => I1,
            0...0x0000_00ff => I8,
            0...0x0000_ffff => I16,
            0...0xffff_ffff => I32,
            _ => I64
        }
    }

    /// Get the Integer type from an attr::IntType.
    /// Pointer-sized integers (`isize`/`usize`) resolve via the data layout.
    pub fn from_attr(dl: &TargetDataLayout, ity: attr::IntType) -> Integer {
        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::Is) | attr::UnsignedInt(UintTy::Us) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Find the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    /// N.B.: u64 values above i64::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    pub fn repr_discr(tcx: &TyCtxt, hint: attr::ReprAttr, min: i64, max: i64)
                      -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u64
        // which can fit all i64 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let at_least = match hint {
            attr::ReprInt(span, ity) => {
                // An explicit #[repr(intN)] must be able to hold the range;
                // if it can't, typechecking has failed us.
                let discr = Integer::from_attr(&tcx.data_layout, ity);
                let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
                if discr < fit {
                    span_bug!(span, "representation hint insufficient for discriminant range")
                }
                return (discr, ity.is_signed());
            }
            attr::ReprExtern => {
                match &tcx.sess.target.target.arch[..] {
                    // WARNING: the ARM EABI has two variants; the one corresponding
                    // to `at_least == I32` appears to be used on Linux and NetBSD,
                    // but some systems may use the variant corresponding to no
                    // lower bound. However, we don't run on those yet...?
                    // (Both arms currently yield I32; the split documents intent.)
                    "arm" => I32,
                    _ => I32,
                }
            }
            attr::ReprAny => I8,
            attr::ReprPacked => {
                bug!("Integer::repr_discr: found #[repr(packed)] on an enum");
            }
            attr::ReprSimd => {
                bug!("Integer::repr_discr: found #[repr(simd)] on an enum");
            }
        };

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
412
/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum Primitive {
    /// An integer of the given width (also used for bool and char).
    Int(Integer),
    F32,
    F64,
    /// A thin pointer; size and alignment come from the target data layout.
    Pointer
}
421
422 impl Primitive {
423 pub fn size(self, dl: &TargetDataLayout) -> Size {
424 match self {
425 Int(I1) | Int(I8) => Size::from_bits(8),
426 Int(I16) => Size::from_bits(16),
427 Int(I32) | F32 => Size::from_bits(32),
428 Int(I64) | F64 => Size::from_bits(64),
429 Pointer => dl.pointer_size
430 }
431 }
432
433 pub fn align(self, dl: &TargetDataLayout) -> Align {
434 match self {
435 Int(I1) => dl.i1_align,
436 Int(I8) => dl.i8_align,
437 Int(I16) => dl.i16_align,
438 Int(I32) => dl.i32_align,
439 Int(I64) => dl.i64_align,
440 F32 => dl.f32_align,
441 F64 => dl.f64_align,
442 Pointer => dl.pointer_align
443 }
444 }
445 }
446
/// Path through fields of nested structures.
/// Each element is a field index at one nesting level, outermost last
/// (callers build it by pushing while unwinding recursion).
// FIXME(eddyb) use small vector optimization for the common case.
pub type FieldPath = Vec<u32>;
450
/// A structure, a product type in ADT terms.
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct Struct {
    /// Overall alignment of the struct (max of field alignments unless packed).
    pub align: Align,

    /// If true, no alignment padding is used.
    pub packed: bool,

    /// If true, the size is exact, otherwise it's only a lower bound.
    pub sized: bool,

    /// Offsets for the first byte after each field.
    /// That is, field_offset(i) = offset_after_field[i - 1] and the
    /// whole structure's size is the last offset, excluding padding.
    // FIXME(eddyb) use small vector optimization for the common case.
    pub offset_after_field: Vec<Size>
}
468
impl Struct {
    /// Create an empty struct layout with no fields yet.
    /// Packed structs start at byte alignment; others at the target's
    /// aggregate alignment.
    pub fn new(dl: &TargetDataLayout, packed: bool) -> Struct {
        Struct {
            align: if packed { dl.i8_align } else { dl.aggregate_align },
            packed: packed,
            sized: true,
            offset_after_field: vec![]
        }
    }

    /// Extend the Struct with more fields, laying each out after the
    /// previous one (with alignment padding unless packed).
    /// `scapegoat` is the type blamed in errors/ICEs.
    /// An unsized field may only appear last; a later field is a bug.
    pub fn extend<'a, 'tcx, I>(&mut self, dl: &TargetDataLayout,
                               fields: I,
                               scapegoat: Ty<'tcx>)
                               -> Result<(), LayoutError<'tcx>>
        where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
        self.offset_after_field.reserve(fields.size_hint().0);

        for field in fields {
            if !self.sized {
                bug!("Struct::compute: field #{} of `{}` comes after unsized field",
                     self.offset_after_field.len(), scapegoat);
            }

            let field = field?;
            if field.is_unsized() {
                self.sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let mut offset = if !self.packed {
                // Pad the previous end offset up to this field's alignment,
                // and fold the field alignment into the struct alignment.
                let align = field.align(dl);
                self.align = self.align.max(align);
                self.offset_after_field.last_mut().map_or(Size::from_bytes(0), |last| {
                    *last = last.abi_align(align);
                    *last
                })
            } else {
                // Packed: the field starts right where the last one ended.
                self.offset_after_field.last().map_or(Size::from_bytes(0), |&last| last)
            };

            offset = offset.checked_add(field.size(dl), dl)
                           .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;

            self.offset_after_field.push(offset);
        }

        Ok(())
    }

    /// Get the size without trailing alignment padding.
    pub fn min_size(&self) -> Size {
        self.offset_after_field.last().map_or(Size::from_bytes(0), |&last| last)
    }

    /// Get the size with trailing alignment padding.
    pub fn stride(&self) -> Size {
        self.min_size().abi_align(self.align)
    }

    /// Determine whether a structure would be zero-sized, given its fields.
    /// Any unsized field or any field with a nonzero size makes it non-zero.
    pub fn would_be_zero_sized<'a, 'tcx, I>(dl: &TargetDataLayout, fields: I)
                                            -> Result<bool, LayoutError<'tcx>>
        where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
        for field in fields {
            let field = field?;
            if field.is_unsized() || field.size(dl).bytes() > 0 {
                return Ok(false);
            }
        }
        Ok(true)
    }

    /// Find the path leading to a non-zero leaf field, starting from
    /// the given type and recursing through aggregates.
    /// Returns `Ok(None)` when the type has no usable non-zero field.
    // FIXME(eddyb) track value ranges and traverse already optimized enums.
    pub fn non_zero_field_in_type<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
                                            ty: Ty<'tcx>)
                                            -> Result<Option<FieldPath>, LayoutError<'tcx>> {
        let tcx = infcx.tcx;
        match (ty.layout(infcx)?, &ty.sty) {
            // The type itself is already known non-zero: empty path.
            (&Scalar { non_zero: true, .. }, _) => Ok(Some(vec![])),
            // A non-zero fat pointer: the data half is the non-zero part.
            (&FatPointer { non_zero: true, .. }, _) => {
                Ok(Some(vec![FAT_PTR_ADDR as u32]))
            }

            // Is this the NonZero lang item wrapping a pointer or integer type?
            (&Univariant { non_zero: true, .. }, &ty::TyStruct(def, substs)) => {
                let fields = &def.struct_variant().fields;
                assert_eq!(fields.len(), 1);
                let ty = normalize_associated_type(infcx, fields[0].ty(tcx, substs));
                match *ty.layout(infcx)? {
                    // FIXME(eddyb) also allow floating-point types here.
                    Scalar { value: Int(_), non_zero: false } |
                    Scalar { value: Pointer, non_zero: false } => {
                        Ok(Some(vec![0]))
                    }
                    FatPointer { non_zero: false, .. } => {
                        Ok(Some(vec![FAT_PTR_ADDR as u32, 0]))
                    }
                    _ => Ok(None)
                }
            }

            // Perhaps one of the fields of this struct is non-zero
            // let's recurse and find out
            (_, &ty::TyStruct(def, substs)) => {
                Struct::non_zero_field_path(infcx, def.struct_variant().fields
                                                      .iter().map(|field| {
                    normalize_associated_type(infcx, field.ty(tcx, substs))
                }))
            }

            // Perhaps one of the upvars of this closure is non-zero
            // Let's recurse and find out!
            // (`box PAT` is the era's pattern syntax for matching through Box.)
            (_, &ty::TyClosure(_, box ty::ClosureSubsts { upvar_tys: ref tys, .. })) |
            // Can we use one of the fields in this tuple?
            (_, &ty::TyTuple(ref tys)) => {
                Struct::non_zero_field_path(infcx, tys.iter().cloned())
            }

            // Is this a fixed-size array of something non-zero
            // with at least one element?
            (_, &ty::TyArray(ety, d)) if d > 0 => {
                Struct::non_zero_field_path(infcx, Some(ety).into_iter())
            }

            // Anything else is not a non-zero type.
            _ => Ok(None)
        }
    }

    /// Find the path leading to a non-zero leaf field, starting from
    /// the given set of fields and recursing through aggregates.
    /// The returned path is innermost-first; callers reverse it for GEP use.
    pub fn non_zero_field_path<'a, 'tcx, I>(infcx: &InferCtxt<'a, 'tcx>,
                                            fields: I)
                                            -> Result<Option<FieldPath>, LayoutError<'tcx>>
        where I: Iterator<Item=Ty<'tcx>> {
        for (i, ty) in fields.enumerate() {
            if let Some(mut path) = Struct::non_zero_field_in_type(infcx, ty)? {
                // Record which field we descended through at this level.
                path.push(i as u32);
                return Ok(Some(path));
            }
        }
        Ok(None)
    }
}
616
/// The first half of a fat pointer.
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
626
/// Type layout, from which size and alignment can be cheaply computed.
/// For ADTs, it also includes field placement and enum optimizations.
/// NOTE: Because Layout is interned, redundant information should be
/// kept to a minimum, e.g. it includes no sub-component Ty or Layout.
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum Layout {
    /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr.
    Scalar {
        value: Primitive,
        /// If true, the value cannot represent a bit pattern of all zeroes.
        non_zero: bool
    },

    /// SIMD vectors, from TyStruct marked with #[repr(simd)].
    Vector {
        element: Primitive,
        count: u64
    },

    /// TyArray, TySlice or TyStr.
    Array {
        /// If true, the size is exact, otherwise it's only a lower bound
        /// (slices and str have statically-unknown length).
        sized: bool,
        align: Align,
        size: Size
    },

    /// TyRawPtr or TyRef with a !Sized pointee.
    FatPointer {
        /// The type of the second word: length for slices, vtable pointer
        /// for trait objects.
        metadata: Primitive,
        /// If true, the pointer cannot be null.
        non_zero: bool
    },

    // Remaining variants are all ADTs such as TyStruct, TyEnum or TyTuple.

    /// C-like enums; basically an integer.
    CEnum {
        discr: Integer,
        signed: bool,
        // Inclusive discriminant range.
        // If min > max, it represents min...u64::MAX followed by 0...max.
        // FIXME(eddyb) always use the shortest range, e.g. by finding
        // the largest space between two consecutive discriminants and
        // taking everything else as the (shortest) discriminant range.
        min: u64,
        max: u64
    },

    /// Single-case enums, and structs/tuples.
    Univariant {
        variant: Struct,
        // If true, the structure is NonZero.
        // FIXME(eddyb) use a newtype Layout kind for this.
        non_zero: bool
    },

    /// General-case enums: for each case there is a struct, and they
    /// all start with a field for the discriminant.
    General {
        discr: Integer,
        variants: Vec<Struct>,
        /// Size and alignment of the whole enum (max over variants,
        /// aligned up).
        size: Size,
        align: Align
    },

    /// Two cases distinguished by a nullable pointer: the case with discriminant
    /// `nndiscr` must have single field which is known to be nonnull due to its type.
    /// The other case is known to be zero sized. Hence we represent the enum
    /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
    /// otherwise it indicates the other case.
    ///
    /// For example, `std::option::Option` instantiated at a safe pointer type
    /// is represented such that `None` is a null pointer and `Some` is the
    /// identity function.
    RawNullablePointer {
        nndiscr: u64,
        value: Primitive
    },

    /// Two cases distinguished by a nullable pointer: the case with discriminant
    /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
    /// field is known to be nonnull due to its type; if that field is null, then
    /// it represents the other case, which is known to be zero sized.
    StructWrappedNullablePointer {
        nndiscr: u64,
        nonnull: Struct,
        // N.B. There is a 0 at the start, for LLVM GEP through a pointer.
        discrfield: FieldPath
    }
}
718
/// Reasons why computing a type's layout can fail.
#[derive(Copy, Clone, Debug)]
pub enum LayoutError<'tcx> {
    /// The layout of this type cannot be determined (e.g. an unsized tail
    /// that is neither a slice, str, nor trait object).
    Unknown(Ty<'tcx>),
    /// The type's size exceeds the target's object-size bound.
    SizeOverflow(Ty<'tcx>)
}
724
725 impl<'tcx> fmt::Display for LayoutError<'tcx> {
726 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
727 match *self {
728 LayoutError::Unknown(ty) => {
729 write!(f, "the type `{:?}` has an unknown layout", ty)
730 }
731 LayoutError::SizeOverflow(ty) => {
732 write!(f, "the type `{:?}` is too big for the current architecture", ty)
733 }
734 }
735 }
736 }
737
/// Helper function for normalizing associated types in an inference context.
/// Returns `ty` unchanged when it contains no projections; otherwise runs
/// trait selection to resolve them and fulfills the resulting obligations,
/// panicking (via `drain_fulfillment_cx_or_panic`) if any cannot be met.
fn normalize_associated_type<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
                                       ty: Ty<'tcx>)
                                       -> Ty<'tcx> {
    // Fast path: nothing to normalize.
    if !ty.has_projection_types() {
        return ty;
    }

    let mut selcx = traits::SelectionContext::new(infcx);
    let cause = traits::ObligationCause::dummy();
    let traits::Normalized { value: result, obligations } =
        traits::normalize(&mut selcx, cause, &ty);

    // Normalization can produce side obligations; prove them all now so the
    // returned type is fully resolved.
    let mut fulfill_cx = traits::FulfillmentContext::new();

    for obligation in obligations {
        fulfill_cx.register_predicate_obligation(infcx, obligation);
    }

    drain_fulfillment_cx_or_panic(DUMMY_SP, infcx, &mut fulfill_cx, &result)
}
759
760 impl Layout {
761 pub fn compute_uncached<'a, 'tcx>(ty: Ty<'tcx>,
762 infcx: &InferCtxt<'a, 'tcx>)
763 -> Result<Layout, LayoutError<'tcx>> {
764 let tcx = infcx.tcx;
765 let dl = &tcx.data_layout;
766 assert!(!ty.has_infer_types());
767
768 let layout = match ty.sty {
769 // Basic scalars.
770 ty::TyBool => Scalar { value: Int(I1), non_zero: false },
771 ty::TyChar => Scalar { value: Int(I32), non_zero: false },
772 ty::TyInt(ity) => {
773 Scalar {
774 value: Int(Integer::from_attr(dl, attr::SignedInt(ity))),
775 non_zero: false
776 }
777 }
778 ty::TyUint(ity) => {
779 Scalar {
780 value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))),
781 non_zero: false
782 }
783 }
784 ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false },
785 ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false },
786 ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true },
787
788 // Potentially-fat pointers.
789 ty::TyBox(pointee) |
790 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
791 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
792 let non_zero = !ty.is_unsafe_ptr();
793 if pointee.is_sized(&infcx.parameter_environment, DUMMY_SP) {
794 Scalar { value: Pointer, non_zero: non_zero }
795 } else {
796 let unsized_part = tcx.struct_tail(pointee);
797 let meta = match unsized_part.sty {
798 ty::TySlice(_) | ty::TyStr => {
799 Int(dl.ptr_sized_integer())
800 }
801 ty::TyTrait(_) => Pointer,
802 _ => return Err(LayoutError::Unknown(unsized_part))
803 };
804 FatPointer { metadata: meta, non_zero: non_zero }
805 }
806 }
807
808 // Arrays and slices.
809 ty::TyArray(element, count) => {
810 let element = element.layout(infcx)?;
811 Array {
812 sized: true,
813 align: element.align(dl),
814 size: element.size(dl).checked_mul(count as u64, dl)
815 .map_or(Err(LayoutError::SizeOverflow(ty)), Ok)?
816 }
817 }
818 ty::TySlice(element) => {
819 Array {
820 sized: false,
821 align: element.layout(infcx)?.align(dl),
822 size: Size::from_bytes(0)
823 }
824 }
825 ty::TyStr => {
826 Array {
827 sized: false,
828 align: dl.i8_align,
829 size: Size::from_bytes(0)
830 }
831 }
832
833 // Odd unit types.
834 ty::TyFnDef(..) => {
835 Univariant {
836 variant: Struct::new(dl, false),
837 non_zero: false
838 }
839 }
840 ty::TyTrait(_) => {
841 let mut unit = Struct::new(dl, false);
842 unit.sized = false;
843 Univariant { variant: unit, non_zero: false }
844 }
845
846 // Tuples.
847 ty::TyClosure(_, box ty::ClosureSubsts { upvar_tys: ref tys, .. }) |
848 ty::TyTuple(ref tys) => {
849 let mut st = Struct::new(dl, false);
850 st.extend(dl, tys.iter().map(|ty| ty.layout(infcx)), ty)?;
851 Univariant { variant: st, non_zero: false }
852 }
853
854 // ADTs.
855 ty::TyStruct(def, substs) => {
856 if ty.is_simd() {
857 // SIMD vector types.
858 let element = ty.simd_type(tcx);
859 match *element.layout(infcx)? {
860 Scalar { value, .. } => {
861 return Ok(Vector {
862 element: value,
863 count: ty.simd_size(tcx) as u64
864 });
865 }
866 _ => {
867 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
868 a non-machine element type `{}`",
869 ty, element));
870 }
871 }
872 }
873 let fields = def.struct_variant().fields.iter().map(|field| {
874 normalize_associated_type(infcx, field.ty(tcx, substs))
875 .layout(infcx)
876 });
877 let packed = tcx.lookup_packed(def.did);
878 let mut st = Struct::new(dl, packed);
879 st.extend(dl, fields, ty)?;
880
881 // FIXME(16758) don't add a drop flag to unsized structs, as it
882 // won't actually be in the location we say it is because it'll be after
883 // the unsized field. Several other pieces of code assume that the unsized
884 // field is definitely the last one.
885 if def.dtor_kind().has_drop_flag() &&
886 ty.is_sized(&infcx.parameter_environment, DUMMY_SP) {
887 st.extend(dl, Some(Ok(&Scalar {
888 value: Int(I8),
889 non_zero: false
890 })).into_iter(), ty)?;
891 }
892 Univariant {
893 variant: st,
894 non_zero: Some(def.did) == tcx.lang_items.non_zero()
895 }
896 }
897 ty::TyEnum(def, substs) => {
898 let hint = *tcx.lookup_repr_hints(def.did).get(0)
899 .unwrap_or(&attr::ReprAny);
900
901 let dtor = def.dtor_kind().has_drop_flag();
902 let drop_flag = if dtor {
903 Some(Scalar { value: Int(I8), non_zero: false })
904 } else {
905 None
906 };
907
908 if def.variants.is_empty() {
909 // Uninhabitable; represent as unit
910 // (Typechecking will reject discriminant-sizing attrs.)
911 assert_eq!(hint, attr::ReprAny);
912
913 let mut st = Struct::new(dl, false);
914 st.extend(dl, drop_flag.iter().map(Ok), ty)?;
915 return Ok(Univariant { variant: st, non_zero: false });
916 }
917
918 if !dtor && def.variants.iter().all(|v| v.fields.is_empty()) {
919 // All bodies empty -> intlike
920 let (mut min, mut max) = (i64::MAX, i64::MIN);
921 for v in &def.variants {
922 let x = v.disr_val.to_u64_unchecked() as i64;
923 if x < min { min = x; }
924 if x > max { max = x; }
925 }
926
927 let (discr, signed) = Integer::repr_discr(tcx, hint, min, max);
928 return Ok(CEnum {
929 discr: discr,
930 signed: signed,
931 min: min as u64,
932 max: max as u64
933 });
934 }
935
936 // Since there's at least one
937 // non-empty body, explicit discriminants should have
938 // been rejected by a checker before this point.
939 for (i, v) in def.variants.iter().enumerate() {
940 if i as u64 != v.disr_val.to_u64_unchecked() {
941 bug!("non-C-like enum {} with specified discriminants",
942 tcx.item_path_str(def.did));
943 }
944 }
945
946 if def.variants.len() == 1 {
947 // Equivalent to a struct/tuple/newtype.
948 // (Typechecking will reject discriminant-sizing attrs.)
949 assert_eq!(hint, attr::ReprAny);
950 let fields = def.variants[0].fields.iter().map(|field| {
951 normalize_associated_type(infcx, field.ty(tcx, substs))
952 .layout(infcx)
953 });
954 let mut st = Struct::new(dl, false);
955 st.extend(dl, fields.chain(drop_flag.iter().map(Ok)), ty)?;
956 return Ok(Univariant { variant: st, non_zero: false });
957 }
958
959 // Cache the substituted and normalized variant field types.
960 let variants = def.variants.iter().map(|v| {
961 v.fields.iter().map(|field| {
962 normalize_associated_type(infcx, field.ty(tcx, substs))
963 }).collect::<Vec<_>>()
964 }).collect::<Vec<_>>();
965
966 if !dtor && variants.len() == 2 && hint == attr::ReprAny {
967 // Nullable pointer optimization
968 for discr in 0..2 {
969 let other_fields = variants[1 - discr].iter().map(|ty| {
970 ty.layout(infcx)
971 });
972 if !Struct::would_be_zero_sized(dl, other_fields)? {
973 continue;
974 }
975 let path = Struct::non_zero_field_path(infcx,
976 variants[discr].iter().cloned())?;
977 let mut path = if let Some(p) = path { p } else { continue };
978
979 // FIXME(eddyb) should take advantage of a newtype.
980 if path == &[0] && variants[discr].len() == 1 {
981 match *variants[discr][0].layout(infcx)? {
982 Scalar { value, .. } => {
983 return Ok(RawNullablePointer {
984 nndiscr: discr as u64,
985 value: value
986 });
987 }
988 _ => {
989 bug!("Layout::compute: `{}`'s non-zero \
990 `{}` field not scalar?!",
991 ty, variants[discr][0])
992 }
993 }
994 }
995
996 path.push(0); // For GEP through a pointer.
997 path.reverse();
998 let mut st = Struct::new(dl, false);
999 st.extend(dl, variants[discr].iter().map(|ty| {
1000 ty.layout(infcx)
1001 }), ty)?;
1002 return Ok(StructWrappedNullablePointer {
1003 nndiscr: discr as u64,
1004 nonnull: st,
1005 discrfield: path
1006 });
1007 }
1008 }
1009
1010 // The general case.
1011 let discr_max = (variants.len() - 1) as i64;
1012 assert!(discr_max >= 0);
1013 let (min_ity, _) = Integer::repr_discr(tcx, hint, 0, discr_max);
1014
1015 let mut align = dl.aggregate_align;
1016 let mut size = Size::from_bytes(0);
1017
1018 // We're interested in the smallest alignment, so start large.
1019 let mut start_align = Align::from_bytes(256, 256).unwrap();
1020
1021 // Create the set of structs that represent each variant
1022 // Use the minimum integer type we figured out above
1023 let discr = Some(Scalar { value: Int(min_ity), non_zero: false });
1024 let mut variants = variants.into_iter().map(|fields| {
1025 let mut found_start = false;
1026 let fields = fields.into_iter().map(|field| {
1027 let field = field.layout(infcx)?;
1028 if !found_start {
1029 // Find the first field we can't move later
1030 // to make room for a larger discriminant.
1031 let field_align = field.align(dl);
1032 if field.size(dl).bytes() != 0 || field_align.abi() != 1 {
1033 start_align = start_align.min(field_align);
1034 found_start = true;
1035 }
1036 }
1037 Ok(field)
1038 });
1039 let mut st = Struct::new(dl, false);
1040 st.extend(dl, discr.iter().map(Ok).chain(fields)
1041 .chain(drop_flag.iter().map(Ok)), ty)?;
1042 size = cmp::max(size, st.min_size());
1043 align = align.max(st.align);
1044 Ok(st)
1045 }).collect::<Result<Vec<_>, _>>()?;
1046
1047 // Align the maximum variant size to the largest alignment.
1048 size = size.abi_align(align);
1049
1050 if size.bytes() >= dl.obj_size_bound() {
1051 return Err(LayoutError::SizeOverflow(ty));
1052 }
1053
1054 // Check to see if we should use a different type for the
1055 // discriminant. We can safely use a type with the same size
1056 // as the alignment of the first field of each variant.
1057 // We increase the size of the discriminant to avoid LLVM copying
1058 // padding when it doesn't need to. This normally causes unaligned
1059 // load/stores and excessive memcpy/memset operations. By using a
1060 // bigger integer size, LLVM can be sure about it's contents and
1061 // won't be so conservative.
1062
1063 // Use the initial field alignment
1064 let wanted = start_align.abi();
1065 let mut ity = min_ity;
1066 for &candidate in &[I16, I32, I64] {
1067 let ty = Int(candidate);
1068 if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() {
1069 ity = candidate;
1070 break;
1071 }
1072 }
1073
1074 // FIXME(eddyb) conservative only to avoid diverging from trans::adt.
1075 if align.abi() != start_align.abi() {
1076 ity = min_ity;
1077 }
1078
1079 // If the alignment is not larger than the chosen discriminant size,
1080 // don't use the alignment as the final size.
1081 if ity <= min_ity {
1082 ity = min_ity;
1083 } else {
1084 // Patch up the variants' first few fields.
1085 let old_ity_size = Int(min_ity).size(dl);
1086 let new_ity_size = Int(ity).size(dl);
1087 for variant in &mut variants {
1088 for offset in &mut variant.offset_after_field {
1089 if *offset > old_ity_size {
1090 break;
1091 }
1092 *offset = new_ity_size;
1093 }
1094 }
1095 }
1096
1097 General {
1098 discr: ity,
1099 variants: variants,
1100 size: size,
1101 align: align
1102 }
1103 }
1104
1105 // Types with no meaningful known layout.
1106 ty::TyProjection(_) | ty::TyParam(_) => {
1107 return Err(LayoutError::Unknown(ty));
1108 }
1109 ty::TyInfer(_) | ty::TyError => {
1110 bug!("Layout::compute: unexpected type `{}`", ty)
1111 }
1112 };
1113
1114 Ok(layout)
1115 }
1116
1117 /// Returns true if the layout corresponds to an unsized type.
1118 pub fn is_unsized(&self) -> bool {
1119 match *self {
1120 Scalar {..} | Vector {..} | FatPointer {..} |
1121 CEnum {..} | General {..} |
1122 RawNullablePointer {..} |
1123 StructWrappedNullablePointer {..} => false,
1124
1125 Array { sized, .. } |
1126 Univariant { variant: Struct { sized, .. }, .. } => !sized
1127 }
1128 }
1129
1130 pub fn size(&self, dl: &TargetDataLayout) -> Size {
1131 match *self {
1132 Scalar { value, .. } | RawNullablePointer { value, .. } => {
1133 value.size(dl)
1134 }
1135
1136 Vector { element, count } => {
1137 let elem_size = element.size(dl);
1138 let vec_size = match elem_size.checked_mul(count, dl) {
1139 Some(size) => size,
1140 None => bug!("Layout::size({:?}): {} * {} overflowed",
1141 self, elem_size.bytes(), count)
1142 };
1143 vec_size.abi_align(self.align(dl))
1144 }
1145
1146 FatPointer { metadata, .. } => {
1147 // Effectively a (ptr, meta) tuple.
1148 Pointer.size(dl).abi_align(metadata.align(dl))
1149 .checked_add(metadata.size(dl), dl).unwrap()
1150 .abi_align(self.align(dl))
1151 }
1152
1153 CEnum { discr, .. } => Int(discr).size(dl),
1154 Array { size, .. } | General { size, .. } => size,
1155
1156 Univariant { ref variant, .. } |
1157 StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1158 variant.stride()
1159 }
1160 }
1161 }
1162
1163 pub fn align(&self, dl: &TargetDataLayout) -> Align {
1164 match *self {
1165 Scalar { value, .. } | RawNullablePointer { value, .. } => {
1166 value.align(dl)
1167 }
1168
1169 Vector { element, count } => {
1170 let elem_size = element.size(dl);
1171 let vec_size = match elem_size.checked_mul(count, dl) {
1172 Some(size) => size,
1173 None => bug!("Layout::align({:?}): {} * {} overflowed",
1174 self, elem_size.bytes(), count)
1175 };
1176 for &(size, align) in &dl.vector_align {
1177 if size == vec_size {
1178 return align;
1179 }
1180 }
1181 // Default to natural alignment, which is what LLVM does.
1182 // That is, use the size, rounded up to a power of 2.
1183 let align = vec_size.bytes().next_power_of_two();
1184 Align::from_bytes(align, align).unwrap()
1185 }
1186
1187 FatPointer { metadata, .. } => {
1188 // Effectively a (ptr, meta) tuple.
1189 Pointer.align(dl).max(metadata.align(dl))
1190 }
1191
1192 CEnum { discr, .. } => Int(discr).align(dl),
1193 Array { align, .. } | General { align, .. } => align,
1194
1195 Univariant { ref variant, .. } |
1196 StructWrappedNullablePointer { nonnull: ref variant, .. } => {
1197 variant.align
1198 }
1199 }
1200 }
1201 }
1202
/// Type size "skeleton", i.e. the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common usecases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
1222
impl<'tcx> SizeSkeleton<'tcx> {
    /// Computes the size skeleton of `ty`: either a statically known size,
    /// or evidence that the value is a (possibly non-null, possibly fat)
    /// pointer whose size is determined solely by the `tail` type.
    ///
    /// Any type that is not one of the allowed shapes (a pointer, a
    /// pointer newtype, or a two-variant enum eligible for the nullable
    /// pointer optimization) yields the error from the failed static
    /// layout computation.
    pub fn compute<'a>(ty: Ty<'tcx>, infcx: &InferCtxt<'a, 'tcx>)
                       -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        let tcx = infcx.tcx;
        assert!(!ty.has_infer_types());

        // First try computing a static layout.
        // On success the skeleton is just the concrete size; on failure
        // keep the error around, both to report it verbatim on the
        // fallthrough paths below and to feed it to recursive calls.
        let err = match ty.layout(infcx) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size(&tcx.data_layout)));
            }
            Err(err) => err
        };

        match ty.sty {
            // Thin or fat pointers: the static layout can only have
            // failed because the pointee's tail is generic.
            ty::TyBox(pointee) |
            ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                // Raw pointers may be null; `Box` and references may not.
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::TyParam(_) | ty::TyProjection(_) => {
                        assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero,
                            // Erase regions so that skeleton equality
                            // (see `same_size`) ignores lifetimes.
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // If there's a drop flag, it can't be just a pointer.
                if def.dtor_kind().has_drop_flag() {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                // Returns `Ok(Some(_))` when the variant holds exactly one
                // pointer skeleton and otherwise only zero-sized fields,
                // `Ok(None)` when it is entirely zero-sized, and the
                // captured layout error for anything else.
                let zero_or_ptr_variant = |i: usize| {
                    let fields = def.variants[i].fields.iter().map(|field| {
                        let ty = normalize_associated_type(infcx, &field.ty(tcx, substs));
                        SizeSkeleton::compute(ty, infcx)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                // A second pointer field disqualifies the
                                // variant.
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // Wrapping in the `NonZero` lang item upgrades
                            // a nullable pointer to a non-null one.
                            non_zero: non_zero ||
                                Some(def.did) == tcx.lang_items.non_zero(),
                            tail: tail
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization: one variant is a
                // non-null pointer, the other is zero-sized, so the enum
                // is pointer-sized but may use the null value itself.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail: tail
                        })
                    }
                    _ => Err(err)
                }
            }

            _ => Err(err)
        }
    }

    /// Returns true if two skeletons are guaranteed to describe values of
    /// the same size: equal known sizes, or pointers with the same
    /// (region-erased) tail type. Conservative — `false` means "unknown",
    /// not "different".
    pub fn same_size(self, other: SizeSkeleton) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}