// src/librustc_target/abi/mod.rs (rustc 1.41.1)

pub use Integer::*;
pub use Primitive::*;

use crate::spec::Target;

use std::ops::{Add, Deref, Sub, Mul, AddAssign, Range, RangeInclusive};

use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable_Generic;
use syntax_pos::Span;

pub mod call;

/// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: u32,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: 0,
        }
    }
}

impl TargetDataLayout {
    pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &str, cause: &str| {
            s.parse::<u32>().map_err(|err| {
                format!("invalid address space `{}` for `{}` in \"data-layout\": {}",
                        s, cause, err)
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &str, kind: &str, cause: &str| {
            s.parse::<u64>().map_err(|err| {
                format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
                        kind, s, cause, err)
            })
        };

        // Parse a size string.
        let size = |s: &str, cause: &str| {
            parse_bits(s, "size", cause).map(Size::from_bits)
        };

        // Parse an alignment string.
        let align = |s: &[&str], cause: &str| {
            if s.is_empty() {
                return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits).map_err(|err| {
                    format!("invalid alignment for `{}` in \"data-layout\": {}",
                            cause, err)
                })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign {
                abi: align_from_bits(abi)?,
                pref: align_from_bits(pref)?,
            })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in target.data_layout.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with("P") => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => {
                    dl.aggregate_align = align(a, "a")?
                }
                ["f32", ref a @ ..] => {
                    dl.f32_align = align(a, "f32")?
                }
                ["f64", ref a @ ..] => {
                    dl.f64_align = align(a, "f64")?
                }
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = size(s, p)?;
                    dl.pointer_align = align(a, p)?;
                }
                [s, ref a @ ..] if s.starts_with("i") => {
                    let bits = match s[1..].parse::<u64>() {
                        Ok(bits) => bits,
                        Err(_) => {
                            size(&s[1..], "i")?; // For the user error.
                            continue;
                        }
                    };
                    let a = align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, ref a @ ..] if s.starts_with("v") => {
                    let v_size = size(&s[1..], "v")?;
                    let a = align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Perform consistency checks against the Target information.
        let endian_str = match dl.endian {
            Endian::Little => "little",
            Endian::Big => "big"
        };
        if endian_str != target.target_endian {
            return Err(format!("inconsistent target specification: \"data-layout\" claims \
                                architecture is {}-endian, while \"target-endian\" is `{}`",
                               endian_str, target.target_endian));
        }

        if dl.pointer_size.bits().to_string() != target.target_pointer_width {
            return Err(format!("inconsistent target specification: \"data-layout\" claims \
                                pointers are {}-bit, while \"target-pointer-width\" is `{}`",
                               dl.pointer_size.bits(), target.target_pointer_width));
        }

        Ok(dl)
    }

    /// Returns the exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, and allows
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits)
        }
    }

    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits)
        }
    }

    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
}
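
// Illustrative sketch (an addition, not part of the original module): how the
// default 64-bit data layout answers the queries above, using only the public
// API defined in this file.
#[cfg(test)]
mod target_data_layout_examples {
    use super::*;

    #[test]
    fn default_layout_queries() {
        let dl = TargetDataLayout::default();

        // The default layout has 64-bit pointers, so object sizes are bounded
        // by 1 << 47 and the pointer-sized integer is `I64`.
        assert_eq!(dl.obj_size_bound(), 1 << 47);
        assert_eq!(dl.ptr_sized_integer(), I64);

        // A 96-bit vector has no explicit alignment entry, so it falls back to
        // natural alignment: 12 bytes rounded up to the next power of two (16).
        assert_eq!(dl.vector_align(Size::from_bits(96)).abi.bytes(), 16);
    }
}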

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

/// Endianness of the target, which must match `cfg(target_endian)`.
#[derive(Copy, Clone, PartialEq)]
pub enum Endian {
    Little,
    Big
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
#[derive(HashStable_Generic)]
pub struct Size {
    raw: u64
}

impl Size {
    pub const ZERO: Size = Self::from_bytes(0);

    #[inline]
    pub fn from_bits(bits: u64) -> Size {
        // Avoid potential overflow from `bits + 7`.
        Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
    }

    #[inline]
    pub const fn from_bytes(bytes: u64) -> Size {
        Size {
            raw: bytes
        }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes().checked_mul(8).unwrap_or_else(|| {
            panic!("Size::bits: {} bytes in bits doesn't fit in u64", self.bytes())
        })
    }

    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() {
            Some(Size::from_bytes(bytes))
        } else {
            None
        }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() {
            Some(Size::from_bytes(bytes))
        } else {
            None
        }
    }
}
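
// Illustrative sketch (an addition, not part of the original module): how
// `Size::from_bits` rounds partial bytes up and how `align_to` pads a size.
#[cfg(test)]
mod size_examples {
    use super::*;

    #[test]
    fn bit_rounding_and_alignment() {
        // 1 bit still occupies a whole byte; 12 bits occupy two bytes.
        assert_eq!(Size::from_bits(1).bytes(), 1);
        assert_eq!(Size::from_bits(12).bytes(), 2);

        // Padding 5 bytes to a 4-byte alignment yields 8 bytes.
        let four = Align::from_bytes(4).unwrap();
        assert_eq!(Size::from_bytes(5).align_to(four), Size::from_bytes(8));
        assert!(Size::from_bytes(8).is_aligned(four));
    }
}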

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => {
                panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count)
            }
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
#[derive(HashStable_Generic)]
pub struct Align {
    pow2: u8,
}

impl Align {
    pub fn from_bits(bits: u64) -> Result<Align, String> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align { pow2: 0 });
        }

        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(format!("`{}` is not a power of 2", align));
        }
        if pow2 > 29 {
            return Err(format!("`{}` is too large", align));
        }

        Ok(Align { pow2 })
    }

    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
    pub fn max_for_offset(offset: Size) -> Align {
        Align {
            pow2: offset.bytes().trailing_zeros() as u8,
        }
    }

    /// Lower the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
}
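
// Illustrative sketch (an addition, not part of the original module): the
// power-of-two invariant of `Align` and how offsets restrict alignment.
#[cfg(test)]
mod align_examples {
    use super::*;

    #[test]
    fn power_of_two_and_offsets() {
        // Only powers of two are valid alignments.
        assert_eq!(Align::from_bytes(8).unwrap().bits(), 64);
        assert!(Align::from_bytes(12).is_err());

        // An offset of 24 bytes is a multiple of 8 but not of 16, so the best
        // alignment deducible from it is 8 bytes.
        assert_eq!(Align::max_for_offset(Size::from_bytes(24)).bytes(), 8);

        // A 16-byte alignment restricted by that offset drops to 8 bytes.
        let sixteen = Align::from_bytes(16).unwrap();
        assert_eq!(sixteen.restrict_for_offset(Size::from_bytes(24)).bytes(), 8);
    }
}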

/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
#[derive(HashStable_Generic)]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign {
            abi: align,
            pref: align,
        }
    }

    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign {
            abi: self.abi.min(other.abi),
            pref: self.pref.min(other.pref),
        }
    }

    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign {
            abi: self.abi.max(other.abi),
            pref: self.pref.max(other.pref),
        }
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    pub fn size(self) -> Size {
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Finds the smallest `Integer` type which can represent the signed value.
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128
        }
    }

    /// Finds the smallest `Integer` type which can represent the unsigned value.
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        for &candidate in &[I8, I16, I32, I64, I128] {
            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
                return Some(candidate);
            }
        }
        None
    }

    /// Finds the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for &candidate in &[I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }
}
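
// Illustrative sketch (an addition, not part of the original module): which
// `Integer` the `fit_*` helpers pick around the 8-bit boundaries.
#[cfg(test)]
mod integer_examples {
    use super::*;

    #[test]
    fn fitting_values() {
        // -128..=127 fits in `I8`; one step outside needs `I16`.
        assert_eq!(Integer::fit_signed(-128), I8);
        assert_eq!(Integer::fit_signed(128), I16);

        // 255 fits in `I8`; 256 needs `I16`; anything above u64::MAX is `I128`.
        assert_eq!(Integer::fit_unsigned(255), I8);
        assert_eq!(Integer::fit_unsigned(256), I16);
        assert_eq!(Integer::fit_unsigned(1 << 64), I128);
    }
}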

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            Pointer => dl.pointer_size
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align
        }
    }

    pub fn is_float(self) -> bool {
        match self {
            F32 | F64 => true,
            _ => false
        }
    }

    pub fn is_int(self) -> bool {
        match self {
            Int(..) => true,
            _ => false,
        }
    }
}

/// Information about one scalar component of a Rust type.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub struct Scalar {
    pub value: Primitive,

    /// Inclusive wrap-around range of valid values, that is, if
    /// start > end, it represents `start..=max_value()`,
    /// followed by `0..=end`.
    ///
    /// That is, for an i8 primitive, a range of `254..=2` means the
    /// following sequence:
    ///
    ///    254 (-2), 255 (-1), 0, 1, 2
    ///
    /// This is intended specifically to mirror LLVM's `!range` metadata
    /// semantics.
    // FIXME(eddyb) always use the shortest range, e.g., by finding
    // the largest space between two consecutive valid values and
    // taking everything else as the (shortest) valid range.
    pub valid_range: RangeInclusive<u128>,
}

impl Scalar {
    pub fn is_bool(&self) -> bool {
        if let Int(I8, _) = self.value {
            self.valid_range == (0..=1)
        } else {
            false
        }
    }

    /// Returns the valid range as an `x..y` range.
    ///
    /// If `x` and `y` are equal, the range is full, not empty.
    pub fn valid_range_exclusive<C: HasDataLayout>(&self, cx: &C) -> Range<u128> {
        // For a (max) value of -1, max will be `-1 as usize`, which overflows.
        // However, that is fine here (it would still represent the full range),
        // i.e., if the range is everything.
        let bits = self.value.size(cx).bits();
        assert!(bits <= 128);
        let mask = !0u128 >> (128 - bits);
        let start = *self.valid_range.start();
        let end = *self.valid_range.end();
        assert_eq!(start, start & mask);
        assert_eq!(end, end & mask);
        start..(end.wrapping_add(1) & mask)
    }
}
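
// Illustrative sketch (an addition, not part of the original module): a
// wrap-around `valid_range` such as `254..=2` on an 8-bit scalar, and how
// `valid_range_exclusive` expresses it; the full `0..=255` range maps to the
// "full, not empty" `0..0` case described above.
#[cfg(test)]
mod scalar_examples {
    use super::*;

    #[test]
    fn wrapping_valid_ranges() {
        let dl = TargetDataLayout::default();

        let wrapping = Scalar { value: Int(I8, false), valid_range: 254..=2 };
        assert!(!wrapping.is_bool());
        assert_eq!(wrapping.valid_range_exclusive(&dl), 254..3);

        let full = Scalar { value: Int(I8, false), valid_range: 0..=255 };
        assert_eq!(full.valid_range_exclusive(&dl), 0..0);
    }
}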

/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum FieldPlacement {
    /// All fields start at no offset. The `usize` is the field count.
    ///
    /// In the case of primitives the number of fields is `0`.
    Union(usize),

    /// Array/vector-like placement, with all fields of identical types.
    Array {
        stride: Size,
        count: u64
    },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: Vec<Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: Vec<u32>
    }
}

impl FieldPlacement {
    pub fn count(&self) -> usize {
        match *self {
            FieldPlacement::Union(count) => count,
            FieldPlacement::Array { count, .. } => {
                let usize_count = count as usize;
                assert_eq!(usize_count as u64, count);
                usize_count
            }
            FieldPlacement::Arbitrary { ref offsets, .. } => offsets.len()
        }
    }

    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldPlacement::Union(_) => Size::ZERO,
            FieldPlacement::Array { stride, count } => {
                let i = i as u64;
                assert!(i < count);
                stride * i
            }
            FieldPlacement::Arbitrary { ref offsets, .. } => offsets[i]
        }
    }

    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldPlacement::Union(_) |
            FieldPlacement::Array { .. } => i,
            FieldPlacement::Arbitrary { ref memory_index, .. } => {
                let r = memory_index[i];
                assert_eq!(r as usize as u32, r);
                r as usize
            }
        }
    }

    /// Gets source indices of the fields by increasing offsets.
    #[inline]
    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item=usize>+'a {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldPlacement::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for i in 0..self.count() {
                    inverse_small[memory_index[i] as usize] = i as u8;
                }
            } else {
                inverse_big = vec![0; self.count()];
                for i in 0..self.count() {
                    inverse_big[memory_index[i] as usize] = i as u32;
                }
            }
        }

        (0..self.count()).map(move |i| {
            match *self {
                FieldPlacement::Union(_) |
                FieldPlacement::Array { .. } => i,
                FieldPlacement::Arbitrary { .. } => {
                    if use_small { inverse_small[i] as usize }
                    else { inverse_big[i] as usize }
                }
            }
        })
    }
}
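
// Illustrative sketch (an addition, not part of the original module): a two
// field struct whose fields were reordered, showing how `offset`,
// `memory_index` and `index_by_increasing_offset` relate. The offsets are
// hypothetical values chosen only for the example.
#[cfg(test)]
mod field_placement_examples {
    use super::*;

    #[test]
    fn reordered_fields() {
        // Source field 0 was moved to memory position 1 (offset 4) and
        // source field 1 to memory position 0 (offset 0).
        let fields = FieldPlacement::Arbitrary {
            offsets: vec![Size::from_bytes(4), Size::from_bytes(0)],
            memory_index: vec![1, 0],
        };

        assert_eq!(fields.count(), 2);
        assert_eq!(fields.offset(0), Size::from_bytes(4));
        assert_eq!(fields.memory_index(0), 1);

        // Walking the fields by increasing offset visits source index 1 first.
        let order: Vec<usize> = fields.index_by_increasing_offset().collect();
        assert_eq!(order, vec![1, 0]);
    }
}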

/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64
    },
    Aggregate {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    }
}

impl Abi {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        match *self {
            Abi::Uninhabited |
            Abi::Scalar(_) |
            Abi::ScalarPair(..) |
            Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => !sized
        }
    }

    /// Returns `true` if this is a single signed integer scalar.
    pub fn is_signed(&self) -> bool {
        match *self {
            Abi::Scalar(ref scal) => match scal.value {
                Primitive::Int(_, signed) => signed,
                _ => false,
            },
            _ => false,
        }
    }

    /// Returns `true` if this is an uninhabited type.
    pub fn is_uninhabited(&self) -> bool {
        match *self {
            Abi::Uninhabited => true,
            _ => false,
        }
    }

    /// Returns `true` if this is a scalar type.
    pub fn is_scalar(&self) -> bool {
        match *self {
            Abi::Scalar(_) => true,
            _ => false,
        }
    }
}

rustc_index::newtype_index! {
    pub struct VariantIdx {
        derive [HashStable_Generic]
    }
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Variants {
    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single {
        index: VariantIdx,
    },

    /// Enum-likes with more than one inhabited variant: for each case there is
    /// a struct, and they all have space reserved for the discriminant.
    /// For enums this is the sole field of the layout.
    Multiple {
        discr: Scalar,
        discr_kind: DiscriminantKind,
        discr_index: usize,
        variants: IndexVec<VariantIdx, LayoutDetails>,
    },
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum DiscriminantKind {
    /// Integer tag holding the discriminant value itself.
    Tag,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// the variant `dataful_variant` contains a niche at an arbitrary
    /// offset (field `discr_index` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    Niche {
        dataful_variant: VariantIdx,
        niche_variants: RangeInclusive<VariantIdx>,
        niche_start: u128,
    },
}
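
// Illustrative sketch (an addition, not part of the original module): the
// niche-encoding formula above evaluated with plain integers. The concrete
// values are hypothetical and chosen only to show the arithmetic.
#[cfg(test)]
mod discriminant_kind_examples {
    #[test]
    fn niche_encoding_formula() {
        // Suppose the niched variants have indices 1..=2 and the niche itself
        // starts at value 254 (e.g. two spare values of a `u8` tag).
        let niche_variants_start: u128 = 1;
        let niche_start: u128 = 254;

        // A variant with discriminant 2 is then stored as (2 - 1) + 254 = 255.
        let d: u128 = 2;
        assert_eq!((d - niche_variants_start).wrapping_add(niche_start), 255);
    }
}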

#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche {
    pub offset: Size,
    pub scalar: Scalar,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let niche = Niche {
            offset,
            scalar,
        };
        if niche.available(cx) > 0 {
            Some(niche)
        } else {
            None
        }
    }

    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);

        // Find out how many values are outside the valid range.
        let niche = v.end().wrapping_add(1)..*v.start();
        niche.end.wrapping_sub(niche.start) & max_value
    }

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);

        if count > max_value {
            return None;
        }

        // Compute the range of invalid values being reserved.
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;

        // If the `end` of our range is inside the valid range,
        // then we ran out of invalid values.
        // FIXME(eddyb) abstract this with a wraparound range type.
        let valid_range_contains = |x| {
            if v.start() <= v.end() {
                *v.start() <= x && x <= *v.end()
            } else {
                *v.start() <= x || x <= *v.end()
            }
        };
        if valid_range_contains(end) {
            return None;
        }

        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
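
// Illustrative sketch (an addition, not part of the original module): a
// `bool`-like niche (valid values 0 and 1 in a byte) leaves 254 spare values,
// and reserving two of them extends the valid range to `0..=3`.
#[cfg(test)]
mod niche_examples {
    use super::*;

    #[test]
    fn available_and_reserve() {
        let dl = TargetDataLayout::default();
        let bool_like = Scalar { value: Int(I8, false), valid_range: 0..=1 };
        let niche = Niche::from_scalar(&dl, Size::ZERO, bool_like).unwrap();

        assert_eq!(niche.available(&dl), 254);

        let (reserved_start, scalar) = niche.reserve(&dl, 2).unwrap();
        assert_eq!(reserved_start, 2);
        assert_eq!(scalar.valid_range, 0..=3);
    }
}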

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct LayoutDetails {
    pub variants: Variants,
    pub fields: FieldPlacement,
    pub abi: Abi,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,

    pub align: AbiAndPrefAlign,
    pub size: Size
}

impl LayoutDetails {
    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar.clone());
        let size = scalar.value.size(cx);
        let align = scalar.value.align(cx);
        LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Union(0),
            abi: Abi::Scalar(scalar),
            largest_niche,
            size,
            align,
        }
    }
}

/// The details of the layout of a type, alongside the type itself.
/// Provides various type traversal APIs (e.g., recursing into fields).
///
/// Note that the details are NOT guaranteed to always be identical
/// to those obtained from `layout_of(ty)`, as we need to produce
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct TyLayout<'a, Ty> {
    pub ty: Ty,
    pub details: &'a LayoutDetails
}

impl<'a, Ty> Deref for TyLayout<'a, Ty> {
    type Target = &'a LayoutDetails;
    fn deref(&self) -> &&'a LayoutDetails {
        &self.details
    }
}

pub trait LayoutOf {
    type Ty;
    type TyLayout;

    fn layout_of(&self, ty: Self::Ty) -> Self::TyLayout;
    fn spanned_layout_of(&self, ty: Self::Ty, _span: Span) -> Self::TyLayout {
        self.layout_of(ty)
    }
}

#[derive(Copy, Clone, PartialEq, Eq)]
pub enum PointerKind {
    /// Most general case; we know of no restrictions to tell LLVM.
    Shared,

    /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`.
    Frozen,

    /// `&mut T`, when we know `noalias` is safe for LLVM.
    UniqueBorrowed,

    /// `Box<T>`; unlike `UniqueBorrowed`, it also has `noalias` on returns.
    UniqueOwned
}

#[derive(Copy, Clone)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
}

pub trait TyLayoutMethods<'a, C: LayoutOf<Ty = Self>>: Sized {
    fn for_variant(
        this: TyLayout<'a, Self>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyLayout<'a, Self>;
    fn field(this: TyLayout<'a, Self>, cx: &C, i: usize) -> C::TyLayout;
    fn pointee_info_at(
        this: TyLayout<'a, Self>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo>;
}

impl<'a, Ty> TyLayout<'a, Ty> {
    pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
    where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
        Ty::for_variant(self, cx, variant_index)
    }
    pub fn field<C>(self, cx: &C, i: usize) -> C::TyLayout
    where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
        Ty::field(self, cx, i)
    }
    pub fn pointee_info_at<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
    where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
        Ty::pointee_info_at(self, cx, offset)
    }
}

impl<'a, Ty> TyLayout<'a, Ty> {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

    /// Returns `true` if the type is a ZST and not unsized.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) |
            Abi::ScalarPair(..) |
            Abi::Vector { .. } => false,
            Abi::Uninhabited => self.size.bytes() == 0,
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0
        }
    }
}