pub use Integer::*;
pub use Primitive::*;

use crate::spec::Target;

use std::ops::{Add, Deref, Sub, Mul, AddAssign, Range, RangeInclusive};

use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable_Generic;
use syntax_pos::Span;

pub mod call;

/// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: u32,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: 0,
        }
    }
}

impl TargetDataLayout {
    pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &str, cause: &str| {
            s.parse::<u32>().map_err(|err| {
                format!("invalid address space `{}` for `{}` in \"data-layout\": {}",
                        s, cause, err)
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &str, kind: &str, cause: &str| {
            s.parse::<u64>().map_err(|err| {
                format!("invalid {} `{}` for `{}` in \"data-layout\": {}",
                        kind, s, cause, err)
            })
        };

        // Parse a size string.
        let size = |s: &str, cause: &str| {
            parse_bits(s, "size", cause).map(Size::from_bits)
        };

        // Parse an alignment string.
        let align = |s: &[&str], cause: &str| {
            if s.is_empty() {
                return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits).map_err(|err| {
                    format!("invalid alignment for `{}` in \"data-layout\": {}",
                            cause, err)
                })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign {
                abi: align_from_bits(abi)?,
                pref: align_from_bits(pref)?,
            })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in target.data_layout.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with("P") => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => {
                    dl.aggregate_align = align(a, "a")?
                }
                ["f32", ref a @ ..] => {
                    dl.f32_align = align(a, "f32")?
                }
                ["f64", ref a @ ..] => {
                    dl.f64_align = align(a, "f64")?
                }
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = size(s, p)?;
                    dl.pointer_align = align(a, p)?;
                }
                [s, ref a @ ..] if s.starts_with("i") => {
                    let bits = match s[1..].parse::<u64>() {
                        Ok(bits) => bits,
                        Err(_) => {
                            size(&s[1..], "i")?; // For the user error.
                            continue;
                        }
                    };
                    let a = align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // The default alignment for i128 is decided by taking the
                        // alignment of the largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, ref a @ ..] if s.starts_with("v") => {
                    let v_size = size(&s[1..], "v")?;
                    let a = align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Perform consistency checks against the Target information.
        let endian_str = match dl.endian {
            Endian::Little => "little",
            Endian::Big => "big"
        };
        if endian_str != target.target_endian {
            return Err(format!("inconsistent target specification: \"data-layout\" claims \
                                architecture is {}-endian, while \"target-endian\" is `{}`",
                               endian_str, target.target_endian));
        }

        if dl.pointer_size.bits().to_string() != target.target_pointer_width {
            return Err(format!("inconsistent target specification: \"data-layout\" claims \
                                pointers are {}-bit, while \"target-pointer-width\" is `{}`",
                               dl.pointer_size.bits(), target.target_pointer_width));
        }

        Ok(dl)
    }

    /// Returns the exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits)
        }
    }

    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits)
        }
    }

    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
}
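
// A minimal illustrative sketch (not part of the original source): how the
// default layout answers the queries above. In particular, vector sizes
// absent from `vector_align` fall back to natural alignment, i.e. the size
// rounded up to a power of two.
#[cfg(test)]
mod data_layout_examples {
    use super::*;

    #[test]
    fn default_layout_queries() {
        let dl = TargetDataLayout::default();
        // 64-bit pointers bound object sizes at 1 << 47.
        assert_eq!(dl.obj_size_bound(), 1 << 47);
        assert_eq!(dl.ptr_sized_integer(), I64);
        // 96-bit vectors are not in `vector_align`, so they get natural
        // alignment: 12 bytes rounded up to 16.
        assert_eq!(dl.vector_align(Size::from_bits(96)).abi.bytes(), 16);
    }
}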

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

/// Endianness of the target, which must match `cfg(target_endian)`.
#[derive(Copy, Clone, PartialEq)]
pub enum Endian {
    Little,
    Big
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
#[derive(HashStable_Generic)]
pub struct Size {
    raw: u64
}

impl Size {
    pub const ZERO: Size = Self::from_bytes(0);

    #[inline]
    pub fn from_bits(bits: u64) -> Size {
        // Avoid potential overflow from `bits + 7`.
        Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
    }
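
    // Worked example (illustrative, not part of the original source):
    // from_bits(9) = 9/8 + ((9%8) + 7)/8 = 1 + 1 = 2 bytes, i.e. bit counts
    // round up to whole bytes. The naive (bits + 7)/8 computes the same
    // value, but `bits + 7` could overflow for counts near u64::max_value().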

    #[inline]
    pub const fn from_bytes(bytes: u64) -> Size {
        Size {
            raw: bytes
        }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes().checked_mul(8).unwrap_or_else(|| {
            panic!("Size::bits: {} bytes in bits doesn't fit in u64", self.bytes())
        })
    }

    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }
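
    // Worked example (illustrative): aligning 5 bytes to a 4-byte alignment
    // gives mask = 3 and (5 + 3) & !3 = 8. The bit trick is valid because
    // `Align` guarantees `align.bytes()` is a power of two.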

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() {
            Some(Size::from_bytes(bytes))
        } else {
            None
        }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() {
            Some(Size::from_bytes(bytes))
        } else {
            None
        }
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => {
                panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count)
            }
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}
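
// Illustrative sketch (not part of the original source) of the `Size`
// arithmetic above. Note that `checked_add` is bounded by the target's
// object-size limit, not merely by `u64` overflow.
#[cfg(test)]
mod size_arith_examples {
    use super::*;

    #[test]
    fn size_arithmetic() {
        let a = Size::from_bytes(4);
        let b = Size::from_bytes(8);
        assert_eq!((a + b).bytes(), 12);
        assert_eq!((b - a).bytes(), 4);
        assert_eq!((3 * a).bytes(), 12);

        // The default layout has 64-bit pointers, so the bound is 1 << 47.
        let dl = TargetDataLayout::default();
        assert!(Size::from_bytes(1 << 46).checked_add(Size::from_bytes(1), &dl).is_some());
        assert_eq!(Size::from_bytes(1 << 47).checked_add(Size::ZERO, &dl), None);
    }
}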

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
#[derive(HashStable_Generic)]
pub struct Align {
    pow2: u8,
}

impl Align {
    pub fn from_bits(bits: u64) -> Result<Align, String> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align { pow2: 0 });
        }

        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(format!("`{}` is not a power of 2", align));
        }
        if pow2 > 29 {
            return Err(format!("`{}` is too large", align));
        }

        Ok(Align { pow2 })
    }

    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
    pub fn max_for_offset(offset: Size) -> Align {
        Align {
            pow2: offset.bytes().trailing_zeros() as u8,
        }
    }

    /// Lower the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
}
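
// Illustrative sketch (not part of the original source): `Align` rejects
// non-powers-of-two, and `restrict_for_offset` lowers an alignment so that
// a field placed at a given offset stays aligned.
#[cfg(test)]
mod align_examples {
    use super::*;

    #[test]
    fn align_queries() {
        assert_eq!(Align::from_bytes(8).unwrap().bits(), 64);
        assert!(Align::from_bytes(6).is_err()); // not a power of two

        // Offset 12 = 0b1100 is a multiple of 4 but not of 8, so an 8-byte
        // alignment gets restricted down to 4.
        let eight = Align::from_bytes(8).unwrap();
        assert_eq!(eight.restrict_for_offset(Size::from_bytes(12)).bytes(), 4);
    }
}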

/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
#[derive(HashStable_Generic)]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign {
            abi: align,
            pref: align,
        }
    }

    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign {
            abi: self.abi.min(other.abi),
            pref: self.pref.min(other.pref),
        }
    }

    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign {
            abi: self.abi.max(other.abi),
            pref: self.pref.max(other.pref),
        }
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    pub fn size(self) -> Size {
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Finds the smallest `Integer` type which can represent the signed value.
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128
        }
    }

    /// Finds the smallest `Integer` type which can represent the unsigned value.
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        for &candidate in &[I8, I16, I32, I64, I128] {
            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
                return Some(candidate);
            }
        }
        None
    }

    /// Finds the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for &candidate in &[I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }
}
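
// Illustrative sketch (not part of the original source): `fit_signed` and
// `fit_unsigned` pick the smallest integer that can hold a value, which is
// how discriminant sizes are chosen.
#[cfg(test)]
mod integer_examples {
    use super::*;

    #[test]
    fn fitting() {
        assert_eq!(Integer::fit_signed(-129), I16); // just below i8::min_value()
        assert_eq!(Integer::fit_unsigned(255), I8);
        assert_eq!(Integer::fit_unsigned(256), I16);
    }
}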

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            Pointer => dl.pointer_size
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align
        }
    }

    pub fn is_float(self) -> bool {
        match self {
            F32 | F64 => true,
            _ => false
        }
    }

    pub fn is_int(self) -> bool {
        match self {
            Int(..) => true,
            _ => false,
        }
    }
}

/// Information about one scalar component of a Rust type.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub struct Scalar {
    pub value: Primitive,

    /// Inclusive wrap-around range of valid values, that is, if
    /// start > end, it represents `start..=max_value()`,
    /// followed by `0..=end`.
    ///
    /// That is, for an i8 primitive, a range of `254..=2` means the
    /// following sequence:
    ///
    ///     254 (-2), 255 (-1), 0, 1, 2
    ///
    /// This is intended specifically to mirror LLVM's `!range` metadata
    /// semantics.
    // FIXME(eddyb) always use the shortest range, e.g., by finding
    // the largest space between two consecutive valid values and
    // taking everything else as the (shortest) valid range.
    pub valid_range: RangeInclusive<u128>,
}

impl Scalar {
    pub fn is_bool(&self) -> bool {
        if let Int(I8, _) = self.value {
            self.valid_range == (0..=1)
        } else {
            false
        }
    }

    /// Returns the valid range as an exclusive `x..y` range.
    ///
    /// If `x` and `y` are equal, the range is full, not empty.
    pub fn valid_range_exclusive<C: HasDataLayout>(&self, cx: &C) -> Range<u128> {
        // For a (max) value of -1, max will be `-1 as usize`, which overflows.
        // However, that is fine here (it would still represent the full range),
        // i.e., if the range is everything.
        let bits = self.value.size(cx).bits();
        assert!(bits <= 128);
        let mask = !0u128 >> (128 - bits);
        let start = *self.valid_range.start();
        let end = *self.valid_range.end();
        assert_eq!(start, start & mask);
        assert_eq!(end, end & mask);
        start..(end.wrapping_add(1) & mask)
    }
}
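
// Illustrative sketch (not part of the original source): a wrap-around
// `valid_range` of 254..=2 on an 8-bit scalar becomes the exclusive range
// 254..3 (interpreted modulo 2^8).
#[cfg(test)]
mod scalar_examples {
    use super::*;

    #[test]
    fn wrap_around_range() {
        let dl = TargetDataLayout::default();
        let s = Scalar { value: Int(I8, false), valid_range: 254..=2 };
        let r = s.valid_range_exclusive(&dl);
        assert_eq!(r.start, 254);
        assert_eq!(r.end, 3);

        let b = Scalar { value: Int(I8, false), valid_range: 0..=1 };
        assert!(b.is_bool());
    }
}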

/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum FieldPlacement {
    /// All fields start at no offset. The `usize` is the field count.
    ///
    /// In the case of primitives the number of fields is `0`.
    Union(usize),

    /// Array/vector-like placement, with all fields of identical types.
    Array {
        stride: Size,
        count: u64
    },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: Vec<Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: Vec<u32>
    }
}

impl FieldPlacement {
    pub fn count(&self) -> usize {
        match *self {
            FieldPlacement::Union(count) => count,
            FieldPlacement::Array { count, .. } => {
                let usize_count = count as usize;
                assert_eq!(usize_count as u64, count);
                usize_count
            }
            FieldPlacement::Arbitrary { ref offsets, .. } => offsets.len()
        }
    }

    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldPlacement::Union(_) => Size::ZERO,
            FieldPlacement::Array { stride, count } => {
                let i = i as u64;
                assert!(i < count);
                stride * i
            }
            FieldPlacement::Arbitrary { ref offsets, .. } => offsets[i]
        }
    }

    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldPlacement::Union(_) |
            FieldPlacement::Array { .. } => i,
            FieldPlacement::Arbitrary { ref memory_index, .. } => {
                let r = memory_index[i];
                assert_eq!(r as usize as u32, r);
                r as usize
            }
        }
    }

    /// Gets source indices of the fields by increasing offsets.
    #[inline]
    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item=usize>+'a {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldPlacement::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for i in 0..self.count() {
                    inverse_small[memory_index[i] as usize] = i as u8;
                }
            } else {
                inverse_big = vec![0; self.count()];
                for i in 0..self.count() {
                    inverse_big[memory_index[i] as usize] = i as u32;
                }
            }
        }

        (0..self.count()).map(move |i| {
            match *self {
                FieldPlacement::Union(_) |
                FieldPlacement::Array { .. } => i,
                FieldPlacement::Arbitrary { .. } => {
                    if use_small { inverse_small[i] as usize }
                    else { inverse_big[i] as usize }
                }
            }
        })
    }
}
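
// Illustrative sketch (not part of the original source): with `offsets` in
// source order and `memory_index` recording the reordering,
// `index_by_increasing_offset` yields source field indices by memory offset.
#[cfg(test)]
mod field_placement_examples {
    use super::*;

    #[test]
    fn increasing_offset_order() {
        // Source field 0 was moved to memory position 1 and vice versa.
        let fields = FieldPlacement::Arbitrary {
            offsets: vec![Size::from_bytes(4), Size::from_bytes(0)],
            memory_index: vec![1, 0],
        };
        let order: Vec<_> = fields.index_by_increasing_offset().collect();
        assert_eq!(order, vec![1, 0]); // source field 1 (offset 0) comes first
        assert_eq!(fields.offset(1), Size::from_bytes(0));
    }
}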

/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64
    },
    Aggregate {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    }
}

impl Abi {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        match *self {
            Abi::Uninhabited |
            Abi::Scalar(_) |
            Abi::ScalarPair(..) |
            Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => !sized
        }
    }

    /// Returns `true` if this is a single signed integer scalar.
    pub fn is_signed(&self) -> bool {
        match *self {
            Abi::Scalar(ref scal) => match scal.value {
                Primitive::Int(_, signed) => signed,
                _ => false,
            },
            _ => false,
        }
    }

    /// Returns `true` if this is an uninhabited type.
    pub fn is_uninhabited(&self) -> bool {
        match *self {
            Abi::Uninhabited => true,
            _ => false,
        }
    }

    /// Returns `true` if this is a scalar type.
    pub fn is_scalar(&self) -> bool {
        match *self {
            Abi::Scalar(_) => true,
            _ => false,
        }
    }
}

rustc_index::newtype_index! {
    pub struct VariantIdx {
        derive [HashStable_Generic]
    }
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Variants {
    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single {
        index: VariantIdx,
    },

    /// Enum-likes with more than one inhabited variant: for each case there is
    /// a struct, and they all have space reserved for the discriminant.
    /// For enums this is the sole field of the layout.
    Multiple {
        discr: Scalar,
        discr_kind: DiscriminantKind,
        discr_index: usize,
        variants: IndexVec<VariantIdx, LayoutDetails>,
    },
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum DiscriminantKind {
    /// Integer tag holding the discriminant value itself.
    Tag,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// the variant `dataful_variant` contains a niche at an arbitrary
    /// offset (field `discr_index` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    Niche {
        dataful_variant: VariantIdx,
        niche_variants: RangeInclusive<VariantIdx>,
        niche_start: u128,
    },
}

#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche {
    pub offset: Size,
    pub scalar: Scalar,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let niche = Niche {
            offset,
            scalar,
        };
        if niche.available(cx) > 0 {
            Some(niche)
        } else {
            None
        }
    }

    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);

        // Find out how many values are outside the valid range.
        let niche = v.end().wrapping_add(1)..*v.start();
        niche.end.wrapping_sub(niche.start) & max_value
    }

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);

        if count > max_value {
            return None;
        }

        // Compute the range of invalid values being reserved.
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;

        // If the `end` of our range is inside the valid range,
        // then we ran out of invalid values.
        // FIXME(eddyb) abstract this with a wraparound range type.
        let valid_range_contains = |x| {
            if v.start() <= v.end() {
                *v.start() <= x && x <= *v.end()
            } else {
                *v.start() <= x || x <= *v.end()
            }
        };
        if valid_range_contains(end) {
            return None;
        }

        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
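
// Illustrative sketch (not part of the original source): an 8-bit scalar
// valid over 0..=254 has exactly one niche value (255), which `reserve`
// hands out for discriminant encoding, widening the valid range.
#[cfg(test)]
mod niche_examples {
    use super::*;

    #[test]
    fn reserve_single_value() {
        let dl = TargetDataLayout::default();
        let scalar = Scalar { value: Int(I8, false), valid_range: 0..=254 };
        let niche = Niche::from_scalar(&dl, Size::ZERO, scalar).unwrap();
        assert_eq!(niche.available(&dl), 1);

        let (start, reserved) = niche.reserve(&dl, 1).unwrap();
        assert_eq!(start, 255);
        assert_eq!(reserved.valid_range, 0..=255);
    }
}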

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct LayoutDetails {
    pub variants: Variants,
    pub fields: FieldPlacement,
    pub abi: Abi,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,

    pub align: AbiAndPrefAlign,
    pub size: Size
}

impl LayoutDetails {
    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar.clone());
        let size = scalar.value.size(cx);
        let align = scalar.value.align(cx);
        LayoutDetails {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldPlacement::Union(0),
            abi: Abi::Scalar(scalar),
            largest_niche,
            size,
            align,
        }
    }
}
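
// Illustrative sketch (not part of the original source): the layout of a
// lone bool-like scalar as produced by `LayoutDetails::scalar`.
#[cfg(test)]
mod layout_details_examples {
    use super::*;

    #[test]
    fn scalar_layout() {
        let dl = TargetDataLayout::default();
        let details =
            LayoutDetails::scalar(&dl, Scalar { value: Int(I8, false), valid_range: 0..=1 });
        assert_eq!(details.size, Size::from_bytes(1));
        assert_eq!(details.fields.count(), 0); // primitives have no fields
        // A bool-like scalar has 254 spare values available as a niche.
        assert_eq!(details.largest_niche.as_ref().unwrap().available(&dl), 254);
    }
}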

/// The details of the layout of a type, alongside the type itself.
/// Provides various type traversal APIs (e.g., recursing into fields).
///
/// Note that the details are NOT guaranteed to always be identical
/// to those obtained from `layout_of(ty)`, as we need to produce
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct TyLayout<'a, Ty> {
    pub ty: Ty,
    pub details: &'a LayoutDetails
}

impl<'a, Ty> Deref for TyLayout<'a, Ty> {
    type Target = &'a LayoutDetails;
    fn deref(&self) -> &&'a LayoutDetails {
        &self.details
    }
}

pub trait LayoutOf {
    type Ty;
    type TyLayout;

    fn layout_of(&self, ty: Self::Ty) -> Self::TyLayout;
    fn spanned_layout_of(&self, ty: Self::Ty, _span: Span) -> Self::TyLayout {
        self.layout_of(ty)
    }
}

#[derive(Copy, Clone, PartialEq, Eq)]
pub enum PointerKind {
    /// Most general case, we know no restrictions to tell LLVM.
    Shared,

    /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`.
    Frozen,

    /// `&mut T`, when we know `noalias` is safe for LLVM.
    UniqueBorrowed,

    /// `Box<T>`, unlike `UniqueBorrowed`, it also has `noalias` on returns.
    UniqueOwned
}

#[derive(Copy, Clone)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
}

pub trait TyLayoutMethods<'a, C: LayoutOf<Ty = Self>>: Sized {
    fn for_variant(
        this: TyLayout<'a, Self>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyLayout<'a, Self>;
    fn field(this: TyLayout<'a, Self>, cx: &C, i: usize) -> C::TyLayout;
    fn pointee_info_at(
        this: TyLayout<'a, Self>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo>;
}

impl<'a, Ty> TyLayout<'a, Ty> {
    pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
        where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
        Ty::for_variant(self, cx, variant_index)
    }
    pub fn field<C>(self, cx: &C, i: usize) -> C::TyLayout
        where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
        Ty::field(self, cx, i)
    }
    pub fn pointee_info_at<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
        where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
        Ty::pointee_info_at(self, cx, offset)
    }
}

impl<'a, Ty> TyLayout<'a, Ty> {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

    /// Returns `true` if the type is a ZST and not unsized.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) |
            Abi::ScalarPair(..) |
            Abi::Vector { .. } => false,
            Abi::Uninhabited => self.size.bytes() == 0,
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0
        }
    }
}
83c7162d 1051}