pub use Integer::*;
pub use Primitive::*;

use crate::spec::Target;

use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::iter::Step;
use std::num::NonZeroUsize;
use std::ops::{Add, AddAssign, Deref, Mul, Range, RangeInclusive, Sub};
use std::str::FromStr;

use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable_Generic;
use rustc_serialize::json::{Json, ToJson};
use rustc_span::Span;

pub mod call;

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of `#[repr(C)]` enums (defaults to `I32`, i.e. 32 bits).
    pub c_enum_min_size: Integer,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: AddressSpace::DATA,
            c_enum_min_size: Integer::I32,
        }
    }
}

impl TargetDataLayout {
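    /// Parses the LLVM [data-layout string](https://llvm.org/docs/LangRef.html#data-layout)
    /// from the given target specification, folding each `-`-separated spec into the
    /// defaults from `TargetDataLayout::default()`.
    ///
    /// Illustrative sketch (the data-layout string below is an assumption for the
    /// example, not taken from a real target spec):
    ///
    /// ```ignore (illustrative)
    /// // "e-p:64:64-i64:64" describes a little-endian target with 64-bit,
    /// // 64-bit-aligned pointers and 64-bit-aligned `i64`.
    /// let dl = TargetDataLayout::parse(&target)?;
    /// assert_eq!(dl.endian, Endian::Little);
    /// assert_eq!(dl.pointer_size.bits(), 64);
    /// ```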
    pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &str, cause: &str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                format!("invalid address space `{}` for `{}` in \"data-layout\": {}", s, cause, err)
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &str, kind: &str, cause: &str| {
            s.parse::<u64>().map_err(|err| {
                format!("invalid {} `{}` for `{}` in \"data-layout\": {}", kind, s, cause, err)
            })
        };

        // Parse a size string.
        let size = |s: &str, cause: &str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let align = |s: &[&str], cause: &str| {
            if s.is_empty() {
                return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits).map_err(|err| {
                    format!("invalid alignment for `{}` in \"data-layout\": {}", cause, err)
                })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in target.data_layout.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = size(s, p)?;
                    dl.pointer_align = align(a, p)?;
                }
                [s, ref a @ ..] if s.starts_with('i') => {
                    let bits = match s[1..].parse::<u64>() {
                        Ok(bits) => bits,
                        Err(_) => {
                            size(&s[1..], "i")?; // For the user error.
                            continue;
                        }
                    };
                    let a = align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, ref a @ ..] if s.starts_with('v') => {
                    let v_size = size(&s[1..], "v")?;
                    let a = align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Perform consistency checks against the Target information.
        if dl.endian != target.endian {
            return Err(format!(
                "inconsistent target specification: \"data-layout\" claims \
                 architecture is {}-endian, while \"target-endian\" is `{}`",
                dl.endian.as_str(),
                target.endian.as_str(),
            ));
        }

        if dl.pointer_size.bits() != target.pointer_width.into() {
            return Err(format!(
                "inconsistent target specification: \"data-layout\" claims \
                 pointers are {}-bit, while \"target-pointer-width\" is `{}`",
                dl.pointer_size.bits(),
                target.pointer_width
            ));
        }

        dl.c_enum_min_size = Integer::from_size(Size::from_bits(target.c_enum_min_bits))?;

        Ok(dl)
    }

    /// Returns the exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, while also
    /// allowing `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47, as that is enough to cover the currently
    /// usable address space on 64-bit ARMv8 and x86_64.
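    ///
    /// Illustrative sketch (assumes a data layout with 64-bit pointers):
    ///
    /// ```ignore (illustrative)
    /// let dl = TargetDataLayout::default(); // 64-bit pointers by default
    /// assert_eq!(dl.obj_size_bound(), 1 << 47);
    /// ```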
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
        }
    }

    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
        }
    }

    #[inline]
    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
}

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

/// Endianness of the target, which must match `cfg(target_endian)`.
#[derive(Copy, Clone, PartialEq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{}""#, s)),
        }
    }
}

impl ToJson for Endian {
    fn to_json(&self) -> Json {
        self.as_str().to_json()
    }
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct Size {
    // The top 3 bits are ALWAYS zero.
    raw: u64,
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of eight.
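    ///
    /// Illustrative sketch:
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Size::from_bits(1).bytes(), 1);  // 1 bit still occupies a whole byte
    /// assert_eq!(Size::from_bits(12).bytes(), 2); // 12 bits round up to 2 bytes
    /// assert_eq!(Size::from_bits(16).bytes(), 2);
    /// ```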
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();

        #[cold]
        fn overflow(bits: u64) -> ! {
            panic!("Size::from_bits({}) has overflowed", bits);
        }

        // This is the largest value of `bits` that does not cause overflow
        // during rounding, and guarantees that the resulting number of bytes
        // cannot cause overflow when multiplied by 8.
        if bits > 0xffff_ffff_ffff_fff8 {
            overflow(bits);
        }

        // Avoid potential overflow from `bits + 7`.
        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
    }

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.raw << 3
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
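    ///
    /// Illustrative sketch for a 1-byte size:
    ///
    /// ```ignore (illustrative)
    /// let byte = Size::from_bytes(1);
    /// assert_eq!(byte.sign_extend(0xff), u128::MAX); // -1 stays -1
    /// assert_eq!(byte.sign_extend(0x7f), 0x7f);      // positive values are unchanged
    /// assert_eq!(byte.truncate(0x1_23), 0x23);       // `truncate` just drops the high bits
    /// ```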
    #[inline]
    pub fn sign_extend(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        (((value << shift) as i128) >> shift) as u128
    }

    /// Truncates `value` to `self` bits.
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct Align {
    pow2: u8,
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, String> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    #[inline]
    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        fn not_power_of_2(align: u64) -> String {
            format!("`{}` is not a power of 2", align)
        }

        #[cold]
        fn too_large(align: u64) -> String {
            format!("`{}` is too large", align)
        }

        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(not_power_of_2(align));
        }
        if pow2 > 29 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
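    ///
    /// Illustrative sketch:
    ///
    /// ```ignore (illustrative)
    /// // 12 = 4 * 3, so the best alignment guaranteed by offset 12 is 4 bytes.
    /// assert_eq!(Align::max_for_offset(Size::from_bytes(12)).bytes(), 4);
    /// // Restricting an 8-byte alignment to that offset lowers it to 4 bytes.
    /// let eight = Align::from_bytes(8).unwrap();
    /// assert_eq!(eight.restrict_for_offset(Size::from_bytes(12)).bytes(), 4);
    /// ```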
    #[inline]
    pub fn max_for_offset(offset: Size) -> Align {
        Align { pow2: offset.bytes().trailing_zeros() as u8 }
    }

    /// Lower the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    #[inline]
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
}

/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }

    #[inline]
    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
    }

    #[inline]
    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    #[inline]
    pub fn size(self) -> Size {
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Finds the smallest Integer type which can represent the signed value.
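    ///
    /// Illustrative sketch:
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Integer::fit_signed(127), I8);
    /// assert_eq!(Integer::fit_signed(128), I16);  // 128 no longer fits in an i8
    /// assert_eq!(Integer::fit_signed(-129), I16);
    /// ```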
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest Integer type which can represent the unsigned value.
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        for candidate in [I8, I16, I32, I64, I128] {
            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
                return Some(candidate);
            }
        }
        None
    }

    /// Find the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer,
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            Pointer => dl.pointer_size,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align,
        }
    }

    // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
    #[inline]
    pub fn is_float(self) -> bool {
        matches!(self, F32 | F64)
    }

    // FIXME(eddyb) remove, it's completely unused.
    #[inline]
    pub fn is_int(self) -> bool {
        matches!(self, Int(..))
    }
}

/// Inclusive wrap-around range of valid values, that is, if
/// `start > end`, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM's `!range` metadata
/// semantics.
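///
/// Illustrative sketch of `contains` on a wrapped range:
///
/// ```ignore (illustrative)
/// let r = WrappingRange { start: 254, end: 2 };
/// assert!(r.contains(255) && r.contains(0) && r.contains(2));
/// assert!(!r.contains(100));
/// ```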
#[derive(Clone, PartialEq, Eq, Hash)]
#[derive(HashStable_Generic)]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    /// Returns `true` if `v` is contained in the range.
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Returns `true` if zero is contained in the range.
    /// Equal to `range.contains(0)` but should be faster.
    #[inline(always)]
    pub fn contains_zero(&self) -> bool {
        self.start > self.end || self.start == 0
    }

    /// Returns `self` with replaced `start`
    #[inline(always)]
    pub fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with replaced `end`
    #[inline(always)]
    pub fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }
}

impl fmt::Debug for WrappingRange {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(fmt, "{}..={}", self.start, self.end)?;
        Ok(())
    }
}

/// Information about one scalar component of a Rust type.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub struct Scalar {
    pub value: Primitive,

    // FIXME(eddyb) always use the shortest range, e.g., by finding
    // the largest space between two consecutive valid values and
    // taking everything else as the (shortest) valid range.
    pub valid_range: WrappingRange,
}

impl Scalar {
    #[inline]
    pub fn is_bool(&self) -> bool {
        matches!(self.value, Int(I8, false))
            && matches!(self.valid_range, WrappingRange { start: 0, end: 1 })
    }

    /// Returns the valid range as a `x..y` range.
    ///
    /// If `x` and `y` are equal, the range is full, not empty.
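    ///
    /// Illustrative sketch (`cx` is assumed to be some `&impl HasDataLayout`):
    ///
    /// ```ignore (illustrative)
    /// // A `bool` scalar: `Int(I8, false)` with valid range 0..=1.
    /// let b = Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } };
    /// assert_eq!(b.valid_range_exclusive(cx), 0..2);
    /// // A full byte range 0..=255 wraps to `0..0` (start == end means "everything").
    /// ```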
    pub fn valid_range_exclusive<C: HasDataLayout>(&self, cx: &C) -> Range<u128> {
        // For a (max) value of -1, max will be `-1 as usize`, which overflows.
        // However, that is fine here (it would still represent the full range),
        // i.e., if the range is everything.
        let bits = self.value.size(cx).bits();
        assert!(bits <= 128);
        let mask = !0u128 >> (128 - bits);
        let start = self.valid_range.start;
        let end = self.valid_range.end;
        assert_eq!(start, start & mask);
        assert_eq!(end, end & mask);
        start..(end.wrapping_add(1) & mask)
    }
}

/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum FieldsShape {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: Vec<Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: Vec<u32>,
    },
}

impl FieldsShape {
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(
                    i < count.get(),
                    "tried to access field {} of union with {} fields",
                    i,
                    count
                );
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count);
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
        }
    }

    #[inline]
    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
        }
    }

    /// Gets source indices of the fields by increasing offsets.
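    ///
    /// Illustrative sketch: with `memory_index = [1, 0]` (source field 0 was moved to
    /// the second slot in memory) and `offsets = [4, 0]`, the iterator yields `1, 0`:
    /// field 1 sits at offset 0 and field 0 at offset 4.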
    #[inline]
    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for i in 0..self.count() {
                    inverse_small[memory_index[i] as usize] = i as u8;
                }
            } else {
                inverse_big = vec![0; self.count()];
                for i in 0..self.count() {
                    inverse_big[memory_index[i] as usize] = i as u32;
                }
            }
        }

        (0..self.count()).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i] as usize
                }
            }
        })
    }
}

/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// The default address space, corresponding to data space.
    pub const DATA: Self = AddressSpace(0);
}

/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64,
    },
    Aggregate {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    },
}

impl Abi {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        match *self {
            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => !sized,
        }
    }

    /// Returns `true` if this is a single signed integer scalar
    #[inline]
    pub fn is_signed(&self) -> bool {
        match *self {
            Abi::Scalar(ref scal) => match scal.value {
                Primitive::Int(_, signed) => signed,
                _ => false,
            },
            _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
        }
    }

    /// Returns `true` if this is an uninhabited type
    #[inline]
    pub fn is_uninhabited(&self) -> bool {
        matches!(*self, Abi::Uninhabited)
    }

    /// Returns `true` if this is a scalar type
    #[inline]
    pub fn is_scalar(&self) -> bool {
        matches!(*self, Abi::Scalar(_))
    }
}

rustc_index::newtype_index! {
    pub struct VariantIdx {
        derive [HashStable_Generic]
    }
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Variants {
    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single { index: VariantIdx },

    /// Enum-likes with more than one inhabited variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding,
        tag_field: usize,
        variants: IndexVec<VariantIdx, Layout>,
    },
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum TagEncoding {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// Discriminant and variant index coincide.
    /// The variant `dataful_variant` contains a niche at an arbitrary
    /// offset (field `tag_field` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
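    ///
    /// Illustrative sketch of the tag arithmetic (the concrete numbers are assumptions
    /// for the example): with `niche_variants = 1..=2`, `niche_start = 0x8000_0000` and
    /// dataful variant 0, variant 1 is encoded as `(1 - 1).wrapping_add(0x8000_0000)`
    /// and variant 2 as `0x8000_0001`; any value outside that window is read back as
    /// the dataful variant 0.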
    Niche {
        dataful_variant: VariantIdx,
        niche_variants: RangeInclusive<VariantIdx>,
        niche_start: u128,
    },
}

#[derive(Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche {
    pub offset: Size,
    pub scalar: Scalar,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let niche = Niche { offset, scalar };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

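    /// Counts how many values fall outside the scalar's valid range and are therefore
    /// available for encoding extra variants.
    ///
    /// Illustrative sketch: a `bool` niche (valid range `0..=1` in a byte) has
    /// 254 available values.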
    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Scalar { value, valid_range: v } = self.scalar.clone();
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);

        if count > max_value {
            return None;
        }

        // Compute the range of invalid values being reserved.
        let start = v.end.wrapping_add(1) & max_value;
        let end = v.end.wrapping_add(count) & max_value;

        if v.contains(end) {
            return None;
        }

        Some((start, Scalar { value, valid_range: v.with_end(end) }))
    }
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Layout {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. generators can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants,

    /// The `abi` defines how this data is passed between functions, and it defines
    /// value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
    /// have to be taken into account to find all fields of this layout.
    pub abi: Abi,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,

    pub align: AbiAndPrefAlign,
    pub size: Size,
}

impl Layout {
    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar.clone());
        let size = scalar.value.size(cx);
        let align = scalar.value.align(cx);
        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Scalar(scalar),
            largest_niche,
            size,
            align,
        }
    }
}

/// The layout of a type, alongside the type itself.
/// Provides various type traversal APIs (e.g., recursing into fields).
///
/// Note that the layout is NOT guaranteed to always be identical
/// to that obtained from `layout_of(ty)`, as we need to produce
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct TyAndLayout<'a, Ty> {
    pub ty: Ty,
    pub layout: &'a Layout,
}

impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
    type Target = &'a Layout;
    fn deref(&self) -> &&'a Layout {
        &self.layout
    }
}

/// Trait for context types that can compute layouts of things.
pub trait LayoutOf<'a>: Sized {
    type Ty: TyAbiInterface<'a, Self>;
    type TyAndLayout: MaybeResult<TyAndLayout<'a, Self::Ty>>;

    fn layout_of(&self, ty: Self::Ty) -> Self::TyAndLayout;
    fn spanned_layout_of(&self, ty: Self::Ty, _span: Span) -> Self::TyAndLayout {
        self.layout_of(ty)
    }
}

pub trait MaybeResult<T> {
    type Error;

    fn from(x: Result<T, Self::Error>) -> Self;
    fn to_result(self) -> Result<T, Self::Error>;
}

impl<T> MaybeResult<T> for T {
    type Error = !;

    fn from(Ok(x): Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        Ok(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    fn from(x: Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        self
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Most general case, we know no restrictions to tell LLVM.
    Shared,

    /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`.
    Frozen,

    /// `&mut T` which is `noalias` but not `readonly`.
    UniqueBorrowed,

    /// `Box<T>`, unlike `UniqueBorrowed`, it also has `noalias` on returns.
    UniqueOwned,
}

#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
    pub address_space: AddressSpace,
}

/// Trait that needs to be implemented by the higher-level type representation
/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
pub trait TyAbiInterface<'a, C>: Sized {
    fn ty_and_layout_for_variant(
        this: TyAndLayout<'a, Self>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'a, Self>;
    fn ty_and_layout_field(this: TyAndLayout<'a, Self>, cx: &C, i: usize) -> TyAndLayout<'a, Self>;
    fn ty_and_layout_pointee_info_at(
        this: TyAndLayout<'a, Self>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo>;
}

impl<'a, Ty> TyAndLayout<'a, Ty> {
    pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_for_variant(self, cx, variant_index)
    }

    pub fn field<C>(self, cx: &C, i: usize) -> Self
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_field(self, cx, i)
    }

    pub fn pointee_info_at<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_pointee_info_at(self, cx, offset)
    }
}

impl<'a, Ty> TyAndLayout<'a, Ty> {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

    /// Returns `true` if the type is a ZST and not unsized.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Uninhabited => self.size.bytes() == 0,
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
        }
    }

    /// Determines if this type permits "raw" initialization by just transmuting some
    /// memory into an instance of `T`.
    ///
    /// `zero` indicates if the memory is zero-initialized (`true`) or left entirely
    /// uninitialized (`false`).
    ///
    /// This is conservative: in doubt, it will answer `true`.
    ///
    /// FIXME: Once we removed all the conservatism, we could alternatively
    /// create an all-0/all-undef constant and run the const value validator to see if
    /// this is a valid value for the given type.
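    ///
    /// Illustrative sketch: zero-initializing `i32` (full valid range) is accepted,
    /// while zero-initializing `&T` or `NonZeroU32` (scalars whose valid range
    /// excludes 0) is rejected; leaving `bool` uninitialized is rejected because its
    /// valid range `0..=1` does not cover every byte value.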
    pub fn might_permit_raw_init<C>(self, cx: &C, zero: bool) -> bool
    where
        Self: Copy,
        Ty: TyAbiInterface<'a, C>,
        C: HasDataLayout,
    {
        let scalar_allows_raw_init = move |s: &Scalar| -> bool {
            if zero {
                // The range must contain 0.
                s.valid_range.contains_zero()
            } else {
                // The range must include all values. `valid_range_exclusive` handles
                // the wrap-around using target arithmetic; with wrap-around then the full
                // range is one where `start == end`.
                let range = s.valid_range_exclusive(cx);
                range.start == range.end
            }
        };

        // Check the ABI.
        let valid = match &self.abi {
            Abi::Uninhabited => false, // definitely UB
            Abi::Scalar(s) => scalar_allows_raw_init(s),
            Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
            Abi::Vector { element: s, count } => *count == 0 || scalar_allows_raw_init(s),
            Abi::Aggregate { .. } => true, // Fields are checked below.
        };
        if !valid {
            // This is definitely not okay.
            return false;
        }

        // If we have not found an error yet, we need to recursively descend into fields.
        match &self.fields {
            FieldsShape::Primitive | FieldsShape::Union { .. } => {}
            FieldsShape::Array { .. } => {
                // FIXME(#66151): For now, we are conservative and do not check arrays.
            }
            FieldsShape::Arbitrary { offsets, .. } => {
                for idx in 0..offsets.len() {
                    if !self.field(cx, idx).might_permit_raw_init(cx, zero) {
                        // We found a field that is unhappy with this kind of initialization.
                        return false;
                    }
                }
            }
        }

        // FIXME(#66151): For now, we are conservative and do not check `self.variants`.
        true
    }
}