#![cfg_attr(feature = "nightly", feature(step_trait, rustc_attrs, min_specialization))]

use std::fmt;
#[cfg(feature = "nightly")]
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Mul, RangeInclusive, Sub};
use std::str::FromStr;

use bitflags::bitflags;
use rustc_data_structures::intern::Interned;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
use rustc_index::vec::{Idx, IndexSlice, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::HashStable_Generic;
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable, Encodable};

mod layout;

pub use layout::LayoutCalculator;

/// Requirements for a `StableHashingContext` to be used in this crate.
/// This is a hack to allow using the `HashStable_Generic` derive macro
/// instead of implementing everything in `rustc_middle`.
pub trait HashStableContext {}

use Integer::*;
use Primitive::*;

bitflags! {
    #[derive(Default)]
    #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
    pub struct ReprFlags: u8 {
        const IS_C = 1 << 0;
        const IS_SIMD = 1 << 1;
        const IS_TRANSPARENT = 1 << 2;
        // Internal only for now. If true, don't reorder fields.
        const IS_LINEAR = 1 << 3;
        // If true, the type's layout can be randomized using
        // the seed stored in `ReprOptions.layout_seed`.
        const RANDOMIZE_LAYOUT = 1 << 4;
        // Any of these flags being set prevents the field reordering optimisation.
        const IS_UNOPTIMISABLE = ReprFlags::IS_C.bits
            | ReprFlags::IS_SIMD.bits
            | ReprFlags::IS_LINEAR.bits;
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub enum IntegerType {
    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, that
    /// is, `Pointer(true)` is `isize`.
    Pointer(bool),
    /// Fixed-size integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness;
    /// `Fixed(I8, false)` means `u8`.
    Fixed(Integer, bool),
}

impl IntegerType {
    pub fn is_signed(&self) -> bool {
        match self {
            IntegerType::Pointer(b) => *b,
            IntegerType::Fixed(_, b) => *b,
        }
    }
}
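
// A small illustrative test (a sketch added here, not part of the upstream file):
// `Pointer(true)` models `isize` and `Fixed(I8, false)` models `u8`, as the doc
// comments above describe.
#[cfg(test)]
mod integer_type_example {
    use super::*;

    #[test]
    fn signedness() {
        assert!(IntegerType::Pointer(true).is_signed()); // isize
        assert!(!IntegerType::Fixed(I8, false).is_signed()); // u8
    }
}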

/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct ReprOptions {
    pub int: Option<IntegerType>,
    pub align: Option<Align>,
    pub pack: Option<Align>,
    pub flags: ReprFlags,
    /// The seed to be used for randomizing a type's layout.
    ///
    /// Note: This could technically be a `[u8; 16]` (a `u128`) which would
    /// be the "most accurate" hash as it'd encompass the item and crate
    /// hash without loss, but it does pay the price of being larger.
    /// Everything's a tradeoff; a `u64` seed should be sufficient for our
    /// purposes (primarily `-Z randomize-layout`).
    pub field_shuffle_seed: u64,
}

impl ReprOptions {
    #[inline]
    pub fn simd(&self) -> bool {
        self.flags.contains(ReprFlags::IS_SIMD)
    }

    #[inline]
    pub fn c(&self) -> bool {
        self.flags.contains(ReprFlags::IS_C)
    }

    #[inline]
    pub fn packed(&self) -> bool {
        self.pack.is_some()
    }

    #[inline]
    pub fn transparent(&self) -> bool {
        self.flags.contains(ReprFlags::IS_TRANSPARENT)
    }

    #[inline]
    pub fn linear(&self) -> bool {
        self.flags.contains(ReprFlags::IS_LINEAR)
    }

    /// Returns the discriminant type, given these `repr` options.
    /// This must only be called on enums!
    pub fn discr_type(&self) -> IntegerType {
        self.int.unwrap_or(IntegerType::Pointer(true))
    }

    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
    /// layout" optimizations, such as representing `Foo<&T>` as a
    /// single pointer.
    pub fn inhibit_enum_layout_opt(&self) -> bool {
        self.c() || self.int.is_some()
    }

    /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
    /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
    pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
        if let Some(pack) = self.pack {
            if pack.bytes() == 1 {
                return true;
            }
        }

        self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
    }

    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
    /// was enabled for its declaration crate.
    pub fn can_randomize_type_layout(&self) -> bool {
        !self.inhibit_struct_field_reordering_opt()
            && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
    }

    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
    pub fn inhibit_union_abi_opt(&self) -> bool {
        self.c()
    }
}
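
// Illustrative sketch (not in the upstream file): `repr(C)` and `repr(packed(1))`
// both inhibit field reordering, while the default repr does not.
#[cfg(test)]
mod repr_options_example {
    use super::*;

    #[test]
    fn reordering_inhibitors() {
        let plain = ReprOptions::default();
        assert!(!plain.inhibit_struct_field_reordering_opt());

        let repr_c = ReprOptions { flags: ReprFlags::IS_C, ..ReprOptions::default() };
        assert!(repr_c.inhibit_struct_field_reordering_opt());
        assert!(repr_c.inhibit_union_abi_opt());

        let packed1 =
            ReprOptions { pack: Some(Align::from_bytes(1).unwrap()), ..ReprOptions::default() };
        assert!(packed1.inhibit_struct_field_reordering_opt());
    }
}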

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
#[derive(Debug, PartialEq, Eq)]
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of `#[repr(C)]` enums (default `c_int::BITS`, usually 32).
    /// Note: this isn't in LLVM's data layout string; it is `short_enum`,
    /// so the only valid spec for LLVM is `c_int::BITS` or 8.
    pub c_enum_min_size: Integer,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: AddressSpace::DATA,
            c_enum_min_size: Integer::I32,
        }
    }
}

pub enum TargetDataLayoutErrors<'a> {
    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
    MissingAlignment { cause: &'a str },
    InvalidAlignment { cause: &'a str, err: String },
    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
    InvalidBitsSize { err: String },
}

impl TargetDataLayout {
    /// Parse data layout from an [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
    ///
    /// This function doesn't fill `c_enum_min_size` and it will always be `I32` since it cannot
    /// be determined from the LLVM string.
    pub fn parse_from_llvm_datalayout_string<'a>(
        input: &'a str,
    ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &'a str, cause: &'a str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
                kind,
                bit: s,
                cause,
                err,
            })
        };

        // Parse a size string.
        let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let align = |s: &[&'a str], cause: &'a str| {
            if s.is_empty() {
                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits)
                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in input.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
                // FIXME(erikdesjardins): we should be parsing nonzero address spaces
                // this will require replacing TargetDataLayout::{pointer_size,pointer_align}
                // with e.g. `fn pointer_size_in(AddressSpace)`
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = size(s, p)?;
                    dl.pointer_align = align(a, p)?;
                }
                [s, ref a @ ..] if s.starts_with('i') => {
                    let Ok(bits) = s[1..].parse::<u64>() else {
                        size(&s[1..], "i")?; // For the user error.
                        continue;
                    };
                    let a = align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, ref a @ ..] if s.starts_with('v') => {
                    let v_size = size(&s[1..], "v")?;
                    let a = align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }
        Ok(dl)
    }

    /// Returns the exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
        }
    }

    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
        }
    }

    #[inline]
    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
}
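
// A minimal sketch (not in the upstream file) of parsing an LLVM data layout
// string; the spec fragments used here are invented for illustration.
#[cfg(test)]
mod datalayout_parse_example {
    use super::*;

    #[test]
    fn parse_minimal_spec() {
        let dl = TargetDataLayout::parse_from_llvm_datalayout_string("e-p:32:32-i64:64")
            .unwrap_or_else(|_| panic!("parse failed"));
        assert_eq!(dl.endian, Endian::Little);
        assert_eq!(dl.pointer_size.bits(), 32);
        assert_eq!(dl.ptr_sized_integer(), I32);
        assert_eq!(dl.obj_size_bound(), 1 << 31);
    }
}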

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

/// Endianness of the target, which must match `cfg(target_endian)`.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{}""#, s)),
        }
    }
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct Size {
    raw: u64,
}

// Safety: Ord is implemented as just comparing numerical values, and numerical values
// are not changed by (de-)serialization.
#[cfg(feature = "nightly")]
unsafe impl StableOrd for Size {}

// This is debug-printed a lot in larger structs, don't waste too much space there
impl fmt::Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Size({} bytes)", self.bytes())
    }
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        // Avoid potential overflow from `bits + 7`.
        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
    }

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        #[cold]
        fn overflow(bytes: u64) -> ! {
            panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
        }

        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
    #[inline]
    pub fn sign_extend(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        (((value << shift) as i128) >> shift) as u128
    }

    /// Truncates `value` to `self` bits.
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }

    #[inline]
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
    }

    #[inline]
    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    #[inline]
    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}
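
// Illustrative tests (a sketch, not part of the upstream file) for the
// bit-level helpers above: `from_bits` rounds up to whole bytes, and
// `sign_extend`/`truncate` operate on the low `self.bits()` bits.
#[cfg(test)]
mod size_example {
    use super::*;

    #[test]
    fn bits_and_bytes() {
        assert_eq!(Size::from_bits(12).bytes(), 2); // rounded up to 2 bytes
        let byte = Size::from_bits(8);
        assert_eq!(byte.truncate(0x1ff), 0xff); // only the low 8 bits survive
        assert_eq!(byte.sign_extend(0xff) as i128, -1); // 0xff is -1 as i8
        assert_eq!(byte.signed_int_max(), 127);
        assert_eq!(byte.unsigned_int_max(), 255);
    }
}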

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

#[cfg(feature = "nightly")]
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct Align {
    pow2: u8,
}

// This is debug-printed a lot in larger structs, don't waste too much space there
impl fmt::Debug for Align {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Align({} bytes)", self.bytes())
    }
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    pub const MAX: Align = Align { pow2: 29 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, String> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    #[inline]
    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        fn not_power_of_2(align: u64) -> String {
            format!("`{}` is not a power of 2", align)
        }

        #[cold]
        fn too_large(align: u64) -> String {
            format!("`{}` is too large", align)
        }

        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(not_power_of_2(align));
        }
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
    #[inline]
    pub fn max_for_offset(offset: Size) -> Align {
        Align { pow2: offset.bytes().trailing_zeros() as u8 }
    }

    /// Lower the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    #[inline]
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
}
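
// A short sketch (not in the upstream file): alignments are stored as powers
// of two, and `max_for_offset` is the largest power of two dividing the offset.
#[cfg(test)]
mod align_example {
    use super::*;

    #[test]
    fn power_of_two_alignments() {
        assert_eq!(Align::from_bytes(8).unwrap().bits(), 64);
        assert!(Align::from_bytes(3).is_err()); // not a power of two
        let off = Size::from_bytes(24); // 24 = 8 * 3
        assert_eq!(Align::max_for_offset(off).bytes(), 8);
    }
}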

/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }

    #[inline]
    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
    }

    #[inline]
    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    #[inline]
    pub fn size(self) -> Size {
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    /// Gets the `Integer` type from an `IntegerType`.
    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
            IntegerType::Fixed(x, _) => x,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Returns the largest signed value that can be represented by this Integer.
    #[inline]
    pub fn signed_max(self) -> i128 {
        match self {
            I8 => i8::MAX as i128,
            I16 => i16::MAX as i128,
            I32 => i32::MAX as i128,
            I64 => i64::MAX as i128,
            I128 => i128::MAX,
        }
    }

    /// Finds the smallest Integer type which can represent the signed value.
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest Integer type which can represent the unsigned value.
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
        })
    }

    /// Finds the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    pub fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}
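
// Illustrative sketch (not in the upstream file): the `fit_*` helpers pick the
// smallest `Integer` able to represent a value.
#[cfg(test)]
mod integer_fit_example {
    use super::*;

    #[test]
    fn smallest_fit() {
        assert_eq!(Integer::fit_signed(127), I8);
        assert_eq!(Integer::fit_signed(-129), I16); // just below i8::MIN
        assert_eq!(Integer::fit_unsigned(256), I16); // just above u8::MAX
        assert_eq!(Integer::from_size(Size::from_bytes(4)), Ok(I32));
    }
}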

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer(AddressSpace),
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
            // different address spaces can have different sizes
            // (but TargetDataLayout doesn't currently parse that part of the DL string)
            Pointer(_) => dl.pointer_size,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
            // different address spaces can have different alignments
            // (but TargetDataLayout doesn't currently parse that part of the DL string)
            Pointer(_) => dl.pointer_align,
        }
    }
}

/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM's `!range` metadata semantics.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    pub fn full(size: Size) -> Self {
        Self { start: 0, end: size.unsigned_int_max() }
    }

    /// Returns `true` if `v` is contained in the range.
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Returns `self` with `start` replaced.
    #[inline(always)]
    pub fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with `end` replaced.
    #[inline(always)]
    pub fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }

    /// Returns `true` if `size` completely fills the range.
    #[inline]
    pub fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
    }
}
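
// A sketch (not in the upstream file) of the wrap-around semantics described
// above: for an 8-bit value, `254..=2` covers 254, 255, 0, 1, 2.
#[cfg(test)]
mod wrapping_range_example {
    use super::*;

    #[test]
    fn wrap_around_contains() {
        let r = WrappingRange { start: 254, end: 2 };
        assert!(r.contains(255) && r.contains(0) && r.contains(2));
        assert!(!r.contains(3));
        assert!(WrappingRange::full(Size::from_bytes(1)).is_full_for(Size::from_bytes(1)));
    }
}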

impl fmt::Debug for WrappingRange {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.start > self.end {
            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
        } else {
            write!(fmt, "{}..={}", self.start, self.end)?;
        }
        Ok(())
    }
}

/// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Scalar {
    Initialized {
        value: Primitive,

        // FIXME(eddyb) always use the shortest range, e.g., by finding
        // the largest space between two consecutive valid values and
        // taking everything else as the (shortest) valid range.
        valid_range: WrappingRange,
    },
    Union {
        /// Even for unions, we need to use the correct registers for the kind of
        /// values inside the union, so we keep the `Primitive` type around. We
        /// also use it to compute the size of the scalar.
        /// However, unions never have niches and even allow undef,
        /// so there is no `valid_range`.
        value: Primitive,
    },
}

impl Scalar {
    #[inline]
    pub fn is_bool(&self) -> bool {
        matches!(
            self,
            Scalar::Initialized {
                value: Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 }
            }
        )
    }

    /// Get the primitive representation of this type, ignoring the valid range and whether the
    /// value is allowed to be undefined (due to being a union).
    pub fn primitive(&self) -> Primitive {
        match *self {
            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
        }
    }

    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
        self.primitive().align(cx)
    }

    pub fn size(self, cx: &impl HasDataLayout) -> Size {
        self.primitive().size(cx)
    }

    #[inline]
    pub fn to_union(&self) -> Self {
        Self::Union { value: self.primitive() }
    }

    #[inline]
    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
        }
    }

    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
    /// union.
    #[inline]
    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
        match self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
        }
    }

    /// Returns `true` if all possible numbers are valid, i.e. `valid_range` covers the whole
    /// layout.
    #[inline]
    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
            Scalar::Union { .. } => true,
        }
    }

    /// Returns `true` if this type can be left uninit.
    #[inline]
    pub fn is_uninit_valid(&self) -> bool {
        match *self {
            Scalar::Initialized { .. } => false,
            Scalar::Union { .. } => true,
        }
    }
}
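
// Illustrative sketch (not in the upstream file): the canonical `bool` scalar
// is an unsigned byte restricted to `0..=1`, so it is not "always valid".
#[cfg(test)]
mod scalar_example {
    use super::*;

    #[test]
    fn bool_scalar() {
        let b = Scalar::Initialized {
            value: Int(I8, false),
            valid_range: WrappingRange { start: 0, end: 1 },
        };
        let dl = TargetDataLayout::default();
        assert!(b.is_bool());
        assert!(!b.is_always_valid(&dl)); // 2..=255 are invalid
        assert!(b.to_union().is_uninit_valid());
    }
}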

rustc_index::newtype_index! {
    /// The *source-order* index of a field in a variant.
    ///
    /// This is how most code after type checking refers to fields, rather than
    /// using names (as names have hygiene complications and more complex lookup).
    ///
    /// Particularly for `repr(Rust)` types, this may not be the same as *layout* order.
    /// (It is for `repr(C)` `struct`s, however.)
    ///
    /// For example, in the following types,
    /// ```rust
    /// # enum Never {}
    /// # #[repr(u16)]
    /// enum Demo1 {
    ///     Variant0 { a: Never, b: i32 } = 100,
    ///     Variant1 { c: u8, d: u64 } = 10,
    /// }
    /// struct Demo2 { e: u8, f: u16, g: u8 }
    /// ```
    /// `b` is `FieldIdx(1)` in `VariantIdx(0)`,
    /// `d` is `FieldIdx(1)` in `VariantIdx(1)`, and
    /// `f` is `FieldIdx(1)` in `VariantIdx(0)`.
    #[derive(HashStable_Generic)]
    pub struct FieldIdx {}
}

/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum FieldsShape {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: IndexVec<FieldIdx, Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: IndexVec<FieldIdx, u32>,
    },
}

impl FieldsShape {
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(
                    i < count.get(),
                    "tried to access field {} of union with {} fields",
                    i,
                    count
                );
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count);
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::from_usize(i)],
        }
    }

    #[inline]
    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => {
                memory_index[FieldIdx::from_usize(i)].try_into().unwrap()
            }
        }
    }

    /// Gets source indices of the fields by increasing offsets.
    #[inline]
    pub fn index_by_increasing_offset(&self) -> impl Iterator<Item = usize> + '_ {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = IndexVec::new();
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
                    inverse_small[mem_idx as usize] = field_idx.as_u32() as u8;
                }
            } else {
                inverse_big = memory_index.invert_bijective_mapping();
            }
        }

        (0..self.count()).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i as u32].as_usize()
                }
            }
        })
    }
}
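
// A sketch (not in the upstream file): a two-field struct whose fields were
// reordered, so source field 1 is laid out first in memory.
#[cfg(test)]
mod fields_shape_example {
    use super::*;

    #[test]
    fn reordered_fields() {
        let shape = FieldsShape::Arbitrary {
            // Source field 0 lives at offset 4, source field 1 at offset 0.
            offsets: IndexVec::from_raw(vec![Size::from_bytes(4), Size::ZERO]),
            memory_index: IndexVec::from_raw(vec![1, 0]),
        };
        assert_eq!(shape.offset(0).bytes(), 4);
        assert_eq!(shape.memory_index(0), 1);
        let by_offset: Vec<_> = shape.index_by_increasing_offset().collect();
        assert_eq!(by_offset, vec![1, 0]);
    }
}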

/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// The default address space, corresponding to data space.
    pub const DATA: Self = AddressSpace(0);
}

/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64,
    },
    Aggregate {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    },
}

impl Abi {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        match *self {
            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => !sized,
        }
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        !self.is_unsized()
    }

    /// Returns `true` if this is a single signed integer scalar.
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self {
            Abi::Scalar(scal) => match scal.primitive() {
                Primitive::Int(_, signed) => signed,
                _ => false,
            },
            _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
        }
    }

    /// Returns `true` if this is an uninhabited type.
    #[inline]
    pub fn is_uninhabited(&self) -> bool {
        matches!(*self, Abi::Uninhabited)
    }

    /// Returns `true` if this is a scalar type.
    #[inline]
    pub fn is_scalar(&self) -> bool {
        matches!(*self, Abi::Scalar(_))
    }
}

#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Variants {
    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single { index: VariantIdx },

    /// Enum-likes with more than one inhabited variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding,
        tag_field: usize,
        variants: IndexVec<VariantIdx, LayoutS>,
    },
}

#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum TagEncoding {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// Discriminant and variant index coincide.
    /// The variant `untagged_variant` contains a niche at an arbitrary
    /// offset (field `tag_field` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    Niche {
        untagged_variant: VariantIdx,
        niche_variants: RangeInclusive<VariantIdx>,
        niche_start: u128,
    },
}
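
// A worked example (a sketch, not in the upstream file) of the niche formula
// above: in an `Option<bool>`-style layout the niche of `bool` starts at 2,
// `niche_variants` is `0..=0` (just `None`), so `None` is encoded as tag 2.
#[cfg(test)]
mod tag_encoding_example {
    #[test]
    fn niche_tag_formula() {
        let (niche_start, niche_variants_start) = (2u128, 0u128);
        let d = 0u128; // discriminant of the niched variant (`None`)
        let tag = (d - niche_variants_start).wrapping_add(niche_start);
        assert_eq!(tag, 2);
    }
}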

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values being reserved by moving either the `v.start` or
        // `v.end` bound. Given an eventual `Option<T>`, we try to maximize the chance for `None`
        // to occupy the niche of zero. This is accomplished by preferring enums with 2 variants
        // (`count == 1`) and always taking the shortest path to niche zero. Having `None` in
        // niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select closest to zero given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
        // since they have to fit perfectly. If niche zero is already reserved, the selection of
        // bounds is of little interest.
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // zero is unavailable because wrapping occurs
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // moved past zero, use other bound
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // moved past zero, use other bound
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}
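
// Illustrative sketch (not in the upstream file): `bool`'s scalar leaves 254
// invalid values (2..=255), and reserving one of them extends the valid range
// to `0..=2`, with 2 becoming the first reserved (niche) value.
#[cfg(test)]
mod niche_example {
    use super::*;

    #[test]
    fn reserve_from_bool_niche() {
        let dl = TargetDataLayout::default();
        let bool_scalar = Scalar::Initialized {
            value: Int(I8, false),
            valid_range: WrappingRange { start: 0, end: 1 },
        };
        let niche = Niche::from_scalar(&dl, Size::ZERO, bool_scalar).unwrap();
        assert_eq!(niche.available(&dl), 254);
        let (niche_start, scalar) = niche.reserve(&dl, 1).unwrap();
        assert_eq!(niche_start, 2);
        assert_eq!(scalar.valid_range(&dl), WrappingRange { start: 0, end: 2 });
    }
}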

rustc_index::newtype_index! {
    /// The *source-order* index of a variant in a type.
    ///
    /// For enums, these are always `0..variant_count`, regardless of any
    /// custom discriminants that may have been defined, and including any
    /// variants that may end up uninhabited due to field types. (Some of the
    /// variants may not be present in a monomorphized ABI [`Variants`], but
    /// those skipped variants are always counted when determining the *index*.)
    ///
    /// `struct`s, tuples, and `union`s are considered to have a single variant
    /// with variant index zero, aka [`FIRST_VARIANT`].
    #[derive(HashStable_Generic)]
    pub struct VariantIdx {
        /// Equivalent to `VariantIdx(0)`.
        const FIRST_VARIANT = 0;
    }
}

#[derive(PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct LayoutS {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. generators can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants,

    /// The `abi` defines how this data is passed between functions, and it defines
    /// value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
    /// have to be taken into account to find all fields of this layout.
    pub abi: Abi,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,

    pub align: AbiAndPrefAlign,
    pub size: Size,
}

impl LayoutS {
    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
        let size = scalar.size(cx);
        let align = scalar.align(cx);
        LayoutS {
            variants: Variants::Single { index: FIRST_VARIANT },
            fields: FieldsShape::Primitive,
            abi: Abi::Scalar(scalar),
            largest_niche,
            size,
            align,
        }
    }
}
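
// A sketch (not in the upstream file): building the layout of a single `u32`
// scalar against the default data layout gives a 4-byte, 4-aligned layout.
#[cfg(test)]
mod layout_scalar_example {
    use super::*;

    #[test]
    fn u32_scalar_layout() {
        let dl = TargetDataLayout::default();
        let u32_scalar = Scalar::Initialized {
            value: Int(I32, false),
            valid_range: WrappingRange::full(Size::from_bytes(4)),
        };
        let layout = LayoutS::scalar(&dl, u32_scalar);
        assert_eq!(layout.size.bytes(), 4);
        assert_eq!(layout.align.abi.bytes(), 4);
        assert!(layout.largest_niche.is_none()); // the full range leaves no niche
        assert!(!layout.is_unsized());
    }
}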

impl fmt::Debug for LayoutS {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
        // `Interned<LayoutS>`. We print it like this to avoid having to update
        // expected output in a lot of tests.
        let LayoutS { size, align, abi, fields, largest_niche, variants } = self;
        f.debug_struct("Layout")
            .field("size", size)
            .field("align", align)
            .field("abi", abi)
            .field("fields", fields)
            .field("largest_niche", largest_niche)
            .field("variants", variants)
            .finish()
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
#[rustc_pass_by_value]
pub struct Layout<'a>(pub Interned<'a, LayoutS>);

impl<'a> fmt::Debug for Layout<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // See comment on `<LayoutS as Debug>::fmt` above.
        self.0.0.fmt(f)
    }
}

impl<'a> Layout<'a> {
    pub fn fields(self) -> &'a FieldsShape {
        &self.0.0.fields
    }

    pub fn variants(self) -> &'a Variants {
        &self.0.0.variants
    }

    pub fn abi(self) -> Abi {
        self.0.0.abi
    }

    pub fn largest_niche(self) -> Option<Niche> {
        self.0.0.largest_niche
    }

    pub fn align(self) -> AbiAndPrefAlign {
        self.0.0.align
    }

    pub fn size(self) -> Size {
        self.0.0.size
    }

    /// Whether the layout is from a type that implements [`std::marker::PointerLike`].
    ///
    /// Currently, that means that the type is pointer-sized, pointer-aligned,
    /// and has a scalar ABI.
    pub fn is_pointer_like(self, data_layout: &TargetDataLayout) -> bool {
        self.size() == data_layout.pointer_size
            && self.align().abi == data_layout.pointer_align.abi
            && matches!(self.abi(), Abi::Scalar(..))
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data.
    Box { unpin: bool },
}

/// Note that this information is advisory only, and backends are free to ignore it.
/// It can only be used to encode potential optimizations, but no critical information.
#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
}

impl LayoutS {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

    pub fn is_sized(&self) -> bool {
        self.abi.is_sized()
    }

    /// Returns `true` if the type is a ZST and not unsized.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Uninhabited => self.size.bytes() == 0,
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
        }
    }
}

#[derive(Copy, Clone, Debug)]
pub enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}