// compiler/rustc_abi/src/lib.rs (upstream version 1.67.1)
#![cfg_attr(feature = "nightly", feature(step_trait, rustc_attrs, min_specialization))]

use std::convert::{TryFrom, TryInto};
use std::fmt;
#[cfg(feature = "nightly")]
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Mul, RangeInclusive, Sub};
use std::str::FromStr;

use bitflags::bitflags;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
use rustc_index::vec::{Idx, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::HashStable_Generic;
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable, Encodable};

mod layout;

pub use layout::LayoutCalculator;

/// Requirements for a `StableHashingContext` to be used in this crate.
/// This is a hack to allow using the `HashStable_Generic` derive macro
/// instead of implementing everything in `rustc_middle`.
pub trait HashStableContext {}

use Integer::*;
use Primitive::*;

bitflags! {
    #[derive(Default)]
    #[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
    pub struct ReprFlags: u8 {
        const IS_C               = 1 << 0;
        const IS_SIMD            = 1 << 1;
        const IS_TRANSPARENT     = 1 << 2;
        // Internal only for now. If true, don't reorder fields.
        const IS_LINEAR          = 1 << 3;
        // If true, the type's layout can be randomized using
        // the seed stored in `ReprOptions.layout_seed`.
        const RANDOMIZE_LAYOUT   = 1 << 4;
        // Any of these flags being set prevents field reordering optimisation.
        const IS_UNOPTIMISABLE   = ReprFlags::IS_C.bits
                                 | ReprFlags::IS_SIMD.bits
                                 | ReprFlags::IS_LINEAR.bits;
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub enum IntegerType {
    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, that
    /// is, `Pointer(true)` is `isize`.
    Pointer(bool),
    /// Fixed-size integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
    /// `Fixed(I8, false)` means `u8`.
    Fixed(Integer, bool),
}

impl IntegerType {
    pub fn is_signed(&self) -> bool {
        match self {
            IntegerType::Pointer(b) => *b,
            IntegerType::Fixed(_, b) => *b,
        }
    }
}

/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct ReprOptions {
    pub int: Option<IntegerType>,
    pub align: Option<Align>,
    pub pack: Option<Align>,
    pub flags: ReprFlags,
    /// The seed to be used for randomizing a type's layout.
    ///
    /// Note: This could technically be a `[u8; 16]` (a `u128`) which would
    /// be the "most accurate" hash as it'd encompass the item and crate
    /// hash without loss, but it does pay the price of being larger.
    /// Everything's a tradeoff, a `u64` seed should be sufficient for our
    /// purposes (primarily `-Z randomize-layout`).
    pub field_shuffle_seed: u64,
}

impl ReprOptions {
    #[inline]
    pub fn simd(&self) -> bool {
        self.flags.contains(ReprFlags::IS_SIMD)
    }

    #[inline]
    pub fn c(&self) -> bool {
        self.flags.contains(ReprFlags::IS_C)
    }

    #[inline]
    pub fn packed(&self) -> bool {
        self.pack.is_some()
    }

    #[inline]
    pub fn transparent(&self) -> bool {
        self.flags.contains(ReprFlags::IS_TRANSPARENT)
    }

    #[inline]
    pub fn linear(&self) -> bool {
        self.flags.contains(ReprFlags::IS_LINEAR)
    }

    /// Returns the discriminant type, given these `repr` options.
    /// This must only be called on enums!
    pub fn discr_type(&self) -> IntegerType {
        self.int.unwrap_or(IntegerType::Pointer(true))
    }

    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
    /// layout" optimizations, such as representing `Foo<&T>` as a
    /// single pointer.
    pub fn inhibit_enum_layout_opt(&self) -> bool {
        self.c() || self.int.is_some()
    }

    /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
    /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
    pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
        if let Some(pack) = self.pack {
            if pack.bytes() == 1 {
                return true;
            }
        }

        self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
    }

    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
    /// was enabled for its declaration crate
    pub fn can_randomize_type_layout(&self) -> bool {
        !self.inhibit_struct_field_reordering_opt()
            && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
    }

    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
    pub fn inhibit_union_abi_opt(&self) -> bool {
        self.c()
    }
}
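
// Illustrative sketch (not part of the upstream source): a small test showing
// how the `ReprOptions` queries above interact. All values here are chosen
// purely for illustration.
#[cfg(test)]
mod repr_options_examples {
    use super::*;

    #[test]
    fn repr_c_inhibits_layout_opts() {
        let repr_c = ReprOptions { flags: ReprFlags::IS_C, ..ReprOptions::default() };
        assert!(repr_c.c());
        // `repr(C)` pins both field order and the enum layout.
        assert!(repr_c.inhibit_struct_field_reordering_opt());
        assert!(repr_c.inhibit_enum_layout_opt());
        assert!(!repr_c.can_randomize_type_layout());

        // With no explicit `repr(<int>)`, enum discriminants default to `isize`.
        assert_eq!(ReprOptions::default().discr_type(), IntegerType::Pointer(true));
    }
}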

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
#[derive(Debug, PartialEq, Eq)]
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of `#[repr(C)]` enums (default: `I32`).
    pub c_enum_min_size: Integer,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: AddressSpace::DATA,
            c_enum_min_size: Integer::I32,
        }
    }
}

pub enum TargetDataLayoutErrors<'a> {
    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
    MissingAlignment { cause: &'a str },
    InvalidAlignment { cause: &'a str, err: String },
    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
    InvalidBitsSize { err: String },
}

impl TargetDataLayout {
    /// Parse data layout from an [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
    ///
    /// This function doesn't fill `c_enum_min_size`; it will always be `I32`, since it cannot
    /// be determined from the LLVM data layout string.
    pub fn parse_from_llvm_datalayout_string<'a>(
        input: &'a str,
    ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &'a str, cause: &'a str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
                kind,
                bit: s,
                cause,
                err,
            })
        };

        // Parse a size string.
        let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let align = |s: &[&'a str], cause: &'a str| {
            if s.is_empty() {
                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits)
                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in input.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = size(s, p)?;
                    dl.pointer_align = align(a, p)?;
                }
                [s, ref a @ ..] if s.starts_with('i') => {
                    let Ok(bits) = s[1..].parse::<u64>() else {
                        size(&s[1..], "i")?; // For the user error.
                        continue;
                    };
                    let a = align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, ref a @ ..] if s.starts_with('v') => {
                    let v_size = size(&s[1..], "v")?;
                    let a = align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }
        Ok(dl)
    }

    /// Returns exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
        }
    }

    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
        }
    }

    #[inline]
    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
}
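
// Illustrative sketch (not part of the upstream source): exercising the
// data-layout parser on a minimal, hypothetical LLVM data-layout string.
#[cfg(test)]
mod datalayout_examples {
    use super::*;

    #[test]
    fn parse_minimal_string() {
        // Little-endian, 32-bit pointers, 64-bit-aligned i64.
        let Ok(dl) = TargetDataLayout::parse_from_llvm_datalayout_string("e-p:32:32-i64:64")
        else {
            panic!("expected the data-layout string to parse");
        };
        assert_eq!(dl.endian, Endian::Little);
        assert_eq!(dl.pointer_size.bits(), 32);
        assert_eq!(dl.ptr_sized_integer(), I32);
        // On a 32-bit target, objects may be at most `isize::MAX` bytes.
        assert_eq!(dl.obj_size_bound(), 1 << 31);
    }
}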

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

/// Endianness of the target, which must match `cfg(target_endian)`.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{}""#, s)),
        }
    }
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct Size {
    raw: u64,
}

// Safety: `Ord` is implemented as just comparing numerical values, and numerical values
// are not changed by (de-)serialization.
#[cfg(feature = "nightly")]
unsafe impl StableOrd for Size {}

// This is debug-printed a lot in larger structs, don't waste too much space there
impl fmt::Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Size({} bytes)", self.bytes())
    }
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        // Avoid potential overflow from `bits + 7`.
        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
    }

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        #[cold]
        fn overflow(bytes: u64) -> ! {
            panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
        }

        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
    #[inline]
    pub fn sign_extend(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        (((value << shift) as i128) >> shift) as u128
    }

    /// Truncates `value` to `self` bits.
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }

    #[inline]
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
    }

    #[inline]
    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    #[inline]
    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}
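
// Illustrative sketch (not part of the upstream source): how the bit/byte
// helpers above behave, including the rounding in `Size::from_bits` and the
// sign-extension/truncation helpers.
#[cfg(test)]
mod size_examples {
    use super::*;

    #[test]
    fn bits_round_up_to_bytes() {
        // 9 bits need 2 whole bytes.
        assert_eq!(Size::from_bits(9).bytes(), 2);
        // Aligning 5 bytes to a 4-byte boundary gives 8 bytes.
        let four = Align::from_bytes(4).unwrap();
        assert_eq!(Size::from_bytes(5).align_to(four), Size::from_bytes(8));
    }

    #[test]
    fn sign_extend_and_truncate() {
        let byte = Size::from_bits(8);
        // 0x1ff doesn't fit in 8 bits; truncation keeps the low byte.
        assert_eq!(byte.truncate(0x1ff), 0xff);
        // 0xff as an 8-bit signed value is -1 after sign extension.
        assert_eq!(byte.sign_extend(0xff) as i128, -1);
        assert_eq!(byte.signed_int_min(), -128);
        assert_eq!(byte.signed_int_max(), 127);
        assert_eq!(byte.unsigned_int_max(), 255);
    }
}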

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

#[cfg(feature = "nightly")]
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct Align {
    pow2: u8,
}

// This is debug-printed a lot in larger structs, don't waste too much space there
impl fmt::Debug for Align {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Align({} bytes)", self.bytes())
    }
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    pub const MAX: Align = Align { pow2: 29 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, String> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    #[inline]
    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        fn not_power_of_2(align: u64) -> String {
            format!("`{}` is not a power of 2", align)
        }

        #[cold]
        fn too_large(align: u64) -> String {
            format!("`{}` is too large", align)
        }

        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(not_power_of_2(align));
        }
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
    #[inline]
    pub fn max_for_offset(offset: Size) -> Align {
        Align { pow2: offset.bytes().trailing_zeros() as u8 }
    }

    /// Lower the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    #[inline]
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
}
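
// Illustrative sketch (not part of the upstream source): the power-of-two
// invariant of `Align` and the offset-based helpers above.
#[cfg(test)]
mod align_examples {
    use super::*;

    #[test]
    fn power_of_two_invariant() {
        assert_eq!(Align::from_bytes(8).unwrap().bits(), 64);
        assert!(Align::from_bytes(6).is_err()); // not a power of two
        // An alignment of 0 bytes is treated like 1-byte alignment.
        assert_eq!(Align::from_bytes(0).unwrap(), Align::ONE);
    }

    #[test]
    fn offset_based_alignment() {
        // 12 = 0b1100, so the best alignment for offset 12 is 4 bytes.
        assert_eq!(Align::max_for_offset(Size::from_bytes(12)).bytes(), 4);
        // A 16-byte alignment must drop to 4 bytes to stay valid at offset 12.
        let sixteen = Align::from_bytes(16).unwrap();
        assert_eq!(sixteen.restrict_for_offset(Size::from_bytes(12)).bytes(), 4);
    }
}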

/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }

    #[inline]
    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
    }

    #[inline]
    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    #[inline]
    pub fn size(self) -> Size {
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    /// Gets the `Integer` type from an `IntegerType`.
    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
            IntegerType::Fixed(x, _) => x,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Finds the smallest `Integer` type which can represent the signed value.
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest `Integer` type which can represent the unsigned value.
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        for candidate in [I8, I16, I32, I64, I128] {
            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
                return Some(candidate);
            }
        }
        None
    }

    /// Find the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    pub fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}
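
// Illustrative sketch (not part of the upstream source): picking the smallest
// `Integer` that fits a value, as done for enum discriminants.
#[cfg(test)]
mod integer_examples {
    use super::*;

    #[test]
    fn fit_values() {
        assert_eq!(Integer::fit_signed(127), I8);
        assert_eq!(Integer::fit_signed(-129), I16); // one past i8::MIN
        assert_eq!(Integer::fit_unsigned(255), I8);
        assert_eq!(Integer::fit_unsigned(256), I16);
        assert_eq!(Integer::from_size(Size::from_bytes(4)), Ok(I32));
        assert_eq!(I128.size().bits(), 128);
    }
}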

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer,
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            Pointer => dl.pointer_size,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align,
        }
    }

    // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
    #[inline]
    pub fn is_float(self) -> bool {
        matches!(self, F32 | F64)
    }

    // FIXME(eddyb) remove, it's completely unused.
    #[inline]
    pub fn is_int(self) -> bool {
        matches!(self, Int(..))
    }

    #[inline]
    pub fn is_ptr(self) -> bool {
        matches!(self, Pointer)
    }
}

/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    pub fn full(size: Size) -> Self {
        Self { start: 0, end: size.unsigned_int_max() }
    }

    /// Returns `true` if `v` is contained in the range.
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Returns `self` with replaced `start`
    #[inline(always)]
    pub fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with replaced `end`
    #[inline(always)]
    pub fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }

    /// Returns `true` if `size` completely fills the range.
    #[inline]
    pub fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
    }
}

impl fmt::Debug for WrappingRange {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.start > self.end {
            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
        } else {
            write!(fmt, "{}..={}", self.start, self.end)?;
        }
        Ok(())
    }
}
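
// Illustrative sketch (not part of the upstream source): the wrap-around
// membership test for the `254..=2` example from the docs above.
#[cfg(test)]
mod wrapping_range_examples {
    use super::*;

    #[test]
    fn wraparound_contains() {
        // For an 8-bit value, `254..=2` covers 254, 255, 0, 1, 2.
        let r = WrappingRange { start: 254, end: 2 };
        assert!(r.contains(254));
        assert!(r.contains(0));
        assert!(r.contains(2));
        assert!(!r.contains(3));
        assert!(!r.contains(253));

        // `0..=255` fills all 8-bit values.
        assert!(WrappingRange::full(Size::from_bytes(1)).is_full_for(Size::from_bytes(1)));
    }
}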

/// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Scalar {
    Initialized {
        value: Primitive,

        // FIXME(eddyb) always use the shortest range, e.g., by finding
        // the largest space between two consecutive valid values and
        // taking everything else as the (shortest) valid range.
        valid_range: WrappingRange,
    },
    Union {
        /// Even for unions, we need to use the correct registers for the kind of
        /// values inside the union, so we keep the `Primitive` type around. We
        /// also use it to compute the size of the scalar.
        /// However, unions never have niches and even allow undef,
        /// so there is no `valid_range`.
        value: Primitive,
    },
}

impl Scalar {
    #[inline]
    pub fn is_bool(&self) -> bool {
        matches!(
            self,
            Scalar::Initialized {
                value: Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 }
            }
        )
    }

    /// Get the primitive representation of this type, ignoring the valid range and whether the
    /// value is allowed to be undefined (due to being a union).
    pub fn primitive(&self) -> Primitive {
        match *self {
            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
        }
    }

    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
        self.primitive().align(cx)
    }

    pub fn size(self, cx: &impl HasDataLayout) -> Size {
        self.primitive().size(cx)
    }

    #[inline]
    pub fn to_union(&self) -> Self {
        Self::Union { value: self.primitive() }
    }

    #[inline]
    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
        }
    }

    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
    #[inline]
    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
        match self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
        }
    }

    /// Returns `true` if all possible numbers are valid, i.e., `valid_range` covers the whole
    /// layout.
    #[inline]
    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
            Scalar::Union { .. } => true,
        }
    }

    /// Returns `true` if this type can be left uninit.
    #[inline]
    pub fn is_uninit_valid(&self) -> bool {
        match *self {
            Scalar::Initialized { .. } => false,
            Scalar::Union { .. } => true,
        }
    }
}
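
// Illustrative sketch (not part of the upstream source): the canonical `bool`
// scalar and what converting it to a union scalar does to its valid range.
#[cfg(test)]
mod scalar_examples {
    use super::*;

    #[test]
    fn bool_scalar() {
        let dl = TargetDataLayout::default();
        let b = Scalar::Initialized {
            value: Int(I8, false),
            valid_range: WrappingRange { start: 0, end: 1 },
        };
        assert!(b.is_bool());
        assert_eq!(b.size(&dl), Size::from_bytes(1));
        // Only 0 and 1 are valid, so the range does not cover the whole byte.
        assert!(!b.is_always_valid(&dl));

        // The union form keeps the primitive but drops the range restriction.
        let u = b.to_union();
        assert!(u.is_always_valid(&dl));
        assert!(u.is_uninit_valid());
    }
}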

/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum FieldsShape {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: Vec<Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: Vec<u32>,
    },
}

impl FieldsShape {
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(
                    i < count.get(),
                    "tried to access field {} of union with {} fields",
                    i,
                    count
                );
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count);
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
        }
    }

    #[inline]
    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
        }
    }

    /// Gets source indices of the fields by increasing offsets.
    #[inline]
    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for i in 0..self.count() {
                    inverse_small[memory_index[i] as usize] = i as u8;
                }
            } else {
                inverse_big = vec![0; self.count()];
                for i in 0..self.count() {
                    inverse_big[memory_index[i] as usize] = i as u32;
                }
            }
        }

        (0..self.count()).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i] as usize
                }
            }
        })
    }
}
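
// Illustrative sketch (not part of the upstream source): a two-field struct
// whose fields were reordered, showing the `memory_index` permutation and its
// inverse as used by `index_by_increasing_offset`.
#[cfg(test)]
mod fields_shape_examples {
    use super::*;

    #[test]
    fn reordered_fields() {
        // Source field 0 lives at offset 4 (memory position 1);
        // source field 1 lives at offset 0 (memory position 0).
        let fields = FieldsShape::Arbitrary {
            offsets: vec![Size::from_bytes(4), Size::ZERO],
            memory_index: vec![1, 0],
        };
        assert_eq!(fields.count(), 2);
        assert_eq!(fields.offset(0), Size::from_bytes(4));
        assert_eq!(fields.memory_index(0), 1);
        // Walking by increasing offset yields source indices 1, then 0.
        let order: Vec<usize> = fields.index_by_increasing_offset().collect();
        assert_eq!(order, vec![1, 0]);

        // Array fields are laid out at multiples of the stride.
        let array = FieldsShape::Array { stride: Size::from_bytes(4), count: 3 };
        assert_eq!(array.offset(2), Size::from_bytes(8));
    }
}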

/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// The default address space, corresponding to data space.
    pub const DATA: Self = AddressSpace(0);
}

/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64,
    },
    Aggregate {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    },
}

impl Abi {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        match *self {
            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => !sized,
        }
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        !self.is_unsized()
    }

    /// Returns `true` if this is a single signed integer scalar.
    /// Panics on any non-scalar ABI.
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self {
            Abi::Scalar(scal) => match scal.primitive() {
                Primitive::Int(_, signed) => signed,
                _ => false,
            },
            _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
        }
    }

    /// Returns `true` if this is an uninhabited type
    #[inline]
    pub fn is_uninhabited(&self) -> bool {
        matches!(*self, Abi::Uninhabited)
    }

    /// Returns `true` if this is a scalar type
    #[inline]
    pub fn is_scalar(&self) -> bool {
        matches!(*self, Abi::Scalar(_))
    }
}
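
// Illustrative sketch (not part of the upstream source): simple queries on a
// few `Abi` values.
#[cfg(test)]
mod abi_examples {
    use super::*;

    #[test]
    fn abi_queries() {
        let unsigned_byte = Scalar::Initialized {
            value: Int(I8, false),
            valid_range: WrappingRange { start: 0, end: 255 },
        };
        assert!(Abi::Scalar(unsigned_byte).is_scalar());
        assert!(!Abi::Scalar(unsigned_byte).is_signed());
        // Only `Aggregate { sized: false }` is unsized.
        assert!(Abi::Aggregate { sized: false }.is_unsized());
        assert!(Abi::Uninhabited.is_uninhabited());
    }
}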

#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Variants<V: Idx> {
    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single { index: V },

    /// Enum-likes with more than one inhabited variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding<V>,
        tag_field: usize,
        variants: IndexVec<V, LayoutS<V>>,
    },
}

#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum TagEncoding<V: Idx> {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// Discriminant and variant index coincide.
    /// The variant `untagged_variant` contains a niche at an arbitrary
    /// offset (field `tag_field` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    Niche { untagged_variant: V, niche_variants: RangeInclusive<V>, niche_start: u128 },
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values being reserved by moving either the `v.start` or
        // `v.end` bound.
        // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the
        // niche of zero. This is accomplished by preferring enums with 2 variants (`count == 1`)
        // and always taking the shortest path to niche zero.
        // Having `None` in niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select closest to zero given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
        // since they have to fit perfectly.
        // If niche zero is already reserved, the selection of bounds is of little interest.
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // zero is unavailable because wrapping occurs
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // moved past zero, use other bound
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // moved past zero, use other bound
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}
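
// Illustrative sketch (not part of the upstream source): counting and
// reserving niche values on a `bool`-shaped scalar.
#[cfg(test)]
mod niche_examples {
    use super::*;

    #[test]
    fn bool_niche() {
        let dl = TargetDataLayout::default();
        let b = Scalar::Initialized {
            value: Int(I8, false),
            valid_range: WrappingRange { start: 0, end: 1 },
        };
        let niche = Niche::from_scalar(&dl, Size::ZERO, b).unwrap();
        // A `bool` leaves 254 of the 256 byte values invalid.
        assert_eq!(niche.available(&dl), 254);

        // Reserving one discriminant extends the valid range to `0..=2` and
        // hands out the value 2 for the new variant.
        let (start, scalar) = niche.reserve(&dl, 1).unwrap();
        assert_eq!(start, 2);
        assert_eq!(scalar.valid_range(&dl), WrappingRange { start: 0, end: 2 });
    }
}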

#[derive(PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct LayoutS<V: Idx> {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. generators can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants<V>,

    /// The `abi` defines how this data is passed between functions, and it defines
    /// value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
    /// have to be taken into account to find all fields of this layout.
    pub abi: Abi,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,

    pub align: AbiAndPrefAlign,
    pub size: Size,
}

impl<V: Idx> LayoutS<V> {
    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
        let size = scalar.size(cx);
        let align = scalar.align(cx);
        LayoutS {
            variants: Variants::Single { index: V::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Scalar(scalar),
            largest_niche,
            size,
            align,
        }
    }
}

impl<V: Idx> fmt::Debug for LayoutS<V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
        // `Interned<LayoutS>`. We print it like this to avoid having to update
        // expected output in a lot of tests.
        let LayoutS { size, align, abi, fields, largest_niche, variants } = self;
        f.debug_struct("Layout")
            .field("size", size)
            .field("align", align)
            .field("abi", abi)
            .field("fields", fields)
            .field("largest_niche", largest_niche)
            .field("variants", variants)
            .finish()
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Most general case, we know no restrictions to tell LLVM.
    SharedMutable,

    /// `&T` where `T` contains no `UnsafeCell`, is `dereferenceable`, `noalias` and `readonly`.
    Frozen,

    /// `&mut T` which is `dereferenceable` and `noalias` but not `readonly`.
    UniqueBorrowed,

    /// `&mut !Unpin`, which is `dereferenceable` but neither `noalias` nor `readonly`.
    UniqueBorrowedPinned,

    /// `Box<T>`, which is `noalias` (even on return types, unlike the above) but neither `readonly`
    /// nor `dereferenceable`.
    UniqueOwned,
}

#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
    pub address_space: AddressSpace,
}

/// Used in `might_permit_raw_init` to indicate the kind of initialisation
/// that is checked to be valid.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum InitKind {
    Zero,
    UninitMitigated0x01Fill,
}

impl<V: Idx> LayoutS<V> {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

    pub fn is_sized(&self) -> bool {
        self.abi.is_sized()
    }

    /// Returns `true` if the type is a ZST and not unsized.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Uninhabited => self.size.bytes() == 0,
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
        }
    }
}

#[derive(Copy, Clone, Debug)]
pub enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}