1 //! Functions concerning immediate values and operands, and reading from operands.
2 //! All high-level functions to read from memory work on operands as sources.
4 use std
::convert
::TryFrom
;
7 use rustc_errors
::ErrorReported
;
8 use rustc_hir
::def
::Namespace
;
9 use rustc_macros
::HashStable
;
10 use rustc_middle
::ty
::layout
::{PrimitiveExt, TyAndLayout}
;
11 use rustc_middle
::ty
::print
::{FmtPrinter, PrettyPrinter, Printer}
;
12 use rustc_middle
::ty
::Ty
;
13 use rustc_middle
::{mir, ty}
;
14 use rustc_target
::abi
::{Abi, DiscriminantKind, HasDataLayout, LayoutOf, Size}
;
15 use rustc_target
::abi
::{VariantIdx, Variants}
;
18 from_known_layout
, mir_assign_valid_types
, ConstValue
, GlobalId
, InterpCx
, InterpResult
,
19 MPlaceTy
, Machine
, MemPlace
, Place
, PlaceTy
, Pointer
, Scalar
, ScalarMaybeUninit
,
22 /// An `Immediate` represents a single immediate self-contained Rust value.
24 /// For optimization of a few very common cases, there is also a representation for a pair of
25 /// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
26 /// operations and wide pointers. This idea was taken from rustc's codegen.
27 /// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
28 /// defined on `Immediate`, and do not have to work with a `Place`.
29 #[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
30 pub enum Immediate
<Tag
= ()> {
31 Scalar(ScalarMaybeUninit
<Tag
>),
32 ScalarPair(ScalarMaybeUninit
<Tag
>, ScalarMaybeUninit
<Tag
>),
35 impl<Tag
> From
<ScalarMaybeUninit
<Tag
>> for Immediate
<Tag
> {
37 fn from(val
: ScalarMaybeUninit
<Tag
>) -> Self {
38 Immediate
::Scalar(val
)
42 impl<Tag
> From
<Scalar
<Tag
>> for Immediate
<Tag
> {
44 fn from(val
: Scalar
<Tag
>) -> Self {
45 Immediate
::Scalar(val
.into())
49 impl<Tag
> From
<Pointer
<Tag
>> for Immediate
<Tag
> {
51 fn from(val
: Pointer
<Tag
>) -> Self {
52 Immediate
::Scalar(Scalar
::from(val
).into())
56 impl<'tcx
, Tag
> Immediate
<Tag
> {
57 pub fn new_slice(val
: Scalar
<Tag
>, len
: u64, cx
: &impl HasDataLayout
) -> Self {
58 Immediate
::ScalarPair(val
.into(), Scalar
::from_machine_usize(len
, cx
).into())
61 pub fn new_dyn_trait(val
: Scalar
<Tag
>, vtable
: Pointer
<Tag
>) -> Self {
62 Immediate
::ScalarPair(val
.into(), vtable
.into())
66 pub fn to_scalar_or_undef(self) -> ScalarMaybeUninit
<Tag
> {
68 Immediate
::Scalar(val
) => val
,
69 Immediate
::ScalarPair(..) => bug
!("Got a wide pointer where a scalar was expected"),
74 pub fn to_scalar(self) -> InterpResult
<'tcx
, Scalar
<Tag
>> {
75 self.to_scalar_or_undef().not_undef()
79 pub fn to_scalar_pair(self) -> InterpResult
<'tcx
, (Scalar
<Tag
>, Scalar
<Tag
>)> {
81 Immediate
::Scalar(..) => bug
!("Got a thin pointer where a scalar pair was expected"),
82 Immediate
::ScalarPair(a
, b
) => Ok((a
.not_undef()?
, b
.not_undef()?
)),
87 // ScalarPair needs a type to interpret, so we often have an immediate and a type together
88 // as input for binary and cast operations.
89 #[derive(Copy, Clone, Debug)]
90 pub struct ImmTy
<'tcx
, Tag
= ()> {
92 pub layout
: TyAndLayout
<'tcx
>,
95 impl<Tag
: Copy
> std
::fmt
::Display
for ImmTy
<'tcx
, Tag
> {
96 fn fmt(&self, f
: &mut std
::fmt
::Formatter
<'_
>) -> std
::fmt
::Result
{
97 /// Helper function for printing a scalar to a FmtPrinter
98 fn p
<'a
, 'tcx
, F
: std
::fmt
::Write
, Tag
>(
99 cx
: FmtPrinter
<'a
, 'tcx
, F
>,
100 s
: ScalarMaybeUninit
<Tag
>,
102 ) -> Result
<FmtPrinter
<'a
, 'tcx
, F
>, std
::fmt
::Error
> {
104 ScalarMaybeUninit
::Scalar(s
) => {
105 cx
.pretty_print_const_scalar(s
.erase_tag(), ty
, true)
107 ScalarMaybeUninit
::Uninit
=> cx
.typed_value(
109 this
.write_str("{undef ")?
;
112 |this
| this
.print_type(ty
),
117 ty
::tls
::with(|tcx
| {
119 Immediate
::Scalar(s
) => {
120 if let Some(ty
) = tcx
.lift(&self.layout
.ty
) {
121 let cx
= FmtPrinter
::new(tcx
, f
, Namespace
::ValueNS
);
125 write
!(f
, "{}: {}", s
.erase_tag(), self.layout
.ty
)
127 Immediate
::ScalarPair(a
, b
) => {
128 // FIXME(oli-obk): at least print tuples and slices nicely
129 write
!(f
, "({}, {}): {}", a
.erase_tag(), b
.erase_tag(), self.layout
.ty
,)
136 impl<'tcx
, Tag
> ::std
::ops
::Deref
for ImmTy
<'tcx
, Tag
> {
137 type Target
= Immediate
<Tag
>;
139 fn deref(&self) -> &Immediate
<Tag
> {
144 /// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
145 /// or still in memory. The latter is an optimization, to delay reading that chunk of
146 /// memory and to avoid having to store arbitrary-sized data here.
147 #[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
148 pub enum Operand
<Tag
= ()> {
149 Immediate(Immediate
<Tag
>),
150 Indirect(MemPlace
<Tag
>),
153 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
154 pub struct OpTy
<'tcx
, Tag
= ()> {
155 op
: Operand
<Tag
>, // Keep this private; it helps enforce invariants.
156 pub layout
: TyAndLayout
<'tcx
>,
159 impl<'tcx
, Tag
> ::std
::ops
::Deref
for OpTy
<'tcx
, Tag
> {
160 type Target
= Operand
<Tag
>;
162 fn deref(&self) -> &Operand
<Tag
> {
167 impl<'tcx
, Tag
: Copy
> From
<MPlaceTy
<'tcx
, Tag
>> for OpTy
<'tcx
, Tag
> {
169 fn from(mplace
: MPlaceTy
<'tcx
, Tag
>) -> Self {
170 OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout }
174 impl<'tcx
, Tag
> From
<ImmTy
<'tcx
, Tag
>> for OpTy
<'tcx
, Tag
> {
176 fn from(val
: ImmTy
<'tcx
, Tag
>) -> Self {
177 OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
181 impl<'tcx
, Tag
: Copy
> ImmTy
<'tcx
, Tag
> {
183 pub fn from_scalar(val
: Scalar
<Tag
>, layout
: TyAndLayout
<'tcx
>) -> Self {
184 ImmTy { imm: val.into(), layout }
188 pub fn from_immediate(imm
: Immediate
<Tag
>, layout
: TyAndLayout
<'tcx
>) -> Self {
189 ImmTy { imm, layout }
193 pub fn try_from_uint(i
: impl Into
<u128
>, layout
: TyAndLayout
<'tcx
>) -> Option
<Self> {
194 Some(Self::from_scalar(Scalar
::try_from_uint(i
, layout
.size
)?
, layout
))
197 pub fn from_uint(i
: impl Into
<u128
>, layout
: TyAndLayout
<'tcx
>) -> Self {
198 Self::from_scalar(Scalar
::from_uint(i
, layout
.size
), layout
)
202 pub fn try_from_int(i
: impl Into
<i128
>, layout
: TyAndLayout
<'tcx
>) -> Option
<Self> {
203 Some(Self::from_scalar(Scalar
::try_from_int(i
, layout
.size
)?
, layout
))
207 pub fn from_int(i
: impl Into
<i128
>, layout
: TyAndLayout
<'tcx
>) -> Self {
208 Self::from_scalar(Scalar
::from_int(i
, layout
.size
), layout
)
212 impl<'mir
, 'tcx
: 'mir
, M
: Machine
<'mir
, 'tcx
>> InterpCx
<'mir
, 'tcx
, M
> {
213 /// Normalize `place.ptr` to a `Pointer` if this is a place and not a ZST.
214 /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
218 op
: OpTy
<'tcx
, M
::PointerTag
>,
219 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
220 match op
.try_as_mplace(self) {
221 Ok(mplace
) => Ok(self.force_mplace_ptr(mplace
)?
.into()),
222 Err(imm
) => Ok(imm
.into()), // Nothing to cast/force
226 /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
227 /// Returns `None` if the layout does not permit loading this as a value.
228 fn try_read_immediate_from_mplace(
230 mplace
: MPlaceTy
<'tcx
, M
::PointerTag
>,
231 ) -> InterpResult
<'tcx
, Option
<ImmTy
<'tcx
, M
::PointerTag
>>> {
232 if mplace
.layout
.is_unsized() {
233 // Don't touch unsized
238 .check_mplace_access(mplace
, None
)
239 .expect("places should be checked on creation")
243 if let Scalar
::Ptr(ptr
) = mplace
.ptr
{
244 // We may be reading from a static.
245 // In order to ensure that `static FOO: Type = FOO;` causes a cycle error
246 // instead of magically pulling *any* ZST value from the ether, we need to
247 // actually access the referenced allocation.
248 self.memory
.get_raw(ptr
.alloc_id
)?
;
250 return Ok(Some(ImmTy
{
252 imm
: Scalar
::zst().into(),
253 layout
: mplace
.layout
,
258 let alloc
= self.memory
.get_raw(ptr
.alloc_id
)?
;
260 match mplace
.layout
.abi
{
262 let scalar
= alloc
.read_scalar(self, ptr
, mplace
.layout
.size
)?
;
263 Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }
))
265 Abi
::ScalarPair(ref a
, ref b
) => {
266 // We checked `ptr_align` above, so all fields will have the alignment they need.
267 // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
268 // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
269 let (a
, b
) = (&a
.value
, &b
.value
);
270 let (a_size
, b_size
) = (a
.size(self), b
.size(self));
272 let b_offset
= a_size
.align_to(b
.align(self).abi
);
273 assert
!(b_offset
.bytes() > 0); // we later use the offset to tell apart the fields
274 let b_ptr
= ptr
.offset(b_offset
, self)?
;
275 let a_val
= alloc
.read_scalar(self, a_ptr
, a_size
)?
;
276 let b_val
= alloc
.read_scalar(self, b_ptr
, b_size
)?
;
277 Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }
))
283 /// Try returning an immediate for the operand.
284 /// If the layout does not permit loading this as an immediate, return where in memory
285 /// we can find the data.
286 /// Note that for a given layout, this operation will either always fail or always
287 /// succeed! Whether it succeeds depends on whether the layout can be represented
288 /// in a `Immediate`, not on which data is stored there currently.
289 pub(crate) fn try_read_immediate(
291 src
: OpTy
<'tcx
, M
::PointerTag
>,
292 ) -> InterpResult
<'tcx
, Result
<ImmTy
<'tcx
, M
::PointerTag
>, MPlaceTy
<'tcx
, M
::PointerTag
>>> {
293 Ok(match src
.try_as_mplace(self) {
295 if let Some(val
) = self.try_read_immediate_from_mplace(mplace
)?
{
305 /// Read an immediate from a place, asserting that that is possible with the given layout.
307 pub fn read_immediate(
309 op
: OpTy
<'tcx
, M
::PointerTag
>,
310 ) -> InterpResult
<'tcx
, ImmTy
<'tcx
, M
::PointerTag
>> {
311 if let Ok(imm
) = self.try_read_immediate(op
)?
{
314 bug
!("primitive read failed for type: {:?}", op
.layout
.ty
);
318 /// Read a scalar from a place
321 op
: OpTy
<'tcx
, M
::PointerTag
>,
322 ) -> InterpResult
<'tcx
, ScalarMaybeUninit
<M
::PointerTag
>> {
323 Ok(self.read_immediate(op
)?
.to_scalar_or_undef())
326 // Turn the wide MPlace into a string (must already be dereferenced!)
327 pub fn read_str(&self, mplace
: MPlaceTy
<'tcx
, M
::PointerTag
>) -> InterpResult
<'tcx
, &str> {
328 let len
= mplace
.len(self)?
;
329 let bytes
= self.memory
.read_bytes(mplace
.ptr
, Size
::from_bytes(len
))?
;
330 let str = ::std
::str::from_utf8(bytes
).map_err(|err
| err_ub
!(InvalidStr(err
)))?
;
334 /// Projection functions
335 pub fn operand_field(
337 op
: OpTy
<'tcx
, M
::PointerTag
>,
339 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
340 let base
= match op
.try_as_mplace(self) {
342 // We can reuse the mplace field computation logic for indirect operands.
343 let field
= self.mplace_field(mplace
, field
)?
;
344 return Ok(field
.into());
349 let field_layout
= op
.layout
.field(self, field
)?
;
350 if field_layout
.is_zst() {
351 let immediate
= Scalar
::zst().into();
352 return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout }
);
354 let offset
= op
.layout
.fields
.offset(field
);
355 let immediate
= match *base
{
356 // the field covers the entire type
357 _
if offset
.bytes() == 0 && field_layout
.size
== op
.layout
.size
=> *base
,
358 // extract fields from types with `ScalarPair` ABI
359 Immediate
::ScalarPair(a
, b
) => {
360 let val
= if offset
.bytes() == 0 { a }
else { b }
;
363 Immediate
::Scalar(val
) => {
364 bug
!("field access on non aggregate {:#?}, {:#?}", val
, op
.layout
)
367 Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout }
)
370 pub fn operand_index(
372 op
: OpTy
<'tcx
, M
::PointerTag
>,
374 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
375 if let Ok(index
) = usize::try_from(index
) {
376 // We can just treat this as a field.
377 self.operand_field(op
, index
)
379 // Indexing into a big array. This must be an mplace.
380 let mplace
= op
.assert_mem_place(self);
381 Ok(self.mplace_index(mplace
, index
)?
.into())
385 pub fn operand_downcast(
387 op
: OpTy
<'tcx
, M
::PointerTag
>,
389 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
390 // Downcasts only change the layout
391 Ok(match op
.try_as_mplace(self) {
392 Ok(mplace
) => self.mplace_downcast(mplace
, variant
)?
.into(),
394 let layout
= op
.layout
.for_variant(self, variant
);
395 OpTy { layout, ..op }
400 pub fn operand_projection(
402 base
: OpTy
<'tcx
, M
::PointerTag
>,
403 proj_elem
: mir
::PlaceElem
<'tcx
>,
404 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
405 use rustc_middle
::mir
::ProjectionElem
::*;
407 Field(field
, _
) => self.operand_field(base
, field
.index())?
,
408 Downcast(_
, variant
) => self.operand_downcast(base
, variant
)?
,
409 Deref
=> self.deref_operand(base
)?
.into(),
410 Subslice { .. }
| ConstantIndex { .. }
| Index(_
) => {
411 // The rest should only occur as mplace, we do not use Immediates for types
412 // allowing such operations. This matches place_projection forcing an allocation.
413 let mplace
= base
.assert_mem_place(self);
414 self.mplace_projection(mplace
, proj_elem
)?
.into()
419 /// This is used by [priroda](https://github.com/oli-obk/priroda) to get an OpTy from a local
422 frame
: &super::Frame
<'mir
, 'tcx
, M
::PointerTag
, M
::FrameExtra
>,
424 layout
: Option
<TyAndLayout
<'tcx
>>,
425 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
426 let layout
= self.layout_of_local(frame
, local
, layout
)?
;
427 let op
= if layout
.is_zst() {
428 // Do not read from ZST, they might not be initialized
429 Operand
::Immediate(Scalar
::zst().into())
431 M
::access_local(&self, frame
, local
)?
433 Ok(OpTy { op, layout }
)
436 /// Every place can be read from, so we can turn them into an operand.
437 /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
438 /// will never actually read from memory.
442 place
: PlaceTy
<'tcx
, M
::PointerTag
>,
443 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
444 let op
= match *place
{
445 Place
::Ptr(mplace
) => Operand
::Indirect(mplace
),
446 Place
::Local { frame, local }
=> {
447 *self.access_local(&self.stack()[frame
], local
, None
)?
450 Ok(OpTy { op, layout: place.layout }
)
453 // Evaluate a place with the goal of reading from it. This lets us sometimes
454 // avoid allocations.
455 pub fn eval_place_to_op(
457 place
: mir
::Place
<'tcx
>,
458 layout
: Option
<TyAndLayout
<'tcx
>>,
459 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
460 // Do not use the layout passed in as argument if the base we are looking at
461 // here is not the entire place.
462 let layout
= if place
.projection
.is_empty() { layout }
else { None }
;
464 let base_op
= self.access_local(self.frame(), place
.local
, layout
)?
;
469 .try_fold(base_op
, |op
, elem
| self.operand_projection(op
, elem
))?
;
471 trace
!("eval_place_to_op: got {:?}", *op
);
472 // Sanity-check the type we ended up with.
473 debug_assert
!(mir_assign_valid_types(
475 self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
476 place
.ty(&self.frame().body
.local_decls
, *self.tcx
).ty
483 /// Evaluate the operand, returning a place where you can then find the data.
484 /// If you already know the layout, you can save two table lookups
485 /// by passing it in here.
488 mir_op
: &mir
::Operand
<'tcx
>,
489 layout
: Option
<TyAndLayout
<'tcx
>>,
490 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
491 use rustc_middle
::mir
::Operand
::*;
492 let op
= match *mir_op
{
493 // FIXME: do some more logic on `move` to invalidate the old location
494 Copy(place
) | Move(place
) => self.eval_place_to_op(place
, layout
)?
,
496 Constant(ref constant
) => {
498 self.subst_from_current_frame_and_normalize_erasing_regions(constant
.literal
);
499 self.eval_const_to_op(val
, layout
)?
502 trace
!("{:?}: {:?}", mir_op
, *op
);
506 /// Evaluate a bunch of operands at once
507 pub(super) fn eval_operands(
509 ops
: &[mir
::Operand
<'tcx
>],
510 ) -> InterpResult
<'tcx
, Vec
<OpTy
<'tcx
, M
::PointerTag
>>> {
511 ops
.iter().map(|op
| self.eval_operand(op
, None
)).collect()
514 // Used when the miri-engine runs into a constant and for extracting information from constants
515 // in patterns via the `const_eval` module
516 /// The `val` and `layout` are assumed to already be in our interpreter
517 /// "universe" (param_env).
518 crate fn eval_const_to_op(
520 val
: &ty
::Const
<'tcx
>,
521 layout
: Option
<TyAndLayout
<'tcx
>>,
522 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
523 let tag_scalar
= |scalar
| match scalar
{
524 Scalar
::Ptr(ptr
) => Scalar
::Ptr(self.tag_global_base_pointer(ptr
)),
525 Scalar
::Raw { data, size }
=> Scalar
::Raw { data, size }
,
527 // Early-return cases.
528 let val_val
= match val
.val
{
529 ty
::ConstKind
::Param(_
) => throw_inval
!(TooGeneric
),
530 ty
::ConstKind
::Error
=> throw_inval
!(TypeckError(ErrorReported
)),
531 ty
::ConstKind
::Unevaluated(def_id
, substs
, promoted
) => {
532 let instance
= self.resolve(def_id
, substs
)?
;
533 // We use `const_eval` here and `const_eval_raw` elsewhere in mir interpretation.
534 // The reason we use `const_eval_raw` everywhere else is to prevent cycles during
535 // validation, because validation automatically reads through any references, thus
536 // potentially requiring the current static to be evaluated again. This is not a
537 // problem here, because we are building an operand which means an actual read is
540 // The machine callback `adjust_global_const` below is guaranteed to
541 // be called for all constants because `const_eval` calls
542 // `eval_const_to_op` recursively.
543 return Ok(self.const_eval(GlobalId { instance, promoted }
, val
.ty
)?
);
545 ty
::ConstKind
::Infer(..)
546 | ty
::ConstKind
::Bound(..)
547 | ty
::ConstKind
::Placeholder(..) => {
548 bug
!("eval_const_to_op: Unexpected ConstKind {:?}", val
)
550 ty
::ConstKind
::Value(val_val
) => val_val
,
552 // This call allows the machine to create fresh allocation ids for
553 // thread-local statics (see the `adjust_global_const` function
555 let val_val
= M
::adjust_global_const(self, val_val
)?
;
556 // Other cases need layout.
557 let layout
= from_known_layout(self.tcx
, layout
, || self.layout_of(val
.ty
))?
;
558 let op
= match val_val
{
559 ConstValue
::ByRef { alloc, offset }
=> {
560 let id
= self.tcx
.create_memory_alloc(alloc
);
561 // We rely on mutability being set correctly in that allocation to prevent writes
562 // where none should happen.
563 let ptr
= self.tag_global_base_pointer(Pointer
::new(id
, offset
));
564 Operand
::Indirect(MemPlace
::from_ptr(ptr
, layout
.align
.abi
))
566 ConstValue
::Scalar(x
) => Operand
::Immediate(tag_scalar(x
).into()),
567 ConstValue
::Slice { data, start, end }
=> {
568 // We rely on mutability being set correctly in `data` to prevent writes
569 // where none should happen.
570 let ptr
= Pointer
::new(
571 self.tcx
.create_memory_alloc(data
),
572 Size
::from_bytes(start
), // offset: `start`
574 Operand
::Immediate(Immediate
::new_slice(
575 self.tag_global_base_pointer(ptr
).into(),
576 u64::try_from(end
.checked_sub(start
).unwrap()).unwrap(), // len: `end - start`
581 Ok(OpTy { op, layout }
)
584 /// Read discriminant, return the runtime value as well as the variant index.
585 pub fn read_discriminant(
587 op
: OpTy
<'tcx
, M
::PointerTag
>,
588 ) -> InterpResult
<'tcx
, (Scalar
<M
::PointerTag
>, VariantIdx
)> {
589 trace
!("read_discriminant_value {:#?}", op
.layout
);
591 // Get type and layout of the discriminant.
592 let discr_layout
= self.layout_of(op
.layout
.ty
.discriminant_ty(*self.tcx
))?
;
593 trace
!("discriminant type: {:?}", discr_layout
.ty
);
595 // We use "discriminant" to refer to the value associated with a particular enum variant.
596 // This is not to be confused with its "variant index", which is just determining its position in the
597 // declared list of variants -- they can differ with explicitly assigned discriminants.
598 // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
599 // straight-forward (`DiscriminantKind::Tag`) or with a niche (`DiscriminantKind::Niche`).
600 // Unfortunately, the rest of the compiler calls the latter "discriminant", too, which makes things
602 let (tag_scalar_layout
, tag_kind
, tag_index
) = match op
.layout
.variants
{
603 Variants
::Single { index }
=> {
604 let discr
= match op
.layout
.ty
.discriminant_for_variant(*self.tcx
, index
) {
606 // This type actually has discriminants.
607 assert_eq
!(discr
.ty
, discr_layout
.ty
);
608 Scalar
::from_uint(discr
.val
, discr_layout
.size
)
611 // On a type without actual discriminants, variant is 0.
612 assert_eq
!(index
.as_u32(), 0);
613 Scalar
::from_uint(index
.as_u32(), discr_layout
.size
)
616 return Ok((discr
, index
));
618 Variants
::Multiple { ref discr, ref discr_kind, discr_index, .. }
=> {
619 (discr
, discr_kind
, discr_index
)
623 // There are *three* layouts that come into play here:
624 // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
625 // the `Scalar` we return.
626 // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
627 // and used to interpret the value we read from the tag field.
628 // For the return value, a cast to `discr_layout` is performed.
629 // - The field storing the tag has a layout, which is very similar to `tag_layout` but
630 // may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
632 // Get layout for tag.
633 let tag_layout
= self.layout_of(tag_scalar_layout
.value
.to_int_ty(*self.tcx
))?
;
635 // Read tag and sanity-check `tag_layout`.
636 let tag_val
= self.read_immediate(self.operand_field(op
, tag_index
)?
)?
;
637 assert_eq
!(tag_layout
.size
, tag_val
.layout
.size
);
638 assert_eq
!(tag_layout
.abi
.is_signed(), tag_val
.layout
.abi
.is_signed());
639 let tag_val
= tag_val
.to_scalar()?
;
640 trace
!("tag value: {:?}", tag_val
);
642 // Figure out which discriminant and variant this corresponds to.
644 DiscriminantKind
::Tag
=> {
646 .force_bits(tag_val
, tag_layout
.size
)
647 .map_err(|_
| err_ub
!(InvalidDiscriminant(tag_val
.erase_tag())))?
;
648 // Cast bits from tag layout to discriminant layout.
649 let discr_val_cast
= self.cast_from_scalar(tag_bits
, tag_layout
, discr_layout
.ty
);
650 let discr_bits
= discr_val_cast
.assert_bits(discr_layout
.size
);
651 // Convert discriminant to variant index, and catch invalid discriminants.
652 let index
= match op
.layout
.ty
.kind
{
654 adt
.discriminants(self.tcx
.tcx
).find(|(_
, var
)| var
.val
== discr_bits
)
656 ty
::Generator(def_id
, substs
, _
) => {
657 let substs
= substs
.as_generator();
659 .discriminants(def_id
, self.tcx
.tcx
)
660 .find(|(_
, var
)| var
.val
== discr_bits
)
662 _
=> bug
!("tagged layout for non-adt non-generator"),
664 .ok_or_else(|| err_ub
!(InvalidDiscriminant(tag_val
.erase_tag())))?
;
665 // Return the cast value, and the index.
666 (discr_val_cast
, index
.0)
668 DiscriminantKind
::Niche { dataful_variant, ref niche_variants, niche_start }
=> {
669 // Compute the variant this niche value/"tag" corresponds to. With niche layout,
670 // discriminant (encoded in niche/tag) and variant index are the same.
671 let variants_start
= niche_variants
.start().as_u32();
672 let variants_end
= niche_variants
.end().as_u32();
673 let variant
= match tag_val
.to_bits_or_ptr(tag_layout
.size
, self) {
675 // The niche must be just 0 (which an inbounds pointer value never is)
676 let ptr_valid
= niche_start
== 0
677 && variants_start
== variants_end
678 && !self.memory
.ptr_may_be_null(ptr
);
680 throw_ub
!(InvalidDiscriminant(tag_val
.erase_tag()))
685 // We need to use machine arithmetic to get the relative variant idx:
686 // variant_index_relative = tag_val - niche_start_val
687 let tag_val
= ImmTy
::from_uint(tag_bits
, tag_layout
);
688 let niche_start_val
= ImmTy
::from_uint(niche_start
, tag_layout
);
689 let variant_index_relative_val
=
690 self.binary_op(mir
::BinOp
::Sub
, tag_val
, niche_start_val
)?
;
691 let variant_index_relative
= variant_index_relative_val
693 .assert_bits(tag_val
.layout
.size
);
694 // Check if this is in the range that indicates an actual discriminant.
695 if variant_index_relative
<= u128
::from(variants_end
- variants_start
) {
696 let variant_index_relative
= u32::try_from(variant_index_relative
)
697 .expect("we checked that this fits into a u32");
698 // Then computing the absolute variant idx should not overflow any more.
699 let variant_index
= variants_start
700 .checked_add(variant_index_relative
)
701 .expect("overflow computing absolute variant idx");
702 let variants_len
= op
706 .expect("tagged layout for non adt")
709 assert
!(usize::try_from(variant_index
).unwrap() < variants_len
);
710 VariantIdx
::from_u32(variant_index
)
716 // Compute the size of the scalar we need to return.
717 // No need to cast, because the variant index directly serves as discriminant and is
718 // encoded in the tag.
719 (Scalar
::from_uint(variant
.as_u32(), discr_layout
.size
), variant
)