1 //! Functions concerning immediate values and operands, and reading from operands.
2 //! All high-level functions to read from memory work on operands as sources.
4 use std
::convert
::TryFrom
;
7 use rustc_errors
::ErrorReported
;
8 use rustc_hir
::def
::Namespace
;
9 use rustc_macros
::HashStable
;
10 use rustc_middle
::ty
::layout
::{PrimitiveExt, TyAndLayout}
;
11 use rustc_middle
::ty
::print
::{FmtPrinter, PrettyPrinter, Printer}
;
12 use rustc_middle
::ty
::{ConstInt, Ty}
;
13 use rustc_middle
::{mir, ty}
;
14 use rustc_target
::abi
::{Abi, HasDataLayout, LayoutOf, Size, TagEncoding}
;
15 use rustc_target
::abi
::{VariantIdx, Variants}
;
18 from_known_layout
, mir_assign_valid_types
, ConstValue
, GlobalId
, InterpCx
, InterpResult
,
19 MPlaceTy
, Machine
, MemPlace
, Place
, PlaceTy
, Pointer
, Scalar
, ScalarMaybeUninit
,
/// An `Immediate` represents a single immediate self-contained Rust value.
///
/// For optimization of a few very common cases, there is also a representation for a pair of
/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
/// operations and wide pointers. This idea was taken from rustc's codegen.
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Immediate`, and do not have to work with a `Place`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
pub enum Immediate<Tag = ()> {
    /// A single, possibly uninitialized, scalar value.
    Scalar(ScalarMaybeUninit<Tag>),
    /// A pair of scalars, e.g. for wide pointers (data + metadata) or the
    /// (value, overflow-flag) result of checked binary operations.
    ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
}
35 impl<Tag
> From
<ScalarMaybeUninit
<Tag
>> for Immediate
<Tag
> {
37 fn from(val
: ScalarMaybeUninit
<Tag
>) -> Self {
38 Immediate
::Scalar(val
)
42 impl<Tag
> From
<Scalar
<Tag
>> for Immediate
<Tag
> {
44 fn from(val
: Scalar
<Tag
>) -> Self {
45 Immediate
::Scalar(val
.into())
49 impl<Tag
> From
<Pointer
<Tag
>> for Immediate
<Tag
> {
51 fn from(val
: Pointer
<Tag
>) -> Self {
52 Immediate
::Scalar(Scalar
::from(val
).into())
impl<'tcx, Tag> Immediate<Tag> {
    /// Build the `ScalarPair` immediate for a slice wide pointer: the data
    /// pointer (`val`) plus the length, encoded as a machine `usize` for the
    /// target described by `cx`.
    pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
    }

    /// Build the `ScalarPair` immediate for a `dyn Trait` wide pointer:
    /// the data pointer plus the vtable pointer.
    pub fn new_dyn_trait(val: Scalar<Tag>, vtable: Pointer<Tag>) -> Self {
        Immediate::ScalarPair(val.into(), vtable.into())
    }

    /// Extract the single (possibly uninitialized) scalar.
    /// ICEs (`bug!`) if this is a `ScalarPair`.
    pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Tag> {
        match self {
            Immediate::Scalar(val) => val,
            Immediate::ScalarPair(..) => bug!("Got a wide pointer where a scalar was expected"),
        }
    }

    /// Like `to_scalar_or_uninit`, but additionally errors if the scalar is
    /// uninitialized (`check_init`).
    pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
        self.to_scalar_or_uninit().check_init()
    }

    /// Extract both halves of a `ScalarPair`, erroring if either half is
    /// uninitialized. ICEs (`bug!`) if this is a single `Scalar`.
    pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
        match self {
            Immediate::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"),
            Immediate::ScalarPair(a, b) => Ok((a.check_init()?, b.check_init()?)),
        }
    }
}
87 // ScalarPair needs a type to interpret, so we often have an immediate and a type together
88 // as input for binary and cast operations.
89 #[derive(Copy, Clone, Debug)]
90 pub struct ImmTy
<'tcx
, Tag
= ()> {
92 pub layout
: TyAndLayout
<'tcx
>,
95 impl<Tag
: Copy
> std
::fmt
::Display
for ImmTy
<'tcx
, Tag
> {
96 fn fmt(&self, f
: &mut std
::fmt
::Formatter
<'_
>) -> std
::fmt
::Result
{
97 /// Helper function for printing a scalar to a FmtPrinter
98 fn p
<'a
, 'tcx
, F
: std
::fmt
::Write
, Tag
>(
99 cx
: FmtPrinter
<'a
, 'tcx
, F
>,
100 s
: ScalarMaybeUninit
<Tag
>,
102 ) -> Result
<FmtPrinter
<'a
, 'tcx
, F
>, std
::fmt
::Error
> {
104 ScalarMaybeUninit
::Scalar(s
) => {
105 cx
.pretty_print_const_scalar(s
.erase_tag(), ty
, true)
107 ScalarMaybeUninit
::Uninit
=> cx
.typed_value(
109 this
.write_str("{uninit ")?
;
112 |this
| this
.print_type(ty
),
117 ty
::tls
::with(|tcx
| {
119 Immediate
::Scalar(s
) => {
120 if let Some(ty
) = tcx
.lift(&self.layout
.ty
) {
121 let cx
= FmtPrinter
::new(tcx
, f
, Namespace
::ValueNS
);
125 write
!(f
, "{}: {}", s
.erase_tag(), self.layout
.ty
)
127 Immediate
::ScalarPair(a
, b
) => {
128 // FIXME(oli-obk): at least print tuples and slices nicely
129 write
!(f
, "({}, {}): {}", a
.erase_tag(), b
.erase_tag(), self.layout
.ty
,)
136 impl<'tcx
, Tag
> ::std
::ops
::Deref
for ImmTy
<'tcx
, Tag
> {
137 type Target
= Immediate
<Tag
>;
139 fn deref(&self) -> &Immediate
<Tag
> {
/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
/// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
pub enum Operand<Tag = ()> {
    /// The operand's value, already loaded or computed.
    Immediate(Immediate<Tag>),
    /// The operand still resides in memory, at this place.
    Indirect(MemPlace<Tag>),
}
/// An `Operand` together with the type/layout needed to interpret it.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct OpTy<'tcx, Tag = ()> {
    op: Operand<Tag>, // Keep this private; it helps enforce invariants.
    /// The type and layout giving meaning to `op`.
    pub layout: TyAndLayout<'tcx>,
}
159 impl<'tcx
, Tag
> ::std
::ops
::Deref
for OpTy
<'tcx
, Tag
> {
160 type Target
= Operand
<Tag
>;
162 fn deref(&self) -> &Operand
<Tag
> {
167 impl<'tcx
, Tag
: Copy
> From
<MPlaceTy
<'tcx
, Tag
>> for OpTy
<'tcx
, Tag
> {
169 fn from(mplace
: MPlaceTy
<'tcx
, Tag
>) -> Self {
170 OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout }
174 impl<'tcx
, Tag
> From
<ImmTy
<'tcx
, Tag
>> for OpTy
<'tcx
, Tag
> {
176 fn from(val
: ImmTy
<'tcx
, Tag
>) -> Self {
177 OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> {
    /// Pair a single scalar value with its layout.
    pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm: val.into(), layout }
    }

    /// Pair an arbitrary `Immediate` with its layout.
    pub fn from_immediate(imm: Immediate<Tag>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm, layout }
    }

    /// Build an unsigned-integer immediate; `None` if `i` does not fit in
    /// `layout.size`.
    pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
    }

    /// Infallible counterpart of `try_from_uint`.
    pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
    }

    /// Build a signed-integer immediate; `None` if `i` does not fit in
    /// `layout.size`.
    pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
    }

    /// Infallible counterpart of `try_from_int`.
    pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_int(i, layout.size), layout)
    }

    /// Convert this (integral) immediate into a `ConstInt`.
    /// Only valid for integral types; asserted below.
    pub fn to_const_int(self) -> ConstInt {
        assert!(self.layout.ty.is_integral());
        // NOTE(review): several lines of this function are missing from this
        // copy of the file — presumably the call (e.g. `ConstInt::new(..)`)
        // consuming the expressions below. Restore from upstream before
        // building; the fragments are kept verbatim here.
            .expect("to_const_int doesn't work on scalar pairs")
            .assert_bits(self.layout.size),
        self.layout.ty.is_signed(),
        self.layout.ty.is_ptr_sized_integral(),
    }
}
225 impl<'mir
, 'tcx
: 'mir
, M
: Machine
<'mir
, 'tcx
>> InterpCx
<'mir
, 'tcx
, M
> {
226 /// Normalize `place.ptr` to a `Pointer` if this is a place and not a ZST.
227 /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
231 op
: OpTy
<'tcx
, M
::PointerTag
>,
232 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
233 match op
.try_as_mplace(self) {
234 Ok(mplace
) => Ok(self.force_mplace_ptr(mplace
)?
.into()),
235 Err(imm
) => Ok(imm
.into()), // Nothing to cast/force
239 /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
240 /// Returns `None` if the layout does not permit loading this as a value.
241 fn try_read_immediate_from_mplace(
243 mplace
: MPlaceTy
<'tcx
, M
::PointerTag
>,
244 ) -> InterpResult
<'tcx
, Option
<ImmTy
<'tcx
, M
::PointerTag
>>> {
245 if mplace
.layout
.is_unsized() {
246 // Don't touch unsized
251 .check_mplace_access(mplace
, None
)
252 .expect("places should be checked on creation")
256 if let Scalar
::Ptr(ptr
) = mplace
.ptr
{
257 // We may be reading from a static.
258 // In order to ensure that `static FOO: Type = FOO;` causes a cycle error
259 // instead of magically pulling *any* ZST value from the ether, we need to
260 // actually access the referenced allocation.
261 self.memory
.get_raw(ptr
.alloc_id
)?
;
263 return Ok(Some(ImmTy
{
265 imm
: Scalar
::zst().into(),
266 layout
: mplace
.layout
,
271 let alloc
= self.memory
.get_raw(ptr
.alloc_id
)?
;
273 match mplace
.layout
.abi
{
275 let scalar
= alloc
.read_scalar(self, ptr
, mplace
.layout
.size
)?
;
276 Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }
))
278 Abi
::ScalarPair(ref a
, ref b
) => {
279 // We checked `ptr_align` above, so all fields will have the alignment they need.
280 // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
281 // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
282 let (a
, b
) = (&a
.value
, &b
.value
);
283 let (a_size
, b_size
) = (a
.size(self), b
.size(self));
285 let b_offset
= a_size
.align_to(b
.align(self).abi
);
286 assert
!(b_offset
.bytes() > 0); // we later use the offset to tell apart the fields
287 let b_ptr
= ptr
.offset(b_offset
, self)?
;
288 let a_val
= alloc
.read_scalar(self, a_ptr
, a_size
)?
;
289 let b_val
= alloc
.read_scalar(self, b_ptr
, b_size
)?
;
290 Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }
))
296 /// Try returning an immediate for the operand.
297 /// If the layout does not permit loading this as an immediate, return where in memory
298 /// we can find the data.
299 /// Note that for a given layout, this operation will either always fail or always
300 /// succeed! Whether it succeeds depends on whether the layout can be represented
301 /// in a `Immediate`, not on which data is stored there currently.
302 pub(crate) fn try_read_immediate(
304 src
: OpTy
<'tcx
, M
::PointerTag
>,
305 ) -> InterpResult
<'tcx
, Result
<ImmTy
<'tcx
, M
::PointerTag
>, MPlaceTy
<'tcx
, M
::PointerTag
>>> {
306 Ok(match src
.try_as_mplace(self) {
308 if let Some(val
) = self.try_read_immediate_from_mplace(mplace
)?
{
318 /// Read an immediate from a place, asserting that that is possible with the given layout.
320 pub fn read_immediate(
322 op
: OpTy
<'tcx
, M
::PointerTag
>,
323 ) -> InterpResult
<'tcx
, ImmTy
<'tcx
, M
::PointerTag
>> {
324 if let Ok(imm
) = self.try_read_immediate(op
)?
{
327 span_bug
!(self.cur_span(), "primitive read failed for type: {:?}", op
.layout
.ty
);
331 /// Read a scalar from a place
334 op
: OpTy
<'tcx
, M
::PointerTag
>,
335 ) -> InterpResult
<'tcx
, ScalarMaybeUninit
<M
::PointerTag
>> {
336 Ok(self.read_immediate(op
)?
.to_scalar_or_uninit())
339 // Turn the wide MPlace into a string (must already be dereferenced!)
340 pub fn read_str(&self, mplace
: MPlaceTy
<'tcx
, M
::PointerTag
>) -> InterpResult
<'tcx
, &str> {
341 let len
= mplace
.len(self)?
;
342 let bytes
= self.memory
.read_bytes(mplace
.ptr
, Size
::from_bytes(len
))?
;
343 let str = ::std
::str::from_utf8(bytes
).map_err(|err
| err_ub
!(InvalidStr(err
)))?
;
347 /// Projection functions
348 pub fn operand_field(
350 op
: OpTy
<'tcx
, M
::PointerTag
>,
352 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
353 let base
= match op
.try_as_mplace(self) {
355 // We can reuse the mplace field computation logic for indirect operands.
356 let field
= self.mplace_field(mplace
, field
)?
;
357 return Ok(field
.into());
362 let field_layout
= op
.layout
.field(self, field
)?
;
363 if field_layout
.is_zst() {
364 let immediate
= Scalar
::zst().into();
365 return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout }
);
367 let offset
= op
.layout
.fields
.offset(field
);
368 let immediate
= match *base
{
369 // the field covers the entire type
370 _
if offset
.bytes() == 0 && field_layout
.size
== op
.layout
.size
=> *base
,
371 // extract fields from types with `ScalarPair` ABI
372 Immediate
::ScalarPair(a
, b
) => {
373 let val
= if offset
.bytes() == 0 { a }
else { b }
;
376 Immediate
::Scalar(val
) => span_bug
!(
378 "field access on non aggregate {:#?}, {:#?}",
383 Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout }
)
386 pub fn operand_index(
388 op
: OpTy
<'tcx
, M
::PointerTag
>,
390 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
391 if let Ok(index
) = usize::try_from(index
) {
392 // We can just treat this as a field.
393 self.operand_field(op
, index
)
395 // Indexing into a big array. This must be an mplace.
396 let mplace
= op
.assert_mem_place(self);
397 Ok(self.mplace_index(mplace
, index
)?
.into())
401 pub fn operand_downcast(
403 op
: OpTy
<'tcx
, M
::PointerTag
>,
405 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
406 // Downcasts only change the layout
407 Ok(match op
.try_as_mplace(self) {
408 Ok(mplace
) => self.mplace_downcast(mplace
, variant
)?
.into(),
410 let layout
= op
.layout
.for_variant(self, variant
);
411 OpTy { layout, ..op }
416 pub fn operand_projection(
418 base
: OpTy
<'tcx
, M
::PointerTag
>,
419 proj_elem
: mir
::PlaceElem
<'tcx
>,
420 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
421 use rustc_middle
::mir
::ProjectionElem
::*;
423 Field(field
, _
) => self.operand_field(base
, field
.index())?
,
424 Downcast(_
, variant
) => self.operand_downcast(base
, variant
)?
,
425 Deref
=> self.deref_operand(base
)?
.into(),
426 Subslice { .. }
| ConstantIndex { .. }
| Index(_
) => {
427 // The rest should only occur as mplace, we do not use Immediates for types
428 // allowing such operations. This matches place_projection forcing an allocation.
429 let mplace
= base
.assert_mem_place(self);
430 self.mplace_projection(mplace
, proj_elem
)?
.into()
435 /// Read from a local. Will not actually access the local if reading from a ZST.
436 /// Will not access memory, instead an indirect `Operand` is returned.
438 /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
439 /// OpTy from a local
442 frame
: &super::Frame
<'mir
, 'tcx
, M
::PointerTag
, M
::FrameExtra
>,
444 layout
: Option
<TyAndLayout
<'tcx
>>,
445 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
446 let layout
= self.layout_of_local(frame
, local
, layout
)?
;
447 let op
= if layout
.is_zst() {
448 // Do not read from ZST, they might not be initialized
449 Operand
::Immediate(Scalar
::zst().into())
451 M
::access_local(&self, frame
, local
)?
453 Ok(OpTy { op, layout }
)
456 /// Every place can be read from, so we can turn them into an operand.
457 /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
458 /// will never actually read from memory.
462 place
: PlaceTy
<'tcx
, M
::PointerTag
>,
463 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
464 let op
= match *place
{
465 Place
::Ptr(mplace
) => Operand
::Indirect(mplace
),
466 Place
::Local { frame, local }
=> {
467 *self.access_local(&self.stack()[frame
], local
, None
)?
470 Ok(OpTy { op, layout: place.layout }
)
473 // Evaluate a place with the goal of reading from it. This lets us sometimes
474 // avoid allocations.
475 pub fn eval_place_to_op(
477 place
: mir
::Place
<'tcx
>,
478 layout
: Option
<TyAndLayout
<'tcx
>>,
479 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
480 // Do not use the layout passed in as argument if the base we are looking at
481 // here is not the entire place.
482 let layout
= if place
.projection
.is_empty() { layout }
else { None }
;
484 let base_op
= self.access_local(self.frame(), place
.local
, layout
)?
;
489 .try_fold(base_op
, |op
, elem
| self.operand_projection(op
, elem
))?
;
491 trace
!("eval_place_to_op: got {:?}", *op
);
492 // Sanity-check the type we ended up with.
493 debug_assert
!(mir_assign_valid_types(
496 self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
497 place
.ty(&self.frame().body
.local_decls
, *self.tcx
).ty
504 /// Evaluate the operand, returning a place where you can then find the data.
505 /// If you already know the layout, you can save two table lookups
506 /// by passing it in here.
509 mir_op
: &mir
::Operand
<'tcx
>,
510 layout
: Option
<TyAndLayout
<'tcx
>>,
511 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
512 use rustc_middle
::mir
::Operand
::*;
513 let op
= match *mir_op
{
514 // FIXME: do some more logic on `move` to invalidate the old location
515 Copy(place
) | Move(place
) => self.eval_place_to_op(place
, layout
)?
,
517 Constant(ref constant
) => {
519 self.subst_from_current_frame_and_normalize_erasing_regions(constant
.literal
);
520 self.const_to_op(val
, layout
)?
523 trace
!("{:?}: {:?}", mir_op
, *op
);
527 /// Evaluate a bunch of operands at once
528 pub(super) fn eval_operands(
530 ops
: &[mir
::Operand
<'tcx
>],
531 ) -> InterpResult
<'tcx
, Vec
<OpTy
<'tcx
, M
::PointerTag
>>> {
532 ops
.iter().map(|op
| self.eval_operand(op
, None
)).collect()
535 // Used when the miri-engine runs into a constant and for extracting information from constants
536 // in patterns via the `const_eval` module
537 /// The `val` and `layout` are assumed to already be in our interpreter
538 /// "universe" (param_env).
539 crate fn const_to_op(
541 val
: &ty
::Const
<'tcx
>,
542 layout
: Option
<TyAndLayout
<'tcx
>>,
543 ) -> InterpResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
544 let tag_scalar
= |scalar
| -> InterpResult
<'tcx
, _
> {
546 Scalar
::Ptr(ptr
) => Scalar
::Ptr(self.global_base_pointer(ptr
)?
),
547 Scalar
::Raw { data, size }
=> Scalar
::Raw { data, size }
,
550 // Early-return cases.
551 let val_val
= match val
.val
{
552 ty
::ConstKind
::Param(_
) | ty
::ConstKind
::Bound(..) => throw_inval
!(TooGeneric
),
553 ty
::ConstKind
::Error(_
) => throw_inval
!(TypeckError(ErrorReported
)),
554 ty
::ConstKind
::Unevaluated(def
, substs
, promoted
) => {
555 let instance
= self.resolve(def
, substs
)?
;
556 return Ok(self.eval_to_allocation(GlobalId { instance, promoted }
)?
.into());
558 ty
::ConstKind
::Infer(..) | ty
::ConstKind
::Placeholder(..) => {
559 span_bug
!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", val
)
561 ty
::ConstKind
::Value(val_val
) => val_val
,
563 // Other cases need layout.
565 from_known_layout(self.tcx
, self.param_env
, layout
, || self.layout_of(val
.ty
))?
;
566 let op
= match val_val
{
567 ConstValue
::ByRef { alloc, offset }
=> {
568 let id
= self.tcx
.create_memory_alloc(alloc
);
569 // We rely on mutability being set correctly in that allocation to prevent writes
570 // where none should happen.
571 let ptr
= self.global_base_pointer(Pointer
::new(id
, offset
))?
;
572 Operand
::Indirect(MemPlace
::from_ptr(ptr
, layout
.align
.abi
))
574 ConstValue
::Scalar(x
) => Operand
::Immediate(tag_scalar(x
)?
.into()),
575 ConstValue
::Slice { data, start, end }
=> {
576 // We rely on mutability being set correctly in `data` to prevent writes
577 // where none should happen.
578 let ptr
= Pointer
::new(
579 self.tcx
.create_memory_alloc(data
),
580 Size
::from_bytes(start
), // offset: `start`
582 Operand
::Immediate(Immediate
::new_slice(
583 self.global_base_pointer(ptr
)?
.into(),
584 u64::try_from(end
.checked_sub(start
).unwrap()).unwrap(), // len: `end - start`
589 Ok(OpTy { op, layout }
)
592 /// Read discriminant, return the runtime value as well as the variant index.
593 pub fn read_discriminant(
595 op
: OpTy
<'tcx
, M
::PointerTag
>,
596 ) -> InterpResult
<'tcx
, (Scalar
<M
::PointerTag
>, VariantIdx
)> {
597 trace
!("read_discriminant_value {:#?}", op
.layout
);
598 // Get type and layout of the discriminant.
599 let discr_layout
= self.layout_of(op
.layout
.ty
.discriminant_ty(*self.tcx
))?
;
600 trace
!("discriminant type: {:?}", discr_layout
.ty
);
602 // We use "discriminant" to refer to the value associated with a particular enum variant.
603 // This is not to be confused with its "variant index", which is just determining its position in the
604 // declared list of variants -- they can differ with explicitly assigned discriminants.
605 // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
606 // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
607 let (tag_scalar_layout
, tag_encoding
, tag_field
) = match op
.layout
.variants
{
608 Variants
::Single { index }
=> {
609 let discr
= match op
.layout
.ty
.discriminant_for_variant(*self.tcx
, index
) {
611 // This type actually has discriminants.
612 assert_eq
!(discr
.ty
, discr_layout
.ty
);
613 Scalar
::from_uint(discr
.val
, discr_layout
.size
)
616 // On a type without actual discriminants, variant is 0.
617 assert_eq
!(index
.as_u32(), 0);
618 Scalar
::from_uint(index
.as_u32(), discr_layout
.size
)
621 return Ok((discr
, index
));
623 Variants
::Multiple { ref tag, ref tag_encoding, tag_field, .. }
=> {
624 (tag
, tag_encoding
, tag_field
)
628 // There are *three* layouts that come into play here:
629 // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
630 // the `Scalar` we return.
631 // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
632 // and used to interpret the value we read from the tag field.
633 // For the return value, a cast to `discr_layout` is performed.
634 // - The field storing the tag has a layout, which is very similar to `tag_layout` but
635 // may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
637 // Get layout for tag.
638 let tag_layout
= self.layout_of(tag_scalar_layout
.value
.to_int_ty(*self.tcx
))?
;
640 // Read tag and sanity-check `tag_layout`.
641 let tag_val
= self.read_immediate(self.operand_field(op
, tag_field
)?
)?
;
642 assert_eq
!(tag_layout
.size
, tag_val
.layout
.size
);
643 assert_eq
!(tag_layout
.abi
.is_signed(), tag_val
.layout
.abi
.is_signed());
644 let tag_val
= tag_val
.to_scalar()?
;
645 trace
!("tag value: {:?}", tag_val
);
647 // Figure out which discriminant and variant this corresponds to.
648 Ok(match *tag_encoding
{
649 TagEncoding
::Direct
=> {
651 .force_bits(tag_val
, tag_layout
.size
)
652 .map_err(|_
| err_ub
!(InvalidTag(tag_val
.erase_tag())))?
;
653 // Cast bits from tag layout to discriminant layout.
654 let discr_val
= self.cast_from_scalar(tag_bits
, tag_layout
, discr_layout
.ty
);
655 let discr_bits
= discr_val
.assert_bits(discr_layout
.size
);
656 // Convert discriminant to variant index, and catch invalid discriminants.
657 let index
= match *op
.layout
.ty
.kind() {
659 adt
.discriminants(*self.tcx
).find(|(_
, var
)| var
.val
== discr_bits
)
661 ty
::Generator(def_id
, substs
, _
) => {
662 let substs
= substs
.as_generator();
664 .discriminants(def_id
, *self.tcx
)
665 .find(|(_
, var
)| var
.val
== discr_bits
)
667 _
=> span_bug
!(self.cur_span(), "tagged layout for non-adt non-generator"),
669 .ok_or_else(|| err_ub
!(InvalidTag(tag_val
.erase_tag())))?
;
670 // Return the cast value, and the index.
673 TagEncoding
::Niche { dataful_variant, ref niche_variants, niche_start }
=> {
674 // Compute the variant this niche value/"tag" corresponds to. With niche layout,
675 // discriminant (encoded in niche/tag) and variant index are the same.
676 let variants_start
= niche_variants
.start().as_u32();
677 let variants_end
= niche_variants
.end().as_u32();
678 let variant
= match tag_val
.to_bits_or_ptr(tag_layout
.size
, self) {
680 // The niche must be just 0 (which an inbounds pointer value never is)
681 let ptr_valid
= niche_start
== 0
682 && variants_start
== variants_end
683 && !self.memory
.ptr_may_be_null(ptr
);
685 throw_ub
!(InvalidTag(tag_val
.erase_tag()))
690 // We need to use machine arithmetic to get the relative variant idx:
691 // variant_index_relative = tag_val - niche_start_val
692 let tag_val
= ImmTy
::from_uint(tag_bits
, tag_layout
);
693 let niche_start_val
= ImmTy
::from_uint(niche_start
, tag_layout
);
694 let variant_index_relative_val
=
695 self.binary_op(mir
::BinOp
::Sub
, tag_val
, niche_start_val
)?
;
696 let variant_index_relative
= variant_index_relative_val
698 .assert_bits(tag_val
.layout
.size
);
699 // Check if this is in the range that indicates an actual discriminant.
700 if variant_index_relative
<= u128
::from(variants_end
- variants_start
) {
701 let variant_index_relative
= u32::try_from(variant_index_relative
)
702 .expect("we checked that this fits into a u32");
703 // Then computing the absolute variant idx should not overflow any more.
704 let variant_index
= variants_start
705 .checked_add(variant_index_relative
)
706 .expect("overflow computing absolute variant idx");
707 let variants_len
= op
711 .expect("tagged layout for non adt")
714 assert
!(usize::try_from(variant_index
).unwrap() < variants_len
);
715 VariantIdx
::from_u32(variant_index
)
721 // Compute the size of the scalar we need to return.
722 // No need to cast, because the variant index directly serves as discriminant and is
723 // encoded in the tag.
724 (Scalar
::from_uint(variant
.as_u32(), discr_layout
.size
), variant
)