1 // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Functions concerning immediate values and operands, and reading from operands.
12 //! All high-level functions to read from memory work on operands as sources.
use std::convert::TryInto;

use rustc::{mir, ty};
use rustc::ty::layout::{self, Size, LayoutOf, TyLayout, HasDataLayout, IntegerExt};

use rustc::mir::interpret::{
    GlobalId, AllocId,
    ConstValue, Pointer, Scalar,
    EvalResult, EvalErrorKind,
};
use super::{EvalContext, Machine, MemPlace, MPlaceTy, MemoryKind};
26 #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
27 pub enum ScalarMaybeUndef
<Tag
=(), Id
=AllocId
> {
28 Scalar(Scalar
<Tag
, Id
>),
32 impl<Tag
> From
<Scalar
<Tag
>> for ScalarMaybeUndef
<Tag
> {
34 fn from(s
: Scalar
<Tag
>) -> Self {
35 ScalarMaybeUndef
::Scalar(s
)
39 impl<'tcx
> ScalarMaybeUndef
<()> {
41 pub fn with_default_tag
<Tag
>(self) -> ScalarMaybeUndef
<Tag
>
45 ScalarMaybeUndef
::Scalar(s
) => ScalarMaybeUndef
::Scalar(s
.with_default_tag()),
46 ScalarMaybeUndef
::Undef
=> ScalarMaybeUndef
::Undef
,
51 impl<'tcx
, Tag
> ScalarMaybeUndef
<Tag
> {
53 pub fn erase_tag(self) -> ScalarMaybeUndef
56 ScalarMaybeUndef
::Scalar(s
) => ScalarMaybeUndef
::Scalar(s
.erase_tag()),
57 ScalarMaybeUndef
::Undef
=> ScalarMaybeUndef
::Undef
,
62 pub fn not_undef(self) -> EvalResult
<'
static, Scalar
<Tag
>> {
64 ScalarMaybeUndef
::Scalar(scalar
) => Ok(scalar
),
65 ScalarMaybeUndef
::Undef
=> err
!(ReadUndefBytes(Size
::from_bytes(0))),
70 pub fn to_ptr(self) -> EvalResult
<'tcx
, Pointer
<Tag
>> {
71 self.not_undef()?
.to_ptr()
75 pub fn to_bits(self, target_size
: Size
) -> EvalResult
<'tcx
, u128
> {
76 self.not_undef()?
.to_bits(target_size
)
80 pub fn to_bool(self) -> EvalResult
<'tcx
, bool
> {
81 self.not_undef()?
.to_bool()
85 pub fn to_char(self) -> EvalResult
<'tcx
, char> {
86 self.not_undef()?
.to_char()
90 pub fn to_f32(self) -> EvalResult
<'tcx
, f32> {
91 self.not_undef()?
.to_f32()
95 pub fn to_f64(self) -> EvalResult
<'tcx
, f64> {
96 self.not_undef()?
.to_f64()
100 pub fn to_u8(self) -> EvalResult
<'tcx
, u8> {
101 self.not_undef()?
.to_u8()
105 pub fn to_u32(self) -> EvalResult
<'tcx
, u32> {
106 self.not_undef()?
.to_u32()
110 pub fn to_u64(self) -> EvalResult
<'tcx
, u64> {
111 self.not_undef()?
.to_u64()
115 pub fn to_usize(self, cx
: impl HasDataLayout
) -> EvalResult
<'tcx
, u64> {
116 self.not_undef()?
.to_usize(cx
)
120 pub fn to_i8(self) -> EvalResult
<'tcx
, i8> {
121 self.not_undef()?
.to_i8()
125 pub fn to_i32(self) -> EvalResult
<'tcx
, i32> {
126 self.not_undef()?
.to_i32()
130 pub fn to_i64(self) -> EvalResult
<'tcx
, i64> {
131 self.not_undef()?
.to_i64()
135 pub fn to_isize(self, cx
: impl HasDataLayout
) -> EvalResult
<'tcx
, i64> {
136 self.not_undef()?
.to_isize(cx
)
141 /// A `Value` represents a single immediate self-contained Rust value.
143 /// For optimization of a few very common cases, there is also a representation for a pair of
144 /// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
145 /// operations and fat pointers. This idea was taken from rustc's codegen.
146 /// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
147 /// defined on `Value`, and do not have to work with a `Place`.
148 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
149 pub enum Value
<Tag
=(), Id
=AllocId
> {
150 Scalar(ScalarMaybeUndef
<Tag
, Id
>),
151 ScalarPair(ScalarMaybeUndef
<Tag
, Id
>, ScalarMaybeUndef
<Tag
, Id
>),
156 pub fn with_default_tag
<Tag
>(self) -> Value
<Tag
>
160 Value
::Scalar(x
) => Value
::Scalar(x
.with_default_tag()),
161 Value
::ScalarPair(x
, y
) =>
162 Value
::ScalarPair(x
.with_default_tag(), y
.with_default_tag()),
167 impl<'tcx
, Tag
> Value
<Tag
> {
169 pub fn erase_tag(self) -> Value
172 Value
::Scalar(x
) => Value
::Scalar(x
.erase_tag()),
173 Value
::ScalarPair(x
, y
) =>
174 Value
::ScalarPair(x
.erase_tag(), y
.erase_tag()),
181 cx
: impl HasDataLayout
183 Value
::ScalarPair(val
.into(), Scalar
::from_uint(len
, cx
.data_layout().pointer_size
).into())
186 pub fn new_dyn_trait(val
: Scalar
<Tag
>, vtable
: Pointer
<Tag
>) -> Self {
187 Value
::ScalarPair(val
.into(), Scalar
::Ptr(vtable
).into())
191 pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef
<Tag
> {
193 Value
::Scalar(val
) => val
,
194 Value
::ScalarPair(..) => bug
!("Got a fat pointer where a scalar was expected"),
199 pub fn to_scalar(self) -> EvalResult
<'tcx
, Scalar
<Tag
>> {
200 self.to_scalar_or_undef().not_undef()
204 pub fn to_scalar_pair(self) -> EvalResult
<'tcx
, (Scalar
<Tag
>, Scalar
<Tag
>)> {
206 Value
::Scalar(..) => bug
!("Got a thin pointer where a scalar pair was expected"),
207 Value
::ScalarPair(a
, b
) => Ok((a
.not_undef()?
, b
.not_undef()?
))
211 /// Convert the value into a pointer (or a pointer-sized integer).
212 /// Throws away the second half of a ScalarPair!
214 pub fn to_scalar_ptr(self) -> EvalResult
<'tcx
, Scalar
<Tag
>> {
217 Value
::ScalarPair(ptr
, _
) => ptr
.not_undef(),
222 // ScalarPair needs a type to interpret, so we often have a value and a type together
223 // as input for binary and cast operations.
224 #[derive(Copy, Clone, Debug)]
225 pub struct ValTy
<'tcx
, Tag
=()> {
227 pub layout
: TyLayout
<'tcx
>,
230 impl<'tcx
, Tag
> ::std
::ops
::Deref
for ValTy
<'tcx
, Tag
> {
231 type Target
= Value
<Tag
>;
233 fn deref(&self) -> &Value
<Tag
> {
238 /// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
239 /// or still in memory. The latter is an optimization, to delay reading that chunk of
240 /// memory and to avoid having to store arbitrary-sized data here.
241 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
242 pub enum Operand
<Tag
=(), Id
=AllocId
> {
243 Immediate(Value
<Tag
, Id
>),
244 Indirect(MemPlace
<Tag
, Id
>),
249 pub fn with_default_tag
<Tag
>(self) -> Operand
<Tag
>
253 Operand
::Immediate(x
) => Operand
::Immediate(x
.with_default_tag()),
254 Operand
::Indirect(x
) => Operand
::Indirect(x
.with_default_tag()),
259 impl<Tag
> Operand
<Tag
> {
261 pub fn erase_tag(self) -> Operand
264 Operand
::Immediate(x
) => Operand
::Immediate(x
.erase_tag()),
265 Operand
::Indirect(x
) => Operand
::Indirect(x
.erase_tag()),
270 pub fn to_mem_place(self) -> MemPlace
<Tag
>
271 where Tag
: ::std
::fmt
::Debug
274 Operand
::Indirect(mplace
) => mplace
,
275 _
=> bug
!("to_mem_place: expected Operand::Indirect, got {:?}", self),
281 pub fn to_immediate(self) -> Value
<Tag
>
282 where Tag
: ::std
::fmt
::Debug
285 Operand
::Immediate(val
) => val
,
286 _
=> bug
!("to_immediate: expected Operand::Immediate, got {:?}", self),
292 #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
293 pub struct OpTy
<'tcx
, Tag
=()> {
294 crate op
: Operand
<Tag
>, // ideally we'd make this private, but const_prop needs this
295 pub layout
: TyLayout
<'tcx
>,
298 impl<'tcx
, Tag
> ::std
::ops
::Deref
for OpTy
<'tcx
, Tag
> {
299 type Target
= Operand
<Tag
>;
301 fn deref(&self) -> &Operand
<Tag
> {
306 impl<'tcx
, Tag
: Copy
> From
<MPlaceTy
<'tcx
, Tag
>> for OpTy
<'tcx
, Tag
> {
308 fn from(mplace
: MPlaceTy
<'tcx
, Tag
>) -> Self {
310 op
: Operand
::Indirect(*mplace
),
311 layout
: mplace
.layout
316 impl<'tcx
, Tag
> From
<ValTy
<'tcx
, Tag
>> for OpTy
<'tcx
, Tag
> {
318 fn from(val
: ValTy
<'tcx
, Tag
>) -> Self {
320 op
: Operand
::Immediate(val
.value
),
326 impl<'tcx
, Tag
> OpTy
<'tcx
, Tag
>
329 pub fn erase_tag(self) -> OpTy
<'tcx
>
332 op
: self.op
.erase_tag(),
338 // Use the existing layout if given (but sanity check in debug mode),
339 // or compute the layout.
341 fn from_known_layout
<'tcx
>(
342 layout
: Option
<TyLayout
<'tcx
>>,
343 compute
: impl FnOnce() -> EvalResult
<'tcx
, TyLayout
<'tcx
>>
344 ) -> EvalResult
<'tcx
, TyLayout
<'tcx
>> {
348 if cfg
!(debug_assertions
) {
349 let layout2
= compute()?
;
350 assert_eq
!(layout
.details
, layout2
.details
,
351 "Mismatch in layout of supposedly equal-layout types {:?} and {:?}",
352 layout
.ty
, layout2
.ty
);
359 impl<'a
, 'mir
, 'tcx
, M
: Machine
<'a
, 'mir
, 'tcx
>> EvalContext
<'a
, 'mir
, 'tcx
, M
> {
360 /// Try reading a value in memory; this is interesting particularly for ScalarPair.
361 /// Return None if the layout does not permit loading this as a value.
362 pub(super) fn try_read_value_from_mplace(
364 mplace
: MPlaceTy
<'tcx
, M
::PointerTag
>,
365 ) -> EvalResult
<'tcx
, Option
<Value
<M
::PointerTag
>>> {
366 if mplace
.layout
.is_unsized() {
367 // Don't touch unsized
370 let (ptr
, ptr_align
) = mplace
.to_scalar_ptr_align();
372 if mplace
.layout
.is_zst() {
373 // Not all ZSTs have a layout we would handle below, so just short-circuit them
375 self.memory
.check_align(ptr
, ptr_align
)?
;
376 return Ok(Some(Value
::Scalar(Scalar
::zst().into())));
379 let ptr
= ptr
.to_ptr()?
;
380 match mplace
.layout
.abi
{
381 layout
::Abi
::Scalar(..) => {
382 let scalar
= self.memory
.read_scalar(ptr
, ptr_align
, mplace
.layout
.size
)?
;
383 Ok(Some(Value
::Scalar(scalar
)))
385 layout
::Abi
::ScalarPair(ref a
, ref b
) => {
386 let (a
, b
) = (&a
.value
, &b
.value
);
387 let (a_size
, b_size
) = (a
.size(self), b
.size(self));
389 let b_offset
= a_size
.abi_align(b
.align(self));
390 assert
!(b_offset
.bytes() > 0); // we later use the offset to test which field to use
391 let b_ptr
= ptr
.offset(b_offset
, self)?
.into();
392 let a_val
= self.memory
.read_scalar(a_ptr
, ptr_align
, a_size
)?
;
393 let b_val
= self.memory
.read_scalar(b_ptr
, ptr_align
, b_size
)?
;
394 Ok(Some(Value
::ScalarPair(a_val
, b_val
)))
400 /// Try returning an immediate value for the operand.
401 /// If the layout does not permit loading this as a value, return where in memory
402 /// we can find the data.
403 /// Note that for a given layout, this operation will either always fail or always
404 /// succeed! Whether it succeeds depends on whether the layout can be represented
405 /// in a `Value`, not on which data is stored there currently.
406 pub(crate) fn try_read_value(
408 src
: OpTy
<'tcx
, M
::PointerTag
>,
409 ) -> EvalResult
<'tcx
, Result
<Value
<M
::PointerTag
>, MemPlace
<M
::PointerTag
>>> {
410 Ok(match src
.try_as_mplace() {
412 if let Some(val
) = self.try_read_value_from_mplace(mplace
)?
{
422 /// Read a value from a place, asserting that that is possible with the given layout.
426 op
: OpTy
<'tcx
, M
::PointerTag
>
427 ) -> EvalResult
<'tcx
, ValTy
<'tcx
, M
::PointerTag
>> {
428 if let Ok(value
) = self.try_read_value(op
)?
{
429 Ok(ValTy { value, layout: op.layout }
)
431 bug
!("primitive read failed for type: {:?}", op
.layout
.ty
);
435 /// Read a scalar from a place
438 op
: OpTy
<'tcx
, M
::PointerTag
>
439 ) -> EvalResult
<'tcx
, ScalarMaybeUndef
<M
::PointerTag
>> {
440 match *self.read_value(op
)?
{
441 Value
::ScalarPair(..) => bug
!("got ScalarPair for type: {:?}", op
.layout
.ty
),
442 Value
::Scalar(val
) => Ok(val
),
446 // Turn the MPlace into a string (must already be dereferenced!)
449 mplace
: MPlaceTy
<'tcx
, M
::PointerTag
>,
450 ) -> EvalResult
<'tcx
, &str> {
451 let len
= mplace
.len(self)?
;
452 let bytes
= self.memory
.read_bytes(mplace
.ptr
, Size
::from_bytes(len
as u64))?
;
453 let str = ::std
::str::from_utf8(bytes
)
454 .map_err(|err
| EvalErrorKind
::ValidationFailure(err
.to_string()))?
;
458 pub fn uninit_operand(
460 layout
: TyLayout
<'tcx
>
461 ) -> EvalResult
<'tcx
, Operand
<M
::PointerTag
>> {
462 // This decides which types we will use the Immediate optimization for, and hence should
463 // match what `try_read_value` and `eval_place_to_op` support.
465 return Ok(Operand
::Immediate(Value
::Scalar(Scalar
::zst().into())));
468 Ok(match layout
.abi
{
469 layout
::Abi
::Scalar(..) =>
470 Operand
::Immediate(Value
::Scalar(ScalarMaybeUndef
::Undef
)),
471 layout
::Abi
::ScalarPair(..) =>
472 Operand
::Immediate(Value
::ScalarPair(
473 ScalarMaybeUndef
::Undef
,
474 ScalarMaybeUndef
::Undef
,
477 trace
!("Forcing allocation for local of type {:?}", layout
.ty
);
479 *self.allocate(layout
, MemoryKind
::Stack
)?
485 /// Projection functions
486 pub fn operand_field(
488 op
: OpTy
<'tcx
, M
::PointerTag
>,
490 ) -> EvalResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
491 let base
= match op
.try_as_mplace() {
494 let field
= self.mplace_field(mplace
, field
)?
;
495 return Ok(field
.into());
500 let field
= field
.try_into().unwrap();
501 let field_layout
= op
.layout
.field(self, field
)?
;
502 if field_layout
.is_zst() {
503 let val
= Value
::Scalar(Scalar
::zst().into());
504 return Ok(OpTy { op: Operand::Immediate(val), layout: field_layout }
);
506 let offset
= op
.layout
.fields
.offset(field
);
507 let value
= match base
{
508 // the field covers the entire type
509 _
if offset
.bytes() == 0 && field_layout
.size
== op
.layout
.size
=> base
,
510 // extract fields from types with `ScalarPair` ABI
511 Value
::ScalarPair(a
, b
) => {
512 let val
= if offset
.bytes() == 0 { a }
else { b }
;
515 Value
::Scalar(val
) =>
516 bug
!("field access on non aggregate {:#?}, {:#?}", val
, op
.layout
),
518 Ok(OpTy { op: Operand::Immediate(value), layout: field_layout }
)
521 pub fn operand_downcast(
523 op
: OpTy
<'tcx
, M
::PointerTag
>,
525 ) -> EvalResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
526 // Downcasts only change the layout
527 Ok(match op
.try_as_mplace() {
529 self.mplace_downcast(mplace
, variant
)?
.into()
532 let layout
= op
.layout
.for_variant(self, variant
);
533 OpTy { layout, ..op }
538 // Take an operand, representing a pointer, and dereference it to a place -- that
539 // will always be a MemPlace.
540 pub(super) fn deref_operand(
542 src
: OpTy
<'tcx
, M
::PointerTag
>,
543 ) -> EvalResult
<'tcx
, MPlaceTy
<'tcx
, M
::PointerTag
>> {
544 let val
= self.read_value(src
)?
;
545 trace
!("deref to {} on {:?}", val
.layout
.ty
, *val
);
546 Ok(self.ref_to_mplace(val
)?
)
549 pub fn operand_projection(
551 base
: OpTy
<'tcx
, M
::PointerTag
>,
552 proj_elem
: &mir
::PlaceElem
<'tcx
>,
553 ) -> EvalResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
554 use rustc
::mir
::ProjectionElem
::*;
555 Ok(match *proj_elem
{
556 Field(field
, _
) => self.operand_field(base
, field
.index() as u64)?
,
557 Downcast(_
, variant
) => self.operand_downcast(base
, variant
)?
,
558 Deref
=> self.deref_operand(base
)?
.into(),
559 Subslice { .. }
| ConstantIndex { .. }
| Index(_
) => if base
.layout
.is_zst() {
561 op
: Operand
::Immediate(Value
::Scalar(Scalar
::zst().into())),
562 // the actual index doesn't matter, so we just pick a convenient one like 0
563 layout
: base
.layout
.field(self, 0)?
,
566 // The rest should only occur as mplace, we do not use Immediates for types
567 // allowing such operations. This matches place_projection forcing an allocation.
568 let mplace
= base
.to_mem_place();
569 self.mplace_projection(mplace
, proj_elem
)?
.into()
574 /// This is used by [priroda](https://github.com/oli-obk/priroda) to get an OpTy from a local
576 /// When you know the layout of the local in advance, you can pass it as last argument
579 frame
: &super::Frame
<'mir
, 'tcx
, M
::PointerTag
>,
581 layout
: Option
<TyLayout
<'tcx
>>,
582 ) -> EvalResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
583 assert_ne
!(local
, mir
::RETURN_PLACE
);
584 let op
= *frame
.locals
[local
].access()?
;
585 let layout
= from_known_layout(layout
,
586 || self.layout_of_local(frame
, local
))?
;
587 Ok(OpTy { op, layout }
)
590 // Evaluate a place with the goal of reading from it. This lets us sometimes
591 // avoid allocations. If you already know the layout, you can pass it in
592 // to avoid looking it up again.
595 mir_place
: &mir
::Place
<'tcx
>,
596 layout
: Option
<TyLayout
<'tcx
>>,
597 ) -> EvalResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
598 use rustc
::mir
::Place
::*;
599 let op
= match *mir_place
{
600 Local(mir
::RETURN_PLACE
) => return err
!(ReadFromReturnPointer
),
601 Local(local
) => self.access_local(self.frame(), local
, layout
)?
,
603 Projection(ref proj
) => {
604 let op
= self.eval_place_to_op(&proj
.base
, None
)?
;
605 self.operand_projection(op
, &proj
.elem
)?
608 _
=> self.eval_place_to_mplace(mir_place
)?
.into(),
611 trace
!("eval_place_to_op: got {:?}", *op
);
615 /// Evaluate the operand, returning a place where you can then find the data.
616 /// if you already know the layout, you can save two some table lookups
617 /// by passing it in here.
620 mir_op
: &mir
::Operand
<'tcx
>,
621 layout
: Option
<TyLayout
<'tcx
>>,
622 ) -> EvalResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
623 use rustc
::mir
::Operand
::*;
624 let op
= match *mir_op
{
625 // FIXME: do some more logic on `move` to invalidate the old location
628 self.eval_place_to_op(place
, layout
)?
,
630 Constant(ref constant
) => {
631 let layout
= from_known_layout(layout
, || {
632 let ty
= self.monomorphize(mir_op
.ty(self.mir(), *self.tcx
), self.substs());
635 let op
= self.const_value_to_op(constant
.literal
.val
)?
;
639 trace
!("{:?}: {:?}", mir_op
, *op
);
643 /// Evaluate a bunch of operands at once
644 pub(super) fn eval_operands(
646 ops
: &[mir
::Operand
<'tcx
>],
647 ) -> EvalResult
<'tcx
, Vec
<OpTy
<'tcx
, M
::PointerTag
>>> {
649 .map(|op
| self.eval_operand(op
, None
))
653 // Also used e.g. when miri runs into a constant.
654 pub(super) fn const_value_to_op(
656 val
: ConstValue
<'tcx
>,
657 ) -> EvalResult
<'tcx
, Operand
<M
::PointerTag
>> {
658 trace
!("const_value_to_op: {:?}", val
);
660 ConstValue
::Unevaluated(def_id
, substs
) => {
661 let instance
= self.resolve(def_id
, substs
)?
;
662 self.global_to_op(GlobalId
{
667 ConstValue
::ByRef(id
, alloc
, offset
) => {
668 // We rely on mutability being set correctly in that allocation to prevent writes
669 // where none should happen -- and for `static mut`, we copy on demand anyway.
670 Ok(Operand
::Indirect(
671 MemPlace
::from_ptr(Pointer
::new(id
, offset
), alloc
.align
)
672 ).with_default_tag())
674 ConstValue
::ScalarPair(a
, b
) =>
675 Ok(Operand
::Immediate(Value
::ScalarPair(a
.into(), b
.into())).with_default_tag()),
676 ConstValue
::Scalar(x
) =>
677 Ok(Operand
::Immediate(Value
::Scalar(x
.into())).with_default_tag()),
682 cnst
: &ty
::Const
<'tcx
>,
683 ) -> EvalResult
<'tcx
, OpTy
<'tcx
, M
::PointerTag
>> {
684 let op
= self.const_value_to_op(cnst
.val
)?
;
685 Ok(OpTy { op, layout: self.layout_of(cnst.ty)? }
)
688 pub(super) fn global_to_op(
691 ) -> EvalResult
<'tcx
, Operand
<M
::PointerTag
>> {
692 let cv
= self.const_eval(gid
)?
;
693 self.const_value_to_op(cv
.val
)
696 /// Read discriminant, return the runtime value as well as the variant index.
697 pub fn read_discriminant(
699 rval
: OpTy
<'tcx
, M
::PointerTag
>,
700 ) -> EvalResult
<'tcx
, (u128
, usize)> {
701 trace
!("read_discriminant_value {:#?}", rval
.layout
);
703 match rval
.layout
.variants
{
704 layout
::Variants
::Single { index }
=> {
705 let discr_val
= rval
.layout
.ty
.ty_adt_def().map_or(
707 |def
| def
.discriminant_for_variant(*self.tcx
, index
).val
);
708 return Ok((discr_val
, index
));
710 layout
::Variants
::Tagged { .. }
|
711 layout
::Variants
::NicheFilling { .. }
=> {}
,
713 // read raw discriminant value
714 let discr_op
= self.operand_field(rval
, 0)?
;
715 let discr_val
= self.read_value(discr_op
)?
;
716 let raw_discr
= discr_val
.to_scalar()?
;
717 trace
!("discr value: {:?}", raw_discr
);
719 Ok(match rval
.layout
.variants
{
720 layout
::Variants
::Single { .. }
=> bug
!(),
721 layout
::Variants
::Tagged { .. }
=> {
722 let real_discr
= if discr_val
.layout
.ty
.is_signed() {
723 let i
= raw_discr
.to_bits(discr_val
.layout
.size
)?
as i128
;
724 // going from layout tag type to typeck discriminant type
725 // requires first sign extending with the layout discriminant
726 let shift
= 128 - discr_val
.layout
.size
.bits();
727 let sexted
= (i
<< shift
) >> shift
;
728 // and then zeroing with the typeck discriminant type
729 let discr_ty
= rval
.layout
.ty
730 .ty_adt_def().expect("tagged layout corresponds to adt")
733 let discr_ty
= layout
::Integer
::from_attr(self.tcx
.tcx
, discr_ty
);
734 let shift
= 128 - discr_ty
.size().bits();
735 let truncatee
= sexted
as u128
;
736 (truncatee
<< shift
) >> shift
738 raw_discr
.to_bits(discr_val
.layout
.size
)?
740 // Make sure we catch invalid discriminants
741 let index
= rval
.layout
.ty
743 .expect("tagged layout for non adt")
744 .discriminants(self.tcx
.tcx
)
745 .position(|var
| var
.val
== real_discr
)
746 .ok_or_else(|| EvalErrorKind
::InvalidDiscriminant(real_discr
))?
;
749 layout
::Variants
::NicheFilling
{
755 let variants_start
= *niche_variants
.start() as u128
;
756 let variants_end
= *niche_variants
.end() as u128
;
757 let real_discr
= match raw_discr
{
759 // The niche must be just 0 (which a pointer value never is)
760 assert
!(niche_start
== 0);
761 assert
!(variants_start
== variants_end
);
762 dataful_variant
as u128
764 Scalar
::Bits { bits: raw_discr, size }
=> {
765 assert_eq
!(size
as u64, discr_val
.layout
.size
.bytes());
766 let discr
= raw_discr
.wrapping_sub(niche_start
)
767 .wrapping_add(variants_start
);
768 if variants_start
<= discr
&& discr
<= variants_end
{
771 dataful_variant
as u128
775 let index
= real_discr
as usize;
776 assert_eq
!(index
as u128
, real_discr
);
777 assert
!(index
< rval
.layout
.ty
779 .expect("tagged layout for non adt")