//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_target::abi::LayoutOf;

use super::{InterpCx, Machine};

/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
/// same type as the result.
fn binop_left_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
        Eq | Ne | Lt | Le | Gt | Ge => false,
    }
}
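
// Note the asymmetry with the classifier below: `Shl`/`Shr` and `Offset` allow
// an RHS whose type differs from the LHS (the shift amount resp. offset count),
// so they are left-homogeneous but not right-homogeneous.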
/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
/// same type as the LHS.
fn binop_right_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
        Offset | Shl | Shr => false,
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn run(&mut self) -> InterpResult<'tcx> {
        while self.step()? {}
        Ok(())
    }

    /// Returns `true` as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    ///
    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`
    #[inline(always)]
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
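        // One step executes exactly one statement, or the block's terminator
        // once all statements in the current basic block have been executed.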
        if self.stack().is_empty() {
            return Ok(false);
        }

        let loc = match self.frame().loc {
            Ok(loc) => loc,
            Err(_) => {
                // We are unwinding and this fn has no cleanup code.
                // Just go on unwinding.
                trace!("unwinding: skipping frame");
                self.pop_stack_frame(/* unwinding */ true)?;
                return Ok(true);
            }
        };

        let basic_block = &self.body().basic_blocks()[loc.block];

        let old_frames = self.frame_idx();

        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
            assert_eq!(old_frames, self.frame_idx());
            self.statement(stmt)?;
            return Ok(true);
        }

        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        assert_eq!(old_frames, self.frame_idx());
        self.terminator(terminator)?;
        Ok(true)
    }

    /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
    /// statement counter. This also moves the statement counter forward.
    crate fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        use rustc_middle::mir::StatementKind::*;

        // Some statements (e.g., box) push new stack frames.
        // We have to record the stack frame number *before* executing the statement.
        let frame_idx = self.frame_idx();

        match &stmt.kind {
            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,

            SetDiscriminant { place, variant_index } => {
                let dest = self.eval_place(**place)?;
                self.write_discriminant(*variant_index, dest)?;
            }

            // Mark locals as alive
            StorageLive(local) => {
                let old_val = self.storage_live(*local)?;
                self.deallocate_local(old_val)?;
            }

            // Mark locals as dead
            StorageDead(local) => {
                let old_val = self.storage_dead(*local);
                self.deallocate_local(old_val)?;
            }

            // No dynamic semantics attached to `FakeRead`; the MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            Retag(kind, place) => {
                let dest = self.eval_place(**place)?;
                M::retag(self, *kind, dest)?;
            }

            // Statements we do not track.
            AscribeUserType(..) => {}

            // Currently, Miri discards Coverage statements. Coverage statements are only injected
            // via an optional compile time MIR pass and have no side effects. Since Coverage
            // statements don't exist at the source level, it is safe for Miri to ignore them, even
            // for undefined behavior (UB) checks.
            //
            // A coverage counter inside a const expression (for example, a counter injected in a
            // const function) is discarded when the const is evaluated at compile time. Whether
            // this should change, and/or how to implement a const eval counter, is a subject of the
            // following issue:
            //
            // FIXME(#73156): Handle source code coverage in const eval
            Coverage(..) => {}

            // Defined to do nothing. These are added by optimization passes, to avoid changing the
            // size of MIR constantly.
            Nop => {}

            LlvmInlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
        }

        self.stack_mut()[frame_idx].loc.as_mut().unwrap().statement_index += 1;

        Ok(())
    }

    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;

        use rustc_middle::mir::Rvalue::*;
        match *rvalue {
            ThreadLocalRef(did) => {
                let id = M::thread_local_static_alloc_id(self, did)?;
                let val = self.global_base_pointer(id.into())?;
                self.write_scalar(val, dest)?;
            }

            Use(ref operand) => {
                // Avoid recomputing the layout
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(op, dest)?;
            }
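
            // For the binary-op cases below, an already-known layout is passed
            // along as a hint wherever the homogeneity rules above guarantee it
            // matches, sparing `eval_operand` a layout computation.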
            BinaryOp(bin_op, ref left, ref right) => {
                let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
                let left = self.read_immediate(self.eval_operand(left, layout)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_ignore_overflow(bin_op, left, right, dest)?;
            }

            CheckedBinaryOp(bin_op, ref left, ref right) => {
                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
                let left = self.read_immediate(self.eval_operand(left, None)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_with_overflow(bin_op, left, right, dest)?;
            }

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?;
                let val = self.unary_op(un_op, val)?;
                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
                self.write_immediate(*val, dest)?;
            }
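
            // For `Adt` aggregates, `active_field_index` is `Some` only when
            // writing a union, naming the one field that is actually initialized.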
            Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        self.write_discriminant(variant_index, dest)?;
                        if adt_def.is_enum() {
                            (self.place_downcast(dest, variant_index)?, active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };

                for (i, operand) in operands.iter().enumerate() {
                    let op = self.eval_operand(operand, None)?;
                    // Ignore zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field_dest = self.place_field(dest, field_index)?;
                        self.copy_op(op, field_dest)?;
                    }
                }
            }
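
            // `Repeat` evaluates its operand once, writes the first element,
            // then bulk-copies that element over the remaining `length - 1`
            // slots rather than re-evaluating the operand per element.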
            Repeat(ref operand, _) => {
                let op = self.eval_operand(operand, None)?;
                let dest = self.force_allocation(dest)?;
                let length = dest.len(self)?;

                if let Some(first_ptr) = self.check_mplace_access(dest, None)? {
                    // Write the first element.
                    let first = self.mplace_field(dest, 0)?;
                    self.copy_op(op, first.into())?;

                    if length > 1 {
                        let elem_size = first.layout.size;
                        // Copy the rest. This is performance-sensitive code
                        // for big static/const arrays!
                        let rest_ptr = first_ptr.offset(elem_size, self)?;
                        self.memory.copy_repeatedly(
                            first_ptr,
                            rest_ptr,
                            elem_size,
                            length - 1,
                            /*nonoverlapping:*/ true,
                        )?;
                    }
                }
            }

            Len(place) => {
                // FIXME(CTFE): don't allow computing the length of arrays in const eval
                let src = self.eval_place(place)?;
                let mplace = self.force_allocation(src)?;
                let len = mplace.len(self)?;
                self.write_scalar(Scalar::from_machine_usize(len, self), dest)?;
            }

            AddressOf(_, place) | Ref(_, _, place) => {
                let src = self.eval_place(place)?;
                let place = self.force_allocation(src)?;
                if place.layout.size.bytes() > 0 {
                    // definitely not a ZST
                    assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
                }
                self.write_immediate(place.to_ref(), dest)?;
            }

            NullaryOp(mir::NullOp::Box, _) => {
                M::box_alloc(self, dest)?;
            }

            NullaryOp(mir::NullOp::SizeOf, ty) => {
                let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty);
                let layout = self.layout_of(ty)?;
                assert!(
                    !layout.is_unsized(),
                    "SizeOf nullary MIR operator called for unsized type"
                );
                self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), dest)?;
            }

            Cast(cast_kind, ref operand, cast_ty) => {
                let src = self.eval_operand(operand, None)?;
                let cast_ty = self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty);
                self.cast(src, cast_kind, cast_ty, dest)?;
            }

            Discriminant(place) => {
                let op = self.eval_place_to_op(place, None)?;
                let discr_val = self.read_discriminant(op)?.0;
                self.write_scalar(discr_val, dest)?;
            }
        }

        trace!("{:?}", self.dump_place(*dest));

        Ok(())
    }

    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", terminator.kind);

        self.eval_terminator(terminator)?;
        if !self.stack().is_empty() {
            if let Ok(loc) = self.frame().loc {
                info!("// executing {:?}", loc.block);
            }
        }
        Ok(())
    }
}