1 //! This module contains the `InterpCx` methods for executing a single step of the interpreter.
3 //! The main entry point is the `step` method.
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_target::abi::LayoutOf;

use super::{InterpCx, Machine};
11 /// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
12 /// same type as the result.
14 fn binop_left_homogeneous(op
: mir
::BinOp
) -> bool
{
15 use rustc_middle
::mir
::BinOp
::*;
17 Add
| Sub
| Mul
| Div
| Rem
| BitXor
| BitAnd
| BitOr
| Offset
| Shl
| Shr
=> true,
18 Eq
| Ne
| Lt
| Le
| Gt
| Ge
=> false,
21 /// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
22 /// same type as the LHS.
24 fn binop_right_homogeneous(op
: mir
::BinOp
) -> bool
{
25 use rustc_middle
::mir
::BinOp
::*;
27 Add
| Sub
| Mul
| Div
| Rem
| BitXor
| BitAnd
| BitOr
| Eq
| Ne
| Lt
| Le
| Gt
| Ge
=> true,
28 Offset
| Shl
| Shr
=> false,
32 impl<'mir
, 'tcx
: 'mir
, M
: Machine
<'mir
, 'tcx
>> InterpCx
<'mir
, 'tcx
, M
> {
33 pub fn run(&mut self) -> InterpResult
<'tcx
> {
38 /// Returns `true` as long as there are more things to do.
40 /// This is used by [priroda](https://github.com/oli-obk/priroda)
42 /// This is marked `#inline(always)` to work around adverserial codegen when `opt-level = 3`
44 pub fn step(&mut self) -> InterpResult
<'tcx
, bool
> {
45 if self.stack().is_empty() {
49 let block
= match self.frame().block
{
52 // We are unwinding and this fn has no cleanup code.
53 // Just go on unwinding.
54 trace
!("unwinding: skipping frame");
55 self.pop_stack_frame(/* unwinding */ true)?
;
59 let stmt_id
= self.frame().stmt
;
60 let body
= self.body();
61 let basic_block
= &body
.basic_blocks()[block
];
63 let old_frames
= self.frame_idx();
65 if let Some(stmt
) = basic_block
.statements
.get(stmt_id
) {
66 assert_eq
!(old_frames
, self.frame_idx());
67 self.statement(stmt
)?
;
71 M
::before_terminator(self)?
;
73 let terminator
= basic_block
.terminator();
74 assert_eq
!(old_frames
, self.frame_idx());
75 self.terminator(terminator
)?
;
79 fn statement(&mut self, stmt
: &mir
::Statement
<'tcx
>) -> InterpResult
<'tcx
> {
81 self.set_span(stmt
.source_info
.span
);
83 use rustc_middle
::mir
::StatementKind
::*;
85 // Some statements (e.g., box) push new stack frames.
86 // We have to record the stack frame number *before* executing the statement.
87 let frame_idx
= self.frame_idx();
90 Assign(box (place
, rvalue
)) => self.eval_rvalue_into_place(rvalue
, *place
)?
,
92 SetDiscriminant { place, variant_index }
=> {
93 let dest
= self.eval_place(**place
)?
;
94 self.write_discriminant_index(*variant_index
, dest
)?
;
97 // Mark locals as alive
98 StorageLive(local
) => {
99 let old_val
= self.storage_live(*local
)?
;
100 self.deallocate_local(old_val
)?
;
103 // Mark locals as dead
104 StorageDead(local
) => {
105 let old_val
= self.storage_dead(*local
);
106 self.deallocate_local(old_val
)?
;
109 // No dynamic semantics attached to `FakeRead`; MIR
110 // interpreter is solely intended for borrowck'ed code.
114 Retag(kind
, place
) => {
115 let dest
= self.eval_place(**place
)?
;
116 M
::retag(self, *kind
, dest
)?
;
119 // Statements we do not track.
120 AscribeUserType(..) => {}
122 // Defined to do nothing. These are added by optimization passes, to avoid changing the
123 // size of MIR constantly.
126 LlvmInlineAsm { .. }
=> throw_unsup_format
!("inline assembly is not supported"),
129 self.stack_mut()[frame_idx
].stmt
+= 1;
133 /// Evaluate an assignment statement.
135 /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
136 /// type writes its results directly into the memory specified by the place.
137 pub fn eval_rvalue_into_place(
139 rvalue
: &mir
::Rvalue
<'tcx
>,
140 place
: mir
::Place
<'tcx
>,
141 ) -> InterpResult
<'tcx
> {
142 let dest
= self.eval_place(place
)?
;
144 use rustc_middle
::mir
::Rvalue
::*;
146 Use(ref operand
) => {
147 // Avoid recomputing the layout
148 let op
= self.eval_operand(operand
, Some(dest
.layout
))?
;
149 self.copy_op(op
, dest
)?
;
152 BinaryOp(bin_op
, ref left
, ref right
) => {
153 let layout
= binop_left_homogeneous(bin_op
).then_some(dest
.layout
);
154 let left
= self.read_immediate(self.eval_operand(left
, layout
)?
)?
;
155 let layout
= binop_right_homogeneous(bin_op
).then_some(left
.layout
);
156 let right
= self.read_immediate(self.eval_operand(right
, layout
)?
)?
;
157 self.binop_ignore_overflow(bin_op
, left
, right
, dest
)?
;
160 CheckedBinaryOp(bin_op
, ref left
, ref right
) => {
161 // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
162 let left
= self.read_immediate(self.eval_operand(left
, None
)?
)?
;
163 let layout
= binop_right_homogeneous(bin_op
).then_some(left
.layout
);
164 let right
= self.read_immediate(self.eval_operand(right
, layout
)?
)?
;
165 self.binop_with_overflow(bin_op
, left
, right
, dest
)?
;
168 UnaryOp(un_op
, ref operand
) => {
169 // The operand always has the same type as the result.
170 let val
= self.read_immediate(self.eval_operand(operand
, Some(dest
.layout
))?
)?
;
171 let val
= self.unary_op(un_op
, val
)?
;
172 assert_eq
!(val
.layout
, dest
.layout
, "layout mismatch for result of {:?}", un_op
);
173 self.write_immediate(*val
, dest
)?
;
176 Aggregate(ref kind
, ref operands
) => {
177 let (dest
, active_field_index
) = match **kind
{
178 mir
::AggregateKind
::Adt(adt_def
, variant_index
, _
, _
, active_field_index
) => {
179 self.write_discriminant_index(variant_index
, dest
)?
;
180 if adt_def
.is_enum() {
181 (self.place_downcast(dest
, variant_index
)?
, active_field_index
)
183 (dest
, active_field_index
)
189 for (i
, operand
) in operands
.iter().enumerate() {
190 let op
= self.eval_operand(operand
, None
)?
;
191 // Ignore zero-sized fields.
192 if !op
.layout
.is_zst() {
193 let field_index
= active_field_index
.unwrap_or(i
);
194 let field_dest
= self.place_field(dest
, field_index
)?
;
195 self.copy_op(op
, field_dest
)?
;
200 Repeat(ref operand
, _
) => {
201 let op
= self.eval_operand(operand
, None
)?
;
202 let dest
= self.force_allocation(dest
)?
;
203 let length
= dest
.len(self)?
;
205 if let Some(first_ptr
) = self.check_mplace_access(dest
, None
)?
{
207 let first
= self.mplace_field(dest
, 0)?
;
208 self.copy_op(op
, first
.into())?
;
211 let elem_size
= first
.layout
.size
;
212 // Copy the rest. This is performance-sensitive code
213 // for big static/const arrays!
214 let rest_ptr
= first_ptr
.offset(elem_size
, self)?
;
215 self.memory
.copy_repeatedly(
220 /*nonoverlapping:*/ true,
227 // FIXME(CTFE): don't allow computing the length of arrays in const eval
228 let src
= self.eval_place(place
)?
;
229 let mplace
= self.force_allocation(src
)?
;
230 let len
= mplace
.len(self)?
;
231 self.write_scalar(Scalar
::from_machine_usize(len
, self), dest
)?
;
234 AddressOf(_
, place
) | Ref(_
, _
, place
) => {
235 let src
= self.eval_place(place
)?
;
236 let place
= self.force_allocation(src
)?
;
237 if place
.layout
.size
.bytes() > 0 {
238 // definitely not a ZST
239 assert
!(place
.ptr
.is_ptr(), "non-ZST places should be normalized to `Pointer`");
241 self.write_immediate(place
.to_ref(), dest
)?
;
244 NullaryOp(mir
::NullOp
::Box
, _
) => {
245 M
::box_alloc(self, dest
)?
;
248 NullaryOp(mir
::NullOp
::SizeOf
, ty
) => {
249 let ty
= self.subst_from_current_frame_and_normalize_erasing_regions(ty
);
250 let layout
= self.layout_of(ty
)?
;
252 !layout
.is_unsized(),
253 "SizeOf nullary MIR operator called for unsized type"
255 self.write_scalar(Scalar
::from_machine_usize(layout
.size
.bytes(), self), dest
)?
;
258 Cast(kind
, ref operand
, _
) => {
259 let src
= self.eval_operand(operand
, None
)?
;
260 self.cast(src
, kind
, dest
)?
;
263 Discriminant(place
) => {
264 let op
= self.eval_place_to_op(place
, None
)?
;
265 let discr_val
= self.read_discriminant(op
)?
.0;
266 let size
= dest
.layout
.size
;
267 self.write_scalar(Scalar
::from_uint(discr_val
, size
), dest
)?
;
271 self.dump_place(*dest
);
276 fn terminator(&mut self, terminator
: &mir
::Terminator
<'tcx
>) -> InterpResult
<'tcx
> {
277 info
!("{:?}", terminator
.kind
);
278 self.set_span(terminator
.source_info
.span
);
280 self.eval_terminator(terminator
)?
;
281 if !self.stack().is_empty() {
282 if let Some(block
) = self.frame().block
{
283 info
!("// executing {:?}", block
);