//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.
6 use rustc
::mir
::interpret
::{InterpResult, PointerArithmetic, Scalar}
;
7 use rustc
::ty
::layout
::LayoutOf
;
9 use super::{InterpCx, Machine}
;
11 /// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
12 /// same type as the result.
14 fn binop_left_homogeneous(op
: mir
::BinOp
) -> bool
{
15 use rustc
::mir
::BinOp
::*;
17 Add
| Sub
| Mul
| Div
| Rem
| BitXor
| BitAnd
| BitOr
| Offset
| Shl
| Shr
=> true,
18 Eq
| Ne
| Lt
| Le
| Gt
| Ge
=> false,
21 /// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
22 /// same type as the LHS.
24 fn binop_right_homogeneous(op
: mir
::BinOp
) -> bool
{
25 use rustc
::mir
::BinOp
::*;
27 Add
| Sub
| Mul
| Div
| Rem
| BitXor
| BitAnd
| BitOr
| Eq
| Ne
| Lt
| Le
| Gt
| Ge
=> true,
28 Offset
| Shl
| Shr
=> false,
32 impl<'mir
, 'tcx
, M
: Machine
<'mir
, 'tcx
>> InterpCx
<'mir
, 'tcx
, M
> {
33 pub fn run(&mut self) -> InterpResult
<'tcx
> {
38 /// Returns `true` as long as there are more things to do.
40 /// This is used by [priroda](https://github.com/oli-obk/priroda)
42 /// This is marked `#inline(always)` to work around adverserial codegen when `opt-level = 3`
44 pub fn step(&mut self) -> InterpResult
<'tcx
, bool
> {
45 if self.stack
.is_empty() {
49 let block
= match self.frame().block
{
52 // We are unwinding and this fn has no cleanup code.
53 // Just go on unwinding.
54 trace
!("unwinding: skipping frame");
55 self.pop_stack_frame(/* unwinding */ true)?
;
59 let stmt_id
= self.frame().stmt
;
60 let body
= self.body();
61 let basic_block
= &body
.basic_blocks()[block
];
63 let old_frames
= self.cur_frame();
65 if let Some(stmt
) = basic_block
.statements
.get(stmt_id
) {
66 assert_eq
!(old_frames
, self.cur_frame());
67 self.statement(stmt
)?
;
71 M
::before_terminator(self)?
;
73 let terminator
= basic_block
.terminator();
74 assert_eq
!(old_frames
, self.cur_frame());
75 self.terminator(terminator
)?
;
79 fn statement(&mut self, stmt
: &mir
::Statement
<'tcx
>) -> InterpResult
<'tcx
> {
82 use rustc
::mir
::StatementKind
::*;
84 // Some statements (e.g., box) push new stack frames.
85 // We have to record the stack frame number *before* executing the statement.
86 let frame_idx
= self.cur_frame();
87 self.tcx
.span
= stmt
.source_info
.span
;
88 self.memory
.tcx
.span
= stmt
.source_info
.span
;
91 Assign(box (ref place
, ref rvalue
)) => self.eval_rvalue_into_place(rvalue
, place
)?
,
93 SetDiscriminant { ref place, variant_index }
=> {
94 let dest
= self.eval_place(place
)?
;
95 self.write_discriminant_index(variant_index
, dest
)?
;
98 // Mark locals as alive
99 StorageLive(local
) => {
100 let old_val
= self.storage_live(local
)?
;
101 self.deallocate_local(old_val
)?
;
104 // Mark locals as dead
105 StorageDead(local
) => {
106 let old_val
= self.storage_dead(local
);
107 self.deallocate_local(old_val
)?
;
110 // No dynamic semantics attached to `FakeRead`; MIR
111 // interpreter is solely intended for borrowck'ed code.
115 Retag(kind
, ref place
) => {
116 let dest
= self.eval_place(place
)?
;
117 M
::retag(self, kind
, dest
)?
;
120 // Statements we do not track.
121 AscribeUserType(..) => {}
123 // Defined to do nothing. These are added by optimization passes, to avoid changing the
124 // size of MIR constantly.
127 InlineAsm { .. }
=> throw_unsup_format
!("inline assembly is not supported"),
130 self.stack
[frame_idx
].stmt
+= 1;
134 /// Evaluate an assignment statement.
136 /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
137 /// type writes its results directly into the memory specified by the place.
138 pub fn eval_rvalue_into_place(
140 rvalue
: &mir
::Rvalue
<'tcx
>,
141 place
: &mir
::Place
<'tcx
>,
142 ) -> InterpResult
<'tcx
> {
143 let dest
= self.eval_place(place
)?
;
145 use rustc
::mir
::Rvalue
::*;
147 Use(ref operand
) => {
148 // Avoid recomputing the layout
149 let op
= self.eval_operand(operand
, Some(dest
.layout
))?
;
150 self.copy_op(op
, dest
)?
;
153 BinaryOp(bin_op
, ref left
, ref right
) => {
154 let layout
= binop_left_homogeneous(bin_op
).then_some(dest
.layout
);
155 let left
= self.read_immediate(self.eval_operand(left
, layout
)?
)?
;
156 let layout
= binop_right_homogeneous(bin_op
).then_some(left
.layout
);
157 let right
= self.read_immediate(self.eval_operand(right
, layout
)?
)?
;
158 self.binop_ignore_overflow(bin_op
, left
, right
, dest
)?
;
161 CheckedBinaryOp(bin_op
, ref left
, ref right
) => {
162 // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
163 let left
= self.read_immediate(self.eval_operand(left
, None
)?
)?
;
164 let layout
= binop_right_homogeneous(bin_op
).then_some(left
.layout
);
165 let right
= self.read_immediate(self.eval_operand(right
, layout
)?
)?
;
166 self.binop_with_overflow(bin_op
, left
, right
, dest
)?
;
169 UnaryOp(un_op
, ref operand
) => {
170 // The operand always has the same type as the result.
171 let val
= self.read_immediate(self.eval_operand(operand
, Some(dest
.layout
))?
)?
;
172 let val
= self.unary_op(un_op
, val
)?
;
173 assert_eq
!(val
.layout
, dest
.layout
, "layout mismatch for result of {:?}", un_op
);
174 self.write_immediate(*val
, dest
)?
;
177 Aggregate(ref kind
, ref operands
) => {
178 let (dest
, active_field_index
) = match **kind
{
179 mir
::AggregateKind
::Adt(adt_def
, variant_index
, _
, _
, active_field_index
) => {
180 self.write_discriminant_index(variant_index
, dest
)?
;
181 if adt_def
.is_enum() {
182 (self.place_downcast(dest
, variant_index
)?
, active_field_index
)
184 (dest
, active_field_index
)
190 for (i
, operand
) in operands
.iter().enumerate() {
191 let op
= self.eval_operand(operand
, None
)?
;
192 // Ignore zero-sized fields.
193 if !op
.layout
.is_zst() {
194 let field_index
= active_field_index
.unwrap_or(i
);
195 let field_dest
= self.place_field(dest
, field_index
as u64)?
;
196 self.copy_op(op
, field_dest
)?
;
201 Repeat(ref operand
, _
) => {
202 let op
= self.eval_operand(operand
, None
)?
;
203 let dest
= self.force_allocation(dest
)?
;
204 let length
= dest
.len(self)?
;
206 if let Some(first_ptr
) = self.check_mplace_access(dest
, None
)?
{
208 let first
= self.mplace_field(dest
, 0)?
;
209 self.copy_op(op
, first
.into())?
;
212 let elem_size
= first
.layout
.size
;
213 // Copy the rest. This is performance-sensitive code
214 // for big static/const arrays!
215 let rest_ptr
= first_ptr
.offset(elem_size
, self)?
;
216 self.memory
.copy_repeatedly(
221 /*nonoverlapping:*/ true,
228 // FIXME(CTFE): don't allow computing the length of arrays in const eval
229 let src
= self.eval_place(place
)?
;
230 let mplace
= self.force_allocation(src
)?
;
231 let len
= mplace
.len(self)?
;
232 let size
= self.pointer_size();
233 self.write_scalar(Scalar
::from_uint(len
, size
), dest
)?
;
236 AddressOf(_
, ref place
) | Ref(_
, _
, ref place
) => {
237 let src
= self.eval_place(place
)?
;
238 let place
= self.force_allocation(src
)?
;
239 if place
.layout
.size
.bytes() > 0 {
240 // definitely not a ZST
241 assert
!(place
.ptr
.is_ptr(), "non-ZST places should be normalized to `Pointer`");
243 self.write_immediate(place
.to_ref(), dest
)?
;
246 NullaryOp(mir
::NullOp
::Box
, _
) => {
247 M
::box_alloc(self, dest
)?
;
250 NullaryOp(mir
::NullOp
::SizeOf
, ty
) => {
251 let ty
= self.subst_from_frame_and_normalize_erasing_regions(ty
);
252 let layout
= self.layout_of(ty
)?
;
254 !layout
.is_unsized(),
255 "SizeOf nullary MIR operator called for unsized type"
257 let size
= self.pointer_size();
258 self.write_scalar(Scalar
::from_uint(layout
.size
.bytes(), size
), dest
)?
;
261 Cast(kind
, ref operand
, _
) => {
262 let src
= self.eval_operand(operand
, None
)?
;
263 self.cast(src
, kind
, dest
)?
;
266 Discriminant(ref place
) => {
267 let op
= self.eval_place_to_op(place
, None
)?
;
268 let discr_val
= self.read_discriminant(op
)?
.0;
269 let size
= dest
.layout
.size
;
270 self.write_scalar(Scalar
::from_uint(discr_val
, size
), dest
)?
;
274 self.dump_place(*dest
);
279 fn terminator(&mut self, terminator
: &mir
::Terminator
<'tcx
>) -> InterpResult
<'tcx
> {
280 info
!("{:?}", terminator
.kind
);
281 self.tcx
.span
= terminator
.source_info
.span
;
282 self.memory
.tcx
.span
= terminator
.source_info
.span
;
284 let old_stack
= self.cur_frame();
285 let old_bb
= self.frame().block
;
287 self.eval_terminator(terminator
)?
;
288 if !self.stack
.is_empty() {
289 // This should change *something*
290 assert
!(self.cur_frame() != old_stack
|| self.frame().block
!= old_bb
);
291 if let Some(block
) = self.frame().block
{
292 info
!("// executing {:?}", block
);