//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.

ff7c6d11 | 5 | use rustc::mir; |
b7449926 | 6 | use rustc::ty::layout::LayoutOf; |
dc9dc135 | 7 | use rustc::mir::interpret::{InterpResult, Scalar, PointerArithmetic}; |
ff7c6d11 | 8 | |
416331ca | 9 | use super::{InterpCx, Machine}; |
ff7c6d11 | 10 | |
0731742a | 11 | /// Classify whether an operator is "left-homogeneous", i.e., the LHS has the |
b7449926 XL |
12 | /// same type as the result. |
13 | #[inline] | |
14 | fn binop_left_homogeneous(op: mir::BinOp) -> bool { | |
15 | use rustc::mir::BinOp::*; | |
16 | match op { | |
17 | Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | | |
18 | Offset | Shl | Shr => | |
19 | true, | |
20 | Eq | Ne | Lt | Le | Gt | Ge => | |
21 | false, | |
22 | } | |
23 | } | |
0731742a | 24 | /// Classify whether an operator is "right-homogeneous", i.e., the RHS has the |
b7449926 XL |
25 | /// same type as the LHS. |
26 | #[inline] | |
27 | fn binop_right_homogeneous(op: mir::BinOp) -> bool { | |
28 | use rustc::mir::BinOp::*; | |
29 | match op { | |
30 | Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | | |
31 | Eq | Ne | Lt | Le | Gt | Ge => | |
32 | true, | |
33 | Offset | Shl | Shr => | |
34 | false, | |
35 | } | |
36 | } | |
37 | ||
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Run the interpreter until there is nothing left to do, i.e., until
    /// `step` reports that the stack is empty.
    pub fn run(&mut self) -> InterpResult<'tcx> {
        while self.step()? {}
        Ok(())
    }

    /// Returns `true` as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
        // No frames left: execution is complete.
        if self.stack.is_empty() {
            return Ok(false);
        }

        let block = self.frame().block;
        let stmt_id = self.frame().stmt;
        let body = self.body();
        let basic_block = &body.basic_blocks()[block];

        // Remember the current frame so we can assert below that executing a
        // statement/terminator did not unexpectedly switch frames beforehand.
        let old_frames = self.cur_frame();

        // If the statement index is still within this block, execute the next
        // statement; otherwise fall through to the block's terminator.
        if let Some(stmt) = basic_block.statements.get(stmt_id) {
            assert_eq!(old_frames, self.cur_frame());
            self.statement(stmt)?;
            return Ok(true);
        }

        // Give the machine a chance to act (e.g. for instrumentation) before
        // the terminator runs.
        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        assert_eq!(old_frames, self.cur_frame());
        self.terminator(terminator)?;
        Ok(true)
    }

    /// Execute a single MIR statement in the current frame, then advance the
    /// frame's statement counter.
    fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", stmt);

        use rustc::mir::StatementKind::*;

        // Some statements (e.g., box) push new stack frames.
        // We have to record the stack frame number *before* executing the statement.
        let frame_idx = self.cur_frame();
        // Point the spans at this statement so errors are reported at the
        // right source location.
        self.tcx.span = stmt.source_info.span;
        self.memory.tcx.span = stmt.source_info.span;

        match stmt.kind {
            Assign(box(ref place, ref rvalue)) => self.eval_rvalue_into_place(rvalue, place)?,

            SetDiscriminant {
                ref place,
                variant_index,
            } => {
                let dest = self.eval_place(place)?;
                self.write_discriminant_index(variant_index, dest)?;
            }

            // Mark locals as alive
            StorageLive(local) => {
                // `storage_live` returns the local's previous value, whose
                // backing allocation (if any) must be freed.
                let old_val = self.storage_live(local)?;
                self.deallocate_local(old_val)?;
            }

            // Mark locals as dead
            StorageDead(local) => {
                let old_val = self.storage_dead(local);
                self.deallocate_local(old_val)?;
            }

            // No dynamic semantics attached to `FakeRead`; MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            // Stacked Borrows.
            Retag(kind, ref place) => {
                let dest = self.eval_place(place)?;
                // Retagging is machine-specific (no-op for CTFE, real work in Miri).
                M::retag(self, kind, dest)?;
            }

            // Statements we do not track.
            AscribeUserType(..) => {}

            // Defined to do nothing. These are added by optimization passes, to avoid changing the
            // size of MIR constantly.
            Nop => {}

            InlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
        }

        // Use the frame index recorded *before* execution, in case the
        // statement pushed a new frame.
        self.stack[frame_idx].stmt += 1;
        Ok(())
    }

    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: &mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;

        use rustc::mir::Rvalue::*;
        match *rvalue {
            Use(ref operand) => {
                // Avoid recomputing the layout
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(op, dest)?;
            }

            BinaryOp(bin_op, ref left, ref right) => {
                // Reuse known layouts where the operator's typing rules allow
                // it, to avoid recomputing them.
                let layout = if binop_left_homogeneous(bin_op) { Some(dest.layout) } else { None };
                let left = self.read_immediate(self.eval_operand(left, layout)?)?;
                let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None };
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_ignore_overflow(
                    bin_op,
                    left,
                    right,
                    dest,
                )?;
            }

            CheckedBinaryOp(bin_op, ref left, ref right) => {
                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
                let left = self.read_immediate(self.eval_operand(left, None)?)?;
                let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None };
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_with_overflow(
                    bin_op,
                    left,
                    right,
                    dest,
                )?;
            }

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?;
                let val = self.unary_op(un_op, val)?;
                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
                self.write_immediate(*val, dest)?;
            }

            Aggregate(ref kind, ref operands) => {
                // For enum ADTs, write the discriminant first, then target the
                // downcast variant place; `active_field_index` is used for
                // unions, where only one field is written.
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        self.write_discriminant_index(variant_index, dest)?;
                        if adt_def.is_enum() {
                            (self.place_downcast(dest, variant_index)?, active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None)
                };

                for (i, operand) in operands.iter().enumerate() {
                    let op = self.eval_operand(operand, None)?;
                    // Ignore zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field_dest = self.place_field(dest, field_index as u64)?;
                        self.copy_op(op, field_dest)?;
                    }
                }
            }

            Repeat(ref operand, _) => {
                let op = self.eval_operand(operand, None)?;
                // Arrays need a real memory location to be written element by
                // element.
                let dest = self.force_allocation(dest)?;
                let length = dest.len(self)?;

                // `check_mplace_access` returns `None` for zero-sized accesses,
                // in which case there is nothing to write.
                if let Some(first_ptr) = self.check_mplace_access(dest, None)? {
                    // Write the first.
                    let first = self.mplace_field(dest, 0)?;
                    self.copy_op(op, first.into())?;

                    if length > 1 {
                        let elem_size = first.layout.size;
                        // Copy the rest. This is performance-sensitive code
                        // for big static/const arrays!
                        let rest_ptr = first_ptr.offset(elem_size, self)?;
                        self.memory.copy_repeatedly(
                            first_ptr, rest_ptr, elem_size, length - 1, /*nonoverlapping:*/true
                        )?;
                    }
                }
            }

            Len(ref place) => {
                // FIXME(CTFE): don't allow computing the length of arrays in const eval
                let src = self.eval_place(place)?;
                let mplace = self.force_allocation(src)?;
                let len = mplace.len(self)?;
                // The result of `Len` is a `usize`, so it is pointer-sized.
                let size = self.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(len, size),
                    dest,
                )?;
            }

            Ref(_, _, ref place) => {
                let src = self.eval_place(place)?;
                let place = self.force_allocation(src)?;
                if place.layout.size.bytes() > 0 {
                    // definitely not a ZST
                    assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
                }
                self.write_immediate(place.to_ref(), dest)?;
            }

            NullaryOp(mir::NullOp::Box, _) => {
                // Box allocation is machine-specific.
                M::box_alloc(self, dest)?;
            }

            NullaryOp(mir::NullOp::SizeOf, ty) => {
                // Substitute the frame's generics into `ty` before computing
                // its layout.
                let ty = self.subst_from_frame_and_normalize_erasing_regions(ty);
                let layout = self.layout_of(ty)?;
                assert!(!layout.is_unsized(),
                        "SizeOf nullary MIR operator called for unsized type");
                let size = self.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(layout.size.bytes(), size),
                    dest,
                )?;
            }

            Cast(kind, ref operand, _) => {
                let src = self.eval_operand(operand, None)?;
                self.cast(src, kind, dest)?;
            }

            Discriminant(ref place) => {
                let op = self.eval_place_to_op(place, None)?;
                let discr_val = self.read_discriminant(op)?.0;
                let size = dest.layout.size;
                self.write_scalar(Scalar::from_uint(discr_val, size), dest)?;
            }
        }

        self.dump_place(*dest);

        Ok(())
    }

    /// Execute the terminator of the current basic block, which ends the block
    /// and determines where execution continues.
    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", terminator.kind);
        // Point the spans at this terminator for error reporting.
        self.tcx.span = terminator.source_info.span;
        self.memory.tcx.span = terminator.source_info.span;

        let old_stack = self.cur_frame();
        let old_bb = self.frame().block;
        self.eval_terminator(terminator)?;
        if !self.stack.is_empty() {
            // This should change *something*
            debug_assert!(self.cur_frame() != old_stack || self.frame().block != old_bb);
            info!("// {:?}", self.frame().block);
        }
        Ok(())
    }
}