]>
Commit | Line | Data |
---|---|---|
b7449926 XL |
1 | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT |
2 | // file at the top-level directory of this distribution and at | |
3 | // http://rust-lang.org/COPYRIGHT. | |
4 | // | |
5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | |
6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | |
7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | |
8 | // option. This file may not be copied, modified, or distributed | |
9 | // except according to those terms. | |
10 | ||
ff7c6d11 XL |
11 | //! This module contains the `EvalContext` methods for executing a single step of the interpreter. |
12 | //! | |
13 | //! The main entry point is the `step` method. | |
14 | ||
ff7c6d11 | 15 | use rustc::mir; |
b7449926 XL |
16 | use rustc::ty::layout::LayoutOf; |
17 | use rustc::mir::interpret::{EvalResult, Scalar, PointerArithmetic}; | |
ff7c6d11 | 18 | |
0531ce1d | 19 | use super::{EvalContext, Machine}; |
ff7c6d11 | 20 | |
b7449926 XL |
21 | /// Classify whether an operator is "left-homogeneous", i.e. the LHS has the |
22 | /// same type as the result. | |
23 | #[inline] | |
24 | fn binop_left_homogeneous(op: mir::BinOp) -> bool { | |
25 | use rustc::mir::BinOp::*; | |
26 | match op { | |
27 | Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | | |
28 | Offset | Shl | Shr => | |
29 | true, | |
30 | Eq | Ne | Lt | Le | Gt | Ge => | |
31 | false, | |
32 | } | |
33 | } | |
34 | /// Classify whether an operator is "right-homogeneous", i.e. the RHS has the | |
35 | /// same type as the LHS. | |
36 | #[inline] | |
37 | fn binop_right_homogeneous(op: mir::BinOp) -> bool { | |
38 | use rustc::mir::BinOp::*; | |
39 | match op { | |
40 | Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | | |
41 | Eq | Ne | Lt | Le | Gt | Ge => | |
42 | true, | |
43 | Offset | Shl | Shr => | |
44 | false, | |
45 | } | |
46 | } | |
47 | ||
impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
    /// Run the interpreter until there is nothing left to do, i.e. until
    /// `step` reports that the call stack is empty.
    pub fn run(&mut self) -> EvalResult<'tcx> {
        while self.step()? {}
        Ok(())
    }

    /// Returns true as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn step(&mut self) -> EvalResult<'tcx, bool> {
        // An empty call stack means execution has finished.
        if self.stack.is_empty() {
            return Ok(false);
        }

        let block = self.frame().block;
        let stmt_id = self.frame().stmt;
        let mir = self.mir();
        let basic_block = &mir.basic_blocks()[block];

        // Record the frame index so we can assert below that evaluating the
        // statement/terminator itself did not change the current frame.
        let old_frames = self.cur_frame();

        // If `stmt` is still within the statement list, execute one statement;
        // otherwise the block's terminator is next.
        if let Some(stmt) = basic_block.statements.get(stmt_id) {
            assert_eq!(old_frames, self.cur_frame());
            self.statement(stmt)?;
            return Ok(true);
        }

        // Give the machine a chance to hook in before the terminator runs
        // (used e.g. by Miri for things like preemption/validation hooks --
        // the exact use is machine-defined).
        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        assert_eq!(old_frames, self.cur_frame());
        self.terminator(terminator)?;
        Ok(true)
    }

    /// Execute a single MIR statement in the current frame, then advance that
    /// frame's statement counter.
    fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> {
        debug!("{:?}", stmt);

        use rustc::mir::StatementKind::*;

        // Some statements (e.g. box) push new stack frames.
        // We have to record the stack frame number *before* executing the statement.
        let frame_idx = self.cur_frame();
        // Keep the spans up to date so that any error raised while executing
        // this statement points at the right source location.
        self.tcx.span = stmt.source_info.span;
        self.memory.tcx.span = stmt.source_info.span;

        match stmt.kind {
            Assign(ref place, ref rvalue) => self.eval_rvalue_into_place(rvalue, place)?,

            SetDiscriminant {
                ref place,
                variant_index,
            } => {
                let dest = self.eval_place(place)?;
                self.write_discriminant_index(variant_index, dest)?;
            }

            // Mark locals as alive
            StorageLive(local) => {
                let old_val = self.storage_live(local)?;
                // Free whatever backing allocation the local may have had
                // before this (re-)liveness marker.
                self.deallocate_local(old_val)?;
            }

            // Mark locals as dead
            StorageDead(local) => {
                let old_val = self.storage_dead(local);
                self.deallocate_local(old_val)?;
            }

            // No dynamic semantics attached to `FakeRead`; MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            // Stacked Borrows.
            Retag { fn_entry, ref place } => {
                let dest = self.eval_place(place)?;
                // Machine-defined: e.g. Miri updates its borrow-tracking
                // state here; the default machine may make this a no-op.
                M::retag(self, fn_entry, dest)?;
            }
            EscapeToRaw(ref op) => {
                let op = self.eval_operand(op, None)?;
                M::escape_to_raw(self, op)?;
            }

            // Statements we do not track.
            AscribeUserType(..) => {}

            // Defined to do nothing. These are added by optimization passes, to avoid changing the
            // size of MIR constantly.
            Nop => {}

            InlineAsm { .. } => return err!(InlineAsm),
        }

        // Advance the statement counter of the frame recorded *before* the
        // statement ran (NOT necessarily the current frame -- see the
        // `frame_idx` comment above).
        self.stack[frame_idx].stmt += 1;
        Ok(())
    }

    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
    fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: &mir::Place<'tcx>,
    ) -> EvalResult<'tcx> {
        let dest = self.eval_place(place)?;

        use rustc::mir::Rvalue::*;
        match *rvalue {
            Use(ref operand) => {
                // Avoid recomputing the layout
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(op, dest)?;
            }

            BinaryOp(bin_op, ref left, ref right) => {
                // Reuse known layouts where the operator guarantees the
                // operand type matches (see the `binop_*_homogeneous`
                // classifiers at the top of this module).
                let layout = if binop_left_homogeneous(bin_op) { Some(dest.layout) } else { None };
                let left = self.read_immediate(self.eval_operand(left, layout)?)?;
                let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None };
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_ignore_overflow(
                    bin_op,
                    left,
                    right,
                    dest,
                )?;
            }

            CheckedBinaryOp(bin_op, ref left, ref right) => {
                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
                let left = self.read_immediate(self.eval_operand(left, None)?)?;
                let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None };
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_with_overflow(
                    bin_op,
                    left,
                    right,
                    dest,
                )?;
            }

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?;
                let val = self.unary_op(un_op, val.to_scalar()?, dest.layout)?;
                self.write_scalar(val, dest)?;
            }

            Aggregate(ref kind, ref operands) => {
                // For enums, write the discriminant first and then project
                // into the chosen variant; for unions, `active_field_index`
                // names the single field being written.
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        self.write_discriminant_index(variant_index, dest)?;
                        if adt_def.is_enum() {
                            (self.place_downcast(dest, variant_index)?, active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None)
                };

                for (i, operand) in operands.iter().enumerate() {
                    let op = self.eval_operand(operand, None)?;
                    // Ignore zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field_dest = self.place_field(dest, field_index as u64)?;
                        self.copy_op(op, field_dest)?;
                    }
                }
            }

            Repeat(ref operand, _) => {
                let op = self.eval_operand(operand, None)?;
                // `[x; N]` needs an in-memory destination we can index into.
                let dest = self.force_allocation(dest)?;
                let length = dest.len(self)?;

                if length > 0 {
                    // write the first
                    let first = self.mplace_field(dest, 0)?;
                    self.copy_op(op, first.into())?;

                    if length > 1 {
                        // copy the rest
                        let (dest, dest_align) = first.to_scalar_ptr_align();
                        let rest = dest.ptr_offset(first.layout.size, self)?;
                        // Replicate the already-written first element into
                        // the remaining `length - 1` slots.
                        self.memory.copy_repeatedly(
                            dest, dest_align, rest, dest_align, first.layout.size, length - 1, true
                        )?;
                    }
                }
            }

            Len(ref place) => {
                // FIXME(CTFE): don't allow computing the length of arrays in const eval
                let src = self.eval_place(place)?;
                let mplace = self.force_allocation(src)?;
                let len = mplace.len(self)?;
                // `Len` yields a `usize`, so the result is pointer-sized.
                let size = self.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(len, size),
                    dest,
                )?;
            }

            Ref(_, _, ref place) => {
                let src = self.eval_place(place)?;
                // Taking a reference forces the place into memory.
                let val = self.force_allocation(src)?;
                self.write_immediate(val.to_ref(), dest)?;
            }

            NullaryOp(mir::NullOp::Box, _) => {
                // Box allocation is machine-defined (heap semantics differ
                // between CTFE and Miri).
                M::box_alloc(self, dest)?;
            }

            NullaryOp(mir::NullOp::SizeOf, ty) => {
                let ty = self.monomorphize(ty, self.substs());
                let layout = self.layout_of(ty)?;
                assert!(!layout.is_unsized(),
                        "SizeOf nullary MIR operator called for unsized type");
                let size = self.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(layout.size.bytes(), size),
                    dest,
                )?;
            }

            Cast(kind, ref operand, cast_ty) => {
                // Sanity check: the destination layout must agree with the
                // monomorphized cast target type.
                debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest.layout.ty);
                let src = self.eval_operand(operand, None)?;
                self.cast(src, kind, dest)?;
            }

            Discriminant(ref place) => {
                let place = self.eval_place(place)?;
                let discr_val = self.read_discriminant(self.place_to_op(place)?)?.0;
                let size = dest.layout.size;
                self.write_scalar(Scalar::from_uint(discr_val, size), dest)?;
            }
        }

        // Debug-dump the destination after the write (no-op in release
        // configurations where dumping is disabled).
        self.dump_place(*dest);

        Ok(())
    }

    /// Execute the terminator of the current basic block, which transfers
    /// control (goto/call/return/...) rather than writing a value.
    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> {
        debug!("{:?}", terminator.kind);
        // Keep spans up to date for error reporting, as in `statement`.
        self.tcx.span = terminator.source_info.span;
        self.memory.tcx.span = terminator.source_info.span;

        let old_stack = self.cur_frame();
        let old_bb = self.frame().block;
        self.eval_terminator(terminator)?;
        if !self.stack.is_empty() {
            // This should change *something*
            debug_assert!(self.cur_frame() != old_stack || self.frame().block != old_bb);
            debug!("// {:?}", self.frame().block);
        }
        Ok(())
    }
}