// rustc 1.47.0+dfsg1: src/librustc_mir/interpret/step.rs
//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.

use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_target::abi::LayoutOf;

use super::{InterpCx, Machine};

/// Classifies whether an operator is "left-homogeneous", i.e., the LHS has the
/// same type as the result.
#[inline]
fn binop_left_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
        Eq | Ne | Lt | Le | Gt | Ge => false,
    }
}

/// Classifies whether an operator is "right-homogeneous", i.e., the RHS has the
/// same type as the LHS.
#[inline]
fn binop_right_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
        Offset | Shl | Shr => false,
    }
}
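
// Illustrative note (a sketch, not part of the original source): shifts and
// `Offset` are not right-homogeneous because their RHS may have a different
// type than the LHS, e.g.:
//
//     let x: u64 = 1;
//     let s: u8 = 3;
//     let _ = x << s; // `Shl`: LHS is `u64`, RHS is `u8`, result is `u64`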

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn run(&mut self) -> InterpResult<'tcx> {
        while self.step()? {}
        Ok(())
    }

    /// Returns `true` as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    ///
    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`.
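    ///
    /// # Example
    ///
    /// An illustrative sketch of how a debugger-like tool (such as priroda)
    /// might drive the interpreter one step at a time; `ecx` and `inspect`
    /// are hypothetical:
    ///
    /// ```ignore
    /// while ecx.step()? {
    ///     inspect(&ecx); // examine interpreter state between steps
    /// }
    /// ```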
    #[inline(always)]
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
        if self.stack().is_empty() {
            return Ok(false);
        }

        let loc = match self.frame().loc {
            Ok(loc) => loc,
            Err(_) => {
                // We are unwinding and this fn has no cleanup code.
                // Just go on unwinding.
                trace!("unwinding: skipping frame");
                self.pop_stack_frame(/* unwinding */ true)?;
                return Ok(true);
            }
        };
        let basic_block = &self.body().basic_blocks()[loc.block];

        let old_frames = self.frame_idx();

        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
            assert_eq!(old_frames, self.frame_idx());
            self.statement(stmt)?;
            return Ok(true);
        }

        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        assert_eq!(old_frames, self.frame_idx());
        self.terminator(terminator)?;
        Ok(true)
    }

    /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
    /// statement counter. This also moves the statement counter forward.
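    ///
    /// For illustration (a sketch, not from the original source), the kinds of
    /// statements handled here look like this in MIR:
    ///
    /// ```text
    /// StorageLive(_1);
    /// _1 = const 5_i32;   // `Assign`
    /// StorageDead(_1);
    /// ```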
    crate fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", stmt);

        use rustc_middle::mir::StatementKind::*;

        // Some statements (e.g., box) push new stack frames.
        // We have to record the stack frame number *before* executing the statement.
        let frame_idx = self.frame_idx();

        match &stmt.kind {
            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,

            SetDiscriminant { place, variant_index } => {
                let dest = self.eval_place(**place)?;
                self.write_discriminant(*variant_index, dest)?;
            }

            // Mark locals as alive.
            StorageLive(local) => {
                let old_val = self.storage_live(*local)?;
                self.deallocate_local(old_val)?;
            }

            // Mark locals as dead.
            StorageDead(local) => {
                let old_val = self.storage_dead(*local);
                self.deallocate_local(old_val)?;
            }
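
            // Illustrative note (a sketch, not from the original source): a
            // scoped local such as `{ let x = 5; }` lowers to a
            // `StorageLive(_1)` / `StorageDead(_1)` pair delimiting its live
            // range; the two arms above allocate and free the local's backing
            // store accordingly.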

            // There are no dynamic semantics attached to `FakeRead`; the MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            // Stacked Borrows.
            Retag(kind, place) => {
                let dest = self.eval_place(**place)?;
                M::retag(self, *kind, dest)?;
            }

            // Statements we do not track.
            AscribeUserType(..) => {}

            // Currently, Miri discards Coverage statements. Coverage statements are only injected
            // via an optional compile time MIR pass and have no side effects. Since Coverage
            // statements don't exist at the source level, it is safe for Miri to ignore them, even
            // for undefined behavior (UB) checks.
            //
            // A coverage counter inside a const expression (for example, a counter injected in a
            // const function) is discarded when the const is evaluated at compile time. Whether
            // this should change, and/or how to implement a const eval counter, is a subject of the
            // following issue:
            //
            // FIXME(#73156): Handle source code coverage in const eval
            Coverage(..) => {}

            // Defined to do nothing. These are added by optimization passes to avoid changing the
            // size of MIR constantly.
            Nop => {}

            LlvmInlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
        }

        self.stack_mut()[frame_idx].loc.as_mut().unwrap().statement_index += 1;
        Ok(())
    }

    /// Evaluates an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
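    ///
    /// For illustration (a sketch, not from the original source), a MIR
    /// assignment handled here might look like:
    ///
    /// ```text
    /// _2 = Add(move _1, const 1_i32);   // `Rvalue::BinaryOp` written into `_2`
    /// ```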
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;

        use rustc_middle::mir::Rvalue::*;
        match *rvalue {
            ThreadLocalRef(did) => {
                let id = M::thread_local_static_alloc_id(self, did)?;
                let val = self.global_base_pointer(id.into())?;
                self.write_scalar(val, dest)?;
            }

            Use(ref operand) => {
                // Avoid recomputing the layout.
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(op, dest)?;
            }

            BinaryOp(bin_op, ref left, ref right) => {
                let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
                let left = self.read_immediate(self.eval_operand(left, layout)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_ignore_overflow(bin_op, left, right, dest)?;
            }

            CheckedBinaryOp(bin_op, ref left, ref right) => {
                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
                let left = self.read_immediate(self.eval_operand(left, None)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_with_overflow(bin_op, left, right, dest)?;
            }
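
            // Illustrative note (a sketch, not from the original source):
            // overflow-checked arithmetic such as `x + y` with overflow checks
            // enabled lowers to `CheckedBinaryOp`, whose result is a pair like
            // `(i32, bool)`; that extra `bool` is why `dest.layout` cannot be
            // used to seed the operand layouts above.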

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?;
                let val = self.unary_op(un_op, val)?;
                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
                self.write_immediate(*val, dest)?;
            }

            Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        self.write_discriminant(variant_index, dest)?;
                        if adt_def.is_enum() {
                            (self.place_downcast(dest, variant_index)?, active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };
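
                // Illustrative note (a sketch, not from the original source):
                // `active_field_index` is `Some` for union aggregates, where
                // only a single field is written (e.g. `MyUnion { f: 0 }`);
                // the loop below then targets that field instead of field `i`.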

                for (i, operand) in operands.iter().enumerate() {
                    let op = self.eval_operand(operand, None)?;
                    // Ignore zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field_dest = self.place_field(dest, field_index)?;
                        self.copy_op(op, field_dest)?;
                    }
                }
            }

            Repeat(ref operand, _) => {
                let op = self.eval_operand(operand, None)?;
                let dest = self.force_allocation(dest)?;
                let length = dest.len(self)?;

                if let Some(first_ptr) = self.check_mplace_access(dest, None)? {
                    // Write the first element.
                    let first = self.mplace_field(dest, 0)?;
                    self.copy_op(op, first.into())?;

                    if length > 1 {
                        let elem_size = first.layout.size;
                        // Copy the rest. This is performance-sensitive code
                        // for big static/const arrays!
                        let rest_ptr = first_ptr.offset(elem_size, self)?;
                        self.memory.copy_repeatedly(
                            first_ptr,
                            rest_ptr,
                            elem_size,
                            length - 1,
                            /*nonoverlapping:*/ true,
                        )?;
                    }
                }
            }
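
            // Illustrative note (a sketch, not from the original source): this
            // is the path taken by array repeat expressions, e.g. a constant
            // `[0u8; 4096]` is initialized with one element write plus a single
            // `copy_repeatedly` rather than 4096 separate stores.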

            Len(place) => {
                // FIXME(CTFE): don't allow computing the length of arrays in const eval
                let src = self.eval_place(place)?;
                let mplace = self.force_allocation(src)?;
                let len = mplace.len(self)?;
                self.write_scalar(Scalar::from_machine_usize(len, self), dest)?;
            }

            AddressOf(_, place) | Ref(_, _, place) => {
                let src = self.eval_place(place)?;
                let place = self.force_allocation(src)?;
                if place.layout.size.bytes() > 0 {
                    // Definitely not a ZST.
                    assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
                }
                self.write_immediate(place.to_ref(), dest)?;
            }
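
            // Illustrative note (a sketch, not from the original source):
            // `let r = &x;` lowers to a `Ref` rvalue like `_2 = &_1`; taking
            // the reference forces `x` into memory (`force_allocation` above)
            // so that a pointer to it exists.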

            NullaryOp(mir::NullOp::Box, _) => {
                M::box_alloc(self, dest)?;
            }

            NullaryOp(mir::NullOp::SizeOf, ty) => {
                let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty);
                let layout = self.layout_of(ty)?;
                assert!(
                    !layout.is_unsized(),
                    "SizeOf nullary MIR operator called for unsized type"
                );
                self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), dest)?;
            }

            Cast(cast_kind, ref operand, cast_ty) => {
                let src = self.eval_operand(operand, None)?;
                let cast_ty = self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty);
                self.cast(src, cast_kind, cast_ty, dest)?;
            }

            Discriminant(place) => {
                let op = self.eval_place_to_op(place, None)?;
                let discr_val = self.read_discriminant(op)?.0;
                self.write_scalar(discr_val, dest)?;
            }
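
            // Illustrative note (a sketch, not from the original source): a
            // `match` on an enum reads the tag via a `Discriminant` rvalue,
            // e.g. `_2 = discriminant(_1)` in MIR, before switching on it.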
        }

        trace!("{:?}", self.dump_place(*dest));

        Ok(())
    }

    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", terminator.kind);

        self.eval_terminator(terminator)?;
        if !self.stack().is_empty() {
            if let Ok(loc) = self.frame().loc {
                info!("// executing {:?}", loc.block);
            }
        }
        Ok(())
    }
}
305 }