//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.

use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_target::abi::LayoutOf;

use super::{InterpCx, Machine};

/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
/// same type as the result.
#[inline]
fn binop_left_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
        Eq | Ne | Lt | Le | Gt | Ge => false,
    }
}
/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
/// same type as the LHS.
#[inline]
fn binop_right_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
        Offset | Shl | Shr => false,
    }
}
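
// For example: `Add` is both left- and right-homogeneous (`usize + usize -> usize`);
// `Shl` and `Offset` are only left-homogeneous (`u64 << u32 -> u64`,
// `*const T` offset by `usize` -> `*const T`); comparisons like `Lt` are only
// right-homogeneous (`u32 < u32 -> bool`).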

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn run(&mut self) -> InterpResult<'tcx> {
        while self.step()? {}
        Ok(())
    }

    /// Returns `true` as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    ///
    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`.
    #[inline(always)]
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
        if self.stack().is_empty() {
            return Ok(false);
        }

        let block = match self.frame().block {
            Some(block) => block,
            None => {
                // We are unwinding and this fn has no cleanup code.
                // Just go on unwinding.
                trace!("unwinding: skipping frame");
                self.pop_stack_frame(/* unwinding */ true)?;
                return Ok(true);
            }
        };
        let stmt_id = self.frame().stmt;
        let body = self.body();
        let basic_block = &body.basic_blocks()[block];

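        // Record the frame we looked the basic block up in; the asserts below check that we
        // are still executing in that same frame when the statement/terminator runs.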
        let old_frames = self.frame_idx();

        if let Some(stmt) = basic_block.statements.get(stmt_id) {
            assert_eq!(old_frames, self.frame_idx());
            self.statement(stmt)?;
            return Ok(true);
        }

        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        assert_eq!(old_frames, self.frame_idx());
        self.terminator(terminator)?;
        Ok(true)
    }

    fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", stmt);
        self.set_span(stmt.source_info.span);

        use rustc_middle::mir::StatementKind::*;

        // Some statements (e.g., box) push new stack frames.
        // We have to record the stack frame number *before* executing the statement.
        let frame_idx = self.frame_idx();

        match &stmt.kind {
            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,

            SetDiscriminant { place, variant_index } => {
                let dest = self.eval_place(**place)?;
                self.write_discriminant_index(*variant_index, dest)?;
            }

            // Mark locals as alive
            StorageLive(local) => {
                let old_val = self.storage_live(*local)?;
                self.deallocate_local(old_val)?;
            }

            // Mark locals as dead
            StorageDead(local) => {
                let old_val = self.storage_dead(*local);
                self.deallocate_local(old_val)?;
            }

            // No dynamic semantics attached to `FakeRead`; MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            // Stacked Borrows.
            Retag(kind, place) => {
                let dest = self.eval_place(**place)?;
                M::retag(self, *kind, dest)?;
            }

            // Statements we do not track.
            AscribeUserType(..) => {}

            // Defined to do nothing. These are added by optimization passes, to avoid changing the
            // size of MIR constantly.
            Nop => {}

            LlvmInlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
        }

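        // Advance the statement counter of the frame that was current *before* executing the
        // statement: as noted above, the statement itself may have pushed new frames on top.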
        self.stack_mut()[frame_idx].stmt += 1;
        Ok(())
    }

    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;

        use rustc_middle::mir::Rvalue::*;
        match *rvalue {
            Use(ref operand) => {
                // Avoid recomputing the layout
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(op, dest)?;
            }

            BinaryOp(bin_op, ref left, ref right) => {
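                // Reuse an operand layout only where the homogeneity helpers above guarantee
                // the types match, saving a layout query in the common case.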
                let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
                let left = self.read_immediate(self.eval_operand(left, layout)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_ignore_overflow(bin_op, left, right, dest)?;
            }

            CheckedBinaryOp(bin_op, ref left, ref right) => {
                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
                let left = self.read_immediate(self.eval_operand(left, None)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(self.eval_operand(right, layout)?)?;
                self.binop_with_overflow(bin_op, left, right, dest)?;
            }

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?;
                let val = self.unary_op(un_op, val)?;
                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
                self.write_immediate(*val, dest)?;
            }

            Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        self.write_discriminant_index(variant_index, dest)?;
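                        // For enums, the fields live in the downcast-to-variant place,
                        // so adjust `dest` before writing them; structs need no downcast.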
                        if adt_def.is_enum() {
                            (self.place_downcast(dest, variant_index)?, active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };

                for (i, operand) in operands.iter().enumerate() {
                    let op = self.eval_operand(operand, None)?;
                    // Ignore zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field_dest = self.place_field(dest, field_index)?;
                        self.copy_op(op, field_dest)?;
                    }
                }
            }

            Repeat(ref operand, _) => {
                let op = self.eval_operand(operand, None)?;
                let dest = self.force_allocation(dest)?;
                let length = dest.len(self)?;

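                // A `None` pointer here means the destination has size zero (e.g. an array of
                // length zero or of ZSTs), so there is nothing to write.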
                if let Some(first_ptr) = self.check_mplace_access(dest, None)? {
                    // Write the first element.
                    let first = self.mplace_field(dest, 0)?;
                    self.copy_op(op, first.into())?;

                    if length > 1 {
                        let elem_size = first.layout.size;
                        // Copy the rest. This is performance-sensitive code
                        // for big static/const arrays!
                        let rest_ptr = first_ptr.offset(elem_size, self)?;
                        self.memory.copy_repeatedly(
                            first_ptr,
                            rest_ptr,
                            elem_size,
                            length - 1,
                            /*nonoverlapping:*/ true,
                        )?;
                    }
                }
            }

            Len(place) => {
                // FIXME(CTFE): don't allow computing the length of arrays in const eval
                let src = self.eval_place(place)?;
                let mplace = self.force_allocation(src)?;
                let len = mplace.len(self)?;
                self.write_scalar(Scalar::from_machine_usize(len, self), dest)?;
            }

            AddressOf(_, place) | Ref(_, _, place) => {
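                // Creating a reference or raw pointer requires the place to have an actual
                // address in memory, so force an allocation for it first.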
                let src = self.eval_place(place)?;
                let place = self.force_allocation(src)?;
                if place.layout.size.bytes() > 0 {
                    // definitely not a ZST
                    assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
                }
                self.write_immediate(place.to_ref(), dest)?;
            }

            NullaryOp(mir::NullOp::Box, _) => {
                M::box_alloc(self, dest)?;
            }

            NullaryOp(mir::NullOp::SizeOf, ty) => {
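                // The type may still refer to generic parameters of the current frame;
                // substitute and normalize them away before asking for the layout.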
                let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty);
                let layout = self.layout_of(ty)?;
                assert!(
                    !layout.is_unsized(),
                    "SizeOf nullary MIR operator called for unsized type"
                );
                self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), dest)?;
            }

            Cast(kind, ref operand, _) => {
                let src = self.eval_operand(operand, None)?;
                self.cast(src, kind, dest)?;
            }

            Discriminant(place) => {
                let op = self.eval_place_to_op(place, None)?;
                let discr_val = self.read_discriminant(op)?.0;
                let size = dest.layout.size;
                self.write_scalar(Scalar::from_uint(discr_val, size), dest)?;
            }
        }

        self.dump_place(*dest);

        Ok(())
    }

    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", terminator.kind);
        self.set_span(terminator.source_info.span);

        self.eval_terminator(terminator)?;
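        // The terminator may have popped the last stack frame (e.g. a `Return` from the
        // bottom-most function), so only peek at the current block if a frame is left.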
        if !self.stack().is_empty() {
            if let Some(block) = self.frame().block {
                info!("// executing {:?}", block);
            }
        }
        Ok(())
    }
}