// compiler/rustc_mir/src/interpret/step.rs (upstream rustc 1.52.0~beta.3)

//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.

use crate::interpret::OpTy;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_target::abi::LayoutOf;

use super::{InterpCx, Machine};

/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
/// same type as the result.
#[inline]
fn binop_left_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
        Eq | Ne | Lt | Le | Gt | Ge => false,
    }
}

/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
/// same type as the LHS.
#[inline]
fn binop_right_homogeneous(op: mir::BinOp) -> bool {
    use rustc_middle::mir::BinOp::*;
    match op {
        Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
        Offset | Shl | Shr => false,
    }
}
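
// A minimal illustration of the asymmetry these predicates capture: `Offset`
// takes a pointer LHS and a `usize` RHS, with the result typed like the LHS,
// so it is left- but not right-homogeneous. Shifts similarly allow the RHS to
// be a different integer type than the LHS:
//
//     let x: u64 = 1u64 << 3u8; // LHS `u64`, RHS `u8`, result `u64`
//
// `eval_rvalue_into_place` below uses these predicates to decide when an
// operand's layout can be reused instead of recomputed.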

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn run(&mut self) -> InterpResult<'tcx> {
        while self.step()? {}
        Ok(())
    }

    /// Returns `true` as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    ///
    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`.
    #[inline(always)]
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
        if self.stack().is_empty() {
            return Ok(false);
        }

        let loc = match self.frame().loc {
            Ok(loc) => loc,
            Err(_) => {
                // We are unwinding and this fn has no cleanup code.
                // Just go on unwinding.
                trace!("unwinding: skipping frame");
                self.pop_stack_frame(/* unwinding */ true)?;
                return Ok(true);
            }
        };
        let basic_block = &self.body().basic_blocks()[loc.block];

        let old_frames = self.frame_idx();

        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
            assert_eq!(old_frames, self.frame_idx());
            self.statement(stmt)?;
            return Ok(true);
        }

        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        assert_eq!(old_frames, self.frame_idx());
        self.terminator(terminator)?;
        Ok(true)
    }
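
    // A hedged sketch of external single-stepping, as a debugger like priroda
    // might drive it (`ecx` and `inspect` are hypothetical names, not part of
    // this file):
    //
    //     while ecx.step()? {
    //         inspect(&ecx); // examine frames, locals, memory between steps
    //     }
    //
    // `run` above is exactly this loop without the inspection hook.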

    /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
    /// statement counter. This also moves the statement counter forward.
    crate fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", stmt);

        use rustc_middle::mir::StatementKind::*;

        // Some statements (e.g., box) push new stack frames.
        // We have to record the stack frame number *before* executing the statement.
        let frame_idx = self.frame_idx();

        match &stmt.kind {
            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,

            SetDiscriminant { place, variant_index } => {
                let dest = self.eval_place(**place)?;
                self.write_discriminant(*variant_index, &dest)?;
            }

            // Mark locals as alive
            StorageLive(local) => {
                self.storage_live(*local)?;
            }

            // Mark locals as dead
            StorageDead(local) => {
                self.storage_dead(*local)?;
            }

            // No dynamic semantics attached to `FakeRead`; MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            // Stacked Borrows.
            Retag(kind, place) => {
                let dest = self.eval_place(**place)?;
                M::retag(self, *kind, &dest)?;
            }

            // Call CopyNonOverlapping
            CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping { src, dst, count }) => {
                let src = self.eval_operand(src, None)?;
                let dst = self.eval_operand(dst, None)?;
                let count = self.eval_operand(count, None)?;
                self.copy(&src, &dst, &count, /* nonoverlapping */ true)?;
            }

            // Statements we do not track.
            AscribeUserType(..) => {}

            // Currently, Miri discards Coverage statements. Coverage statements are only injected
            // via an optional compile time MIR pass and have no side effects. Since Coverage
            // statements don't exist at the source level, it is safe for Miri to ignore them, even
            // for undefined behavior (UB) checks.
            //
            // A coverage counter inside a const expression (for example, a counter injected in a
            // const function) is discarded when the const is evaluated at compile time. Whether
            // this should change, and/or how to implement a const eval counter, is a subject of the
            // following issue:
            //
            // FIXME(#73156): Handle source code coverage in const eval
            Coverage(..) => {}

            // Defined to do nothing. These are added by optimization passes, to avoid changing the
            // size of MIR constantly.
            Nop => {}

            LlvmInlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
        }

        self.stack_mut()[frame_idx].loc.as_mut().unwrap().statement_index += 1;
        Ok(())
    }
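
    // For orientation, a hedged sketch of what `statement` dispatches over: a
    // simple body like `let x = 5;` lowers to MIR statements of roughly the
    // shape
    //
    //     StorageLive(_1);
    //     _1 = const 5_i32;
    //     ...
    //     StorageDead(_1);
    //
    // Each of those hits one arm of the `match` above, after which the
    // statement counter advances to the next statement in the block.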

    pub(crate) fn copy(
        &mut self,
        src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
        let (size, align) = (layout.size, layout.align.abi);
        let size = size.checked_mul(count, self).ok_or_else(|| {
            err_ub_format!(
                "overflow computing total size of `{}`",
                if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
            )
        })?;

        // Make sure we check both pointers for an access of the total size and alignment,
        // *even if* the total size is 0.
        let src =
            self.memory.check_ptr_access(self.read_scalar(&src)?.check_init()?, size, align)?;

        let dst =
            self.memory.check_ptr_access(self.read_scalar(&dst)?.check_init()?, size, align)?;

        if let (Some(src), Some(dst)) = (src, dst) {
            self.memory.copy(src, dst, size, nonoverlapping)?;
        }
        Ok(())
    }
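
    // For reference, `copy` implements the semantics of the standard-library
    // copy intrinsics, where the byte count is `count * size_of::<T>()`; that
    // product is why the multiplication above must be overflow-checked. In
    // surface Rust:
    //
    //     let src = [1u32, 2, 3];
    //     let mut dst = [0u32; 3];
    //     // copies 3 * 4 = 12 bytes; the ranges must not overlap
    //     unsafe { std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), 3) };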

    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;

        use rustc_middle::mir::Rvalue::*;
        match *rvalue {
            ThreadLocalRef(did) => {
                let id = M::thread_local_static_alloc_id(self, did)?;
                let val = self.global_base_pointer(id.into())?;
                self.write_scalar(val, &dest)?;
            }

            Use(ref operand) => {
                // Avoid recomputing the layout
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(&op, &dest)?;
            }

            BinaryOp(bin_op, box (ref left, ref right)) => {
                let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
                let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
                self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
            }

            CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
                // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
                let left = self.read_immediate(&self.eval_operand(left, None)?)?;
                let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
                self.binop_with_overflow(bin_op, &left, &right, &dest)?;
            }
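
            // (Illustrative aside: `CheckedBinaryOp` is what overflow-checked
            // arithmetic lowers to. With overflow checks on, `let c = a + b;`
            // becomes roughly `_tmp = CheckedAdd(a, b)` followed by an
            // `assert(!_tmp.1)` terminator in MIR, so the destination here is
            // a `(T, bool)` pair rather than a plain `T`.)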

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
                let val = self.unary_op(un_op, &val)?;
                assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
                self.write_immediate(*val, &dest)?;
            }

            Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
                        self.write_discriminant(variant_index, &dest)?;
                        if adt_def.is_enum() {
                            (self.place_downcast(&dest, variant_index)?, active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None),
                };

                for (i, operand) in operands.iter().enumerate() {
                    let op = self.eval_operand(operand, None)?;
                    // Ignore zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field_dest = self.place_field(&dest, field_index)?;
                        self.copy_op(&op, &field_dest)?;
                    }
                }
            }

            Repeat(ref operand, _) => {
                let op = self.eval_operand(operand, None)?;
                let dest = self.force_allocation(&dest)?;
                let length = dest.len(self)?;

                if let Some(first_ptr) = self.check_mplace_access(&dest, None)? {
                    // Write the first.
                    let first = self.mplace_field(&dest, 0)?;
                    self.copy_op(&op, &first.into())?;

                    if length > 1 {
                        let elem_size = first.layout.size;
                        // Copy the rest. This is performance-sensitive code
                        // for big static/const arrays!
                        let rest_ptr = first_ptr.offset(elem_size, self)?;
                        self.memory.copy_repeatedly(
                            first_ptr,
                            rest_ptr,
                            elem_size,
                            length - 1,
                            /*nonoverlapping:*/ true,
                        )?;
                    }
                }
            }
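
            // (Illustrative aside: the `Repeat` arm implements array repeat
            // expressions such as `let a = [0u8; 1024];`. The initializer is
            // evaluated once, written to element 0, and then replicated across
            // the remaining `length - 1` slots by `copy_repeatedly` instead of
            // being re-evaluated per element.)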

            Len(place) => {
                // FIXME(CTFE): don't allow computing the length of arrays in const eval
                let src = self.eval_place(place)?;
                let mplace = self.force_allocation(&src)?;
                let len = mplace.len(self)?;
                self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
            }

            AddressOf(_, place) | Ref(_, _, place) => {
                let src = self.eval_place(place)?;
                let place = self.force_allocation(&src)?;
                if place.layout.size.bytes() > 0 {
                    // definitely not a ZST
                    assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
                }
                self.write_immediate(place.to_ref(), &dest)?;
            }

            NullaryOp(mir::NullOp::Box, _) => {
                M::box_alloc(self, &dest)?;
            }

            NullaryOp(mir::NullOp::SizeOf, ty) => {
                let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty);
                let layout = self.layout_of(ty)?;
                if layout.is_unsized() {
                    // FIXME: This should be a span_bug (#80742)
                    self.tcx.sess.delay_span_bug(
                        self.frame().current_span(),
                        &format!("SizeOf nullary MIR operator called for unsized type {}", ty),
                    );
                    throw_inval!(SizeOfUnsizedType(ty));
                }
                self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), &dest)?;
            }

            Cast(cast_kind, ref operand, cast_ty) => {
                let src = self.eval_operand(operand, None)?;
                let cast_ty = self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty);
                self.cast(&src, cast_kind, cast_ty, &dest)?;
            }

            Discriminant(place) => {
                let op = self.eval_place_to_op(place, None)?;
                let discr_val = self.read_discriminant(&op)?.0;
                self.write_scalar(discr_val, &dest)?;
            }
        }

        trace!("{:?}", self.dump_place(*dest));

        Ok(())
    }
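
    // A worked example of the flow above (hedged sketch): for a MIR assignment
    // `_2 = Add(move _1, const 7_i32)`, `eval_rvalue_into_place` evaluates the
    // place `_2` once, reads both operands as immediates, and writes the sum
    // directly into `_2`'s slot; no intermediate rvalue is ever materialized,
    // which is why there is no separate `eval_rvalue` function.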

    fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", terminator.kind);

        self.eval_terminator(terminator)?;
        if !self.stack().is_empty() {
            if let Ok(loc) = self.frame().loc {
                info!("// executing {:?}", loc.block);
            }
        }
        Ok(())
    }
}