// src/librustc_mir/interpret/eval_context.rs (rustc 1.41.1)

use std::cell::Cell;
use std::fmt::Write;
use std::mem;

use syntax::source_map::{self, Span, DUMMY_SP};
use rustc::ich::StableHashingContext;
use rustc::hir::def_id::DefId;
use rustc::hir::def::DefKind;
use rustc::mir;
use rustc::ty::layout::{
    self, Size, Align, HasDataLayout, LayoutOf, TyLayout
};
use rustc::ty::subst::SubstsRef;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::query::TyCtxtAt;
use rustc_index::vec::IndexVec;
use rustc::mir::interpret::{
    GlobalId, Scalar, Pointer, FrameInfo, AllocId,
    InterpResult, truncate, sign_extend,
};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_macros::HashStable;

use super::{
    Immediate, Operand, MemPlace, MPlaceTy, Place, PlaceTy, ScalarMaybeUndef,
    Memory, Machine, StackPopInfo
};

pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Stores the `Machine` instance.
    pub machine: M,

    /// The results of the type checker, from rustc.
    pub tcx: TyCtxtAt<'tcx>,

    /// Bounds in scope for polymorphic evaluations.
    pub(crate) param_env: ty::ParamEnv<'tcx>,

    /// The virtual memory system.
    pub memory: Memory<'mir, 'tcx, M>,

    /// The virtual call stack.
    pub(crate) stack: Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>>,

    /// A cache for deduplicating vtables
    pub(super) vtables:
        FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Pointer<M::PointerTag>>,
}

/// A stack frame.
#[derive(Clone)]
pub struct Frame<'mir, 'tcx, Tag=(), Extra=()> {
    ////////////////////////////////////////////////////////////////////////////////
    // Function and callsite information
    ////////////////////////////////////////////////////////////////////////////////
    /// The MIR for the function called on this frame.
    pub body: &'mir mir::Body<'tcx>,

    /// The def_id and substs of the current function.
    pub instance: ty::Instance<'tcx>,

    /// The span of the call site.
    pub span: source_map::Span,

    /// Extra data for the machine.
    pub extra: Extra,

    ////////////////////////////////////////////////////////////////////////////////
    // Return place and locals
    ////////////////////////////////////////////////////////////////////////////////
    /// Work to perform when returning from this function.
    pub return_to_block: StackPopCleanup,

    /// The location where the result of the current stack frame should be written to,
    /// and its layout in the caller.
    pub return_place: Option<PlaceTy<'tcx, Tag>>,

    /// The list of locals for this stack frame, stored in order as
    /// `[return_ptr, arguments..., variables..., temporaries...]`.
    /// The locals are stored as `Option<Value>`s.
    /// `None` represents a local that is currently dead, while a live local
    /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
    pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>,

    ////////////////////////////////////////////////////////////////////////////////
    // Current position within the function
    ////////////////////////////////////////////////////////////////////////////////
    /// The block that is currently executed (or will be executed after the above call stacks
    /// return).
    /// If this is `None`, we are unwinding and this function doesn't need any clean-up.
    /// Just continue the same as with `Resume`.
    pub block: Option<mir::BasicBlock>,

    /// The index of the currently evaluated statement.
    pub stmt: usize,
}

#[derive(Clone, Eq, PartialEq, Debug, HashStable)] // Miri debug-prints these
pub enum StackPopCleanup {
    /// Jump to the next block in the caller, or cause UB if `None` (that's a function
    /// that may never return). Also store layout of return place so
    /// we can validate it at that layout.
    /// `ret` stores the block we jump to on a normal return, while `unwind`
    /// stores the block used for cleanup during unwinding.
    Goto { ret: Option<mir::BasicBlock>, unwind: Option<mir::BasicBlock> },
    /// Just do nothing: used by `main` and for the `box_alloc` hook in miri.
    /// `cleanup` says whether locals are deallocated. Static computation
    /// wants them leaked to intern what they need (and just throw away
    /// the entire `ecx` when it is done).
    None { cleanup: bool },
}
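
// A minimal sketch (not part of the original source) of how `pop_stack_frame`
// below picks the next block from a `StackPopCleanup::Goto`: the `ret` edge on
// a normal return, the `unwind` edge while unwinding. The test only exercises
// the enum defined above.
#[test]
fn stack_pop_cleanup_edge_selection_sketch() {
    let cleanup = StackPopCleanup::Goto {
        ret: Some(mir::START_BLOCK),
        unwind: None,
    };
    for &unwinding in &[false, true] {
        // Mirrors the `match frame.return_to_block` in `pop_stack_frame`:
        let next = match &cleanup {
            StackPopCleanup::Goto { ret, unwind } =>
                if unwinding { *unwind } else { *ret },
            StackPopCleanup::None { .. } => None,
        };
        if unwinding {
            assert_eq!(next, None); // no cleanup block: keep propagating upwards
        } else {
            assert_eq!(next, Some(mir::START_BLOCK));
        }
    }
}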

/// State of a local variable including a memoized layout
#[derive(Clone, PartialEq, Eq, HashStable)]
pub struct LocalState<'tcx, Tag=(), Id=AllocId> {
    pub value: LocalValue<Tag, Id>,
    /// Don't modify if `Some`; this is only used to prevent computing the layout twice.
    #[stable_hasher(ignore)]
    pub layout: Cell<Option<TyLayout<'tcx>>>,
}

/// Current value of a local variable
#[derive(Clone, PartialEq, Eq, Debug, HashStable)] // Miri debug-prints these
pub enum LocalValue<Tag=(), Id=AllocId> {
    /// This local is not currently alive, and cannot be used at all.
    Dead,
    /// This local is alive but not yet initialized. It can be written to
    /// but not read from or its address taken. Locals get initialized on
    /// first write because for unsized locals, we do not know their size
    /// before that.
    Uninitialized,
    /// A normal, live local.
    /// Mostly for convenience, we re-use the `Operand` type here.
    /// This is an optimization over just always having a pointer here;
    /// we can thus avoid doing an allocation when the local just stores
    /// immediate values *and* never has its address taken.
    Live(Operand<Tag, Id>),
}
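
// Sketch (not in the original source): the life cycle of a local, as driven by
// `push_stack_frame`, `storage_live`, and `storage_dead` below. Locals with
// `Storage*` statements start out `Dead`, become `Uninitialized` on
// `StorageLive`, and only become `Live(..)` on their first write.
#[test]
fn local_value_lifecycle_sketch() {
    // A local that has `Storage*` statements starts out dead:
    let mut local: LocalValue = LocalValue::Dead;
    // `StorageLive` replaces whatever was stored, yielding the old value:
    let old = std::mem::replace(&mut local, LocalValue::Uninitialized);
    assert_eq!(old, LocalValue::Dead);
    assert_eq!(local, LocalValue::Uninitialized);
    // `StorageDead` kills it again; the returned old value must be deallocated
    // by the caller if it was `Live(Operand::Indirect(..))`:
    let old = std::mem::replace(&mut local, LocalValue::Dead);
    assert_eq!(old, LocalValue::Uninitialized);
}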

impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
    pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
        match self.value {
            LocalValue::Dead => throw_unsup!(DeadLocal),
            LocalValue::Uninitialized =>
                bug!("The type checker should prevent reading from a never-written local"),
            LocalValue::Live(val) => Ok(val),
        }
    }

    /// Overwrite the local. If the local can be overwritten in place, return a reference
    /// to do so; otherwise return the `MemPlace` to consult instead.
    pub fn access_mut(
        &mut self,
    ) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
        match self.value {
            LocalValue::Dead => throw_unsup!(DeadLocal),
            LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
            ref mut local @ LocalValue::Live(Operand::Immediate(_)) |
            ref mut local @ LocalValue::Uninitialized => {
                Ok(Ok(local))
            }
        }
    }
}
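
// Sketch (not in the original source) of the `access_mut` contract: dead
// locals error out, while an uninitialized local hands back a mutable
// reference so that the first write can happen in place.
#[test]
fn access_mut_first_write_sketch() {
    let mut state: LocalState<'static> = LocalState {
        value: LocalValue::Uninitialized,
        layout: Cell::new(None),
    };
    match state.access_mut() {
        // An in-place write of the first value would go through this reference:
        Ok(Ok(local)) => assert_eq!(*local, LocalValue::Uninitialized),
        _ => panic!("uninitialized locals must be writable in place"),
    }
}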

impl<'mir, 'tcx, Tag, Extra> Frame<'mir, 'tcx, Tag, Extra> {
    /// Return the `SourceInfo` of the current instruction.
    pub fn current_source_info(&self) -> Option<mir::SourceInfo> {
        self.block.map(|block| {
            let block = &self.body.basic_blocks()[block];
            if self.stmt < block.statements.len() {
                block.statements[self.stmt].source_info
            } else {
                block.terminator().source_info
            }
        })
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &layout::TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx>,
{
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self.tcx
    }
}

impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx>,
{
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> LayoutOf for InterpCx<'mir, 'tcx, M> {
    type Ty = Ty<'tcx>;
    type TyLayout = InterpResult<'tcx, TyLayout<'tcx>>;

    #[inline]
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        self.tcx
            .layout_of(self.param_env.and(ty))
            .map_err(|layout| err_inval!(Layout(layout)).into())
    }
}
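
// Usage note (not in the original source): interpreter code queries layouts
// through this impl as `let layout = self.layout_of(ty)?;`. Layout failures
// surface as `err_inval!(Layout(..))` interpreter errors instead of aborting
// compilation, so callers can propagate them with `?`.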

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn new(
        tcx: TyCtxtAt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
        machine: M,
        memory_extra: M::MemoryExtra,
    ) -> Self {
        InterpCx {
            machine,
            tcx,
            param_env,
            memory: Memory::new(tcx, memory_extra),
            stack: Vec::new(),
            vtables: FxHashMap::default(),
        }
    }

    #[inline(always)]
    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        self.memory.force_ptr(scalar)
    }

    #[inline(always)]
    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size
    ) -> InterpResult<'tcx, u128> {
        self.memory.force_bits(scalar, size)
    }

    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the *canonical* machine pointer to the allocation. Must never be used
    /// for any other pointers!
    ///
    /// This represents a *direct* access to that memory, as opposed to access
    /// through a pointer that was created by the program.
    #[inline(always)]
    pub fn tag_static_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
        self.memory.tag_static_base_pointer(ptr)
    }

    #[inline(always)]
    pub fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] {
        &self.stack
    }

    #[inline(always)]
    pub fn cur_frame(&self) -> usize {
        assert!(self.stack.len() > 0);
        self.stack.len() - 1
    }

    #[inline(always)]
    pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
        self.stack.last().expect("no call frames exist")
    }

    #[inline(always)]
    pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
        self.stack.last_mut().expect("no call frames exist")
    }

    #[inline(always)]
    pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
        self.frame().body
    }

    #[inline(always)]
    pub fn sign_extend(&self, value: u128, ty: TyLayout<'_>) -> u128 {
        assert!(ty.abi.is_signed());
        sign_extend(value, ty.size)
    }

    #[inline(always)]
    pub fn truncate(&self, value: u128, ty: TyLayout<'_>) -> u128 {
        truncate(value, ty.size)
    }
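
    // Illustration (not in the original source): `truncate` and `sign_extend`
    // are plain two's-complement bit manipulation on the `u128` value space.
    // For a 1-byte layout (`ty.size.bits() == 8`):
    //
    //     truncate(0x1_23, size)  == 0x23          // mask to the low 8 bits
    //     sign_extend(0xF0, size) == 0xFF...FFF0   // as i128: -16
    //
    // i.e. `truncate` masks with `(1 << bits) - 1`, and `sign_extend` copies
    // the sign bit into every higher bit.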

    #[inline]
    pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
        ty.is_sized(self.tcx, self.param_env)
    }

    #[inline]
    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
        ty.is_freeze(*self.tcx, self.param_env, DUMMY_SP)
    }

    pub fn load_mir(
        &self,
        instance: ty::InstanceDef<'tcx>,
        promoted: Option<mir::Promoted>,
    ) -> InterpResult<'tcx, mir::ReadOnlyBodyAndCache<'tcx, 'tcx>> {
        // do not continue if typeck errors occurred (can only occur in local crate)
        let did = instance.def_id();
        if did.is_local()
            && self.tcx.has_typeck_tables(did)
            && self.tcx.typeck_tables_of(did).tainted_by_errors
        {
            throw_inval!(TypeckError)
        }
        trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
        if let Some(promoted) = promoted {
            return Ok(self.tcx.promoted_mir(did)[promoted].unwrap_read_only());
        }
        match instance {
            ty::InstanceDef::Item(def_id) => if self.tcx.is_mir_available(did) {
                Ok(self.tcx.optimized_mir(did).unwrap_read_only())
            } else {
                throw_unsup!(NoMirFor(self.tcx.def_path_str(def_id)))
            },
            _ => Ok(self.tcx.instance_mir(instance)),
        }
    }

    /// Call this on things you got out of the MIR (so it is as generic as the current
    /// stack frame), to bring it into the proper environment for this interpreter.
    pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
        &self,
        value: T,
    ) -> T {
        self.tcx.subst_and_normalize_erasing_regions(
            self.frame().instance.substs,
            self.param_env,
            &value,
        )
    }

    /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
    pub(super) fn resolve(
        &self,
        def_id: DefId,
        substs: SubstsRef<'tcx>
    ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
        trace!("resolve: {:?}, {:#?}", def_id, substs);
        trace!("param_env: {:#?}", self.param_env);
        trace!("substs: {:#?}", substs);
        ty::Instance::resolve(
            *self.tcx,
            self.param_env,
            def_id,
            substs,
        ).ok_or_else(|| err_inval!(TooGeneric).into())
    }

    pub fn layout_of_local(
        &self,
        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        local: mir::Local,
        layout: Option<TyLayout<'tcx>>,
    ) -> InterpResult<'tcx, TyLayout<'tcx>> {
        // `const_prop` runs into this with an invalid (empty) frame, so we
        // have to support that case (mostly by skipping all caching).
        match frame.locals.get(local).and_then(|state| state.layout.get()) {
            None => {
                let layout = crate::interpret::operand::from_known_layout(layout, || {
                    let local_ty = frame.body.local_decls[local].ty;
                    let local_ty = self.tcx.subst_and_normalize_erasing_regions(
                        frame.instance.substs,
                        self.param_env,
                        &local_ty,
                    );
                    self.layout_of(local_ty)
                })?;
                if let Some(state) = frame.locals.get(local) {
                    // Layouts of locals are requested a lot, so we cache them.
                    state.layout.set(Some(layout));
                }
                Ok(layout)
            }
            Some(layout) => Ok(layout),
        }
    }

    /// Returns the actual dynamic size and alignment of the place at the given type.
    /// Only the "meta" (metadata) part of the place matters.
    /// This can fail to provide an answer for extern types.
    pub(super) fn size_and_align_of(
        &self,
        metadata: Option<Scalar<M::PointerTag>>,
        layout: TyLayout<'tcx>,
    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
        if !layout.is_unsized() {
            return Ok(Some((layout.size, layout.align.abi)));
        }
        match layout.ty.kind {
            ty::Adt(..) | ty::Tuple(..) => {
                // First get the size of all statically known fields.
                // Don't use type_of::sizing_type_of because that expects the type to
                // be sized, and it also rounds up to alignment, which we want to avoid,
                // as the unsized field's alignment could be smaller.
                assert!(!layout.ty.is_simd());
                trace!("DST layout: {:?}", layout);

                let sized_size = layout.fields.offset(layout.fields.count() - 1);
                let sized_align = layout.align.abi;
                trace!(
                    "DST {} statically sized prefix size: {:?} align: {:?}",
                    layout.ty,
                    sized_size,
                    sized_align
                );

                // Recurse to get the size of the dynamically sized field (must be
                // the last field). Can't have foreign types here; how would we
                // adjust alignment and size for them?
                let field = layout.field(self, layout.fields.count() - 1)?;
                let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? {
                    Some(size_and_align) => size_and_align,
                    None => {
                        // A field with extern type. If this field is at offset 0, we behave
                        // like the underlying extern type.
                        // FIXME: Once we have made decisions for how to handle size and alignment
                        // of `extern type`, this should be adapted. It is just a temporary hack
                        // to get some code to work that probably ought to work.
                        if sized_size == Size::ZERO {
                            return Ok(None)
                        } else {
                            bug!("Fields cannot be extern types, unless they are at offset 0")
                        }
                    }
                };

                // FIXME (#26403, #27023): We should be adding padding
                // to `sized_size` (to accommodate the `unsized_align`
                // required of the unsized field that follows) before
                // summing it with `sized_size`. (Note that since #26403
                // is unfixed, we do not yet add the necessary padding
                // here. But this is where the add would go.)

                // Return the sum of sizes and max of aligns.
                let size = sized_size + unsized_size;

                // Choose max of two known alignments (combined value must
                // be aligned according to more restrictive of the two).
                let align = sized_align.max(unsized_align);

                // Issue #27023: must add any necessary padding to `size`
                // (to make it a multiple of `align`) before returning it.
                let size = size.align_to(align);

                // Check if this brought us over the size limit.
                if size.bytes() >= self.tcx.data_layout().obj_size_bound() {
                    throw_ub_format!("wide pointer metadata contains invalid information: \
                        total size is bigger than largest supported object");
                }
                Ok(Some((size, align)))
            }
            ty::Dynamic(..) => {
                let vtable = metadata.expect("dyn trait fat ptr must have vtable");
                // Read size and align from vtable (already checks size).
                Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
            }

            ty::Slice(_) | ty::Str => {
                let len = metadata.expect("slice fat ptr must have length").to_machine_usize(self)?;
                let elem = layout.field(self, 0)?;

                // Make sure the slice is not too big.
                let size = elem.size.checked_mul(len, &*self.tcx)
                    .ok_or_else(|| err_ub_format!("invalid slice: \
                        total size is bigger than largest supported object"))?;
                Ok(Some((size, elem.align.abi)))
            }

            ty::Foreign(_) => {
                Ok(None)
            }

            _ => bug!("size_and_align_of::<{:?}> not supported", layout.ty),
        }
    }
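
    // Worked example (not in the original source): for a hypothetical
    // `struct S { a: u16, tail: [u8] }` with slice length 5 in the metadata,
    // the `Adt`/`Tuple` branch above computes
    //
    //     sized_size    = offset of `tail`       = 2 bytes
    //     sized_align   = align of the prefix    = 2
    //     unsized_size  = 5 * size_of::<u8>()    = 5 bytes
    //     unsized_align = align of `u8`          = 1
    //     size          = (2 + 5).align_to(2)    = 8 bytes, align = 2
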
    #[inline]
    pub fn size_and_align_of_mplace(
        &self,
        mplace: MPlaceTy<'tcx, M::PointerTag>
    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
        self.size_and_align_of(mplace.meta, mplace.layout)
    }

    pub fn push_stack_frame(
        &mut self,
        instance: ty::Instance<'tcx>,
        span: Span,
        body: &'mir mir::Body<'tcx>,
        return_place: Option<PlaceTy<'tcx, M::PointerTag>>,
        return_to_block: StackPopCleanup,
    ) -> InterpResult<'tcx> {
        if self.stack.len() > 0 {
            info!("PAUSING({}) {}", self.cur_frame(), self.frame().instance);
        }
        ::log_settings::settings().indentation += 1;

        // first push a stack frame so we have access to the local substs
        let extra = M::stack_push(self)?;
        self.stack.push(Frame {
            body,
            block: Some(mir::START_BLOCK),
            return_to_block,
            return_place,
            // empty local array, we fill it in below, after we are inside the stack frame and
            // all methods actually know about the frame
            locals: IndexVec::new(),
            span,
            instance,
            stmt: 0,
            extra,
        });

        // don't allocate at all for trivial constants
        if body.local_decls.len() > 1 {
            // Locals are initially uninitialized.
            let dummy = LocalState {
                value: LocalValue::Uninitialized,
                layout: Cell::new(None),
            };
            let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
            // Return place is handled specially by the `eval_place` functions, and the
            // entry in `locals` should never be used. Make it dead, to be sure.
            locals[mir::RETURN_PLACE].value = LocalValue::Dead;
            // Now mark as dead those locals that we do not want to initialize.
            match self.tcx.def_kind(instance.def_id()) {
                // statics and constants don't have `Storage*` statements, no need to look for them
                Some(DefKind::Static)
                | Some(DefKind::Const)
                | Some(DefKind::AssocConst) => {},
                _ => {
                    trace!("push_stack_frame: {:?}: num_bbs: {}", span, body.basic_blocks().len());
                    for block in body.basic_blocks() {
                        for stmt in block.statements.iter() {
                            use rustc::mir::StatementKind::{StorageDead, StorageLive};
                            match stmt.kind {
                                StorageLive(local) |
                                StorageDead(local) => {
                                    locals[local].value = LocalValue::Dead;
                                }
                                _ => {}
                            }
                        }
                    }
                },
            }
            // done
            self.frame_mut().locals = locals;
        }

        info!("ENTERING({}) {}", self.cur_frame(), self.frame().instance);

        if self.stack.len() > *self.tcx.sess.recursion_limit.get() {
            throw_exhaust!(StackFrameLimitReached)
        } else {
            Ok(())
        }
    }
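
    // Typical call shape (sketch, not in the original source; the exact caller
    // lives in the terminator-evaluation code of this module):
    //
    //     ecx.push_stack_frame(
    //         instance,                 // callee, fully monomorphized
    //         span,                     // call-site span, for diagnostics
    //         body,                     // e.g. obtained via `load_mir`
    //         Some(dest),               // where the return value is written
    //         StackPopCleanup::Goto { ret: Some(ret_block), unwind },
    //     )?;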

    /// Jump to the given block.
    #[inline]
    pub fn go_to_block(&mut self, target: mir::BasicBlock) {
        let frame = self.frame_mut();
        frame.block = Some(target);
        frame.stmt = 0;
    }

    /// *Return* to the given `target` basic block.
    /// Do *not* use for unwinding! Use `unwind_to_block` instead.
    ///
    /// If `target` is `None`, that indicates the function cannot return, so we raise UB.
    pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
        if let Some(target) = target {
            Ok(self.go_to_block(target))
        } else {
            throw_ub!(Unreachable)
        }
    }

    /// *Unwind* to the given `target` basic block.
    /// Do *not* use for returning! Use `return_to_block` instead.
    ///
    /// If `target` is `None`, that indicates the function does not need cleanup during
    /// unwinding, and we will just keep propagating that upwards.
    pub fn unwind_to_block(&mut self, target: Option<mir::BasicBlock>) {
        let frame = self.frame_mut();
        frame.block = target;
        frame.stmt = 0;
    }

    /// Pops the current frame from the stack, deallocating the
    /// memory for allocated locals.
    ///
    /// If `unwinding` is `false`, then we are performing a normal return
    /// from a function. In this case, we jump back into the frame of the caller,
    /// and continue execution as normal.
    ///
    /// If `unwinding` is `true`, then we are in the middle of a panic,
    /// and need to unwind this frame. In this case, we jump to the
    /// `cleanup` block for the function, which is responsible for running
    /// `Drop` impls for any locals that have been initialized at this point.
    /// The cleanup block ends with a special `Resume` terminator, which will
    /// cause us to continue unwinding.
    pub(super) fn pop_stack_frame(
        &mut self,
        unwinding: bool
    ) -> InterpResult<'tcx> {
        info!("LEAVING({}) {} (unwinding = {})",
            self.cur_frame(), self.frame().instance, unwinding);

        // Sanity check `unwinding`.
        assert_eq!(
            unwinding,
            match self.frame().block {
                None => true,
                Some(block) => self.body().basic_blocks()[block].is_cleanup
            }
        );

        ::log_settings::settings().indentation -= 1;
        let frame = self.stack.pop().expect(
            "tried to pop a stack frame, but there were none",
        );
        let stack_pop_info = M::stack_pop(self, frame.extra, unwinding)?;
        if let (false, StackPopInfo::StopUnwinding) = (unwinding, stack_pop_info) {
            bug!("Attempted to stop unwinding while there is no unwinding!");
        }

        // Now where do we jump next?

        // Determine if we leave this function normally or via unwinding.
        let cur_unwinding = if let StackPopInfo::StopUnwinding = stack_pop_info {
            false
        } else {
            unwinding
        };

        // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
        // In that case, we return early. We also avoid validation in that case,
        // because this is CTFE and the final value will be thoroughly validated anyway.
        let (cleanup, next_block) = match frame.return_to_block {
            StackPopCleanup::Goto { ret, unwind } => {
                (true, Some(if cur_unwinding { unwind } else { ret }))
            },
            StackPopCleanup::None { cleanup, .. } => (cleanup, None)
        };

        if !cleanup {
            assert!(self.stack.is_empty(), "only the topmost frame should ever be leaked");
            assert!(next_block.is_none(), "tried to skip cleanup when we have a next block!");
            // Leak the locals, skip validation.
            return Ok(());
        }

        // Cleanup: deallocate all locals that are backed by an allocation.
        for local in frame.locals {
            self.deallocate_local(local.value)?;
        }

        trace!("StackPopCleanup: {:?} StackPopInfo: {:?} cur_unwinding = {:?}",
            frame.return_to_block, stack_pop_info, cur_unwinding);
        if cur_unwinding {
            // Follow the unwind edge.
            let unwind = next_block.expect("Encountered StackPopCleanup::None when unwinding!");
            self.unwind_to_block(unwind);
        } else {
            // Follow the normal return edge.
            // Validate the return value. Do this after deallocating so that we catch dangling
            // references.
            if let Some(return_place) = frame.return_place {
                if M::enforce_validity(self) {
                    // Data got changed, better make sure it matches the type!
                    // It is still possible that the return place held invalid data while
                    // the function is running, but that's okay because nobody could have
                    // accessed that same data from the "outside" to observe any broken
                    // invariant -- that is, unless a function somehow has a ptr to
                    // its return place... but the way MIR is currently generated, the
                    // return place is always a local and then this cannot happen.
                    self.validate_operand(
                        self.place_to_op(return_place)?,
                        vec![],
                        None,
                    )?;
                }
            } else {
                // Uh, that shouldn't happen... the function did not intend to return
                throw_ub!(Unreachable);
            }

            // Jump to new block -- *after* validation so that the spans make more sense.
            if let Some(ret) = next_block {
                self.return_to_block(ret)?;
            }
        }

        if self.stack.len() > 0 {
            info!("CONTINUING({}) {} (unwinding = {})",
                self.cur_frame(), self.frame().instance, cur_unwinding);
        }

        Ok(())
    }

    /// Mark a storage as live, killing the previous content and returning it.
    /// Remember to deallocate that!
    pub fn storage_live(
        &mut self,
        local: mir::Local
    ) -> InterpResult<'tcx, LocalValue<M::PointerTag>> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
        trace!("{:?} is now live", local);

        let local_val = LocalValue::Uninitialized;
        // StorageLive *always* kills the value that's currently stored.
        // However, we do not error if the variable already is live;
        // see <https://github.com/rust-lang/rust/issues/42371>.
        Ok(mem::replace(&mut self.frame_mut().locals[local].value, local_val))
    }

    /// Returns the old value of the local.
    /// Remember to deallocate that!
    pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue<M::PointerTag> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
        trace!("{:?} is now dead", local);

        mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead)
    }
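
    // Note (sketch, not in the original source): callers are expected to pair
    // the value returned by `storage_dead` with `deallocate_local` below, e.g.
    //
    //     let old_val = ecx.storage_dead(local);
    //     ecx.deallocate_local(old_val)?;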

    pub(super) fn deallocate_local(
        &mut self,
        local: LocalValue<M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // FIXME: should we tell the user that there was a local which was never written to?
        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
            trace!("deallocating local");
            let ptr = ptr.to_ptr()?;
            if log_enabled!(::log::Level::Trace) {
                self.memory.dump_alloc(ptr.alloc_id);
            }
            self.memory.deallocate_local(ptr)?;
        };
        Ok(())
    }

    pub fn const_eval_raw(
        &self,
        gid: GlobalId<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // FIXME(oli-obk): make this check an assertion that it's not a static here
        // FIXME(RalfJ, oli-obk): document that `Place::Static` can never be anything but a static
        // and `ConstValue::Unevaluated` can never be a static
        let param_env = if self.tcx.is_static(gid.instance.def_id()) {
            ty::ParamEnv::reveal_all()
        } else {
            self.param_env
        };
        // We use `const_eval_raw` here, and get an unvalidated result. That is okay:
        // Our result will later be validated anyway, and there seems no good reason
        // to have to fail early here. This is also more consistent with
        // `Memory::get_static_alloc` which has to use `const_eval_raw` to avoid cycles.
        let val = self.tcx.const_eval_raw(param_env.and(gid))?;
        self.raw_const_to_mplace(val)
    }

    pub fn dump_place(&self, place: Place<M::PointerTag>) {
        // Debug output
        if !log_enabled!(::log::Level::Trace) {
            return;
        }
        match place {
            Place::Local { frame, local } => {
                let mut allocs = Vec::new();
                let mut msg = format!("{:?}", local);
                if frame != self.cur_frame() {
                    write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap();
                }
                write!(msg, ":").unwrap();

                match self.stack[frame].locals[local].value {
                    LocalValue::Dead => write!(msg, " is dead").unwrap(),
                    LocalValue::Uninitialized => write!(msg, " is uninitialized").unwrap(),
                    LocalValue::Live(Operand::Indirect(mplace)) => {
                        match mplace.ptr {
                            Scalar::Ptr(ptr) => {
                                write!(msg, " by align({}){} ref:",
                                    mplace.align.bytes(),
                                    match mplace.meta {
                                        Some(meta) => format!(" meta({:?})", meta),
                                        None => String::new()
                                    }
                                ).unwrap();
                                allocs.push(ptr.alloc_id);
                            }
                            ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
                        }
                    }
                    LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
                        write!(msg, " {:?}", val).unwrap();
                        if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val {
                            allocs.push(ptr.alloc_id);
                        }
                    }
                    LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
                        write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
                        if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 {
                            allocs.push(ptr.alloc_id);
                        }
                        if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val2 {
                            allocs.push(ptr.alloc_id);
                        }
                    }
                }

                trace!("{}", msg);
                self.memory.dump_allocs(allocs);
            }
            Place::Ptr(mplace) => {
                match mplace.ptr {
                    Scalar::Ptr(ptr) => {
                        trace!("by align({}) ref:", mplace.align.bytes());
                        self.memory.dump_alloc(ptr.alloc_id);
                    }
                    ptr => trace!(" integral by ref: {:?}", ptr),
                }
            }
        }
    }

    pub fn generate_stacktrace(&self, explicit_span: Option<Span>) -> Vec<FrameInfo<'tcx>> {
        let mut last_span = None;
        let mut frames = Vec::new();
        for frame in self.stack().iter().rev() {
            // make sure we don't emit frames that are duplicates of the previous one
            if explicit_span == Some(frame.span) {
                last_span = Some(frame.span);
                continue;
            }
            if let Some(last) = last_span {
                if last == frame.span {
                    continue;
                }
            } else {
                last_span = Some(frame.span);
            }

            let lint_root = frame.current_source_info().and_then(|source_info| {
                match &frame.body.source_scopes[source_info.scope].local_data {
                    mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
                    mir::ClearCrossCrate::Clear => None,
                }
            });

            frames.push(FrameInfo { call_site: frame.span, instance: frame.instance, lint_root });
        }
        trace!("generate stacktrace: {:#?}, {:?}", frames, explicit_span);
        frames
    }
}

impl<'ctx, 'mir, 'tcx, Tag, Extra> HashStable<StableHashingContext<'ctx>>
    for Frame<'mir, 'tcx, Tag, Extra>
    where Extra: HashStable<StableHashingContext<'ctx>>,
          Tag: HashStable<StableHashingContext<'ctx>>
{
    fn hash_stable(&self, hcx: &mut StableHashingContext<'ctx>, hasher: &mut StableHasher) {
        self.body.hash_stable(hcx, hasher);
        self.instance.hash_stable(hcx, hasher);
        self.span.hash_stable(hcx, hasher);
        self.return_to_block.hash_stable(hcx, hasher);
        self.return_place.as_ref().map(|r| &**r).hash_stable(hcx, hasher);
        self.locals.hash_stable(hcx, hasher);
        self.block.hash_stable(hcx, hasher);
        self.stmt.hash_stable(hcx, hasher);
        self.extra.hash_stable(hcx, hasher);
    }
}