// src/librustc_mir/interpret/eval_context.rs (rustc 1.44.1)
use std::cell::Cell;
use std::fmt::Write;
use std::mem;

use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
use rustc_index::vec::IndexVec;
use rustc_macros::HashStable;
use rustc_middle::ich::StableHashingContext;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{
    sign_extend, truncate, AllocId, FrameInfo, GlobalId, InterpResult, Pointer, Scalar,
};
use rustc_middle::ty::layout::{self, TyAndLayout};
use rustc_middle::ty::{
    self, fold::BottomUpFolder, query::TyCtxtAt, subst::SubstsRef, Ty, TyCtxt, TypeFoldable,
};
use rustc_span::{source_map::DUMMY_SP, Span};
use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size, TargetDataLayout};

use super::{
    Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, OpTy, Operand, Place, PlaceTy,
    ScalarMaybeUndef, StackPopJump,
};
use crate::util::storage::AlwaysLiveLocals;

pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Stores the `Machine` instance.
    ///
    /// Note: the stack is provided by the machine.
    pub machine: M,

    /// The results of the type checker, from rustc.
    pub tcx: TyCtxtAt<'tcx>,

    /// Bounds in scope for polymorphic evaluations.
    pub(crate) param_env: ty::ParamEnv<'tcx>,

    /// The virtual memory system.
    pub memory: Memory<'mir, 'tcx, M>,

    /// A cache for deduplicating vtables.
    pub(super) vtables:
        FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Pointer<M::PointerTag>>,
}

/// A stack frame.
#[derive(Clone)]
pub struct Frame<'mir, 'tcx, Tag = (), Extra = ()> {
    ////////////////////////////////////////////////////////////////////////////////
    // Function and callsite information
    ////////////////////////////////////////////////////////////////////////////////
    /// The MIR for the function called on this frame.
    pub body: &'mir mir::Body<'tcx>,

    /// The def_id and substs of the current function.
    pub instance: ty::Instance<'tcx>,

    /// Extra data for the machine.
    pub extra: Extra,

    ////////////////////////////////////////////////////////////////////////////////
    // Return place and locals
    ////////////////////////////////////////////////////////////////////////////////
    /// Work to perform when returning from this function.
    pub return_to_block: StackPopCleanup,

    /// The location where the result of the current stack frame should be written to,
    /// and its layout in the caller.
    pub return_place: Option<PlaceTy<'tcx, Tag>>,

    /// The list of locals for this stack frame, stored in order as
    /// `[return_ptr, arguments..., variables..., temporaries...]`.
    /// The locals are stored as `Option<Value>`s.
    /// `None` represents a local that is currently dead, while a live local
    /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
    pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>,

    ////////////////////////////////////////////////////////////////////////////////
    // Current position within the function
    ////////////////////////////////////////////////////////////////////////////////
    /// The block that is currently being executed (or will be executed once the
    /// frames above it return).
    /// If this is `None`, we are unwinding and this function does not need any clean-up;
    /// just continue the same as with `Resume`.
    pub block: Option<mir::BasicBlock>,

    /// The index of the currently evaluated statement.
    pub stmt: usize,
}

#[derive(Clone, Eq, PartialEq, Debug, HashStable)] // Miri debug-prints these
pub enum StackPopCleanup {
    /// Jump to the next block in the caller, or cause UB if `None` (that's a function
    /// that must never return). Also store the layout of the return place so
    /// we can validate it at that layout.
    /// `ret` stores the block we jump to on a normal return, while `unwind`
    /// stores the block used for cleanup during unwinding.
    Goto { ret: Option<mir::BasicBlock>, unwind: Option<mir::BasicBlock> },
    /// Just do nothing: used by `main` and for the `box_alloc` hook in Miri.
    /// `cleanup` says whether locals are deallocated. Static computation
    /// wants them leaked to intern what they need (and just throws away
    /// the entire `ecx` when it is done).
    None { cleanup: bool },
}

/// State of a local variable, including a memoized layout.
#[derive(Clone, PartialEq, Eq, HashStable)]
pub struct LocalState<'tcx, Tag = (), Id = AllocId> {
    pub value: LocalValue<Tag, Id>,
    /// Don't modify if `Some`; this is only used to prevent computing the layout twice.
    #[stable_hasher(ignore)]
    pub layout: Cell<Option<TyAndLayout<'tcx>>>,
}

/// Current value of a local variable.
#[derive(Copy, Clone, PartialEq, Eq, Debug, HashStable)] // Miri debug-prints these
pub enum LocalValue<Tag = (), Id = AllocId> {
    /// This local is not currently alive, and cannot be used at all.
    Dead,
    /// This local is alive but not yet initialized. It can be written to,
    /// but it cannot be read from or have its address taken. Locals get
    /// initialized on the first write because, for unsized locals, we do not
    /// know their size before that.
    Uninitialized,
    /// A normal, live local.
    /// Mostly for convenience, we re-use the `Operand` type here.
    /// This is an optimization over just always having a pointer here;
    /// we can thus avoid doing an allocation when the local just stores
    /// immediate values *and* never has its address taken.
    Live(Operand<Tag, Id>),
}
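
// Illustrative note (not part of the original source): over its lifetime a local typically
// moves `Dead -> Uninitialized` (when the frame is set up, or on `StorageLive`; see
// `storage_live` below), then `-> Live` on its first write (handled by the place/operand
// code elsewhere in the interpreter), and finally back to `Dead` on `StorageDead` or when
// the frame is popped.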

impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
    pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
        match self.value {
            LocalValue::Dead => throw_ub!(DeadLocal),
            LocalValue::Uninitialized => {
                bug!("The type checker should prevent reading from a never-written local")
            }
            LocalValue::Live(val) => Ok(val),
        }
    }

    /// Overwrite the local. If the local can be overwritten in place, return a reference
    /// to do so; otherwise return the `MemPlace` to consult instead.
    pub fn access_mut(
        &mut self,
    ) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
        match self.value {
            LocalValue::Dead => throw_ub!(DeadLocal),
            LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
            ref mut
            local @ (LocalValue::Live(Operand::Immediate(_)) | LocalValue::Uninitialized) => {
                Ok(Ok(local))
            }
        }
    }
}

impl<'mir, 'tcx, Tag> Frame<'mir, 'tcx, Tag> {
    pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Tag, Extra> {
        Frame {
            body: self.body,
            instance: self.instance,
            return_to_block: self.return_to_block,
            return_place: self.return_place,
            locals: self.locals,
            block: self.block,
            stmt: self.stmt,
            extra,
        }
    }
}

impl<'mir, 'tcx, Tag, Extra> Frame<'mir, 'tcx, Tag, Extra> {
    /// Return the `SourceInfo` of the current instruction.
    pub fn current_source_info(&self) -> Option<mir::SourceInfo> {
        self.block.map(|block| {
            let block = &self.body.basic_blocks()[block];
            if self.stmt < block.statements.len() {
                block.statements[self.stmt].source_info
            } else {
                block.terminator().source_info
            }
        })
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx>,
{
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self.tcx
    }
}

impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx>,
{
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> LayoutOf for InterpCx<'mir, 'tcx, M> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = InterpResult<'tcx, TyAndLayout<'tcx>>;

    #[inline]
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        self.tcx
            .layout_of(self.param_env.and(ty))
            .map_err(|layout| err_inval!(Layout(layout)).into())
    }
}

/// Test if it is valid for a MIR assignment to assign `src`-typed place to `dest`-typed value.
/// This test should be symmetric, as it is primarily about layout compatibility.
pub(super) fn mir_assign_valid_types<'tcx>(
    tcx: TyCtxt<'tcx>,
    src: TyAndLayout<'tcx>,
    dest: TyAndLayout<'tcx>,
) -> bool {
    if src.ty == dest.ty {
        // Equal types, all is good.
        return true;
    }
    if src.layout != dest.layout {
        // Layout differs, definitely not equal.
        // We do this here because Miri would *do the wrong thing* if we allowed layout-changing
        // assignments.
        return false;
    }

    // Type-changing assignments can happen for (at least) two reasons:
    // 1. `&mut T` -> `&T` gets optimized from a reborrow to a mere assignment.
    // 2. Subtyping is used. While all normal lifetimes are erased, higher-ranked types
    //    with their late-bound lifetimes are still around and can lead to type differences.
    // Normalize both of them away.
    let normalize = |ty: Ty<'tcx>| {
        ty.fold_with(&mut BottomUpFolder {
            tcx,
            // Normalize all references to immutable.
            ty_op: |ty| match ty.kind {
                ty::Ref(_, pointee, _) => tcx.mk_imm_ref(tcx.lifetimes.re_erased, pointee),
                _ => ty,
            },
            // We just erase all late-bound lifetimes, but this is not fully correct (FIXME):
            // lifetimes in invariant positions could matter (e.g. through associated types).
            // We rely on the fact that layout was confirmed to be equal above.
            lt_op: |_| tcx.lifetimes.re_erased,
            // Leave consts unchanged.
            ct_op: |ct| ct,
        })
    };
    normalize(src.ty) == normalize(dest.ty)
}
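
// Illustrative examples (not from the original source): assigning a `&'a mut i32` value to a
// `&'b i32` destination passes this check, since both sides normalize to an immutable `i32`
// reference with an erased lifetime. Assigning `&i32` to `&i64` fails it: the two are thin
// pointers with identical layout, but their normalized pointee types still differ.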

/// Use the already known layout if given (but sanity check in debug mode),
/// or compute the layout.
#[cfg_attr(not(debug_assertions), inline(always))]
pub(super) fn from_known_layout<'tcx>(
    tcx: TyCtxtAt<'tcx>,
    known_layout: Option<TyAndLayout<'tcx>>,
    compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
    match known_layout {
        None => compute(),
        Some(known_layout) => {
            if cfg!(debug_assertions) {
                let check_layout = compute()?;
                if !mir_assign_valid_types(tcx.tcx, check_layout, known_layout) {
                    span_bug!(
                        tcx.span,
                        "expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
                        known_layout.ty,
                        check_layout.ty,
                    );
                }
            }
            Ok(known_layout)
        }
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn new(
        tcx: TyCtxtAt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
        machine: M,
        memory_extra: M::MemoryExtra,
    ) -> Self {
        InterpCx {
            machine,
            tcx,
            param_env,
            memory: Memory::new(tcx, memory_extra),
            vtables: FxHashMap::default(),
        }
    }

    #[inline(always)]
    pub fn set_span(&mut self, span: Span) {
        self.tcx.span = span;
        self.memory.tcx.span = span;
    }

    #[inline(always)]
    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        self.memory.force_ptr(scalar)
    }

    #[inline(always)]
    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, u128> {
        self.memory.force_bits(scalar, size)
    }

    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the *canonical* machine pointer to the allocation. Must never be used
    /// for any other pointers!
    ///
    /// This represents a *direct* access to that memory, as opposed to access
    /// through a pointer that was created by the program.
    #[inline(always)]
    pub fn tag_global_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
        self.memory.tag_global_base_pointer(ptr)
    }

    #[inline(always)]
    pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] {
        M::stack(self)
    }

    #[inline(always)]
    pub(crate) fn stack_mut(
        &mut self,
    ) -> &mut Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>> {
        M::stack_mut(self)
    }

    #[inline(always)]
    pub fn frame_idx(&self) -> usize {
        let stack = self.stack();
        assert!(!stack.is_empty());
        stack.len() - 1
    }

    #[inline(always)]
    pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
        self.stack().last().expect("no call frames exist")
    }

    #[inline(always)]
    pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
        self.stack_mut().last_mut().expect("no call frames exist")
    }

    #[inline(always)]
    pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
        self.frame().body
    }

    #[inline(always)]
    pub fn sign_extend(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
        assert!(ty.abi.is_signed());
        sign_extend(value, ty.size)
    }

    #[inline(always)]
    pub fn truncate(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
        truncate(value, ty.size)
    }
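
    // Illustrative note (not from the original source): for a 1-byte signed layout,
    // `sign_extend(0x80, size)` propagates the sign bit of the low byte and yields
    // `0xffff_..._ff80` as a `u128`, while `truncate(0x1ff, size)` keeps only the low
    // byte and yields `0xff`.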

    #[inline]
    pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
        ty.is_sized(self.tcx, self.param_env)
    }

    #[inline]
    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
        ty.is_freeze(*self.tcx, self.param_env, DUMMY_SP)
    }

    pub fn load_mir(
        &self,
        instance: ty::InstanceDef<'tcx>,
        promoted: Option<mir::Promoted>,
    ) -> InterpResult<'tcx, mir::ReadOnlyBodyAndCache<'tcx, 'tcx>> {
        // Do not continue if typeck errors occurred (can only occur in the local crate).
        let did = instance.def_id();
        if did.is_local() && self.tcx.has_typeck_tables(did) {
            if let Some(error_reported) = self.tcx.typeck_tables_of(did).tainted_by_errors {
                throw_inval!(TypeckError(error_reported))
            }
        }
        trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
        if let Some(promoted) = promoted {
            return Ok(self.tcx.promoted_mir(did)[promoted].unwrap_read_only());
        }
        match instance {
            ty::InstanceDef::Item(def_id) => {
                if self.tcx.is_mir_available(did) {
                    Ok(self.tcx.optimized_mir(did).unwrap_read_only())
                } else {
                    throw_unsup!(NoMirFor(def_id))
                }
            }
            _ => Ok(self.tcx.instance_mir(instance)),
        }
    }

    /// Call this on things you got out of the MIR (so it is as generic as the current
    /// stack frame), to bring it into the proper environment for this interpreter.
    pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
        &self,
        value: T,
    ) -> T {
        self.subst_from_frame_and_normalize_erasing_regions(self.frame(), value)
    }

    /// Call this on things you got out of the MIR (so it is as generic as the provided
    /// stack frame), to bring it into the proper environment for this interpreter.
    pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
        &self,
        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        value: T,
    ) -> T {
        if let Some(substs) = frame.instance.substs_for_mir_body() {
            self.tcx.subst_and_normalize_erasing_regions(substs, self.param_env, &value)
        } else {
            self.tcx.normalize_erasing_regions(self.param_env, value)
        }
    }

    /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
    pub(super) fn resolve(
        &self,
        def_id: DefId,
        substs: SubstsRef<'tcx>,
    ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
        trace!("resolve: {:?}, {:#?}", def_id, substs);
        trace!("param_env: {:#?}", self.param_env);
        trace!("substs: {:#?}", substs);
        ty::Instance::resolve(*self.tcx, self.param_env, def_id, substs)
            .ok_or_else(|| err_inval!(TooGeneric).into())
    }

    pub fn layout_of_local(
        &self,
        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        local: mir::Local,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
        // `const_prop` runs into this with an invalid (empty) frame, so we
        // have to support that case (mostly by skipping all caching).
        match frame.locals.get(local).and_then(|state| state.layout.get()) {
            None => {
                let layout = from_known_layout(self.tcx, layout, || {
                    let local_ty = frame.body.local_decls[local].ty;
                    let local_ty =
                        self.subst_from_frame_and_normalize_erasing_regions(frame, local_ty);
                    self.layout_of(local_ty)
                })?;
                if let Some(state) = frame.locals.get(local) {
                    // Layouts of locals are requested a lot, so we cache them.
                    state.layout.set(Some(layout));
                }
                Ok(layout)
            }
            Some(layout) => Ok(layout),
        }
    }

    /// Returns the actual dynamic size and alignment of the place at the given type.
    /// Only the "meta" (metadata) part of the place matters.
    /// This can fail to provide an answer for extern types.
    pub(super) fn size_and_align_of(
        &self,
        metadata: MemPlaceMeta<M::PointerTag>,
        layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
        if !layout.is_unsized() {
            return Ok(Some((layout.size, layout.align.abi)));
        }
        match layout.ty.kind {
            ty::Adt(..) | ty::Tuple(..) => {
                // First get the size of all statically known fields.
                // Don't use `type_of::sizing_type_of` because that expects the type to be sized,
                // and it also rounds up to alignment, which we want to avoid,
                // as the unsized field's alignment could be smaller.
                assert!(!layout.ty.is_simd());
                assert!(layout.fields.count() > 0);
                trace!("DST layout: {:?}", layout);

                let sized_size = layout.fields.offset(layout.fields.count() - 1);
                let sized_align = layout.align.abi;
                trace!(
                    "DST {} statically sized prefix size: {:?} align: {:?}",
                    layout.ty,
                    sized_size,
                    sized_align
                );

                // Recurse to get the size of the dynamically sized field (must be
                // the last field). Can't have foreign types here, how would we
                // adjust alignment and size for them?
                let field = layout.field(self, layout.fields.count() - 1)?;
                let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? {
                    Some(size_and_align) => size_and_align,
                    None => {
                        // A field with extern type. If this field is at offset 0, we behave
                        // like the underlying extern type.
                        // FIXME: Once we have made decisions for how to handle size and alignment
                        // of `extern type`, this should be adapted. It is just a temporary hack
                        // to get some code to work that probably ought to work.
                        if sized_size == Size::ZERO {
                            return Ok(None);
                        } else {
                            bug!("Fields cannot be extern types, unless they are at offset 0")
                        }
                    }
                };

                // FIXME (#26403, #27023): We should be adding padding
                // to `sized_size` (to accommodate the `unsized_align`
                // required of the unsized field that follows) before
                // summing it with `sized_size`. (Note that since #26403
                // is unfixed, we do not yet add the necessary padding
                // here. But this is where the add would go.)

                // Return the sum of sizes and max of aligns.
                let size = sized_size + unsized_size; // `Size` addition

                // Choose max of two known alignments (combined value must
                // be aligned according to more restrictive of the two).
                let align = sized_align.max(unsized_align);

                // Issue #27023: must add any necessary padding to `size`
                // (to make it a multiple of `align`) before returning it.
                let size = size.align_to(align);

                // Check if this brought us over the size limit.
                if size.bytes() >= self.tcx.data_layout().obj_size_bound() {
                    throw_ub!(InvalidMeta("total size is bigger than largest supported object"));
                }
                Ok(Some((size, align)))
            }
            ty::Dynamic(..) => {
                let vtable = metadata.unwrap_meta();
                // Read size and align from vtable (already checks size).
                Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
            }

            ty::Slice(_) | ty::Str => {
                let len = metadata.unwrap_meta().to_machine_usize(self)?;
                let elem = layout.field(self, 0)?;

                // Make sure the slice is not too big.
                let size = elem.size.checked_mul(len, &*self.tcx).ok_or_else(|| {
                    err_ub!(InvalidMeta("slice is bigger than largest supported object"))
                })?;
                Ok(Some((size, elem.align.abi)))
            }

            ty::Foreign(_) => Ok(None),

            _ => bug!("size_and_align_of::<{:?}> not supported", layout.ty),
        }
    }
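
    // Illustrative worked example for `size_and_align_of` above (not from the original
    // source): for a hypothetical `struct Foo { a: u32, tail: [u16] }` with slice metadata
    // `len == 3`, the sized prefix ends at offset 4 (`sized_size`), the tail contributes
    // `3 * 2 == 6` bytes with alignment 2, so `size == 10` and `align == max(4, 2) == 4`;
    // the final `size.align_to(align)` rounds the total up to 12 bytes.
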
    #[inline]
    pub fn size_and_align_of_mplace(
        &self,
        mplace: MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
        self.size_and_align_of(mplace.meta, mplace.layout)
    }

    pub fn push_stack_frame(
        &mut self,
        instance: ty::Instance<'tcx>,
        body: &'mir mir::Body<'tcx>,
        return_place: Option<PlaceTy<'tcx, M::PointerTag>>,
        return_to_block: StackPopCleanup,
    ) -> InterpResult<'tcx> {
        if !self.stack().is_empty() {
            info!("PAUSING({}) {}", self.frame_idx(), self.frame().instance);
        }
        ::log_settings::settings().indentation += 1;

        // first push a stack frame so we have access to the local substs
        let pre_frame = Frame {
            body,
            block: Some(mir::START_BLOCK),
            return_to_block,
            return_place,
            // empty local array, we fill it in below, after we are inside the stack frame and
            // all methods actually know about the frame
            locals: IndexVec::new(),
            instance,
            stmt: 0,
            extra: (),
        };
        let frame = M::init_frame_extra(self, pre_frame)?;
        self.stack_mut().push(frame);

        // don't allocate at all for trivial constants
        if body.local_decls.len() > 1 {
            // Locals are initially uninitialized.
            let dummy = LocalState { value: LocalValue::Uninitialized, layout: Cell::new(None) };
            let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
            // Return place is handled specially by the `eval_place` functions, and the
            // entry in `locals` should never be used. Make it dead, to be sure.
            locals[mir::RETURN_PLACE].value = LocalValue::Dead;
            // Now mark those locals as dead that we do not want to initialize
            match self.tcx.def_kind(instance.def_id()) {
                // statics and constants don't have `Storage*` statements, no need to look for them
                //
                // FIXME: The above is likely untrue. See
                // <https://github.com/rust-lang/rust/pull/70004#issuecomment-602022110>. Is it
                // okay to ignore `StorageDead`/`StorageLive` annotations during CTFE?
                Some(DefKind::Static | DefKind::Const | DefKind::AssocConst) => {}
                _ => {
                    // Mark locals that use `Storage*` annotations as dead on function entry.
                    let always_live = AlwaysLiveLocals::new(self.body());
                    for local in locals.indices() {
                        if !always_live.contains(local) {
                            locals[local].value = LocalValue::Dead;
                        }
                    }
                }
            }
            // done
            self.frame_mut().locals = locals;
        }

        M::after_stack_push(self)?;
        info!("ENTERING({}) {}", self.frame_idx(), self.frame().instance);

        if self.stack().len() > *self.tcx.sess.recursion_limit.get() {
            throw_exhaust!(StackFrameLimitReached)
        } else {
            Ok(())
        }
    }
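
    // Illustrative call-site sketch (hypothetical, not part of the original source): a caller
    // evaluating a function call would typically push the callee roughly like this, so that
    // the result is written to `dest` and control returns to `ret_block` afterwards:
    //
    //     ecx.push_stack_frame(
    //         instance,
    //         body,
    //         Some(dest),
    //         StackPopCleanup::Goto { ret: Some(ret_block), unwind: None },
    //     )?;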

    /// Jump to the given block.
    #[inline]
    pub fn go_to_block(&mut self, target: mir::BasicBlock) {
        let frame = self.frame_mut();
        frame.block = Some(target);
        frame.stmt = 0;
    }

    /// *Return* to the given `target` basic block.
    /// Do *not* use for unwinding! Use `unwind_to_block` instead.
    ///
    /// If `target` is `None`, that indicates the function cannot return, so we raise UB.
    pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
        if let Some(target) = target {
            self.go_to_block(target);
            Ok(())
        } else {
            throw_ub!(Unreachable)
        }
    }

    /// *Unwind* to the given `target` basic block.
    /// Do *not* use for returning! Use `return_to_block` instead.
    ///
    /// If `target` is `None`, that indicates the function does not need cleanup during
    /// unwinding, and we will just keep propagating that upwards.
    pub fn unwind_to_block(&mut self, target: Option<mir::BasicBlock>) {
        let frame = self.frame_mut();
        frame.block = target;
        frame.stmt = 0;
    }

    /// Pops the current frame from the stack, deallocating the
    /// memory for allocated locals.
    ///
    /// If `unwinding` is `false`, then we are performing a normal return
    /// from a function. In this case, we jump back into the frame of the caller,
    /// and continue execution as normal.
    ///
    /// If `unwinding` is `true`, then we are in the middle of a panic,
    /// and need to unwind this frame. In this case, we jump to the
    /// `cleanup` block for the function, which is responsible for running
    /// `Drop` impls for any locals that have been initialized at this point.
    /// The cleanup block ends with a special `Resume` terminator, which will
    /// cause us to continue unwinding.
    pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
        info!(
            "LEAVING({}) {} (unwinding = {})",
            self.frame_idx(),
            self.frame().instance,
            unwinding
        );

        // Sanity check `unwinding`.
        assert_eq!(
            unwinding,
            match self.frame().block {
                None => true,
                Some(block) => self.body().basic_blocks()[block].is_cleanup,
            }
        );

        ::log_settings::settings().indentation -= 1;
        let frame =
            self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");

        // Now where do we jump next?

        // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
        // In that case, we return early. We also avoid validation in that case,
        // because this is CTFE and the final value will be thoroughly validated anyway.
        let (cleanup, next_block) = match frame.return_to_block {
            StackPopCleanup::Goto { ret, unwind } => {
                (true, Some(if unwinding { unwind } else { ret }))
            }
            StackPopCleanup::None { cleanup, .. } => (cleanup, None),
        };

        if !cleanup {
            assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
            assert!(next_block.is_none(), "tried to skip cleanup when we have a next block!");
            assert!(!unwinding, "tried to skip cleanup during unwinding");
            // Leak the locals, skip validation, skip machine hook.
            return Ok(());
        }

        // Cleanup: deallocate all locals that are backed by an allocation.
        for local in &frame.locals {
            self.deallocate_local(local.value)?;
        }

        let return_place = frame.return_place;
        if M::after_stack_pop(self, frame, unwinding)? == StackPopJump::NoJump {
            // The hook already did everything.
            // We want to skip the `info!` below, hence early return.
            return Ok(());
        }
        // Normal return, figure out where to jump.
        if unwinding {
            // Follow the unwind edge.
            let unwind = next_block.expect("Encountered StackPopCleanup::None when unwinding!");
            self.unwind_to_block(unwind);
        } else {
            // Follow the normal return edge.
            // Validate the return value. Do this after deallocating so that we catch dangling
            // references.
            if let Some(return_place) = return_place {
                if M::enforce_validity(self) {
                    // Data got changed, better make sure it matches the type!
                    // It is still possible that the return place held invalid data while
                    // the function is running, but that's okay because nobody could have
                    // accessed that same data from the "outside" to observe any broken
                    // invariant -- that is, unless a function somehow has a ptr to
                    // its return place... but the way MIR is currently generated, the
                    // return place is always a local and then this cannot happen.
                    self.validate_operand(self.place_to_op(return_place)?)?;
                }
            } else {
                // Uh, that shouldn't happen... the function did not intend to return
                throw_ub!(Unreachable);
            }

            // Jump to new block -- *after* validation so that the spans make more sense.
            if let Some(ret) = next_block {
                self.return_to_block(ret)?;
            }
        }

        if !self.stack().is_empty() {
            info!(
                "CONTINUING({}) {} (unwinding = {})",
                self.frame_idx(),
                self.frame().instance,
                unwinding
            );
        }

        Ok(())
    }

    /// Mark a local's storage as live, killing the previous content and returning it.
    /// Remember to deallocate that!
    pub fn storage_live(
        &mut self,
        local: mir::Local,
    ) -> InterpResult<'tcx, LocalValue<M::PointerTag>> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
        trace!("{:?} is now live", local);

        let local_val = LocalValue::Uninitialized;
        // StorageLive *always* kills the value that's currently stored.
        // However, we do not error if the variable already is live;
        // see <https://github.com/rust-lang/rust/issues/42371>.
        Ok(mem::replace(&mut self.frame_mut().locals[local].value, local_val))
    }

    /// Returns the old value of the local.
    /// Remember to deallocate that!
    pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue<M::PointerTag> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
        trace!("{:?} is now dead", local);

        mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead)
    }

    pub(super) fn deallocate_local(
        &mut self,
        local: LocalValue<M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // FIXME: should we tell the user that there was a local which was never written to?
        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
            trace!("deallocating local");
            // All locals have a backing allocation, even if the allocation is empty
            // due to the local having ZST type.
            let ptr = ptr.assert_ptr();
            if log_enabled!(::log::Level::Trace) {
                self.memory.dump_alloc(ptr.alloc_id);
            }
            self.memory.deallocate_local(ptr)?;
        };
        Ok(())
    }
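
    // Illustrative pairing (hypothetical, not part of the original source): a `StorageLive`
    // statement is typically handled by combining the two methods above, so that any previous
    // backing allocation of the local is freed:
    //
    //     let old_val = ecx.storage_live(local)?;
    //     ecx.deallocate_local(old_val)?;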

    pub(super) fn const_eval(
        &self,
        gid: GlobalId<'tcx>,
        ty: Ty<'tcx>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
        // and thus don't care about the parameter environment. While we could just use
        // `self.param_env`, that would mean we invoke the query to evaluate the static
        // with different parameter environments, thus causing the static to be evaluated
        // multiple times.
        let param_env = if self.tcx.is_static(gid.instance.def_id()) {
            ty::ParamEnv::reveal_all()
        } else {
            self.param_env
        };
        let val = self.tcx.const_eval_global_id(param_env, gid, Some(self.tcx.span))?;

        // Even though `ecx.const_eval` is called from `eval_const_to_op` we can never have a
        // recursion deeper than one level, because the `tcx.const_eval` above is guaranteed to not
        // return `ConstValue::Unevaluated`, which is the only way that `eval_const_to_op` will call
        // `ecx.const_eval`.
        let const_ = ty::Const { val: ty::ConstKind::Value(val), ty };
        self.eval_const_to_op(&const_, None)
    }

    pub fn const_eval_raw(
        &self,
        gid: GlobalId<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
        // and thus don't care about the parameter environment. While we could just use
        // `self.param_env`, that would mean we invoke the query to evaluate the static
        // with different parameter environments, thus causing the static to be evaluated
        // multiple times.
        let param_env = if self.tcx.is_static(gid.instance.def_id()) {
            ty::ParamEnv::reveal_all()
        } else {
            self.param_env
        };
        // We use `const_eval_raw` here, and get an unvalidated result. That is okay:
        // Our result will later be validated anyway, and there seems no good reason
        // to have to fail early here. This is also more consistent with
        // `Memory::get_static_alloc` which has to use `const_eval_raw` to avoid cycles.
        let val = self.tcx.const_eval_raw(param_env.and(gid))?;
        self.raw_const_to_mplace(val)
    }

    pub fn dump_place(&self, place: Place<M::PointerTag>) {
        // Debug output
        if !log_enabled!(::log::Level::Trace) {
            return;
        }
        match place {
            Place::Local { frame, local } => {
                let mut allocs = Vec::new();
                let mut msg = format!("{:?}", local);
                if frame != self.frame_idx() {
                    write!(msg, " ({} frames up)", self.frame_idx() - frame).unwrap();
                }
                write!(msg, ":").unwrap();

                match self.stack()[frame].locals[local].value {
                    LocalValue::Dead => write!(msg, " is dead").unwrap(),
                    LocalValue::Uninitialized => write!(msg, " is uninitialized").unwrap(),
                    LocalValue::Live(Operand::Indirect(mplace)) => match mplace.ptr {
                        Scalar::Ptr(ptr) => {
                            write!(
                                msg,
                                " by align({}){} ref:",
                                mplace.align.bytes(),
                                match mplace.meta {
                                    MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
                                    MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
                                }
                            )
                            .unwrap();
                            allocs.push(ptr.alloc_id);
                        }
                        ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
                    },
                    LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
                        write!(msg, " {:?}", val).unwrap();
                        if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val {
                            allocs.push(ptr.alloc_id);
                        }
                    }
                    LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
                        write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
                        if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 {
                            allocs.push(ptr.alloc_id);
                        }
                        if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val2 {
                            allocs.push(ptr.alloc_id);
                        }
                    }
                }

                trace!("{}", msg);
                self.memory.dump_allocs(allocs);
            }
            Place::Ptr(mplace) => match mplace.ptr {
                Scalar::Ptr(ptr) => {
                    trace!("by align({}) ref:", mplace.align.bytes());
                    self.memory.dump_alloc(ptr.alloc_id);
                }
                ptr => trace!(" integral by ref: {:?}", ptr),
            },
        }
    }

    pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> {
        let mut frames = Vec::new();
        for frame in self.stack().iter().rev() {
            let source_info = frame.current_source_info();
            let lint_root = source_info.and_then(|source_info| {
                match &frame.body.source_scopes[source_info.scope].local_data {
                    mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
                    mir::ClearCrossCrate::Clear => None,
                }
            });
            let span = source_info.map_or(DUMMY_SP, |source_info| source_info.span);

            frames.push(FrameInfo { span, instance: frame.instance, lint_root });
        }
        trace!("generate stacktrace: {:#?}", frames);
        frames
    }
}

impl<'ctx, 'mir, 'tcx, Tag, Extra> HashStable<StableHashingContext<'ctx>>
    for Frame<'mir, 'tcx, Tag, Extra>
where
    Extra: HashStable<StableHashingContext<'ctx>>,
    Tag: HashStable<StableHashingContext<'ctx>>,
{
    fn hash_stable(&self, hcx: &mut StableHashingContext<'ctx>, hasher: &mut StableHasher) {
        self.body.hash_stable(hcx, hasher);
        self.instance.hash_stable(hcx, hasher);
        self.return_to_block.hash_stable(hcx, hasher);
        self.return_place.as_ref().map(|r| &**r).hash_stable(hcx, hasher);
        self.locals.hash_stable(hcx, hasher);
        self.block.hash_stable(hcx, hasher);
        self.stmt.hash_stable(hcx, hasher);
        self.extra.hash_stable(hcx, hasher);
    }
}