// compiler/rustc_const_eval/src/interpret/terminator.rs (rustc 1.63.0+dfsg1)
use std::borrow::Cow;
use std::convert::TryFrom;

use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
use rustc_middle::ty::Instance;
use rustc_middle::{
    mir,
    ty::{self, Ty},
};
use rustc_target::abi;
use rustc_target::abi::call::{ArgAbi, ArgAttribute, ArgAttributes, FnAbi, PassMode};
use rustc_target::spec::abi::Abi;

use super::{
    FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Scalar,
    StackPopCleanup, StackPopUnwind,
};

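// This `impl` block contains the terminator handling of the MIR interpreter:
// `eval_terminator` dispatches on the `TerminatorKind` of the current block, and the
// helpers below implement function calls (`eval_fn_call`) and drop-glue invocation
// (`drop_in_place`).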
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub(super) fn eval_terminator(
        &mut self,
        terminator: &mir::Terminator<'tcx>,
    ) -> InterpResult<'tcx> {
        use rustc_middle::mir::TerminatorKind::*;
        match terminator.kind {
            Return => {
                self.pop_stack_frame(/* unwinding */ false)?
            }

            Goto { target } => self.go_to_block(target),

            SwitchInt { ref discr, ref targets, switch_ty } => {
                let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
                trace!("SwitchInt({:?})", *discr);
                assert_eq!(discr.layout.ty, switch_ty);

                // Branch to the `otherwise` case by default, if no match is found.
                assert!(!targets.iter().is_empty());
                let mut target_block = targets.otherwise();

                for (const_int, target) in targets.iter() {
                    // Compare using MIR BinOp::Eq, to also support pointer values.
                    // (Avoiding `self.binary_op` as that does some redundant layout computation.)
                    let res = self
                        .overflowing_binary_op(
                            mir::BinOp::Eq,
                            &discr,
                            &ImmTy::from_uint(const_int, discr.layout),
                        )?
                        .0;
                    if res.to_bool()? {
                        target_block = target;
                        break;
                    }
                }

                self.go_to_block(target_block);
            }

            Call {
                ref func,
                ref args,
                destination,
                target,
                ref cleanup,
                from_hir_call: _,
                fn_span: _,
            } => {
                let old_stack = self.frame_idx();
                let old_loc = self.frame().loc;
                let func = self.eval_operand(func, None)?;
                let args = self.eval_operands(args)?;

                let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
                let fn_sig =
                    self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
                let extra_args = &args[fn_sig.inputs().len()..];
                let extra_args = self.tcx.mk_type_list(extra_args.iter().map(|arg| arg.layout.ty));

                let (fn_val, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
                    ty::FnPtr(_sig) => {
                        let fn_ptr = self.read_pointer(&func)?;
                        let fn_val = self.get_ptr_fn(fn_ptr)?;
                        (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
                    }
                    ty::FnDef(def_id, substs) => {
                        let instance =
                            self.resolve(ty::WithOptConstParam::unknown(def_id), substs)?;
                        (
                            FnVal::Instance(instance),
                            self.fn_abi_of_instance(instance, extra_args)?,
                            instance.def.requires_caller_location(*self.tcx),
                        )
                    }
                    _ => span_bug!(
                        terminator.source_info.span,
                        "invalid callee of type {:?}",
                        func.layout.ty
                    ),
                };

                let destination = self.eval_place(destination)?;
                self.eval_fn_call(
                    fn_val,
                    (fn_sig.abi, fn_abi),
                    &args,
                    with_caller_location,
                    &destination,
                    target,
                    match (cleanup, fn_abi.can_unwind) {
                        (Some(cleanup), true) => StackPopUnwind::Cleanup(*cleanup),
                        (None, true) => StackPopUnwind::Skip,
                        (_, false) => StackPopUnwind::NotAllowed,
                    },
                )?;
                // Sanity-check that `eval_fn_call` either pushed a new frame or
                // did a jump to another block.
                if self.frame_idx() == old_stack && self.frame().loc == old_loc {
                    span_bug!(terminator.source_info.span, "evaluating this call made no progress");
                }
            }

            Drop { place, target, unwind } => {
                let place = self.eval_place(place)?;
                let ty = place.layout.ty;
                trace!("TerminatorKind::drop: {:?}, type {}", place, ty);

                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
                self.drop_in_place(&place, instance, target, unwind)?;
            }

            Assert { ref cond, expected, ref msg, target, cleanup } => {
                let cond_val =
                    self.read_immediate(&self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?;
                if expected == cond_val {
                    self.go_to_block(target);
                } else {
                    M::assert_panic(self, msg, cleanup)?;
                }
            }

            Abort => {
                M::abort(self, "the program aborted execution".to_owned())?;
            }

            // When we encounter Resume, we've finished unwinding
            // cleanup for the current stack frame. We pop it in order
            // to continue unwinding the next frame.
            Resume => {
                trace!("unwinding: resuming from cleanup");
                // By definition, a Resume terminator means
                // that we're unwinding.
                self.pop_stack_frame(/* unwinding */ true)?;
                return Ok(());
            }

            // It is UB to ever encounter this.
            Unreachable => throw_ub!(Unreachable),

            // These should never occur for MIR we actually run.
            DropAndReplace { .. }
            | FalseEdge { .. }
            | FalseUnwind { .. }
            | Yield { .. }
            | GeneratorDrop => span_bug!(
                terminator.source_info.span,
                "{:#?} should have been eliminated by MIR pass",
                terminator.kind
            ),

            // Inline assembly can't be interpreted.
            InlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
        }

        Ok(())
    }

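    /// Heuristically checks whether a caller-side `ArgAbi` is compatible with the callee's
    /// expectation. Used by `pass_argument` for each argument and by `eval_fn_call` for the
    /// return place.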
    fn check_argument_compat(
        caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
    ) -> bool {
        // Heuristic for type comparison.
        let layout_compat = || {
            if caller_abi.layout.ty == callee_abi.layout.ty {
                // No question
                return true;
            }
            if caller_abi.layout.size != callee_abi.layout.size
                || caller_abi.layout.align.abi != callee_abi.layout.align.abi
            {
                // This cannot go well...
                // FIXME: What about unsized types?
                return false;
            }
            // The rest *should* be okay, but we are extra conservative.
            match (caller_abi.layout.abi, callee_abi.layout.abi) {
                // Different valid ranges are okay (once we enforce validity,
                // that will take care to make it UB to leave the range, just
                // like for transmute).
                (abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => {
                    caller.primitive() == callee.primitive()
                }
                (
                    abi::Abi::ScalarPair(caller1, caller2),
                    abi::Abi::ScalarPair(callee1, callee2),
                ) => {
                    caller1.primitive() == callee1.primitive()
                        && caller2.primitive() == callee2.primitive()
                }
                // Be conservative
                _ => false,
            }
        };
        // Padding must be fully equal.
        let pad_compat = || caller_abi.pad == callee_abi.pad;
        // When comparing the PassMode, we have to be smart about comparing the attributes.
        let arg_attr_compat = |a1: ArgAttributes, a2: ArgAttributes| {
            // There's only one regular attribute that matters for the call ABI: InReg.
            // Everything else is things like noalias, dereferenceable, nonnull, ...
            // (This also applies to pointee_size, pointee_align.)
            if a1.regular.contains(ArgAttribute::InReg) != a2.regular.contains(ArgAttribute::InReg)
            {
                return false;
            }
            // We also compare the sign-extension mode -- this could let the callee make
            // assumptions about bits that conceptually were not even passed.
            if a1.arg_ext != a2.arg_ext {
                return false;
            }
            return true;
        };
        let mode_compat = || match (caller_abi.mode, callee_abi.mode) {
            (PassMode::Ignore, PassMode::Ignore) => true,
            (PassMode::Direct(a1), PassMode::Direct(a2)) => arg_attr_compat(a1, a2),
            (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => {
                arg_attr_compat(a1, a2) && arg_attr_compat(b1, b2)
            }
            (PassMode::Cast(c1), PassMode::Cast(c2)) => c1 == c2,
            (
                PassMode::Indirect { attrs: a1, extra_attrs: None, on_stack: s1 },
                PassMode::Indirect { attrs: a2, extra_attrs: None, on_stack: s2 },
            ) => arg_attr_compat(a1, a2) && s1 == s2,
            (
                PassMode::Indirect { attrs: a1, extra_attrs: Some(e1), on_stack: s1 },
                PassMode::Indirect { attrs: a2, extra_attrs: Some(e2), on_stack: s2 },
            ) => arg_attr_compat(a1, a2) && arg_attr_compat(e1, e2) && s1 == s2,
            _ => false,
        };

        if layout_compat() && pad_compat() && mode_compat() {
            return true;
        }
        trace!(
            "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
            caller_abi,
            callee_abi
        );
        return false;
    }

    /// Initialize a single callee argument, checking the types for compatibility.
    fn pass_argument<'x, 'y>(
        &mut self,
        caller_args: &mut impl Iterator<
            Item = (&'x OpTy<'tcx, M::PointerTag>, &'y ArgAbi<'tcx, Ty<'tcx>>),
        >,
        callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        callee_arg: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx>
    where
        'tcx: 'x,
        'tcx: 'y,
    {
        if matches!(callee_abi.mode, PassMode::Ignore) {
            // This one is skipped.
            return Ok(());
        }
        // Find next caller arg.
        let (caller_arg, caller_abi) = caller_args.next().ok_or_else(|| {
            err_ub_format!("calling a function with fewer arguments than it requires")
        })?;
        // Now, check the caller and callee ABIs for compatibility.
        if !Self::check_argument_compat(caller_abi, callee_abi) {
            throw_ub_format!(
                "calling a function with argument of type {:?} passing data of type {:?}",
                callee_arg.layout.ty,
                caller_arg.layout.ty
            )
        }
        // We allow some transmutes here.
        // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
        // is true for all `copy_op`, but there are a lot of special cases for argument passing
        // specifically.)
        self.copy_op_transmute(&caller_arg, callee_arg)
    }

    /// Call this function -- pushing the stack frame and initializing the arguments.
    ///
    /// `caller_fn_abi` is used to determine if all the arguments are passed the proper way.
    /// However, we also need `caller_abi` to determine if we need to do untupling of arguments.
    ///
    /// `with_caller_location` indicates whether the caller passed a caller location. Miri
    /// implements caller locations without argument passing, but to match `FnAbi` we need to know
    /// when those arguments are present.
    pub(crate) fn eval_fn_call(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
        (caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
        args: &[OpTy<'tcx, M::PointerTag>],
        with_caller_location: bool,
        destination: &PlaceTy<'tcx, M::PointerTag>,
        target: Option<mir::BasicBlock>,
        mut unwind: StackPopUnwind,
    ) -> InterpResult<'tcx> {
        trace!("eval_fn_call: {:#?}", fn_val);

        let instance = match fn_val {
            FnVal::Instance(instance) => instance,
            FnVal::Other(extra) => {
                return M::call_extra_fn(
                    self,
                    extra,
                    caller_abi,
                    args,
                    destination,
                    target,
                    unwind,
                );
            }
        };

        match instance.def {
            ty::InstanceDef::Intrinsic(def_id) => {
                assert!(self.tcx.is_intrinsic(def_id));
                // caller_fn_abi is not relevant here, we interpret the arguments directly for each intrinsic.
                M::call_intrinsic(self, instance, args, destination, target, unwind)
            }
            ty::InstanceDef::VtableShim(..)
            | ty::InstanceDef::ReifyShim(..)
            | ty::InstanceDef::ClosureOnceShim { .. }
            | ty::InstanceDef::FnPtrShim(..)
            | ty::InstanceDef::DropGlue(..)
            | ty::InstanceDef::CloneShim(..)
            | ty::InstanceDef::Item(_) => {
                // We need MIR for this fn
                let Some((body, instance)) =
                    M::find_mir_or_eval_fn(self, instance, caller_abi, args, destination, target, unwind)? else {
                    return Ok(());
                };

                // Compute callee information using the `instance` returned by
                // `find_mir_or_eval_fn`.
                // FIXME: for variadic support, do we have to somehow determine callee's extra_args?
                let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;

                if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic {
                    throw_unsup_format!("calling a c-variadic function is not supported");
                }

                if M::enforce_abi(self) {
                    if caller_fn_abi.conv != callee_fn_abi.conv {
                        throw_ub_format!(
                            "calling a function with calling convention {:?} using calling convention {:?}",
                            callee_fn_abi.conv,
                            caller_fn_abi.conv
                        )
                    }
                }

                if !matches!(unwind, StackPopUnwind::NotAllowed) && !callee_fn_abi.can_unwind {
                    // The callee cannot unwind.
                    unwind = StackPopUnwind::NotAllowed;
                }

                self.push_stack_frame(
                    instance,
                    body,
                    destination,
                    StackPopCleanup::Goto { ret: target, unwind },
                )?;

                // If an error is raised here, pop the frame again to get an accurate backtrace.
                // To this end, we wrap it all in a `try` block.
                let res: InterpResult<'tcx> = try {
                    trace!(
                        "caller ABI: {:?}, args: {:#?}",
                        caller_abi,
                        args.iter()
                            .map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
                            .collect::<Vec<_>>()
                    );
                    trace!(
                        "spread_arg: {:?}, locals: {:#?}",
                        body.spread_arg,
                        body.args_iter()
                            .map(|local| (
                                local,
                                self.layout_of_local(self.frame(), local, None).unwrap().ty
                            ))
                            .collect::<Vec<_>>()
                    );

                    // In principle, we have two iterators: Where the arguments come from, and where
                    // they go to.

                    // For where they come from: If the ABI is RustCall, we untuple the
                    // last incoming argument. These two iterators do not have the same type,
                    // so to keep the code paths uniform we accept an allocation
                    // (for RustCall ABI only).
                    let caller_args: Cow<'_, [OpTy<'tcx, M::PointerTag>]> =
                        if caller_abi == Abi::RustCall && !args.is_empty() {
                            // Untuple
                            let (untuple_arg, args) = args.split_last().unwrap();
                            trace!("eval_fn_call: Will pass last argument by untupling");
                            Cow::from(
                                args.iter()
                                    .map(|&a| Ok(a))
                                    .chain(
                                        (0..untuple_arg.layout.fields.count())
                                            .map(|i| self.operand_field(untuple_arg, i)),
                                    )
                                    .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>(
                                    )?,
                            )
                        } else {
                            // Plain arg passing
                            Cow::from(args)
                        };
                    // If `with_caller_location` is set we pretend there is an extra argument (that
                    // we will not pass).
                    assert_eq!(
                        caller_args.len() + if with_caller_location { 1 } else { 0 },
                        caller_fn_abi.args.len(),
                        "mismatch between caller ABI and caller arguments",
                    );
                    let mut caller_args = caller_args
                        .iter()
                        .zip(caller_fn_abi.args.iter())
                        .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));

                    // Now we have to spread them out across the callee's locals,
                    // taking into account the `spread_arg`. If we could write
                    // this as a single iterator (that handles `spread_arg`), then
                    // `pass_argument` would be the loop body. It takes care to
                    // not advance `caller_args` for ZSTs.
                    let mut callee_args_abis = callee_fn_abi.args.iter();
                    for local in body.args_iter() {
                        let dest = self.eval_place(mir::Place::from(local))?;
                        if Some(local) == body.spread_arg {
                            // Must be a tuple
                            for i in 0..dest.layout.fields.count() {
                                let dest = self.place_field(&dest, i)?;
                                let callee_abi = callee_args_abis.next().unwrap();
                                self.pass_argument(&mut caller_args, callee_abi, &dest)?;
                            }
                        } else {
                            // Normal argument
                            let callee_abi = callee_args_abis.next().unwrap();
                            self.pass_argument(&mut caller_args, callee_abi, &dest)?;
                        }
                    }
                    // If the callee needs a caller location, pretend we consume one more argument from the ABI.
                    if instance.def.requires_caller_location(*self.tcx) {
                        callee_args_abis.next().unwrap();
                    }
                    // Now we should have no more caller args or callee arg ABIs.
                    assert!(
                        callee_args_abis.next().is_none(),
                        "mismatch between callee ABI and callee body arguments"
                    );
                    if caller_args.next().is_some() {
                        throw_ub_format!("calling a function with more arguments than it expected")
                    }
                    // Don't forget to check the return type!
                    if !Self::check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret) {
                        throw_ub_format!(
                            "calling a function with return type {:?} passing \
                                return place of type {:?}",
                            callee_fn_abi.ret.layout.ty,
                            caller_fn_abi.ret.layout.ty,
                        )
                    }
                };
                match res {
                    Err(err) => {
                        self.stack_mut().pop();
                        Err(err)
                    }
                    Ok(()) => Ok(()),
                }
            }
            // cannot use the shim here, because that will only result in infinite recursion
            ty::InstanceDef::Virtual(_, idx) => {
                let mut args = args.to_vec();
                // We have to implement all "object safe receivers". So we have to go search for a
                // pointer or `dyn Trait` type, but it could be wrapped in newtypes. So recursively
                // unwrap those newtypes until we are there.
                let mut receiver = args[0];
                let receiver_place = loop {
                    match receiver.layout.ty.kind() {
                        ty::Ref(..) | ty::RawPtr(..) => break self.deref_operand(&receiver)?,
                        ty::Dynamic(..) => break receiver.assert_mem_place(),
                        _ => {
                            // Not there yet, search for the only non-ZST field.
                            let mut non_zst_field = None;
                            for i in 0..receiver.layout.fields.count() {
                                let field = self.operand_field(&receiver, i)?;
                                if !field.layout.is_zst() {
                                    assert!(
                                        non_zst_field.is_none(),
                                        "multiple non-ZST fields in dyn receiver type {}",
                                        receiver.layout.ty
                                    );
                                    non_zst_field = Some(field);
                                }
                            }
                            receiver = non_zst_field.unwrap_or_else(|| {
                                panic!(
                                    "no non-ZST fields in dyn receiver type {}",
                                    receiver.layout.ty
                                )
                            });
                        }
                    }
                };
                // Find and consult vtable. The type now could be something like RcBox<dyn Trait>,
                // i.e., it is still not necessarily `ty::Dynamic` (so we cannot use
                // `place.vtable()`), but it should have a `dyn Trait` tail.
                assert!(matches!(
                    self.tcx
                        .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env)
                        .kind(),
                    ty::Dynamic(..)
                ));
                let vtable = self.scalar_to_ptr(receiver_place.meta.unwrap_meta())?;
                let fn_val = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;

                // `*mut receiver_place.layout.ty` is almost the layout that we
                // want for args[0]: We have to project to field 0 because we want
                // a thin pointer.
                assert!(receiver_place.layout.is_unsized());
                let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
                let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0);
                // Adjust receiver argument.
                args[0] = OpTy::from(ImmTy::from_immediate(
                    Scalar::from_maybe_pointer(receiver_place.ptr, self).into(),
                    this_receiver_ptr,
                ));
                trace!("Patched receiver operand to {:#?}", args[0]);
                // recurse with concrete function
                self.eval_fn_call(
                    fn_val,
                    (caller_abi, caller_fn_abi),
                    &args,
                    with_caller_location,
                    destination,
                    target,
                    unwind,
                )
            }
        }
    }

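    /// Evaluates a `Drop` terminator: calls the given drop `instance` (after unpacking a trait
    /// object to its concrete drop shim, if needed) on `place`, then continues at `target`.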
    fn drop_in_place(
        &mut self,
        place: &PlaceTy<'tcx, M::PointerTag>,
        instance: ty::Instance<'tcx>,
        target: mir::BasicBlock,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
        // We take the address of the object. This may well be unaligned, which is fine
        // for us here. However, unaligned accesses will probably make the actual drop
        // implementation fail -- a problem shared by rustc.
        let place = self.force_allocation(place)?;

        let (instance, place) = match place.layout.ty.kind() {
            ty::Dynamic(..) => {
                // Dropping a trait object.
                self.unpack_dyn_trait(&place)?
            }
            _ => (instance, place),
        };
        let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;

        let arg = ImmTy::from_immediate(
            place.to_ref(self),
            self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
        );

        let ty = self.tcx.mk_unit(); // return type is ()
        let dest = MPlaceTy::dangling(self.layout_of(ty)?);

        self.eval_fn_call(
            FnVal::Instance(instance),
            (Abi::Rust, fn_abi),
            &[arg.into()],
            false,
            &dest.into(),
            Some(target),
            match unwind {
                Some(cleanup) => StackPopUnwind::Cleanup(cleanup),
                None => StackPopUnwind::Skip,
            },
        )
    }
}
607 }