// src/librustc_mir/interpret/terminator.rs (upstream rustc 1.41.1)
use std::borrow::Cow;

use rustc::{mir, ty};
use rustc::ty::Instance;
use rustc::ty::layout::{self, TyLayout, LayoutOf};
use syntax::source_map::Span;
use rustc_target::spec::abi::Abi;

use super::{
    GlobalId, InterpResult, InterpCx, Machine,
    OpTy, ImmTy, PlaceTy, MPlaceTy, StackPopCleanup, FnVal,
};

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub(super) fn eval_terminator(
        &mut self,
        terminator: &mir::Terminator<'tcx>,
    ) -> InterpResult<'tcx> {
        use rustc::mir::TerminatorKind::*;
        match terminator.kind {
            Return => {
                self.frame().return_place.map(|r| self.dump_place(*r));
                self.pop_stack_frame(/* unwinding */ false)?
            }

            Goto { target } => self.go_to_block(target),

            SwitchInt {
                ref discr,
                ref values,
                ref targets,
                ..
            } => {
                let discr = self.read_immediate(self.eval_operand(discr, None)?)?;
                trace!("SwitchInt({:?})", *discr);

                // Branch to the `otherwise` case by default, if no match is found.
                let mut target_block = targets[targets.len() - 1];

                for (index, &const_int) in values.iter().enumerate() {
                    // Compare using `binary_op`, to also support pointer values.
                    let res = self.overflowing_binary_op(
                        mir::BinOp::Eq,
                        discr,
                        ImmTy::from_uint(const_int, discr.layout),
                    )?.0;
                    if res.to_bool()? {
                        target_block = targets[index];
                        break;
                    }
                }

                self.go_to_block(target_block);
            }
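
            // For illustration only (a hedged sketch, not interpreter code): an
            // integer `match` is what typically produces `SwitchInt`. For example,
            //
            //     fn pick(x: u8) -> u8 {
            //         match x { 0 => 10, 1 => 20, _ => 30 }
            //     }
            //
            // lowers to MIR of roughly the shape
            // `switchInt(_1) -> [0: bb1, 1: bb2, otherwise: bb3]`,
            // where the final target is the `otherwise` block handled above.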

            Call {
                ref func,
                ref args,
                ref destination,
                ref cleanup,
                ..
            } => {
                let func = self.eval_operand(func, None)?;
                let (fn_val, abi) = match func.layout.ty.kind {
                    ty::FnPtr(sig) => {
                        let caller_abi = sig.abi();
                        let fn_ptr = self.read_scalar(func)?.not_undef()?;
                        let fn_val = self.memory.get_fn(fn_ptr)?;
                        (fn_val, caller_abi)
                    }
                    ty::FnDef(def_id, substs) => {
                        let sig = func.layout.ty.fn_sig(*self.tcx);
                        (FnVal::Instance(self.resolve(def_id, substs)?), sig.abi())
                    },
                    _ => {
                        bug!("invalid callee of type {:?}", func.layout.ty)
                    }
                };
                let args = self.eval_operands(args)?;
                let ret = match destination {
                    Some((dest, ret)) => Some((self.eval_place(dest)?, *ret)),
                    None => None,
                };
                self.eval_fn_call(
                    fn_val,
                    terminator.source_info.span,
                    abi,
                    &args[..],
                    ret,
                    *cleanup,
                )?;
            }
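
            // For illustration (hedged, not interpreter code): a direct call like
            // `foo(1)` has a zero-sized `ty::FnDef` callee that is resolved
            // straight to an `Instance`, while a call through a pointer,
            //
            //     let f: fn(i32) -> i32 = foo;
            //     f(1);
            //
            // takes the `ty::FnPtr` arm above, which reads the pointer value from
            // memory and looks up the function it refers to.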

            Drop {
                ref location,
                target,
                unwind,
            } => {
                // FIXME(CTFE): forbid drop in const eval
                let place = self.eval_place(location)?;
                let ty = place.layout.ty;
                trace!("TerminatorKind::drop: {:?}, type {}", location, ty);

                let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
                self.drop_in_place(
                    place,
                    instance,
                    terminator.source_info.span,
                    target,
                    unwind,
                )?;
            }
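
            // For illustration (hedged): `Drop` terminators come from scope ends,
            // e.g.
            //
            //     { let v = vec![1, 2, 3]; } // end of scope
            //
            // lowers to roughly `drop(v) -> [return: bbN, unwind: bbM]`, which
            // lands in `drop_in_place` below with `v`'s place and the drop glue
            // instance resolved for its type.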

            Assert {
                ref cond,
                expected,
                ref msg,
                target,
                cleanup,
            } => {
                let cond_val = self.read_immediate(self.eval_operand(cond, None)?)?
                    .to_scalar()?.to_bool()?;
                if expected == cond_val {
                    self.go_to_block(target);
                } else {
                    M::assert_panic(self, terminator.source_info.span, msg, cleanup)?;
                }
            }
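
            // For illustration (hedged): `Assert` is emitted for checked
            // operations. E.g. a bounds check
            //
            //     let x = arr[i];
            //
            // lowers to roughly `assert(i < len, "index out of bounds...") -> bbN`;
            // when the condition does not match `expected`, the machine hook above
            // raises the panic.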

            // When we encounter `Resume`, we have finished unwinding the cleanup
            // for the current stack frame. We pop it in order to continue
            // unwinding the next frame.
            Resume => {
                trace!("unwinding: resuming from cleanup");
                // By definition, a Resume terminator means
                // that we're unwinding
                self.pop_stack_frame(/* unwinding */ true)?;
                return Ok(())
            },

            // It is UB to ever encounter this.
            Unreachable => throw_ub!(Unreachable),

            // These should never occur for MIR we actually run.
            DropAndReplace { .. } |
            FalseEdges { .. } |
            FalseUnwind { .. } =>
                bug!("{:#?} should have been eliminated by a MIR pass", terminator.kind),

            // These are not (yet) supported. It is unclear whether they can even
            // occur in MIR that we actually run.
            Yield { .. } |
            GeneratorDrop |
            Abort =>
                throw_unsup_format!("Unsupported terminator kind: {:#?}", terminator.kind),
        }

        Ok(())
    }

    fn check_argument_compat(
        rust_abi: bool,
        caller: TyLayout<'tcx>,
        callee: TyLayout<'tcx>,
    ) -> bool {
        if caller.ty == callee.ty {
            // No question
            return true;
        }
        if !rust_abi {
            // Don't risk anything
            return false;
        }
        // Compare layout
        match (&caller.abi, &callee.abi) {
            // Different valid ranges are okay (once we enforce validity,
            // that will make it UB to leave the range, just like for transmute).
            (layout::Abi::Scalar(ref caller), layout::Abi::Scalar(ref callee)) =>
                caller.value == callee.value,
            (layout::Abi::ScalarPair(ref caller1, ref caller2),
             layout::Abi::ScalarPair(ref callee1, ref callee2)) =>
                caller1.value == callee1.value && caller2.value == callee2.value,
            // Be conservative
            _ => false,
        }
    }
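
    // For illustration (a hedged sketch): under the Rust ABI, two distinct types
    // pass this check if their scalar kinds match, even when the valid ranges
    // differ. E.g. `char` and `u32` are both `Scalar(Int(I32))`, and `&T` and
    // `*const T` are both `Scalar(Pointer)`, so a call like
    //
    //     Self::check_argument_compat(true, char_layout, u32_layout)
    //
    // returns `true`, while `u64` vs. `(u32, u32)` (Scalar vs. ScalarPair) is
    // conservatively rejected.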

    /// Pass a single argument, checking the types for compatibility.
    fn pass_argument(
        &mut self,
        rust_abi: bool,
        caller_arg: &mut impl Iterator<Item = OpTy<'tcx, M::PointerTag>>,
        callee_arg: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        if rust_abi && callee_arg.layout.is_zst() {
            // Nothing to do.
            trace!("Skipping callee ZST");
            return Ok(());
        }
        let caller_arg = caller_arg.next()
            .ok_or_else(|| err_unsup!(FunctionArgCountMismatch))?;
        if rust_abi {
            debug_assert!(!caller_arg.layout.is_zst(), "ZSTs must have been already filtered out");
        }
        // Now, check
        if !Self::check_argument_compat(rust_abi, caller_arg.layout, callee_arg.layout) {
            throw_unsup!(FunctionArgMismatch(caller_arg.layout.ty, callee_arg.layout.ty))
        }
        // We allow some transmutes here
        self.copy_op_transmute(caller_arg, callee_arg)
    }

    /// Call this function -- pushing the stack frame and initializing the arguments.
    fn eval_fn_call(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
        span: Span,
        caller_abi: Abi,
        args: &[OpTy<'tcx, M::PointerTag>],
        ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        trace!("eval_fn_call: {:#?}", fn_val);

        let instance = match fn_val {
            FnVal::Instance(instance) => instance,
            FnVal::Other(extra) => {
                return M::call_extra_fn(self, extra, args, ret, unwind);
            }
        };

        // ABI check
        {
            let callee_abi = {
                let instance_ty = instance.ty(*self.tcx);
                match instance_ty.kind {
                    ty::FnDef(..) =>
                        instance_ty.fn_sig(*self.tcx).abi(),
                    ty::Closure(..) => Abi::RustCall,
                    ty::Generator(..) => Abi::Rust,
                    _ => bug!("unexpected callee ty: {:?}", instance_ty),
                }
            };
            let normalize_abi = |abi| match abi {
                Abi::Rust | Abi::RustCall | Abi::RustIntrinsic | Abi::PlatformIntrinsic =>
                    // These are all the same ABI, really.
                    Abi::Rust,
                abi =>
                    abi,
            };
            if normalize_abi(caller_abi) != normalize_abi(callee_abi) {
                throw_unsup!(FunctionAbiMismatch(caller_abi, callee_abi))
            }
        }
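
        // For illustration (hedged, not interpreter code): the Rust-flavored ABIs
        // all normalize to `Abi::Rust`, so calling a closure (RustCall) or an
        // intrinsic from ordinary Rust code is accepted, whereas
        //
        //     extern "C" fn foo() {}
        //     let f: fn() = unsafe { std::mem::transmute(foo as extern "C" fn()) };
        //     f(); // caller ABI Rust vs. callee ABI C
        //
        // would fail the check above with `FunctionAbiMismatch`.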

        match instance.def {
            ty::InstanceDef::Intrinsic(..) => {
                assert!(caller_abi == Abi::RustIntrinsic || caller_abi == Abi::PlatformIntrinsic);
                return M::call_intrinsic(self, span, instance, args, ret, unwind);
            }
            ty::InstanceDef::VtableShim(..) |
            ty::InstanceDef::ReifyShim(..) |
            ty::InstanceDef::ClosureOnceShim { .. } |
            ty::InstanceDef::FnPtrShim(..) |
            ty::InstanceDef::DropGlue(..) |
            ty::InstanceDef::CloneShim(..) |
            ty::InstanceDef::Item(_) => {
                // We need MIR for this fn
                let body = match M::find_mir_or_eval_fn(self, instance, args, ret, unwind)? {
                    Some(body) => body,
                    None => return Ok(()),
                };

                self.push_stack_frame(
                    instance,
                    span,
                    body,
                    ret.map(|p| p.0),
                    StackPopCleanup::Goto { ret: ret.map(|p| p.1), unwind },
                )?;

                // We want to pop this frame again in case there was an error, to put
                // the blame in the right location. Until the 2018 edition is used in
                // the compiler, we have to do this with an immediately invoked closure.
                let res = (|| {
                    trace!(
                        "caller ABI: {:?}, args: {:#?}",
                        caller_abi,
                        args.iter()
                            .map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
                            .collect::<Vec<_>>()
                    );
                    trace!(
                        "spread_arg: {:?}, locals: {:#?}",
                        body.spread_arg,
                        body.args_iter()
                            .map(|local|
                                (local, self.layout_of_local(self.frame(), local, None).unwrap().ty)
                            )
                            .collect::<Vec<_>>()
                    );

                    // Figure out how to pass which arguments.
                    // The Rust ABI is special: ZSTs get skipped.
                    let rust_abi = match caller_abi {
                        Abi::Rust | Abi::RustCall => true,
                        _ => false,
                    };
                    // We have two iterators: where the arguments come from,
                    // and where they go to.

                    // For where they come from: if the ABI is RustCall, we untuple the
                    // last incoming argument. These two iterators do not have the same
                    // type, so to keep the code paths uniform we accept an allocation
                    // (for the RustCall ABI only). An illustrative sketch follows the
                    // `caller_args` binding below.
                    let caller_args: Cow<'_, [OpTy<'tcx, M::PointerTag>]> =
                        if caller_abi == Abi::RustCall && !args.is_empty() {
                            // Untuple
                            let (&untuple_arg, args) = args.split_last().unwrap();
                            trace!("eval_fn_call: Will pass last argument by untupling");
                            Cow::from(args.iter().map(|&a| Ok(a))
                                .chain((0..untuple_arg.layout.fields.count())
                                    .map(|i| self.operand_field(untuple_arg, i as u64))
                                )
                                .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>()?)
                        } else {
                            // Plain arg passing
                            Cow::from(args)
                        };
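
                    // For illustration (hedged): closures use the `"rust-call"` ABI,
                    // where the caller passes the closure state plus one argument
                    // tuple. E.g. for
                    //
                    //     let add = |a: i32, b: i32| a + b;
                    //     add(1, 2);
                    //
                    // the call arrives here as `args = [add, (1, 2)]`, and the
                    // untupling above flattens it to `[add, 1, 2]`.
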
                    // Skip ZSTs
                    let mut caller_iter = caller_args.iter()
                        .filter(|op| !rust_abi || !op.layout.is_zst())
                        .map(|op| *op);
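
                    // For illustration (hedged): under the Rust ABI, zero-sized
                    // arguments carry no data, so for
                    //
                    //     fn f(a: i32, b: (), c: std::marker::PhantomData<u8>) {}
                    //
                    // only `a` is actually passed; `b` and `c` are filtered out here
                    // and skipped again on the callee side by `pass_argument`.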

                    // Now we have to spread them out across the callee's locals,
                    // taking into account the `spread_arg`. If we could write
                    // this as a single iterator (that handles `spread_arg`), then
                    // `pass_argument` would be the loop body. It takes care to
                    // not advance `caller_iter` for ZSTs.
                    let mut locals_iter = body.args_iter();
                    while let Some(local) = locals_iter.next() {
                        let dest = self.eval_place(
                            &mir::Place::from(local)
                        )?;
                        if Some(local) == body.spread_arg {
                            // Must be a tuple
                            for i in 0..dest.layout.fields.count() {
                                let dest = self.place_field(dest, i as u64)?;
                                self.pass_argument(rust_abi, &mut caller_iter, dest)?;
                            }
                        } else {
                            // Normal argument
                            self.pass_argument(rust_abi, &mut caller_iter, dest)?;
                        }
                    }
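
                    // For illustration (hedged): `spread_arg` is the callee-side
                    // mirror of untupling. The body of `|a: i32, b: i32| a + b` has
                    // a single tuple local `_2: (i32, i32)` marked as `spread_arg`,
                    // so the loop above writes the flattened caller args into
                    // `_2.0` and `_2.1` field by field.
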
                    // Now we should have no more caller args
                    if caller_iter.next().is_some() {
                        trace!("Caller has passed too many args");
                        throw_unsup!(FunctionArgCountMismatch)
                    }
                    // Don't forget to check the return type!
                    if let Some((caller_ret, _)) = ret {
                        let callee_ret = self.eval_place(
                            &mir::Place::return_place()
                        )?;
                        if !Self::check_argument_compat(
                            rust_abi,
                            caller_ret.layout,
                            callee_ret.layout,
                        ) {
                            throw_unsup!(
                                FunctionRetMismatch(caller_ret.layout.ty, callee_ret.layout.ty)
                            )
                        }
                    } else {
                        let local = mir::RETURN_PLACE;
                        let callee_layout = self.layout_of_local(self.frame(), local, None)?;
                        if !callee_layout.abi.is_uninhabited() {
                            throw_unsup!(FunctionRetMismatch(
                                self.tcx.types.never, callee_layout.ty
                            ))
                        }
                    }
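
                    // For illustration (hedged): a call with no return place is only
                    // valid for a diverging callee, e.g.
                    //
                    //     fn never_returns() -> ! { loop {} }
                    //
                    // whose return type `!` is uninhabited; a callee that could
                    // actually return a value trips the check above.
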
                    Ok(())
                })();
                match res {
                    Err(err) => {
                        self.stack.pop();
                        Err(err)
                    }
                    Ok(v) => Ok(v),
                }
            }
            // Cannot use the shim here, because that would only result in
            // infinite recursion.
            ty::InstanceDef::Virtual(_, idx) => {
                let mut args = args.to_vec();
                // We have to implement all "object safe receivers". Currently we
                // support built-in pointers (&, &mut, Box) as well as unsized-self.
                // We do not yet support custom self types.
                // Also see librustc_codegen_llvm/abi.rs and librustc_codegen_llvm/mir/block.rs.
                let receiver_place = match args[0].layout.ty.builtin_deref(true) {
                    Some(_) => {
                        // Built-in pointer.
                        self.deref_operand(args[0])?
                    }
                    None => {
                        // Unsized self.
                        args[0].assert_mem_place()
                    }
                };
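
                // For illustration (hedged): this arm handles dynamic dispatch, e.g.
                //
                //     let x: &dyn std::fmt::Debug = &5;
                //     format!("{:?}", x); // virtual call through the vtable
                //
                // Here `args[0]` is the fat `&dyn Debug` pointer; the slot index
                // `idx` picks the concrete method out of its vtable.
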
                // Find and consult vtable.
                let vtable = receiver_place.vtable();
                let fn_val = self.get_vtable_slot(vtable, idx)?;

                // `*mut receiver_place.layout.ty` is almost the layout that we
                // want for args[0]: we have to project to field 0 because we want
                // a thin pointer.
                assert!(receiver_place.layout.is_unsized());
                let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
                let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0)?;
                // Adjust receiver argument.
                args[0] = OpTy::from(ImmTy {
                    layout: this_receiver_ptr,
                    imm: receiver_place.ptr.into(),
                });
                trace!("Patched self operand to {:#?}", args[0]);
                // Recurse with the concrete function.
                self.eval_fn_call(fn_val, span, caller_abi, &args, ret, unwind)
            }
        }
    }

    /// Evaluate a const function where all arguments (if any) are zero-sized types.
    /// The evaluation is memoized thanks to the query system.
    // FIXME: Consider moving this to `const_eval.rs`.
    pub(crate) fn eval_const_fn_call(
        &mut self,
        gid: GlobalId<'tcx>,
        ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
    ) -> InterpResult<'tcx> {
        trace!("eval_const_fn_call: {:?}", gid);

        let place = self.const_eval_raw(gid)?;
        let dest = ret.ok_or_else(|| err_ub!(Unreachable))?.0;

        self.copy_op(place.into(), dest)?;

        self.return_to_block(ret.map(|r| r.1))?;
        self.dump_place(*dest);
        Ok(())
    }
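
    // For illustration (hedged): a call such as
    //
    //     const fn answer() -> u32 { 42 }
    //     let x = answer();
    //
    // evaluated in a const context can be routed through `eval_const_fn_call`;
    // since the callee takes no (non-ZST) arguments, its result is identified by
    // a `GlobalId` and cached by the query system, so repeated calls do not
    // re-interpret the body.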

    fn drop_in_place(
        &mut self,
        place: PlaceTy<'tcx, M::PointerTag>,
        instance: ty::Instance<'tcx>,
        span: Span,
        target: mir::BasicBlock,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
        // We take the address of the object. This may well be unaligned, which is fine
        // for us here. However, unaligned accesses will probably make the actual drop
        // implementation fail -- a problem shared by rustc.
        let place = self.force_allocation(place)?;

        let (instance, place) = match place.layout.ty.kind {
            ty::Dynamic(..) => {
                // Dropping a trait object.
                self.unpack_dyn_trait(place)?
            }
            _ => (instance, place),
        };
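
        // For illustration (hedged): dropping e.g. a `Box<dyn std::any::Any>`
        // reaches this point with a `ty::Dynamic` place. `unpack_dyn_trait`
        // reads the vtable out of the fat pointer and swaps in the concrete
        // type's drop instance, so the call below runs the real drop glue.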

        let arg = ImmTy {
            imm: place.to_ref(),
            layout: self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
        };

        let ty = self.tcx.mk_unit(); // return type is ()
        let dest = MPlaceTy::dangling(self.layout_of(ty)?, self);

        self.eval_fn_call(
            FnVal::Instance(instance),
            span,
            Abi::Rust,
            &[arg.into()],
            Some((dest.into(), target)),
            unwind,
        )
    }
}