src/librustc_mir/transform/inline.rs (rustc 1.28.0~beta.14)
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Inlining pass for MIR functions

use rustc::hir;
use rustc::hir::CodegenFnAttrFlags;
use rustc::hir::def_id::DefId;

use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};

use rustc::mir::*;
use rustc::mir::visit::*;
use rustc::ty::{self, Instance, Ty, TyCtxt};
use rustc::ty::subst::{Subst, Substs};

use std::collections::VecDeque;
use std::iter;
use transform::{MirPass, MirSource};
use super::simplify::{remove_dead_blocks, CfgSimplifier};

use syntax::{attr};
use rustc_target::spec::abi::Abi;

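// Cost model used by `should_inline`: ordinary statements and terminators
// cost `INSTR_COST`; calls, asserts and drops of types that need dropping
// cost `CALL_PENALTY`; locals whose size cannot be computed are charged
// `UNKNOWN_SIZE_COST`. The total is compared against `DEFAULT_THRESHOLD`,
// or `HINT_THRESHOLD` when the callee carries an `#[inline]` hint (further
// adjusted for `#[cold]` callees and very small bodies). E.g. ten ordinary
// statements plus one non-intrinsic call alone already cost
// 10 * INSTR_COST + CALL_PENALTY = 75, over the default threshold of 50 but
// under the hinted threshold of 100.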
const DEFAULT_THRESHOLD: usize = 50;
const HINT_THRESHOLD: usize = 100;

const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;

const UNKNOWN_SIZE_COST: usize = 10;

pub struct Inline;

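// A candidate call for inlining: the resolved callee and its substitutions,
// the basic block whose terminator is the call, and the source info reused
// for statements and scopes introduced while inlining.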
#[derive(Copy, Clone, Debug)]
struct CallSite<'tcx> {
    callee: DefId,
    substs: &'tcx Substs<'tcx>,
    bb: BasicBlock,
    location: SourceInfo,
}

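// The pass is a no-op unless MIR optimizations are turned up, e.g. with
// `-Z mir-opt-level=2`.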
impl MirPass for Inline {
    fn run_pass<'a, 'tcx>(&self,
                          tcx: TyCtxt<'a, 'tcx, 'tcx>,
                          source: MirSource,
                          mir: &mut Mir<'tcx>) {
        if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
            Inliner { tcx, source }.run_pass(mir);
        }
    }
}

struct Inliner<'a, 'tcx: 'a> {
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    source: MirSource,
}

impl<'a, 'tcx> Inliner<'a, 'tcx> {
    fn run_pass(&self, caller_mir: &mut Mir<'tcx>) {
        // Keep a queue of callsites to try inlining on. We take
        // advantage of the fact that queries detect cycles here to
        // allow us to try and fetch the fully optimized MIR of a
        // call; if it succeeds, we can inline it and we know that
        // they do not call us. Otherwise, we just don't try to
        // inline.
        //
        // We use a queue so that we inline "broadly" before we inline
        // in depth. It is unclear if this is the best heuristic,
        // really, but that's true of all the heuristics in this
        // file. =)

        let mut callsites = VecDeque::new();

        let param_env = self.tcx.param_env(self.source.def_id);

        // Only do inlining into fn bodies.
        let id = self.tcx.hir.as_local_node_id(self.source.def_id).unwrap();
        let body_owner_kind = self.tcx.hir.body_owner_kind(id);
        if let (hir::BodyOwnerKind::Fn, None) = (body_owner_kind, self.source.promoted) {

            for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated() {
                // Don't inline calls that are in cleanup blocks.
                if bb_data.is_cleanup { continue; }

                // Only consider direct calls to functions
                let terminator = bb_data.terminator();
                if let TerminatorKind::Call {
                    func: Operand::Constant(ref f), .. } = terminator.kind {
                    if let ty::TyFnDef(callee_def_id, substs) = f.ty.sty {
                        if let Some(instance) = Instance::resolve(self.tcx,
                                                                  param_env,
                                                                  callee_def_id,
                                                                  substs) {
                            callsites.push_back(CallSite {
                                callee: instance.def_id(),
                                substs: instance.substs,
                                bb,
                                location: terminator.source_info
                            });
                        }
                    }
                }
            }
        } else {
            return;
        }

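        // Repeatedly drain the queue: each successful inline may expose new
        // callsites inside the freshly copied blocks, so keep going until a
        // full pass makes no further change.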
        let mut local_change;
        let mut changed = false;

        loop {
            local_change = false;
            while let Some(callsite) = callsites.pop_front() {
                debug!("checking whether to inline callsite {:?}", callsite);
                if !self.tcx.is_mir_available(callsite.callee) {
                    debug!("checking whether to inline callsite {:?} - MIR unavailable", callsite);
                    continue;
                }

                let callee_mir = match self.tcx.try_optimized_mir(callsite.location.span,
                                                                  callsite.callee) {
                    Ok(callee_mir) if self.should_inline(callsite, callee_mir) => {
                        self.tcx.subst_and_normalize_erasing_regions(
                            &callsite.substs,
                            param_env,
                            callee_mir,
                        )
                    }
                    Ok(_) => continue,

                    Err(mut bug) => {
                        // FIXME(#43542) shouldn't have to cancel an error
                        bug.cancel();
                        continue
                    }
                };

                let start = caller_mir.basic_blocks().len();
                debug!("attempting to inline callsite {:?} - mir={:?}", callsite, callee_mir);
                if !self.inline_call(callsite, caller_mir, callee_mir) {
                    debug!("attempting to inline callsite {:?} - failure", callsite);
                    continue;
                }
                debug!("attempting to inline callsite {:?} - success", callsite);

                // Add callsites from inlined function
                for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated().skip(start) {
                    // Only consider direct calls to functions
                    let terminator = bb_data.terminator();
                    if let TerminatorKind::Call {
                        func: Operand::Constant(ref f), .. } = terminator.kind {
                        if let ty::TyFnDef(callee_def_id, substs) = f.ty.sty {
                            // Don't inline the same function multiple times.
                            if callsite.callee != callee_def_id {
                                callsites.push_back(CallSite {
                                    callee: callee_def_id,
                                    substs,
                                    bb,
                                    location: terminator.source_info
                                });
                            }
                        }
                    }
                }

                local_change = true;
                changed = true;
            }

            if !local_change {
                break;
            }
        }

        // Simplify if we inlined anything.
        if changed {
            debug!("Running simplify cfg on {:?}", self.source);
            CfgSimplifier::new(caller_mir).simplify();
            remove_dead_blocks(caller_mir);
        }
    }

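    // Decide whether `callsite` is worth inlining by walking the callee MIR
    // and accumulating an estimated cost, then comparing it against a
    // threshold derived from the callee's `#[inline]`/`#[cold]` attributes
    // and its number of basic blocks. `#[inline(always)]` callees are inlined
    // regardless of the computed cost.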
    fn should_inline(&self,
                     callsite: CallSite<'tcx>,
                     callee_mir: &Mir<'tcx>)
                     -> bool
    {
        debug!("should_inline({:?})", callsite);
        let tcx = self.tcx;

        // Don't inline closures that have captures
        // FIXME: Handle closures better
        if callee_mir.upvar_decls.len() > 0 {
            debug!("    upvar decls present - not inlining");
            return false;
        }

        // Cannot inline generators which haven't been transformed yet
        if callee_mir.yield_ty.is_some() {
            debug!("    yield ty present - not inlining");
            return false;
        }

        // Do not inline the {u,i}128 lang items; codegen and const eval depend
        // on detecting calls to these lang items and intercepting them.
        if tcx.is_binop_lang_item(callsite.callee).is_some() {
            debug!("    not inlining 128bit integer lang item");
            return false;
        }

        let codegen_fn_attrs = tcx.codegen_fn_attrs(callsite.callee);

        let hinted = match codegen_fn_attrs.inline {
            // Just treat inline(always) as a hint for now,
            // there are cases that prevent inlining that we
            // need to check for first.
            attr::InlineAttr::Always => true,
            attr::InlineAttr::Never => {
                debug!("#[inline(never)] present - not inlining");
                return false
            }
            attr::InlineAttr::Hint => true,
            attr::InlineAttr::None => false,
        };

        // Only inline local functions if they would be eligible for cross-crate
        // inlining. This is to ensure that the final crate doesn't have MIR that
        // references unexported symbols.
        if callsite.callee.is_local() {
            if callsite.substs.types().count() == 0 && !hinted {
                debug!("    callee is an exported function - not inlining");
                return false;
            }
        }

        let mut threshold = if hinted {
            HINT_THRESHOLD
        } else {
            DEFAULT_THRESHOLD
        };

        // Significantly lower the threshold for inlining cold functions
        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
            threshold /= 5;
        }

        // Give a bonus to functions with a small number of blocks; we normally
        // have two or three blocks for even very small functions.
        if callee_mir.basic_blocks().len() <= 3 {
            threshold += threshold / 4;
        }
        debug!("    final inline threshold = {}", threshold);

        // FIXME: Give a bonus to functions with only a single caller

        let param_env = tcx.param_env(self.source.def_id);

        let mut first_block = true;
        let mut cost = 0;

        // Traverse the MIR manually so we can account for the effects of
        // inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = BitVector::new(callee_mir.basic_blocks().len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) { continue; }
            let blk = &callee_mir.basic_blocks()[bb];

            for stmt in &blk.statements {
                // Don't count StorageLive/StorageDead in the inlining cost.
                match stmt.kind {
                    StatementKind::StorageLive(_) |
                    StatementKind::StorageDead(_) |
                    StatementKind::Nop => {}
                    _ => cost += INSTR_COST
                }
            }
            let term = blk.terminator();
            let mut is_drop = false;
            match term.kind {
                TerminatorKind::Drop { ref location, target, unwind } |
                TerminatorKind::DropAndReplace { ref location, target, unwind, .. } => {
                    is_drop = true;
                    work_list.push(target);
                    // If the location doesn't actually need dropping, treat it like
                    // a regular goto.
                    let ty = location.ty(callee_mir, tcx).subst(tcx, callsite.substs);
                    let ty = ty.to_ty(tcx);
                    if ty.needs_drop(tcx, param_env) {
                        cost += CALL_PENALTY;
                        if let Some(unwind) = unwind {
                            work_list.push(unwind);
                        }
                    } else {
                        cost += INSTR_COST;
                    }
                }

                TerminatorKind::Unreachable |
                TerminatorKind::Call { destination: None, .. } if first_block => {
                    // If the function always diverges, don't inline
                    // unless the cost is zero
                    threshold = 0;
                }

                TerminatorKind::Call {func: Operand::Constant(ref f), .. } => {
                    if let ty::TyFnDef(def_id, _) = f.ty.sty {
                        // Don't give intrinsics the extra penalty for calls
                        let f = tcx.fn_sig(def_id);
                        if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
                            cost += INSTR_COST;
                        } else {
                            cost += CALL_PENALTY;
                        }
                    }
                }
                TerminatorKind::Assert { .. } => cost += CALL_PENALTY,
                _ => cost += INSTR_COST
            }

            if !is_drop {
                for &succ in term.successors() {
                    work_list.push(succ);
                }
            }

            first_block = false;
        }

        // Count up the cost of local variables and temps: if we know the size,
        // use that, otherwise charge a moderately-large dummy cost.

        let ptr_size = tcx.data_layout.pointer_size.bytes();

        for v in callee_mir.vars_and_temps_iter() {
            let v = &callee_mir.local_decls[v];
            let ty = v.ty.subst(tcx, callsite.substs);
            // Cost of the var is the size in machine-words, if we know
            // it.
            if let Some(size) = type_size_of(tcx, param_env.clone(), ty) {
                cost += (size / ptr_size) as usize;
            } else {
                cost += UNKNOWN_SIZE_COST;
            }
        }

        if let attr::InlineAttr::Always = codegen_fn_attrs.inline {
            debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
            true
        } else {
            if cost <= threshold {
                debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
                true
            } else {
                debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
                false
            }
        }
    }

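    // Splice the callee MIR into the caller: copy its scopes, locals, promoted
    // constants and basic blocks into `caller_mir` (remapping them via the
    // `Integrator` visitor), then replace the original `Call` terminator with a
    // `Goto` into the first copied block. Returns false if the call could not
    // be inlined (e.g. a diverging call with no destination).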
    fn inline_call(&self,
                   callsite: CallSite<'tcx>,
                   caller_mir: &mut Mir<'tcx>,
                   mut callee_mir: Mir<'tcx>) -> bool {
        let terminator = caller_mir[callsite.bb].terminator.take().unwrap();
        match terminator.kind {
            // FIXME: Handle inlining of diverging calls
            TerminatorKind::Call { args, destination: Some(destination), cleanup, .. } => {
                debug!("Inlined {:?} into {:?}", callsite.callee, self.source);

                let mut local_map = IndexVec::with_capacity(callee_mir.local_decls.len());
                let mut scope_map = IndexVec::with_capacity(callee_mir.source_scopes.len());
                let mut promoted_map = IndexVec::with_capacity(callee_mir.promoted.len());

                for mut scope in callee_mir.source_scopes.iter().cloned() {
                    if scope.parent_scope.is_none() {
                        scope.parent_scope = Some(callsite.location.scope);
                        scope.span = callee_mir.span;
                    }

                    scope.span = callsite.location.span;

                    let idx = caller_mir.source_scopes.push(scope);
                    scope_map.push(idx);
                }

                for loc in callee_mir.vars_and_temps_iter() {
                    let mut local = callee_mir.local_decls[loc].clone();

                    local.source_info.scope =
                        scope_map[local.source_info.scope];
                    local.source_info.span = callsite.location.span;
                    local.visibility_scope = scope_map[local.visibility_scope];

                    let idx = caller_mir.local_decls.push(local);
                    local_map.push(idx);
                }

                for p in callee_mir.promoted.iter().cloned() {
                    let idx = caller_mir.promoted.push(p);
                    promoted_map.push(idx);
                }

                // If the call is something like `a[*i] = f(i)`, where
                // `i : &mut usize`, then just duplicating the `a[*i]`
                // Place could result in two different locations if `f`
                // writes to `i`. To prevent this we need to create a temporary
                // borrow of the place and pass the destination as `*temp` instead.
                fn dest_needs_borrow(place: &Place) -> bool {
                    match *place {
                        Place::Projection(ref p) => {
                            match p.elem {
                                ProjectionElem::Deref |
                                ProjectionElem::Index(_) => true,
                                _ => dest_needs_borrow(&p.base)
                            }
                        }
                        // Static variables need a borrow because the callee
                        // might modify the same static.
                        Place::Static(_) => true,
                        _ => false
                    }
                }

                let dest = if dest_needs_borrow(&destination.0) {
                    debug!("Creating temp for return destination");
                    let dest = Rvalue::Ref(
                        self.tcx.types.re_erased,
                        BorrowKind::Mut { allow_two_phase_borrow: false },
                        destination.0);

                    let ty = dest.ty(caller_mir, self.tcx);

                    let temp = LocalDecl::new_temp(ty, callsite.location.span);

                    let tmp = caller_mir.local_decls.push(temp);
                    let tmp = Place::Local(tmp);

                    let stmt = Statement {
                        source_info: callsite.location,
                        kind: StatementKind::Assign(tmp.clone(), dest)
                    };
                    caller_mir[callsite.bb]
                        .statements.push(stmt);
                    tmp.deref()
                } else {
                    destination.0
                };

                let return_block = destination.1;

                // Copy the arguments if needed.
                let args: Vec<_> = self.make_call_args(args, &callsite, caller_mir);

                let bb_len = caller_mir.basic_blocks().len();
                let mut integrator = Integrator {
                    block_idx: bb_len,
                    args: &args,
                    local_map,
                    scope_map,
                    promoted_map,
                    _callsite: callsite,
                    destination: dest,
                    return_block,
                    cleanup_block: cleanup,
                    in_cleanup_block: false
                };

                for (bb, mut block) in callee_mir.basic_blocks_mut().drain_enumerated(..) {
                    integrator.visit_basic_block_data(bb, &mut block);
                    caller_mir.basic_blocks_mut().push(block);
                }

                let terminator = Terminator {
                    source_info: callsite.location,
                    kind: TerminatorKind::Goto { target: BasicBlock::new(bb_len) }
                };

                caller_mir[callsite.bb].terminator = Some(terminator);

                true
            }
            kind => {
                caller_mir[callsite.bb].terminator = Some(Terminator {
                    source_info: terminator.source_info,
                    kind,
                });
                false
            }
        }
    }

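    /// Maps each argument operand of the call to a caller `Local`, creating a
    /// temporary via `create_temp_if_necessary` when the operand is not already
    /// a plain temporary. For "rust-call" closure calls the argument tuple is
    /// additionally unpacked into one local per tuple field.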
    fn make_call_args(
        &self,
        args: Vec<Operand<'tcx>>,
        callsite: &CallSite<'tcx>,
        caller_mir: &mut Mir<'tcx>,
    ) -> Vec<Local> {
        let tcx = self.tcx;

        // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
        // The caller provides the arguments wrapped up in a tuple:
        //
        //     tuple_tmp = (a, b, c)
        //     Fn::call(closure_ref, tuple_tmp)
        //
        // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
        // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
        // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
        // a vector like
        //
        //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
        //
        // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
        // if we "spill" that into *another* temporary, so that we can map the argument
        // variable in the callee MIR directly to an argument variable on our side.
        // So we introduce temporaries like:
        //
        //     tmp0 = tuple_tmp.0
        //     tmp1 = tuple_tmp.1
        //     tmp2 = tuple_tmp.2
        //
        // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
        if tcx.is_closure(callsite.callee) {
            let mut args = args.into_iter();
            let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_mir);
            let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_mir);
            assert!(args.next().is_none());

            let tuple = Place::Local(tuple);
            let tuple_tys = if let ty::TyTuple(s) = tuple.ty(caller_mir, tcx).to_ty(tcx).sty {
                s
            } else {
                bug!("Closure arguments are not passed as a tuple");
            };

            // The `closure_ref` in our example above.
            let closure_ref_arg = iter::once(self_);

            // The `tmp0`, `tmp1`, and `tmp2` in our example above.
            let tuple_tmp_args =
                tuple_tys.iter().enumerate().map(|(i, ty)| {
                    // This is e.g. `tuple_tmp.0` in our example above.
                    let tuple_field = Operand::Move(tuple.clone().field(Field::new(i), ty));

                    // Spill to a local to make e.g. `tmp0`.
                    self.create_temp_if_necessary(tuple_field, callsite, caller_mir)
                });

            closure_ref_arg.chain(tuple_tmp_args).collect()
        } else {
            args.into_iter()
                .map(|a| self.create_temp_if_necessary(a, callsite, caller_mir))
                .collect()
        }
    }

    /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
    /// temporary `T` and an instruction `T = arg`, and returns `T`.
    fn create_temp_if_necessary(
        &self,
        arg: Operand<'tcx>,
        callsite: &CallSite<'tcx>,
        caller_mir: &mut Mir<'tcx>,
    ) -> Local {
        // FIXME: Analysis of the usage of the arguments to avoid
        // unnecessary temporaries.

        if let Operand::Move(Place::Local(local)) = arg {
            if caller_mir.local_kind(local) == LocalKind::Temp {
                // Reuse the operand if it's a temporary already
                return local;
            }
        }

        debug!("Creating temp for argument {:?}", arg);
        // Otherwise, create a temporary for the arg
        let arg = Rvalue::Use(arg);

        let ty = arg.ty(caller_mir, self.tcx);

        let arg_tmp = LocalDecl::new_temp(ty, callsite.location.span);
        let arg_tmp = caller_mir.local_decls.push(arg_tmp);

        let stmt = Statement {
            source_info: callsite.location,
            kind: StatementKind::Assign(Place::Local(arg_tmp), arg),
        };
        caller_mir[callsite.bb].statements.push(stmt);
        arg_tmp
    }
}

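// Size of `ty` in bytes, if its layout can be computed for the given `param_env`.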
fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                          param_env: ty::ParamEnv<'tcx>,
                          ty: Ty<'tcx>) -> Option<u64> {
    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
}

/**
 * Integrator.
 *
 * Integrates blocks from the callee function into the calling function.
 * Updates block indices, references to locals and other control flow
 * stuff.
 */
struct Integrator<'a, 'tcx: 'a> {
    block_idx: usize,
    args: &'a [Local],
    local_map: IndexVec<Local, Local>,
    scope_map: IndexVec<SourceScope, SourceScope>,
    promoted_map: IndexVec<Promoted, Promoted>,
    _callsite: CallSite<'tcx>,
    destination: Place<'tcx>,
    return_block: BasicBlock,
    cleanup_block: Option<BasicBlock>,
    in_cleanup_block: bool,
}

impl<'a, 'tcx> Integrator<'a, 'tcx> {
    fn update_target(&self, tgt: BasicBlock) -> BasicBlock {
        let new = BasicBlock::new(tgt.index() + self.block_idx);
        debug!("Updating target `{:?}`, new: `{:?}`", tgt, new);
        new
    }
}

impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
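    // Remap callee locals into the caller: the return place becomes the call's
    // destination local, the callee's argument locals become the temporaries
    // collected by `make_call_args`, and every other local goes through
    // `local_map`.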
    fn visit_local(&mut self,
                   local: &mut Local,
                   _ctxt: PlaceContext<'tcx>,
                   _location: Location) {
        if *local == RETURN_PLACE {
            match self.destination {
                Place::Local(l) => {
                    *local = l;
                    return;
                },
                ref place => bug!("Return place is {:?}, not local", place)
            }
        }
        let idx = local.index() - 1;
        if idx < self.args.len() {
            *local = self.args[idx];
            return;
        }
        *local = self.local_map[Local::new(idx - self.args.len())];
    }

    fn visit_place(&mut self,
                   place: &mut Place<'tcx>,
                   _ctxt: PlaceContext<'tcx>,
                   _location: Location) {
        if let Place::Local(RETURN_PLACE) = *place {
            // Return pointer; update the place itself
            *place = self.destination.clone();
        } else {
            self.super_place(place, _ctxt, _location);
        }
    }

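    // Remember whether the block being visited is a cleanup block, so that
    // `visit_terminator_kind` only adds new unwind edges (to the caller's
    // cleanup block) outside of cleanup blocks.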
    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;
    }

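    // Retarget every terminator at the relocated copies of the callee's blocks.
    // `Return` becomes a `Goto` to the block following the original call, and
    // `Resume` is redirected to the caller's cleanup block when there is one.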
    fn visit_terminator_kind(&mut self, block: BasicBlock,
                             kind: &mut TerminatorKind<'tcx>, loc: Location) {
        self.super_terminator_kind(block, kind, loc);

        match *kind {
            TerminatorKind::GeneratorDrop |
            TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target} => {
                *target = self.update_target(*target);
            }
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                for tgt in targets {
                    *tgt = self.update_target(*tgt);
                }
            }
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. } |
            TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
                *target = self.update_target(*target);
                if let Some(tgt) = *unwind {
                    *unwind = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this drop is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *unwind = self.cleanup_block;
                }
            }
            TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => {
                if let Some((_, ref mut tgt)) = *destination {
                    *tgt = self.update_target(*tgt);
                }
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this call is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
                *target = self.update_target(*target);
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.update_target(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this assert is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Return => {
                *kind = TerminatorKind::Goto { target: self.return_block };
            }
            TerminatorKind::Resume => {
                if let Some(tgt) = self.cleanup_block {
                    *kind = TerminatorKind::Goto { target: tgt }
                }
            }
            TerminatorKind::Abort => { }
            TerminatorKind::Unreachable => { }
            TerminatorKind::FalseEdges { ref mut real_target, ref mut imaginary_targets } => {
                *real_target = self.update_target(*real_target);
                for target in imaginary_targets {
                    *target = self.update_target(*target);
                }
            }
            TerminatorKind::FalseUnwind { real_target: _ , unwind: _ } =>
                // see the ordering of passes in the optimized_mir query.
                bug!("False unwinds should have been removed before inlining")
        }
    }

    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
        *scope = self.scope_map[*scope];
    }

    fn visit_literal(&mut self, literal: &mut Literal<'tcx>, loc: Location) {
        if let Literal::Promoted { ref mut index } = *literal {
            if let Some(p) = self.promoted_map.get(*index).cloned() {
                *index = p;
            }
        } else {
            self.super_literal(literal, loc);
        }
    }
}