//! Inlining pass for MIR functions

use rustc_attr::InlineAttr;
use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::Idx;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
use rustc_span::{hygiene::ExpnKind, ExpnData, Span};
use rustc_target::spec::abi::Abi;

use super::simplify::{remove_dead_blocks, CfgSimplifier};
use crate::transform::MirPass;

use std::iter;
use std::ops::{Range, RangeFrom};
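
// Cost model used by `check_mir_body` below. These are abstract cost units:
// plain statements cost INSTR_COST, calls and asserts cost CALL_PENALTY,
// landing pads and resumes add their own penalties, and each local whose size
// cannot be determined yet is charged UNKNOWN_SIZE_COST.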
const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const LANDINGPAD_PENALTY: usize = 50;
const RESUME_PENALTY: usize = 45;

const UNKNOWN_SIZE_COST: usize = 10;

pub struct Inline;

#[derive(Copy, Clone, Debug)]
struct CallSite<'tcx> {
    callee: Instance<'tcx>,
    fn_sig: ty::PolyFnSig<'tcx>,
    block: BasicBlock,
    target: Option<BasicBlock>,
    source_info: SourceInfo,
}

/// Returns true if MIR inlining is enabled in the current compilation session.
crate fn is_enabled(tcx: TyCtxt<'_>) -> bool {
    if let Some(enabled) = tcx.sess.opts.debugging_opts.inline_mir {
        return enabled;
    }

    tcx.sess.mir_opt_level() >= 3
}
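
// Note: `inline_mir` is the unstable `-Zinline-mir` override (flag name as of
// this compiler version; treat as an assumption). Without it, MIR inlining is
// active only at `-Zmir-opt-level=3` or higher.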

impl<'tcx> MirPass<'tcx> for Inline {
    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        if !is_enabled(tcx) {
            return;
        }

        let span = trace_span!("inline", body = %tcx.def_path_str(body.source.def_id()));
        let _guard = span.enter();
        if inline(tcx, body) {
            debug!("running simplify cfg on {:?}", body.source);
            CfgSimplifier::new(body).simplify();
            remove_dead_blocks(body);
        }
    }
}

fn inline(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
    let def_id = body.source.def_id();
    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());

    // Only do inlining into fn bodies.
    if !tcx.hir().body_owner_kind(hir_id).is_fn_or_closure() {
        return false;
    }
    if body.source.promoted.is_some() {
        return false;
    }

    let mut this = Inliner {
        tcx,
        param_env: tcx.param_env_reveal_all_normalized(body.source.def_id()),
        codegen_fn_attrs: tcx.codegen_fn_attrs(body.source.def_id()),
        hir_id,
        history: Vec::new(),
        changed: false,
    };
    let blocks = BasicBlock::new(0)..body.basic_blocks().next_index();
    this.process_blocks(body, blocks);
    this.changed
}

struct Inliner<'tcx> {
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    /// Caller codegen attributes.
    codegen_fn_attrs: &'tcx CodegenFnAttrs,
    /// Caller HirId.
    hir_id: hir::HirId,
    /// Stack of inlined Instances.
    history: Vec<ty::Instance<'tcx>>,
    /// Indicates that the caller body has been modified.
    changed: bool,
}

impl Inliner<'tcx> {
    fn process_blocks(&mut self, caller_body: &mut Body<'tcx>, blocks: Range<BasicBlock>) {
        for bb in blocks {
            let bb_data = &caller_body[bb];
            if bb_data.is_cleanup {
                continue;
            }

            let callsite = match self.resolve_callsite(caller_body, bb, bb_data) {
                None => continue,
                Some(it) => it,
            };

            let span = trace_span!("process_blocks", %callsite.callee, ?bb);
            let _guard = span.enter();

            match self.try_inlining(caller_body, &callsite) {
                Err(reason) => {
                    debug!("not-inlined {} [{}]", callsite.callee, reason);
                    continue;
                }
                Ok(new_blocks) => {
                    debug!("inlined {}", callsite.callee);
                    self.changed = true;
                    self.history.push(callsite.callee);
                    self.process_blocks(caller_body, new_blocks);
                    self.history.pop();
                }
            }
        }
    }
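
    // The `history` push around the recursive `process_blocks` call above
    // bounds repeated inlining: `check_mir_body` rejects any callee that is
    // already on this stack with "already inlined".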

    /// Attempts to inline a callsite into the caller body. When successful returns basic blocks
    /// containing the inlined body. Otherwise returns an error describing why inlining didn't take
    /// place.
    fn try_inlining(
        &self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
    ) -> Result<std::ops::Range<BasicBlock>, &'static str> {
        let callee_attrs = self.tcx.codegen_fn_attrs(callsite.callee.def_id());
        self.check_codegen_attributes(callsite, callee_attrs)?;
        self.check_mir_is_available(caller_body, &callsite.callee)?;
        let callee_body = self.tcx.instance_mir(callsite.callee.def);
        self.check_mir_body(callsite, callee_body, callee_attrs)?;

        if !self.tcx.consider_optimizing(|| {
            format!("Inline {:?} into {}", callee_body.span, callsite.callee)
        }) {
            return Err("optimization fuel exhausted");
        }

        let callee_body = callsite.callee.subst_mir_and_normalize_erasing_regions(
            self.tcx,
            self.param_env,
            callee_body.clone(),
        );

        let old_blocks = caller_body.basic_blocks().next_index();
        self.inline_call(caller_body, &callsite, callee_body);
        let new_blocks = old_blocks..caller_body.basic_blocks().next_index();

        Ok(new_blocks)
    }
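
    // `try_inlining` above is the whole decision pipeline: attribute checks,
    // MIR availability (cycle avoidance), the cost model, then substitution of
    // the callee body and splicing it into the caller's CFG.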

    fn check_mir_is_available(
        &self,
        caller_body: &Body<'tcx>,
        callee: &Instance<'tcx>,
    ) -> Result<(), &'static str> {
        if callee.def_id() == caller_body.source.def_id() {
            return Err("self-recursion");
        }

        match callee.def {
            InstanceDef::Item(_) => {
                // If there is no MIR available (either because it was not in metadata or
                // because it has no MIR because it's an extern function), then the inliner
                // won't cause cycles on this.
                if !self.tcx.is_mir_available(callee.def_id()) {
                    return Err("item MIR unavailable");
                }
            }
            // These have no own callable MIR.
            InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => {
                return Err("instance without MIR (intrinsic / virtual)");
            }
            // This cannot result in an immediate cycle since the callee MIR is a shim, which does
            // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
            // do not need to catch this here, we can wait until the inliner decides to continue
            // inlining a second time.
            InstanceDef::VtableShim(_)
            | InstanceDef::ReifyShim(_)
            | InstanceDef::FnPtrShim(..)
            | InstanceDef::ClosureOnceShim { .. }
            | InstanceDef::DropGlue(..)
            | InstanceDef::CloneShim(..) => return Ok(()),
        }

        if self.tcx.is_constructor(callee.def_id()) {
            trace!("constructors always have MIR");
            // Constructor functions cannot cause a query cycle.
            return Ok(());
        }

        if let Some(callee_def_id) = callee.def_id().as_local() {
            let callee_hir_id = self.tcx.hir().local_def_id_to_hir_id(callee_def_id);
            // Avoid inlining into generators,
            // since their `optimized_mir` is used for layout computation, which can
            // create a cycle, even when no attempt is made to inline the function
            // in the other direction.
            if caller_body.generator.is_some() {
                return Err("local generator (query cycle avoidance)");
            }

            // Avoid a cycle here by using `instance_mir` only if we have
            // a lower `HirId` than the callee. This ensures that the callee will
            // not inline us. This trick only works without incremental compilation.
            // So don't do it if that is enabled.
            if !self.tcx.dep_graph.is_fully_enabled() && self.hir_id < callee_hir_id {
                return Ok(());
            }

            // If we know for sure that the function we're calling will itself try to
            // call us, then we avoid inlining that function.
            if self
                .tcx
                .mir_callgraph_reachable((*callee, caller_body.source.def_id().expect_local()))
            {
                return Err("caller might be reachable from callee (query cycle avoidance)");
            }

            Ok(())
        } else {
            // This cannot result in an immediate cycle since the callee MIR is from another crate
            // and is already optimized. Any subsequent inlining may cause cycles, but we do
            // not need to catch this here, we can wait until the inliner decides to continue
            // inlining a second time.
            trace!("functions from other crates always have MIR");
            Ok(())
        }
    }

    fn resolve_callsite(
        &self,
        caller_body: &Body<'tcx>,
        bb: BasicBlock,
        bb_data: &BasicBlockData<'tcx>,
    ) -> Option<CallSite<'tcx>> {
        // Only consider direct calls to functions.
        let terminator = bb_data.terminator();
        if let TerminatorKind::Call { ref func, ref destination, .. } = terminator.kind {
            let func_ty = func.ty(caller_body, self.tcx);
            if let ty::FnDef(def_id, substs) = *func_ty.kind() {
                // To resolve an instance its substs have to be fully normalized.
                let substs = self.tcx.normalize_erasing_regions(self.param_env, substs);
                let callee =
                    Instance::resolve(self.tcx, self.param_env, def_id, substs).ok().flatten()?;

                if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
                    return None;
                }

                let fn_sig = self.tcx.fn_sig(def_id).subst(self.tcx, substs);

                return Some(CallSite {
                    callee,
                    fn_sig,
                    block: bb,
                    target: destination.map(|(_, target)| target),
                    source_info: terminator.source_info,
                });
            }
        }

        None
    }
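
    // A call terminator's `destination` is `Some((place, block))` only when the
    // call can return; otherwise `CallSite::target` ends up `None` and the
    // `Integrator` below rewrites the inlined `Return` into `Unreachable`.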

    /// Returns an error if inlining is not possible based on codegen attributes alone. A success
    /// indicates that inlining decision should be based on other criteria.
    fn check_codegen_attributes(
        &self,
        callsite: &CallSite<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        if let InlineAttr::Never = callee_attrs.inline {
            return Err("never inline hint");
        }

        // Only inline local functions if they would be eligible for cross-crate
        // inlining. This is to ensure that the final crate doesn't have MIR that
        // references unexported symbols.
        if callsite.callee.def_id().is_local() {
            let is_generic = callsite.callee.substs.non_erasable_generics().next().is_some();
            if !is_generic && !callee_attrs.requests_inline() {
                return Err("not exported");
            }
        }

        if callsite.fn_sig.c_variadic() {
            return Err("C variadic");
        }

        if callee_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
            return Err("naked");
        }

        if callee_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
            return Err("cold");
        }

        if callee_attrs.no_sanitize != self.codegen_fn_attrs.no_sanitize {
            return Err("incompatible sanitizer set");
        }

        if callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set {
            return Err("incompatible instruction set");
        }

        for feature in &callee_attrs.target_features {
            if !self.codegen_fn_attrs.target_features.contains(feature) {
                return Err("incompatible target feature");
            }
        }

        Ok(())
    }

    /// Returns inlining decision that is based on the examination of callee MIR body.
    /// Assumes that codegen attributes have been checked for compatibility already.
    #[instrument(level = "debug", skip(self, callee_body))]
    fn check_mir_body(
        &self,
        callsite: &CallSite<'tcx>,
        callee_body: &Body<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        let tcx = self.tcx;

        let mut threshold = if callee_attrs.requests_inline() {
            self.tcx.sess.opts.debugging_opts.inline_mir_hint_threshold.unwrap_or(100)
        } else {
            self.tcx.sess.opts.debugging_opts.inline_mir_threshold.unwrap_or(50)
        };

        // Give a bonus to functions with a small number of blocks;
        // we normally have two or three blocks for even
        // very small functions.
        if callee_body.basic_blocks().len() <= 3 {
            threshold += threshold / 4;
        }
        debug!("    final inline threshold = {}", threshold);

        // FIXME: Give a bonus to functions with only a single caller.
        let mut first_block = true;
        let mut cost = 0;
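
        // Worked example with default flags: a callee without an inline hint
        // starts at threshold 50; if it also has at most 3 basic blocks the
        // bonus above raises that to 50 + 50 / 4 = 62 cost units.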

        // Traverse the MIR manually so we can account for the effects of
        // inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) {
                continue;
            }
            let blk = &callee_body.basic_blocks()[bb];

            for stmt in &blk.statements {
                // Don't count StorageLive/StorageDead in the inlining cost.
                match stmt.kind {
                    StatementKind::StorageLive(_)
                    | StatementKind::StorageDead(_)
                    | StatementKind::Nop => {}
                    _ => cost += INSTR_COST,
                }
            }

            let term = blk.terminator();
            let mut is_drop = false;
            match term.kind {
                TerminatorKind::Drop { ref place, target, unwind }
                | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } => {
                    is_drop = true;
                    work_list.push(target);
                    // If the place doesn't actually need dropping, treat it like
                    // a regular goto.
                    let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
                    if ty.needs_drop(tcx, self.param_env) {
                        cost += CALL_PENALTY;
                        if let Some(unwind) = unwind {
                            cost += LANDINGPAD_PENALTY;
                            work_list.push(unwind);
                        }
                    } else {
                        cost += INSTR_COST;
                    }
                }

                TerminatorKind::Unreachable | TerminatorKind::Call { destination: None, .. }
                    if first_block =>
                {
                    // If the function always diverges, don't inline
                    // unless the cost is zero.
                    threshold = 0;
                }

                TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
                    if let ty::FnDef(def_id, substs) =
                        *callsite.callee.subst_mir(self.tcx, &f.literal.ty()).kind()
                    {
                        let substs = self.tcx.normalize_erasing_regions(self.param_env, substs);
                        if let Ok(Some(instance)) =
                            Instance::resolve(self.tcx, self.param_env, def_id, substs)
                        {
                            if callsite.callee.def_id() == instance.def_id() {
                                return Err("self-recursion");
                            } else if self.history.contains(&instance) {
                                return Err("already inlined");
                            }
                        }
                        // Don't give intrinsics the extra penalty for calls.
                        let f = tcx.fn_sig(def_id);
                        if f.abi() == Abi::RustIntrinsic || f.abi() == Abi::PlatformIntrinsic {
                            cost += INSTR_COST;
                        } else {
                            cost += CALL_PENALTY;
                        }
                    } else {
                        cost += CALL_PENALTY;
                    }
                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                    }
                }
                TerminatorKind::Assert { cleanup, .. } => {
                    cost += CALL_PENALTY;

                    if cleanup.is_some() {
                        cost += LANDINGPAD_PENALTY;
                    }
                }
                TerminatorKind::Resume => cost += RESUME_PENALTY,
                _ => cost += INSTR_COST,
            }

            if !is_drop {
                for &succ in term.successors() {
                    work_list.push(succ);
                }
            }

            first_block = false;
        }

        // Count up the cost of local variables and temps: if we know the size,
        // use that; otherwise use a moderately large dummy cost.

        let ptr_size = tcx.data_layout.pointer_size.bytes();

        for v in callee_body.vars_and_temps_iter() {
            let ty = callsite.callee.subst_mir(self.tcx, &callee_body.local_decls[v].ty);
            // Cost of the var is the size in machine-words, if we know
            // the size.
            if let Some(size) = type_size_of(tcx, self.param_env, ty) {
                cost += ((size + ptr_size - 1) / ptr_size) as usize;
            } else {
                cost += UNKNOWN_SIZE_COST;
            }
        }
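
        // Worked example: on a 64-bit target (ptr_size = 8) a 12-byte local
        // costs (12 + 8 - 1) / 8 = 2 units, while a local whose layout is
        // still unknown (e.g. generic) costs UNKNOWN_SIZE_COST = 10.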

        if let InlineAttr::Always = callee_attrs.inline {
            debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
            Ok(())
        } else if cost <= threshold {
            debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
            Ok(())
        } else {
            debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
            Err("cost above threshold")
        }
    }

    fn inline_call(
        &self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
        mut callee_body: Body<'tcx>,
    ) {
        let terminator = caller_body[callsite.block].terminator.take().unwrap();
        match terminator.kind {
            TerminatorKind::Call { args, destination, cleanup, .. } => {
                // If the call is something like `a[*i] = f(i)`, where
                // `i : &mut usize`, then just duplicating the `a[*i]`
                // Place could result in two different locations if `f`
                // writes to `i`. To prevent this we need to create a temporary
                // borrow of the place and pass the destination as `*temp` instead.
                fn dest_needs_borrow(place: Place<'_>) -> bool {
                    for elem in place.projection.iter() {
                        match elem {
                            ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
                            _ => {}
                        }
                    }

                    false
                }

                let dest = if let Some((destination_place, _)) = destination {
                    if dest_needs_borrow(destination_place) {
                        trace!("creating temp for return destination");
                        let dest = Rvalue::Ref(
                            self.tcx.lifetimes.re_erased,
                            BorrowKind::Mut { allow_two_phase_borrow: false },
                            destination_place,
                        );
                        let dest_ty = dest.ty(caller_body, self.tcx);
                        let temp = Place::from(self.new_call_temp(caller_body, &callsite, dest_ty));
                        caller_body[callsite.block].statements.push(Statement {
                            source_info: callsite.source_info,
                            kind: StatementKind::Assign(box (temp, dest)),
                        });
                        self.tcx.mk_place_deref(temp)
                    } else {
                        destination_place
                    }
                } else {
                    trace!("creating temp for return place");
                    Place::from(self.new_call_temp(caller_body, &callsite, callee_body.return_ty()))
                };

                // Copy the arguments if needed.
                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, &callee_body);

                let mut integrator = Integrator {
                    args: &args,
                    new_locals: Local::new(caller_body.local_decls.len())..,
                    new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
                    new_blocks: BasicBlock::new(caller_body.basic_blocks().len())..,
                    destination: dest,
                    return_block: callsite.target,
                    cleanup_block: cleanup,
                    in_cleanup_block: false,
                    tcx: self.tcx,
                    callsite_span: callsite.source_info.span,
                    body_span: callee_body.span,
                    always_live_locals: BitSet::new_filled(callee_body.local_decls.len()),
                };

                // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
                // (or existing ones, in a few special cases) in the caller.
                integrator.visit_body(&mut callee_body);

                for scope in &mut callee_body.source_scopes {
                    // FIXME(eddyb) move this into a `fn visit_scope_data` in `Integrator`.
                    if scope.parent_scope.is_none() {
                        let callsite_scope = &caller_body.source_scopes[callsite.source_info.scope];

                        // Attach the outermost callee scope as a child of the callsite
                        // scope, via the `parent_scope` and `inlined_parent_scope` chains.
                        scope.parent_scope = Some(callsite.source_info.scope);
                        assert_eq!(scope.inlined_parent_scope, None);
                        scope.inlined_parent_scope = if callsite_scope.inlined.is_some() {
                            Some(callsite.source_info.scope)
                        } else {
                            callsite_scope.inlined_parent_scope
                        };

                        // Mark the outermost callee scope as an inlined one.
                        assert_eq!(scope.inlined, None);
                        scope.inlined = Some((callsite.callee, callsite.source_info.span));
                    } else if scope.inlined_parent_scope.is_none() {
                        // Make it easy to find the scope with `inlined` set above.
                        scope.inlined_parent_scope =
                            Some(integrator.map_scope(OUTERMOST_SOURCE_SCOPE));
                    }
                }

                // If there are any locals without storage markers, give them storage only for the
                // duration of the call.
                for local in callee_body.vars_and_temps_iter() {
                    if integrator.always_live_locals.contains(local) {
                        let new_local = integrator.map_local(local);
                        caller_body[callsite.block].statements.push(Statement {
                            source_info: callsite.source_info,
                            kind: StatementKind::StorageLive(new_local),
                        });
                    }
                }
                if let Some(block) = callsite.target {
                    // To avoid repeated O(n) insert, push any new statements to the end and rotate
                    // them into place.
                    let mut n = 0;
                    for local in callee_body.vars_and_temps_iter().rev() {
                        if integrator.always_live_locals.contains(local) {
                            let new_local = integrator.map_local(local);
                            caller_body[block].statements.push(Statement {
                                source_info: callsite.source_info,
                                kind: StatementKind::StorageDead(new_local),
                            });
                            n += 1;
                        }
                    }
                    caller_body[block].statements.rotate_right(n);
                }
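
                // The `rotate_right(n)` above moves the `n` StorageDead
                // statements pushed at the end of the block to its front in a
                // single pass, instead of `n` separate `insert(0, ..)` calls.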

                // Insert all of the (mapped) parts of the callee body into the caller.
                caller_body.local_decls.extend(
                    // FIXME(eddyb) make `Range<Local>` iterable so that we can use
                    // `callee_body.local_decls.drain(callee_body.vars_and_temps())`
                    callee_body
                        .vars_and_temps_iter()
                        .map(|local| callee_body.local_decls[local].clone()),
                );
                caller_body.source_scopes.extend(callee_body.source_scopes.drain(..));
                caller_body.var_debug_info.extend(callee_body.var_debug_info.drain(..));
                caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..));

                caller_body[callsite.block].terminator = Some(Terminator {
                    source_info: callsite.source_info,
                    kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
                });

                // Copy only unevaluated constants from the callee_body into the caller_body.
                // Although we are only pushing `ConstKind::Unevaluated` consts to
                // `required_consts`, here we may not only have `ConstKind::Unevaluated`
                // because we are calling `subst_and_normalize_erasing_regions`.
                caller_body.required_consts.extend(
                    callee_body.required_consts.iter().copied().filter(|&ct| {
                        match ct.literal.const_for_ty() {
                            Some(ct) => matches!(ct.val, ConstKind::Unevaluated(_, _, _)),
                            None => true,
                        }
                    }),
                );
            }
            kind => bug!("unexpected terminator kind {:?}", kind),
        }
    }

    fn make_call_args(
        &self,
        args: Vec<Operand<'tcx>>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        callee_body: &Body<'tcx>,
    ) -> Vec<Local> {
        let tcx = self.tcx;

        // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
        // The caller provides the arguments wrapped up in a tuple:
        //
        //     tuple_tmp = (a, b, c)
        //     Fn::call(closure_ref, tuple_tmp)
        //
        // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
        // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
        // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
        // a vector like
        //
        //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
        //
        // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
        // if we "spill" that into *another* temporary, so that we can map the argument
        // variable in the callee MIR directly to an argument variable on our side.
        // So we introduce temporaries like:
        //
        //     tmp0 = tuple_tmp.0
        //     tmp1 = tuple_tmp.1
        //     tmp2 = tuple_tmp.2
        //
        // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
        if callsite.fn_sig.abi() == Abi::RustCall && callee_body.spread_arg.is_none() {
            let mut args = args.into_iter();
            let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            assert!(args.next().is_none());

            let tuple = Place::from(tuple);
            let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_body, tcx).ty.kind() {
                s
            } else {
                bug!("Closure arguments are not passed as a tuple");
            };

            // The `closure_ref` in our example above.
            let closure_ref_arg = iter::once(self_);

            // The `tmp0`, `tmp1`, and `tmp2` in our example above.
            let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
                // This is e.g., `tuple_tmp.0` in our example above.
                let tuple_field =
                    Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty.expect_ty()));

                // Spill to a local to make e.g., `tmp0`.
                self.create_temp_if_necessary(tuple_field, callsite, caller_body)
            });

            closure_ref_arg.chain(tuple_tmp_args).collect()
        } else {
            args.into_iter()
                .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))
                .collect()
        }
    }

    /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
    /// temporary `T` and an instruction `T = arg`, and returns `T`.
    fn create_temp_if_necessary(
        &self,
        arg: Operand<'tcx>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
    ) -> Local {
        // Reuse the operand if it is a moved temporary.
        if let Operand::Move(place) = &arg {
            if let Some(local) = place.as_local() {
                if caller_body.local_kind(local) == LocalKind::Temp {
                    return local;
                }
            }
        }

        // Otherwise, create a temporary for the argument.
        trace!("creating temp for argument {:?}", arg);
        let arg_ty = arg.ty(caller_body, self.tcx);
        let local = self.new_call_temp(caller_body, callsite, arg_ty);
        caller_body[callsite.block].statements.push(Statement {
            source_info: callsite.source_info,
            kind: StatementKind::Assign(box (Place::from(local), Rvalue::Use(arg))),
        });
        local
    }

    /// Introduces a new temporary into the caller body that is live for the duration of the call.
    fn new_call_temp(
        &self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
        ty: Ty<'tcx>,
    ) -> Local {
        let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span));

        caller_body[callsite.block].statements.push(Statement {
            source_info: callsite.source_info,
            kind: StatementKind::StorageLive(local),
        });

        if let Some(block) = callsite.target {
            caller_body[block].statements.insert(
                0,
                Statement {
                    source_info: callsite.source_info,
                    kind: StatementKind::StorageDead(local),
                },
            );
        }

        local
    }
}

fn type_size_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    ty: Ty<'tcx>,
) -> Option<u64> {
    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
}
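
// `layout_of` fails for types that are still too generic to have a layout; the
// caller falls back to `UNKNOWN_SIZE_COST` in that case.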

/**
 * Integrates blocks from the callee function into the calling function.
 *
 * Updates block indices, references to locals and other control flow
 * stuff.
 */
struct Integrator<'a, 'tcx> {
    args: &'a [Local],
    new_locals: RangeFrom<Local>,
    new_scopes: RangeFrom<SourceScope>,
    new_blocks: RangeFrom<BasicBlock>,
    destination: Place<'tcx>,
    return_block: Option<BasicBlock>,
    cleanup_block: Option<BasicBlock>,
    in_cleanup_block: bool,
    tcx: TyCtxt<'tcx>,
    callsite_span: Span,
    body_span: Span,
    always_live_locals: BitSet<Local>,
}

impl<'a, 'tcx> Integrator<'a, 'tcx> {
    fn map_local(&self, local: Local) -> Local {
        let new = if local == RETURN_PLACE {
            self.destination.local
        } else {
            let idx = local.index() - 1;
            if idx < self.args.len() {
                self.args[idx]
            } else {
                Local::new(self.new_locals.start.index() + (idx - self.args.len()))
            }
        };
        trace!("mapping local `{:?}` to `{:?}`", local, new);
        new
    }
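
    // Example: with 2 argument locals and `new_locals` starting at `_10`, the
    // callee's `_0` maps to the destination local, `_1` and `_2` map to the
    // argument temporaries, and `_5` (idx 4) maps to `_10 + (4 - 2)` = `_12`.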

    fn map_scope(&self, scope: SourceScope) -> SourceScope {
        let new = SourceScope::new(self.new_scopes.start.index() + scope.index());
        trace!("mapping scope `{:?}` to `{:?}`", scope, new);
        new
    }

    fn map_block(&self, block: BasicBlock) -> BasicBlock {
        let new = BasicBlock::new(self.new_blocks.start.index() + block.index());
        trace!("mapping block `{:?}` to `{:?}`", block, new);
        new
    }
}

impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
        *local = self.map_local(*local);
    }

    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
        *scope = self.map_scope(*scope);
    }
: &mut Span
) {
839 ExpnData
::default(ExpnKind
::Inlined
, *span
, self.tcx
.sess
.edition(), None
);
840 expn_data
.def_site
= self.body_span
;
841 // Make sure that all spans track the fact that they were inlined.
842 *span
= self.callsite_span
.fresh_expansion(expn_data
);
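
    // Wrapping every span in an `ExpnKind::Inlined` expansion lets diagnostics
    // and debuginfo distinguish inlined code from code written at the callsite.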

    fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
        for elem in place.projection {
            // FIXME: Make sure that return place is not used in an indexing projection, since it
            // won't be rebased as it is supposed to be.
            assert_ne!(ProjectionElem::Index(RETURN_PLACE), elem);
        }

        // If this is the `RETURN_PLACE`, we need to rebase any projections onto it.
        let dest_proj_len = self.destination.projection.len();
        if place.local == RETURN_PLACE && dest_proj_len > 0 {
            let mut projs = Vec::with_capacity(dest_proj_len + place.projection.len());
            projs.extend(self.destination.projection);
            projs.extend(place.projection);

            place.projection = self.tcx.intern_place_elems(&*projs);
        }
        // Handles integrating any locals that occur in the base
        // or projections.
        self.super_place(place, context, location)
    }
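
    // Example of the rebasing above: with destination `_3.0`, a callee access
    // to `(_0.1)` becomes projections `[.0, .1]`, and after local remapping the
    // place reads as `(_3.0).1`.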

    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;
    }

    fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
        self.super_retag(kind, place, loc);

        // We have to patch all inlined retags to be aware that they are no longer
        // happening on function entry.
        if *kind == RetagKind::FnEntry {
            *kind = RetagKind::Default;
        }
    }

    fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
        if let StatementKind::StorageLive(local) | StatementKind::StorageDead(local) =
            statement.kind
        {
            self.always_live_locals.remove(local);
        }
        self.super_statement(statement, location);
    }

    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) {
        // Don't try to modify the implicit `_0` access on return (`return` terminators are
        // replaced down below anyways).
        if !matches!(terminator.kind, TerminatorKind::Return) {
            self.super_terminator(terminator, loc);
        }

        match terminator.kind {
            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target } => {
                *target = self.map_block(*target);
            }
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                for tgt in targets.all_targets_mut() {
                    *tgt = self.map_block(*tgt);
                }
            }
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
            | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
                *target = self.map_block(*target);
                if let Some(tgt) = *unwind {
                    *unwind = Some(self.map_block(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this drop is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block.
                    *unwind = self.cleanup_block;
                }
            }
            TerminatorKind::Call { ref mut destination, ref mut cleanup, .. } => {
                if let Some((_, ref mut tgt)) = *destination {
                    *tgt = self.map_block(*tgt);
                }
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.map_block(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this call is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block.
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
                *target = self.map_block(*target);
                if let Some(tgt) = *cleanup {
                    *cleanup = Some(self.map_block(tgt));
                } else if !self.in_cleanup_block {
                    // Unless this assert is in a cleanup block, add an unwind edge to
                    // the original call's cleanup block.
                    *cleanup = self.cleanup_block;
                }
            }
            TerminatorKind::Return => {
                terminator.kind = if let Some(tgt) = self.return_block {
                    TerminatorKind::Goto { target: tgt }
                } else {
                    TerminatorKind::Unreachable
                };
            }
            TerminatorKind::Resume => {
                if let Some(tgt) = self.cleanup_block {
                    terminator.kind = TerminatorKind::Goto { target: tgt }
                }
            }
            TerminatorKind::Abort => {}
            TerminatorKind::Unreachable => {}
            TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
                *real_target = self.map_block(*real_target);
                *imaginary_target = self.map_block(*imaginary_target);
            }
            TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
            // see the ordering of passes in the optimized_mir query.
            {
                bug!("False unwinds should have been removed before inlining")
            }
            TerminatorKind::InlineAsm { ref mut destination, .. } => {
                if let Some(ref mut tgt) = *destination {
                    *tgt = self.map_block(*tgt);
                }
            }
        }
    }
}