//! Inlining pass for MIR functions
use crate::deref_separator::deref_finder;
use rustc_attr::InlineAttr;
use rustc_const_eval::transform::validate::equal_up_to_regions;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::Idx;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
use rustc_session::config::OptLevel;
use rustc_span::def_id::DefId;
use rustc_span::{hygiene::ExpnKind, ExpnData, LocalExpnId, Span};
use rustc_target::abi::VariantIdx;
use rustc_target::spec::abi::Abi;

use super::simplify::{remove_dead_blocks, CfgSimplifier};
use crate::MirPass;

use std::iter;
use std::ops::{Range, RangeFrom};
const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const LANDINGPAD_PENALTY: usize = 50;
const RESUME_PENALTY: usize = 45;

const UNKNOWN_SIZE_COST: usize = 10;
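
// For intuition, a rough worked example of this cost model (illustrative only;
// the exact accounting lives in `CostChecker` below): a callee consisting of
// three ordinary statements, one non-intrinsic call with a cleanup edge, and one
// `u64` temporary on a 64-bit target would cost
//     3 * INSTR_COST + CALL_PENALTY + LANDINGPAD_PENALTY + 1 = 91,
// which is above the default threshold of 50 but below the `#[inline]` hint
// threshold of 100 (see `check_mir_body`).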

pub struct Inline;

#[derive(Copy, Clone, Debug)]
struct CallSite<'tcx> {
    callee: Instance<'tcx>,
    fn_sig: ty::PolyFnSig<'tcx>,
    block: BasicBlock,
    target: Option<BasicBlock>,
    source_info: SourceInfo,
}

impl<'tcx> MirPass<'tcx> for Inline {
    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
        if let Some(enabled) = sess.opts.unstable_opts.inline_mir {
            return enabled;
        }

        match sess.mir_opt_level() {
            0 | 1 => false,
            2 => {
                (sess.opts.optimize == OptLevel::Default
                    || sess.opts.optimize == OptLevel::Aggressive)
                    && sess.opts.incremental == None
            }
            _ => true,
        }
    }
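
    // For example (illustrative): `-Zinline-mir=yes` forces this pass on regardless
    // of the optimization level, and `-Zinline-mir=no` disables it; with the flag
    // unset, a plain debug build (mir-opt-level=1) does not run it, while a
    // non-incremental `-O` build does.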

    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        let span = trace_span!("inline", body = %tcx.def_path_str(body.source.def_id()));
        let _guard = span.enter();
        if inline(tcx, body) {
            debug!("running simplify cfg on {:?}", body.source);
            CfgSimplifier::new(body).simplify();
            remove_dead_blocks(tcx, body);
            deref_finder(tcx, body);
        }
    }
}

fn inline<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
    let def_id = body.source.def_id().expect_local();

    // Only do inlining into fn bodies.
    if !tcx.hir().body_owner_kind(def_id).is_fn_or_closure() {
        return false;
    }
    if body.source.promoted.is_some() {
        return false;
    }
    // Avoid inlining into generators, since their `optimized_mir` is used for layout computation,
    // which can create a cycle, even when no attempt is made to inline the function in the other
    // direction.
    if body.generator.is_some() {
        return false;
    }

    let param_env = tcx.param_env_reveal_all_normalized(def_id);

    let mut this = Inliner {
        tcx,
        param_env,
        codegen_fn_attrs: tcx.codegen_fn_attrs(def_id),
        history: Vec::new(),
        changed: false,
    };
    let blocks = BasicBlock::new(0)..body.basic_blocks.next_index();
    this.process_blocks(body, blocks);
    this.changed
}

struct Inliner<'tcx> {
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    /// Caller codegen attributes.
    codegen_fn_attrs: &'tcx CodegenFnAttrs,
    /// Stack of inlined instances.
    /// We only check the `DefId` and not the substs because we want to
    /// avoid inlining cases of polymorphic recursion.
    /// The number of `DefId`s is finite, so checking history is enough
    /// to ensure that we do not loop endlessly while inlining.
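    ///
    /// For example (illustrative): a function like
    /// `fn rec<T>(x: T) { rec((x, x)) }` has the same `DefId` at every level of
    /// the recursion but ever-growing substs `T`, `(T, T)`, `((T, T), (T, T))`, ...,
    /// so comparing substs as well would never detect the cycle.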
    history: Vec<DefId>,
    /// Indicates that the caller body has been modified.
    changed: bool,
}

impl<'tcx> Inliner<'tcx> {
    fn process_blocks(&mut self, caller_body: &mut Body<'tcx>, blocks: Range<BasicBlock>) {
        for bb in blocks {
            let bb_data = &caller_body[bb];
            if bb_data.is_cleanup {
                continue;
            }

            let Some(callsite) = self.resolve_callsite(caller_body, bb, bb_data) else {
                continue;
            };

            let span = trace_span!("process_blocks", %callsite.callee, ?bb);
            let _guard = span.enter();

            match self.try_inlining(caller_body, &callsite) {
                Err(reason) => {
                    debug!("not-inlined {} [{}]", callsite.callee, reason);
                    continue;
                }
                Ok(new_blocks) => {
                    debug!("inlined {}", callsite.callee);
                    self.changed = true;

                    self.history.push(callsite.callee.def_id());
                    self.process_blocks(caller_body, new_blocks);
                    self.history.pop();
                }
            }
        }
    }

    /// Attempts to inline a callsite into the caller body. When successful, returns the basic
    /// blocks containing the inlined body. Otherwise returns an error describing why inlining
    /// didn't take place.
    fn try_inlining(
        &mut self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
    ) -> Result<std::ops::Range<BasicBlock>, &'static str> {
        let callee_attrs = self.tcx.codegen_fn_attrs(callsite.callee.def_id());
        self.check_codegen_attributes(callsite, callee_attrs)?;
        self.check_mir_is_available(caller_body, &callsite.callee)?;
        let callee_body = self.tcx.instance_mir(callsite.callee.def);
        self.check_mir_body(callsite, callee_body, callee_attrs)?;

        if !self.tcx.consider_optimizing(|| {
            format!("Inline {:?} into {:?}", callsite.callee, caller_body.source)
        }) {
            return Err("optimization fuel exhausted");
        }

        let Ok(callee_body) = callsite.callee.try_subst_mir_and_normalize_erasing_regions(
            self.tcx,
            self.param_env,
            callee_body.clone(),
        ) else {
            return Err("failed to normalize callee body");
        };

        // Check call signature compatibility.
        // Normally this shouldn't be required, but trait normalization failure can create a
        // validation failure.
        let terminator = caller_body[callsite.block].terminator.as_ref().unwrap();
        let TerminatorKind::Call { args, destination, .. } = &terminator.kind else { bug!() };
        let destination_ty = destination.ty(&caller_body.local_decls, self.tcx).ty;
        let output_type = callee_body.return_ty();
        if !equal_up_to_regions(self.tcx, self.param_env, output_type, destination_ty) {
            trace!(?output_type, ?destination_ty);
            return Err("failed to normalize return type");
        }
        if callsite.fn_sig.abi() == Abi::RustCall {
            let (arg_tuple, skipped_args) = match &args[..] {
                [arg_tuple] => (arg_tuple, 0),
                [_, arg_tuple] => (arg_tuple, 1),
                _ => bug!("Expected `rust-call` to have 1 or 2 args"),
            };

            let arg_tuple_ty = arg_tuple.ty(&caller_body.local_decls, self.tcx);
            let ty::Tuple(arg_tuple_tys) = arg_tuple_ty.kind() else {
                bug!("Closure arguments are not passed as a tuple");
            };

            for (arg_ty, input) in
                arg_tuple_tys.iter().zip(callee_body.args_iter().skip(skipped_args))
            {
                let input_type = callee_body.local_decls[input].ty;
                if !equal_up_to_regions(self.tcx, self.param_env, arg_ty, input_type) {
                    trace!(?arg_ty, ?input_type);
                    return Err("failed to normalize tuple argument type");
                }
            }
        } else {
            for (arg, input) in args.iter().zip(callee_body.args_iter()) {
                let input_type = callee_body.local_decls[input].ty;
                let arg_ty = arg.ty(&caller_body.local_decls, self.tcx);
                if !equal_up_to_regions(self.tcx, self.param_env, arg_ty, input_type) {
                    trace!(?arg_ty, ?input_type);
                    return Err("failed to normalize argument type");
                }
            }
        }

        let old_blocks = caller_body.basic_blocks.next_index();
        self.inline_call(caller_body, &callsite, callee_body);
        let new_blocks = old_blocks..caller_body.basic_blocks.next_index();

        Ok(new_blocks)
    }

    fn check_mir_is_available(
        &self,
        caller_body: &Body<'tcx>,
        callee: &Instance<'tcx>,
    ) -> Result<(), &'static str> {
        let caller_def_id = caller_body.source.def_id();
        let callee_def_id = callee.def_id();
        if callee_def_id == caller_def_id {
            return Err("self-recursion");
        }

        match callee.def {
            InstanceDef::Item(_) => {
                // If there is no MIR available (either because it was not in metadata or
                // because it has no MIR because it's an extern function), then the inliner
                // won't cause cycles on this.
                if !self.tcx.is_mir_available(callee_def_id) {
                    return Err("item MIR unavailable");
                }
            }
            // These have no callable MIR of their own.
            InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => {
                return Err("instance without MIR (intrinsic / virtual)");
            }
            // This cannot result in an immediate cycle since the callee MIR is a shim, which does
            // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
            // do not need to catch this here, we can wait until the inliner decides to continue
            // inlining a second time.
            InstanceDef::VTableShim(_)
            | InstanceDef::ReifyShim(_)
            | InstanceDef::FnPtrShim(..)
            | InstanceDef::ClosureOnceShim { .. }
            | InstanceDef::DropGlue(..)
            | InstanceDef::CloneShim(..) => return Ok(()),
        }

        if self.tcx.is_constructor(callee_def_id) {
            trace!("constructors always have MIR");
            // Constructor functions cannot cause a query cycle.
            return Ok(());
        }

        if callee_def_id.is_local() {
            // Avoid a cycle here by using `instance_mir` only if we have
            // a lower `DefPathHash` than the callee. This ensures that the callee will
            // not inline us. This trick even works with incremental compilation,
            // since `DefPathHash` is stable.
            if self.tcx.def_path_hash(caller_def_id).local_hash()
                < self.tcx.def_path_hash(callee_def_id).local_hash()
            {
                return Ok(());
            }

            // If we know for sure that the function we're calling will itself try to
            // call us, then we avoid inlining that function.
            if self.tcx.mir_callgraph_reachable((*callee, caller_def_id.expect_local())) {
                return Err("caller might be reachable from callee (query cycle avoidance)");
            }

            Ok(())
        } else {
            // This cannot result in an immediate cycle since the callee MIR is from another crate
            // and is already optimized. Any subsequent inlining may cause cycles, but we do
            // not need to catch this here, we can wait until the inliner decides to continue
            // inlining a second time.
            trace!("functions from other crates always have MIR");
            Ok(())
        }
    }

    fn resolve_callsite(
        &self,
        caller_body: &Body<'tcx>,
        bb: BasicBlock,
        bb_data: &BasicBlockData<'tcx>,
    ) -> Option<CallSite<'tcx>> {
        // Only consider direct calls to functions.
        let terminator = bb_data.terminator();
        if let TerminatorKind::Call { ref func, target, .. } = terminator.kind {
            let func_ty = func.ty(caller_body, self.tcx);
            if let ty::FnDef(def_id, substs) = *func_ty.kind() {
                // To resolve an instance, its substs have to be fully normalized.
                let substs = self.tcx.try_normalize_erasing_regions(self.param_env, substs).ok()?;
                let callee =
                    Instance::resolve(self.tcx, self.param_env, def_id, substs).ok().flatten()?;

                if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
                    return None;
                }

                if self.history.contains(&callee.def_id()) {
                    return None;
                }

                let fn_sig = self.tcx.bound_fn_sig(def_id).subst(self.tcx, substs);

                return Some(CallSite {
                    callee,
                    fn_sig,
                    block: bb,
                    target,
                    source_info: terminator.source_info,
                });
            }
        }

        None
    }

    /// Returns an error if inlining is not possible based on codegen attributes alone. A success
    /// indicates that the inlining decision should be based on other criteria.
    fn check_codegen_attributes(
        &self,
        callsite: &CallSite<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        match callee_attrs.inline {
            InlineAttr::Never => return Err("never inline hint"),
            InlineAttr::Always | InlineAttr::Hint => {}
            InlineAttr::None => {
                if self.tcx.sess.mir_opt_level() <= 2 {
                    return Err("at mir-opt-level=2, only #[inline] is inlined");
                }
            }
        }

        // Only inline local functions if they would be eligible for cross-crate
        // inlining. This is to ensure that the final crate doesn't have MIR that
        // references unexported symbols.
        if callsite.callee.def_id().is_local() {
            let is_generic = callsite.callee.substs.non_erasable_generics().next().is_some();
            if !is_generic && !callee_attrs.requests_inline() {
                return Err("not exported");
            }
        }

        if callsite.fn_sig.c_variadic() {
            return Err("C variadic");
        }

        if callee_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
            return Err("naked");
        }

        if callee_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
            return Err("cold");
        }

        if callee_attrs.no_sanitize != self.codegen_fn_attrs.no_sanitize {
            return Err("incompatible sanitizer set");
        }

        if callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set {
            return Err("incompatible instruction set");
        }

        for feature in &callee_attrs.target_features {
            if !self.codegen_fn_attrs.target_features.contains(feature) {
                return Err("incompatible target feature");
            }
        }

        Ok(())
    }

    /// Returns the inlining decision based on an examination of the callee MIR body.
    /// Assumes that codegen attributes have been checked for compatibility already.
    #[instrument(level = "debug", skip(self, callee_body))]
    fn check_mir_body(
        &self,
        callsite: &CallSite<'tcx>,
        callee_body: &Body<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        let tcx = self.tcx;

        let mut threshold = if callee_attrs.requests_inline() {
            self.tcx.sess.opts.unstable_opts.inline_mir_hint_threshold.unwrap_or(100)
        } else {
            self.tcx.sess.opts.unstable_opts.inline_mir_threshold.unwrap_or(50)
        };

        // Give a bonus to functions with a small number of blocks; we normally have
        // two or three blocks for even very small functions.
        if callee_body.basic_blocks.len() <= 3 {
            threshold += threshold / 4;
        }
        debug!("    final inline threshold = {}", threshold);
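
        // Worked example (illustrative): a `#[inline]` callee starts at
        // `inline_mir_hint_threshold` = 100; with at most three basic blocks it
        // gets a 25% bonus, for a final threshold of 125. An unannotated callee
        // starts at 50 and ends up at 62 after the same bonus.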

        // FIXME: Give a bonus to functions with only a single caller
        let diverges = matches!(
            callee_body.basic_blocks[START_BLOCK].terminator().kind,
            TerminatorKind::Unreachable | TerminatorKind::Call { target: None, .. }
        );
        if diverges && !matches!(callee_attrs.inline, InlineAttr::Always) {
            return Err("callee diverges unconditionally");
        }

        let mut checker = CostChecker {
            tcx: self.tcx,
            param_env: self.param_env,
            instance: callsite.callee,
            callee_body,
            cost: 0,
            validation: Ok(()),
        };

        // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = BitSet::new_empty(callee_body.basic_blocks.len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) {
                continue;
            }

            let blk = &callee_body.basic_blocks[bb];
            checker.visit_basic_block_data(bb, blk);

            let term = blk.terminator();
            if let TerminatorKind::Drop { ref place, target, unwind }
            | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } = term.kind
            {
                work_list.push(target);

                // If the place doesn't actually need dropping, treat it like a regular goto.
                let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
                if ty.needs_drop(tcx, self.param_env) && let Some(unwind) = unwind {
                    work_list.push(unwind);
                }
            } else {
                work_list.extend(term.successors())
            }
        }

        // Count up the cost of local variables and temps; if we know the size,
        // use that, otherwise we use a moderately-large dummy cost.
        for v in callee_body.vars_and_temps_iter() {
            checker.visit_local_decl(v, &callee_body.local_decls[v]);
        }

        // Abort if type validation found anything fishy.
        checker.validation?;

        let cost = checker.cost;
        if let InlineAttr::Always = callee_attrs.inline {
            debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
            Ok(())
        } else if cost <= threshold {
            debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
            Ok(())
        } else {
            debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
            Err("cost above threshold")
        }
    }

    fn inline_call(
        &self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
        mut callee_body: Body<'tcx>,
    ) {
        let terminator = caller_body[callsite.block].terminator.take().unwrap();
        match terminator.kind {
            TerminatorKind::Call { args, destination, cleanup, .. } => {
                // If the call is something like `a[*i] = f(i)`, where
                // `i: &mut usize`, then just duplicating the `a[*i]`
                // Place could result in two different locations if `f`
                // writes to `i`. To prevent this we need to create a temporary
                // borrow of the place and pass the destination as `*temp` instead.
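                //
                // Roughly (an illustrative MIR shape, not the exact output):
                //
                //     _tmp = &mut a[*i];   // borrow the destination once, up front
                //     ... inlined body of `f`, writing its return value to (*_tmp) ...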
                fn dest_needs_borrow(place: Place<'_>) -> bool {
                    for elem in place.projection.iter() {
                        match elem {
                            ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
                            _ => {}
                        }
                    }

                    false
                }

                let dest = if dest_needs_borrow(destination) {
                    trace!("creating temp for return destination");
                    let dest = Rvalue::Ref(
                        self.tcx.lifetimes.re_erased,
                        BorrowKind::Mut { allow_two_phase_borrow: false },
                        destination,
                    );
                    let dest_ty = dest.ty(caller_body, self.tcx);
                    let temp = Place::from(self.new_call_temp(caller_body, &callsite, dest_ty));
                    caller_body[callsite.block].statements.push(Statement {
                        source_info: callsite.source_info,
                        kind: StatementKind::Assign(Box::new((temp, dest))),
                    });
                    self.tcx.mk_place_deref(temp)
                } else {
                    destination
                };

                // Copy the arguments if needed.
                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, &callee_body);

                let mut expn_data = ExpnData::default(
                    ExpnKind::Inlined,
                    callsite.source_info.span,
                    self.tcx.sess.edition(),
                    None,
                    None,
                );
                expn_data.def_site = callee_body.span;
                let expn_data =
                    self.tcx.with_stable_hashing_context(|hcx| LocalExpnId::fresh(expn_data, hcx));
                let mut integrator = Integrator {
                    args: &args,
                    new_locals: Local::new(caller_body.local_decls.len())..,
                    new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
                    new_blocks: BasicBlock::new(caller_body.basic_blocks.len())..,
                    destination: dest,
                    callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(),
                    callsite,
                    cleanup_block: cleanup,
                    in_cleanup_block: false,
                    tcx: self.tcx,
                    expn_data,
                    always_live_locals: BitSet::new_filled(callee_body.local_decls.len()),
                };

                // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
                // (or existing ones, in a few special cases) in the caller.
                integrator.visit_body(&mut callee_body);

                // If there are any locals without storage markers, give them storage only for the
                // duration of the call.
                for local in callee_body.vars_and_temps_iter() {
                    if !callee_body.local_decls[local].internal
                        && integrator.always_live_locals.contains(local)
                    {
                        let new_local = integrator.map_local(local);
                        caller_body[callsite.block].statements.push(Statement {
                            source_info: callsite.source_info,
                            kind: StatementKind::StorageLive(new_local),
                        });
                    }
                }
                if let Some(block) = callsite.target {
                    // To avoid repeated O(n) insert, push any new statements to the end and rotate
                    // the slice once.
                    let mut n = 0;
                    for local in callee_body.vars_and_temps_iter().rev() {
                        if !callee_body.local_decls[local].internal
                            && integrator.always_live_locals.contains(local)
                        {
                            let new_local = integrator.map_local(local);
                            caller_body[block].statements.push(Statement {
                                source_info: callsite.source_info,
                                kind: StatementKind::StorageDead(new_local),
                            });
                            n += 1;
                        }
                    }
                    caller_body[block].statements.rotate_right(n);
                }

                // Insert all of the (mapped) parts of the callee body into the caller.
                caller_body.local_decls.extend(callee_body.drain_vars_and_temps());
                caller_body.source_scopes.extend(&mut callee_body.source_scopes.drain(..));
                caller_body.var_debug_info.append(&mut callee_body.var_debug_info);
                caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..));

                caller_body[callsite.block].terminator = Some(Terminator {
                    source_info: callsite.source_info,
                    kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
                });

                // Copy only unevaluated constants from the callee_body into the caller_body.
                // Although we are only pushing `ConstKind::Unevaluated` consts to
                // `required_consts`, here we may not only have `ConstKind::Unevaluated`
                // because we are calling `subst_and_normalize_erasing_regions`.
                caller_body.required_consts.extend(
                    callee_body.required_consts.iter().copied().filter(|&ct| match ct.literal {
                        ConstantKind::Ty(_) => {
                            bug!("should never encounter ty::UnevaluatedConst in `required_consts`")
                        }
                        ConstantKind::Val(..) | ConstantKind::Unevaluated(..) => true,
                    }),
                );
            }
            kind => bug!("unexpected terminator kind {:?}", kind),
        }
    }

    fn make_call_args(
        &self,
        args: Vec<Operand<'tcx>>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        callee_body: &Body<'tcx>,
    ) -> Vec<Local> {
        let tcx = self.tcx;

        // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
        // The caller provides the arguments wrapped up in a tuple:
        //
        //     tuple_tmp = (a, b, c)
        //     Fn::call(closure_ref, tuple_tmp)
        //
        // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
        // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
        // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
        // a vector like so:
        //
        //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
        //
        // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
        // if we "spill" that into *another* temporary, so that we can map the argument
        // variable in the callee MIR directly to an argument variable on our side.
        // So we introduce temporaries like:
        //
        //     tmp0 = tuple_tmp.0
        //     tmp1 = tuple_tmp.1
        //     tmp2 = tuple_tmp.2
        //
        // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
        if callsite.fn_sig.abi() == Abi::RustCall && callee_body.spread_arg.is_none() {
            let mut args = args.into_iter();
            let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            assert!(args.next().is_none());

            let tuple = Place::from(tuple);
            let ty::Tuple(tuple_tys) = tuple.ty(caller_body, tcx).ty.kind() else {
                bug!("Closure arguments are not passed as a tuple");
            };

            // The `closure_ref` in our example above.
            let closure_ref_arg = iter::once(self_);

            // The `tmp0`, `tmp1`, and `tmp2` in our example above.
            let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
                // This is e.g., `tuple_tmp.0` in our example above.
                let tuple_field = Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty));

                // Spill to a local to make e.g., `tmp0`.
                self.create_temp_if_necessary(tuple_field, callsite, caller_body)
            });

            closure_ref_arg.chain(tuple_tmp_args).collect()
        } else {
            args.into_iter()
                .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))
                .collect()
        }
    }

    /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
    /// temporary `T` and an instruction `T = arg`, and returns `T`.
    fn create_temp_if_necessary(
        &self,
        arg: Operand<'tcx>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
    ) -> Local {
        // Reuse the operand if it is a moved temporary.
        if let Operand::Move(place) = &arg
            && let Some(local) = place.as_local()
            && caller_body.local_kind(local) == LocalKind::Temp
        {
            return local;
        }

        // Otherwise, create a temporary for the argument.
        trace!("creating temp for argument {:?}", arg);
        let arg_ty = arg.ty(caller_body, self.tcx);
        let local = self.new_call_temp(caller_body, callsite, arg_ty);
        caller_body[callsite.block].statements.push(Statement {
            source_info: callsite.source_info,
            kind: StatementKind::Assign(Box::new((Place::from(local), Rvalue::Use(arg)))),
        });
        local
    }

    /// Introduces a new temporary into the caller body that is live for the duration of the call.
    fn new_call_temp(
        &self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
        ty: Ty<'tcx>,
    ) -> Local {
        let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span));

        caller_body[callsite.block].statements.push(Statement {
            source_info: callsite.source_info,
            kind: StatementKind::StorageLive(local),
        });

        if let Some(block) = callsite.target {
            caller_body[block].statements.insert(
                0,
                Statement {
                    source_info: callsite.source_info,
                    kind: StatementKind::StorageDead(local),
                },
            );
        }

        local
    }
}
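
// Illustrative MIR shape produced by `new_call_temp` (not the exact output): for
// a call in `bb0` returning to `bb1`, the new temporary `_t` brackets the call:
//
//     bb0: { ... StorageLive(_t); ... <call terminator> }
//     bb1: { StorageDead(_t); ... }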

fn type_size_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    ty: Ty<'tcx>,
) -> Option<u64> {
    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
}

/// Verify that the callee body is compatible with the caller.
///
/// This visitor mostly computes the inlining cost,
/// but also needs to verify that types match because of potential normalization failure.
struct CostChecker<'b, 'tcx> {
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    cost: usize,
    callee_body: &'b Body<'tcx>,
    instance: ty::Instance<'tcx>,
    validation: Result<(), &'static str>,
}

impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
        // Don't count StorageLive/StorageDead in the inlining cost.
        match statement.kind {
            StatementKind::StorageLive(_)
            | StatementKind::StorageDead(_)
            | StatementKind::Deinit(_)
            | StatementKind::Nop => {}
            _ => self.cost += INSTR_COST,
        }

        self.super_statement(statement, location);
    }

    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
        let tcx = self.tcx;
        match terminator.kind {
            TerminatorKind::Drop { ref place, unwind, .. }
            | TerminatorKind::DropAndReplace { ref place, unwind, .. } => {
                // If the place doesn't actually need dropping, treat it like a regular goto.
                let ty = self.instance.subst_mir(tcx, &place.ty(self.callee_body, tcx).ty);
                if ty.needs_drop(tcx, self.param_env) {
                    self.cost += CALL_PENALTY;
                    if unwind.is_some() {
                        self.cost += LANDINGPAD_PENALTY;
                    }
                } else {
                    self.cost += INSTR_COST;
                }
            }
            TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
                let fn_ty = self.instance.subst_mir(tcx, &f.literal.ty());
                self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
                    // Don't give intrinsics the extra penalty for calls.
                    INSTR_COST
                } else {
                    CALL_PENALTY
                };
                if cleanup.is_some() {
                    self.cost += LANDINGPAD_PENALTY;
                }
            }
            TerminatorKind::Assert { cleanup, .. } => {
                self.cost += CALL_PENALTY;
                if cleanup.is_some() {
                    self.cost += LANDINGPAD_PENALTY;
                }
            }
            TerminatorKind::Resume => self.cost += RESUME_PENALTY,
            TerminatorKind::InlineAsm { cleanup, .. } => {
                self.cost += INSTR_COST;
                if cleanup.is_some() {
                    self.cost += LANDINGPAD_PENALTY;
                }
            }
            _ => self.cost += INSTR_COST,
        }

        self.super_terminator(terminator, location);
    }

    /// Count up the cost of local variables and temps; if we know the size,
    /// use that, otherwise we use a moderately-large dummy cost.
    fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) {
        let tcx = self.tcx;
        let ptr_size = tcx.data_layout.pointer_size.bytes();

        let ty = self.instance.subst_mir(tcx, &local_decl.ty);
        // Cost of the var is the size in machine-words, if we know it.
        if let Some(size) = type_size_of(tcx, self.param_env, ty) {
            self.cost += ((size + ptr_size - 1) / ptr_size) as usize;
        } else {
            self.cost += UNKNOWN_SIZE_COST;
        }

        self.super_local_decl(local, local_decl)
    }
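
    // Worked example (illustrative): on a 64-bit target (`ptr_size` = 8), a local
    // of type `[u8; 20]` has `size` = 20, so it costs `(20 + 8 - 1) / 8` = 3
    // machine words; a local whose layout we cannot compute costs
    // UNKNOWN_SIZE_COST = 10.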

    /// This method duplicates code from MIR validation in an attempt to detect type mismatches due
    /// to normalization failure.
    fn visit_projection_elem(
        &mut self,
        local: Local,
        proj_base: &[PlaceElem<'tcx>],
        elem: PlaceElem<'tcx>,
        context: PlaceContext,
        location: Location,
    ) {
        if let ProjectionElem::Field(f, ty) = elem {
            let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
            let parent_ty = parent.ty(&self.callee_body.local_decls, self.tcx);
            let check_equal = |this: &mut Self, f_ty| {
                if !equal_up_to_regions(this.tcx, this.param_env, ty, f_ty) {
                    trace!(?ty, ?f_ty);
                    this.validation = Err("failed to normalize projection type");
                }
            };

            let kind = match parent_ty.ty.kind() {
                &ty::Opaque(def_id, substs) => {
                    self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
                }
                kind => kind,
            };

            match kind {
                ty::Tuple(fields) => {
                    let Some(f_ty) = fields.get(f.as_usize()) else {
                        self.validation = Err("malformed MIR");
                        return;
                    };
                    check_equal(self, *f_ty);
                }
                ty::Adt(adt_def, substs) => {
                    let var = parent_ty.variant_index.unwrap_or(VariantIdx::from_u32(0));
                    let Some(field) = adt_def.variant(var).fields.get(f.as_usize()) else {
                        self.validation = Err("malformed MIR");
                        return;
                    };
                    check_equal(self, field.ty(self.tcx, substs));
                }
                ty::Closure(_, substs) => {
                    let substs = substs.as_closure();
                    let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
                        self.validation = Err("malformed MIR");
                        return;
                    };
                    check_equal(self, f_ty);
                }
                &ty::Generator(def_id, substs, _) => {
                    let f_ty = if let Some(var) = parent_ty.variant_index {
                        let gen_body = if def_id == self.callee_body.source.def_id() {
                            self.callee_body
                        } else {
                            self.tcx.optimized_mir(def_id)
                        };

                        let Some(layout) = gen_body.generator_layout() else {
                            self.validation = Err("malformed MIR");
                            return;
                        };

                        let Some(&local) = layout.variant_fields[var].get(f) else {
                            self.validation = Err("malformed MIR");
                            return;
                        };

                        let Some(&f_ty) = layout.field_tys.get(local) else {
                            self.validation = Err("malformed MIR");
                            return;
                        };

                        f_ty
                    } else {
                        let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
                            self.validation = Err("malformed MIR");
                            return;
                        };

                        f_ty
                    };

                    check_equal(self, f_ty);
                }
                _ => self.validation = Err("malformed MIR"),
            }
        }

        self.super_projection_elem(local, proj_base, elem, context, location);
    }
}

/**
 * Integrates blocks from the callee function into the calling function.
 * Updates block indices, references to locals and other control flow
 * stuff.
*/
struct Integrator<'a, 'tcx> {
    args: &'a [Local],
    new_locals: RangeFrom<Local>,
    new_scopes: RangeFrom<SourceScope>,
    new_blocks: RangeFrom<BasicBlock>,
    destination: Place<'tcx>,
    callsite_scope: SourceScopeData<'tcx>,
    callsite: &'a CallSite<'tcx>,
    cleanup_block: Option<BasicBlock>,
    in_cleanup_block: bool,
    tcx: TyCtxt<'tcx>,
    expn_data: LocalExpnId,
    always_live_locals: BitSet<Local>,
}
<'_
, '_
> {
954 fn map_local(&self, local
: Local
) -> Local
{
955 let new
= if local
== RETURN_PLACE
{
956 self.destination
.local
958 let idx
= local
.index() - 1;
959 if idx
< self.args
.len() {
962 Local
::new(self.new_locals
.start
.index() + (idx
- self.args
.len()))
965 trace
!("mapping local `{:?}` to `{:?}`", local
, new
);
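
    // Illustrative example: suppose we inline `fn f(x: i32) -> i32` at a callsite
    // `_3 = f(move _4)` in a caller with locals `_0.._9`. Then the callee's `_0`
    // (the return place) maps to `_3`, the callee's argument `_1` maps to the
    // spilled argument local `_4`, and the callee's first temporary `_2` maps to
    // the fresh caller local `_10` (`new_locals.start`).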

    fn map_scope(&self, scope: SourceScope) -> SourceScope {
        let new = SourceScope::new(self.new_scopes.start.index() + scope.index());
        trace!("mapping scope `{:?}` to `{:?}`", scope, new);
        new
    }

    fn map_block(&self, block: BasicBlock) -> BasicBlock {
        let new = BasicBlock::new(self.new_blocks.start.index() + block.index());
        trace!("mapping block `{:?}` to `{:?}`", block, new);
        new
    }

    fn map_unwind(&self, unwind: Option<BasicBlock>) -> Option<BasicBlock> {
        if self.in_cleanup_block {
            if unwind.is_some() {
                bug!("cleanup on cleanup block");
            }
            return unwind;
        }

        match unwind {
            Some(target) => Some(self.map_block(target)),
            // Add an unwind edge to the original call's cleanup block.
            None => self.cleanup_block,
        }
    }
}
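
// Illustrative example of `map_unwind` (not exact MIR): if the original call
// terminator was `_3 = f() -> [return: bb2, unwind: bb7]`, then an inlined
// callee terminator `drop(_1) -> [return: bb1, unwind: None]` becomes
// `drop(_1) -> [return: bbN, unwind: bb7]` in the caller, picking up the call's
// cleanup block as its unwind edge.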

impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
        *local = self.map_local(*local);
    }

    fn visit_source_scope_data(&mut self, scope_data: &mut SourceScopeData<'tcx>) {
        self.super_source_scope_data(scope_data);
        if scope_data.parent_scope.is_none() {
            // Attach the outermost callee scope as a child of the callsite
            // scope, via the `parent_scope` and `inlined_parent_scope` chains.
            scope_data.parent_scope = Some(self.callsite.source_info.scope);
            assert_eq!(scope_data.inlined_parent_scope, None);
            scope_data.inlined_parent_scope = if self.callsite_scope.inlined.is_some() {
                Some(self.callsite.source_info.scope)
            } else {
                self.callsite_scope.inlined_parent_scope
            };

            // Mark the outermost callee scope as an inlined one.
            assert_eq!(scope_data.inlined, None);
            scope_data.inlined = Some((self.callsite.callee, self.callsite.source_info.span));
        } else if scope_data.inlined_parent_scope.is_none() {
            // Make it easy to find the scope with `inlined` set above.
            scope_data.inlined_parent_scope = Some(self.map_scope(OUTERMOST_SOURCE_SCOPE));
        }
    }

    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
        *scope = self.map_scope(*scope);
    }

    fn visit_span(&mut self, span: &mut Span) {
        // Make sure that all spans track the fact that they were inlined.
        *span = span.fresh_expansion(self.expn_data);
    }

    fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
        for elem in place.projection {
            // FIXME: Make sure that return place is not used in an indexing projection, since it
            // won't be rebased as it is supposed to be.
            assert_ne!(ProjectionElem::Index(RETURN_PLACE), elem);
        }

        // If this is the `RETURN_PLACE`, we need to rebase any projections onto it.
        let dest_proj_len = self.destination.projection.len();
        if place.local == RETURN_PLACE && dest_proj_len > 0 {
            let mut projs = Vec::with_capacity(dest_proj_len + place.projection.len());
            projs.extend(self.destination.projection);
            projs.extend(place.projection);

            place.projection = self.tcx.intern_place_elems(&*projs);
        }
        // Handles integrating any locals that occur in the base
        // or projections.
        self.super_place(place, context, location)
    }

    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;
    }

    fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
        self.super_retag(kind, place, loc);

        // We have to patch all inlined retags to be aware that they are no longer
        // happening on function entry.
        if *kind == RetagKind::FnEntry {
            *kind = RetagKind::Default;
        }
    }

    fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
        if let StatementKind::StorageLive(local) | StatementKind::StorageDead(local) =
            statement.kind
        {
            self.always_live_locals.remove(local);
        }
        self.super_statement(statement, location);
    }

    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) {
        // Don't try to modify the implicit `_0` access on return (`return` terminators are
        // replaced down below anyways).
        if !matches!(terminator.kind, TerminatorKind::Return) {
            self.super_terminator(terminator, loc);
        }

        match terminator.kind {
            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target } => {
                *target = self.map_block(*target);
            }
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                for tgt in targets.all_targets_mut() {
                    *tgt = self.map_block(*tgt);
                }
            }
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
            | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
                *target = self.map_block(*target);
                *unwind = self.map_unwind(*unwind);
            }
            TerminatorKind::Call { ref mut target, ref mut cleanup, .. } => {
                if let Some(ref mut tgt) = *target {
                    *tgt = self.map_block(*tgt);
                }
                *cleanup = self.map_unwind(*cleanup);
            }
            TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
                *target = self.map_block(*target);
                *cleanup = self.map_unwind(*cleanup);
            }
            TerminatorKind::Return => {
                terminator.kind = if let Some(tgt) = self.callsite.target {
                    TerminatorKind::Goto { target: tgt }
                } else {
                    TerminatorKind::Unreachable
                };
            }
            TerminatorKind::Resume => {
                if let Some(tgt) = self.cleanup_block {
                    terminator.kind = TerminatorKind::Goto { target: tgt }
                }
            }
            TerminatorKind::Abort => {}
            TerminatorKind::Unreachable => {}
            TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
                *real_target = self.map_block(*real_target);
                *imaginary_target = self.map_block(*imaginary_target);
            }
            TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
            // see the ordering of passes in the optimized_mir query.
            {
                bug!("False unwinds should have been removed before inlining")
            }
            TerminatorKind::InlineAsm { ref mut destination, ref mut cleanup, .. } => {
                if let Some(ref mut tgt) = *destination {
                    *tgt = self.map_block(*tgt);
                }
                *cleanup = self.map_unwind(*cleanup);
            }
        }
    }
}