//! Inlining pass for MIR functions
use crate::deref_separator::deref_finder;
use rustc_attr::InlineAttr;
use rustc_hir::def_id::DefId;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::Idx;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
use rustc_session::config::OptLevel;
use rustc_span::{hygiene::ExpnKind, ExpnData, LocalExpnId, Span};
use rustc_target::abi::VariantIdx;
use rustc_target::spec::abi::Abi;

use crate::simplify::{remove_dead_blocks, CfgSimplifier};
use crate::util;
use crate::MirPass;
use std::iter;
use std::ops::{Range, RangeFrom};

pub(crate) mod cycle;

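// Abstract cost units used by `CostChecker` when weighing a callee body
// against the inlining threshold. Plain statements are cheap; calls, asserts,
// and landing pads carry larger penalties.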
const INSTR_COST: usize = 5;
const CALL_PENALTY: usize = 25;
const LANDINGPAD_PENALTY: usize = 50;
const RESUME_PENALTY: usize = 45;

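/// Cost charged for a local whose size is unknown because its layout cannot
/// be computed.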
const UNKNOWN_SIZE_COST: usize = 10;

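/// Maximum depth of top-down inlining. Within this depth `process_blocks`
/// inlines at most one callsite per body; beyond it, inlined bodies are not
/// processed at all.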
const TOP_DOWN_DEPTH_LIMIT: usize = 5;

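/// The MIR inlining pass: inlines call terminators whose callee passes the
/// attribute, availability, and cost checks below, then re-simplifies the
/// caller's CFG if anything changed.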
pub struct Inline;

#[derive(Copy, Clone, Debug)]
struct CallSite<'tcx> {
    callee: Instance<'tcx>,
    fn_sig: ty::PolyFnSig<'tcx>,
    block: BasicBlock,
    target: Option<BasicBlock>,
    source_info: SourceInfo,
}

impl<'tcx> MirPass<'tcx> for Inline {
    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
        if let Some(enabled) = sess.opts.unstable_opts.inline_mir {
            return enabled;
        }

        match sess.mir_opt_level() {
            0 | 1 => false,
            2 => {
                (sess.opts.optimize == OptLevel::Default
                    || sess.opts.optimize == OptLevel::Aggressive)
                    && sess.opts.incremental == None
            }
            _ => true,
        }
    }

    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        let span = trace_span!("inline", body = %tcx.def_path_str(body.source.def_id()));
        let _guard = span.enter();
        if inline(tcx, body) {
            debug!("running simplify cfg on {:?}", body.source);
            CfgSimplifier::new(body).simplify();
            remove_dead_blocks(tcx, body);
            deref_finder(tcx, body);
        }
    }
}

fn inline<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
    let def_id = body.source.def_id().expect_local();

    // Only do inlining into fn bodies.
    if !tcx.hir().body_owner_kind(def_id).is_fn_or_closure() {
        return false;
    }
    if body.source.promoted.is_some() {
        return false;
    }
    // Avoid inlining into generators, since their `optimized_mir` is used for layout computation,
    // which can create a cycle, even when no attempt is made to inline the function in the other
    // direction.
    if body.generator.is_some() {
        return false;
    }

    let param_env = tcx.param_env_reveal_all_normalized(def_id);

    let mut this = Inliner {
        tcx,
        param_env,
        codegen_fn_attrs: tcx.codegen_fn_attrs(def_id),
        history: Vec::new(),
        changed: false,
    };
    let blocks = BasicBlock::new(0)..body.basic_blocks.next_index();
    this.process_blocks(body, blocks);
    this.changed
}

struct Inliner<'tcx> {
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    /// Caller codegen attributes.
    codegen_fn_attrs: &'tcx CodegenFnAttrs,
    /// Stack of inlined instances.
    /// We only check the `DefId` and not the substs because we want to
    /// avoid inlining cases of polymorphic recursion.
    /// The number of `DefId`s is finite, so checking history is enough
    /// to ensure that we do not loop endlessly while inlining.
    history: Vec<DefId>,
    /// Indicates that the caller body has been modified.
    changed: bool,
}

impl<'tcx> Inliner<'tcx> {
    fn process_blocks(&mut self, caller_body: &mut Body<'tcx>, blocks: Range<BasicBlock>) {
        // How many callsites in this body are we allowed to inline? We need to limit this in order
        // to prevent super-linear growth in MIR size.
        let inline_limit = match self.history.len() {
            0 => usize::MAX,
            1..=TOP_DOWN_DEPTH_LIMIT => 1,
            _ => return,
        };
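        // Depth 0 (the outermost, non-inlined body) is unrestricted; within the
        // depth limit a single successful inline ends processing of the current
        // body (see the early return below).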
        let mut inlined_count = 0;
        for bb in blocks {
            let bb_data = &caller_body[bb];
            if bb_data.is_cleanup {
                continue;
            }

            let Some(callsite) = self.resolve_callsite(caller_body, bb, bb_data) else {
                continue;
            };

            let span = trace_span!("process_blocks", %callsite.callee, ?bb);
            let _guard = span.enter();

            match self.try_inlining(caller_body, &callsite) {
                Err(reason) => {
                    debug!("not-inlined {} [{}]", callsite.callee, reason);
                    continue;
                }
                Ok(new_blocks) => {
                    debug!("inlined {}", callsite.callee);
                    self.changed = true;
                    inlined_count += 1;
                    if inlined_count == inline_limit {
                        return;
                    }
                    self.history.push(callsite.callee.def_id());
                    self.process_blocks(caller_body, new_blocks);
                    self.history.pop();
                }
            }
        }
    }

    /// Attempts to inline a callsite into the caller body. When successful returns basic blocks
    /// containing the inlined body. Otherwise returns an error describing why inlining didn't take
    /// place.
    fn try_inlining(
        &self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
    ) -> Result<std::ops::Range<BasicBlock>, &'static str> {
        let callee_attrs = self.tcx.codegen_fn_attrs(callsite.callee.def_id());
        self.check_codegen_attributes(callsite, callee_attrs)?;
        self.check_mir_is_available(caller_body, &callsite.callee)?;
        let callee_body = self.tcx.instance_mir(callsite.callee.def);
        self.check_mir_body(callsite, callee_body, callee_attrs)?;

        if !self.tcx.consider_optimizing(|| {
            format!("Inline {:?} into {:?}", callsite.callee, caller_body.source)
        }) {
            return Err("optimization fuel exhausted");
        }

        let Ok(callee_body) = callsite.callee.try_subst_mir_and_normalize_erasing_regions(
            self.tcx,
            self.param_env,
            callee_body.clone(),
        ) else {
            return Err("failed to normalize callee body");
        };

        // Check call signature compatibility.
        // Normally, this shouldn't be required, but trait normalization failure can create a
        // validation ICE.
        let terminator = caller_body[callsite.block].terminator.as_ref().unwrap();
        let TerminatorKind::Call { args, destination, .. } = &terminator.kind else { bug!() };
        let destination_ty = destination.ty(&caller_body.local_decls, self.tcx).ty;
        let output_type = callee_body.return_ty();
        if !util::is_subtype(self.tcx, self.param_env, output_type, destination_ty) {
            trace!(?output_type, ?destination_ty);
            return Err("failed to normalize return type");
        }
        if callsite.fn_sig.abi() == Abi::RustCall {
            let (arg_tuple, skipped_args) = match &args[..] {
                [arg_tuple] => (arg_tuple, 0),
                [_, arg_tuple] => (arg_tuple, 1),
                _ => bug!("Expected `rust-call` to have 1 or 2 args"),
            };

            let arg_tuple_ty = arg_tuple.ty(&caller_body.local_decls, self.tcx);
            let ty::Tuple(arg_tuple_tys) = arg_tuple_ty.kind() else {
                bug!("Closure arguments are not passed as a tuple");
            };

            for (arg_ty, input) in
                arg_tuple_tys.iter().zip(callee_body.args_iter().skip(skipped_args))
            {
                let input_type = callee_body.local_decls[input].ty;
                if !util::is_subtype(self.tcx, self.param_env, input_type, arg_ty) {
                    trace!(?arg_ty, ?input_type);
                    return Err("failed to normalize tuple argument type");
                }
            }
        } else {
            for (arg, input) in args.iter().zip(callee_body.args_iter()) {
                let input_type = callee_body.local_decls[input].ty;
                let arg_ty = arg.ty(&caller_body.local_decls, self.tcx);
                if !util::is_subtype(self.tcx, self.param_env, input_type, arg_ty) {
                    trace!(?arg_ty, ?input_type);
                    return Err("failed to normalize argument type");
                }
            }
        }

        let old_blocks = caller_body.basic_blocks.next_index();
        self.inline_call(caller_body, &callsite, callee_body);
        let new_blocks = old_blocks..caller_body.basic_blocks.next_index();

        Ok(new_blocks)
    }

    fn check_mir_is_available(
        &self,
        caller_body: &Body<'tcx>,
        callee: &Instance<'tcx>,
    ) -> Result<(), &'static str> {
        let caller_def_id = caller_body.source.def_id();
        let callee_def_id = callee.def_id();
        if callee_def_id == caller_def_id {
            return Err("self-recursion");
        }

        match callee.def {
            InstanceDef::Item(_) => {
                // If there is no MIR available (either because it was not in metadata or
                // because it has no MIR because it's an extern function), then the inliner
                // won't cause cycles on this.
                if !self.tcx.is_mir_available(callee_def_id) {
                    return Err("item MIR unavailable");
                }
            }
            // These have no callable MIR of their own.
            InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => {
                return Err("instance without MIR (intrinsic / virtual)");
            }
            // This cannot result in an immediate cycle since the callee MIR is a shim, which does
            // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
            // do not need to catch this here, we can wait until the inliner decides to continue
            // inlining a second time.
            InstanceDef::VTableShim(_)
            | InstanceDef::ReifyShim(_)
            | InstanceDef::FnPtrShim(..)
            | InstanceDef::ClosureOnceShim { .. }
            | InstanceDef::DropGlue(..)
            | InstanceDef::CloneShim(..) => return Ok(()),
        }

        if self.tcx.is_constructor(callee_def_id) {
            trace!("constructors always have MIR");
            // Constructor functions cannot cause a query cycle.
            return Ok(());
        }

        if callee_def_id.is_local() {
            // Avoid a cycle here by only using `instance_mir` if we have
            // a lower `DefPathHash` than the callee. This ensures that the callee will
            // not inline us. This trick even works with incremental compilation,
            // since `DefPathHash` is stable.
            if self.tcx.def_path_hash(caller_def_id).local_hash()
                < self.tcx.def_path_hash(callee_def_id).local_hash()
            {
                return Ok(());
            }

            // If we know for sure that the function we're calling will itself try to
            // call us, then we avoid inlining that function.
            if self.tcx.mir_callgraph_reachable((*callee, caller_def_id.expect_local())) {
                return Err("caller might be reachable from callee (query cycle avoidance)");
            }

            Ok(())
        } else {
            // This cannot result in an immediate cycle since the callee MIR is from another crate
            // and is already optimized. Any subsequent inlining may cause cycles, but we do
            // not need to catch this here, we can wait until the inliner decides to continue
            // inlining a second time.
            trace!("functions from other crates always have MIR");
            Ok(())
        }
    }

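    /// Finds a direct call in `bb_data`'s terminator and, if the callee resolves
    /// to a concrete instance worth considering (not an intrinsic or virtual
    /// call, and not already on the inlining stack), packages it as a `CallSite`.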
    fn resolve_callsite(
        &self,
        caller_body: &Body<'tcx>,
        bb: BasicBlock,
        bb_data: &BasicBlockData<'tcx>,
    ) -> Option<CallSite<'tcx>> {
        // Only consider direct calls to functions.
        let terminator = bb_data.terminator();
        if let TerminatorKind::Call { ref func, target, fn_span, .. } = terminator.kind {
            let func_ty = func.ty(caller_body, self.tcx);
            if let ty::FnDef(def_id, substs) = *func_ty.kind() {
                // To resolve an instance, its substs have to be fully normalized.
                let substs = self.tcx.try_normalize_erasing_regions(self.param_env, substs).ok()?;
                let callee =
                    Instance::resolve(self.tcx, self.param_env, def_id, substs).ok().flatten()?;

                if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
                    return None;
                }

                if self.history.contains(&callee.def_id()) {
                    return None;
                }

                let fn_sig = self.tcx.bound_fn_sig(def_id).subst(self.tcx, substs);
                let source_info = SourceInfo { span: fn_span, ..terminator.source_info };

                return Some(CallSite { callee, fn_sig, block: bb, target, source_info });
            }
        }

        None
    }

    /// Returns an error if inlining is not possible based on codegen attributes alone. A success
    /// indicates that the inlining decision should be based on other criteria.
    fn check_codegen_attributes(
        &self,
        callsite: &CallSite<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        match callee_attrs.inline {
            InlineAttr::Never => return Err("never inline hint"),
            InlineAttr::Always | InlineAttr::Hint => {}
            InlineAttr::None => {
                if self.tcx.sess.mir_opt_level() <= 2 {
                    return Err("at mir-opt-level=2, only #[inline] is inlined");
                }
            }
        }

        // Only inline local functions if they would be eligible for cross-crate
        // inlining. This is to ensure that the final crate doesn't have MIR that
        // references unexported symbols.
        if callsite.callee.def_id().is_local() {
            let is_generic = callsite.callee.substs.non_erasable_generics().next().is_some();
            if !is_generic && !callee_attrs.requests_inline() {
                return Err("not exported");
            }
        }

        if callsite.fn_sig.c_variadic() {
            return Err("C variadic");
        }

        if callee_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
            return Err("cold");
        }

        if callee_attrs.no_sanitize != self.codegen_fn_attrs.no_sanitize {
            return Err("incompatible sanitizer set");
        }

        // Two functions are compatible if the callee has no attribute (meaning
        // that it's codegen agnostic), or sets an attribute that is identical
        // to this function's attribute.
        if callee_attrs.instruction_set.is_some()
            && callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set
        {
            return Err("incompatible instruction set");
        }

        for feature in &callee_attrs.target_features {
            if !self.codegen_fn_attrs.target_features.contains(feature) {
                return Err("incompatible target feature");
            }
        }

        Ok(())
    }

    /// Returns the inlining decision based on an examination of the callee MIR body.
    /// Assumes that codegen attributes have been checked for compatibility already.
    #[instrument(level = "debug", skip(self, callee_body))]
    fn check_mir_body(
        &self,
        callsite: &CallSite<'tcx>,
        callee_body: &Body<'tcx>,
        callee_attrs: &CodegenFnAttrs,
    ) -> Result<(), &'static str> {
        let tcx = self.tcx;

        let mut threshold = if callee_attrs.requests_inline() {
            self.tcx.sess.opts.unstable_opts.inline_mir_hint_threshold.unwrap_or(100)
        } else {
            self.tcx.sess.opts.unstable_opts.inline_mir_threshold.unwrap_or(50)
        };

        // Give a bonus to functions with a small number of blocks;
        // we normally have two or three blocks for even
        // very small functions.
        if callee_body.basic_blocks.len() <= 3 {
            threshold += threshold / 4;
        }
        debug!(" final inline threshold = {}", threshold);

        // FIXME: Give a bonus to functions with only a single caller.
        let diverges = matches!(
            callee_body.basic_blocks[START_BLOCK].terminator().kind,
            TerminatorKind::Unreachable | TerminatorKind::Call { target: None, .. }
        );
        if diverges && !matches!(callee_attrs.inline, InlineAttr::Always) {
            return Err("callee diverges unconditionally");
        }

        let mut checker = CostChecker {
            tcx: self.tcx,
            param_env: self.param_env,
            instance: callsite.callee,
            callee_body,
            cost: 0,
            validation: Ok(()),
        };

        // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
        let mut work_list = vec![START_BLOCK];
        let mut visited = BitSet::new_empty(callee_body.basic_blocks.len());
        while let Some(bb) = work_list.pop() {
            if !visited.insert(bb.index()) {
                continue;
            }

            let blk = &callee_body.basic_blocks[bb];
            checker.visit_basic_block_data(bb, blk);

            let term = blk.terminator();
            if let TerminatorKind::Drop { ref place, target, unwind }
            | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } = term.kind
            {
                work_list.push(target);

                // If the place doesn't actually need dropping, treat it like a regular goto.
                let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
                if ty.needs_drop(tcx, self.param_env) && let Some(unwind) = unwind {
                    work_list.push(unwind);
                }
            } else if callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set
                && matches!(term.kind, TerminatorKind::InlineAsm { .. })
            {
                // During the attribute checking stage we allow a callee with no
                // instruction_set assigned to count as compatible with a function that does
                // assign one. However, during this stage we require an exact match when any
                // inline-asm is detected. LLVM will still possibly do an inline later on
                // if the no-attribute function ends up with the same instruction set anyway.
                return Err("Cannot move inline-asm across instruction sets");
            } else {
                work_list.extend(term.successors())
            }
        }

        // Count up the cost of local variables and temps; if we know the size,
        // use that, otherwise we use a moderately-large dummy cost.
        for v in callee_body.vars_and_temps_iter() {
            checker.visit_local_decl(v, &callee_body.local_decls[v]);
        }

        // Abort if type validation found anything fishy.
        checker.validation?;

        let cost = checker.cost;
        if let InlineAttr::Always = callee_attrs.inline {
            debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
            Ok(())
        } else if cost <= threshold {
            debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
            Ok(())
        } else {
            debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
            Err("cost above threshold")
        }
    }

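    /// Splices the substituted callee body into the caller at `callsite`:
    /// arguments are spilled into temporaries, the callee's locals, scopes, and
    /// blocks are remapped by `Integrator`, and the `Call` terminator is
    /// replaced with a `Goto` to the inlined entry block.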
    fn inline_call(
        &self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
        mut callee_body: Body<'tcx>,
    ) {
        let terminator = caller_body[callsite.block].terminator.take().unwrap();
        match terminator.kind {
            TerminatorKind::Call { args, destination, cleanup, .. } => {
                // If the call is something like `a[*i] = f(i)`, where
                // `i : &mut usize`, then just duplicating the `a[*i]`
                // Place could result in two different locations if `f`
                // writes to `i`. To prevent this we need to create a temporary
                // borrow of the place and pass the destination as `*temp` instead.
                fn dest_needs_borrow(place: Place<'_>) -> bool {
                    for elem in place.projection.iter() {
                        match elem {
                            ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
                            _ => {}
                        }
                    }

                    false
                }

                let dest = if dest_needs_borrow(destination) {
                    trace!("creating temp for return destination");
                    let dest = Rvalue::Ref(
                        self.tcx.lifetimes.re_erased,
                        BorrowKind::Mut { allow_two_phase_borrow: false },
                        destination,
                    );
                    let dest_ty = dest.ty(caller_body, self.tcx);
                    let temp = Place::from(self.new_call_temp(caller_body, &callsite, dest_ty));
                    caller_body[callsite.block].statements.push(Statement {
                        source_info: callsite.source_info,
                        kind: StatementKind::Assign(Box::new((temp, dest))),
                    });
                    self.tcx.mk_place_deref(temp)
                } else {
                    destination
                };

                // Always create a local to hold the destination, as `RETURN_PLACE` may appear
                // where a full `Place` is not allowed.
                let (remap_destination, destination_local) = if let Some(d) = dest.as_local() {
                    (false, d)
                } else {
                    (
                        true,
                        self.new_call_temp(
                            caller_body,
                            &callsite,
                            destination.ty(caller_body, self.tcx).ty,
                        ),
                    )
                };

                // Copy the arguments if needed.
                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, &callee_body);

                let mut expn_data = ExpnData::default(
                    ExpnKind::Inlined,
                    callsite.source_info.span,
                    self.tcx.sess.edition(),
                    None,
                    None,
                );
                expn_data.def_site = callee_body.span;
                let expn_data =
                    self.tcx.with_stable_hashing_context(|hcx| LocalExpnId::fresh(expn_data, hcx));
                let mut integrator = Integrator {
                    args: &args,
                    new_locals: Local::new(caller_body.local_decls.len())..,
                    new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
                    new_blocks: BasicBlock::new(caller_body.basic_blocks.len())..,
                    destination: destination_local,
                    callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(),
                    callsite,
                    cleanup_block: cleanup,
                    in_cleanup_block: false,
                    tcx: self.tcx,
                    expn_data,
                    always_live_locals: BitSet::new_filled(callee_body.local_decls.len()),
                };

                // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
                // (or existing ones, in a few special cases) in the caller.
                integrator.visit_body(&mut callee_body);

                // If there are any locals without storage markers, give them storage only for the
                // duration of the call.
                for local in callee_body.vars_and_temps_iter() {
                    if !callee_body.local_decls[local].internal
                        && integrator.always_live_locals.contains(local)
                    {
                        let new_local = integrator.map_local(local);
                        caller_body[callsite.block].statements.push(Statement {
                            source_info: callsite.source_info,
                            kind: StatementKind::StorageLive(new_local),
                        });
                    }
                }
                if let Some(block) = callsite.target {
                    // To avoid repeated O(n) insert, push any new statements to the end and rotate
                    // the slice once.
                    let mut n = 0;
                    if remap_destination {
                        caller_body[block].statements.push(Statement {
                            source_info: callsite.source_info,
                            kind: StatementKind::Assign(Box::new((
                                dest,
                                Rvalue::Use(Operand::Move(destination_local.into())),
                            ))),
                        });
                        n += 1;
                    }
                    for local in callee_body.vars_and_temps_iter().rev() {
                        if !callee_body.local_decls[local].internal
                            && integrator.always_live_locals.contains(local)
                        {
                            let new_local = integrator.map_local(local);
                            caller_body[block].statements.push(Statement {
                                source_info: callsite.source_info,
                                kind: StatementKind::StorageDead(new_local),
                            });
                            n += 1;
                        }
                    }
                    caller_body[block].statements.rotate_right(n);
                }

                // Insert all of the (mapped) parts of the callee body into the caller.
                caller_body.local_decls.extend(callee_body.drain_vars_and_temps());
                caller_body.source_scopes.extend(&mut callee_body.source_scopes.drain(..));
                caller_body.var_debug_info.append(&mut callee_body.var_debug_info);
                caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..));

                caller_body[callsite.block].terminator = Some(Terminator {
                    source_info: callsite.source_info,
                    kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
                });

                // Copy only unevaluated constants from the callee_body into the caller_body.
                // Although we are only pushing `ConstKind::Unevaluated` consts to
                // `required_consts`, here we may not only have `ConstKind::Unevaluated`
                // because we are calling `subst_and_normalize_erasing_regions`.
                caller_body.required_consts.extend(
                    callee_body.required_consts.iter().copied().filter(|&ct| match ct.literal {
                        ConstantKind::Ty(_) => {
                            bug!("should never encounter ty::UnevaluatedConst in `required_consts`")
                        }
                        ConstantKind::Val(..) | ConstantKind::Unevaluated(..) => true,
                    }),
                );
            }
            kind => bug!("unexpected terminator kind {:?}", kind),
        }
    }

    fn make_call_args(
        &self,
        args: Vec<Operand<'tcx>>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
        callee_body: &Body<'tcx>,
    ) -> Vec<Local> {
        let tcx = self.tcx;

        // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
        // The caller provides the arguments wrapped up in a tuple:
        //
        //     tuple_tmp = (a, b, c)
        //     Fn::call(closure_ref, tuple_tmp)
        //
        // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
        // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
        // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
        // a vector like
        //
        //     [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
        //
        // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
        // if we "spill" that into *another* temporary, so that we can map the argument
        // variable in the callee MIR directly to an argument variable on our side.
        // So we introduce temporaries like:
        //
        //     tmp0 = tuple_tmp.0
        //     tmp1 = tuple_tmp.1
        //     tmp2 = tuple_tmp.2
        //
        // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
        if callsite.fn_sig.abi() == Abi::RustCall && callee_body.spread_arg.is_none() {
            let mut args = args.into_iter();
            let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
            assert!(args.next().is_none());

            let tuple = Place::from(tuple);
            let ty::Tuple(tuple_tys) = tuple.ty(caller_body, tcx).ty.kind() else {
                bug!("Closure arguments are not passed as a tuple");
            };

            // The `closure_ref` in our example above.
            let closure_ref_arg = iter::once(self_);

            // The `tmp0`, `tmp1`, and `tmp2` in our example above.
            let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
                // This is e.g., `tuple_tmp.0` in our example above.
                let tuple_field = Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty));

                // Spill to a local to make e.g., `tmp0`.
                self.create_temp_if_necessary(tuple_field, callsite, caller_body)
            });

            closure_ref_arg.chain(tuple_tmp_args).collect()
        } else {
            args.into_iter()
                .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))
                .collect()
        }
    }

    /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
    /// temporary `T` and an instruction `T = arg`, and returns `T`.
    fn create_temp_if_necessary(
        &self,
        arg: Operand<'tcx>,
        callsite: &CallSite<'tcx>,
        caller_body: &mut Body<'tcx>,
    ) -> Local {
        // Reuse the operand if it is a moved temporary.
        if let Operand::Move(place) = &arg
            && let Some(local) = place.as_local()
            && caller_body.local_kind(local) == LocalKind::Temp
        {
            return local;
        }

        // Otherwise, create a temporary for the argument.
        trace!("creating temp for argument {:?}", arg);
        let arg_ty = arg.ty(caller_body, self.tcx);
        let local = self.new_call_temp(caller_body, callsite, arg_ty);
        caller_body[callsite.block].statements.push(Statement {
            source_info: callsite.source_info,
            kind: StatementKind::Assign(Box::new((Place::from(local), Rvalue::Use(arg)))),
        });
        local
    }

    /// Introduces a new temporary into the caller body that is live for the duration of the call.
    fn new_call_temp(
        &self,
        caller_body: &mut Body<'tcx>,
        callsite: &CallSite<'tcx>,
        ty: Ty<'tcx>,
    ) -> Local {
        let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span));

        caller_body[callsite.block].statements.push(Statement {
            source_info: callsite.source_info,
            kind: StatementKind::StorageLive(local),
        });

        if let Some(block) = callsite.target {
            caller_body[block].statements.insert(
                0,
                Statement {
                    source_info: callsite.source_info,
                    kind: StatementKind::StorageDead(local),
                },
            );
        }

        local
    }
}

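/// Returns the size of `ty` in bytes, if a layout can be computed for it.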
fn type_size_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    ty: Ty<'tcx>,
) -> Option<u64> {
    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
}

/// Verify that the callee body is compatible with the caller.
///
/// This visitor mostly computes the inlining cost,
/// but also needs to verify that types match because of normalization failure.
struct CostChecker<'b, 'tcx> {
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    cost: usize,
    callee_body: &'b Body<'tcx>,
    instance: ty::Instance<'tcx>,
    validation: Result<(), &'static str>,
}

impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
        // Don't count StorageLive/StorageDead in the inlining cost.
        match statement.kind {
            StatementKind::StorageLive(_)
            | StatementKind::StorageDead(_)
            | StatementKind::Deinit(_)
            | StatementKind::Nop => {}
            _ => self.cost += INSTR_COST,
        }

        self.super_statement(statement, location);
    }

    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
        let tcx = self.tcx;
        match terminator.kind {
            TerminatorKind::Drop { ref place, unwind, .. }
            | TerminatorKind::DropAndReplace { ref place, unwind, .. } => {
                // If the place doesn't actually need dropping, treat it like a regular goto.
                let ty = self.instance.subst_mir(tcx, &place.ty(self.callee_body, tcx).ty);
                if ty.needs_drop(tcx, self.param_env) {
                    self.cost += CALL_PENALTY;
                    if unwind.is_some() {
                        self.cost += LANDINGPAD_PENALTY;
                    }
                } else {
                    self.cost += INSTR_COST;
                }
            }
            TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
                let fn_ty = self.instance.subst_mir(tcx, &f.literal.ty());
                self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
                    // Don't give intrinsics the extra penalty for calls
                    INSTR_COST
                } else {
                    CALL_PENALTY
                };
                if cleanup.is_some() {
                    self.cost += LANDINGPAD_PENALTY;
                }
            }
            TerminatorKind::Assert { cleanup, .. } => {
                self.cost += CALL_PENALTY;
                if cleanup.is_some() {
                    self.cost += LANDINGPAD_PENALTY;
                }
            }
            TerminatorKind::Resume => self.cost += RESUME_PENALTY,
            TerminatorKind::InlineAsm { cleanup, .. } => {
                self.cost += INSTR_COST;
                if cleanup.is_some() {
                    self.cost += LANDINGPAD_PENALTY;
                }
            }
            _ => self.cost += INSTR_COST,
        }

        self.super_terminator(terminator, location);
    }

    /// Count up the cost of local variables and temps; if we know the size,
    /// use that, otherwise we use a moderately-large dummy cost.
    fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) {
        let tcx = self.tcx;
        let ptr_size = tcx.data_layout.pointer_size.bytes();

        let ty = self.instance.subst_mir(tcx, &local_decl.ty);
        // Cost of the var is the size in machine-words, if we know it.
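        // That is, ceil(size / ptr_size): with 8-byte pointers, a 12-byte local
        // costs 2 machine words.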
        if let Some(size) = type_size_of(tcx, self.param_env, ty) {
            self.cost += ((size + ptr_size - 1) / ptr_size) as usize;
        } else {
            self.cost += UNKNOWN_SIZE_COST;
        }

        self.super_local_decl(local, local_decl)
    }

    /// This method duplicates code from MIR validation in an attempt to detect type mismatches due
    /// to normalization failure.
    fn visit_projection_elem(
        &mut self,
        local: Local,
        proj_base: &[PlaceElem<'tcx>],
        elem: PlaceElem<'tcx>,
        context: PlaceContext,
        location: Location,
    ) {
        if let ProjectionElem::Field(f, ty) = elem {
            let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
            let parent_ty = parent.ty(&self.callee_body.local_decls, self.tcx);
            let check_equal = |this: &mut Self, f_ty| {
                if !util::is_equal_up_to_subtyping(this.tcx, this.param_env, ty, f_ty) {
                    trace!(?ty, ?f_ty);
                    this.validation = Err("failed to normalize projection type");
                    return;
                }
            };

            let kind = match parent_ty.ty.kind() {
                &ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
                    self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
                }
                kind => kind,
            };

            match kind {
                ty::Tuple(fields) => {
                    let Some(f_ty) = fields.get(f.as_usize()) else {
                        self.validation = Err("malformed MIR");
                        return;
                    };
                    check_equal(self, *f_ty);
                }
                ty::Adt(adt_def, substs) => {
                    let var = parent_ty.variant_index.unwrap_or(VariantIdx::from_u32(0));
                    let Some(field) = adt_def.variant(var).fields.get(f.as_usize()) else {
                        self.validation = Err("malformed MIR");
                        return;
                    };
                    check_equal(self, field.ty(self.tcx, substs));
                }
                ty::Closure(_, substs) => {
                    let substs = substs.as_closure();
                    let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
                        self.validation = Err("malformed MIR");
                        return;
                    };
                    check_equal(self, f_ty);
                }
                &ty::Generator(def_id, substs, _) => {
                    let f_ty = if let Some(var) = parent_ty.variant_index {
                        let gen_body = if def_id == self.callee_body.source.def_id() {
                            self.callee_body
                        } else {
                            self.tcx.optimized_mir(def_id)
                        };

                        let Some(layout) = gen_body.generator_layout() else {
                            self.validation = Err("malformed MIR");
                            return;
                        };

                        let Some(&local) = layout.variant_fields[var].get(f) else {
                            self.validation = Err("malformed MIR");
                            return;
                        };

                        let Some(&f_ty) = layout.field_tys.get(local) else {
                            self.validation = Err("malformed MIR");
                            return;
                        };

                        f_ty
                    } else {
                        let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
                            self.validation = Err("malformed MIR");
                            return;
                        };

                        f_ty
                    };

                    check_equal(self, f_ty);
                }
                _ => self.validation = Err("malformed MIR"),
            }
        }

        self.super_projection_elem(local, proj_base, elem, context, location);
    }
}

/**
 * Integrator.
 *
 * Integrates blocks from the callee function into the calling function.
 * Updates block indices, references to locals and other control flow
 * stuff.
 */
struct Integrator<'a, 'tcx> {
    args: &'a [Local],
    new_locals: RangeFrom<Local>,
    new_scopes: RangeFrom<SourceScope>,
    new_blocks: RangeFrom<BasicBlock>,
    destination: Local,
    callsite_scope: SourceScopeData<'tcx>,
    callsite: &'a CallSite<'tcx>,
    cleanup_block: Option<BasicBlock>,
    in_cleanup_block: bool,
    tcx: TyCtxt<'tcx>,
    expn_data: LocalExpnId,
    always_live_locals: BitSet<Local>,
}

impl Integrator<'_, '_> {
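    /// Maps a callee local into the caller body: `_0` becomes the destination
    /// local, argument locals `_1..=_n` map to the temporaries produced by
    /// `make_call_args`, and every other local is offset into the freshly
    /// allocated `new_locals` range.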
    fn map_local(&self, local: Local) -> Local {
        let new = if local == RETURN_PLACE {
            self.destination
        } else {
            let idx = local.index() - 1;
            if idx < self.args.len() {
                self.args[idx]
            } else {
                Local::new(self.new_locals.start.index() + (idx - self.args.len()))
            }
        };
        trace!("mapping local `{:?}` to `{:?}`", local, new);
        new
    }

    fn map_scope(&self, scope: SourceScope) -> SourceScope {
        let new = SourceScope::new(self.new_scopes.start.index() + scope.index());
        trace!("mapping scope `{:?}` to `{:?}`", scope, new);
        new
    }

    fn map_block(&self, block: BasicBlock) -> BasicBlock {
        let new = BasicBlock::new(self.new_blocks.start.index() + block.index());
        trace!("mapping block `{:?}` to `{:?}`", block, new);
        new
    }

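    /// Maps an unwind edge. Inside a cleanup block unwinding must not occur, so
    /// the edge is passed through unchanged; elsewhere an explicit target is
    /// remapped like any other block, and a missing target is redirected to the
    /// original call's cleanup block.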
    fn map_unwind(&self, unwind: Option<BasicBlock>) -> Option<BasicBlock> {
        if self.in_cleanup_block {
            if unwind.is_some() {
                bug!("cleanup on cleanup block");
            }
            return unwind;
        }

        match unwind {
            Some(target) => Some(self.map_block(target)),
            // Add an unwind edge to the original call's cleanup block
            None => self.cleanup_block,
        }
    }
}

impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
        *local = self.map_local(*local);
    }

    fn visit_source_scope_data(&mut self, scope_data: &mut SourceScopeData<'tcx>) {
        self.super_source_scope_data(scope_data);
        if scope_data.parent_scope.is_none() {
            // Attach the outermost callee scope as a child of the callsite
            // scope, via the `parent_scope` and `inlined_parent_scope` chains.
            scope_data.parent_scope = Some(self.callsite.source_info.scope);
            assert_eq!(scope_data.inlined_parent_scope, None);
            scope_data.inlined_parent_scope = if self.callsite_scope.inlined.is_some() {
                Some(self.callsite.source_info.scope)
            } else {
                self.callsite_scope.inlined_parent_scope
            };

            // Mark the outermost callee scope as an inlined one.
            assert_eq!(scope_data.inlined, None);
            scope_data.inlined = Some((self.callsite.callee, self.callsite.source_info.span));
        } else if scope_data.inlined_parent_scope.is_none() {
            // Make it easy to find the scope with `inlined` set above.
            scope_data.inlined_parent_scope = Some(self.map_scope(OUTERMOST_SOURCE_SCOPE));
        }
    }

    fn visit_source_scope(&mut self, scope: &mut SourceScope) {
        *scope = self.map_scope(*scope);
    }

    fn visit_span(&mut self, span: &mut Span) {
        // Make sure that all spans track the fact that they were inlined.
        *span = span.fresh_expansion(self.expn_data);
    }

    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
        self.in_cleanup_block = data.is_cleanup;
        self.super_basic_block_data(block, data);
        self.in_cleanup_block = false;
    }

    fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
        self.super_retag(kind, place, loc);

        // We have to patch all inlined retags to be aware that they are no longer
        // happening on function entry.
        if *kind == RetagKind::FnEntry {
            *kind = RetagKind::Default;
        }
    }

    fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
        if let StatementKind::StorageLive(local) | StatementKind::StorageDead(local) =
            statement.kind
        {
            self.always_live_locals.remove(local);
        }
        self.super_statement(statement, location);
    }

    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) {
        // Don't try to modify the implicit `_0` access on return (`return` terminators are
        // replaced down below anyways).
        if !matches!(terminator.kind, TerminatorKind::Return) {
            self.super_terminator(terminator, loc);
        }

        match terminator.kind {
            TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
            TerminatorKind::Goto { ref mut target } => {
                *target = self.map_block(*target);
            }
            TerminatorKind::SwitchInt { ref mut targets, .. } => {
                for tgt in targets.all_targets_mut() {
                    *tgt = self.map_block(*tgt);
                }
            }
            TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
            | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
                *target = self.map_block(*target);
                *unwind = self.map_unwind(*unwind);
            }
            TerminatorKind::Call { ref mut target, ref mut cleanup, .. } => {
                if let Some(ref mut tgt) = *target {
                    *tgt = self.map_block(*tgt);
                }
                *cleanup = self.map_unwind(*cleanup);
            }
            TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
                *target = self.map_block(*target);
                *cleanup = self.map_unwind(*cleanup);
            }
            TerminatorKind::Return => {
                terminator.kind = if let Some(tgt) = self.callsite.target {
                    TerminatorKind::Goto { target: tgt }
                } else {
                    TerminatorKind::Unreachable
                }
            }
            TerminatorKind::Resume => {
                if let Some(tgt) = self.cleanup_block {
                    terminator.kind = TerminatorKind::Goto { target: tgt }
                }
            }
            TerminatorKind::Abort => {}
            TerminatorKind::Unreachable => {}
            TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
                *real_target = self.map_block(*real_target);
                *imaginary_target = self.map_block(*imaginary_target);
            }
            TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
            // see the ordering of passes in the optimized_mir query.
            {
                bug!("False unwinds should have been removed before inlining")
            }
            TerminatorKind::InlineAsm { ref mut destination, ref mut cleanup, .. } => {
                if let Some(ref mut tgt) = *destination {
                    *tgt = self.map_block(*tgt);
                }
                *cleanup = self.map_unwind(*cleanup);
            }
        }
    }
}