//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}
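
// Illustrative usage sketch (added comment, not part of the original file):
// a client pass holding a CallInst *CI whose callee is a defined, non-vararg
// function might invoke the wrapper above roughly as follows. The exact
// InlineFunctionInfo constructor arguments are assumed; the fields read
// afterwards (InlinedCalls, StaticAllocas) are the ones this file populates.
//
//   InlineFunctionInfo IFI(/*CG=*/0, /*TD=*/0);
//   if (InlineFunction(CI, IFI, /*InsertLifetime=*/true)) {
//     // CI has been erased; the cloned call sites and static allocas are
//     // available in IFI.InlinedCalls and IFI.StaticAllocas.
//   }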

namespace {
  /// A class for recording information about inlining through an invoke.
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(0),
        CallerLPad(0), InnerEHValuesPHI(0) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// getOuterResumeDest - The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// forwardResume - Forward the 'resume' instruction to the caller's landing
    /// pad block. When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}
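
// Illustrative only (added comment, not from the original source): given a
// caller landing pad such as
//
//   lpad:
//     %lp = landingpad { i8*, i32 } personality i8* @pers cleanup
//     ...handler code...
//
// the split above produces an outer pad that merges EH values and an inner
// block holding the handler code:
//
//   lpad:
//     %lp = landingpad { i8*, i32 } personality i8* @pers cleanup
//     br label %lpad.body
//   lpad.body:
//     %eh.lpad-body = phi { i8*, i32 } [ %lp, %lpad ], ...
//     ...handler code...
//
// Inlined 'resume' instructions are then forwarded to %lpad.body (@pers and
// the value names are hypothetical).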

/// forwardResume - Forward the 'resume' instruction to the caller's landing pad
/// block. When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to the caller's unwind destination
/// recorded in Invoke, and fills in the PHI nodes of that block with the values
/// coming from the original invoke's source block.
///
/// Returns true to indicate that the next block should be skipped.
static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  LandingPadInst *LPI = Invoke.getLandingPadInst();

  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (LandingPadInst *L = dyn_cast<LandingPadInst>(I)) {
      unsigned NumClauses = LPI->getNumClauses();
      L->reserveClauses(NumClauses);
      for (unsigned i = 0; i != NumClauses; ++i)
        L->addClause(LPI->getClause(i));
    }

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    if (!CI || CI->doesNotThrow())
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call.
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return false;
  }

  return false;
}
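
// Illustrative only (added comment, not from the original source): inside the
// inlined region, a potentially-throwing call such as
//
//   %r = call i32 @g(i32 %x)
//   ...rest of block...
//
// is rewritten by the function above into
//
//   %r = invoke i32 @g(i32 %x)
//           to label %r.noexc unwind label %outer.lpad
//   r.noexc:
//     ...rest of block...
//
// where %outer.lpad is the caller's outer resume destination (@g and the
// value names are hypothetical).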

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite. If the code doesn't have calls or unwinds, we know there is
  // nothing to rewrite.
  if (!InlinedCodeInfo.ContainsCalls) {
    // Now that everything is happy, we have one final detail. The PHI nodes in
    // the exception destination block still have entries due to the original
    // invoke instruction. Eliminate these entries (which might even delete the
    // PHI node) now.
    InvokeDest->removePredecessor(II->getParent());
    return;
  }

  InvokeInliningInfo Invoke(II);

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      if (HandleCallsInBlockInlinedThroughInvoke(BB, Invoke)) {
        // Honor a request to skip the next block.
        ++BB;
        continue;
      }

    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  Type *AggTy = cast<PointerType>(Arg->getType())->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
                                   IFI.TD) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  LLVMContext &Context = Arg->getContext();

  Type *VoidPtrTy = Type::getInt8PtrTy(Context);

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align = 1;
  if (IFI.TD)
    Align = IFI.TD->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, 0, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  // Emit a memcpy.
  Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
  Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                 Intrinsic::memcpy,
                                                 Tys);
  Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
  Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);

  Value *Size;
  if (IFI.TD == 0)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = ConstantInt::get(Type::getInt64Ty(Context),
                            IFI.TD->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Value *CallArgs[] = {
    DestCast, SrcCast, Size,
    ConstantInt::get(Type::getInt32Ty(Context), 1),
    ConstantInt::getFalse(Context) // isVolatile
  };
  IRBuilder<>(TheCall).CreateCall(MemCpyFn, CallArgs);

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}
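
// Illustrative only (added comment, not from the original source): for a call
// site like
//
//   call void @f(%struct.S* byval align 8 %s)
//
// inlining with the helper above materializes the implicit copy as an alloca
// in the caller's entry block plus an unaligned memcpy before the call site:
//
//   %s.copy = alloca %struct.S, align 8
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 <size>,
//                                        i32 1, i1 false)
//
// and the inlined body then uses %s.copy (names are hypothetical; %dst/%src
// are the i8* bitcasts of the alloca and the original pointer).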

// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (Value::use_iterator UI = V->use_begin(), UE = V->use_end(); UI != UE;
       ++UI) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Int8PtrTy = Type::getInt8PtrTy(AI->getType()->getContext());
  if (AI->getType() == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); I != E;
       ++I) {
    if (I->getType() != Int8PtrTy) continue;
    if (I->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(*I))
      return true;
  }
  return false;
}
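
// Illustrative only (added comment, not from the original source): the
// bitcast pattern the scan above recognizes looks like
//
//   %a = alloca %T
//   %c = bitcast %T* %a to i8*
//   call void @llvm.lifetime.start(i64 <size>, i8* %c)
//
// If such markers already exist, InlineFunction below skips adding its own,
// broader function-scope markers for the alloca.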

/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
/// recursively update InlinedAtEntry of a DebugLoc.
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}
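
// Illustrative only (added comment, not from the original source): if an
// instruction carries a location L1 that was itself produced by inlining at
// L2, and we now inline that code at call-site location C, the recursion
// above appends C to the end of the inlinedAt chain:
//
//   before:  L1 inlinedAt L2
//   after:   L1 inlinedAt (L2 inlinedAt C)
//
// so the outermost entry of every chain becomes the current call site.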

/// fixupLineNumbers - Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (TheCallDL.isUnknown())
    return;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL.isUnknown()) {
        BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
          DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
                                                   InlinedAt, Ctx));
        }
      }
    }
  }
}

/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||            // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = 0;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        //       supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));

        // Calls that we inline may use the new alloca, so we need to clear
        // their 'tail' flags if HandleByValArgument introduced a new alloca and
        // the callee has calls.
        MustClearTailCallFlags |= ActualArg != *AI;
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = 0;
      if (ConstantInt *AIArraySize =
          dyn_cast<ConstantInt>(AI->getArraySize())) {
        if (IFI.TD) {
          Type *AllocaType = AI->getAllocatedType();
          uint64_t AllocaTypeSize = IFI.TD->getTypeAllocSize(AllocaType);
          uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
          assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
          // Check that array size doesn't saturate uint64_t and doesn't
          // overflow when it's multiplied by type size.
          if (AllocaArraySize != ~0ULL &&
              UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
            AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                          AllocaArraySize * AllocaTypeSize);
          }
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (unsigned ri = 0, re = Returns.size(); ri != re; ++ri) {
        IRBuilder<> builder(Returns[ri]);
        builder.CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
      .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      IRBuilder<>(Returns[i]).CreateCall(StackRestore, SavedPtr);
    }
  }
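
  // Illustrative only (added comment, not from the original source): after
  // this step the inlined region is bracketed as
  //
  //   %savedstack = call i8* @llvm.stacksave()
  //   ...inlined code, including dynamic allocas...
  //   call void @llvm.stackrestore(i8* %savedstack)
  //
  // so stack space used by the callee's dynamic allocas is reclaimed on every
  // return path out of the inlined code.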

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code. Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);


  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = 0;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as its operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }


    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into the calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}