//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining through an invoke.
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
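    /// Values that came into the unwind destination's PHI nodes along the
    /// edge from the original invoke's block, saved before that edge is
    /// removed.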
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// getOuterResumeDest - The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// forwardResume - Forward the 'resume' instruction to the caller's landing
    /// pad block. When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// forwardResume - Forward the 'resume' instruction to the caller's landing pad
/// block. When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI,
                               SmallPtrSetImpl<LandingPadInst*> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to the invoke's outer resume
/// destination, and it fills in the PHI nodes in that block with the values
/// that the original invoke supplied.
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call.
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
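    // The rest of this block now lives in Split, which the caller's loop over
    // the function's blocks will visit next, so we are done with BB.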
    return;
  }
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  InvokeInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, Invoke);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// CloneAliasScopeMetadata - When inlining a function that contains noalias
/// scope metadata, this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
       I != IE; ++I)
    for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<MDNode *, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    MDNode *Dummy = MDNode::getTemporary(CalledFunc->getContext(), None);
    DummyNodes.push_back(Dummy);
    MDMap[*I].reset(Dummy);
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
      const Metadata *V = (*I)->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDNodeFwdDecl *TempM = cast<MDNodeFwdDecl>(MDMap[*I]);

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }

  // Now that everything has been replaced, delete the dummy nodes.
  for (unsigned i = 0, ie = DummyNodes.size(); i != ie; ++i)
    MDNode::deleteTemporary(DummyNodes[i]);
}

/// AddAliasScopeMetadata - If the inlined function has noalias arguments, then
/// add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout *DL, AliasAnalysis *AA) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
       E = CalledFunc->arg_end(); I != E; ++I) {
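    // Note: hasNUses(0) is true only when the argument has no uses at all;
    // entirely-unused arguments don't need a scope.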
    if (I->hasNoAliasAttr() && !I->hasNUses(0))
      NoAliasArgs.push_back(I);
  }

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (AA) {
          AliasAnalysis::ModRefBehavior MRB = AA->getModRefBehavior(ICS);
          if (MRB == AliasAnalysis::OnlyAccessesArgumentPointees ||
              MRB == AliasAnalysis::OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (ImmutableCallSite::arg_iterator AI = ICS.arg_begin(),
             AE = ICS.arg_end(); AI != AE; ++AI) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !(*AI)->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(*AI);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
        SmallVector<Value *, 4> Objects;
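        // A MaxLookup of 0 places no limit on the depth of the
        // underlying-object search.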
        GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
                             Objects, DL, /* MaxLookup = */ 0);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value? (We need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols.)
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                   /* ReturnCaptures */ false,
                                   /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                          NI->getMetadata(LLVMContext::MD_noalias),
                          MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.DL)
    return;

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CS.getCalledFunction();
  for (Function::arg_iterator I = CalledFunc->arg_begin(),
                              E = CalledFunc->arg_end();
       I != E; ++I) {
    unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
    if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
                                               ->getParent()));
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *Arg = CS.getArgument(I->getArgNo());
      if (getKnownAlignment(Arg, IFI.DL,
                            &IFI.ACT->getAssumptionCache(*CalledFunc),
                            CS.getInstruction(), &DT) >= Align)
        continue;

      IRBuilder<>(CS.getInstruction()).CreateAlignmentAssumption(*IFI.DL, Arg,
                                                                 Align);
    }
  }
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
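  // If they are the same node, adding edges to CallerNode below would mutate
  // the very edge list we are iterating, so walk a copy of the edges instead.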
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

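/// HandleByValArgumentInit - Emit an explicit memcpy at the start of
/// InsertBlock that initializes Dst (the caller-side temporary) from Src (the
/// original byval pointer); used when the byval copy could not be elided.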
static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock->begin());

  Value *Size;
  if (IFI.DL == nullptr)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = Builder.getInt64(IFI.DL->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getParent()->getParent();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, IFI.DL,
                                   &IFI.ACT->getAssumptionCache(*Caller),
                                   TheCall) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align = 1;
  if (IFI.DL)
    Align = IFI.DL->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
/// recursively update the InlinedAtEntry of a DebugLoc.
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}

/// fixupLineNumbers - Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (TheCallDL.isUnknown())
    return;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (DL.isUnknown()) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location. This is important for
        // ((__always_inline__, __nodebug__)) functions which must use caller
        // location for all instructions in their function body.

        // Don't update static allocas, as they may get moved later.
        if (auto *AI = dyn_cast<AllocaInst>(BI))
          if (isa<Constant>(AI->getArraySize()))
            continue;

        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
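          // Operand 2 of llvm.dbg.value (and operand 1 of llvm.dbg.declare
          // below) is the variable descriptor; rewrite it to a variant that
          // records the new inlined-at location.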
          DVI->setOperand(2, MetadataAsValue::get(
                                 Ctx, createInlinedVariable(DVI->getVariable(),
                                                            InlinedAt, Ctx)));
        } else if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
          DDI->setOperand(1, MetadataAsValue::get(
                                 Ctx, createInlinedVariable(DDI->getVariable(),
                                                            InlinedAt, Ctx)));
        }
      }
    }
  }
}

/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||               // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = nullptr;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        //       supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[I] = ActualArg;
    }

    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CS, IFI);

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.DL, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              FirstNewBlock, IFI);

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, IFI.DL, IFI.AA);

    // FIXME: We could register any cloned assumptions instead of clearing the
    // whole function's cache.
    if (IFI.ACT)
      IFI.ACT->getAssumptionCache(*Caller).clear();
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  bool InlinedMustTailCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
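        // TailCallKind is ordered None < Tail < MustTail, so std::min picks
        // the weaker of the call site's kind and the inlined call's kind.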
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        if (IFI.DL) {
          Type *AllocaType = AI->getAllocatedType();
          uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
          uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
          assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
          // Check that array size doesn't saturate uint64_t and doesn't
          // overflow when it's multiplied by type size.
          if (AllocaArraySize != ~0ULL &&
              UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
            AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                          AllocaArraySize * AllocaTypeSize);
          }
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail call and a
        // return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and make the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);


  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }


    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.DL, nullptr, nullptr,
                                       &IFI.ACT->getAssumptionCache(*Caller))) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}