//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;
using ProfileCount = Function::ProfileCount;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

// Disabled by default, because the added alignment assumptions may increase
// compile-time and block optimizations. This option is not suitable for use
// with frontends that emit comprehensive parameter alignment annotations.
static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(false), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

static cl::opt<bool> UpdateReturnAttributes(
    "update-return-attrs", cl::init(true), cl::Hidden,
    cl::desc("Update return attributes on calls within inlined body"));

static cl::opt<unsigned> InlinerAttributeWindow(
    "max-inst-checked-for-throw-during-inlining", cl::Hidden,
    cl::desc("the maximum number of instructions analyzed for may throw during "
             "attribute inference in inlined body"),
    cl::init(4));

namespace {

  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    /// Destination of the invoke's unwind.
    BasicBlock *OuterResumeDest;

    /// Destination for the callee's resume.
    BasicBlock *InnerResumeDest = nullptr;

    /// LandingPadInst associated with the invoke.
    LandingPadInst *CallerLPad = nullptr;

    /// PHI for EH values from landingpad insts.
    PHINode *InnerEHValuesPHI = nullptr;

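    /// Incoming values recorded for the PHIs in the unwind destination block,
    /// taken from the original invoke's edge (one entry per PHI, in order).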
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
        : OuterResumeDest(II->getUnwindDest()) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };

} // end anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

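// Memoizes the unwind destination token computed for each EH pad by
// getUnwindDestToken below; a null value records that no definitive
// destination has been found for that pad.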
using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap. When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this. We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad. Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup. In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad. Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow() || CI->isInlineAsm())
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental.deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a catchswitch,
          // leave it as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access or
/// llvm.access.group metadata, that metadata should be propagated to all
/// memory-accessing cloned instructions.
static void PropagateParallelLoopAccessMetadata(CallBase &CB,
                                                ValueToValueMapTy &VMap) {
  MDNode *M = CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  MDNode *CallAccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
  if (!M && !CallAccessGroup)
    return;

  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (M) {
      if (MDNode *PM =
              NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
        M = MDNode::concatenate(PM, M);
        NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
      } else if (NI->mayReadOrWriteMemory()) {
        NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
      }
    }

    if (NI->mayReadOrWriteMemory()) {
      MDNode *UnitedAccGroups = uniteAccessGroups(
          NI->getMetadata(LLVMContext::MD_access_group), CallAccessGroup);
      NI->setMetadata(LLVMContext::MD_access_group, UnitedAccGroups);
    }
  }
}

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CB.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (const BasicBlock &I : *CalledFunc)
    for (const Instruction &J : I) {
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (const MDNode *I : MD) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
      const Metadata *V = I->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM = CB.getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CB.getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM = CB.getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CB.getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CB.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = std::string(CalledFunc->getName());
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
          if (AAResults::onlyAccessesArgPointees(MRB))
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<const Value *, 4> Objects;
        GetUnderlyingObjects(V, Objects, DL, /* LI = */ nullptr);

        for (const Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

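/// Return true if the range [Begin, End) may contain an instruction that
/// throws or otherwise does not transfer execution to its successor; scans at
/// most InlinerAttributeWindow instructions and conservatively returns true if
/// the window is exceeded.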
static bool MayContainThrowingOrExitingCall(Instruction *Begin,
                                            Instruction *End) {

  assert(Begin->getParent() == End->getParent() &&
         "Expected to be in same basic block!");
  unsigned NumInstChecked = 0;
  // Check that all instructions in the range [Begin, End) are guaranteed to
  // transfer execution to successor.
  for (auto &I : make_range(Begin->getIterator(), End->getIterator()))
    if (NumInstChecked++ > InlinerAttributeWindow ||
        !isGuaranteedToTransferExecutionToSuccessor(&I))
      return true;
  return false;
}

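/// Collect the subset of the call site's return attributes that may legally be
/// propagated to a call returned from the inlined body (currently
/// dereferenceable, dereferenceable_or_null, noalias and nonnull).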
static AttrBuilder IdentifyValidAttributes(CallBase &CB) {

  AttrBuilder AB(CB.getAttributes(), AttributeList::ReturnIndex);
  if (AB.empty())
    return AB;
  AttrBuilder Valid;
  // Only allow these white listed attributes to be propagated back to the
  // callee. This is because other attributes may only be valid on the call
  // itself, i.e. attributes such as signext and zeroext.
  if (auto DerefBytes = AB.getDereferenceableBytes())
    Valid.addDereferenceableAttr(DerefBytes);
  if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
    Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
  if (AB.contains(Attribute::NoAlias))
    Valid.addAttribute(Attribute::NoAlias);
  if (AB.contains(Attribute::NonNull))
    Valid.addAttribute(Attribute::NonNull);
  return Valid;
}

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
  if (!UpdateReturnAttributes)
    return;

  AttrBuilder Valid = IdentifyValidAttributes(CB);
  if (Valid.empty())
    return;
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  for (auto &BB : *CalledFunction) {
    auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!RI || !isa<CallBase>(RI->getOperand(0)))
      continue;
    auto *RetVal = cast<CallBase>(RI->getOperand(0));
    // Sanity check that the cloned RetVal exists and is a call, otherwise we
    // cannot add the attributes on the cloned RetVal.
    // Simplification during inlining could have transformed the cloned
    // instruction.
    auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
    if (!NewRetVal)
      continue;
    // Backward propagation of attributes to the returned value may be incorrect
    // if it is control flow dependent.
    // Consider:
    // @callee {
    //   %rv = call @foo()
    //   %rv2 = call @bar()
    //   if (%rv2 != null)
    //     return %rv2
    //   if (%rv == null)
    //     exit()
    //   return %rv
    // }
    // caller() {
    //   %val = call nonnull @callee()
    // }
    // Here we cannot add the nonnull attribute on either foo or bar. So, we
    // limit the check to cases where both RetVal and RI are in the same basic
    // block and there are no throwing/exiting instructions between them.
    if (RI->getParent() != RetVal->getParent() ||
        MayContainThrowingOrExitingCall(RetVal, RI))
      continue;
    // Add to the existing attributes of NewRetVal, i.e. the cloned call
    // instruction.
    // NB! When we have the same attribute already existing on NewRetVal, but
    // with a differing value, the AttributeList's merge API honours the already
    // existing attribute value (i.e. attributes such as dereferenceable,
    // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
    AttributeList AL = NewRetVal->getAttributes();
    AttributeList NewAL =
        AL.addAttributes(Context, AttributeList::ReturnIndex, Valid);
    NewRetVal->setAttributes(NewAL);
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
  auto &DL = CB.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CB.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
    if (Align && !Arg.hasPassPointeeByValueAttr() && !Arg.hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(*CB.getCaller());
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
      if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= Align)
        continue;

      CallInst *NewAsmp =
          IRBuilder<>(&CB).CreateAlignmentAssumption(DL, ArgVal, Align);
      AC->registerAssumption(NewAsmp);
    }
  }
}
1266
1267 /// Once we have cloned code over from a callee into the caller,
1268 /// update the specified callgraph to reflect the changes we made.
1269 /// Note that it's possible that not all code was copied over, so only
1270 /// some edges of the callgraph may remain.
UpdateCallGraphAfterInlining(CallBase & CB,Function::iterator FirstNewBlock,ValueToValueMapTy & VMap,InlineFunctionInfo & IFI)1271 static void UpdateCallGraphAfterInlining(CallBase &CB,
1272 Function::iterator FirstNewBlock,
1273 ValueToValueMapTy &VMap,
1274 InlineFunctionInfo &IFI) {
1275 CallGraph &CG = *IFI.CG;
1276 const Function *Caller = CB.getCaller();
1277 const Function *Callee = CB.getCalledFunction();
1278 CallGraphNode *CalleeNode = CG[Callee];
1279 CallGraphNode *CallerNode = CG[Caller];
1280
1281 // Since we inlined some uninlined call sites in the callee into the caller,
1282 // add edges from the caller to all of the callees of the callee.
1283 CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1284
1285 // Consider the case where CalleeNode == CallerNode.
1286 CallGraphNode::CalledFunctionsVector CallCache;
1287 if (CalleeNode == CallerNode) {
1288 CallCache.assign(I, E);
1289 I = CallCache.begin();
1290 E = CallCache.end();
1291 }
1292
1293 for (; I != E; ++I) {
1294 // Skip 'refererence' call records.
1295 if (!I->first)
1296 continue;
1297
1298 const Value *OrigCall = *I->first;
1299
1300 ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1301 // Only copy the edge if the call was inlined!
1302 if (VMI == VMap.end() || VMI->second == nullptr)
1303 continue;
1304
1305 // If the call was inlined, but then constant folded, there is no edge to
1306 // add. Check for this case.
1307 auto *NewCall = dyn_cast<CallBase>(VMI->second);
1308 if (!NewCall)
1309 continue;
1310
1311 // We do not treat intrinsic calls like real function calls because we
1312 // expect them to become inline code; do not add an edge for an intrinsic.
1313 if (NewCall->getCalledFunction() &&
1314 NewCall->getCalledFunction()->isIntrinsic())
1315 continue;
1316
1317 // Remember that this call site got inlined for the client of
1318 // InlineFunction.
1319 IFI.InlinedCalls.push_back(NewCall);
1320
1321 // It's possible that inlining the callsite will cause it to go from an
1322 // indirect to a direct call by resolving a function pointer. If this
1323 // happens, set the callee of the new call site to a more precise
1324 // destination. This can also happen if the call graph node of the caller
1325 // was just unnecessarily imprecise.
1326 if (!I->second->getFunction())
1327 if (Function *F = NewCall->getCalledFunction()) {
1328 // Indirect call site resolved to direct call.
1329 CallerNode->addCalledFunction(NewCall, CG[F]);
1330
1331 continue;
1332 }
1333
1334 CallerNode->addCalledFunction(NewCall, I->second);
1335 }
1336
1337 // Update the call graph by deleting the edge from Callee to Caller. We must
1338 // do this after the loop above in case Caller and Callee are the same.
1339 CallerNode->removeCallEdgeFor(*cast<CallBase>(&CB));
1340 }
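// For instance, if A contained 'call B' and B contained 'call C', then after
// inlining B into A this adds an A -> C edge for the cloned call and removes
// the A -> B edge for the now-deleted call site (intrinsic calls and calls
// pruned away during cloning get no edge). The names A, B, C are illustrative.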
1341
1342 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1343 BasicBlock *InsertBlock,
1344 InlineFunctionInfo &IFI) {
1345 Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1346 IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1347
1348 Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1349
1350 // Always generate a memcpy of alignment 1 here because we don't know
1351 // the alignment of the src pointer. Other optimizations can infer
1352 // better alignment.
1353 Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
1354 /*SrcAlign*/ Align(1), Size);
1355 }
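// Schematically, for a byval aggregate %struct.S copied from %src into the new
// temporary %dst, the emitted initialization looks like
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst.i8, i8* align 1 %src.i8,
//                                        i64 <store size of %struct.S>, i1 false)
// where any i8* casts are produced by IRBuilder and the store size comes from
// the DataLayout; the value names are illustrative only.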
1356
1357 /// When inlining a call site that has a byval argument,
1358 /// we have to make the implicit memcpy explicit by adding it.
1359 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1360 const Function *CalledFunc,
1361 InlineFunctionInfo &IFI,
1362 unsigned ByValAlignment) {
1363 PointerType *ArgTy = cast<PointerType>(Arg->getType());
1364 Type *AggTy = ArgTy->getElementType();
1365
1366 Function *Caller = TheCall->getFunction();
1367 const DataLayout &DL = Caller->getParent()->getDataLayout();
1368
1369 // If the called function is readonly, then it could not mutate the caller's
1370 // copy of the byval'd memory. In this case, it is safe to elide the copy and
1371 // temporary.
1372 if (CalledFunc->onlyReadsMemory()) {
1373 // If the byval argument has a specified alignment that is greater than the
1374 // passed in pointer, then we either have to round up the input pointer or
1375 // give up on this transformation.
1376 if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
1377 return Arg;
1378
1379 AssumptionCache *AC =
1380 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1381
1382 // If the pointer is already known to be sufficiently aligned, or if we can
1383 // round it up to a larger alignment, then we don't need a temporary.
1384 if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall,
1385 AC) >= ByValAlignment)
1386 return Arg;
1387
1388 // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
1389 // for code quality, but rarely happens and is required for correctness.
1390 }
1391
1392 // Create the alloca, using the type's preferred alignment from the DataLayout.
1393 Align Alignment(DL.getPrefTypeAlignment(AggTy));
1394
1395 // If the byval had an alignment specified, we *must* use at least that
1396 // alignment, as it is required by the byval argument (and uses of the
1397 // pointer inside the callee).
1398 Alignment = max(Alignment, MaybeAlign(ByValAlignment));
1399
1400 Value *NewAlloca =
1401 new AllocaInst(AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
1402 Arg->getName(), &*Caller->begin()->begin());
1403 IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1404
1405 // Uses of the argument in the function should use our new alloca
1406 // instead.
1407 return NewAlloca;
1408 }
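// Illustrative outcome: for 'call void @f(%struct.S* byval align 8 %p)' the
// caller's entry block gains
//   %p.copy = alloca %struct.S, align 8
// and %p.copy is what the cloned callee body will use; the actual copy from %p
// into %p.copy is emitted later by HandleByValArgumentInit. The names are
// hypothetical, and the chosen alignment is the larger of the byval alignment
// and the type's preferred alignment.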
1409
1410 // Check whether this Value is used by a lifetime intrinsic.
1411 static bool isUsedByLifetimeMarker(Value *V) {
1412 for (User *U : V->users())
1413 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1414 if (II->isLifetimeStartOrEnd())
1415 return true;
1416 return false;
1417 }
1418
1419 // Check whether the given alloca already has
1420 // lifetime.start or lifetime.end intrinsics.
1421 static bool hasLifetimeMarkers(AllocaInst *AI) {
1422 Type *Ty = AI->getType();
1423 Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1424 Ty->getPointerAddressSpace());
1425 if (Ty == Int8PtrTy)
1426 return isUsedByLifetimeMarker(AI);
1427
1428 // Do a scan to find all the casts to i8*.
1429 for (User *U : AI->users()) {
1430 if (U->getType() != Int8PtrTy) continue;
1431 if (U->stripPointerCasts() != AI) continue;
1432 if (isUsedByLifetimeMarker(U))
1433 return true;
1434 }
1435 return false;
1436 }
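// Example of the pattern recognized above (names illustrative):
//   %buf    = alloca [16 x i8]
//   %buf.i8 = bitcast [16 x i8]* %buf to i8*
//   call void @llvm.lifetime.start.p0i8(i64 16, i8* %buf.i8)
// The alloca itself is not i8*, so the scan over its users finds the bitcast
// and then the lifetime marker through it.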
1437
1438 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1439 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1440 /// cannot be static.
1441 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1442 return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1443 }
1444
1445 /// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1446 /// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1447 static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1448 LLVMContext &Ctx,
1449 DenseMap<const MDNode *, MDNode *> &IANodes) {
1450 auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1451 return DebugLoc::get(OrigDL.getLine(), OrigDL.getCol(), OrigDL.getScope(),
1452 IA);
1453 }
1454
1455 /// Update inlined instructions' line numbers to encode the location where
1456 /// these instructions are inlined.
1457 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1458 Instruction *TheCall, bool CalleeHasDebugInfo) {
1459 const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1460 if (!TheCallDL)
1461 return;
1462
1463 auto &Ctx = Fn->getContext();
1464 DILocation *InlinedAtNode = TheCallDL;
1465
1466 // Create a unique call site, not to be confused with any other call from the
1467 // same location.
1468 InlinedAtNode = DILocation::getDistinct(
1469 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1470 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1471
1472 // Cache the inlined-at nodes as they're built so they are reused; without
1473 // this, every instruction's inlined-at chain would become distinct from every
1474 // other's.
1475 DenseMap<const MDNode *, MDNode *> IANodes;
1476
1477 // Check if we are not generating inline line tables and want to use
1478 // the call site location instead.
1479 bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1480
1481 for (; FI != Fn->end(); ++FI) {
1482 for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1483 BI != BE; ++BI) {
1484 // Loop metadata needs to be updated so that the start and end locs
1485 // reference inlined-at locations.
1486 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode, &IANodes](
1487 const DILocation &Loc) -> DILocation * {
1488 return inlineDebugLoc(&Loc, InlinedAtNode, Ctx, IANodes).get();
1489 };
1490 updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);
1491
1492 if (!NoInlineLineTables)
1493 if (DebugLoc DL = BI->getDebugLoc()) {
1494 DebugLoc IDL =
1495 inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
1496 BI->setDebugLoc(IDL);
1497 continue;
1498 }
1499
1500 if (CalleeHasDebugInfo && !NoInlineLineTables)
1501 continue;
1502
1503 // If the inlined instruction has no line number, or if inline info
1504 // is not being generated, make it look as if it originates from the call
1505 // location. This is important for ((__always_inline__, __nodebug__))
1506 // functions which must use caller location for all instructions in their
1507 // function body.
1508
1509 // Don't update static allocas, as they may get moved later.
1510 if (auto *AI = dyn_cast<AllocaInst>(BI))
1511 if (allocaWouldBeStaticInEntry(AI))
1512 continue;
1513
1514 BI->setDebugLoc(TheCallDL);
1515 }
1516
1517 // Remove debug info intrinsics if we're not keeping inline info.
1518 if (NoInlineLineTables) {
1519 BasicBlock::iterator BI = FI->begin();
1520 while (BI != FI->end()) {
1521 if (isa<DbgInfoIntrinsic>(BI)) {
1522 BI = BI->eraseFromParent();
1523 continue;
1524 }
1525 ++BI;
1526 }
1527 }
1528
1529 }
1530 }
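// Schematically, an instruction cloned from the callee ends up with a location
// whose inlined-at chain leads back to the (distinct) call-site location, e.g.
//   !42 = !DILocation(line: 7, scope: !calleeScope, inlinedAt: !43)
//   !43 = distinct !DILocation(line: 12, column: 3, scope: !callerScope)
// The metadata numbers and scope names here are purely illustrative.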
1531
1532 /// Update the block frequencies of the caller after a callee has been inlined.
1533 ///
1534 /// Each block cloned into the caller has its block frequency scaled by the
1535 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1536 /// callee's entry block gets the same frequency as the callsite block and the
1537 /// relative frequencies of all cloned blocks remain the same after cloning.
1538 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1539 const ValueToValueMapTy &VMap,
1540 BlockFrequencyInfo *CallerBFI,
1541 BlockFrequencyInfo *CalleeBFI,
1542 const BasicBlock &CalleeEntryBlock) {
1543 SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1544 for (auto Entry : VMap) {
1545 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1546 continue;
1547 auto *OrigBB = cast<BasicBlock>(Entry.first);
1548 auto *ClonedBB = cast<BasicBlock>(Entry.second);
1549 uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1550 if (!ClonedBBs.insert(ClonedBB).second) {
1551 // Multiple blocks in the callee might get mapped to one cloned block in
1552 // the caller since we prune the callee as we clone it. When that happens,
1553 // we want to use the maximum among the original blocks' frequencies.
1554 uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1555 if (NewFreq > Freq)
1556 Freq = NewFreq;
1557 }
1558 CallerBFI->setBlockFreq(ClonedBB, Freq);
1559 }
1560 BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1561 CallerBFI->setBlockFreqAndScale(
1562 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1563 ClonedBBs);
1564 }
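// Worked example (illustrative numbers): if the callee's entry block had
// frequency 100 and one of its blocks frequency 50, and the call-site block in
// the caller has frequency 20, then after this update the cloned entry gets
// frequency 20 and the cloned block gets 50 * 20/100 = 10, preserving the
// relative frequencies of the cloned blocks.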
1565
1566 /// Update the branch metadata for cloned call instructions.
1567 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1568 const ProfileCount &CalleeEntryCount,
1569 const CallBase &TheCall, ProfileSummaryInfo *PSI,
1570 BlockFrequencyInfo *CallerBFI) {
1571 if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1572 CalleeEntryCount.getCount() < 1)
1573 return;
1574 auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1575 int64_t CallCount =
1576 std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1577 CalleeEntryCount.getCount());
1578 updateProfileCallee(Callee, -CallCount, &VMap);
1579 }
1580
1581 void llvm::updateProfileCallee(
1582 Function *Callee, int64_t entryDelta,
1583 const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1584 auto CalleeCount = Callee->getEntryCount();
1585 if (!CalleeCount.hasValue())
1586 return;
1587
1588 uint64_t priorEntryCount = CalleeCount.getCount();
1589 uint64_t newEntryCount;
1590
1591 // Since CallSiteCount is an estimate, it could exceed the original callee
1592 // count; clamp the new count to zero to guard against underflow.
1593 if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
1594 newEntryCount = 0;
1595 else
1596 newEntryCount = priorEntryCount + entryDelta;
1597
1598 // Are we updating counts as part of inlining (i.e. was a VMap provided)?
1599 if (VMap) {
1600 uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
1601 for (auto Entry : *VMap)
1602 if (isa<CallInst>(Entry.first))
1603 if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1604 CI->updateProfWeight(cloneEntryCount, priorEntryCount);
1605 }
1606
1607 if (entryDelta) {
1608 Callee->setEntryCount(newEntryCount);
1609
1610 for (BasicBlock &BB : *Callee)
1611 // No need to update the callsite if it is pruned during inlining.
1612 if (!VMap || VMap->count(&BB))
1613 for (Instruction &I : BB)
1614 if (CallInst *CI = dyn_cast<CallInst>(&I))
1615 CI->updateProfWeight(newEntryCount, priorEntryCount);
1616 }
1617 }
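// Worked example (illustrative numbers): with a callee entry count of 100 and
// entryDelta = -30 (the estimated call-site count), the callee's entry count
// drops to 70, profile weights on call sites cloned into the caller are scaled
// by 30/100, and weights on the call sites remaining in the callee body are
// scaled by 70/100.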
1618
1619 /// This function inlines the called function into the basic block of the
1620 /// caller. This returns false if it is not possible to inline this call.
1621 /// The program is still in a well defined state if this occurs though.
1622 ///
1623 /// Note that this only does one level of inlining. For example, if the
1624 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1625 /// exists in the instruction stream. Similarly this will inline a recursive
1626 /// function by one level.
1627 llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
1628 AAResults *CalleeAAR,
1629 bool InsertLifetime,
1630 Function *ForwardVarArgsTo) {
1631 assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
1632
1633 // FIXME: we don't inline callbr yet.
1634 if (isa<CallBrInst>(CB))
1635 return InlineResult::failure("We don't inline callbr yet.");
1636
1637 // If IFI has any state in it, zap it before we fill it in.
1638 IFI.reset();
1639
1640 Function *CalledFunc = CB.getCalledFunction();
1641 if (!CalledFunc || // Can't inline external function or indirect
1642 CalledFunc->isDeclaration()) // call!
1643 return InlineResult::failure("external or indirect");
1644
1645 // The inliner does not know how to inline through calls with operand bundles
1646 // in general ...
1647 if (CB.hasOperandBundles()) {
1648 for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
1649 uint32_t Tag = CB.getOperandBundleAt(i).getTagID();
1650 // ... but it knows how to inline through "deopt" operand bundles ...
1651 if (Tag == LLVMContext::OB_deopt)
1652 continue;
1653 // ... and "funclet" operand bundles.
1654 if (Tag == LLVMContext::OB_funclet)
1655 continue;
1656
1657 return InlineResult::failure("unsupported operand bundle");
1658 }
1659 }
1660
1661 // If the call to the callee cannot throw, set the 'nounwind' flag on any
1662 // calls that we inline.
1663 bool MarkNoUnwind = CB.doesNotThrow();
1664
1665 BasicBlock *OrigBB = CB.getParent();
1666 Function *Caller = OrigBB->getParent();
1667
1668 // GC poses two hazards to inlining, which only occur when the callee has GC:
1669 // 1. If the caller has no GC, then the callee's GC must be propagated to the
1670 // caller.
1671 // 2. If the caller has a differing GC, it is invalid to inline.
1672 if (CalledFunc->hasGC()) {
1673 if (!Caller->hasGC())
1674 Caller->setGC(CalledFunc->getGC());
1675 else if (CalledFunc->getGC() != Caller->getGC())
1676 return InlineResult::failure("incompatible GC");
1677 }
1678
1679 // Get the personality function from the callee if it contains a landing pad.
1680 Constant *CalledPersonality =
1681 CalledFunc->hasPersonalityFn()
1682 ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1683 : nullptr;
1684
1685 // Find the personality function used by the landing pads of the caller. If it
1686 // exists, then check to see that it matches the personality function used in
1687 // the callee.
1688 Constant *CallerPersonality =
1689 Caller->hasPersonalityFn()
1690 ? Caller->getPersonalityFn()->stripPointerCasts()
1691 : nullptr;
1692 if (CalledPersonality) {
1693 if (!CallerPersonality)
1694 Caller->setPersonalityFn(CalledPersonality);
1695 // If the personality functions match, then we can perform the
1696 // inlining. Otherwise, we can't inline.
1697 // TODO: This isn't 100% true. Some personality functions are proper
1698 // supersets of others and can be used in place of the other.
1699 else if (CalledPersonality != CallerPersonality)
1700 return InlineResult::failure("incompatible personality");
1701 }
1702
1703 // We need to figure out which funclet the callsite was in so that we may
1704 // properly nest the callee.
1705 Instruction *CallSiteEHPad = nullptr;
1706 if (CallerPersonality) {
1707 EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1708 if (isScopedEHPersonality(Personality)) {
1709 Optional<OperandBundleUse> ParentFunclet =
1710 CB.getOperandBundle(LLVMContext::OB_funclet);
1711 if (ParentFunclet)
1712 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1713
1714 // OK, the inlining site is legal. What about the target function?
1715
1716 if (CallSiteEHPad) {
1717 if (Personality == EHPersonality::MSVC_CXX) {
1718 // The MSVC personality cannot tolerate catches getting inlined into
1719 // cleanup funclets.
1720 if (isa<CleanupPadInst>(CallSiteEHPad)) {
1721 // Ok, the call site is within a cleanuppad. Let's check the callee
1722 // for catchpads.
1723 for (const BasicBlock &CalledBB : *CalledFunc) {
1724 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1725 return InlineResult::failure("catch in cleanup funclet");
1726 }
1727 }
1728 } else if (isAsynchronousEHPersonality(Personality)) {
1729 // SEH is even less tolerant; there may not be any sort of exceptional
1730 // funclet in the callee.
1731 for (const BasicBlock &CalledBB : *CalledFunc) {
1732 if (CalledBB.isEHPad())
1733 return InlineResult::failure("SEH in cleanup funclet");
1734 }
1735 }
1736 }
1737 }
1738 }
1739
1740 // Determine if we are dealing with a call in an EHPad which does not unwind
1741 // to caller.
1742 bool EHPadForCallUnwindsLocally = false;
1743 if (CallSiteEHPad && isa<CallInst>(CB)) {
1744 UnwindDestMemoTy FuncletUnwindMap;
1745 Value *CallSiteUnwindDestToken =
1746 getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1747
1748 EHPadForCallUnwindsLocally =
1749 CallSiteUnwindDestToken &&
1750 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1751 }
1752
1753 // Get an iterator to the last basic block in the function, which will have
1754 // the new function inlined after it.
1755 Function::iterator LastBlock = --Caller->end();
1756
1757 // Make sure to capture all of the return instructions from the cloned
1758 // function.
1759 SmallVector<ReturnInst*, 8> Returns;
1760 ClonedCodeInfo InlinedFunctionInfo;
1761 Function::iterator FirstNewBlock;
1762
1763 { // Scope to destroy VMap after cloning.
1764 ValueToValueMapTy VMap;
1765 // Keep a list of pair (dst, src) to emit byval initializations.
1766 SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1767
1768 auto &DL = Caller->getParent()->getDataLayout();
1769
1770 // Calculate the vector of arguments to pass into the function cloner, which
1771 // matches up the formal to the actual argument values.
1772 auto AI = CB.arg_begin();
1773 unsigned ArgNo = 0;
1774 for (Function::arg_iterator I = CalledFunc->arg_begin(),
1775 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1776 Value *ActualArg = *AI;
1777
1778 // When byval arguments are actually inlined, we need to make the copy implied
1779 // by them explicit. However, we don't do this if the callee is readonly
1780 // or readnone, because the copy would be unneeded: the callee doesn't
1781 // modify the struct.
1782 if (CB.isByValArgument(ArgNo)) {
1783 ActualArg = HandleByValArgument(ActualArg, &CB, CalledFunc, IFI,
1784 CalledFunc->getParamAlignment(ArgNo));
1785 if (ActualArg != *AI)
1786 ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1787 }
1788
1789 VMap[&*I] = ActualArg;
1790 }
1791
1792 // TODO: Remove this when users have been updated to the assume bundles.
1793 // Add alignment assumptions if necessary. We do this before the inlined
1794 // instructions are actually cloned into the caller so that we can easily
1795 // check what will be known at the start of the inlined code.
1796 AddAlignmentAssumptions(CB, IFI);
1797
1798 AssumptionCache *AC =
1799 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1800
1801 // Preserve all attributes of the call and its parameters.
1802 salvageKnowledge(&CB, AC);
1803
1804 // We want the inliner to prune the code as it copies. We would LOVE to
1805 // have no dead or constant instructions leftover after inlining occurs
1806 // (which can happen, e.g., because an argument was constant), but we'll be
1807 // happy with whatever the cloner can do.
1808 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1809 /*ModuleLevelChanges=*/false, Returns, ".i",
1810 &InlinedFunctionInfo, &CB);
1811 // Remember the first block that is newly cloned over.
1812 FirstNewBlock = LastBlock; ++FirstNewBlock;
1813
1814 if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1815 // Update the BFI of blocks cloned into the caller.
1816 updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1817 CalledFunc->front());
1818
1819 updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), CB,
1820 IFI.PSI, IFI.CallerBFI);
1821
1822 // Inject byval arguments initialization.
1823 for (std::pair<Value*, Value*> &Init : ByValInit)
1824 HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1825 &*FirstNewBlock, IFI);
1826
1827 Optional<OperandBundleUse> ParentDeopt =
1828 CB.getOperandBundle(LLVMContext::OB_deopt);
1829 if (ParentDeopt) {
1830 SmallVector<OperandBundleDef, 2> OpDefs;
1831
1832 for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1833 CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
1834 if (!ICS)
1835 continue; // instruction was DCE'd or RAUW'ed to undef
1836
1837 OpDefs.clear();
1838
1839 OpDefs.reserve(ICS->getNumOperandBundles());
1840
1841 for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
1842 ++COBi) {
1843 auto ChildOB = ICS->getOperandBundleAt(COBi);
1844 if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1845 // If the inlined call has other operand bundles, let them be
1846 OpDefs.emplace_back(ChildOB);
1847 continue;
1848 }
1849
1850 // It may be useful to separate this logic (of handling operand
1851 // bundles) out to a separate "policy" component if this gets crowded.
1852 // Prepend the parent's deoptimization continuation to the newly
1853 // inlined call's deoptimization continuation.
1854 std::vector<Value *> MergedDeoptArgs;
1855 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1856 ChildOB.Inputs.size());
1857
1858 MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1859 ParentDeopt->Inputs.begin(),
1860 ParentDeopt->Inputs.end());
1861 MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1862 ChildOB.Inputs.end());
1863
1864 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1865 }
1866
1867 Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS);
1868
1869 // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1870 // this even if the call returns void.
1871 ICS->replaceAllUsesWith(NewI);
1872
1873 VH = nullptr;
1874 ICS->eraseFromParent();
1875 }
1876 }
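// For example, if this call site carried [ "deopt"(i32 1, i32 2) ] and an
// inlined call carried [ "deopt"(i32 3) ], the rewritten inlined call now
// carries [ "deopt"(i32 1, i32 2, i32 3) ]: the parent's deoptimization
// continuation is prepended to the child's. (Operand values are illustrative.)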
1877
1878 // Update the callgraph if requested.
1879 if (IFI.CG)
1880 UpdateCallGraphAfterInlining(CB, FirstNewBlock, VMap, IFI);
1881
1882 // For 'nodebug' functions, the associated DISubprogram is always null.
1883 // Conservatively avoid propagating the callsite debug location to
1884 // instructions inlined from a function whose DISubprogram is not null.
1885 fixupLineNumbers(Caller, FirstNewBlock, &CB,
1886 CalledFunc->getSubprogram() != nullptr);
1887
1888 // Clone existing noalias metadata if necessary.
1889 CloneAliasScopeMetadata(CB, VMap);
1890
1891 // Add noalias metadata if necessary.
1892 AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR);
1893
1894 // Clone return attributes on the callsite into the calls within the inlined
1895 // function which feed into its return value.
1896 AddReturnAttributes(CB, VMap);
1897
1898 // Propagate llvm.mem.parallel_loop_access if necessary.
1899 PropagateParallelLoopAccessMetadata(CB, VMap);
1900
1901 // Register any cloned assumptions.
1902 if (IFI.GetAssumptionCache)
1903 for (BasicBlock &NewBlock :
1904 make_range(FirstNewBlock->getIterator(), Caller->end()))
1905 for (Instruction &I : NewBlock)
1906 if (auto *II = dyn_cast<IntrinsicInst>(&I))
1907 if (II->getIntrinsicID() == Intrinsic::assume)
1908 IFI.GetAssumptionCache(*Caller).registerAssumption(II);
1909 }
1910
1911 // If there are any alloca instructions in the block that used to be the entry
1912 // block for the callee, move them to the entry block of the caller. First
1913 // calculate which instruction they should be inserted before. We insert the
1914 // instructions at the end of the current alloca list.
1915 {
1916 BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1917 for (BasicBlock::iterator I = FirstNewBlock->begin(),
1918 E = FirstNewBlock->end(); I != E; ) {
1919 AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1920 if (!AI) continue;
1921
1922 // If the alloca is now dead, remove it. This often occurs due to code
1923 // specialization.
1924 if (AI->use_empty()) {
1925 AI->eraseFromParent();
1926 continue;
1927 }
1928
1929 if (!allocaWouldBeStaticInEntry(AI))
1930 continue;
1931
1932 // Keep track of the static allocas that we inline into the caller.
1933 IFI.StaticAllocas.push_back(AI);
1934
1935 // Scan for the block of allocas that we can move over, and move them
1936 // all at once.
1937 while (isa<AllocaInst>(I) &&
1938 !cast<AllocaInst>(I)->use_empty() &&
1939 allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1940 IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1941 ++I;
1942 }
1943
1944 // Transfer all of the allocas over in a block. Using splice means
1945 // that the instructions aren't removed from the symbol table, then
1946 // reinserted.
1947 Caller->getEntryBlock().getInstList().splice(
1948 InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1949 }
1950 }
1951
1952 SmallVector<Value*,4> VarArgsToForward;
1953 SmallVector<AttributeSet, 4> VarArgsAttrs;
1954 for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
1955 i < CB.getNumArgOperands(); i++) {
1956 VarArgsToForward.push_back(CB.getArgOperand(i));
1957 VarArgsAttrs.push_back(CB.getAttributes().getParamAttributes(i));
1958 }
1959
1960 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1961 if (InlinedFunctionInfo.ContainsCalls) {
1962 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1963 if (CallInst *CI = dyn_cast<CallInst>(&CB))
1964 CallSiteTailKind = CI->getTailCallKind();
1965
1966 // For inlining purposes, the "notail" marker is the same as no marker.
1967 if (CallSiteTailKind == CallInst::TCK_NoTail)
1968 CallSiteTailKind = CallInst::TCK_None;
1969
1970 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1971 ++BB) {
1972 for (auto II = BB->begin(); II != BB->end();) {
1973 Instruction &I = *II++;
1974 CallInst *CI = dyn_cast<CallInst>(&I);
1975 if (!CI)
1976 continue;
1977
1978 // Forward varargs from inlined call site to calls to the
1979 // ForwardVarArgsTo function, if requested, and to musttail calls.
1980 if (!VarArgsToForward.empty() &&
1981 ((ForwardVarArgsTo &&
1982 CI->getCalledFunction() == ForwardVarArgsTo) ||
1983 CI->isMustTailCall())) {
1984 // Collect attributes for non-vararg parameters.
1985 AttributeList Attrs = CI->getAttributes();
1986 SmallVector<AttributeSet, 8> ArgAttrs;
1987 if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
1988 for (unsigned ArgNo = 0;
1989 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
1990 ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
1991 }
1992
1993 // Add VarArg attributes.
1994 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
1995 Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
1996 Attrs.getRetAttributes(), ArgAttrs);
1997 // Add VarArgs to existing parameters.
1998 SmallVector<Value *, 6> Params(CI->arg_operands());
1999 Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2000 CallInst *NewCI = CallInst::Create(
2001 CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
2002 NewCI->setDebugLoc(CI->getDebugLoc());
2003 NewCI->setAttributes(Attrs);
2004 NewCI->setCallingConv(CI->getCallingConv());
2005 CI->replaceAllUsesWith(NewCI);
2006 CI->eraseFromParent();
2007 CI = NewCI;
2008 }
2009
2010 if (Function *F = CI->getCalledFunction())
2011 InlinedDeoptimizeCalls |=
2012 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2013
2014 // We need to reduce the strength of any inlined tail calls. For
2015 // musttail, we have to avoid introducing potential unbounded stack
2016 // growth. For example, if functions 'f' and 'g' are mutually recursive
2017 // with musttail, we can inline 'g' into 'f' so long as we preserve
2018 // musttail on the cloned call to 'f'. If either the inlined call site
2019 // or the cloned call site is *not* musttail, the program already has
2020 // one frame of stack growth, so it's safe to remove musttail. Here is
2021 // a table of example transformations:
2022 //
2023 // f -> musttail g -> musttail f ==> f -> musttail f
2024 // f -> musttail g -> tail f ==> f -> tail f
2025 // f -> g -> musttail f ==> f -> f
2026 // f -> g -> tail f ==> f -> f
2027 //
2028 // Inlined notail calls should remain notail calls.
2029 CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2030 if (ChildTCK != CallInst::TCK_NoTail)
2031 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2032 CI->setTailCallKind(ChildTCK);
2033 InlinedMustTailCalls |= CI->isMustTailCall();
2034
2035 // Calls inlined through a 'nounwind' call site should be marked
2036 // 'nounwind'.
2037 if (MarkNoUnwind)
2038 CI->setDoesNotThrow();
2039 }
2040 }
2041 }
2042
2043 // Leave lifetime markers for the static allocas, scoping them to the
2044 // function we just inlined.
2045 if (InsertLifetime && !IFI.StaticAllocas.empty()) {
2046 IRBuilder<> builder(&FirstNewBlock->front());
2047 for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2048 AllocaInst *AI = IFI.StaticAllocas[ai];
2049 // Don't mark swifterror allocas. They can't have bitcast uses.
2050 if (AI->isSwiftError())
2051 continue;
2052
2053 // If the alloca is already scoped to something smaller than the whole
2054 // function then there's no need to add redundant, less accurate markers.
2055 if (hasLifetimeMarkers(AI))
2056 continue;
2057
2058 // Try to determine the size of the allocation.
2059 ConstantInt *AllocaSize = nullptr;
2060 if (ConstantInt *AIArraySize =
2061 dyn_cast<ConstantInt>(AI->getArraySize())) {
2062 auto &DL = Caller->getParent()->getDataLayout();
2063 Type *AllocaType = AI->getAllocatedType();
2064 uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2065 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2066
2067 // Don't add markers for zero-sized allocas.
2068 if (AllocaArraySize == 0)
2069 continue;
2070
2071 // Check that array size doesn't saturate uint64_t and doesn't
2072 // overflow when it's multiplied by type size.
2073 if (AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2074 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2075 AllocaTypeSize) {
2076 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2077 AllocaArraySize * AllocaTypeSize);
2078 }
2079 }
2080
2081 builder.CreateLifetimeStart(AI, AllocaSize);
2082 for (ReturnInst *RI : Returns) {
2083 // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2084 // call and a return. The return kills all local allocas.
2085 if (InlinedMustTailCalls &&
2086 RI->getParent()->getTerminatingMustTailCall())
2087 continue;
2088 if (InlinedDeoptimizeCalls &&
2089 RI->getParent()->getTerminatingDeoptimizeCall())
2090 continue;
2091 IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2092 }
2093 }
2094 }
2095
2096 // If the inlined code contained dynamic alloca instructions, wrap the inlined
2097 // code with llvm.stacksave/llvm.stackrestore intrinsics.
2098 if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2099 Module *M = Caller->getParent();
2100 unsigned AllocaAS = M->getDataLayout().getAllocaAddrSpace();
2101 // Get the two intrinsics we care about.
2102 Function *StackSave = Intrinsic::getDeclaration(
2103 M, Intrinsic::stacksave,
2104 {Type::getInt8PtrTy(Caller->getContext(), AllocaAS)});
2105 Function *StackRestore = Intrinsic::getDeclaration(
2106 M, Intrinsic::stackrestore,
2107 {Type::getInt8PtrTy(Caller->getContext(), AllocaAS)});
2108
2109 // Insert the llvm.stacksave.
2110 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2111 .CreateCall(StackSave, {}, "savedstack");
2112
2113 // Insert a call to llvm.stackrestore before any return instructions in the
2114 // inlined function.
2115 for (ReturnInst *RI : Returns) {
2116 // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2117 // call and a return. The return will restore the stack pointer.
2118 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2119 continue;
2120 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2121 continue;
2122 IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
2123 }
2124 }
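// Schematically, the inlined region is now bracketed as
//   %savedstack = call i8* @llvm.stacksave()
//   ...inlined code containing dynamic allocas...
//   call void @llvm.stackrestore(i8* %savedstack)
// with one stackrestore emitted before each return that is not preceded by a
// musttail or deoptimize call. (Pointer types shown without address spaces.)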
2125
2126 // If we are inlining for an invoke instruction, we must make sure to rewrite
2127 // any call instructions into invoke instructions. This is sensitive to which
2128 // funclet pads were top-level in the inlinee, so must be done before
2129 // rewriting the "parent pad" links.
2130 if (auto *II = dyn_cast<InvokeInst>(&CB)) {
2131 BasicBlock *UnwindDest = II->getUnwindDest();
2132 Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2133 if (isa<LandingPadInst>(FirstNonPHI)) {
2134 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2135 } else {
2136 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2137 }
2138 }
2139
2140 // Update the lexical scopes of the new funclets and callsites.
2141 // Anything that had 'none' as its parent is now nested inside the callsite's
2142 // EHPad.
2143
2144 if (CallSiteEHPad) {
2145 for (Function::iterator BB = FirstNewBlock->getIterator(),
2146 E = Caller->end();
2147 BB != E; ++BB) {
2148 // Add bundle operands to any top-level call sites.
2149 SmallVector<OperandBundleDef, 1> OpBundles;
2150 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2151 CallBase *I = dyn_cast<CallBase>(&*BBI++);
2152 if (!I)
2153 continue;
2154
2155 // Skip call sites which are nounwind intrinsics.
2156 auto *CalledFn =
2157 dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
2158 if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
2159 continue;
2160
2161 // Skip call sites which already have a "funclet" bundle.
2162 if (I->getOperandBundle(LLVMContext::OB_funclet))
2163 continue;
2164
2165 I->getOperandBundlesAsDefs(OpBundles);
2166 OpBundles.emplace_back("funclet", CallSiteEHPad);
2167
2168 Instruction *NewInst = CallBase::Create(I, OpBundles, I);
2169 NewInst->takeName(I);
2170 I->replaceAllUsesWith(NewInst);
2171 I->eraseFromParent();
2172
2173 OpBundles.clear();
2174 }
2175
2176 // It is problematic if the inlinee has a cleanupret which unwinds to the
2177 // caller, but the call site sits in an EH pad whose unwind destination is
2178 // local to the caller. Such an unwind edge must be dynamically unreachable,
2179 // so we replace the cleanupret with unreachable.
2180 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2181 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2182 changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
2183
2184 Instruction *I = BB->getFirstNonPHI();
2185 if (!I->isEHPad())
2186 continue;
2187
2188 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2189 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2190 CatchSwitch->setParentPad(CallSiteEHPad);
2191 } else {
2192 auto *FPI = cast<FuncletPadInst>(I);
2193 if (isa<ConstantTokenNone>(FPI->getParentPad()))
2194 FPI->setParentPad(CallSiteEHPad);
2195 }
2196 }
2197 }
2198
2199 if (InlinedDeoptimizeCalls) {
2200 // We need to at least remove the deoptimizing returns from the Return set,
2201 // so that the control flow from those returns does not get merged into the
2202 // caller (but terminate it instead). If the caller's return type does not
2203 // match the callee's return type, we also need to change the return type of
2204 // the intrinsic.
2205 if (Caller->getReturnType() == CB.getType()) {
2206 auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
2207 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2208 });
2209 Returns.erase(NewEnd, Returns.end());
2210 } else {
2211 SmallVector<ReturnInst *, 8> NormalReturns;
2212 Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2213 Caller->getParent(), Intrinsic::experimental_deoptimize,
2214 {Caller->getReturnType()});
2215
2216 for (ReturnInst *RI : Returns) {
2217 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2218 if (!DeoptCall) {
2219 NormalReturns.push_back(RI);
2220 continue;
2221 }
2222
2223 // The calling convention on the deoptimize call itself may be bogus,
2224 // since the code we're inlining may have undefined behavior (and may
2225 // never actually execute at runtime); but all
2226 // @llvm.experimental.deoptimize declarations have to have the same
2227 // calling convention in a well-formed module.
2228 auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2229 NewDeoptIntrinsic->setCallingConv(CallingConv);
2230 auto *CurBB = RI->getParent();
2231 RI->eraseFromParent();
2232
2233 SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
2234 DeoptCall->arg_end());
2235
2236 SmallVector<OperandBundleDef, 1> OpBundles;
2237 DeoptCall->getOperandBundlesAsDefs(OpBundles);
2238 DeoptCall->eraseFromParent();
2239 assert(!OpBundles.empty() &&
2240 "Expected at least the deopt operand bundle");
2241
2242 IRBuilder<> Builder(CurBB);
2243 CallInst *NewDeoptCall =
2244 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2245 NewDeoptCall->setCallingConv(CallingConv);
2246 if (NewDeoptCall->getType()->isVoidTy())
2247 Builder.CreateRetVoid();
2248 else
2249 Builder.CreateRet(NewDeoptCall);
2250 }
2251
2252 // Leave behind the normal returns so we can merge control flow.
2253 std::swap(Returns, NormalReturns);
2254 }
2255 }
2256
2257 // Handle any inlined musttail call sites. In order for a new call site to be
2258 // musttail, the source of the clone and the inlined call site must have been
2259 // musttail. Therefore it's safe to return without merging control into the
2260 // phi below.
2261 if (InlinedMustTailCalls) {
2262 // Check if we need to bitcast the result of any musttail calls.
2263 Type *NewRetTy = Caller->getReturnType();
2264 bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
2265
2266 // Handle the returns preceded by musttail calls separately.
2267 SmallVector<ReturnInst *, 8> NormalReturns;
2268 for (ReturnInst *RI : Returns) {
2269 CallInst *ReturnedMustTail =
2270 RI->getParent()->getTerminatingMustTailCall();
2271 if (!ReturnedMustTail) {
2272 NormalReturns.push_back(RI);
2273 continue;
2274 }
2275 if (!NeedBitCast)
2276 continue;
2277
2278 // Delete the old return and any preceding bitcast.
2279 BasicBlock *CurBB = RI->getParent();
2280 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2281 RI->eraseFromParent();
2282 if (OldCast)
2283 OldCast->eraseFromParent();
2284
2285 // Insert a new bitcast and return with the right type.
2286 IRBuilder<> Builder(CurBB);
2287 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2288 }
2289
2290 // Leave behind the normal returns so we can merge control flow.
2291 std::swap(Returns, NormalReturns);
2292 }
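// Illustrative rewrite when a bitcast is needed: if the caller returns i8* but
// the cloned musttail call produces %T*, the rewritten return becomes
//   %r = musttail call %T* @g(...)
//   %c = bitcast %T* %r to i8*
//   ret i8* %c
// with the old bitcast/ret pair deleted first. Names and types are hypothetical.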
2293
2294 // Now that all of the transforms on the inlined code have taken place but
2295 // before we splice the inlined code into the CFG and lose track of which
2296 // blocks were actually inlined, collect the call sites. We only do this if
2297 // call graph updates weren't requested, as those provide value handle based
2298 // tracking of inlined call sites instead.
2299 if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2300 // Otherwise just collect the raw call sites that were inlined.
2301 for (BasicBlock &NewBB :
2302 make_range(FirstNewBlock->getIterator(), Caller->end()))
2303 for (Instruction &I : NewBB)
2304 if (auto *CB = dyn_cast<CallBase>(&I))
2305 IFI.InlinedCallSites.push_back(CB);
2306 }
2307
2308 // If we cloned in _exactly one_ basic block, and if that block ends in a
2309 // return instruction, we splice the body of the inlined callee directly into
2310 // the calling basic block.
2311 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2312 // Move all of the instructions right before the call.
2313 OrigBB->getInstList().splice(CB.getIterator(), FirstNewBlock->getInstList(),
2314 FirstNewBlock->begin(), FirstNewBlock->end());
2315 // Remove the cloned basic block.
2316 Caller->getBasicBlockList().pop_back();
2317
2318 // If the call site was an invoke instruction, add a branch to the normal
2319 // destination.
2320 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2321 BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
2322 NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2323 }
2324
2325 // If the return instruction returned a value, replace uses of the call with
2326 // uses of the returned value.
2327 if (!CB.use_empty()) {
2328 ReturnInst *R = Returns[0];
2329 if (&CB == R->getReturnValue())
2330 CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2331 else
2332 CB.replaceAllUsesWith(R->getReturnValue());
2333 }
2334 // Since we are now done with the Call/Invoke, we can delete it.
2335 CB.eraseFromParent();
2336
2337 // Since we are now done with the return instruction, delete it also.
2338 Returns[0]->eraseFromParent();
2339
2340 // We are now done with the inlining.
2341 return InlineResult::success();
2342 }
2343
2344 // Otherwise, we have the normal case, of more than one block to inline or
2345 // multiple return sites.
2346
2347 // We want to clone the entire callee function into the hole between the
2348 // "starter" and "ender" blocks. How we accomplish this depends on whether
2349 // this is an invoke instruction or a call instruction.
2350 BasicBlock *AfterCallBB;
2351 BranchInst *CreatedBranchToNormalDest = nullptr;
2352 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2353
2354 // Add an unconditional branch to make this look like the CallInst case...
2355 CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);
2356
2357 // Split the basic block. This guarantees that no PHI nodes will have to be
2358 // updated due to new incoming edges, and makes the invoke case more
2359 // symmetric to the call case.
2360 AfterCallBB =
2361 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2362 CalledFunc->getName() + ".exit");
2363
2364 } else { // It's a call
2365 // If this is a call instruction, we need to split the basic block that
2366 // the call lives in.
2367 //
2368 AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
2369 CalledFunc->getName() + ".exit");
2370 }
2371
2372 if (IFI.CallerBFI) {
2373 // Copy original BB's block frequency to AfterCallBB
2374 IFI.CallerBFI->setBlockFreq(
2375 AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2376 }
2377
2378 // Change the branch that used to go to AfterCallBB to branch to the first
2379 // basic block of the inlined function.
2380 //
2381 Instruction *Br = OrigBB->getTerminator();
2382 assert(Br && Br->getOpcode() == Instruction::Br &&
2383 "splitBasicBlock broken!");
2384 Br->setOperand(0, &*FirstNewBlock);
2385
2386 // Now that the function is correct, make it a little bit nicer. In
2387 // particular, move the basic blocks inserted from the end of the function
2388 // into the space made by splitting the source basic block.
2389 Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2390 Caller->getBasicBlockList(), FirstNewBlock,
2391 Caller->end());
2392
2393 // Handle all of the return instructions that we just cloned in, and eliminate
2394 // any users of the original call/invoke instruction.
2395 Type *RTy = CalledFunc->getReturnType();
2396
2397 PHINode *PHI = nullptr;
2398 if (Returns.size() > 1) {
2399 // The PHI node should go at the front of the new basic block to merge all
2400 // possible incoming values.
2401 if (!CB.use_empty()) {
2402 PHI = PHINode::Create(RTy, Returns.size(), CB.getName(),
2403 &AfterCallBB->front());
2404 // Anything that used the result of the function call should now use the
2405 // PHI node as their operand.
2406 CB.replaceAllUsesWith(PHI);
2407 }
2408
2409 // Loop over all of the return instructions adding entries to the PHI node
2410 // as appropriate.
2411 if (PHI) {
2412 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2413 ReturnInst *RI = Returns[i];
2414 assert(RI->getReturnValue()->getType() == PHI->getType() &&
2415 "Ret value not consistent in function!");
2416 PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2417 }
2418 }
2419
2420 // Add a branch to the merge points and remove return instructions.
2421 DebugLoc Loc;
2422 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2423 ReturnInst *RI = Returns[i];
2424 BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2425 Loc = RI->getDebugLoc();
2426 BI->setDebugLoc(Loc);
2427 RI->eraseFromParent();
2428 }
2429 // We need to set the debug location to *somewhere* inside the
2430 // inlined function. The line number may be nonsensical, but the
2431 // instruction will at least be associated with the right
2432 // function.
2433 if (CreatedBranchToNormalDest)
2434 CreatedBranchToNormalDest->setDebugLoc(Loc);
2435 } else if (!Returns.empty()) {
2436 // Otherwise, if there is exactly one return value, just replace anything
2437 // using the return value of the call with the computed value.
2438 if (!CB.use_empty()) {
2439 if (&CB == Returns[0]->getReturnValue())
2440 CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2441 else
2442 CB.replaceAllUsesWith(Returns[0]->getReturnValue());
2443 }
2444
2445 // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2446 BasicBlock *ReturnBB = Returns[0]->getParent();
2447 ReturnBB->replaceAllUsesWith(AfterCallBB);
2448
2449 // Splice the code from the return block into the block that it will return
2450 // to, which contains the code that was after the call.
2451 AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2452 ReturnBB->getInstList());
2453
2454 if (CreatedBranchToNormalDest)
2455 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2456
2457 // Delete the return instruction and the now-empty ReturnBB.
2458 Returns[0]->eraseFromParent();
2459 ReturnBB->eraseFromParent();
2460 } else if (!CB.use_empty()) {
2461 // No returns, but something is using the return value of the call. Just
2462 // nuke the result.
2463 CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2464 }
2465
2466 // Since we are now done with the Call/Invoke, we can delete it.
2467 CB.eraseFromParent();
2468
2469 // If we inlined any musttail calls and the original return is now
2470 // unreachable, delete it. It can only contain a bitcast and ret.
2471 if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2472 AfterCallBB->eraseFromParent();
2473
2474 // We should always be able to fold the entry block of the function into the
2475 // single predecessor of the block...
2476 assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2477 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2478
2479 // Splice the code entry block into calling block, right before the
2480 // unconditional branch.
2481 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
2482 OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2483
2484 // Remove the unconditional branch.
2485 OrigBB->getInstList().erase(Br);
2486
2487 // Now we can remove the CalleeEntry block, which is now empty.
2488 Caller->getBasicBlockList().erase(CalleeEntry);
2489
2490 // If we inserted a phi node, check to see if it has a single value (e.g. all
2491 // the entries are the same or undef). If so, remove the PHI so it doesn't
2492 // block other optimizations.
2493 if (PHI) {
2494 AssumptionCache *AC =
2495 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2496 auto &DL = Caller->getParent()->getDataLayout();
2497 if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2498 PHI->replaceAllUsesWith(V);
2499 PHI->eraseFromParent();
2500 }
2501 }
2502
2503 return InlineResult::success();
2504 }
2505