//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;
using ProfileCount = Function::ProfileCount;

static cl::opt<bool> EnableNoAliasConversion(
    "enable-noalias-to-md-conversion", cl::init(true), cl::Hidden,
    cl::desc("Convert noalias attributes to metadata during inlining."));

// Disabled by default, because the added alignment assumptions may increase
// compile-time and block optimizations. This option is not suitable for use
// with frontends that emit comprehensive parameter alignment annotations.
static cl::opt<bool> PreserveAlignmentAssumptions(
    "preserve-alignment-assumptions-during-inlining", cl::init(false),
    cl::Hidden,
    cl::desc("Convert align attributes to assumptions during inlining."));

static cl::opt<bool> UpdateReturnAttributes(
    "update-return-attrs", cl::init(true), cl::Hidden,
    cl::desc("Update return attributes on calls within inlined body"));

static cl::opt<unsigned> InlinerAttributeWindow(
    "max-inst-checked-for-throw-during-inlining", cl::Hidden,
    cl::desc("the maximum number of instructions analyzed for may throw during "
             "attribute inference in inlined body"),
    cl::init(4));

namespace {

/// A class for recording information about inlining a landing pad.
class LandingPadInliningInfo {
  /// Destination of the invoke's unwind.
  BasicBlock *OuterResumeDest;

  /// Destination for the callee's resume.
  BasicBlock *InnerResumeDest = nullptr;

  /// LandingPadInst associated with the invoke.
  LandingPadInst *CallerLPad = nullptr;

  /// PHI for EH values from landingpad insts.
  PHINode *InnerEHValuesPHI = nullptr;

  SmallVector<Value *, 8> UnwindDestPHIValues;

public:
  LandingPadInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()) {
    // If there are PHI nodes in the unwind destination block, we need to keep
    // track of which values came into them from the invoke before removing
    // the edge from this block.
    BasicBlock *InvokeBB = II->getParent();
    BasicBlock::iterator I = OuterResumeDest->begin();
    for (; isa<PHINode>(I); ++I) {
      // Save the value to use for this edge.
      PHINode *PHI = cast<PHINode>(I);
      UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
    }

    CallerLPad = cast<LandingPadInst>(I);
  }

  /// The outer unwind destination is the target of
  /// unwind edges introduced for calls within the inlined function.
  BasicBlock *getOuterResumeDest() const {
    return OuterResumeDest;
  }

  BasicBlock *getInnerResumeDest();

  LandingPadInst *getLandingPadInst() const { return CallerLPad; }

  /// Forward the 'resume' instruction to the caller's landing pad block.
  /// When the landing pad block has only one predecessor, this is
  /// a simple branch. When there is more than one predecessor, we need to
  /// split the landing pad block after the landingpad instruction and jump
  /// to there.
  void forwardResume(ResumeInst *RI,
                     SmallPtrSetImpl<LandingPadInst *> &InlinedLPads);

  /// Add incoming-PHI values to the unwind destination block for the given
  /// basic block, using the values for the original invoke's source block.
  void addIncomingPHIValuesFor(BasicBlock *BB) const {
    addIncomingPHIValuesForInto(BB, OuterResumeDest);
  }

  void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
    BasicBlock::iterator I = dest->begin();
    for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
      PHINode *phi = cast<PHINode>(I);
      phi->addIncoming(UnwindDestPHIValues[i], src);
    }
  }
};

} // end anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
      OuterResumeDest->splitBasicBlock(SplitPoint,
                                       OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap. When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this. We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad. Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad.
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup. In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad. Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
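///
/// Illustrative sketch (hypothetical IR, not from a test case): consider a
/// cleanup funclet whose only unwind information comes from a descendant:
///
///   outer:
///     %outer = cleanuppad within none []
///     invoke void @g() [ "funclet"(token %outer) ]
///         to label %cont unwind label %inner
///   inner:
///     %inner = cleanuppad within %outer []
///     cleanupret from %inner unwind to caller
///
/// %outer has no cleanupret of its own, but the descendant search finds
/// %inner's "unwind to caller" cleanupret, so the query returns
/// ConstantTokenNone and memoizes that answer for %outer as well.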
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
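///
/// For example (illustrative only), a throwing call in an inlined block
///
///   %r = call i32 @may_throw()
///
/// is split and rewritten along the lines of
///
///   %r = invoke i32 @may_throw()
///           to label %split unwind label %unwind.edge
///
/// where %unwind.edge is the UnwindEdge block passed in; the split and
/// rewrite themselves are performed by changeToInvokeAndSplitBasicBlock.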
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow() || CI->isInlineAsm())
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental.deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst *, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a call, leave it
          // as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access,
/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
/// be propagated to all memory-accessing cloned instructions.
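///
/// Illustrative sketch (hypothetical IR): if the call site carries
/// !llvm.mem.parallel_loop_access !0, then a cloned load
///
///   %v = load i32, i32* %p
///
/// becomes
///
///   %v = load i32, i32* %p, !llvm.mem.parallel_loop_access !0
///
/// (concatenated with any such metadata the instruction already carried);
/// cloned instructions that don't access memory are left untouched.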
static void PropagateCallSiteMetadata(CallBase &CB, ValueToValueMapTy &VMap) {
  MDNode *MemParallelLoopAccess =
      CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
  MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
  if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
    return;

  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    // This metadata is only relevant for instructions that access memory.
    if (!NI->mayReadOrWriteMemory())
      continue;

    if (MemParallelLoopAccess) {
      // TODO: This probably should not overwrite MemParallelLoopAccess.
      MemParallelLoopAccess = MDNode::concatenate(
          NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access),
          MemParallelLoopAccess);
      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                      MemParallelLoopAccess);
    }

    if (AccessGroup)
      NI->setMetadata(LLVMContext::MD_access_group,
                      uniteAccessGroups(
                          NI->getMetadata(LLVMContext::MD_access_group),
                          AccessGroup));

    if (AliasScope)
      NI->setMetadata(LLVMContext::MD_alias_scope,
                      MDNode::concatenate(
                          NI->getMetadata(LLVMContext::MD_alias_scope),
                          AliasScope));

    if (NoAlias)
      NI->setMetadata(LLVMContext::MD_noalias,
                      MDNode::concatenate(
                          NI->getMetadata(LLVMContext::MD_noalias), NoAlias));
  }
}

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
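///
/// A minimal sketch of the hazard (hypothetical scopes): if a callee tags its
/// accesses with scope !1 and is inlined twice into the same caller, reusing
/// !1 verbatim would let !noalias assertions from one inlined copy appear to
/// apply to !alias.scope accesses of the other. Cloning gives each inlined
/// body a fresh, unique set of scope nodes so the copies cannot be conflated.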
static void CloneAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CB.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (const BasicBlock &I : *CalledFunc)
    for (const Instruction &J : I) {
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (const MDNode *I : MD) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
      const Metadata *V = I->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope))
      NI->setMetadata(LLVMContext::MD_alias_scope, MDMap[M]);

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias))
      NI->setMetadata(LLVMContext::MD_noalias, MDMap[M]);
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
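///
/// Illustrative sketch (hypothetical IR, not from a test case): inlining
///
///   define void @f(i32* noalias %p, i32* %q) {
///     store i32 0, i32* %p
///     store i32 1, i32* %q
///     ret void
///   }
///
/// creates a fresh scope domain for @f and a scope for %p. The cloned store
/// to %p is tagged !alias.scope with that scope, while the cloned store to
/// %q, which is provably not derived from %p, is tagged !noalias with it.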
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CB.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function &>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = std::string(CalledFunc->getName());
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
          if (AAResults::onlyAccessesArgPointees(MRB))
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<const Value *, 4> Objects;
        getUnderlyingObjects(V, Objects, /* LI = */ nullptr);

        for (const Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value *>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

static bool MayContainThrowingOrExitingCall(Instruction *Begin,
                                            Instruction *End) {
  assert(Begin->getParent() == End->getParent() &&
         "Expected to be in same basic block!");
  unsigned NumInstChecked = 0;
  // Check that all instructions in the range [Begin, End) are guaranteed to
  // transfer execution to successor.
  for (auto &I : make_range(Begin->getIterator(), End->getIterator()))
    if (NumInstChecked++ > InlinerAttributeWindow ||
        !isGuaranteedToTransferExecutionToSuccessor(&I))
      return true;
  return false;
}

static AttrBuilder IdentifyValidAttributes(CallBase &CB) {
  AttrBuilder AB(CB.getAttributes(), AttributeList::ReturnIndex);
  if (AB.empty())
    return AB;
  AttrBuilder Valid;
  // Only allow these whitelisted attributes to be propagated back to the
  // callee. This is because other attributes may only be valid on the call
  // itself, i.e. attributes such as signext and zeroext.
  if (auto DerefBytes = AB.getDereferenceableBytes())
    Valid.addDereferenceableAttr(DerefBytes);
  if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
    Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
  if (AB.contains(Attribute::NoAlias))
    Valid.addAttribute(Attribute::NoAlias);
  if (AB.contains(Attribute::NonNull))
    Valid.addAttribute(Attribute::NonNull);
  return Valid;
}

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
  if (!UpdateReturnAttributes)
    return;

  AttrBuilder Valid = IdentifyValidAttributes(CB);
  if (Valid.empty())
    return;
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  for (auto &BB : *CalledFunction) {
    auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!RI || !isa<CallBase>(RI->getOperand(0)))
      continue;
    auto *RetVal = cast<CallBase>(RI->getOperand(0));
    // Sanity check that the cloned RetVal exists and is a call, otherwise we
    // cannot add the attributes on the cloned RetVal.
    // Simplification during inlining could have transformed the cloned
    // instruction.
    auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
    if (!NewRetVal)
      continue;
    // Backward propagation of attributes to the returned value may be
    // incorrect if it is control flow dependent.
    // Consider:
    //   @callee {
    //     %rv = call @foo()
    //     %rv2 = call @bar()
    //     if (%rv2 != null)
    //       return %rv2
    //     if (%rv == null)
    //       exit()
    //     return %rv
    //   }
    //   caller() {
    //     %val = call nonnull @callee()
    //   }
    // Here we cannot add the nonnull attribute on either foo or bar. So, we
    // limit the check to both RetVal and RI are in the same basic block and
    // there are no throwing/exiting instructions between these instructions.
    if (RI->getParent() != RetVal->getParent() ||
        MayContainThrowingOrExitingCall(RetVal, RI))
      continue;
    // Add to the existing attributes of NewRetVal, i.e. the cloned call
    // instruction.
    // NB! When we have the same attribute already existing on NewRetVal, but
    // with a differing value, the AttributeList's merge API honours the
    // already existing attribute value (i.e. attributes such as
    // dereferenceable, dereferenceable_or_null etc). See AttrBuilder::merge
    // for more details.
    AttributeList AL = NewRetVal->getAttributes();
    AttributeList NewAL =
        AL.addAttributes(Context, AttributeList::ReturnIndex, Valid);
    NewRetVal->setAttributes(NewAL);
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
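///
/// Rough sketch (illustrative): for a callee parameter declared as
/// "i32* align 16 %p", if the 16-byte alignment of the actual argument isn't
/// already provable at the call site, an alignment assumption is emitted
/// there via IRBuilder::CreateAlignmentAssumption and registered with the
/// caller's AssumptionCache, so later passes can still rely on the alignment
/// after the parameter attribute disappears with the inlined call.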
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
  auto &DL = CB.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CB.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
    if (Align && !Arg.hasPassPointeeByValueCopyAttr() && !Arg.hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(*CB.getCaller());
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
      if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= Align)
        continue;

      CallInst *NewAsmp =
          IRBuilder<>(&CB).CreateAlignmentAssumption(DL, ArgVal, Align);
      AC->registerAssumption(NewAsmp);
    }
  }
}
1257
1258 /// Once we have cloned code over from a callee into the caller,
1259 /// update the specified callgraph to reflect the changes we made.
1260 /// Note that it's possible that not all code was copied over, so only
1261 /// some edges of the callgraph may remain.
UpdateCallGraphAfterInlining(CallBase & CB,Function::iterator FirstNewBlock,ValueToValueMapTy & VMap,InlineFunctionInfo & IFI)1262 static void UpdateCallGraphAfterInlining(CallBase &CB,
1263 Function::iterator FirstNewBlock,
1264 ValueToValueMapTy &VMap,
1265 InlineFunctionInfo &IFI) {
1266 CallGraph &CG = *IFI.CG;
1267 const Function *Caller = CB.getCaller();
1268 const Function *Callee = CB.getCalledFunction();
1269 CallGraphNode *CalleeNode = CG[Callee];
1270 CallGraphNode *CallerNode = CG[Caller];
1271
1272 // Since we inlined some uninlined call sites in the callee into the caller,
1273 // add edges from the caller to all of the callees of the callee.
1274 CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1275
1276 // Consider the case where CalleeNode == CallerNode.
1277 CallGraphNode::CalledFunctionsVector CallCache;
1278 if (CalleeNode == CallerNode) {
1279 CallCache.assign(I, E);
1280 I = CallCache.begin();
1281 E = CallCache.end();
1282 }
1283
1284 for (; I != E; ++I) {
1285 // Skip 'refererence' call records.
1286 if (!I->first)
1287 continue;
1288
1289 const Value *OrigCall = *I->first;
1290
1291 ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1292 // Only copy the edge if the call was inlined!
1293 if (VMI == VMap.end() || VMI->second == nullptr)
1294 continue;
1295
1296 // If the call was inlined, but then constant folded, there is no edge to
1297 // add. Check for this case.
1298 auto *NewCall = dyn_cast<CallBase>(VMI->second);
1299 if (!NewCall)
1300 continue;
1301
1302 // We do not treat intrinsic calls like real function calls because we
1303 // expect them to become inline code; do not add an edge for an intrinsic.
1304 if (NewCall->getCalledFunction() &&
1305 NewCall->getCalledFunction()->isIntrinsic())
1306 continue;
1307
1308 // Remember that this call site got inlined for the client of
1309 // InlineFunction.
1310 IFI.InlinedCalls.push_back(NewCall);
1311
1312 // It's possible that inlining the callsite will cause it to go from an
1313 // indirect to a direct call by resolving a function pointer. If this
1314 // happens, set the callee of the new call site to a more precise
1315 // destination. This can also happen if the call graph node of the caller
1316 // was just unnecessarily imprecise.
1317 if (!I->second->getFunction())
1318 if (Function *F = NewCall->getCalledFunction()) {
1319 // Indirect call site resolved to direct call.
1320 CallerNode->addCalledFunction(NewCall, CG[F]);
1321
1322 continue;
1323 }
1324
1325 CallerNode->addCalledFunction(NewCall, I->second);
1326 }
1327
1328 // Update the call graph by deleting the edge from Callee to Caller. We must
1329 // do this after the loop above in case Caller and Callee are the same.
1330 CallerNode->removeCallEdgeFor(*cast<CallBase>(&CB));
1331 }
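// Schematic example: if A contains 'call B', and B calls C and D, inlining B
// into A adds the call graph edges A->C and A->D (for the cloned calls that
// survive pruning and constant folding) and removes the A->B edge for the
// inlined call site.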
1332
1333 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1334 BasicBlock *InsertBlock,
1335 InlineFunctionInfo &IFI) {
1336 Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1337 IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1338
1339 Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1340
1341 // Always generate a memcpy of alignment 1 here because we don't know
1342 // the alignment of the src pointer. Other optimizations can infer
1343 // better alignment.
1344 Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
1345 /*SrcAlign*/ Align(1), Size);
1346 }
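// For example, for a byval aggregate whose store size is 16 bytes, the
// injected initialization is roughly (modulo the bitcasts IRBuilder inserts
// to obtain i8* operands):
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst,
//                                        i8* align 1 %src, i64 16, i1 false)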
1347
1348 /// When inlining a call site that has a byval argument,
1349 /// we have to make the implicit memcpy explicit by adding it.
1350 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1351 const Function *CalledFunc,
1352 InlineFunctionInfo &IFI,
1353 unsigned ByValAlignment) {
1354 PointerType *ArgTy = cast<PointerType>(Arg->getType());
1355 Type *AggTy = ArgTy->getElementType();
1356
1357 Function *Caller = TheCall->getFunction();
1358 const DataLayout &DL = Caller->getParent()->getDataLayout();
1359
1360 // If the called function is readonly, then it could not mutate the caller's
1361 // copy of the byval'd memory. In this case, it is safe to elide the copy and
1362 // temporary.
1363 if (CalledFunc->onlyReadsMemory()) {
1364 // If the byval argument has a specified alignment that is greater than the
1365 // passed in pointer, then we either have to round up the input pointer or
1366 // give up on this transformation.
1367 if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
1368 return Arg;
1369
1370 AssumptionCache *AC =
1371 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1372
1373 // If the pointer is already known to be sufficiently aligned, or if we can
1374 // round it up to a larger alignment, then we don't need a temporary.
1375 if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall,
1376 AC) >= ByValAlignment)
1377 return Arg;
1378
1379 // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
1380 // for code quality, but rarely happens and is required for correctness.
1381 }
1382
1383 // Create the alloca, using the DataLayout's preferred alignment for the type.
1384 Align Alignment(DL.getPrefTypeAlignment(AggTy));
1385
1386 // If the byval had an alignment specified, we *must* use at least that
1387 // alignment, as it is required by the byval argument (and uses of the
1388 // pointer inside the callee).
1389 Alignment = max(Alignment, MaybeAlign(ByValAlignment));
1390
1391 Value *NewAlloca =
1392 new AllocaInst(AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
1393 Arg->getName(), &*Caller->begin()->begin());
1394 IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1395
1396 // Uses of the argument in the function should use our new alloca
1397 // instead.
1398 return NewAlloca;
1399 }
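// Illustrative effect (hypothetical IR): a byval argument of type %struct.S
// with 'align 8' that cannot be proven sufficiently aligned in the caller
// becomes
//
//   %arg.copy = alloca %struct.S, align 8   ; in the caller's entry block
//
// and the caller-side copy into it is injected later by
// HandleByValArgumentInit.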
1400
1401 // Check whether this Value is used by a lifetime intrinsic.
1402 static bool isUsedByLifetimeMarker(Value *V) {
1403 for (User *U : V->users())
1404 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1405 if (II->isLifetimeStartOrEnd())
1406 return true;
1407 return false;
1408 }
1409
1410 // Check whether the given alloca already has
1411 // lifetime.start or lifetime.end intrinsics.
1412 static bool hasLifetimeMarkers(AllocaInst *AI) {
1413 Type *Ty = AI->getType();
1414 Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1415 Ty->getPointerAddressSpace());
1416 if (Ty == Int8PtrTy)
1417 return isUsedByLifetimeMarker(AI);
1418
1419 // Do a scan to find all the casts to i8*.
1420 for (User *U : AI->users()) {
1421 if (U->getType() != Int8PtrTy) continue;
1422 if (U->stripPointerCasts() != AI) continue;
1423 if (isUsedByLifetimeMarker(U))
1424 return true;
1425 }
1426 return false;
1427 }
1428
1429 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1430 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1431 /// cannot be static.
1432 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1433 return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1434 }
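// For example, '%a = alloca i32, i64 4' would be static if moved to the entry
// block, while '%b = alloca i32, i64 %n' (dynamic array size) or an alloca
// used with an inalloca call would not.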
1435
1436 /// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1437 /// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1438 static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1439 LLVMContext &Ctx,
1440 DenseMap<const MDNode *, MDNode *> &IANodes) {
1441 auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1442 return DebugLoc::get(OrigDL.getLine(), OrigDL.getCol(), OrigDL.getScope(),
1443 IA);
1444 }
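// Schematically (metadata numbers made up for illustration), a callee
// location at line 3 in scope !sub, inlined at a call whose location is !20,
// becomes something like:
//
//   !DILocation(line: 3, column: 5, scope: !sub, inlinedAt: !20)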
1445
1446 /// Update inlined instructions' line numbers to encode the location where
1447 /// these instructions are inlined.
1448 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1449 Instruction *TheCall, bool CalleeHasDebugInfo) {
1450 const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1451 if (!TheCallDL)
1452 return;
1453
1454 auto &Ctx = Fn->getContext();
1455 DILocation *InlinedAtNode = TheCallDL;
1456
1457 // Create a unique call site, not to be confused with any other call from the
1458 // same location.
1459 InlinedAtNode = DILocation::getDistinct(
1460 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1461 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1462
1463 // Cache the inlined-at nodes as they're built so they are reused; without
1464 // this, every instruction's inlined-at chain would become distinct from
1465 // every other's.
1466 DenseMap<const MDNode *, MDNode *> IANodes;
1467
1468 // Check if we are not generating inline line tables and want to use
1469 // the call site location instead.
1470 bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1471
1472 for (; FI != Fn->end(); ++FI) {
1473 for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1474 BI != BE; ++BI) {
1475 // Loop metadata needs to be updated so that the start and end locs
1476 // reference inlined-at locations.
1477 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode, &IANodes](
1478 const DILocation &Loc) -> DILocation * {
1479 return inlineDebugLoc(&Loc, InlinedAtNode, Ctx, IANodes).get();
1480 };
1481 updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);
1482
1483 if (!NoInlineLineTables)
1484 if (DebugLoc DL = BI->getDebugLoc()) {
1485 DebugLoc IDL =
1486 inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
1487 BI->setDebugLoc(IDL);
1488 continue;
1489 }
1490
1491 if (CalleeHasDebugInfo && !NoInlineLineTables)
1492 continue;
1493
1494 // If the inlined instruction has no line number, or if inline info
1495 // is not being generated, make it look as if it originates from the call
1496 // location. This is important for ((__always_inline, __nodebug__))
1497 // functions which must use caller location for all instructions in their
1498 // function body.
1499
1500 // Don't update static allocas, as they may get moved later.
1501 if (auto *AI = dyn_cast<AllocaInst>(BI))
1502 if (allocaWouldBeStaticInEntry(AI))
1503 continue;
1504
1505 BI->setDebugLoc(TheCallDL);
1506 }
1507
1508 // Remove debug info intrinsics if we're not keeping inline info.
1509 if (NoInlineLineTables) {
1510 BasicBlock::iterator BI = FI->begin();
1511 while (BI != FI->end()) {
1512 if (isa<DbgInfoIntrinsic>(BI)) {
1513 BI = BI->eraseFromParent();
1514 continue;
1515 }
1516 ++BI;
1517 }
1518 }
1519
1520 }
1521 }
1522
1523 /// Update the block frequencies of the caller after a callee has been inlined.
1524 ///
1525 /// Each block cloned into the caller has its block frequency scaled by the
1526 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1527 /// callee's entry block gets the same frequency as the callsite block and the
1528 /// relative frequencies of all cloned blocks remain the same after cloning.
1529 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1530 const ValueToValueMapTy &VMap,
1531 BlockFrequencyInfo *CallerBFI,
1532 BlockFrequencyInfo *CalleeBFI,
1533 const BasicBlock &CalleeEntryBlock) {
1534 SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1535 for (auto Entry : VMap) {
1536 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1537 continue;
1538 auto *OrigBB = cast<BasicBlock>(Entry.first);
1539 auto *ClonedBB = cast<BasicBlock>(Entry.second);
1540 uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1541 if (!ClonedBBs.insert(ClonedBB).second) {
1542 // Multiple blocks in the callee might get mapped to one cloned block in
1543 // the caller since we prune the callee as we clone it. When that happens,
1544 // we want to use the maximum among the original blocks' frequencies.
1545 uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1546 if (NewFreq > Freq)
1547 Freq = NewFreq;
1548 }
1549 CallerBFI->setBlockFreq(ClonedBB, Freq);
1550 }
1551 BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1552 CallerBFI->setBlockFreqAndScale(
1553 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1554 ClonedBBs);
1555 }
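// Worked example with made-up numbers: if the callee's entry block has
// frequency 16 and the call site block has frequency 8, every cloned block's
// frequency is scaled by 8/16, so the cloned entry gets frequency 8 and the
// other clones keep their relative frequencies.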
1556
1557 /// Update the branch metadata for cloned call instructions.
1558 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1559 const ProfileCount &CalleeEntryCount,
1560 const CallBase &TheCall, ProfileSummaryInfo *PSI,
1561 BlockFrequencyInfo *CallerBFI) {
1562 if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1563 CalleeEntryCount.getCount() < 1)
1564 return;
1565 auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1566 int64_t CallCount =
1567 std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1568 CalleeEntryCount.getCount());
1569 updateProfileCallee(Callee, -CallCount, &VMap);
1570 }
1571
1572 void llvm::updateProfileCallee(
1573 Function *Callee, int64_t entryDelta,
1574 const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1575 auto CalleeCount = Callee->getEntryCount();
1576 if (!CalleeCount.hasValue())
1577 return;
1578
1579 uint64_t priorEntryCount = CalleeCount.getCount();
1580 uint64_t newEntryCount;
1581
1582 // Since CallSiteCount is an estimate, it could exceed the original callee
1583 // count; clamp the new count to 0 to guard against underflow.
1584 if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
1585 newEntryCount = 0;
1586 else
1587 newEntryCount = priorEntryCount + entryDelta;
1588
1589 // A non-null VMap means this update happens during inlining.
1590 if (VMap) {
1591 uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
1592 for (auto Entry : *VMap)
1593 if (isa<CallInst>(Entry.first))
1594 if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1595 CI->updateProfWeight(cloneEntryCount, priorEntryCount);
1596 }
1597
1598 if (entryDelta) {
1599 Callee->setEntryCount(newEntryCount);
1600
1601 for (BasicBlock &BB : *Callee)
1602 // No need to update the callsite if it is pruned during inlining.
1603 if (!VMap || VMap->count(&BB))
1604 for (Instruction &I : BB)
1605 if (CallInst *CI = dyn_cast<CallInst>(&I))
1606 CI->updateProfWeight(newEntryCount, priorEntryCount);
1607 }
1608 }
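// Worked example with made-up numbers: with a callee entry count of 100 and
// an inlined call site estimated at 30 calls, entryDelta is -30, so
// newEntryCount becomes 70; the cloned call sites are scaled by 30/100, and
// the call sites remaining in the callee by 70/100.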
1609
1610 /// This function inlines the called function into the basic block of the
1611 /// caller. This returns false if it is not possible to inline this call.
1612 /// The program is still in a well defined state if this occurs though.
1613 ///
1614 /// Note that this only does one level of inlining. For example, if the
1615 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1616 /// exists in the instruction stream. Similarly this will inline a recursive
1617 /// function by one level.
1618 llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
1619 AAResults *CalleeAAR,
1620 bool InsertLifetime,
1621 Function *ForwardVarArgsTo) {
1622 assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
1623
1624 // FIXME: we don't inline callbr yet.
1625 if (isa<CallBrInst>(CB))
1626 return InlineResult::failure("We don't inline callbr yet.");
1627
1628 // If IFI has any state in it, zap it before we fill it in.
1629 IFI.reset();
1630
1631 Function *CalledFunc = CB.getCalledFunction();
1632 if (!CalledFunc || // Can't inline external function or indirect
1633 CalledFunc->isDeclaration()) // call!
1634 return InlineResult::failure("external or indirect");
1635
1636 // The inliner does not know how to inline through calls with operand bundles
1637 // in general ...
1638 if (CB.hasOperandBundles()) {
1639 for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
1640 uint32_t Tag = CB.getOperandBundleAt(i).getTagID();
1641 // ... but it knows how to inline through "deopt" operand bundles ...
1642 if (Tag == LLVMContext::OB_deopt)
1643 continue;
1644 // ... and "funclet" operand bundles.
1645 if (Tag == LLVMContext::OB_funclet)
1646 continue;
1647
1648 return InlineResult::failure("unsupported operand bundle");
1649 }
1650 }
1651
1652 // If the call to the callee cannot throw, set the 'nounwind' flag on any
1653 // calls that we inline.
1654 bool MarkNoUnwind = CB.doesNotThrow();
1655
1656 BasicBlock *OrigBB = CB.getParent();
1657 Function *Caller = OrigBB->getParent();
1658
1659 // GC poses two hazards to inlining, which only occur when the callee has GC:
1660 // 1. If the caller has no GC, then the callee's GC must be propagated to the
1661 // caller.
1662 // 2. If the caller has a differing GC, it is invalid to inline.
1663 if (CalledFunc->hasGC()) {
1664 if (!Caller->hasGC())
1665 Caller->setGC(CalledFunc->getGC());
1666 else if (CalledFunc->getGC() != Caller->getGC())
1667 return InlineResult::failure("incompatible GC");
1668 }
1669
1670 // Inlining a function that explicitly should not have a stack protector may
1671 // break the code if inlined into a function that does have a stack
1672 // protector.
1673 if (LLVM_UNLIKELY(Caller->hasFnAttribute(Attribute::NoStackProtect)))
1674 if (CalledFunc->hasFnAttribute(Attribute::StackProtect) ||
1675 CalledFunc->hasFnAttribute(Attribute::StackProtectStrong) ||
1676 CalledFunc->hasFnAttribute(Attribute::StackProtectReq))
1677 return InlineResult::failure(
1678 "stack protected callee but caller requested no stack protector");
1679 if (LLVM_UNLIKELY(CalledFunc->hasFnAttribute(Attribute::NoStackProtect)))
1680 if (Caller->hasFnAttribute(Attribute::StackProtect) ||
1681 Caller->hasFnAttribute(Attribute::StackProtectStrong) ||
1682 Caller->hasFnAttribute(Attribute::StackProtectReq))
1683 return InlineResult::failure(
1684 "stack protected caller but callee requested no stack protector");
1685
1686 // Get the personality function from the callee if it contains a landing pad.
1687 Constant *CalledPersonality =
1688 CalledFunc->hasPersonalityFn()
1689 ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1690 : nullptr;
1691
1692 // Find the personality function used by the landing pads of the caller. If it
1693 // exists, then check to see that it matches the personality function used in
1694 // the callee.
1695 Constant *CallerPersonality =
1696 Caller->hasPersonalityFn()
1697 ? Caller->getPersonalityFn()->stripPointerCasts()
1698 : nullptr;
1699 if (CalledPersonality) {
1700 if (!CallerPersonality)
1701 Caller->setPersonalityFn(CalledPersonality);
1702 // If the personality functions match, then we can perform the
1703 // inlining. Otherwise, we can't inline.
1704 // TODO: This isn't 100% true. Some personality functions are proper
1705 // supersets of others and can be used in place of the other.
1706 else if (CalledPersonality != CallerPersonality)
1707 return InlineResult::failure("incompatible personality");
1708 }
1709
1710 // We need to figure out which funclet the callsite was in so that we may
1711 // properly nest the callee.
1712 Instruction *CallSiteEHPad = nullptr;
1713 if (CallerPersonality) {
1714 EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1715 if (isScopedEHPersonality(Personality)) {
1716 Optional<OperandBundleUse> ParentFunclet =
1717 CB.getOperandBundle(LLVMContext::OB_funclet);
1718 if (ParentFunclet)
1719 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1720
1721 // OK, the inlining site is legal. What about the target function?
1722
1723 if (CallSiteEHPad) {
1724 if (Personality == EHPersonality::MSVC_CXX) {
1725 // The MSVC personality cannot tolerate catches getting inlined into
1726 // cleanup funclets.
1727 if (isa<CleanupPadInst>(CallSiteEHPad)) {
1728 // Ok, the call site is within a cleanuppad. Let's check the callee
1729 // for catchpads.
1730 for (const BasicBlock &CalledBB : *CalledFunc) {
1731 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1732 return InlineResult::failure("catch in cleanup funclet");
1733 }
1734 }
1735 } else if (isAsynchronousEHPersonality(Personality)) {
1736 // SEH is even less tolerant; there may not be any sort of exceptional
1737 // funclet in the callee.
1738 for (const BasicBlock &CalledBB : *CalledFunc) {
1739 if (CalledBB.isEHPad())
1740 return InlineResult::failure("SEH in cleanup funclet");
1741 }
1742 }
1743 }
1744 }
1745 }
1746
1747 // Determine if we are dealing with a call in an EHPad which does not unwind
1748 // to caller.
1749 bool EHPadForCallUnwindsLocally = false;
1750 if (CallSiteEHPad && isa<CallInst>(CB)) {
1751 UnwindDestMemoTy FuncletUnwindMap;
1752 Value *CallSiteUnwindDestToken =
1753 getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1754
1755 EHPadForCallUnwindsLocally =
1756 CallSiteUnwindDestToken &&
1757 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1758 }
1759
1760 // Get an iterator to the last basic block in the function, which will have
1761 // the new function inlined after it.
1762 Function::iterator LastBlock = --Caller->end();
1763
1764 // Make sure to capture all of the return instructions from the cloned
1765 // function.
1766 SmallVector<ReturnInst*, 8> Returns;
1767 ClonedCodeInfo InlinedFunctionInfo;
1768 Function::iterator FirstNewBlock;
1769
1770 { // Scope to destroy VMap after cloning.
1771 ValueToValueMapTy VMap;
1772 // Keep a list of pair (dst, src) to emit byval initializations.
1773 SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1774
1775 auto &DL = Caller->getParent()->getDataLayout();
1776
1777 // Calculate the vector of arguments to pass into the function cloner, which
1778 // matches up the formal to the actual argument values.
1779 auto AI = CB.arg_begin();
1780 unsigned ArgNo = 0;
1781 for (Function::arg_iterator I = CalledFunc->arg_begin(),
1782 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1783 Value *ActualArg = *AI;
1784
1785 // When byval arguments are actually inlined, we need to make the copy
1786 // they imply explicit. However, we don't do this if the callee is readonly
1787 // or readnone, because the copy would be unneeded: the callee doesn't
1788 // modify the struct.
1789 if (CB.isByValArgument(ArgNo)) {
1790 ActualArg = HandleByValArgument(ActualArg, &CB, CalledFunc, IFI,
1791 CalledFunc->getParamAlignment(ArgNo));
1792 if (ActualArg != *AI)
1793 ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1794 }
1795
1796 VMap[&*I] = ActualArg;
1797 }
1798
1799 // TODO: Remove this when users have been updated to the assume bundles.
1800 // Add alignment assumptions if necessary. We do this before the inlined
1801 // instructions are actually cloned into the caller so that we can easily
1802 // check what will be known at the start of the inlined code.
1803 AddAlignmentAssumptions(CB, IFI);
1804
1805 AssumptionCache *AC =
1806 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1807
1808 // Preserve all attributes of the call and its parameters.
1809 salvageKnowledge(&CB, AC);
1810
1811 // We want the inliner to prune the code as it copies. We would LOVE to
1812 // have no dead or constant instructions leftover after inlining occurs
1813 // (which can happen, e.g., because an argument was constant), but we'll be
1814 // happy with whatever the cloner can do.
1815 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1816 /*ModuleLevelChanges=*/false, Returns, ".i",
1817 &InlinedFunctionInfo, &CB);
1818 // Remember the first block that is newly cloned over.
1819 FirstNewBlock = LastBlock; ++FirstNewBlock;
1820
1821 if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1822 // Update the BFI of blocks cloned into the caller.
1823 updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1824 CalledFunc->front());
1825
1826 updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), CB,
1827 IFI.PSI, IFI.CallerBFI);
1828
1829 // Inject byval arguments initialization.
1830 for (std::pair<Value*, Value*> &Init : ByValInit)
1831 HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1832 &*FirstNewBlock, IFI);
1833
1834 Optional<OperandBundleUse> ParentDeopt =
1835 CB.getOperandBundle(LLVMContext::OB_deopt);
1836 if (ParentDeopt) {
1837 SmallVector<OperandBundleDef, 2> OpDefs;
1838
1839 for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1840 CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
1841 if (!ICS)
1842 continue; // instruction was DCE'd or RAUW'ed to undef
1843
1844 OpDefs.clear();
1845
1846 OpDefs.reserve(ICS->getNumOperandBundles());
1847
1848 for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
1849 ++COBi) {
1850 auto ChildOB = ICS->getOperandBundleAt(COBi);
1851 if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1852 // If the inlined call has other operand bundles, let them be
1853 OpDefs.emplace_back(ChildOB);
1854 continue;
1855 }
1856
1857 // It may be useful to separate this logic (of handling operand
1858 // bundles) out to a separate "policy" component if this gets crowded.
1859 // Prepend the parent's deoptimization continuation to the newly
1860 // inlined call's deoptimization continuation.
1861 std::vector<Value *> MergedDeoptArgs;
1862 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1863 ChildOB.Inputs.size());
1864
1865 MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1866 ParentDeopt->Inputs.begin(),
1867 ParentDeopt->Inputs.end());
1868 MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1869 ChildOB.Inputs.end());
1870
1871 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1872 }
1873
1874 Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS);
1875
1876 // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1877 // this even if the call returns void.
1878 ICS->replaceAllUsesWith(NewI);
1879
1880 VH = nullptr;
1881 ICS->eraseFromParent();
1882 }
1883 }
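// Schematic example of the merge above: inlining a call that carries
// "deopt"(i32 20, i32 30) through a call site carrying "deopt"(i32 10)
// rewrites the inlined call to carry "deopt"(i32 10, i32 20, i32 30),
// i.e. the parent's deopt state is prepended to the child's.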
1884
1885 // Update the callgraph if requested.
1886 if (IFI.CG)
1887 UpdateCallGraphAfterInlining(CB, FirstNewBlock, VMap, IFI);
1888
1889 // For 'nodebug' functions, the associated DISubprogram is always null.
1890 // Conservatively avoid propagating the callsite debug location to
1891 // instructions inlined from a function whose DISubprogram is not null.
1892 fixupLineNumbers(Caller, FirstNewBlock, &CB,
1893 CalledFunc->getSubprogram() != nullptr);
1894
1895 // Clone existing noalias metadata if necessary.
1896 CloneAliasScopeMetadata(CB, VMap);
1897
1898 // Add noalias metadata if necessary.
1899 AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR);
1900
1901 // Clone return attributes on the callsite into the calls within the inlined
1902 // function which feed into its return value.
1903 AddReturnAttributes(CB, VMap);
1904
1905 // Propagate metadata on the callsite if necessary.
1906 PropagateCallSiteMetadata(CB, VMap);
1907
1908 // Register any cloned assumptions.
1909 if (IFI.GetAssumptionCache)
1910 for (BasicBlock &NewBlock :
1911 make_range(FirstNewBlock->getIterator(), Caller->end()))
1912 for (Instruction &I : NewBlock)
1913 if (auto *II = dyn_cast<IntrinsicInst>(&I))
1914 if (II->getIntrinsicID() == Intrinsic::assume)
1915 IFI.GetAssumptionCache(*Caller).registerAssumption(II);
1916 }
1917
1918 // If there are any alloca instructions in the block that used to be the entry
1919 // block for the callee, move them to the entry block of the caller. First
1920 // calculate which instruction they should be inserted before. We insert the
1921 // instructions at the end of the current alloca list.
1922 {
1923 BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1924 for (BasicBlock::iterator I = FirstNewBlock->begin(),
1925 E = FirstNewBlock->end(); I != E; ) {
1926 AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1927 if (!AI) continue;
1928
1929 // If the alloca is now dead, remove it. This often occurs due to code
1930 // specialization.
1931 if (AI->use_empty()) {
1932 AI->eraseFromParent();
1933 continue;
1934 }
1935
1936 if (!allocaWouldBeStaticInEntry(AI))
1937 continue;
1938
1939 // Keep track of the static allocas that we inline into the caller.
1940 IFI.StaticAllocas.push_back(AI);
1941
1942 // Scan for the block of allocas that we can move over, and move them
1943 // all at once.
1944 while (isa<AllocaInst>(I) &&
1945 !cast<AllocaInst>(I)->use_empty() &&
1946 allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1947 IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1948 ++I;
1949 }
1950
1951 // Transfer all of the allocas over in a block. Using splice means
1952 // that the instructions aren't removed from the symbol table, then
1953 // reinserted.
1954 Caller->getEntryBlock().getInstList().splice(
1955 InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1956 }
1957 }
1958
1959 SmallVector<Value*,4> VarArgsToForward;
1960 SmallVector<AttributeSet, 4> VarArgsAttrs;
1961 for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
1962 i < CB.getNumArgOperands(); i++) {
1963 VarArgsToForward.push_back(CB.getArgOperand(i));
1964 VarArgsAttrs.push_back(CB.getAttributes().getParamAttributes(i));
1965 }
1966
1967 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1968 if (InlinedFunctionInfo.ContainsCalls) {
1969 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1970 if (CallInst *CI = dyn_cast<CallInst>(&CB))
1971 CallSiteTailKind = CI->getTailCallKind();
1972
1973 // For inlining purposes, the "notail" marker is the same as no marker.
1974 if (CallSiteTailKind == CallInst::TCK_NoTail)
1975 CallSiteTailKind = CallInst::TCK_None;
1976
1977 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1978 ++BB) {
1979 for (auto II = BB->begin(); II != BB->end();) {
1980 Instruction &I = *II++;
1981 CallInst *CI = dyn_cast<CallInst>(&I);
1982 if (!CI)
1983 continue;
1984
1985 // Forward varargs from inlined call site to calls to the
1986 // ForwardVarArgsTo function, if requested, and to musttail calls.
1987 if (!VarArgsToForward.empty() &&
1988 ((ForwardVarArgsTo &&
1989 CI->getCalledFunction() == ForwardVarArgsTo) ||
1990 CI->isMustTailCall())) {
1991 // Collect attributes for non-vararg parameters.
1992 AttributeList Attrs = CI->getAttributes();
1993 SmallVector<AttributeSet, 8> ArgAttrs;
1994 if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
1995 for (unsigned ArgNo = 0;
1996 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
1997 ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
1998 }
1999
2000 // Add VarArg attributes.
2001 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2002 Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
2003 Attrs.getRetAttributes(), ArgAttrs);
2004 // Add VarArgs to existing parameters.
2005 SmallVector<Value *, 6> Params(CI->arg_operands());
2006 Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2007 CallInst *NewCI = CallInst::Create(
2008 CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
2009 NewCI->setDebugLoc(CI->getDebugLoc());
2010 NewCI->setAttributes(Attrs);
2011 NewCI->setCallingConv(CI->getCallingConv());
2012 CI->replaceAllUsesWith(NewCI);
2013 CI->eraseFromParent();
2014 CI = NewCI;
2015 }
2016
2017 if (Function *F = CI->getCalledFunction())
2018 InlinedDeoptimizeCalls |=
2019 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2020
2021 // We need to reduce the strength of any inlined tail calls. For
2022 // musttail, we have to avoid introducing potential unbounded stack
2023 // growth. For example, if functions 'f' and 'g' are mutually recursive
2024 // with musttail, we can inline 'g' into 'f' so long as we preserve
2025 // musttail on the cloned call to 'f'. If either the inlined call site
2026 // or the cloned call site is *not* musttail, the program already has
2027 // one frame of stack growth, so it's safe to remove musttail. Here is
2028 // a table of example transformations:
2029 //
2030 // f -> musttail g -> musttail f ==> f -> musttail f
2031 // f -> musttail g -> tail f ==> f -> tail f
2032 // f -> g -> musttail f ==> f -> f
2033 // f -> g -> tail f ==> f -> f
2034 //
2035 // Inlined notail calls should remain notail calls.
2036 CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2037 if (ChildTCK != CallInst::TCK_NoTail)
2038 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2039 CI->setTailCallKind(ChildTCK);
2040 InlinedMustTailCalls |= CI->isMustTailCall();
2041
2042 // Calls inlined through a 'nounwind' call site should be marked
2043 // 'nounwind'.
2044 if (MarkNoUnwind)
2045 CI->setDoesNotThrow();
2046 }
2047 }
2048 }
2049
2050 // Leave lifetime markers for the static allocas, scoping them to the
2051 // function we just inlined.
2052 if (InsertLifetime && !IFI.StaticAllocas.empty()) {
2053 IRBuilder<> builder(&FirstNewBlock->front());
2054 for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2055 AllocaInst *AI = IFI.StaticAllocas[ai];
2056 // Don't mark swifterror allocas. They can't have bitcast uses.
2057 if (AI->isSwiftError())
2058 continue;
2059
2060 // If the alloca is already scoped to something smaller than the whole
2061 // function then there's no need to add redundant, less accurate markers.
2062 if (hasLifetimeMarkers(AI))
2063 continue;
2064
2065 // Try to determine the size of the allocation.
2066 ConstantInt *AllocaSize = nullptr;
2067 if (ConstantInt *AIArraySize =
2068 dyn_cast<ConstantInt>(AI->getArraySize())) {
2069 auto &DL = Caller->getParent()->getDataLayout();
2070 Type *AllocaType = AI->getAllocatedType();
2071 TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2072 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2073
2074 // Don't add markers for zero-sized allocas.
2075 if (AllocaArraySize == 0)
2076 continue;
2077
2078 // Check that array size doesn't saturate uint64_t and doesn't
2079 // overflow when it's multiplied by type size.
2080 if (!AllocaTypeSize.isScalable() &&
2081 AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2082 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2083 AllocaTypeSize.getFixedSize()) {
2084 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2085 AllocaArraySize * AllocaTypeSize);
2086 }
2087 }
2088
2089 builder.CreateLifetimeStart(AI, AllocaSize);
2090 for (ReturnInst *RI : Returns) {
2091 // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2092 // call and a return. The return kills all local allocas.
2093 if (InlinedMustTailCalls &&
2094 RI->getParent()->getTerminatingMustTailCall())
2095 continue;
2096 if (InlinedDeoptimizeCalls &&
2097 RI->getParent()->getTerminatingDeoptimizeCall())
2098 continue;
2099 IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2100 }
2101 }
2102 }
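// Schematically, each surviving static alloca is bracketed roughly as:
//
//   %p = bitcast %T* %ai to i8*   ; inserted by IRBuilder as needed
//   call void @llvm.lifetime.start.p0i8(i64 <size>, i8* %p)
//   ...inlined body...
//   call void @llvm.lifetime.end.p0i8(i64 <size>, i8* %p)
//
// with <size> as computed above, or -1 when the size is unknown.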
2103
2104 // If the inlined code contained dynamic alloca instructions, wrap the inlined
2105 // code with llvm.stacksave/llvm.stackrestore intrinsics.
2106 if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2107 Module *M = Caller->getParent();
2108 // Get the two intrinsics we care about.
2109 Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
2110 Function *StackRestore = Intrinsic::getDeclaration(M, Intrinsic::stackrestore);
2111
2112 // Insert the llvm.stacksave.
2113 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2114 .CreateCall(StackSave, {}, "savedstack");
2115
2116 // Insert a call to llvm.stackrestore before any return instructions in the
2117 // inlined function.
2118 for (ReturnInst *RI : Returns) {
2119 // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2120 // call and a return. The return will restore the stack pointer.
2121 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2122 continue;
2123 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2124 continue;
2125 IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
2126 }
2127 }
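// I.e. the inlined code is wrapped roughly as:
//
//   %savedstack = call i8* @llvm.stacksave()
//   ...inlined body with dynamic allocas...
//   call void @llvm.stackrestore(i8* %savedstack)   ; before each return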
2128
2129 // If we are inlining for an invoke instruction, we must make sure to rewrite
2130 // any call instructions into invoke instructions. This is sensitive to which
2131 // funclet pads were top-level in the inlinee, so must be done before
2132 // rewriting the "parent pad" links.
2133 if (auto *II = dyn_cast<InvokeInst>(&CB)) {
2134 BasicBlock *UnwindDest = II->getUnwindDest();
2135 Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2136 if (isa<LandingPadInst>(FirstNonPHI)) {
2137 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2138 } else {
2139 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2140 }
2141 }
2142
2143 // Update the lexical scopes of the new funclets and callsites.
2144 // Anything that had 'none' as its parent is now nested inside the callsite's
2145 // EHPad.
2146
2147 if (CallSiteEHPad) {
2148 for (Function::iterator BB = FirstNewBlock->getIterator(),
2149 E = Caller->end();
2150 BB != E; ++BB) {
2151 // Add bundle operands to any top-level call sites.
2152 SmallVector<OperandBundleDef, 1> OpBundles;
2153 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2154 CallBase *I = dyn_cast<CallBase>(&*BBI++);
2155 if (!I)
2156 continue;
2157
2158 // Skip call sites which are nounwind intrinsics.
2159 auto *CalledFn =
2160 dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
2161 if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
2162 continue;
2163
2164 // Skip call sites which already have a "funclet" bundle.
2165 if (I->getOperandBundle(LLVMContext::OB_funclet))
2166 continue;
2167
2168 I->getOperandBundlesAsDefs(OpBundles);
2169 OpBundles.emplace_back("funclet", CallSiteEHPad);
2170
2171 Instruction *NewInst = CallBase::Create(I, OpBundles, I);
2172 NewInst->takeName(I);
2173 I->replaceAllUsesWith(NewInst);
2174 I->eraseFromParent();
2175
2176 OpBundles.clear();
2177 }
2178
2179 // It is problematic if the inlinee has a cleanupret which unwinds to the
2180 // caller, and we inline it into a call site which doesn't unwind but is
2181 // inside an EH pad that does. Such an edge must be dynamically
2182 // unreachable, so we replace the cleanupret with unreachable.
2183 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2184 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2185 changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
2186
2187 Instruction *I = BB->getFirstNonPHI();
2188 if (!I->isEHPad())
2189 continue;
2190
2191 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2192 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2193 CatchSwitch->setParentPad(CallSiteEHPad);
2194 } else {
2195 auto *FPI = cast<FuncletPadInst>(I);
2196 if (isa<ConstantTokenNone>(FPI->getParentPad()))
2197 FPI->setParentPad(CallSiteEHPad);
2198 }
2199 }
2200 }
2201
2202 if (InlinedDeoptimizeCalls) {
2203 // We need to at least remove the deoptimizing returns from the Return set,
2204 // so that the control flow from those returns does not get merged into the
2205 // caller (but terminates it instead). If the caller's return type does not
2206 // match the callee's return type, we also need to change the return type of
2207 // the intrinsic.
2208 if (Caller->getReturnType() == CB.getType()) {
2209 auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
2210 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2211 });
2212 Returns.erase(NewEnd, Returns.end());
2213 } else {
2214 SmallVector<ReturnInst *, 8> NormalReturns;
2215 Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2216 Caller->getParent(), Intrinsic::experimental_deoptimize,
2217 {Caller->getReturnType()});
2218
2219 for (ReturnInst *RI : Returns) {
2220 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2221 if (!DeoptCall) {
2222 NormalReturns.push_back(RI);
2223 continue;
2224 }
2225
2226 // The calling convention on the deoptimize call itself may be bogus,
2227 // since the code we're inlining may have undefined behavior (and may
2228 // never actually execute at runtime); but all
2229 // @llvm.experimental.deoptimize declarations have to have the same
2230 // calling convention in a well-formed module.
2231 auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2232 NewDeoptIntrinsic->setCallingConv(CallingConv);
2233 auto *CurBB = RI->getParent();
2234 RI->eraseFromParent();
2235
2236 SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
2237 DeoptCall->arg_end());
2238
2239 SmallVector<OperandBundleDef, 1> OpBundles;
2240 DeoptCall->getOperandBundlesAsDefs(OpBundles);
2241 DeoptCall->eraseFromParent();
2242 assert(!OpBundles.empty() &&
2243 "Expected at least the deopt operand bundle");
2244
2245 IRBuilder<> Builder(CurBB);
2246 CallInst *NewDeoptCall =
2247 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2248 NewDeoptCall->setCallingConv(CallingConv);
2249 if (NewDeoptCall->getType()->isVoidTy())
2250 Builder.CreateRetVoid();
2251 else
2252 Builder.CreateRet(NewDeoptCall);
2253 }
2254
2255 // Leave behind the normal returns so we can merge control flow.
2256 std::swap(Returns, NormalReturns);
2257 }
2258 }
2259
2260 // Handle any inlined musttail call sites. In order for a new call site to be
2261 // musttail, the source of the clone and the inlined call site must have been
2262 // musttail. Therefore it's safe to return without merging control into the
2263 // phi below.
2264 if (InlinedMustTailCalls) {
2265 // Check if we need to bitcast the result of any musttail calls.
2266 Type *NewRetTy = Caller->getReturnType();
2267 bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
2268
2269 // Handle the returns preceded by musttail calls separately.
2270 SmallVector<ReturnInst *, 8> NormalReturns;
2271 for (ReturnInst *RI : Returns) {
2272 CallInst *ReturnedMustTail =
2273 RI->getParent()->getTerminatingMustTailCall();
2274 if (!ReturnedMustTail) {
2275 NormalReturns.push_back(RI);
2276 continue;
2277 }
2278 if (!NeedBitCast)
2279 continue;
2280
2281 // Delete the old return and any preceding bitcast.
2282 BasicBlock *CurBB = RI->getParent();
2283 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2284 RI->eraseFromParent();
2285 if (OldCast)
2286 OldCast->eraseFromParent();
2287
2288 // Insert a new bitcast and return with the right type.
2289 IRBuilder<> Builder(CurBB);
2290 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2291 }
2292
2293 // Leave behind the normal returns so we can merge control flow.
2294 std::swap(Returns, NormalReturns);
2295 }
2296
2297 // Now that all of the transforms on the inlined code have taken place but
2298 // before we splice the inlined code into the CFG and lose track of which
2299 // blocks were actually inlined, collect the call sites. We only do this if
2300 // call graph updates weren't requested, as those provide value handle based
2301 // tracking of inlined call sites instead.
2302 if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2303 // Otherwise just collect the raw call sites that were inlined.
2304 for (BasicBlock &NewBB :
2305 make_range(FirstNewBlock->getIterator(), Caller->end()))
2306 for (Instruction &I : NewBB)
2307 if (auto *CB = dyn_cast<CallBase>(&I))
2308 IFI.InlinedCallSites.push_back(CB);
2309 }
2310
2311 // If we cloned in _exactly one_ basic block, and if that block ends in a
2312 // return instruction, we splice the body of the inlined callee directly into
2313 // the calling basic block.
2314 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2315 // Move all of the instructions right before the call.
2316 OrigBB->getInstList().splice(CB.getIterator(), FirstNewBlock->getInstList(),
2317 FirstNewBlock->begin(), FirstNewBlock->end());
2318 // Remove the cloned basic block.
2319 Caller->getBasicBlockList().pop_back();
2320
2321 // If the call site was an invoke instruction, add a branch to the normal
2322 // destination.
2323 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2324 BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
2325 NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2326 }
2327
2328 // If the return instruction returned a value, replace uses of the call with
2329 // uses of the returned value.
2330 if (!CB.use_empty()) {
2331 ReturnInst *R = Returns[0];
2332 if (&CB == R->getReturnValue())
2333 CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2334 else
2335 CB.replaceAllUsesWith(R->getReturnValue());
2336 }
2337 // Since we are now done with the Call/Invoke, we can delete it.
2338 CB.eraseFromParent();
2339
2340 // Since we are now done with the return instruction, delete it also.
2341 Returns[0]->eraseFromParent();
2342
2343 // We are now done with the inlining.
2344 return InlineResult::success();
2345 }
2346
2347 // Otherwise, we have the normal case, of more than one block to inline or
2348 // multiple return sites.
2349
2350 // We want to clone the entire callee function into the hole between the
2351 // "starter" and "ender" blocks. How we accomplish this depends on whether
2352 // this is an invoke instruction or a call instruction.
2353 BasicBlock *AfterCallBB;
2354 BranchInst *CreatedBranchToNormalDest = nullptr;
2355 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2356
2357 // Add an unconditional branch to make this look like the CallInst case...
2358 CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);
2359
2360 // Split the basic block. This guarantees that no PHI nodes will have to be
2361 // updated due to new incoming edges, and makes the invoke case more
2362 // symmetric to the call case.
2363 AfterCallBB =
2364 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2365 CalledFunc->getName() + ".exit");
2366
2367 } else { // It's a call
2368 // If this is a call instruction, we need to split the basic block that
2369 // the call lives in.
2370 //
2371 AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
2372 CalledFunc->getName() + ".exit");
2373 }
2374
2375 if (IFI.CallerBFI) {
2376 // Copy original BB's block frequency to AfterCallBB
2377 IFI.CallerBFI->setBlockFreq(
2378 AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2379 }
2380
2381 // Change the branch that used to go to AfterCallBB to branch to the first
2382 // basic block of the inlined function.
2383 //
2384 Instruction *Br = OrigBB->getTerminator();
2385 assert(Br && Br->getOpcode() == Instruction::Br &&
2386 "splitBasicBlock broken!");
2387 Br->setOperand(0, &*FirstNewBlock);
2388
2389 // Now that the function is correct, make it a little bit nicer. In
2390 // particular, move the basic blocks inserted from the end of the function
2391 // into the space made by splitting the source basic block.
2392 Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2393 Caller->getBasicBlockList(), FirstNewBlock,
2394 Caller->end());
2395
2396 // Handle all of the return instructions that we just cloned in, and eliminate
2397 // any users of the original call/invoke instruction.
2398 Type *RTy = CalledFunc->getReturnType();
2399
2400 PHINode *PHI = nullptr;
2401 if (Returns.size() > 1) {
2402 // The PHI node should go at the front of the new basic block to merge all
2403 // possible incoming values.
2404 if (!CB.use_empty()) {
2405 PHI = PHINode::Create(RTy, Returns.size(), CB.getName(),
2406 &AfterCallBB->front());
2407 // Anything that used the result of the function call should now use the
2408 // PHI node as their operand.
2409 CB.replaceAllUsesWith(PHI);
2410 }
2411
2412 // Loop over all of the return instructions adding entries to the PHI node
2413 // as appropriate.
2414 if (PHI) {
2415 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2416 ReturnInst *RI = Returns[i];
2417 assert(RI->getReturnValue()->getType() == PHI->getType() &&
2418 "Ret value not consistent in function!");
2419 PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2420 }
2421 }
2422
2423 // Add a branch to the merge points and remove return instructions.
2424 DebugLoc Loc;
2425 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2426 ReturnInst *RI = Returns[i];
2427 BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2428 Loc = RI->getDebugLoc();
2429 BI->setDebugLoc(Loc);
2430 RI->eraseFromParent();
2431 }
2432 // We need to set the debug location to *somewhere* inside the
2433 // inlined function. The line number may be nonsensical, but the
2434 // instruction will at least be associated with the right
2435 // function.
2436 if (CreatedBranchToNormalDest)
2437 CreatedBranchToNormalDest->setDebugLoc(Loc);
2438 } else if (!Returns.empty()) {
2439 // Otherwise, if there is exactly one return value, just replace anything
2440 // using the return value of the call with the computed value.
2441 if (!CB.use_empty()) {
2442 if (&CB == Returns[0]->getReturnValue())
2443 CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2444 else
2445 CB.replaceAllUsesWith(Returns[0]->getReturnValue());
2446 }
2447
2448 // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2449 BasicBlock *ReturnBB = Returns[0]->getParent();
2450 ReturnBB->replaceAllUsesWith(AfterCallBB);
2451
2452 // Splice the code from the return block into the block that it will return
2453 // to, which contains the code that was after the call.
2454 AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2455 ReturnBB->getInstList());
2456
2457 if (CreatedBranchToNormalDest)
2458 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2459
2460 // Delete the return instruction and the now-empty ReturnBB.
2461 Returns[0]->eraseFromParent();
2462 ReturnBB->eraseFromParent();
2463 } else if (!CB.use_empty()) {
2464 // No returns, but something is using the return value of the call. Just
2465 // nuke the result.
2466 CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2467 }
2468
2469 // Since we are now done with the Call/Invoke, we can delete it.
2470 CB.eraseFromParent();
2471
2472 // If we inlined any musttail calls and the original return is now
2473 // unreachable, delete it. It can only contain a bitcast and ret.
2474 if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2475 AfterCallBB->eraseFromParent();
2476
2477 // We should always be able to fold the entry block of the function into the
2478 // single predecessor of the block...
2479 assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2480 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2481
2482 // Splice the code entry block into calling block, right before the
2483 // unconditional branch.
2484 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
2485 OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2486
2487 // Remove the unconditional branch.
2488 OrigBB->getInstList().erase(Br);
2489
2490 // Now we can remove the CalleeEntry block, which is now empty.
2491 Caller->getBasicBlockList().erase(CalleeEntry);
2492
2493 // If we inserted a phi node, check to see if it has a single value (e.g. all
2494 // the entries are the same or undef). If so, remove the PHI so it doesn't
2495 // block other optimizations.
2496 if (PHI) {
2497 AssumptionCache *AC =
2498 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2499 auto &DL = Caller->getParent()->getDataLayout();
2500 if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2501 PHI->replaceAllUsesWith(V);
2502 PHI->eraseFromParent();
2503 }
2504 }
2505
2506 return InlineResult::success();
2507 }
2508