//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;
using ProfileCount = Function::ProfileCount;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
    UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
                        cl::ZeroOrMore, cl::init(true),
                        cl::desc("Use the llvm.experimental.noalias.scope.decl "
                                 "intrinsic during inlining."));

// Disabled by default, because the added alignment assumptions may increase
// compile-time and block optimizations. This option is not suitable for use
// with frontends that emit comprehensive parameter alignment annotations.
static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(false), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

static cl::opt<bool> UpdateReturnAttributes(
    "update-return-attrs", cl::init(true), cl::Hidden,
    cl::desc("Update return attributes on calls within inlined body"));

static cl::opt<unsigned> InlinerAttributeWindow(
    "max-inst-checked-for-throw-during-inlining", cl::Hidden,
    cl::desc("the maximum number of instructions analyzed for may throw during "
             "attribute inference in inlined body"),
    cl::init(4));

namespace {

  /// A class for recording information about inlining a landing pad.
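  ///
  /// For orientation, the caller-side IR this class manipulates typically
  /// looks like the following sketch (all names here are illustrative, not
  /// taken from any particular test):
  ///
  ///   invoke void @callee() to label %cont unwind label %lpad
  /// lpad:
  ///   %eh = landingpad { i8*, i32 } cleanup
  ///
  /// OuterResumeDest is %lpad; 'resume' instructions in the inlined body are
  /// forwarded there, possibly via a split "inner" resume block.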
  class LandingPadInliningInfo {
    /// Destination of the invoke's unwind.
    BasicBlock *OuterResumeDest;

    /// Destination for the callee's resume.
    BasicBlock *InnerResumeDest = nullptr;

    /// LandingPadInst associated with the invoke.
    LandingPadInst *CallerLPad = nullptr;

    /// PHI for EH values from landingpad insts.
    PHINode *InnerEHValuesPHI = nullptr;

    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
        : OuterResumeDest(II->getUnwindDest()) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };

} // end anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
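///
/// Sketch (names illustrative): a callee block ending in
///
///   resume { i8*, i32 } %eh
///
/// is rewritten to branch unconditionally to the inner resume block, and %eh
/// is added as an incoming value of the shared exception-value PHI there.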
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap. When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this. We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad. Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad.
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup. In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued
    // its children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad. Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
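///
/// For intuition, a hypothetical funclet tree might look like:
///
///   %outer = cleanuppad within none []
///   ...
///   %cs = catchswitch within %outer [label %handler] unwind to caller
///
/// Resolving the unwind dest of %cs may require walking down into %handler's
/// pads and their descendants, and then up through %outer and its ancestors.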
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to UnwindEdge. The caller is
/// responsible for updating any PHI nodes in UnwindEdge's block with the
/// values for the new incoming edges.
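///
/// As a sketch (names illustrative): a block containing
///
///   %r = call i32 @may_throw()
///
/// is split at the call, and the call becomes
///
///   %r = invoke i32 @may_throw()
///           to label %split unwind label %unwind.edge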
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow() || CI->isInlineAsm())
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental.deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function; scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given
  // basic block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a catchswitch,
          // leave it as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access,
/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
/// be propagated to all memory-accessing cloned instructions.
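///
/// For example (metadata numbers illustrative): if the call site is
///
///   call void @callee(), !noalias !5
///
/// then each memory-accessing instruction cloned into the caller gets !5
/// concatenated onto any !noalias metadata it already carries.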
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
                                      Function::iterator FEnd) {
  MDNode *MemParallelLoopAccess =
      CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
  MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
  if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
    return;

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // This metadata is only relevant for instructions that access memory.
      if (!I.mayReadOrWriteMemory())
        continue;

      if (MemParallelLoopAccess) {
        // TODO: This probably should not overwrite MemParallelLoopAccess.
        MemParallelLoopAccess = MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
            MemParallelLoopAccess);
        I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                      MemParallelLoopAccess);
      }

      if (AccessGroup)
        I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
            I.getMetadata(LLVMContext::MD_access_group), AccessGroup));

      if (AliasScope)
        I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));

      if (NoAlias)
        I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_noalias), NoAlias));
    }
  }
}

/// Utility for cloning !noalias and !alias.scope metadata. When a code region
/// using scoped alias metadata is inlined, the aliasing relationships may not
/// hold between the two versions. It is necessary to create a deep clone of
/// the metadata, putting the two versions in separate scope domains.
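///
/// For instance (scope numbers illustrative): if a callee tagged with scope
/// !1 is inlined twice into the same caller, each inlined copy receives a
/// fresh clone of !1 in its own domain, so accesses from one copy are not
/// wrongly treated as non-aliasing with accesses from the other.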
class ScopedAliasMetadataDeepCloner {
  using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
  SetVector<const MDNode *> MD;
  MetadataMap MDMap;
  void addRecursiveMetadataUses();

public:
  ScopedAliasMetadataDeepCloner(const Function *F);

  /// Create a new clone of the scoped alias metadata, which will be used by
  /// subsequent remap() calls.
  void clone();

  /// Remap instructions in the given range from the original to the cloned
  /// metadata.
  void remap(Function::iterator FStart, Function::iterator FEnd);
};

ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
    const Function *F) {
  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);

      // We also need to clone the metadata in noalias intrinsics.
      if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        MD.insert(Decl->getScopeList());
    }
  }
  addRecursiveMetadataUses();
}

void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (const Metadata *Op : M->operands())
      if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
        if (MD.insert(OpMD))
          Queue.push_back(OpMD);
  }
}

void ScopedAliasMetadataDeepCloner::clone() {
  assert(MDMap.empty() && "clone() already called ?");

  SmallVector<TempMDTuple, 16> DummyNodes;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  SmallVector<Metadata *, 4> NewOps;
  for (const MDNode *I : MD) {
    for (const Metadata *Op : I->operands()) {
      if (const MDNode *M = dyn_cast<MDNode>(Op))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(Op));
    }

    MDNode *NewM = MDNode::get(I->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
    NewOps.clear();
  }
}

void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
                                          Function::iterator FEnd) {
  if (MDMap.empty())
    return; // Nothing to do.

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // TODO: The null checks for the MDMap.lookup() results should no longer
      // be necessary.
      if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_alias_scope, MNew);

      if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_noalias, MNew);

      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
          Decl->setScopeList(MNew);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
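///
/// As a sketch (names illustrative): after inlining
///
///   define void @f(i8* noalias %p, i8* noalias %q) { ... }
///
/// a fresh scope is created for %p and one for %q; a cloned store through %p
/// is tagged !alias.scope with %p's scope and !noalias with %q's scope.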
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CB.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = std::string(CalledFunc->getName());
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));

    if (UseNoAliasIntrinsic) {
      // Introduce a llvm.experimental.noalias.scope.decl for the noalias
      // argument.
      MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
      auto *NoAliasDecl =
          IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
      // Ignore the result for now. The result will be used when the
      // llvm.noalias intrinsic is introduced.
      (void)NoAliasDecl;
    }
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
          if (AAResults::onlyAccessesArgPointees(MRB))
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, it might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus it could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<const Value *, 4> Objects;
        getUnderlyingObjects(V, Objects, /* LI = */ nullptr);

        for (const Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

static bool MayContainThrowingOrExitingCall(Instruction *Begin,
                                            Instruction *End) {

  assert(Begin->getParent() == End->getParent() &&
         "Expected to be in same basic block!");
  unsigned NumInstChecked = 0;
  // Check that all instructions in the range [Begin, End) are guaranteed to
  // transfer execution to successor.
  for (auto &I : make_range(Begin->getIterator(), End->getIterator()))
    if (NumInstChecked++ > InlinerAttributeWindow ||
        !isGuaranteedToTransferExecutionToSuccessor(&I))
      return true;
  return false;
}

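// Only a small, conservative subset of return attributes may be copied from
// the call site onto calls in the inlined body. For illustration (function
// names hypothetical): from a call site such as
//
//   %r = call noalias nonnull dereferenceable(16) i8* @f()
//
// noalias, nonnull and dereferenceable(16) would all be collected below,
// whereas ABI attributes like signext or zeroext on an integer-returning
// call are deliberately left out.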
static AttrBuilder IdentifyValidAttributes(CallBase &CB) {

  AttrBuilder AB(CB.getAttributes(), AttributeList::ReturnIndex);
  if (AB.empty())
    return AB;
  AttrBuilder Valid;
  // Only allow these whitelisted attributes to be propagated back to the
  // callee. This is because other attributes may only be valid on the call
  // itself, i.e. attributes such as signext and zeroext.
  if (auto DerefBytes = AB.getDereferenceableBytes())
    Valid.addDereferenceableAttr(DerefBytes);
  if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
    Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
  if (AB.contains(Attribute::NoAlias))
    Valid.addAttribute(Attribute::NoAlias);
  if (AB.contains(Attribute::NonNull))
    Valid.addAttribute(Attribute::NonNull);
  return Valid;
}

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
  if (!UpdateReturnAttributes)
    return;

  AttrBuilder Valid = IdentifyValidAttributes(CB);
  if (Valid.empty())
    return;
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  for (auto &BB : *CalledFunction) {
    auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!RI || !isa<CallBase>(RI->getOperand(0)))
      continue;
    auto *RetVal = cast<CallBase>(RI->getOperand(0));
    // Sanity check that the cloned RetVal exists and is a call, otherwise we
    // cannot add the attributes on the cloned RetVal.
    // Simplification during inlining could have transformed the cloned
    // instruction.
    auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
    if (!NewRetVal)
      continue;
    // Backward propagation of attributes to the returned value may be
    // incorrect if it is control flow dependent.
    // Consider:
    //   @callee {
    //     %rv = call @foo()
    //     %rv2 = call @bar()
    //     if (%rv2 != null)
    //       return %rv2
    //     if (%rv == null)
    //       exit()
    //     return %rv
    //   }
    //   caller() {
    //     %val = call nonnull @callee()
    //   }
    // Here we cannot add the nonnull attribute on either foo or bar. So, we
    // limit the check to both RetVal and RI being in the same basic block and
    // there being no throwing/exiting instructions between them.
    if (RI->getParent() != RetVal->getParent() ||
        MayContainThrowingOrExitingCall(RetVal, RI))
      continue;
    // Add to the existing attributes of NewRetVal, i.e. the cloned call
    // instruction.
    // NB! When we have the same attribute already existing on NewRetVal, but
    // with a differing value, the AttributeList's merge API honours the already
    // existing attribute value (i.e. attributes such as dereferenceable,
    // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
    AttributeList AL = NewRetVal->getAttributes();
    AttributeList NewAL =
        AL.addAttributes(Context, AttributeList::ReturnIndex, Valid);
    NewRetVal->setAttributes(NewAL);
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
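///
/// As a sketch (types illustrative): for a callee parameter declared as
///
///   define void @f(i32* align 32 %p)
///
/// inlining emits an @llvm.assume conveying the 32-byte alignment of the
/// passed-in pointer, unless that alignment is already provable at the call
/// site. The exact encoding of the assumption depends on how
/// CreateAlignmentAssumption is configured.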
AddAlignmentAssumptions(CallBase & CB,InlineFunctionInfo & IFI)1254 static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
1255 if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1256 return;
1257
1258 AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
1259 auto &DL = CB.getCaller()->getParent()->getDataLayout();
1260
1261 // To avoid inserting redundant assumptions, we should check for assumptions
1262 // already in the caller. To do this, we might need a DT of the caller.
1263 DominatorTree DT;
1264 bool DTCalculated = false;
1265
1266 Function *CalledFunc = CB.getCalledFunction();
1267 for (Argument &Arg : CalledFunc->args()) {
1268 unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
1269 if (Align && !Arg.hasPassPointeeByValueCopyAttr() && !Arg.hasNUses(0)) {
1270 if (!DTCalculated) {
1271 DT.recalculate(*CB.getCaller());
1272 DTCalculated = true;
1273 }
1274
1275 // If we can already prove the asserted alignment in the context of the
1276 // caller, then don't bother inserting the assumption.
1277 Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
1278 if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= Align)
1279 continue;
1280
1281 CallInst *NewAsmp =
1282 IRBuilder<>(&CB).CreateAlignmentAssumption(DL, ArgVal, Align);
1283 AC->registerAssumption(NewAsmp);
1284 }
1285 }
1286 }
1287
1288 /// Once we have cloned code over from a callee into the caller,
1289 /// update the specified callgraph to reflect the changes we made.
1290 /// Note that it's possible that not all code was copied over, so only
1291 /// some edges of the callgraph may remain.
UpdateCallGraphAfterInlining(CallBase & CB,Function::iterator FirstNewBlock,ValueToValueMapTy & VMap,InlineFunctionInfo & IFI)1292 static void UpdateCallGraphAfterInlining(CallBase &CB,
1293 Function::iterator FirstNewBlock,
1294 ValueToValueMapTy &VMap,
1295 InlineFunctionInfo &IFI) {
1296 CallGraph &CG = *IFI.CG;
1297 const Function *Caller = CB.getCaller();
1298 const Function *Callee = CB.getCalledFunction();
1299 CallGraphNode *CalleeNode = CG[Callee];
1300 CallGraphNode *CallerNode = CG[Caller];
1301
1302 // Since we inlined some uninlined call sites in the callee into the caller,
1303 // add edges from the caller to all of the callees of the callee.
1304 CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1305
1306 // Consider the case where CalleeNode == CallerNode.
1307 CallGraphNode::CalledFunctionsVector CallCache;
1308 if (CalleeNode == CallerNode) {
1309 CallCache.assign(I, E);
1310 I = CallCache.begin();
1311 E = CallCache.end();
1312 }
1313
1314 for (; I != E; ++I) {
1315 // Skip 'refererence' call records.
    if (!I->first)
      continue;

    const Value *OrigCall = *I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    auto *NewCall = dyn_cast<CallBase>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    if (NewCall->getCalledFunction() &&
        NewCall->getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = NewCall->getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(NewCall, CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(NewCall, I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(*cast<CallBase>(&CB));
}
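
// Example: if A contains 'call B' and B contains 'call C', then after inlining
// B into A the cloned 'call C' gives A a new call-graph edge A->C, while the
// edge A->B for the now-deleted call site is removed above.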

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer.  Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
                       /*SrcAlign*/ Align(1), Size);
}
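
// For illustration, the initialization emitted above is roughly:
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %dst, i8* align 1 %src,
//                                        i64 <store size of AggTy>, i1 false)
//
// (a sketch; the builder inserts any pointer bitcasts that are needed).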

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getFunction();
  const DataLayout &DL = Caller->getParent()->getDataLayout();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory.  In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall,
                                   AC) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca.  If we have DataLayout, use nice alignment.
  Align Alignment(DL.getPrefTypeAlignment(AggTy));

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Alignment = max(Alignment, MaybeAlign(ByValAlignment));

  Value *NewAlloca =
      new AllocaInst(AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
                     Arg->getName(), &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}
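
// For illustration, a byval argument is lowered roughly as (a sketch):
//
//   %byval.tmp = alloca %struct.T, align <max(pref, byval)> ; caller entry
//   ; ...followed by a memcpy from the caller's copy, emitted later by
//   ; HandleByValArgumentInit...
//
// and the callee's uses of the argument are remapped to %byval.tmp.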

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users())
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
      if (II->isLifetimeStartOrEnd())
        return true;
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}
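
// e.g. the following alloca counts as already having markers:
//
//   %a = alloca i32
//   %p = bitcast i32* %a to i8*
//   call void @llvm.lifetime.start.p0i8(i64 4, i8* %p)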

/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
/// block. Allocas used in inalloca calls and allocas of dynamic array size
/// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
}

/// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
/// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
                               LLVMContext &Ctx,
                               DenseMap<const MDNode *, MDNode *> &IANodes) {
  auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
  return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
                         OrigDL.getScope(), IA);
}

/// Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall, bool CalleeHasDebugInfo) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused, without
  // this every instruction's inlined-at chain would become distinct from each
  // other.
  DenseMap<const MDNode *, MDNode *> IANodes;

  // Check if we are not generating inline line tables and want to use
  // the call site location instead.
  bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      // Loop metadata needs to be updated so that the start and end locs
      // reference inlined-at locations.
      auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode, &IANodes](
                                   const DILocation &Loc) -> DILocation * {
        return inlineDebugLoc(&Loc, InlinedAtNode, Ctx, IANodes).get();
      };
      updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);

      if (!NoInlineLineTables)
        if (DebugLoc DL = BI->getDebugLoc()) {
          DebugLoc IDL =
              inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
          BI->setDebugLoc(IDL);
          continue;
        }

      if (CalleeHasDebugInfo && !NoInlineLineTables)
        continue;

      // If the inlined instruction has no line number, or if inline info
      // is not being generated, make it look as if it originates from the call
      // location. This is important for ((__always_inline, __nodebug__))
      // functions which must use caller location for all instructions in their
      // function body.

      // Don't update static allocas, as they may get moved later.
      if (auto *AI = dyn_cast<AllocaInst>(BI))
        if (allocaWouldBeStaticInEntry(AI))
          continue;

      BI->setDebugLoc(TheCallDL);
    }

    // Remove debug info intrinsics if we're not keeping inline info.
    if (NoInlineLineTables) {
      BasicBlock::iterator BI = FI->begin();
      while (BI != FI->end()) {
        if (isa<DbgInfoIntrinsic>(BI)) {
          BI = BI->eraseFromParent();
          continue;
        }
        ++BI;
      }
    }

  }
}
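
// For illustration: an instruction at 'line 5, scope !callee' inlined at a
// call site with location 'line 10, scope !caller' ends up with (a sketch):
//
//   !DILocation(line: 5, scope: !callee,
//               inlinedAt: !DILocation(line: 10, scope: !caller))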

/// Update the block frequencies of the caller after a callee has been inlined.
///
/// Each block cloned into the caller has its block frequency scaled by the
/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
/// callee's entry block gets the same frequency as the callsite block and the
/// relative frequencies of all cloned blocks remain the same after cloning.
static void updateCallerBFI(BasicBlock *CallSiteBlock,
                            const ValueToValueMapTy &VMap,
                            BlockFrequencyInfo *CallerBFI,
                            BlockFrequencyInfo *CalleeBFI,
                            const BasicBlock &CalleeEntryBlock) {
  SmallPtrSet<BasicBlock *, 16> ClonedBBs;
  for (auto Entry : VMap) {
    if (!isa<BasicBlock>(Entry.first) || !Entry.second)
      continue;
    auto *OrigBB = cast<BasicBlock>(Entry.first);
    auto *ClonedBB = cast<BasicBlock>(Entry.second);
    uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
    if (!ClonedBBs.insert(ClonedBB).second) {
      // Multiple blocks in the callee might get mapped to one cloned block in
      // the caller since we prune the callee as we clone it. When that happens,
      // we want to use the maximum among the original blocks' frequencies.
      uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
      if (NewFreq > Freq)
        Freq = NewFreq;
    }
    CallerBFI->setBlockFreq(ClonedBB, Freq);
  }
  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
  CallerBFI->setBlockFreqAndScale(
      EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
      ClonedBBs);
}
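
// Example: if the callee's entry block has frequency 8 and the call site block
// has frequency 2, every cloned block's frequency is scaled by 2/8, so the
// cloned entry matches the call site and relative frequencies are preserved.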

/// Update the branch metadata for cloned call instructions.
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
                              const ProfileCount &CalleeEntryCount,
                              const CallBase &TheCall, ProfileSummaryInfo *PSI,
                              BlockFrequencyInfo *CallerBFI) {
  if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
      CalleeEntryCount.getCount() < 1)
    return;
  auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
  int64_t CallCount =
      std::min(CallSiteCount.getValueOr(0), CalleeEntryCount.getCount());
  updateProfileCallee(Callee, -CallCount, &VMap);
}

void llvm::updateProfileCallee(
    Function *Callee, int64_t entryDelta,
    const ValueMap<const Value *, WeakTrackingVH> *VMap) {
  auto CalleeCount = Callee->getEntryCount();
  if (!CalleeCount.hasValue())
    return;

  uint64_t priorEntryCount = CalleeCount.getCount();
  uint64_t newEntryCount;
  // Since CallSiteCount is an estimate, it could exceed the original callee
  // count; clamp the new count to 0 in that case to guard against underflow.
  if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
    newEntryCount = 0;
  else
    newEntryCount = priorEntryCount + entryDelta;

  // During inlining?
  if (VMap) {
    uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
    for (auto Entry : *VMap)
      if (isa<CallInst>(Entry.first))
        if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
          CI->updateProfWeight(cloneEntryCount, priorEntryCount);
  }

  if (entryDelta) {
    Callee->setEntryCount(newEntryCount);

    for (BasicBlock &BB : *Callee)
      // No need to update the callsite if it is pruned during inlining.
      if (!VMap || VMap->count(&BB))
        for (Instruction &I : BB)
          if (CallInst *CI = dyn_cast<CallInst>(&I))
            CI->updateProfWeight(newEntryCount, priorEntryCount);
  }
}
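
// Example: with a prior entry count of 100 and entryDelta == -30 (the profile
// estimates 30 of the calls were just inlined away), the callee's entry count
// drops to 70; cloned call sites are scaled by 30/100 and the call sites
// remaining in the callee body by 70/100.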

/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well defined state if this occurs though.
///
/// Note that this only does one level of inlining.  For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream.  Similarly this will inline a recursive
/// function by one level.
llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
                                        AAResults *CalleeAAR,
                                        bool InsertLifetime,
                                        Function *ForwardVarArgsTo) {
  assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");

  // FIXME: we don't inline callbr yet.
  if (isa<CallBrInst>(CB))
    return InlineResult::failure("We don't inline callbr yet.");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  Function *CalledFunc = CB.getCalledFunction();
  if (!CalledFunc ||               // Can't inline external function or indirect
      CalledFunc->isDeclaration()) // call!
    return InlineResult::failure("external or indirect");

  // The inliner does not know how to inline through calls with operand bundles
  // in general ...
  if (CB.hasOperandBundles()) {
    for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
      uint32_t Tag = CB.getOperandBundleAt(i).getTagID();
      // ... but it knows how to inline through "deopt" operand bundles ...
      if (Tag == LLVMContext::OB_deopt)
        continue;
      // ... and "funclet" operand bundles.
      if (Tag == LLVMContext::OB_funclet)
        continue;

      return InlineResult::failure("unsupported operand bundle");
    }
  }

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CB.doesNotThrow();

  BasicBlock *OrigBB = CB.getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return InlineResult::failure("incompatible GC");
  }

  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn()
          ? Caller->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    //       supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return InlineResult::failure("incompatible personality");
  }

  // We need to figure out which funclet the callsite was in so that we may
  // properly nest the callee.
  Instruction *CallSiteEHPad = nullptr;
  if (CallerPersonality) {
    EHPersonality Personality = classifyEHPersonality(CallerPersonality);
    if (isScopedEHPersonality(Personality)) {
      Optional<OperandBundleUse> ParentFunclet =
          CB.getOperandBundle(LLVMContext::OB_funclet);
      if (ParentFunclet)
        CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

      // OK, the inlining site is legal.  What about the target function?

      if (CallSiteEHPad) {
        if (Personality == EHPersonality::MSVC_CXX) {
          // The MSVC personality cannot tolerate catches getting inlined into
          // cleanup funclets.
          if (isa<CleanupPadInst>(CallSiteEHPad)) {
            // Ok, the call site is within a cleanuppad.  Let's check the callee
            // for catchpads.
            for (const BasicBlock &CalledBB : *CalledFunc) {
              if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
                return InlineResult::failure("catch in cleanup funclet");
            }
          }
        } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant, there may not be any sort of exceptional
          // funclet in the callee.
          for (const BasicBlock &CalledBB : *CalledFunc) {
            if (CalledBB.isEHPad())
              return InlineResult::failure("SEH in cleanup funclet");
          }
        }
      }
    }
  }

  // Determine if we are dealing with a call in an EHPad which does not unwind
  // to caller.
  bool EHPadForCallUnwindsLocally = false;
  if (CallSiteEHPad && isa<CallInst>(CB)) {
    UnwindDestMemoTy FuncletUnwindMap;
    Value *CallSiteUnwindDestToken =
        getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);

    EHPadForCallUnwindsLocally =
        CallSiteUnwindDestToken &&
        !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = --Caller->end();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pairs (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    // When inlining a function that contains noalias scope metadata,
    // this metadata needs to be cloned so that the inlined blocks
    // have different "unique scopes" at every call site.
    // Track the metadata that must be cloned. Do this before other changes to
    // the function, so that we do not get in trouble when inlining caller ==
    // callee.
    ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());

    auto &DL = Caller->getParent()->getDataLayout();

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    auto AI = CB.arg_begin();
    unsigned ArgNo = 0;
    for (Function::arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;
      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CB.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, &CB, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[&*I] = ActualArg;
    }

    // TODO: Remove this when users have been updated to the assume bundles.
    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CB, IFI);

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;

    // Preserve all attributes on the call and its parameters.
    salvageKnowledge(&CB, AC);

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, &CB);
    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
      // Update the BFI of blocks cloned into the caller.
      updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
                      CalledFunc->front());

    updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), CB,
                      IFI.PSI, IFI.CallerBFI);

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              &*FirstNewBlock, IFI);

    Optional<OperandBundleUse> ParentDeopt =
        CB.getOperandBundle(LLVMContext::OB_deopt);
    if (ParentDeopt) {
      SmallVector<OperandBundleDef, 2> OpDefs;

      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
        if (!ICS)
          continue; // instruction was DCE'd or RAUW'ed to undef

        OpDefs.clear();

        OpDefs.reserve(ICS->getNumOperandBundles());

        for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
             ++COBi) {
          auto ChildOB = ICS->getOperandBundleAt(COBi);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
            // If the inlined call has other operand bundles, let them be
            OpDefs.emplace_back(ChildOB);
            continue;
          }

          // It may be useful to separate this logic (of handling operand
          // bundles) out to a separate "policy" component if this gets crowded.
          // Prepend the parent's deoptimization continuation to the newly
          // inlined call's deoptimization continuation.
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
                                  ChildOB.Inputs.size());

          llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
          llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);

          OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
        }

        Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS);

        // Note: the RAUW does the appropriate fixup in VMap, so we need to do
        // this even if the call returns void.
        ICS->replaceAllUsesWith(NewI);

        VH = nullptr;
        ICS->eraseFromParent();
      }
    }
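
    // Example: inlining a callee call site that carries [ "deopt"(i32 1) ]
    // through a caller call site that carries [ "deopt"(i32 0) ] produces a
    // cloned call carrying [ "deopt"(i32 0, i32 1) ]: the parent's
    // continuation is prepended to the child's.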

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CB, FirstNewBlock, VMap, IFI);

    // For 'nodebug' functions, the associated DISubprogram is always null.
    // Conservatively avoid propagating the callsite debug location to
    // instructions inlined from a function whose DISubprogram is not null.
    fixupLineNumbers(Caller, FirstNewBlock, &CB,
                     CalledFunc->getSubprogram() != nullptr);

    // Now clone the inlined noalias scope metadata.
    SAMetadataCloner.clone();
    SAMetadataCloner.remap(FirstNewBlock, Caller->end());

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR);

    // Clone return attributes on the callsite into the calls within the inlined
    // function which feed into its return value.
    AddReturnAttributes(CB, VMap);

    // Propagate metadata on the callsite if necessary.
    PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());

    // Register any cloned assumptions.
    if (IFI.GetAssumptionCache)
      for (BasicBlock &NewBlock :
           make_range(FirstNewBlock->getIterator(), Caller->end()))
        for (Instruction &I : NewBlock)
          if (auto *II = dyn_cast<IntrinsicInst>(&I))
            if (II->getIntrinsicID() == Intrinsic::assume)
              IFI.GetAssumptionCache(*Caller).registerAssumption(II);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!allocaWouldBeStaticInEntry(AI))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             !cast<AllocaInst>(I)->use_empty() &&
             allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(
          InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
    }
  }

  SmallVector<Value*,4> VarArgsToForward;
  SmallVector<AttributeSet, 4> VarArgsAttrs;
  for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
       i < CB.getNumArgOperands(); i++) {
    VarArgsToForward.push_back(CB.getArgOperand(i));
    VarArgsAttrs.push_back(CB.getAttributes().getParamAttributes(i));
  }

  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(&CB))
      CallSiteTailKind = CI->getTailCallKind();

    // For inlining purposes, the "notail" marker is the same as no marker.
    if (CallSiteTailKind == CallInst::TCK_NoTail)
      CallSiteTailKind = CallInst::TCK_None;

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (auto II = BB->begin(); II != BB->end();) {
        Instruction &I = *II++;
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // Forward varargs from inlined call site to calls to the
        // ForwardVarArgsTo function, if requested, and to musttail calls.
        if (!VarArgsToForward.empty() &&
            ((ForwardVarArgsTo &&
              CI->getCalledFunction() == ForwardVarArgsTo) ||
             CI->isMustTailCall())) {
          // Collect attributes for non-vararg parameters.
          AttributeList Attrs = CI->getAttributes();
          SmallVector<AttributeSet, 8> ArgAttrs;
          if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
            for (unsigned ArgNo = 0;
                 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
              ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
          }

          // Add VarArg attributes.
          ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
          Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
                                     Attrs.getRetAttributes(), ArgAttrs);
          // Add VarArgs to existing parameters.
          SmallVector<Value *, 6> Params(CI->arg_operands());
          Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
          CallInst *NewCI = CallInst::Create(
              CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
          NewCI->setDebugLoc(CI->getDebugLoc());
          NewCI->setAttributes(Attrs);
          NewCI->setCallingConv(CI->getCallingConv());
          CI->replaceAllUsesWith(NewCI);
          CI->eraseFromParent();
          CI = NewCI;
        }

        if (Function *F = CI->getCalledFunction())
          InlinedDeoptimizeCalls |=
              F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

        // We need to reduce the strength of any inlined tail calls.  For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth.  For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'.  If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail.  Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        //
        // Inlined notail calls should remain notail calls.
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        if (ChildTCK != CallInst::TCK_NoTail)
          ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(&FirstNewBlock->front());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];
      // Don't mark swifterror allocas. They can't have bitcast uses.
      if (AI->isSwiftError())
        continue;

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
          dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that array size doesn't saturate uint64_t and doesn't
        // overflow when it's multiplied by type size.
        if (!AllocaTypeSize.isScalable() &&
            AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
            std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
                AllocaTypeSize.getFixedSize()) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
        // call and a return.  The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        if (InlinedDeoptimizeCalls &&
            RI->getParent()->getTerminatingDeoptimizeCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }
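
  // For illustration, for a static 'alloca i32' this emits roughly (a sketch):
  //
  //   call void @llvm.lifetime.start.p0i8(i64 4, i8* %cast) ; at inlined entry
  //   ...
  //   call void @llvm.lifetime.end.p0i8(i64 4, i8* %cast)   ; before each ret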

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, {}, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail or deoptimize
      // call and a return.  The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      if (InlinedDeoptimizeCalls &&
          RI->getParent()->getTerminatingDeoptimizeCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }
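
  // For illustration, the wrapped region looks roughly like (a sketch):
  //
  //   %savedstack = call i8* @llvm.stacksave()
  //   ...inlined body containing the dynamic allocas...
  //   call void @llvm.stackrestore(i8* %savedstack)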

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.  This is sensitive to which
  // funclet pads were top-level in the inlinee, so must be done before
  // rewriting the "parent pad" links.
  if (auto *II = dyn_cast<InvokeInst>(&CB)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Update the lexical scopes of the new funclets and callsites.
  // Anything that had 'none' as its parent is now nested inside the callsite's
  // EHPad.
  if (CallSiteEHPad) {
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB) {
      // Add bundle operands to any top-level call sites.
      SmallVector<OperandBundleDef, 1> OpBundles;
      for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
        CallBase *I = dyn_cast<CallBase>(&*BBI++);
        if (!I)
          continue;

        // Skip call sites which are nounwind intrinsics.
        auto *CalledFn =
            dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
        if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
          continue;

        // Skip call sites which already have a "funclet" bundle.
        if (I->getOperandBundle(LLVMContext::OB_funclet))
          continue;

        I->getOperandBundlesAsDefs(OpBundles);
        OpBundles.emplace_back("funclet", CallSiteEHPad);

        Instruction *NewInst = CallBase::Create(I, OpBundles, I);
        NewInst->takeName(I);
        I->replaceAllUsesWith(NewInst);
        I->eraseFromParent();

        OpBundles.clear();
      }

      // It is problematic if the inlinee has a cleanupret which unwinds to
      // caller and we inline it into a call site which doesn't unwind but into
      // an EH pad that does.  Such an edge must be dynamically unreachable.
      // As such, we replace the cleanupret with unreachable.
      if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
        if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
          changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);

      Instruction *I = BB->getFirstNonPHI();
      if (!I->isEHPad())
        continue;

      if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
        if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
          CatchSwitch->setParentPad(CallSiteEHPad);
      } else {
        auto *FPI = cast<FuncletPadInst>(I);
        if (isa<ConstantTokenNone>(FPI->getParentPad()))
          FPI->setParentPad(CallSiteEHPad);
      }
    }
  }

  if (InlinedDeoptimizeCalls) {
    // We need to at least remove the deoptimizing returns from the Return set,
    // so that the control flow from those returns does not get merged into the
    // caller (but terminate it instead).  If the caller's return type does not
    // match the callee's return type, we also need to change the return type of
    // the intrinsic.
    if (Caller->getReturnType() == CB.getType()) {
      llvm::erase_if(Returns, [](ReturnInst *RI) {
        return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
      });
    } else {
      SmallVector<ReturnInst *, 8> NormalReturns;
      Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
          Caller->getParent(), Intrinsic::experimental_deoptimize,
          {Caller->getReturnType()});

      for (ReturnInst *RI : Returns) {
        CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
        if (!DeoptCall) {
          NormalReturns.push_back(RI);
          continue;
        }

        // The calling convention on the deoptimize call itself may be bogus,
        // since the code we're inlining may have undefined behavior (and may
        // never actually execute at runtime); but all
        // @llvm.experimental.deoptimize declarations have to have the same
        // calling convention in a well-formed module.
        auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
        NewDeoptIntrinsic->setCallingConv(CallingConv);
        auto *CurBB = RI->getParent();
        RI->eraseFromParent();

        SmallVector<Value *, 4> CallArgs(DeoptCall->args());

        SmallVector<OperandBundleDef, 1> OpBundles;
        DeoptCall->getOperandBundlesAsDefs(OpBundles);
        DeoptCall->eraseFromParent();
        assert(!OpBundles.empty() &&
               "Expected at least the deopt operand bundle");

        IRBuilder<> Builder(CurBB);
        CallInst *NewDeoptCall =
            Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
        NewDeoptCall->setCallingConv(CallingConv);
        if (NewDeoptCall->getType()->isVoidTy())
          Builder.CreateRetVoid();
        else
          Builder.CreateRet(NewDeoptCall);
      }

      // Leave behind the normal returns so we can merge control flow.
      std::swap(Returns, NormalReturns);
    }
  }

  // Handle any inlined musttail call sites.  In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail.  Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }
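
  // Example: if the callee returns i8* while the caller returns i32*, an
  // inlined 'musttail call i8* @f()' followed by 'ret i8* %v' is rewritten
  // above to bitcast the call's result to i32* before the new return.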

  // Now that all of the transforms on the inlined code have taken place but
  // before we splice the inlined code into the CFG and lose track of which
  // blocks were actually inlined, collect the call sites.  We only do this if
  // call graph updates weren't requested, as those provide value handle based
  // tracking of inlined call sites instead.
  if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
    // Otherwise just collect the raw call sites that were inlined.
    for (BasicBlock &NewBB :
         make_range(FirstNewBlock->getIterator(), Caller->end()))
      for (Instruction &I : NewBB)
        if (auto *CB = dyn_cast<CallBase>(&I))
          IFI.InlinedCallSites.push_back(CB);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(CB.getIterator(), FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!CB.use_empty()) {
      ReturnInst *R = Returns[0];
      if (&CB == R->getReturnValue())
        CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
      else
        CB.replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    CB.eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return InlineResult::success();
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and make the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");

  } else { // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
                                          CalledFunc->getName() + ".exit");
  }

  if (IFI.CallerBFI) {
    // Copy original BB's block frequency to AfterCallBB
    IFI.CallerBFI->setBlockFreq(
        AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  Instruction *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, &*FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
                                     Caller->getBasicBlockList(), FirstNewBlock,
                                     Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!CB.use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), CB.getName(),
                            &AfterCallBB->front());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      CB.replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!CB.use_empty()) {
      if (&CB == Returns[0]->getReturnValue())
        CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
      else
        CB.replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!CB.use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  CB.eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it.  It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_empty(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).  If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return InlineResult::success();
}