//===- ArgumentPromotion.cpp - Promote by-reference arguments -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass promotes "by reference" arguments to be "by value" arguments. In
// practice, this means looking for internal functions that have pointer
// arguments. If it can prove, through the use of alias analysis, that an
// argument is *only* loaded, then it can pass the value into the function
// instead of the address of the value. This can cause recursive simplification
// of code and lead to the elimination of allocas (especially in C++ template
// code like the STL).
//
// This pass also handles aggregate arguments that are passed into a function,
// scalarizing them if the elements of the aggregate are only loaded. Note that
// by default it refuses to scalarize aggregates which would require passing in
// more than three operands to the function, because passing thousands of
// operands for a large array or structure is unprofitable! This limit can be
// configured or disabled, however.
//
// Note that this transformation could also be done for arguments that are only
// stored to (returning the value instead), but does not currently. This case
// would be best handled when and if LLVM begins supporting multiple return
// values from functions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

87 using namespace llvm;
88
89 #define DEBUG_TYPE "argpromotion"
90
91 STATISTIC(NumArgumentsPromoted, "Number of pointer arguments promoted");
92 STATISTIC(NumAggregatesPromoted, "Number of aggregate arguments promoted");
93 STATISTIC(NumByValArgsPromoted, "Number of byval arguments promoted");
94 STATISTIC(NumArgumentsDead, "Number of dead pointer args eliminated");
95
96 /// A vector used to hold the indices of a single GEP instruction
97 using IndicesVector = std::vector<uint64_t>;
98
99 /// DoPromotion - This method actually performs the promotion of the specified
100 /// arguments, and returns the new function. At this point, we know that it's
101 /// safe to do so.
102 static Function *
doPromotion(Function * F,SmallPtrSetImpl<Argument * > & ArgsToPromote,SmallPtrSetImpl<Argument * > & ByValArgsToTransform,Optional<function_ref<void (CallSite OldCS,CallSite NewCS)>> ReplaceCallSite)103 doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
104 SmallPtrSetImpl<Argument *> &ByValArgsToTransform,
105 Optional<function_ref<void(CallSite OldCS, CallSite NewCS)>>
106 ReplaceCallSite) {
107 // Start by computing a new prototype for the function, which is the same as
108 // the old function, but has modified arguments.
109 FunctionType *FTy = F->getFunctionType();
110 std::vector<Type *> Params;
111
112 using ScalarizeTable = std::set<std::pair<Type *, IndicesVector>>;
113
114 // ScalarizedElements - If we are promoting a pointer that has elements
115 // accessed out of it, keep track of which elements are accessed so that we
116 // can add one argument for each.
117 //
118 // Arguments that are directly loaded will have a zero element value here, to
119 // handle cases where there are both a direct load and GEP accesses.
120 std::map<Argument *, ScalarizeTable> ScalarizedElements;
121
122 // OriginalLoads - Keep track of a representative load instruction from the
123 // original function so that we can tell the alias analysis implementation
124 // what the new GEP/Load instructions we are inserting look like.
125 // We need to keep the original loads for each argument and the elements
126 // of the argument that are accessed.
127 std::map<std::pair<Argument *, IndicesVector>, LoadInst *> OriginalLoads;
128
129 // Attribute - Keep track of the parameter attributes for the arguments
130 // that we are *not* promoting. For the ones that we do promote, the parameter
131 // attributes are lost
132 SmallVector<AttributeSet, 8> ArgAttrVec;
133 AttributeList PAL = F->getAttributes();
134
135 // First, determine the new argument list
136 unsigned ArgNo = 0;
137 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
138 ++I, ++ArgNo) {
139 if (ByValArgsToTransform.count(&*I)) {
140 // Simple byval argument? Just add all the struct element types.
141 Type *AgTy = cast<PointerType>(I->getType())->getElementType();
142 StructType *STy = cast<StructType>(AgTy);
143 Params.insert(Params.end(), STy->element_begin(), STy->element_end());
144 ArgAttrVec.insert(ArgAttrVec.end(), STy->getNumElements(),
145 AttributeSet());
146 ++NumByValArgsPromoted;
147 } else if (!ArgsToPromote.count(&*I)) {
148 // Unchanged argument
149 Params.push_back(I->getType());
150 ArgAttrVec.push_back(PAL.getParamAttributes(ArgNo));
151 } else if (I->use_empty()) {
152 // Dead argument (which are always marked as promotable)
153 ++NumArgumentsDead;
154
155 // There may be remaining metadata uses of the argument for things like
156 // llvm.dbg.value. Replace them with undef.
157 I->replaceAllUsesWith(UndefValue::get(I->getType()));
158 } else {
159 // Okay, this is being promoted. This means that the only uses are loads
160 // or GEPs which are only used by loads
161
162 // In this table, we will track which indices are loaded from the argument
163 // (where direct loads are tracked as no indices).
164 ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
165 for (User *U : I->users()) {
166 Instruction *UI = cast<Instruction>(U);
167 Type *SrcTy;
168 if (LoadInst *L = dyn_cast<LoadInst>(UI))
169 SrcTy = L->getType();
170 else
171 SrcTy = cast<GetElementPtrInst>(UI)->getSourceElementType();
172 IndicesVector Indices;
173 Indices.reserve(UI->getNumOperands() - 1);
174 // Since loads will only have a single operand, and GEPs only a single
175 // non-index operand, this will record direct loads without any indices,
176 // and gep+loads with the GEP indices.
177 for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
178 II != IE; ++II)
179 Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
180 // GEPs with a single 0 index can be merged with direct loads
181 if (Indices.size() == 1 && Indices.front() == 0)
182 Indices.clear();
183 ArgIndices.insert(std::make_pair(SrcTy, Indices));
184 LoadInst *OrigLoad;
185 if (LoadInst *L = dyn_cast<LoadInst>(UI))
186 OrigLoad = L;
187 else
188 // Take any load, we will use it only to update Alias Analysis
189 OrigLoad = cast<LoadInst>(UI->user_back());
190 OriginalLoads[std::make_pair(&*I, Indices)] = OrigLoad;
191 }
192
193 // Add a parameter to the function for each element passed in.
194 for (const auto &ArgIndex : ArgIndices) {
195 // not allowed to dereference ->begin() if size() is 0
196 Params.push_back(GetElementPtrInst::getIndexedType(
197 cast<PointerType>(I->getType()->getScalarType())->getElementType(),
198 ArgIndex.second));
199 ArgAttrVec.push_back(AttributeSet());
200 assert(Params.back());
201 }
202
203 if (ArgIndices.size() == 1 && ArgIndices.begin()->second.empty())
204 ++NumArgumentsPromoted;
205 else
206 ++NumAggregatesPromoted;
207 }
208 }
209
210 Type *RetTy = FTy->getReturnType();
211
212 // Construct the new function type using the new arguments.
213 FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());
214
215 // Create the new function body and insert it into the module.
216 Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName());
217 NF->copyAttributesFrom(F);
218
219 // Patch the pointer to LLVM function in debug info descriptor.
220 NF->setSubprogram(F->getSubprogram());
221 F->setSubprogram(nullptr);
222
223 LLVM_DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
224 << "From: " << *F);
225
226 // Recompute the parameter attributes list based on the new arguments for
227 // the function.
228 NF->setAttributes(AttributeList::get(F->getContext(), PAL.getFnAttributes(),
229 PAL.getRetAttributes(), ArgAttrVec));
230 ArgAttrVec.clear();
231
232 F->getParent()->getFunctionList().insert(F->getIterator(), NF);
233 NF->takeName(F);
234
235 // Loop over all of the callers of the function, transforming the call sites
236 // to pass in the loaded pointers.
237 //
238 SmallVector<Value *, 16> Args;
239 while (!F->use_empty()) {
240 CallSite CS(F->user_back());
241 assert(CS.getCalledFunction() == F);
242 Instruction *Call = CS.getInstruction();
243 const AttributeList &CallPAL = CS.getAttributes();
244
245 // Loop over the operands, inserting GEP and loads in the caller as
246 // appropriate.
247 CallSite::arg_iterator AI = CS.arg_begin();
248 ArgNo = 0;
249 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
250 ++I, ++AI, ++ArgNo)
251 if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
252 Args.push_back(*AI); // Unmodified argument
253 ArgAttrVec.push_back(CallPAL.getParamAttributes(ArgNo));
254 } else if (ByValArgsToTransform.count(&*I)) {
255 // Emit a GEP and load for each element of the struct.
256 Type *AgTy = cast<PointerType>(I->getType())->getElementType();
257 StructType *STy = cast<StructType>(AgTy);
258 Value *Idxs[2] = {
259 ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr};
260 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
261 Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
262 Value *Idx = GetElementPtrInst::Create(
263 STy, *AI, Idxs, (*AI)->getName() + "." + Twine(i), Call);
264 // TODO: Tell AA about the new values?
265 Args.push_back(new LoadInst(Idx, Idx->getName() + ".val", Call));
266 ArgAttrVec.push_back(AttributeSet());
267 }
268 } else if (!I->use_empty()) {
269 // Non-dead argument: insert GEPs and loads as appropriate.
270 ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
271 // Store the Value* version of the indices in here, but declare it now
272 // for reuse.
273 std::vector<Value *> Ops;
274 for (const auto &ArgIndex : ArgIndices) {
275 Value *V = *AI;
276 LoadInst *OrigLoad =
277 OriginalLoads[std::make_pair(&*I, ArgIndex.second)];
278 if (!ArgIndex.second.empty()) {
279 Ops.reserve(ArgIndex.second.size());
280 Type *ElTy = V->getType();
281 for (auto II : ArgIndex.second) {
282 // Use i32 to index structs, and i64 for others (pointers/arrays).
283 // This satisfies GEP constraints.
284 Type *IdxTy =
285 (ElTy->isStructTy() ? Type::getInt32Ty(F->getContext())
286 : Type::getInt64Ty(F->getContext()));
287 Ops.push_back(ConstantInt::get(IdxTy, II));
288 // Keep track of the type we're currently indexing.
289 if (auto *ElPTy = dyn_cast<PointerType>(ElTy))
290 ElTy = ElPTy->getElementType();
291 else
292 ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(II);
293 }
294 // And create a GEP to extract those indices.
295 V = GetElementPtrInst::Create(ArgIndex.first, V, Ops,
296 V->getName() + ".idx", Call);
297 Ops.clear();
298 }
299 // Since we're replacing a load make sure we take the alignment
300 // of the previous load.
301 LoadInst *newLoad = new LoadInst(V, V->getName() + ".val", Call);
302 newLoad->setAlignment(OrigLoad->getAlignment());
303 // Transfer the AA info too.
304 AAMDNodes AAInfo;
305 OrigLoad->getAAMetadata(AAInfo);
306 newLoad->setAAMetadata(AAInfo);
307
308 Args.push_back(newLoad);
309 ArgAttrVec.push_back(AttributeSet());
310 }
311 }
312
313 // Push any varargs arguments on the list.
314 for (; AI != CS.arg_end(); ++AI, ++ArgNo) {
315 Args.push_back(*AI);
316 ArgAttrVec.push_back(CallPAL.getParamAttributes(ArgNo));
317 }
318
319 SmallVector<OperandBundleDef, 1> OpBundles;
320 CS.getOperandBundlesAsDefs(OpBundles);
321
322 CallSite NewCS;
323 if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
324 NewCS = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
325 Args, OpBundles, "", Call);
326 } else {
327 auto *NewCall = CallInst::Create(NF, Args, OpBundles, "", Call);
328 NewCall->setTailCallKind(cast<CallInst>(Call)->getTailCallKind());
329 NewCS = NewCall;
330 }
331 NewCS.setCallingConv(CS.getCallingConv());
332 NewCS.setAttributes(
333 AttributeList::get(F->getContext(), CallPAL.getFnAttributes(),
334 CallPAL.getRetAttributes(), ArgAttrVec));
335 NewCS->setDebugLoc(Call->getDebugLoc());
336 uint64_t W;
337 if (Call->extractProfTotalWeight(W))
338 NewCS->setProfWeight(W);
339 Args.clear();
340 ArgAttrVec.clear();
341
342 // Update the callgraph to know that the callsite has been transformed.
343 if (ReplaceCallSite)
344 (*ReplaceCallSite)(CS, NewCS);
345
346 if (!Call->use_empty()) {
347 Call->replaceAllUsesWith(NewCS.getInstruction());
348 NewCS->takeName(Call);
349 }
350
351 // Finally, remove the old call from the program, reducing the use-count of
352 // F.
353 Call->eraseFromParent();
354 }
355
356 const DataLayout &DL = F->getParent()->getDataLayout();
357
358 // Since we have now created the new function, splice the body of the old
359 // function right into the new function, leaving the old rotting hulk of the
360 // function empty.
361 NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());
362
363 // Loop over the argument list, transferring uses of the old arguments over to
364 // the new arguments, also transferring over the names as well.
365 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
366 I2 = NF->arg_begin();
367 I != E; ++I) {
368 if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
369 // If this is an unmodified argument, move the name and users over to the
370 // new version.
371 I->replaceAllUsesWith(&*I2);
372 I2->takeName(&*I);
373 ++I2;
374 continue;
375 }
376
377 if (ByValArgsToTransform.count(&*I)) {
378 // In the callee, we create an alloca, and store each of the new incoming
379 // arguments into the alloca.
380 Instruction *InsertPt = &NF->begin()->front();
381
382 // Just add all the struct element types.
383 Type *AgTy = cast<PointerType>(I->getType())->getElementType();
384 Value *TheAlloca = new AllocaInst(AgTy, DL.getAllocaAddrSpace(), nullptr,
385 I->getParamAlignment(), "", InsertPt);
386 StructType *STy = cast<StructType>(AgTy);
387 Value *Idxs[2] = {ConstantInt::get(Type::getInt32Ty(F->getContext()), 0),
388 nullptr};
389
390 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
391 Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
392 Value *Idx = GetElementPtrInst::Create(
393 AgTy, TheAlloca, Idxs, TheAlloca->getName() + "." + Twine(i),
394 InsertPt);
395 I2->setName(I->getName() + "." + Twine(i));
396 new StoreInst(&*I2++, Idx, InsertPt);
397 }
398
399 // Anything that used the arg should now use the alloca.
400 I->replaceAllUsesWith(TheAlloca);
401 TheAlloca->takeName(&*I);
402
403 // If the alloca is used in a call, we must clear the tail flag since
404 // the callee now uses an alloca from the caller.
405 for (User *U : TheAlloca->users()) {
406 CallInst *Call = dyn_cast<CallInst>(U);
407 if (!Call)
408 continue;
409 Call->setTailCall(false);
410 }
411 continue;
412 }
413
414 if (I->use_empty())
415 continue;
416
417 // Otherwise, if we promoted this argument, then all users are load
418 // instructions (or GEPs with only load users), and all loads should be
419 // using the new argument that we added.
420 ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
421
422 while (!I->use_empty()) {
423 if (LoadInst *LI = dyn_cast<LoadInst>(I->user_back())) {
424 assert(ArgIndices.begin()->second.empty() &&
425 "Load element should sort to front!");
426 I2->setName(I->getName() + ".val");
427 LI->replaceAllUsesWith(&*I2);
428 LI->eraseFromParent();
429 LLVM_DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
430 << "' in function '" << F->getName() << "'\n");
431 } else {
432 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
433 IndicesVector Operands;
434 Operands.reserve(GEP->getNumIndices());
435 for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
436 II != IE; ++II)
437 Operands.push_back(cast<ConstantInt>(*II)->getSExtValue());
438
439 // GEPs with a single 0 index can be merged with direct loads
440 if (Operands.size() == 1 && Operands.front() == 0)
441 Operands.clear();
442
443 Function::arg_iterator TheArg = I2;
444 for (ScalarizeTable::iterator It = ArgIndices.begin();
445 It->second != Operands; ++It, ++TheArg) {
446 assert(It != ArgIndices.end() && "GEP not handled??");
447 }
448
449 std::string NewName = I->getName();
450 for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
451 NewName += "." + utostr(Operands[i]);
452 }
453 NewName += ".val";
454 TheArg->setName(NewName);
455
456 LLVM_DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
457 << "' of function '" << NF->getName() << "'\n");
458
459 // All of the uses must be load instructions. Replace them all with
460 // the argument specified by ArgNo.
461 while (!GEP->use_empty()) {
462 LoadInst *L = cast<LoadInst>(GEP->user_back());
463 L->replaceAllUsesWith(&*TheArg);
464 L->eraseFromParent();
465 }
466 GEP->eraseFromParent();
467 }
468 }
469
470 // Increment I2 past all of the arguments added for this promoted pointer.
471 std::advance(I2, ArgIndices.size());
472 }
473
474 return NF;
475 }
476
477 /// AllCallersPassInValidPointerForArgument - Return true if we can prove that
478 /// all callees pass in a valid pointer for the specified function argument.
allCallersPassInValidPointerForArgument(Argument * Arg)479 static bool allCallersPassInValidPointerForArgument(Argument *Arg) {
480 Function *Callee = Arg->getParent();
481 const DataLayout &DL = Callee->getParent()->getDataLayout();
482
483 unsigned ArgNo = Arg->getArgNo();
484
485 // Look at all call sites of the function. At this point we know we only have
486 // direct callees.
487 for (User *U : Callee->users()) {
488 CallSite CS(U);
489 assert(CS && "Should only have direct calls!");
490
491 if (!isDereferenceablePointer(CS.getArgument(ArgNo), DL))
492 return false;
493 }
494 return true;
495 }
496
497 /// Returns true if Prefix is a prefix of longer. That means, Longer has a size
498 /// that is greater than or equal to the size of prefix, and each of the
499 /// elements in Prefix is the same as the corresponding elements in Longer.
500 ///
501 /// This means it also returns true when Prefix and Longer are equal!
isPrefix(const IndicesVector & Prefix,const IndicesVector & Longer)502 static bool isPrefix(const IndicesVector &Prefix, const IndicesVector &Longer) {
503 if (Prefix.size() > Longer.size())
504 return false;
505 return std::equal(Prefix.begin(), Prefix.end(), Longer.begin());
506 }
507
508 /// Checks if Indices, or a prefix of Indices, is in Set.
prefixIn(const IndicesVector & Indices,std::set<IndicesVector> & Set)509 static bool prefixIn(const IndicesVector &Indices,
510 std::set<IndicesVector> &Set) {
511 std::set<IndicesVector>::iterator Low;
512 Low = Set.upper_bound(Indices);
513 if (Low != Set.begin())
514 Low--;
515 // Low is now the last element smaller than or equal to Indices. This means
516 // it points to a prefix of Indices (possibly Indices itself), if such
517 // prefix exists.
518 //
519 // This load is safe if any prefix of its operands is safe to load.
520 return Low != Set.end() && isPrefix(*Low, Indices);
521 }
522
523 /// Mark the given indices (ToMark) as safe in the given set of indices
524 /// (Safe). Marking safe usually means adding ToMark to Safe. However, if there
525 /// is already a prefix of Indices in Safe, Indices are implicitely marked safe
526 /// already. Furthermore, any indices that Indices is itself a prefix of, are
527 /// removed from Safe (since they are implicitely safe because of Indices now).
markIndicesSafe(const IndicesVector & ToMark,std::set<IndicesVector> & Safe)528 static void markIndicesSafe(const IndicesVector &ToMark,
529 std::set<IndicesVector> &Safe) {
530 std::set<IndicesVector>::iterator Low;
531 Low = Safe.upper_bound(ToMark);
532 // Guard against the case where Safe is empty
533 if (Low != Safe.begin())
534 Low--;
535 // Low is now the last element smaller than or equal to Indices. This
536 // means it points to a prefix of Indices (possibly Indices itself), if
537 // such prefix exists.
538 if (Low != Safe.end()) {
539 if (isPrefix(*Low, ToMark))
540 // If there is already a prefix of these indices (or exactly these
541 // indices) marked a safe, don't bother adding these indices
542 return;
543
544 // Increment Low, so we can use it as a "insert before" hint
545 ++Low;
546 }
547 // Insert
548 Low = Safe.insert(Low, ToMark);
549 ++Low;
550 // If there we're a prefix of longer index list(s), remove those
551 std::set<IndicesVector>::iterator End = Safe.end();
552 while (Low != End && isPrefix(ToMark, *Low)) {
553 std::set<IndicesVector>::iterator Remove = Low;
554 ++Low;
555 Safe.erase(Remove);
556 }
557 }
558
559 /// isSafeToPromoteArgument - As you might guess from the name of this method,
560 /// it checks to see if it is both safe and useful to promote the argument.
561 /// This method limits promotion of aggregates to only promote up to three
562 /// elements of the aggregate in order to avoid exploding the number of
563 /// arguments passed in.
isSafeToPromoteArgument(Argument * Arg,bool isByValOrInAlloca,AAResults & AAR,unsigned MaxElements)564 static bool isSafeToPromoteArgument(Argument *Arg, bool isByValOrInAlloca,
565 AAResults &AAR, unsigned MaxElements) {
566 using GEPIndicesSet = std::set<IndicesVector>;
567
568 // Quick exit for unused arguments
569 if (Arg->use_empty())
570 return true;
571
572 // We can only promote this argument if all of the uses are loads, or are GEP
573 // instructions (with constant indices) that are subsequently loaded.
574 //
575 // Promoting the argument causes it to be loaded in the caller
576 // unconditionally. This is only safe if we can prove that either the load
577 // would have happened in the callee anyway (ie, there is a load in the entry
578 // block) or the pointer passed in at every call site is guaranteed to be
579 // valid.
580 // In the former case, invalid loads can happen, but would have happened
581 // anyway, in the latter case, invalid loads won't happen. This prevents us
582 // from introducing an invalid load that wouldn't have happened in the
583 // original code.
584 //
585 // This set will contain all sets of indices that are loaded in the entry
586 // block, and thus are safe to unconditionally load in the caller.
587 //
588 // This optimization is also safe for InAlloca parameters, because it verifies
589 // that the address isn't captured.
590 GEPIndicesSet SafeToUnconditionallyLoad;
591
592 // This set contains all the sets of indices that we are planning to promote.
593 // This makes it possible to limit the number of arguments added.
594 GEPIndicesSet ToPromote;
595
596 // If the pointer is always valid, any load with first index 0 is valid.
597 if (isByValOrInAlloca || allCallersPassInValidPointerForArgument(Arg))
598 SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));
599
600 // First, iterate the entry block and mark loads of (geps of) arguments as
601 // safe.
602 BasicBlock &EntryBlock = Arg->getParent()->front();
603 // Declare this here so we can reuse it
604 IndicesVector Indices;
605 for (Instruction &I : EntryBlock)
606 if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
607 Value *V = LI->getPointerOperand();
608 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
609 V = GEP->getPointerOperand();
610 if (V == Arg) {
611 // This load actually loads (part of) Arg? Check the indices then.
612 Indices.reserve(GEP->getNumIndices());
613 for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
614 II != IE; ++II)
615 if (ConstantInt *CI = dyn_cast<ConstantInt>(*II))
616 Indices.push_back(CI->getSExtValue());
617 else
618 // We found a non-constant GEP index for this argument? Bail out
619 // right away, can't promote this argument at all.
620 return false;
621
622 // Indices checked out, mark them as safe
623 markIndicesSafe(Indices, SafeToUnconditionallyLoad);
624 Indices.clear();
625 }
626 } else if (V == Arg) {
627 // Direct loads are equivalent to a GEP with a single 0 index.
628 markIndicesSafe(IndicesVector(1, 0), SafeToUnconditionallyLoad);
629 }
630 }
631
632 // Now, iterate all uses of the argument to see if there are any uses that are
633 // not (GEP+)loads, or any (GEP+)loads that are not safe to promote.
634 SmallVector<LoadInst *, 16> Loads;
635 IndicesVector Operands;
636 for (Use &U : Arg->uses()) {
637 User *UR = U.getUser();
638 Operands.clear();
639 if (LoadInst *LI = dyn_cast<LoadInst>(UR)) {
640 // Don't hack volatile/atomic loads
641 if (!LI->isSimple())
642 return false;
643 Loads.push_back(LI);
644 // Direct loads are equivalent to a GEP with a zero index and then a load.
645 Operands.push_back(0);
646 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UR)) {
647 if (GEP->use_empty()) {
648 // Dead GEP's cause trouble later. Just remove them if we run into
649 // them.
650 GEP->eraseFromParent();
651 // TODO: This runs the above loop over and over again for dead GEPs
652 // Couldn't we just do increment the UI iterator earlier and erase the
653 // use?
654 return isSafeToPromoteArgument(Arg, isByValOrInAlloca, AAR,
655 MaxElements);
656 }
657
658 // Ensure that all of the indices are constants.
659 for (User::op_iterator i = GEP->idx_begin(), e = GEP->idx_end(); i != e;
660 ++i)
661 if (ConstantInt *C = dyn_cast<ConstantInt>(*i))
662 Operands.push_back(C->getSExtValue());
663 else
664 return false; // Not a constant operand GEP!
665
666 // Ensure that the only users of the GEP are load instructions.
667 for (User *GEPU : GEP->users())
668 if (LoadInst *LI = dyn_cast<LoadInst>(GEPU)) {
669 // Don't hack volatile/atomic loads
670 if (!LI->isSimple())
671 return false;
672 Loads.push_back(LI);
673 } else {
674 // Other uses than load?
675 return false;
676 }
677 } else {
678 return false; // Not a load or a GEP.
679 }
680
681 // Now, see if it is safe to promote this load / loads of this GEP. Loading
682 // is safe if Operands, or a prefix of Operands, is marked as safe.
683 if (!prefixIn(Operands, SafeToUnconditionallyLoad))
684 return false;
685
686 // See if we are already promoting a load with these indices. If not, check
687 // to make sure that we aren't promoting too many elements. If so, nothing
688 // to do.
689 if (ToPromote.find(Operands) == ToPromote.end()) {
690 if (MaxElements > 0 && ToPromote.size() == MaxElements) {
691 LLVM_DEBUG(dbgs() << "argpromotion not promoting argument '"
692 << Arg->getName()
693 << "' because it would require adding more "
694 << "than " << MaxElements
695 << " arguments to the function.\n");
696 // We limit aggregate promotion to only promoting up to a fixed number
697 // of elements of the aggregate.
698 return false;
699 }
700 ToPromote.insert(std::move(Operands));
701 }
702 }
703
704 if (Loads.empty())
705 return true; // No users, this is a dead argument.
706
707 // Okay, now we know that the argument is only used by load instructions and
708 // it is safe to unconditionally perform all of them. Use alias analysis to
709 // check to see if the pointer is guaranteed to not be modified from entry of
710 // the function to each of the load instructions.
711
712 // Because there could be several/many load instructions, remember which
713 // blocks we know to be transparent to the load.
714 df_iterator_default_set<BasicBlock *, 16> TranspBlocks;
715
716 for (LoadInst *Load : Loads) {
717 // Check to see if the load is invalidated from the start of the block to
718 // the load itself.
719 BasicBlock *BB = Load->getParent();
720
721 MemoryLocation Loc = MemoryLocation::get(Load);
722 if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, ModRefInfo::Mod))
723 return false; // Pointer is invalidated!
724
725 // Now check every path from the entry block to the load for transparency.
726 // To do this, we perform a depth first search on the inverse CFG from the
727 // loading block.
728 for (BasicBlock *P : predecessors(BB)) {
729 for (BasicBlock *TranspBB : inverse_depth_first_ext(P, TranspBlocks))
730 if (AAR.canBasicBlockModify(*TranspBB, Loc))
731 return false;
732 }
733 }
734
735 // If the path from the entry of the function to each load is free of
736 // instructions that potentially invalidate the load, we can make the
737 // transformation!
738 return true;
739 }
740
741 /// Checks if a type could have padding bytes.
isDenselyPacked(Type * type,const DataLayout & DL)742 static bool isDenselyPacked(Type *type, const DataLayout &DL) {
743 // There is no size information, so be conservative.
744 if (!type->isSized())
745 return false;
746
747 // If the alloc size is not equal to the storage size, then there are padding
748 // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
749 if (DL.getTypeSizeInBits(type) != DL.getTypeAllocSizeInBits(type))
750 return false;
751
752 if (!isa<CompositeType>(type))
753 return true;
754
755 // For homogenous sequential types, check for padding within members.
756 if (SequentialType *seqTy = dyn_cast<SequentialType>(type))
757 return isDenselyPacked(seqTy->getElementType(), DL);
758
759 // Check for padding within and between elements of a struct.
760 StructType *StructTy = cast<StructType>(type);
761 const StructLayout *Layout = DL.getStructLayout(StructTy);
762 uint64_t StartPos = 0;
763 for (unsigned i = 0, E = StructTy->getNumElements(); i < E; ++i) {
764 Type *ElTy = StructTy->getElementType(i);
765 if (!isDenselyPacked(ElTy, DL))
766 return false;
767 if (StartPos != Layout->getElementOffsetInBits(i))
768 return false;
769 StartPos += DL.getTypeAllocSizeInBits(ElTy);
770 }
771
772 return true;
773 }
774
775 /// Checks if the padding bytes of an argument could be accessed.
canPaddingBeAccessed(Argument * arg)776 static bool canPaddingBeAccessed(Argument *arg) {
777 assert(arg->hasByValAttr());
778
779 // Track all the pointers to the argument to make sure they are not captured.
780 SmallPtrSet<Value *, 16> PtrValues;
781 PtrValues.insert(arg);
782
783 // Track all of the stores.
784 SmallVector<StoreInst *, 16> Stores;
785
786 // Scan through the uses recursively to make sure the pointer is always used
787 // sanely.
788 SmallVector<Value *, 16> WorkList;
789 WorkList.insert(WorkList.end(), arg->user_begin(), arg->user_end());
790 while (!WorkList.empty()) {
791 Value *V = WorkList.back();
792 WorkList.pop_back();
793 if (isa<GetElementPtrInst>(V) || isa<PHINode>(V)) {
794 if (PtrValues.insert(V).second)
795 WorkList.insert(WorkList.end(), V->user_begin(), V->user_end());
796 } else if (StoreInst *Store = dyn_cast<StoreInst>(V)) {
797 Stores.push_back(Store);
798 } else if (!isa<LoadInst>(V)) {
799 return true;
800 }
801 }
802
803 // Check to make sure the pointers aren't captured
804 for (StoreInst *Store : Stores)
805 if (PtrValues.count(Store->getValueOperand()))
806 return true;
807
808 return false;
809 }
810
/// PromoteArguments - This method checks the specified function to see if there
/// are any promotable arguments and if it is safe to promote the function (for
/// example, all callers are direct). If safe to promote some arguments, it
/// calls the DoPromotion method.
///
/// \param F the candidate function; must have local linkage and no varargs.
/// \param AARGetter returns the alias-analysis results for a function.
/// \param MaxElements maximum number of struct elements to expand per
///        argument; 0 means unlimited.
/// \param ReplaceCallSite optional callback invoked per rewritten call site so
///        the caller (e.g. the legacy pass) can update its call graph.
/// \returns the newly created function if any promotion happened, or nullptr
///          if nothing was (or could be) promoted.
static Function *
promoteArguments(Function *F, function_ref<AAResults &(Function &F)> AARGetter,
                 unsigned MaxElements,
                 Optional<function_ref<void(CallSite OldCS, CallSite NewCS)>>
                     ReplaceCallSite) {
  // Don't perform argument promotion for naked functions; otherwise we can end
  // up removing parameters that are seemingly 'not used' as they are referred
  // to in the assembly.
  if(F->hasFnAttribute(Attribute::Naked))
    return nullptr;

  // Make sure that it is local to this module.
  if (!F->hasLocalLinkage())
    return nullptr;

  // Don't promote arguments for variadic functions. Adding, removing, or
  // changing non-pack parameters can change the classification of pack
  // parameters. Frontends encode that classification at the call site in the
  // IR, while in the callee the classification is determined dynamically based
  // on the number of registers consumed so far.
  if (F->isVarArg())
    return nullptr;

  // First check: see if there are any pointer arguments! If not, quick exit.
  SmallVector<Argument *, 16> PointerArgs;
  for (Argument &I : F->args())
    if (I.getType()->isPointerTy())
      PointerArgs.push_back(&I);
  if (PointerArgs.empty())
    return nullptr;

  // Second check: make sure that all callers are direct callers. We can't
  // transform functions that have indirect callers. Also see if the function
  // is self-recursive.
  bool isSelfRecursive = false;
  for (Use &U : F->uses()) {
    CallSite CS(U.getUser());
    // Must be a direct call.
    if (CS.getInstruction() == nullptr || !CS.isCallee(&U))
      return nullptr;

    // Can't change signature of musttail callee
    if (CS.isMustTailCall())
      return nullptr;

    if (CS.getInstruction()->getParent()->getParent() == F)
      isSelfRecursive = true;
  }

  // Can't change signature of musttail caller
  // FIXME: Support promoting whole chain of musttail functions
  for (BasicBlock &BB : *F)
    if (BB.getTerminatingMustTailCall())
      return nullptr;

  const DataLayout &DL = F->getParent()->getDataLayout();

  AAResults &AAR = AARGetter(*F);

  // Check to see which arguments are promotable. If an argument is promotable,
  // add it to ArgsToPromote.
  SmallPtrSet<Argument *, 8> ArgsToPromote;
  SmallPtrSet<Argument *, 8> ByValArgsToTransform;
  for (Argument *PtrArg : PointerArgs) {
    Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();

    // Replace sret attribute with noalias. This reduces register pressure by
    // avoiding a register copy.
    // NOTE(review): this rewrite happens for every sret pointer argument,
    // even when the argument is ultimately not promoted below.
    if (PtrArg->hasStructRetAttr()) {
      unsigned ArgNo = PtrArg->getArgNo();
      F->removeParamAttr(ArgNo, Attribute::StructRet);
      F->addParamAttr(ArgNo, Attribute::NoAlias);
      // Keep every (direct, as verified above) call site in sync with the
      // callee's new attributes.
      for (Use &U : F->uses()) {
        CallSite CS(U.getUser());
        CS.removeParamAttr(ArgNo, Attribute::StructRet);
        CS.addParamAttr(ArgNo, Attribute::NoAlias);
      }
    }

    // If this is a byval argument, and if the aggregate type is small, just
    // pass the elements, which is always safe, if the passed value is densely
    // packed or if we can prove the padding bytes are never accessed. This does
    // not apply to inalloca.
    bool isSafeToPromote =
        PtrArg->hasByValAttr() &&
        (isDenselyPacked(AgTy, DL) || !canPaddingBeAccessed(PtrArg));
    if (isSafeToPromote) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        // Respect the element-count limit (0 disables the limit).
        if (MaxElements > 0 && STy->getNumElements() > MaxElements) {
          LLVM_DEBUG(dbgs() << "argpromotion disable promoting argument '"
                            << PtrArg->getName()
                            << "' because it would require adding more"
                            << " than " << MaxElements
                            << " arguments to the function.\n");
          continue;
        }

        // If all the elements are single-value types, we can promote it.
        bool AllSimple = true;
        for (const auto *EltTy : STy->elements()) {
          if (!EltTy->isSingleValueType()) {
            AllSimple = false;
            break;
          }
        }

        // Safe to transform, don't even bother trying to "promote" it.
        // Passing the elements as a scalar will allow sroa to hack on
        // the new alloca we introduce.
        if (AllSimple) {
          ByValArgsToTransform.insert(PtrArg);
          continue;
        }
      }
    }

    // If the argument is a recursive type and we're in a recursive
    // function, we could end up infinitely peeling the function argument.
    if (isSelfRecursive) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        bool RecursiveType = false;
        for (const auto *EltTy : STy->elements()) {
          // An element with the same pointer type as the argument itself
          // would re-expose the argument after one round of promotion.
          if (EltTy == PtrArg->getType()) {
            RecursiveType = true;
            break;
          }
        }
        if (RecursiveType)
          continue;
      }
    }

    // Otherwise, see if we can promote the pointer to its value.
    if (isSafeToPromoteArgument(PtrArg, PtrArg->hasByValOrInAllocaAttr(), AAR,
                                MaxElements))
      ArgsToPromote.insert(PtrArg);
  }

  // No promotable pointer arguments.
  if (ArgsToPromote.empty() && ByValArgsToTransform.empty())
    return nullptr;

  return doPromotion(F, ArgsToPromote, ByValArgsToTransform, ReplaceCallSite);
}
959
run(LazyCallGraph::SCC & C,CGSCCAnalysisManager & AM,LazyCallGraph & CG,CGSCCUpdateResult & UR)960 PreservedAnalyses ArgumentPromotionPass::run(LazyCallGraph::SCC &C,
961 CGSCCAnalysisManager &AM,
962 LazyCallGraph &CG,
963 CGSCCUpdateResult &UR) {
964 bool Changed = false, LocalChange;
965
966 // Iterate until we stop promoting from this SCC.
967 do {
968 LocalChange = false;
969
970 for (LazyCallGraph::Node &N : C) {
971 Function &OldF = N.getFunction();
972
973 FunctionAnalysisManager &FAM =
974 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
975 // FIXME: This lambda must only be used with this function. We should
976 // skip the lambda and just get the AA results directly.
977 auto AARGetter = [&](Function &F) -> AAResults & {
978 assert(&F == &OldF && "Called with an unexpected function!");
979 return FAM.getResult<AAManager>(F);
980 };
981
982 Function *NewF = promoteArguments(&OldF, AARGetter, MaxElements, None);
983 if (!NewF)
984 continue;
985 LocalChange = true;
986
987 // Directly substitute the functions in the call graph. Note that this
988 // requires the old function to be completely dead and completely
989 // replaced by the new function. It does no call graph updates, it merely
990 // swaps out the particular function mapped to a particular node in the
991 // graph.
992 C.getOuterRefSCC().replaceNodeFunction(N, *NewF);
993 OldF.eraseFromParent();
994 }
995
996 Changed |= LocalChange;
997 } while (LocalChange);
998
999 if (!Changed)
1000 return PreservedAnalyses::all();
1001
1002 return PreservedAnalyses::none();
1003 }
1004
namespace {

/// ArgPromotion - The 'by reference' to 'by value' argument promotion pass.
/// Legacy pass-manager wrapper around promoteArguments.
struct ArgPromotion : public CallGraphSCCPass {
  // Pass identification, replacement for typeid
  static char ID;

  // MaxElements defaults to 3: the maximum number of struct elements to
  // expand per argument (0 = unlimited; see the MaxElements member below).
  explicit ArgPromotion(unsigned MaxElements = 3)
      : CallGraphSCCPass(ID), MaxElements(MaxElements) {
    initializeArgPromotionPass(*PassRegistry::getPassRegistry());
  }

  // Declare the analyses this pass depends on (assumption cache, TLI, and
  // the alias-analysis group).
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  bool runOnSCC(CallGraphSCC &SCC) override;

private:
  // Pull in the other doInitialization overloads hidden by the override below.
  using llvm::Pass::doInitialization;

  bool doInitialization(CallGraph &CG) override;

  /// The maximum number of elements to expand, or 0 for unlimited.
  unsigned MaxElements;
};

} // end anonymous namespace
1036
// Pass ID and legacy pass-manager registration boilerplate.
char ArgPromotion::ID = 0;

INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
                      "Promote 'by reference' arguments to scalars", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ArgPromotion, "argpromotion",
                    "Promote 'by reference' arguments to scalars", false, false)

/// Factory for the legacy pass; MaxElements is forwarded to the pass
/// (0 = no limit on expanded struct elements).
Pass *llvm::createArgumentPromotionPass(unsigned MaxElements) {
  return new ArgPromotion(MaxElements);
}
1051
/// Legacy pass entry point: attempt promotion on every function in the SCC,
/// repeating until a sweep makes no progress, and keep the CallGraph
/// consistent with each replacement.
bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;

  // Get the callgraph information that we need to update to reflect our
  // changes.
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  LegacyAARGetter AARGetter(*this);

  bool Changed = false, LocalChange;

  // Iterate until we stop promoting from this SCC.
  do {
    LocalChange = false;
    // Attempt to promote arguments from all functions in this SCC.
    for (CallGraphNode *OldNode : SCC) {
      Function *OldF = OldNode->getFunction();
      if (!OldF)
        continue;

      // When promoteArguments rewrites a call site, redirect the matching
      // call graph edge from the old callee to the new one.
      auto ReplaceCallSite = [&](CallSite OldCS, CallSite NewCS) {
        Function *Caller = OldCS.getInstruction()->getParent()->getParent();
        CallGraphNode *NewCalleeNode =
            CG.getOrInsertFunction(NewCS.getCalledFunction());
        CallGraphNode *CallerNode = CG[Caller];
        CallerNode->replaceCallEdge(OldCS, NewCS, NewCalleeNode);
      };

      if (Function *NewF = promoteArguments(OldF, AARGetter, MaxElements,
                                            {ReplaceCallSite})) {
        LocalChange = true;

        // Update the call graph for the newly promoted function.
        CallGraphNode *NewNode = CG.getOrInsertFunction(NewF);
        NewNode->stealCalledFunctionsFrom(OldNode);
        // Delete the old function only when nothing references it anymore;
        // otherwise keep it alive with external linkage.
        if (OldNode->getNumReferences() == 0)
          delete CG.removeFunctionFromModule(OldNode);
        else
          OldF->setLinkage(Function::ExternalLinkage);

        // And update the SCC we're iterating as well.
        SCC.ReplaceNode(OldNode, NewNode);
      }
    }
    // Remember that we changed something.
    Changed |= LocalChange;
  } while (LocalChange);

  return Changed;
}
1103
// Defer to the base class; this override exists alongside the
// using-declaration in the class that re-exposes the other overloads.
bool ArgPromotion::doInitialization(CallGraph &CG) {
  return CallGraphSCCPass::doInitialization(CG);
}
1107