//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken. If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/GlobalOpt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/CtorUtils.h"
#include "llvm/Transforms/Utils/Evaluator.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "globalopt"

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
STATISTIC(NumInternalFunc, "Number of internal functions");
STATISTIC(NumColdCC, "Number of functions marked coldcc");

static cl::opt<bool>
    EnableColdCCStressTest("enable-coldcc-stress-test",
                           cl::desc("Enable stress test of coldcc by adding "
                                    "calling conv to all internal functions."),
                           cl::init(false), cl::Hidden);

static cl::opt<int> ColdCCRelFreq(
    "coldcc-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
    cl::desc(
        "Maximum block frequency, expressed as a percentage of caller's "
        "entry frequency, for a call site to be considered cold for enabling "
        "coldcc"));

/// Is this global variable possibly used by a leak checker as a root? If so,
/// we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer. There are two challenges; one is that we could have a struct
  // that has an inner member which is a pointer. We recurse through the type
  // to detect these (up to a point). The other is that we may actually be a
  // union of a pointer and another type, and so our LLVM type is an integer
  // which gets converted into a pointer, or our type is an [i8 x #] with a
  // pointer potentially contained here.
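  //
  // For example, a global of type {i32, i8*} or [4 x i8*] plausibly contains
  // a pointer, while a plain i32 or float global does not (modulo the union
  // caveat above).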

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(GV->getValueType());

  unsigned Limit = 20;
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
    default: break;
    case Type::PointerTyID: return true;
    case Type::ArrayTyID:
    case Type::VectorTyID: {
      SequentialType *STy = cast<SequentialType>(Ty);
      Types.push_back(STy->getElementType());
      break;
    }
    case Type::StructTyID: {
      StructType *STy = cast<StructType>(Ty);
      if (STy->isOpaque()) return true;
      for (StructType::element_iterator I = STy->element_begin(),
                                        E = STy->element_end(); I != E; ++I) {
        Type *InnerTy = *I;
        if (isa<PointerType>(InnerTy)) return true;
        if (isa<CompositeType>(InnerTy))
          Types.push_back(InnerTy);
      }
      break;
    }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}

/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
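///
/// A removable chain here is a single-use, side-effect-free sequence feeding
/// the store, e.g. (illustrative) a malloc result passed through a bitcast
/// and a constant-index GEP before being stored.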
static bool IsSafeComputationToRemove(
    Value *V, function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  do {
    if (isa<Constant>(V))
      return true;
    if (!V->hasOneUse())
      return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V, GetTLI))
      return true;

    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects())
      return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }

    V = I->getOperand(0);
  } while (true);
}

/// This GV is a pointer root. Loop over all users of the global and clean up
/// any that obviously store a value that is not dynamically allocated, since
/// those are the only stores a leak checker cares about.
static bool
CleanupPointerRootUsers(GlobalVariable *GV,
                        function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  // A brief explanation of leak checkers. The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time. The common strategy for leak checkers is to whitelist
  // the memory pointed to by globals at exit. This is popular because it also
  // solves another problem where the main thread of a C++ program may shut
  // down before other threads that are still expecting to use those globals.
  // To handle that case, we expect the program may create a singleton and
  // never destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, GetTLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, GetTLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, GetTLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (true);
      I->eraseFromParent();
    }
  }

  return Changed;
}

/// We just marked GV constant. Loop over all users of the global, cleaning up
/// the obvious ones. This is largely just a quick scan over the use list to
/// clean up the easy and obvious cruft. This returns true if it made a change.
static bool CleanupConstantGlobalUsers(
    Value *V, Constant *Init, const DataLayout &DL,
    function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  bool Changed = false;
  // Note that we need to use a weak value handle for the worklist items. When
  // we delete a constant array, we may also be holding a pointer to one of its
  // elements (or an element of one of its elements if we're dealing with an
  // array of arrays) in the worklist.
  SmallVector<WeakTrackingVH, 8> WorkList(V->user_begin(), V->user_end());
  while (!WorkList.empty()) {
    Value *UV = WorkList.pop_back_val();
    if (!UV)
      continue;

    User *U = cast<User>(UV);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = nullptr;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, GetTLI);
      } else if ((CE->getOpcode() == Instruction::BitCast &&
                  CE->getType()->isPointerTy()) ||
                 CE->getOpcode() == Instruction::AddrSpaceCast) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, GetTLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = nullptr;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
            ConstantFoldInstruction(GEP, DL, &GetTLI(*GEP->getFunction())));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getResultElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, GetTLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        CleanupConstantGlobalUsers(V, Init, DL, GetTLI);
        return true;
      }
    }
  }
  return Changed;
}

static bool isSafeSROAElementUse(Value *V);

/// Return true if the specified GEP is a safe user of a derived
/// expression from a global that we want to SROA.
static bool isSafeSROAGEP(User *U) {
  // Check to see if this ConstantExpr GEP is SRA'able. In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices. This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
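  // For example (illustrative), "getelementptr @GV, i32 0, i32 2, i32 1" is
  // acceptable, while "getelementptr @GV, i32 1" (too few operands) or a GEP
  // with a variable index into an array is not.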
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue())
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI; // Skip over the pointer index.

  // For all other levels we require that the indices are constant and in
  // range. In particular, consider: A[0][i]. We cannot know that the user
  // isn't doing invalid things like allowing i to index an out-of-range
  // subscript that accesses A[1]. This can also happen between different
  // members of a struct in LLVM IR.
  for (; GEPI != E; ++GEPI) {
    if (GEPI.isStruct())
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
    if (!IdxVal || (GEPI.isBoundedSequential() &&
                    IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
      return false;
  }

  return llvm::all_of(U->users(),
                      [](User *UU) { return isSafeSROAElementUse(UU); });
}

/// Return true if the specified instruction is a safe user of a derived
/// expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return isSafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP. Check it and its users are safe to SRA.
  return isa<GetElementPtrInst>(I) && isSafeSROAGEP(I);
}

/// Look at all uses of the global and decide whether it is safe for us to
/// perform this transformation.
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (User *U : GV->users()) {
    // The user of the global must be a GEP Inst or a ConstantExpr GEP.
    if (!isa<GetElementPtrInst>(U) &&
        (!isa<ConstantExpr>(U) ||
         cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
      return false;

    // Check that the GEP and its users are safe to SRA.
    if (!isSafeSROAGEP(U))
      return false;
  }

  return true;
}

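/// Return true if GV is a struct or array global whose initializer type and
/// uses make it safe (and plausibly worthwhile) to split into one global per
/// element.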
static bool CanDoGlobalSRA(GlobalVariable *GV) {
  Constant *Init = GV->getInitializer();

  if (isa<StructType>(Init->getType())) {
    // nothing to check
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Init->getType())) {
    if (STy->getNumElements() > 16 && GV->hasNUsesOrMore(16))
      return false; // It's not worth it.
  } else
    return false;

  return GlobalUsersSafeToSRA(GV);
}

/// Copy over the debug info for a variable to its SRA replacements.
static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
                                 uint64_t FragmentOffsetInBits,
                                 uint64_t FragmentSizeInBits,
                                 unsigned NumElements) {
  SmallVector<DIGlobalVariableExpression *, 1> GVs;
  GV->getDebugInfo(GVs);
  for (auto *GVE : GVs) {
    DIVariable *Var = GVE->getVariable();
    DIExpression *Expr = GVE->getExpression();
    if (NumElements > 1) {
      if (auto E = DIExpression::createFragmentExpression(
              Expr, FragmentOffsetInBits, FragmentSizeInBits))
        Expr = *E;
      else
        return;
    }
    auto *NGVE = DIGlobalVariableExpression::get(GVE->getContext(), Var, Expr);
    NGV->addDebugInfo(NGVE);
  }
}

/// Perform scalar replacement of aggregates on the specified global variable.
/// This opens the door for other optimizations by exposing the behavior of the
/// program in a more fine-grained way. We have determined that this
/// transformation is safe already. We return the first global variable we
/// insert so that the caller can reprocess it.
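///
/// Illustrative sketch (not taken from a real test): a global such as
///   @g = internal global { i32, float } { i32 1, float 2.0 }
/// is split into
///   @g.0 = internal global i32 1
///   @g.1 = internal global float 2.0
/// and every 'gep @g, 0, N, ...' is rewritten to address @g.N instead.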
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
  // Make sure this global only has simple uses that we can SRA.
  if (!CanDoGlobalSRA(GV))
    return nullptr;

  assert(GV->hasLocalLinkage());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::map<unsigned, GlobalVariable *> NewGlobals;

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = DL.getABITypeAlignment(GV->getType());

  // Loop over all users and create replacement variables for used aggregate
  // elements.
  for (User *GEP : GV->users()) {
    assert(((isa<ConstantExpr>(GEP) && cast<ConstantExpr>(GEP)->getOpcode() ==
                                           Instruction::GetElementPtr) ||
            isa<GetElementPtrInst>(GEP)) &&
           "NonGEP CE's are not SRAable!");

    // Ignore the first operand, which has to be zero or else the program is
    // quite broken (undefined). Get the second operand, which is the structure
    // or array index.
    unsigned ElementIdx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (NewGlobals.count(ElementIdx) == 1)
      continue; // we've already created a replacement variable
    assert(NewGlobals.count(ElementIdx) == 0);

    Type *ElTy = nullptr;
    if (StructType *STy = dyn_cast<StructType>(Ty))
      ElTy = STy->getElementType(ElementIdx);
    else if (SequentialType *STy = dyn_cast<SequentialType>(Ty))
      ElTy = STy->getElementType();
    assert(ElTy);

    Constant *In = Init->getAggregateElement(ElementIdx);
    assert(In && "Couldn't get element of initializer?");

    GlobalVariable *NGV = new GlobalVariable(
        ElTy, false, GlobalVariable::InternalLinkage, In,
        GV->getName() + "." + Twine(ElementIdx), GV->getThreadLocalMode(),
        GV->getType()->getAddressSpace());
    NGV->setExternallyInitialized(GV->isExternallyInitialized());
    NGV->copyAttributesFrom(GV);
    NewGlobals.insert(std::make_pair(ElementIdx, NGV));

    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout &Layout = *DL.getStructLayout(STy);

      // Calculate the known alignment of the field. If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(ElementIdx);
      Align NewAlign(MinAlign(StartAlignment, FieldOffset));
      if (NewAlign >
          Align(DL.getABITypeAlignment(STy->getElementType(ElementIdx))))
        NGV->setAlignment(NewAlign);

      // Copy over the debug info for the variable.
      uint64_t Size = DL.getTypeAllocSizeInBits(NGV->getValueType());
      uint64_t FragmentOffsetInBits = Layout.getElementOffsetInBits(ElementIdx);
      transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size,
                           STy->getNumElements());
    } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
      uint64_t EltSize = DL.getTypeAllocSize(ElTy);
      Align EltAlign(DL.getABITypeAlignment(ElTy));
      uint64_t FragmentSizeInBits = DL.getTypeAllocSizeInBits(ElTy);

      // Calculate the known alignment of the field. If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      Align NewAlign(MinAlign(StartAlignment, EltSize * ElementIdx));
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
      transferSRADebugInfo(GV, NGV, FragmentSizeInBits * ElementIdx,
                           FragmentSizeInBits, STy->getNumElements());
    }
  }

  if (NewGlobals.empty())
    return nullptr;

  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();
  for (auto NewGlobalVar : NewGlobals)
    Globals.push_back(NewGlobalVar.second);

  LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");

  Constant *NullInt = Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->user_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode() ==
                 Instruction::GetElementPtr) ||
            isa<GetElementPtrInst>(GEP)) &&
           "NonGEP CE's are not SRAable!");

    // Ignore the first operand, which has to be zero or else the program is
    // quite broken (undefined). Get the second operand, which is the structure
    // or array index.
    unsigned ElementIdx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    assert(NewGlobals.count(ElementIdx) == 1);

    Value *NewPtr = NewGlobals[ElementIdx];
    Type *NewTy = NewGlobals[ElementIdx]->getValueType();

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant *, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr =
            ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value *, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(
            NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(ElementIdx),
            GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  assert(NewGlobals.size() > 0);
  return NewGlobals.begin()->second;
}

/// Return true if all users of the specified value will trap if the value is
/// dynamically null. PHIs keeps track of any phi nodes we've seen to avoid
/// reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                         SmallPtrSetImpl<const PHINode *> &PHIs) {
  for (const User *U : V->users()) {
    if (const Instruction *I = dyn_cast<Instruction>(U)) {
      // If null pointer is considered valid, then all uses are non-trapping.
      // Non address-space 0 globals have already been pruned by the caller.
      if (NullPointerIsDefined(I->getFunction()))
        return false;
    }
    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false; // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false; // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false; // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(U->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }
  }
  return true;
}

/// Return true if all uses of any loads from GV will trap if the loaded value
/// is null. Note that this also permits comparisons of the loaded value
/// against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode *, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  return true;
}

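/// Rewrite users of V that would trap when V is null (loads, stores through
/// it, calls through it, and casts/GEPs of it) to use the non-null constant
/// NewV instead. Returns true if any change was made.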
static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (auto UI = V->user_begin(), E = V->user_end(); UI != E;) {
    Instruction *I = cast<Instruction>(*UI++);
    // Uses are non-trapping if null pointer is considered valid.
    // Non address-space 0 globals are already pruned by the caller.
    if (NullPointerIsDefined(I->getFunction()))
      return false;
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer! Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also. Be careful to not invalidate UI!
          UI = V->user_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(
          CI, ConstantExpr::getCast(CI->getOpcode(), NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant *, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands() - 1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands() - 1)
        Changed |= OptimizeAwayTrappingUsesOfValue(
            GEPI, ConstantExpr::getGetElementPtr(GEPI->getSourceElementType(),
                                                 NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}

/// The specified global has only one non-null value stored into it. If there
/// are uses of the loaded value that would trap if the loaded value is
/// dynamically null, then we know those uses cannot be reached with a null
/// value, so we can forward the stored value and optimize away the load.
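///
/// For example (illustrative), if the one non-null value ever stored is
/// "store i32* @Obj, i32** @G", then loads of @G whose uses would all trap on
/// null can be rewritten to use @Obj directly.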
static bool OptimizeAwayTrappingUsesOfLoads(
    GlobalVariable *GV, Constant *LV, const DataLayout &DL,
    function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of the stored value.
  for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end();
       GUI != E;) {
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the load, delete it as well.
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    LLVM_DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV
                      << "\n");
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV, GetTLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, nullptr, DL, GetTLI);
    }
    if (GV->use_empty()) {
      LLVM_DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}

/// Walk the use list of V, constant folding all of the instructions that are
/// foldable.
static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
                                TargetLibraryInfo *TLI) {
  for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E;)
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could use V multiple times.
        while (UI != E && *UI == I)
          ++UI;
        if (isInstructionTriviallyDead(I, TLI))
          I->eraseFromParent();
      }
}

/// This function takes the specified global variable, and transforms the
/// program as if it always contained the result of the specified malloc.
/// Because it is always the result of the specified malloc, there is no reason
/// to actually DO the malloc. Instead, turn the malloc into a global, and
/// rewrite any loads of GV as uses of the new global.
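///
/// Illustrative sketch: given
///   @G = internal global i32* null
///   store i32* %m, i32** @G   ; %m is the only malloc ever stored to @G
/// we create "@G.body = internal global i32 undef", replace users of %m with
/// @G.body, and fold loads of @G accordingly.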
static GlobalVariable *
OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
                              ConstantInt *NElements, const DataLayout &DL,
                              TargetLibraryInfo *TLI) {
  LLVM_DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI
                    << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable. The contents of the malloc'd memory are
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(
      *GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage,
      UndefValue::get(GlobalType), GV->getName() + ".body", nullptr,
      GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we
  // have a malloc + bitcast) then replace them with uses of the new global.
  // Update other users to use the global as well.
  BitCastInst *TheBC = nullptr;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->user_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (!TheBC)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getValueType())
    RepValue = ConstantExpr::getBitCast(RepValue, GV->getValueType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
      new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                         GlobalValue::InternalLinkage,
                         ConstantInt::getFalse(GV->getContext()),
                         GV->getName() + ".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false,
                    None, SI->getOrdering(), SI->getSyncScopeID(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->user_back());
    while (!LI->use_empty()) {
      Use &LoadUse = *LI->use_begin();
      ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
      if (!ICI) {
        LoadUse = RepValue;
        continue;
      }

      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool->getValueType(), InitBool,
                               InitBool->getName() + ".val", false, None,
                               LI->getOrdering(), LI->getSyncScopeID(),
                               LI->isUnordered() ? (Instruction *)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT: // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break; // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty()) // Delete initializations
      cast<StoreInst>(InitBool->user_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV->getIterator(), InitBool);

  // Now the GV is dead, nuke it and the malloc.
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To enable further optimizations, loop over all users of NewGV and try to
  // constant prop them. This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV, DL, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, DL, TLI);

  return NewGV;
}

/// Scan the use-list of V checking to make sure that there are no complex uses
/// of V. We permit simple things like dereferencing the pointer, but not
/// storing through the address, unless it is to the specified global.
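///
/// For example, "store i8 0, i8* %v" (a store *through* %v) is fine, while
/// "store i8* %v, i8** @other" stores %v itself somewhere else and is
/// rejected.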
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                                      const GlobalVariable *GV,
                                                      SmallPtrSetImpl<const PHINode *> &PHIs) {
  for (const User *U : V->users()) {
    const Instruction *Inst = cast<Instruction>(U);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false; // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok. Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN).second)
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// The Alloc pointer is stored into GV somewhere. Transform all uses of the
/// allocation into loads from the global and uses of the resultant pointer.
/// Further, delete the store into GV. This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->user_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL =
        new LoadInst(GV->getValueType(), GV, GV->getName() + ".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// Verify that all uses of V (a load, or a phi of a load) are simple enough to
/// perform heap SRA on. This permits GEP's that index through the array and
/// struct field, icmps of null, and PHIs.
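///
/// Concretely, each use must be an icmp against null, a
/// "getelementptr %v, i32 X, i32 FieldNo, ..." (at least an array index and a
/// field index), or a PHI whose own uses satisfy the same rules.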
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                                           SmallPtrSetImpl<const PHINode *> &LoadUsingPHIs,
                                           SmallPtrSetImpl<const PHINode *> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (const User *U : V->users()) {
    const Instruction *UI = cast<Instruction>(U);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      if (!LoadUsingPHIsPerLoad.insert(PN).second)
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN).second)
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}

/// If all users of values loaded from GV are simple enough to perform HeapSRA,
/// return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode *, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode *, 32> LoadUsingPHIsPerLoad;
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform. However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or the stored value (StoredVal) itself.
  for (const PHINode *PN : LoadUsingPHIs) {
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // UNDEF? NULL?

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}

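/// Return the FieldNo'th scalarized value corresponding to V, where V is a
/// load from the original global or a PHI of such loads. Creates the
/// per-field load or PHI on demand and caches it in InsertedScalarizedValues.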
static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
                               DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
                               std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  std::vector<Value *> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo + 1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global. Just create
    // a new Load of the scalarized global.
    Value *V = GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                InsertedScalarizedValues, PHIsToRewrite);
    Result = new LoadInst(V->getType()->getPointerElementType(), V,
                          LI->getName() + ".f" + Twine(FieldNo), LI);
  } else {
    PHINode *PN = cast<PHINode>(V);
    // PN's type is pointer to struct. Make a new PHI of pointer to struct
    // field.

    PointerType *PTy = cast<PointerType>(PN->getType());
    StructType *ST = cast<StructType>(PTy->getElementType());

    unsigned AS = PTy->getAddressSpace();
    PHINode *NewPN =
        PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS),
                        PN->getNumIncomingValues(),
                        PN->getName() + ".f" + Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  }

  return FieldVals[FieldNo] = Result;
}

/// Given a load instruction and a value derived from the load, rewrite the
/// derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
                                    DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
                                    std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value *, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin() + 3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(GEPI->getResultElementType(),
                                             NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes. This will lazily create the
  // PHIs that are needed for individual elements. Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial). If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                                      std::vector<Value *>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}

/// We are performing Heap SRoA on a global. Ptr is a value loaded from the
/// global. Eliminate all uses of Ptr, making them use FieldGlobals instead.
/// All uses of loaded values satisfy AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
                                         DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
                                         std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// CI is an allocation of an array of structures. Break it up into multiple
/// allocations of arrays of the fields.
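///
/// Illustrative sketch: a "malloc(n * sizeof({i32, i8*}))" stored into @G
/// becomes one malloc of an i32 array and one of an i8* array, stored into
/// new globals @G.f0 and @G.f1, with field accesses redirected to them.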
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, const DataLayout &DL,
                                            const TargetLibraryInfo *TLI) {
  LLVM_DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI
                    << '\n');
  Type *MAT = getMallocAllocatedType(CI, TLI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV). If there are other uses, change them to be uses of
  // the global to simplify later code. This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc. Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value *> FieldGlobals;
  std::vector<Value *> FieldMallocs;

  SmallVector<OperandBundleDef, 1> OpBundles;
  CI->getOperandBundlesAsDefs(OpBundles);

  unsigned AS = GV->getType()->getPointerAddressSpace();
  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;
       ++FieldNo) {
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::get(FieldTy, AS);

    GlobalVariable *NGV = new GlobalVariable(
        *GV->getParent(), PFieldTy, false, GlobalValue::InternalLinkage,
        Constant::getNullValue(PFieldTy), GV->getName() + ".f" + Twine(FieldNo),
        nullptr, GV->getThreadLocalMode());
    NGV->copyAttributesFrom(GV);
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = DL.getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = DL.getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = DL.getIntPtrType(CI->getType());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, OpBundles, nullptr,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails. In the original code, malloc failing would set the result pointer
  // of malloc to null. In this case, some mallocs could succeed and others
  // could fail. As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                               Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB =
      OrigBB->splitBasicBlock(CI->getIterator(), "malloc_cont");

  // Create the block to check the first condition. Put all these blocks at the
  // end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal =
        new LoadInst(cast<GlobalVariable>(FieldGlobals[i])->getValueType(),
                     FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, OpBundles, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  /// As we process loads, if we can't immediately update all uses of the load,
  /// keep track of what scalarized loads are inserted for a given load.
  DenseMap<Value *, std::vector<Value *>> InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode *, unsigned>> PHIsToRewrite;

  // Okay, the malloc site is completely handled. All of the uses of GV are now
  // loads, and all uses of those loads are simple. Rewrite them to use loads
  // of the per-field globals instead.
  for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      Type *ValTy = cast<GlobalValue>(FieldGlobals[i])->getValueType();
      Constant *Null = Constant::getNullValue(ValTy);
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 && "Already processed this phi");

    // Add all the incoming values. This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value *, std::vector<Value *>>::iterator
           I = InsertedScalarizedValues.begin(),
           E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value *, std::vector<Value *>>::iterator
           I = InsertedScalarizedValues.begin(),
           E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}
1479
1480 /// This function is called when we see a pointer global variable with a single
1481 /// value stored into it that is a malloc or a cast of a malloc.
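/// For illustration, the pattern this handles looks roughly like the
/// following (hypothetical, simplified IR):
///   @g = internal global i32* null
///   ...
///   %mem = call i8* @malloc(i64 4)
///   %ptr = bitcast i8* %mem to i32*
///   store i32* %ptr, i32** @g
/// where every user of a pointer loaded from @g would trap if that pointer
/// were null (so no use can observe the null initializer).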
1482 static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
1483 Type *AllocTy,
1484 AtomicOrdering Ordering,
1485 const DataLayout &DL,
1486 TargetLibraryInfo *TLI) {
1487 // If this is a malloc of an abstract type, don't touch it.
1488 if (!AllocTy->isSized())
1489 return false;
1490
1491 // We can't optimize this global unless all uses of it are *known* to be
1492 // of the malloc value, not of the null initializer value (consider a use
1493 // that compares the global's value against zero to see if the malloc has
1494 // been reached). To do this, we check to see if all uses of the global
1495 // would trap if the global were null: this proves that they must all
1496 // happen after the malloc.
1497 if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
1498 return false;
1499
1500   // We can't optimize this if the malloc itself is used in a complex way,
1501   // for example, being stored into multiple globals. We do allow the malloc
1502   // to be stored into the specified global, loaded, icmp'd, and GEP'd;
1503   // those are all uses we know how to rewrite in terms of the new global.
1504   // Anything more complex than that blocks the transformation.
1505 SmallPtrSet<const PHINode*, 8> PHIs;
1506 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
1507 return false;
1508
1509 // If we have a global that is only initialized with a fixed size malloc,
1510 // transform the program to use global memory instead of malloc'd memory.
1511 // This eliminates dynamic allocation, avoids an indirection accessing the
1512 // data, and exposes the resultant global to further GlobalOpt.
1513 // We cannot optimize the malloc if we cannot determine malloc array size.
1514 Value *NElems = getMallocArraySize(CI, DL, TLI, true);
1515 if (!NElems)
1516 return false;
1517
1518 if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
1519 // Restrict this transformation to only working on small allocations
1520 // (2048 bytes currently), as we don't want to introduce a 16M global or
1521 // something.
1522 if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
1523 OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
1524 return true;
1525 }
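  // For illustration: under the pattern sketched above, the rewrite performed
  // by OptimizeGlobalAddressOfMalloc is roughly
  //   @g.body = internal global i32 undef   ; the storage itself
  //   @g.init = internal global i1 false    ; "has the malloc run yet?" flag
  // with loads of @g answered by the address of @g.body, so the dynamic
  // allocation disappears entirely.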
1526
1527 // If the allocation is an array of structures, consider transforming this
1528 // into multiple malloc'd arrays, one for each field. This is basically
1529 // SRoA for malloc'd memory.
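  // For illustration (simplified): a malloc'd array of
  //   %pair = type { i32, i32 }
  // stored into a single global becomes two field mallocs,
  //   %f0 = call i8* @malloc(...)   ; holds all the first fields
  //   %f1 = call i8* @malloc(...)   ; holds all the second fields
  // each stored into its own per-field global, with loads and GEPs of the
  // original pointer rewritten against those globals.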
1530
1531 if (Ordering != AtomicOrdering::NotAtomic)
1532 return false;
1533
1534   // If this is an allocation of a fixed size array of structs, analyze it as a
1535 // variable size array. malloc [100 x struct],1 -> malloc struct, 100
1536 if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
1537 if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
1538 AllocTy = AT->getElementType();
1539
1540 StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
1541 if (!AllocSTy)
1542 return false;
1543
1544   // If the structure has an unreasonable number of fields, leave it
1545   // alone.
1546 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
1547 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {
1548
1549 // If this is a fixed size array, transform the Malloc to be an alloc of
1550 // structs. malloc [100 x struct],1 -> malloc struct, 100
1551 if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
1552 Type *IntPtrTy = DL.getIntPtrType(CI->getType());
1553 unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes();
1554 Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
1555 Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
1556 SmallVector<OperandBundleDef, 1> OpBundles;
1557 CI->getOperandBundlesAsDefs(OpBundles);
1558 Instruction *Malloc =
1559 CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, AllocSize, NumElements,
1560 OpBundles, nullptr, CI->getName());
1561 Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
1562 CI->replaceAllUsesWith(Cast);
1563 CI->eraseFromParent();
1564 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
1565 CI = cast<CallInst>(BCI->getOperand(0));
1566 else
1567 CI = cast<CallInst>(Malloc);
1568 }
1569
1570 PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), DL,
1571 TLI);
1572 return true;
1573 }
1574
1575 return false;
1576 }
1577
1578 // Try to optimize globals based on the knowledge that only one value (besides
1579 // its initializer) is ever stored to the global.
1580 static bool
1581 optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
1582 AtomicOrdering Ordering, const DataLayout &DL,
1583 function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
1584 // Ignore no-op GEPs and bitcasts.
1585 StoredOnceVal = StoredOnceVal->stripPointerCasts();
1586
1587 // If we are dealing with a pointer global that is initialized to null and
1588 // only has one (non-null) value stored into it, then we can optimize any
1589 // users of the loaded value (often calls and loads) that would trap if the
1590 // value was null.
1591 if (GV->getInitializer()->getType()->isPointerTy() &&
1592 GV->getInitializer()->isNullValue() &&
1593 !NullPointerIsDefined(
1594 nullptr /* F */,
1595 GV->getInitializer()->getType()->getPointerAddressSpace())) {
1596 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
1597 if (GV->getInitializer()->getType() != SOVC->getType())
1598 SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
1599
1600 // Optimize away any trapping uses of the loaded value.
1601 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, GetTLI))
1602 return true;
1603 } else if (CallInst *CI = extractMallocCall(StoredOnceVal, GetTLI)) {
1604 auto *TLI = &GetTLI(*CI->getFunction());
1605 Type *MallocType = getMallocAllocatedType(CI, TLI);
1606 if (MallocType && tryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
1607 Ordering, DL, TLI))
1608 return true;
1609 }
1610 }
1611
1612 return false;
1613 }
1614
1615 /// At this point, we have learned that the only two values ever stored into GV
1616 /// are its initializer and OtherVal. See if we can shrink the global into a
1617 /// boolean and select between the two values whenever it is used. This exposes
1618 /// the values to other scalar optimizations.
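/// For illustration (simplified): given
///   @g = internal global i32 0    ; only 0 (the initializer) and 42 stored
/// the global is shrunk to
///   @g.b = internal global i1 false
/// stores of 42 become 'store i1 true, i1* @g.b', and each load becomes a
/// load of the bool feeding 'select i1 %b, i32 42, i32 0' (or a zext in the
/// 0/1 case).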
1619 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
1620 Type *GVElType = GV->getValueType();
1621
1622 // If GVElType is already i1, it is already shrunk. If the type of the GV is
1623 // an FP value, pointer or vector, don't do this optimization because a select
1624 // between them is very expensive and unlikely to lead to later
1625 // simplification. In these cases, we typically end up with "cond ? v1 : v2"
1626 // where v1 and v2 both require constant pool loads, a big loss.
1627 if (GVElType == Type::getInt1Ty(GV->getContext()) ||
1628 GVElType->isFloatingPointTy() ||
1629 GVElType->isPointerTy() || GVElType->isVectorTy())
1630 return false;
1631
1632 // Walk the use list of the global seeing if all the uses are load or store.
1633 // If there is anything else, bail out.
1634 for (User *U : GV->users())
1635 if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
1636 return false;
1637
1638 LLVM_DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n");
1639
1640 // Create the new global, initializing it to false.
1641 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
1642 false,
1643 GlobalValue::InternalLinkage,
1644 ConstantInt::getFalse(GV->getContext()),
1645 GV->getName()+".b",
1646 GV->getThreadLocalMode(),
1647 GV->getType()->getAddressSpace());
1648 NewGV->copyAttributesFrom(GV);
1649 GV->getParent()->getGlobalList().insert(GV->getIterator(), NewGV);
1650
1651 Constant *InitVal = GV->getInitializer();
1652 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
1653 "No reason to shrink to bool!");
1654
1655 SmallVector<DIGlobalVariableExpression *, 1> GVs;
1656 GV->getDebugInfo(GVs);
1657
1658 // If initialized to zero and storing one into the global, we can use a cast
1659 // instead of a select to synthesize the desired value.
1660 bool IsOneZero = false;
1661 bool EmitOneOrZero = true;
1662 auto *CI = dyn_cast<ConstantInt>(OtherVal);
1663 if (CI && CI->getValue().getActiveBits() <= 64) {
1664 IsOneZero = InitVal->isNullValue() && CI->isOne();
1665
1666 auto *CIInit = dyn_cast<ConstantInt>(GV->getInitializer());
1667 if (CIInit && CIInit->getValue().getActiveBits() <= 64) {
1668 uint64_t ValInit = CIInit->getZExtValue();
1669 uint64_t ValOther = CI->getZExtValue();
1670 uint64_t ValMinus = ValOther - ValInit;
1671
1672 for(auto *GVe : GVs){
1673 DIGlobalVariable *DGV = GVe->getVariable();
1674 DIExpression *E = GVe->getExpression();
1675 const DataLayout &DL = GV->getParent()->getDataLayout();
1676 unsigned SizeInOctets =
1677 DL.getTypeAllocSizeInBits(NewGV->getType()->getElementType()) / 8;
1678
1679         // It is expected that the address of the optimized global variable
1680         // is on top of the DWARF expression stack. After the optimization,
1681         // the value of that variable will be either 0 (the initial value) or
1682         // 1 (the other value). The following expression yields a constant
1683         // integer depending on the value at the global object's address:
1684         //   val * (ValOther - ValInit) + ValInit:
1685 // DW_OP_deref DW_OP_constu <ValMinus>
1686 // DW_OP_mul DW_OP_constu <ValInit> DW_OP_plus DW_OP_stack_value
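        // For example, with ValInit = 3 and ValOther = 7 (so ValMinus = 4),
        // a loaded bit of 1 evaluates to 1 * 4 + 3 = 7 and a bit of 0 to
        // 0 * 4 + 3 = 3, recovering the two original values.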
1687 SmallVector<uint64_t, 12> Ops = {
1688 dwarf::DW_OP_deref_size, SizeInOctets,
1689 dwarf::DW_OP_constu, ValMinus,
1690 dwarf::DW_OP_mul, dwarf::DW_OP_constu, ValInit,
1691 dwarf::DW_OP_plus};
1692 bool WithStackValue = true;
1693 E = DIExpression::prependOpcodes(E, Ops, WithStackValue);
1694 DIGlobalVariableExpression *DGVE =
1695 DIGlobalVariableExpression::get(NewGV->getContext(), DGV, E);
1696 NewGV->addDebugInfo(DGVE);
1697 }
1698 EmitOneOrZero = false;
1699 }
1700 }
1701
1702 if (EmitOneOrZero) {
1703     // FIXME: This only emits the variable's address for the debugger; the
1704     // location it names will now only ever hold the value 0 or 1.
1705 for(auto *GV : GVs)
1706 NewGV->addDebugInfo(GV);
1707 }
1708
1709 while (!GV->use_empty()) {
1710 Instruction *UI = cast<Instruction>(GV->user_back());
1711 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
1712 // Change the store into a boolean store.
1713 bool StoringOther = SI->getOperand(0) == OtherVal;
1714 // Only do this if we weren't storing a loaded value.
1715 Value *StoreVal;
1716 if (StoringOther || SI->getOperand(0) == InitVal) {
1717 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
1718 StoringOther);
1719 } else {
1720 // Otherwise, we are storing a previously loaded copy. To do this,
1721 // change the copy from copying the original value to just copying the
1722 // bool.
1723 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
1724
1725 // If we've already replaced the input, StoredVal will be a cast or
1726 // select instruction. If not, it will be a load of the original
1727 // global.
1728 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
1729 assert(LI->getOperand(0) == GV && "Not a copy!");
1730 // Insert a new load, to preserve the saved value.
1731 StoreVal = new LoadInst(NewGV->getValueType(), NewGV,
1732 LI->getName() + ".b", false, None,
1733 LI->getOrdering(), LI->getSyncScopeID(), LI);
1734 } else {
1735 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
1736 "This is not a form that we understand!");
1737 StoreVal = StoredVal->getOperand(0);
1738 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
1739 }
1740 }
1741 StoreInst *NSI =
1742 new StoreInst(StoreVal, NewGV, false, None, SI->getOrdering(),
1743 SI->getSyncScopeID(), SI);
1744 NSI->setDebugLoc(SI->getDebugLoc());
1745 } else {
1746 // Change the load into a load of bool then a select.
1747 LoadInst *LI = cast<LoadInst>(UI);
1748 LoadInst *NLI = new LoadInst(NewGV->getValueType(), NewGV,
1749 LI->getName() + ".b", false, None,
1750 LI->getOrdering(), LI->getSyncScopeID(), LI);
1751 Instruction *NSI;
1752 if (IsOneZero)
1753 NSI = new ZExtInst(NLI, LI->getType(), "", LI);
1754 else
1755 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
1756 NSI->takeName(LI);
1757 // Since LI is split into two instructions, NLI and NSI both inherit the
1758       // same DebugLoc.
1759 NLI->setDebugLoc(LI->getDebugLoc());
1760 NSI->setDebugLoc(LI->getDebugLoc());
1761 LI->replaceAllUsesWith(NSI);
1762 }
1763 UI->eraseFromParent();
1764 }
1765
1766 // Retain the name of the old global variable. People who are debugging their
1767 // programs may expect these variables to be named the same.
1768 NewGV->takeName(GV);
1769 GV->eraseFromParent();
1770 return true;
1771 }
1772
1773 static bool deleteIfDead(
1774 GlobalValue &GV, SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
1775 GV.removeDeadConstantUsers();
1776
1777 if (!GV.isDiscardableIfUnused() && !GV.isDeclaration())
1778 return false;
1779
1780 if (const Comdat *C = GV.getComdat())
1781 if (!GV.hasLocalLinkage() && NotDiscardableComdats.count(C))
1782 return false;
1783
1784 bool Dead;
1785 if (auto *F = dyn_cast<Function>(&GV))
1786 Dead = (F->isDeclaration() && F->use_empty()) || F->isDefTriviallyDead();
1787 else
1788 Dead = GV.use_empty();
1789 if (!Dead)
1790 return false;
1791
1792 LLVM_DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n");
1793 GV.eraseFromParent();
1794 ++NumDeleted;
1795 return true;
1796 }
1797
1798 static bool isPointerValueDeadOnEntryToFunction(
1799 const Function *F, GlobalValue *GV,
1800 function_ref<DominatorTree &(Function &)> LookupDomTree) {
1801 // Find all uses of GV. We expect them all to be in F, and if we can't
1802 // identify any of the uses we bail out.
1803 //
1804 // On each of these uses, identify if the memory that GV points to is
1805 // used/required/live at the start of the function. If it is not, for example
1806 // if the first thing the function does is store to the GV, the GV can
1807 // possibly be demoted.
1808 //
1809 // We don't do an exhaustive search for memory operations - simply look
1810 // through bitcasts as they're quite common and benign.
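  // For illustration (hypothetical C, compiled with internal linkage):
  //   static int g;
  //   void f() { g = 1; use(g); }   // f is the only accessor of g
  // Every load of g is dominated by a store, so g's value on entry to f is
  // dead and g can be demoted to an alloca inside f.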
1811 const DataLayout &DL = GV->getParent()->getDataLayout();
1812 SmallVector<LoadInst *, 4> Loads;
1813 SmallVector<StoreInst *, 4> Stores;
1814 for (auto *U : GV->users()) {
1815 if (Operator::getOpcode(U) == Instruction::BitCast) {
1816 for (auto *UU : U->users()) {
1817 if (auto *LI = dyn_cast<LoadInst>(UU))
1818 Loads.push_back(LI);
1819 else if (auto *SI = dyn_cast<StoreInst>(UU))
1820 Stores.push_back(SI);
1821 else
1822 return false;
1823 }
1824 continue;
1825 }
1826
1827 Instruction *I = dyn_cast<Instruction>(U);
1828 if (!I)
1829 return false;
1830 assert(I->getParent()->getParent() == F);
1831
1832 if (auto *LI = dyn_cast<LoadInst>(I))
1833 Loads.push_back(LI);
1834 else if (auto *SI = dyn_cast<StoreInst>(I))
1835 Stores.push_back(SI);
1836 else
1837 return false;
1838 }
1839
1840 // We have identified all uses of GV into loads and stores. Now check if all
1841 // of them are known not to depend on the value of the global at the function
1842 // entry point. We do this by ensuring that every load is dominated by at
1843 // least one store.
1844 auto &DT = LookupDomTree(*const_cast<Function *>(F));
1845
1846 // The below check is quadratic. Check we're not going to do too many tests.
1847 // FIXME: Even though this will always have worst-case quadratic time, we
1848 // could put effort into minimizing the average time by putting stores that
1849 // have been shown to dominate at least one load at the beginning of the
1850 // Stores array, making subsequent dominance checks more likely to succeed
1851 // early.
1852 //
1853 // The threshold here is fairly large because global->local demotion is a
1854 // very powerful optimization should it fire.
1855 const unsigned Threshold = 100;
1856 if (Loads.size() * Stores.size() > Threshold)
1857 return false;
1858
1859 for (auto *L : Loads) {
1860 auto *LTy = L->getType();
1861 if (none_of(Stores, [&](const StoreInst *S) {
1862 auto *STy = S->getValueOperand()->getType();
1863 // The load is only dominated by the store if DomTree says so
1864 // and the number of bits loaded in L is less than or equal to
1865 // the number of bits stored in S.
1866 return DT.dominates(S, L) &&
1867 DL.getTypeStoreSize(LTy) <= DL.getTypeStoreSize(STy);
1868 }))
1869 return false;
1870 }
1871 // All loads have known dependences inside F, so the global can be localized.
1872 return true;
1873 }
1874
1875 /// C may have non-instruction users. Can all of those users be turned into
1876 /// instructions?
1877 static bool allNonInstructionUsersCanBeMadeInstructions(Constant *C) {
1878 // We don't do this exhaustively. The most common pattern that we really need
1879 // to care about is a constant GEP or constant bitcast - so just looking
1880 // through one single ConstantExpr.
1881 //
1882 // The set of constants that this function returns true for must be able to be
1883 // handled by makeAllConstantUsesInstructions.
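  // For illustration: in
  //   %v = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @g,
  //                                               i64 0, i64 2)
  // @g's direct user is the constant GEP, not the load instruction; such a
  // use can later be re-emitted as an ordinary GEP instruction right before
  // the load (see makeAllConstantUsesInstructions below).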
1884 for (auto *U : C->users()) {
1885 if (isa<Instruction>(U))
1886 continue;
1887 if (!isa<ConstantExpr>(U))
1888       // Non-instruction, non-ConstantExpr user; cannot convert this.
1889 return false;
1890 for (auto *UU : U->users())
1891 if (!isa<Instruction>(UU))
1892       // A ConstantExpr used by another constant. We don't try to recurse any
1893       // further; just bail out at this point.
1894 return false;
1895 }
1896
1897 return true;
1898 }
1899
1900 /// C may have non-instruction users, and
1901 /// allNonInstructionUsersCanBeMadeInstructions has returned true. Convert the
1902 /// non-instruction users to instructions.
1903 static void makeAllConstantUsesInstructions(Constant *C) {
1904 SmallVector<ConstantExpr*,4> Users;
1905 for (auto *U : C->users()) {
1906 if (isa<ConstantExpr>(U))
1907 Users.push_back(cast<ConstantExpr>(U));
1908 else
1909 // We should never get here; allNonInstructionUsersCanBeMadeInstructions
1910 // should not have returned true for C.
1911 assert(
1912 isa<Instruction>(U) &&
1913 "Can't transform non-constantexpr non-instruction to instruction!");
1914 }
1915
1916 SmallVector<Value*,4> UUsers;
1917 for (auto *U : Users) {
1918 UUsers.clear();
1919 for (auto *UU : U->users())
1920 UUsers.push_back(UU);
1921 for (auto *UU : UUsers) {
1922 Instruction *UI = cast<Instruction>(UU);
1923 Instruction *NewU = U->getAsInstruction();
1924 NewU->insertBefore(UI);
1925 UI->replaceUsesOfWith(U, NewU);
1926 }
1927 // We've replaced all the uses, so destroy the constant. (destroyConstant
1928 // will update value handles and metadata.)
1929 U->destroyConstant();
1930 }
1931 }
1932
1933 /// Analyze the specified global variable and optimize
1934 /// it if possible. If we make a change, return true.
1935 static bool
1936 processInternalGlobal(GlobalVariable *GV, const GlobalStatus &GS,
1937 function_ref<TargetLibraryInfo &(Function &)> GetTLI,
1938 function_ref<DominatorTree &(Function &)> LookupDomTree) {
1939 auto &DL = GV->getParent()->getDataLayout();
1940 // If this is a first class global and has only one accessing function and
1941 // this function is non-recursive, we replace the global with a local alloca
1942 // in this function.
1943 //
1944 // NOTE: It doesn't make sense to promote non-single-value types since we
1945   // are just replacing static memory with stack memory.
1946 //
1947   // If the global is in a different address space, don't bring it to the stack.
1948 if (!GS.HasMultipleAccessingFunctions &&
1949 GS.AccessingFunction &&
1950 GV->getValueType()->isSingleValueType() &&
1951 GV->getType()->getAddressSpace() == 0 &&
1952 !GV->isExternallyInitialized() &&
1953 allNonInstructionUsersCanBeMadeInstructions(GV) &&
1954 GS.AccessingFunction->doesNotRecurse() &&
1955 isPointerValueDeadOnEntryToFunction(GS.AccessingFunction, GV,
1956 LookupDomTree)) {
1957 const DataLayout &DL = GV->getParent()->getDataLayout();
1958
1959 LLVM_DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
1960 Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
1961 ->getEntryBlock().begin());
1962 Type *ElemTy = GV->getValueType();
1963 // FIXME: Pass Global's alignment when globals have alignment
1964 AllocaInst *Alloca = new AllocaInst(ElemTy, DL.getAllocaAddrSpace(), nullptr,
1965 GV->getName(), &FirstI);
1966 if (!isa<UndefValue>(GV->getInitializer()))
1967 new StoreInst(GV->getInitializer(), Alloca, &FirstI);
1968
1969 makeAllConstantUsesInstructions(GV);
1970
1971 GV->replaceAllUsesWith(Alloca);
1972 GV->eraseFromParent();
1973 ++NumLocalized;
1974 return true;
1975 }
1976
1977 // If the global is never loaded (but may be stored to), it is dead.
1978 // Delete it now.
1979 if (!GS.IsLoaded) {
1980 LLVM_DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");
1981
1982 bool Changed;
1983 if (isLeakCheckerRoot(GV)) {
1984 // Delete any constant stores to the global.
1985 Changed = CleanupPointerRootUsers(GV, GetTLI);
1986 } else {
1987 // Delete any stores we can find to the global. We may not be able to
1988 // make it completely dead though.
1989 Changed =
1990 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, GetTLI);
1991 }
1992
1993 // If the global is dead now, delete it.
1994 if (GV->use_empty()) {
1995 GV->eraseFromParent();
1996 ++NumDeleted;
1997 Changed = true;
1998 }
1999 return Changed;
2000
2001 }
2002 if (GS.StoredType <= GlobalStatus::InitializerStored) {
2003 LLVM_DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
2004
2005 // Don't actually mark a global constant if it's atomic because atomic loads
2006 // are implemented by a trivial cmpxchg in some edge-cases and that usually
2007 // requires write access to the variable even if it's not actually changed.
2008 if (GS.Ordering == AtomicOrdering::NotAtomic)
2009 GV->setConstant(true);
2010
2011 // Clean up any obviously simplifiable users now.
2012 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, GetTLI);
2013
2014 // If the global is dead now, just nuke it.
2015 if (GV->use_empty()) {
2016 LLVM_DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
2017 << "all users and delete global!\n");
2018 GV->eraseFromParent();
2019 ++NumDeleted;
2020 return true;
2021 }
2022
2023 // Fall through to the next check; see if we can optimize further.
2024 ++NumMarked;
2025 }
2026 if (!GV->getInitializer()->getType()->isSingleValueType()) {
2027 const DataLayout &DL = GV->getParent()->getDataLayout();
2028 if (SRAGlobal(GV, DL))
2029 return true;
2030 }
2031 if (GS.StoredType == GlobalStatus::StoredOnce && GS.StoredOnceValue) {
2032 // If the initial value for the global was an undef value, and if only
2033 // one other value was stored into it, we can just change the
2034 // initializer to be the stored value, then delete all stores to the
2035 // global. This allows us to mark it constant.
2036 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
2037 if (isa<UndefValue>(GV->getInitializer())) {
2038 // Change the initial value here.
2039 GV->setInitializer(SOVConstant);
2040
2041 // Clean up any obviously simplifiable users now.
2042 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, GetTLI);
2043
2044 if (GV->use_empty()) {
2045 LLVM_DEBUG(dbgs() << " *** Substituting initializer allowed us to "
2046 << "simplify all users and delete global!\n");
2047 GV->eraseFromParent();
2048 ++NumDeleted;
2049 }
2050 ++NumSubstitute;
2051 return true;
2052 }
2053
2054 // Try to optimize globals based on the knowledge that only one value
2055 // (besides its initializer) is ever stored to the global.
2056 if (optimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, DL,
2057 GetTLI))
2058 return true;
2059
2060 // Otherwise, if the global was not a boolean, we can shrink it to be a
2061 // boolean.
2062 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
2063 if (GS.Ordering == AtomicOrdering::NotAtomic) {
2064 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
2065 ++NumShrunkToBool;
2066 return true;
2067 }
2068 }
2069 }
2070 }
2071
2072 return false;
2073 }
2074
2075 /// Analyze the specified global variable and optimize it if possible. If we
2076 /// make a change, return true.
2077 static bool
2078 processGlobal(GlobalValue &GV,
2079 function_ref<TargetLibraryInfo &(Function &)> GetTLI,
2080 function_ref<DominatorTree &(Function &)> LookupDomTree) {
2081 if (GV.getName().startswith("llvm."))
2082 return false;
2083
2084 GlobalStatus GS;
2085
2086 if (GlobalStatus::analyzeGlobal(&GV, GS))
2087 return false;
2088
2089 bool Changed = false;
2090 if (!GS.IsCompared && !GV.hasGlobalUnnamedAddr()) {
2091 auto NewUnnamedAddr = GV.hasLocalLinkage() ? GlobalValue::UnnamedAddr::Global
2092 : GlobalValue::UnnamedAddr::Local;
2093 if (NewUnnamedAddr != GV.getUnnamedAddr()) {
2094 GV.setUnnamedAddr(NewUnnamedAddr);
2095 NumUnnamed++;
2096 Changed = true;
2097 }
2098 }
2099
2100 // Do more involved optimizations if the global is internal.
2101 if (!GV.hasLocalLinkage())
2102 return Changed;
2103
2104 auto *GVar = dyn_cast<GlobalVariable>(&GV);
2105 if (!GVar)
2106 return Changed;
2107
2108 if (GVar->isConstant() || !GVar->hasInitializer())
2109 return Changed;
2110
2111 return processInternalGlobal(GVar, GS, GetTLI, LookupDomTree) || Changed;
2112 }
2113
2114 /// Walk all of the direct calls of the specified function, changing them to
2115 /// FastCC.
2116 static void ChangeCalleesToFastCall(Function *F) {
2117 for (User *U : F->users()) {
2118 if (isa<BlockAddress>(U))
2119 continue;
2120 CallSite CS(cast<Instruction>(U));
2121 CS.setCallingConv(CallingConv::Fast);
2122 }
2123 }
2124
2125 static AttributeList StripAttr(LLVMContext &C, AttributeList Attrs,
2126 Attribute::AttrKind A) {
2127 unsigned AttrIndex;
2128 if (Attrs.hasAttrSomewhere(A, &AttrIndex))
2129 return Attrs.removeAttribute(C, AttrIndex, A);
2130 return Attrs;
2131 }
2132
2133 static void RemoveAttribute(Function *F, Attribute::AttrKind A) {
2134 F->setAttributes(StripAttr(F->getContext(), F->getAttributes(), A));
2135 for (User *U : F->users()) {
2136 if (isa<BlockAddress>(U))
2137 continue;
2138 CallSite CS(cast<Instruction>(U));
2139 CS.setAttributes(StripAttr(F->getContext(), CS.getAttributes(), A));
2140 }
2141 }
2142
2143 /// Return true if this is a calling convention that we'd like to change. The
2144 /// idea here is that we don't want to mess with the convention if the user
2145 /// explicitly requested something with performance implications like coldcc,
2146 /// GHC, or anyregcc.
2147 static bool hasChangeableCC(Function *F) {
2148 CallingConv::ID CC = F->getCallingConv();
2149
2150 // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
2151 if (CC != CallingConv::C && CC != CallingConv::X86_ThisCall)
2152 return false;
2153
2154 // FIXME: Change CC for the whole chain of musttail calls when possible.
2155 //
2156 // Can't change CC of the function that either has musttail calls, or is a
2157 // musttail callee itself
2158 for (User *U : F->users()) {
2159 if (isa<BlockAddress>(U))
2160 continue;
2161 CallInst* CI = dyn_cast<CallInst>(U);
2162 if (!CI)
2163 continue;
2164
2165 if (CI->isMustTailCall())
2166 return false;
2167 }
2168
2169 for (BasicBlock &BB : *F)
2170 if (BB.getTerminatingMustTailCall())
2171 return false;
2172
2173 return true;
2174 }
2175
2176 /// Return true if the block containing the call site has a BlockFrequency of
2177 /// less than ColdCCRelFreq% of the entry block.
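/// For example, assuming ColdCCRelFreq keeps its default value of 2, a call
/// site counts as cold when its block's frequency is below 2% of the entry
/// block's frequency.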
2178 static bool isColdCallSite(CallSite CS, BlockFrequencyInfo &CallerBFI) {
2179 const BranchProbability ColdProb(ColdCCRelFreq, 100);
2180 auto CallSiteBB = CS.getInstruction()->getParent();
2181 auto CallSiteFreq = CallerBFI.getBlockFreq(CallSiteBB);
2182 auto CallerEntryFreq =
2183 CallerBFI.getBlockFreq(&(CS.getCaller()->getEntryBlock()));
2184 return CallSiteFreq < CallerEntryFreq * ColdProb;
2185 }
2186
2187 // This function checks if the input function F is cold at all call sites. It
2188 // also looks at each call site's containing function, returning false if the
2189 // caller contains any other non-cold calls. The input vector AllCallsCold
2190 // contains a list of functions that only have call sites in cold blocks.
2191 static bool
2192 isValidCandidateForColdCC(Function &F,
2193 function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2194 const std::vector<Function *> &AllCallsCold) {
2195
2196 if (F.user_empty())
2197 return false;
2198
2199 for (User *U : F.users()) {
2200 if (isa<BlockAddress>(U))
2201 continue;
2202
2203 CallSite CS(cast<Instruction>(U));
2204 Function *CallerFunc = CS.getInstruction()->getParent()->getParent();
2205 BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc);
2206 if (!isColdCallSite(CS, CallerBFI))
2207 return false;
2208 auto It = std::find(AllCallsCold.begin(), AllCallsCold.end(), CallerFunc);
2209 if (It == AllCallsCold.end())
2210 return false;
2211 }
2212 return true;
2213 }
2214
2215 static void changeCallSitesToColdCC(Function *F) {
2216 for (User *U : F->users()) {
2217 if (isa<BlockAddress>(U))
2218 continue;
2219 CallSite CS(cast<Instruction>(U));
2220 CS.setCallingConv(CallingConv::Cold);
2221 }
2222 }
2223
2224 // This function iterates over all the call instructions in the input Function
2225 // and checks that all call sites are in cold blocks and are allowed to use the
2226 // coldcc calling convention.
2227 static bool
2228 hasOnlyColdCalls(Function &F,
2229 function_ref<BlockFrequencyInfo &(Function &)> GetBFI) {
2230 for (BasicBlock &BB : F) {
2231 for (Instruction &I : BB) {
2232 if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2233 CallSite CS(cast<Instruction>(CI));
2234         // Skip over inline asm instructions since they aren't function calls.
2235 if (CI->isInlineAsm())
2236 continue;
2237 Function *CalledFn = CI->getCalledFunction();
2238 if (!CalledFn)
2239 return false;
2240 if (!CalledFn->hasLocalLinkage())
2241 return false;
2242         // Skip over intrinsics since they won't remain as function calls.
2243 if (CalledFn->getIntrinsicID() != Intrinsic::not_intrinsic)
2244 continue;
2245 // Check if it's valid to use coldcc calling convention.
2246 if (!hasChangeableCC(CalledFn) || CalledFn->isVarArg() ||
2247 CalledFn->hasAddressTaken())
2248 return false;
2249 BlockFrequencyInfo &CallerBFI = GetBFI(F);
2250 if (!isColdCallSite(CS, CallerBFI))
2251 return false;
2252 }
2253 }
2254 }
2255 return true;
2256 }
2257
2258 static bool
2259 OptimizeFunctions(Module &M,
2260 function_ref<TargetLibraryInfo &(Function &)> GetTLI,
2261 function_ref<TargetTransformInfo &(Function &)> GetTTI,
2262 function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2263 function_ref<DominatorTree &(Function &)> LookupDomTree,
2264 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2265
2266 bool Changed = false;
2267
2268 std::vector<Function *> AllCallsCold;
2269 for (Module::iterator FI = M.begin(), E = M.end(); FI != E;) {
2270 Function *F = &*FI++;
2271 if (hasOnlyColdCalls(*F, GetBFI))
2272 AllCallsCold.push_back(F);
2273 }
2274
2275 // Optimize functions.
2276 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
2277 Function *F = &*FI++;
2278
2279 // Don't perform global opt pass on naked functions; we don't want fast
2280 // calling conventions for naked functions.
2281 if (F->hasFnAttribute(Attribute::Naked))
2282 continue;
2283
2284 // Functions without names cannot be referenced outside this module.
2285 if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage())
2286 F->setLinkage(GlobalValue::InternalLinkage);
2287
2288 if (deleteIfDead(*F, NotDiscardableComdats)) {
2289 Changed = true;
2290 continue;
2291 }
2292
2293 // LLVM's definition of dominance allows instructions that are cyclic
2294 // in unreachable blocks, e.g.:
2295 // %pat = select i1 %condition, @global, i16* %pat
2296 // because any instruction dominates an instruction in a block that's
2297 // not reachable from entry.
2298 // So, remove unreachable blocks from the function, because a) there's
2299 // no point in analyzing them and b) GlobalOpt should otherwise grow
2300 // some more complicated logic to break these cycles.
2301 // Removing unreachable blocks might invalidate the dominator so we
2302 // recalculate it.
2303 if (!F->isDeclaration()) {
2304 if (removeUnreachableBlocks(*F)) {
2305 auto &DT = LookupDomTree(*F);
2306 DT.recalculate(*F);
2307 Changed = true;
2308 }
2309 }
2310
2311 Changed |= processGlobal(*F, GetTLI, LookupDomTree);
2312
2313 if (!F->hasLocalLinkage())
2314 continue;
2315
2316 // If we have an inalloca parameter that we can safely remove the
2317 // inalloca attribute from, do so. This unlocks optimizations that
2318 // wouldn't be safe in the presence of inalloca.
2319 // FIXME: We should also hoist alloca affected by this to the entry
2320 // block if possible.
2321 if (F->getAttributes().hasAttrSomewhere(Attribute::InAlloca) &&
2322 !F->hasAddressTaken()) {
2323 RemoveAttribute(F, Attribute::InAlloca);
2324 Changed = true;
2325 }
2326
2327 if (hasChangeableCC(F) && !F->isVarArg() && !F->hasAddressTaken()) {
2328 NumInternalFunc++;
2329 TargetTransformInfo &TTI = GetTTI(*F);
2330 // Change the calling convention to coldcc if either stress testing is
2331 // enabled or the target would like to use coldcc on functions which are
2332 // cold at all call sites and the callers contain no other non coldcc
2333 // calls.
2334 if (EnableColdCCStressTest ||
2335 (TTI.useColdCCForColdCall(*F) &&
2336 isValidCandidateForColdCC(*F, GetBFI, AllCallsCold))) {
2337 F->setCallingConv(CallingConv::Cold);
2338 changeCallSitesToColdCC(F);
2339 Changed = true;
2340 NumColdCC++;
2341 }
2342 }
2343
2344 if (hasChangeableCC(F) && !F->isVarArg() &&
2345 !F->hasAddressTaken()) {
2346 // If this function has a calling convention worth changing, is not a
2347 // varargs function, and is only called directly, promote it to use the
2348 // Fast calling convention.
2349 F->setCallingConv(CallingConv::Fast);
2350 ChangeCalleesToFastCall(F);
2351 ++NumFastCallFns;
2352 Changed = true;
2353 }
2354
2355 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
2356 !F->hasAddressTaken()) {
2357 // The function is not used by a trampoline intrinsic, so it is safe
2358 // to remove the 'nest' attribute.
2359 RemoveAttribute(F, Attribute::Nest);
2360 ++NumNestRemoved;
2361 Changed = true;
2362 }
2363 }
2364 return Changed;
2365 }
2366
2367 static bool
2368 OptimizeGlobalVars(Module &M,
2369 function_ref<TargetLibraryInfo &(Function &)> GetTLI,
2370 function_ref<DominatorTree &(Function &)> LookupDomTree,
2371 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2372 bool Changed = false;
2373
2374 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
2375 GVI != E; ) {
2376 GlobalVariable *GV = &*GVI++;
2377 // Global variables without names cannot be referenced outside this module.
2378 if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage())
2379 GV->setLinkage(GlobalValue::InternalLinkage);
2380 // Simplify the initializer.
2381 if (GV->hasInitializer())
2382 if (auto *C = dyn_cast<Constant>(GV->getInitializer())) {
2383 auto &DL = M.getDataLayout();
2384 // TLI is not used in the case of a Constant, so use default nullptr
2385 // for that optional parameter, since we don't have a Function to
2386 // provide GetTLI anyway.
2387 Constant *New = ConstantFoldConstant(C, DL, /*TLI*/ nullptr);
2388 if (New && New != C)
2389 GV->setInitializer(New);
2390 }
2391
2392 if (deleteIfDead(*GV, NotDiscardableComdats)) {
2393 Changed = true;
2394 continue;
2395 }
2396
2397 Changed |= processGlobal(*GV, GetTLI, LookupDomTree);
2398 }
2399 return Changed;
2400 }
2401
2402 /// Evaluate a piece of a constantexpr store into a global initializer. This
2403 /// returns 'Init' modified to reflect 'Val' stored into it. At this point, the
2404 /// GEP operands of Addr [0, OpNo) have been stepped into.
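/// For illustration (simplified): storing Val = i32 1 through
///   getelementptr ([2 x i32], [2 x i32]* @g, i64 0, i64 1)
/// with Init = [i32 10, i32 20] steps into element 1 and returns the rebuilt
/// initializer [i32 10, i32 1].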
2405 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
2406 ConstantExpr *Addr, unsigned OpNo) {
2407 // Base case of the recursion.
2408 if (OpNo == Addr->getNumOperands()) {
2409 assert(Val->getType() == Init->getType() && "Type mismatch!");
2410 return Val;
2411 }
2412
2413 SmallVector<Constant*, 32> Elts;
2414 if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
2415 // Break up the constant into its elements.
2416 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2417 Elts.push_back(Init->getAggregateElement(i));
2418
2419 // Replace the element that we are supposed to.
2420 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
2421 unsigned Idx = CU->getZExtValue();
2422 assert(Idx < STy->getNumElements() && "Struct index out of range!");
2423 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);
2424
2425 // Return the modified struct.
2426 return ConstantStruct::get(STy, Elts);
2427 }
2428
2429 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
2430 SequentialType *InitTy = cast<SequentialType>(Init->getType());
2431 uint64_t NumElts = InitTy->getNumElements();
2432
2433 // Break up the array into elements.
2434 for (uint64_t i = 0, e = NumElts; i != e; ++i)
2435 Elts.push_back(Init->getAggregateElement(i));
2436
2437 assert(CI->getZExtValue() < NumElts);
2438 Elts[CI->getZExtValue()] =
2439 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
2440
2441 if (Init->getType()->isArrayTy())
2442 return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
2443 return ConstantVector::get(Elts);
2444 }
2445
2446 /// We have decided that Addr (which satisfies the predicate
2447 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
2448 static void CommitValueTo(Constant *Val, Constant *Addr) {
2449 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
2450 assert(GV->hasInitializer());
2451 GV->setInitializer(Val);
2452 return;
2453 }
2454
2455 ConstantExpr *CE = cast<ConstantExpr>(Addr);
2456 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2457 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
2458 }
2459
2460 /// Given a map of address -> value, where addresses are expected to be some form
2461 /// of either a global or a constant GEP, set the initializer for the address to
2462 /// be the value. This performs mostly the same function as CommitValueTo()
2463 /// and EvaluateStoreInto() but is optimized to be more efficient for the common
2464 /// case where the set of addresses are GEPs sharing the same underlying global,
2465 /// processing the GEPs in batches rather than individually.
2466 ///
2467 /// To give an example, consider the following C++ code adapted from the clang
2468 /// regression tests:
2469 /// struct S {
2470 /// int n = 10;
2471 /// int m = 2 * n;
2472 /// S(int a) : n(a) {}
2473 /// };
2474 ///
2475 /// template<typename T>
2476 /// struct U {
2477 /// T *r = &q;
2478 /// T q = 42;
2479 /// U *p = this;
2480 /// };
2481 ///
2482 /// U<S> e;
2483 ///
2484 /// The global static constructor for 'e' will need to initialize 'r' and 'p' of
2485 /// the outer struct, while also initializing the inner 'q' struct's 'n' and 'm'
2486 /// members. This batch algorithm simply uses the general CommitValueTo() method
2487 /// to handle the complex nested S struct initialization of 'q', before
2488 /// processing the outermost members in a single batch. Using CommitValueTo() to
2489 /// handle each member of the outer struct is inefficient when the struct/array
2490 /// is very large, as we would end up creating and destroying constant arrays
2491 /// for each initialization.
2492 /// For the above case, we expect the following IR to be generated:
2493 ///
2494 /// %struct.U = type { %struct.S*, %struct.S, %struct.U* }
2495 /// %struct.S = type { i32, i32 }
2496 /// @e = global %struct.U { %struct.S* gep inbounds (%struct.U, %struct.U* @e,
2497 /// i64 0, i32 1),
2498 /// %struct.S { i32 42, i32 84 }, %struct.U* @e }
2499 /// The %struct.S { i32 42, i32 84 } inner initializer is treated as a complex
2500 /// constant expression, while the other two elements of @e are "simple".
2501 static void BatchCommitValueTo(const DenseMap<Constant*, Constant*> &Mem) {
2502 SmallVector<std::pair<GlobalVariable*, Constant*>, 32> GVs;
2503 SmallVector<std::pair<ConstantExpr*, Constant*>, 32> ComplexCEs;
2504 SmallVector<std::pair<ConstantExpr*, Constant*>, 32> SimpleCEs;
2505 SimpleCEs.reserve(Mem.size());
2506
2507 for (const auto &I : Mem) {
2508 if (auto *GV = dyn_cast<GlobalVariable>(I.first)) {
2509 GVs.push_back(std::make_pair(GV, I.second));
2510 } else {
2511 ConstantExpr *GEP = cast<ConstantExpr>(I.first);
2512 // We don't handle the deeply recursive case using the batch method.
2513 if (GEP->getNumOperands() > 3)
2514 ComplexCEs.push_back(std::make_pair(GEP, I.second));
2515 else
2516 SimpleCEs.push_back(std::make_pair(GEP, I.second));
2517 }
2518 }
2519
2520 // The algorithm below doesn't handle cases like nested structs, so use the
2521 // slower fully general method if we have to.
2522 for (auto ComplexCE : ComplexCEs)
2523 CommitValueTo(ComplexCE.second, ComplexCE.first);
2524
2525 for (auto GVPair : GVs) {
2526 assert(GVPair.first->hasInitializer());
2527 GVPair.first->setInitializer(GVPair.second);
2528 }
2529
2530 if (SimpleCEs.empty())
2531 return;
2532
2533 // We cache a single global's initializer elements in the case where the
2534 // subsequent address/val pair uses the same one. This avoids throwing away and
2535 // rebuilding the constant struct/vector/array just because one element is
2536 // modified at a time.
2537 SmallVector<Constant *, 32> Elts;
2538 Elts.reserve(SimpleCEs.size());
2539 GlobalVariable *CurrentGV = nullptr;
2540
2541 auto commitAndSetupCache = [&](GlobalVariable *GV, bool Update) {
2542 Constant *Init = GV->getInitializer();
2543 Type *Ty = Init->getType();
2544 if (Update) {
2545 if (CurrentGV) {
2546 assert(CurrentGV && "Expected a GV to commit to!");
2547 Type *CurrentInitTy = CurrentGV->getInitializer()->getType();
2548 // We have a valid cache that needs to be committed.
2549 if (StructType *STy = dyn_cast<StructType>(CurrentInitTy))
2550 CurrentGV->setInitializer(ConstantStruct::get(STy, Elts));
2551 else if (ArrayType *ArrTy = dyn_cast<ArrayType>(CurrentInitTy))
2552 CurrentGV->setInitializer(ConstantArray::get(ArrTy, Elts));
2553 else
2554 CurrentGV->setInitializer(ConstantVector::get(Elts));
2555 }
2556 if (CurrentGV == GV)
2557 return;
2558 // Need to clear and set up cache for new initializer.
2559 CurrentGV = GV;
2560 Elts.clear();
2561 unsigned NumElts;
2562 if (auto *STy = dyn_cast<StructType>(Ty))
2563 NumElts = STy->getNumElements();
2564 else
2565 NumElts = cast<SequentialType>(Ty)->getNumElements();
2566 for (unsigned i = 0, e = NumElts; i != e; ++i)
2567 Elts.push_back(Init->getAggregateElement(i));
2568 }
2569 };
2570
2571 for (auto CEPair : SimpleCEs) {
2572 ConstantExpr *GEP = CEPair.first;
2573 Constant *Val = CEPair.second;
2574
2575 GlobalVariable *GV = cast<GlobalVariable>(GEP->getOperand(0));
2576 commitAndSetupCache(GV, GV != CurrentGV);
2577 ConstantInt *CI = cast<ConstantInt>(GEP->getOperand(2));
2578 Elts[CI->getZExtValue()] = Val;
2579 }
2580   // The last initializer in the list still needs to be committed; earlier
2581   // ones were committed as each new global's initializer was encountered.
2582 commitAndSetupCache(CurrentGV, true);
2583 }
2584
2585 /// Evaluate static constructors in the function, if we can. Return true if we
2586 /// can, false otherwise.
2587 static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
2588 TargetLibraryInfo *TLI) {
2589 // Call the function.
2590 Evaluator Eval(DL, TLI);
2591 Constant *RetValDummy;
2592 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
2593 SmallVector<Constant*, 0>());
2594
2595 if (EvalSuccess) {
2596 ++NumCtorsEvaluated;
2597
2598 // We succeeded at evaluation: commit the result.
2599 LLVM_DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
2600 << F->getName() << "' to "
2601 << Eval.getMutatedMemory().size() << " stores.\n");
2602 BatchCommitValueTo(Eval.getMutatedMemory());
2603 for (GlobalVariable *GV : Eval.getInvariants())
2604 GV->setConstant(true);
2605 }
2606
2607 return EvalSuccess;
2608 }
2609
2610 static int compareNames(Constant *const *A, Constant *const *B) {
2611 Value *AStripped = (*A)->stripPointerCasts();
2612 Value *BStripped = (*B)->stripPointerCasts();
2613 return AStripped->getName().compare(BStripped->getName());
2614 }
2615
2616 static void setUsedInitializer(GlobalVariable &V,
2617 const SmallPtrSetImpl<GlobalValue *> &Init) {
2618 if (Init.empty()) {
2619 V.eraseFromParent();
2620 return;
2621 }
2622
2623 // Type of pointer to the array of pointers.
2624 PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0);
2625
2626 SmallVector<Constant *, 8> UsedArray;
2627 for (GlobalValue *GV : Init) {
2628 Constant *Cast
2629 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy);
2630 UsedArray.push_back(Cast);
2631 }
2632 // Sort to get deterministic order.
2633 array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames);
2634 ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size());
2635
2636 Module *M = V.getParent();
2637 V.removeFromParent();
2638 GlobalVariable *NV =
2639 new GlobalVariable(*M, ATy, false, GlobalValue::AppendingLinkage,
2640 ConstantArray::get(ATy, UsedArray), "");
2641 NV->takeName(&V);
2642 NV->setSection("llvm.metadata");
2643 delete &V;
2644 }
2645
2646 namespace {
2647
2648 /// An easy to access representation of llvm.used and llvm.compiler.used.
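/// For reference, these arrays look roughly like (illustrative IR):
///   @llvm.used = appending global [1 x i8*]
///       [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"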
2649 class LLVMUsed {
2650 SmallPtrSet<GlobalValue *, 8> Used;
2651 SmallPtrSet<GlobalValue *, 8> CompilerUsed;
2652 GlobalVariable *UsedV;
2653 GlobalVariable *CompilerUsedV;
2654
2655 public:
2656   LLVMUsed(Module &M) {
2657 UsedV = collectUsedGlobalVariables(M, Used, false);
2658 CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true);
2659 }
2660
2661 using iterator = SmallPtrSet<GlobalValue *, 8>::iterator;
2662 using used_iterator_range = iterator_range<iterator>;
2663
2664   iterator usedBegin() { return Used.begin(); }
2665   iterator usedEnd() { return Used.end(); }
2666
2667   used_iterator_range used() {
2668 return used_iterator_range(usedBegin(), usedEnd());
2669 }
2670
2671   iterator compilerUsedBegin() { return CompilerUsed.begin(); }
2672   iterator compilerUsedEnd() { return CompilerUsed.end(); }
2673
2674   used_iterator_range compilerUsed() {
2675 return used_iterator_range(compilerUsedBegin(), compilerUsedEnd());
2676 }
2677
2678   bool usedCount(GlobalValue *GV) const { return Used.count(GV); }
2679
2680   bool compilerUsedCount(GlobalValue *GV) const {
2681 return CompilerUsed.count(GV);
2682 }
2683
2684   bool usedErase(GlobalValue *GV) { return Used.erase(GV); }
2685   bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); }
2686   bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; }
2687
2688   bool compilerUsedInsert(GlobalValue *GV) {
2689 return CompilerUsed.insert(GV).second;
2690 }
2691
2692   void syncVariablesAndSets() {
2693 if (UsedV)
2694 setUsedInitializer(*UsedV, Used);
2695 if (CompilerUsedV)
2696 setUsedInitializer(*CompilerUsedV, CompilerUsed);
2697 }
2698 };
2699
2700 } // end anonymous namespace
2701
2702 static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
2703 if (GA.use_empty()) // No use at all.
2704 return false;
2705
2706 assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) &&
2707 "We should have removed the duplicated "
2708 "element from llvm.compiler.used");
2709 if (!GA.hasOneUse())
2710 // Strictly more than one use. So at least one is not in llvm.used and
2711 // llvm.compiler.used.
2712 return true;
2713
2714 // Exactly one use. Check if it is in llvm.used or llvm.compiler.used.
2715 return !U.usedCount(&GA) && !U.compilerUsedCount(&GA);
2716 }
2717
2718 static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V,
2719 const LLVMUsed &U) {
2720 unsigned N = 2;
2721 assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) &&
2722 "We should have removed the duplicated "
2723 "element from llvm.compiler.used");
2724 if (U.usedCount(&V) || U.compilerUsedCount(&V))
2725 ++N;
2726 return V.hasNUsesOrMore(N);
2727 }
2728
2729 static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) {
2730 if (!GA.hasLocalLinkage())
2731 return true;
2732
2733 return U.usedCount(&GA) || U.compilerUsedCount(&GA);
2734 }
2735
2736 static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U,
2737 bool &RenameTarget) {
2738 RenameTarget = false;
2739 bool Ret = false;
2740 if (hasUseOtherThanLLVMUsed(GA, U))
2741 Ret = true;
2742
2743 // If the alias is externally visible, we may still be able to simplify it.
2744 if (!mayHaveOtherReferences(GA, U))
2745 return Ret;
2746
2747 // If the aliasee has internal linkage, give it the name and linkage
2748 // of the alias, and delete the alias. This turns:
2749 // define internal ... @f(...)
2750 // @a = alias ... @f
2751 // into:
2752 // define ... @a(...)
2753 Constant *Aliasee = GA.getAliasee();
2754 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
2755 if (!Target->hasLocalLinkage())
2756 return Ret;
2757
2758 // Do not perform the transform if multiple aliases potentially target the
2759 // aliasee. This check also ensures that it is safe to replace the section
2760 // and other attributes of the aliasee with those of the alias.
2761 if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U))
2762 return Ret;
2763
2764 RenameTarget = true;
2765 return true;
2766 }
2767
2768 static bool
2769 OptimizeGlobalAliases(Module &M,
2770 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2771 bool Changed = false;
2772 LLVMUsed Used(M);
2773
2774 for (GlobalValue *GV : Used.used())
2775 Used.compilerUsedErase(GV);
2776
2777 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
2778 I != E;) {
2779 GlobalAlias *J = &*I++;
2780
2781 // Aliases without names cannot be referenced outside this module.
2782 if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage())
2783 J->setLinkage(GlobalValue::InternalLinkage);
2784
2785 if (deleteIfDead(*J, NotDiscardableComdats)) {
2786 Changed = true;
2787 continue;
2788 }
2789
2790 // If the alias can change at link time, nothing can be done - bail out.
2791 if (J->isInterposable())
2792 continue;
2793
2794 Constant *Aliasee = J->getAliasee();
2795 GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts());
2796 // We can't trivially replace the alias with the aliasee if the aliasee is
2797 // non-trivial in some way.
2798 // TODO: Try to handle non-zero GEPs of local aliasees.
2799 if (!Target)
2800 continue;
2801 Target->removeDeadConstantUsers();
2802
2803 // Make all users of the alias use the aliasee instead.
2804 bool RenameTarget;
2805 if (!hasUsesToReplace(*J, Used, RenameTarget))
2806 continue;
2807
2808 J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType()));
2809 ++NumAliasesResolved;
2810 Changed = true;
2811
2812 if (RenameTarget) {
2813 // Give the aliasee the name, linkage and other attributes of the alias.
2814 Target->takeName(&*J);
2815 Target->setLinkage(J->getLinkage());
2816 Target->setDSOLocal(J->isDSOLocal());
2817 Target->setVisibility(J->getVisibility());
2818 Target->setDLLStorageClass(J->getDLLStorageClass());
2819
2820 if (Used.usedErase(&*J))
2821 Used.usedInsert(Target);
2822
2823 if (Used.compilerUsedErase(&*J))
2824 Used.compilerUsedInsert(Target);
2825 } else if (mayHaveOtherReferences(*J, Used))
2826 continue;
2827
2828 // Delete the alias.
2829 M.getAliasList().erase(J);
2830 ++NumAliasesRemoved;
2831 Changed = true;
2832 }
2833
2834 Used.syncVariablesAndSets();
2835
2836 return Changed;
2837 }
2838
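// Find the __cxa_atexit function declared in this module, if any, and verify
// through TargetLibraryInfo that it has the expected prototype.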
static Function *
FindCXAAtExit(Module &M, function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
  // Hack to get a default TLI before we have an actual Function.
  auto FuncIter = M.begin();
  if (FuncIter == M.end())
    return nullptr;
  auto *TLI = &GetTLI(*FuncIter);

  LibFunc F = LibFunc_cxa_atexit;
  if (!TLI->has(F))
    return nullptr;

  Function *Fn = M.getFunction(TLI->getName(F));
  if (!Fn)
    return nullptr;

  // Now get the actual TLI for Fn.
  TLI = &GetTLI(*Fn);

  // Make sure that the function has the correct prototype.
  if (!TLI->getLibFunc(*Fn, F) || F != LibFunc_cxa_atexit)
    return nullptr;

  return Fn;
}

/// Returns whether the given function is an empty C++ destructor and can
/// therefore be eliminated.
/// Note that we assume that other optimization passes have already simplified
/// the code, so we simply check for 'ret'.
static bool cxxDtorIsEmpty(const Function &Fn) {
  // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
  // nounwind, but that doesn't seem worth doing.
  if (Fn.isDeclaration())
    return false;

  for (auto &I : Fn.getEntryBlock()) {
    if (isa<DbgInfoIntrinsic>(I))
      continue;
    if (isa<ReturnInst>(I))
      return true;
    break;
  }
  return false;
}

static bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
  /// Itanium C++ ABI p3.3.5:
  ///
  ///   After constructing a global (or local static) object, that will require
  ///   destruction on exit, a termination function is registered as follows:
  ///
  ///   extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
  ///
  ///   This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
  ///   call f(p) when DSO d is unloaded, before all such termination calls
  ///   registered before this one. It returns zero if registration is
  ///   successful, nonzero on failure.

  // This pass will look for calls to __cxa_atexit where the function is
  // trivial and remove them.
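  // For example, a registration such as the following (illustrative IR; the
  // names @empty_dtor and @obj are hypothetical):
  //   call i32 @__cxa_atexit(void (i8*)* @empty_dtor, i8* @obj,
  //                          i8* @__dso_handle)
  // where @empty_dtor does nothing but return can be deleted outright.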
  bool Changed = false;

  for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
       I != E;) {
    // We're only interested in calls. Theoretically, we could handle invoke
    // instructions as well, but neither llvm-gcc nor clang generate invokes
    // to __cxa_atexit.
    CallInst *CI = dyn_cast<CallInst>(*I++);
    if (!CI)
      continue;

    Function *DtorFn =
        dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
    if (!DtorFn || !cxxDtorIsEmpty(*DtorFn))
      continue;

    // Just remove the call.
    CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
    CI->eraseFromParent();

    ++NumCXXDtorsRemoved;

    Changed = true;
  }

  return Changed;
}

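// Driver shared by both pass-manager entry points. Runs the individual
// cleanups in a loop until a whole round makes no change, since each
// transformation (e.g. resolving an alias) can expose opportunities for the
// others.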
static bool optimizeGlobalsInModule(
    Module &M, const DataLayout &DL,
    function_ref<TargetLibraryInfo &(Function &)> GetTLI,
    function_ref<TargetTransformInfo &(Function &)> GetTTI,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    function_ref<DominatorTree &(Function &)> LookupDomTree) {
  SmallPtrSet<const Comdat *, 8> NotDiscardableComdats;
  bool Changed = false;
  bool LocalChange = true;
  while (LocalChange) {
    LocalChange = false;

    NotDiscardableComdats.clear();
    for (const GlobalVariable &GV : M.globals())
      if (const Comdat *C = GV.getComdat())
        if (!GV.isDiscardableIfUnused() || !GV.use_empty())
          NotDiscardableComdats.insert(C);
    for (Function &F : M)
      if (const Comdat *C = F.getComdat())
        if (!F.isDefTriviallyDead())
          NotDiscardableComdats.insert(C);
    for (GlobalAlias &GA : M.aliases())
      if (const Comdat *C = GA.getComdat())
        if (!GA.isDiscardableIfUnused() || !GA.use_empty())
          NotDiscardableComdats.insert(C);

    // Delete functions that are trivially dead and, where safe, convert
    // their calling convention from ccc to fastcc.
    LocalChange |= OptimizeFunctions(M, GetTLI, GetTTI, GetBFI, LookupDomTree,
                                     NotDiscardableComdats);

    // Optimize global_ctors list.
    LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) {
      return EvaluateStaticConstructor(F, DL, &GetTLI(*F));
    });

    // Optimize non-address-taken globals.
    LocalChange |=
        OptimizeGlobalVars(M, GetTLI, LookupDomTree, NotDiscardableComdats);

    // Resolve aliases, when possible.
    LocalChange |= OptimizeGlobalAliases(M, NotDiscardableComdats);

    // Try to remove trivial global destructors if they have not been removed
    // already.
    Function *CXAAtExitFn = FindCXAAtExit(M, GetTLI);
    if (CXAAtExitFn)
      LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);

    Changed |= LocalChange;
  }

  // TODO: Move all global ctors functions to the end of the module for code
  // layout.

  return Changed;
}

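// New pass manager entry point: pull the per-function analyses out of the
// FunctionAnalysisManager via the module proxy and hand them to the shared
// driver above.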
PreservedAnalyses GlobalOptPass::run(Module &M, ModuleAnalysisManager &AM) {
  auto &DL = M.getDataLayout();
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  auto LookupDomTree = [&FAM](Function &F) -> DominatorTree & {
    return FAM.getResult<DominatorTreeAnalysis>(F);
  };
  auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
    return FAM.getResult<TargetLibraryAnalysis>(F);
  };
  auto GetTTI = [&FAM](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };

  auto GetBFI = [&FAM](Function &F) -> BlockFrequencyInfo & {
    return FAM.getResult<BlockFrequencyAnalysis>(F);
  };

  if (!optimizeGlobalsInModule(M, DL, GetTLI, GetTTI, GetBFI, LookupDomTree))
    return PreservedAnalyses::all();
  return PreservedAnalyses::none();
}

namespace {

struct GlobalOptLegacyPass : public ModulePass {
  static char ID; // Pass identification, replacement for typeid

  GlobalOptLegacyPass() : ModulePass(ID) {
    initializeGlobalOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    if (skipModule(M))
      return false;

    auto &DL = M.getDataLayout();
    auto LookupDomTree = [this](Function &F) -> DominatorTree & {
      return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
    };
    auto GetTLI = [this](Function &F) -> TargetLibraryInfo & {
      return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    };
    auto GetTTI = [this](Function &F) -> TargetTransformInfo & {
      return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    };

    auto GetBFI = [this](Function &F) -> BlockFrequencyInfo & {
      return this->getAnalysis<BlockFrequencyInfoWrapperPass>(F).getBFI();
    };

    return optimizeGlobalsInModule(M, DL, GetTLI, GetTTI, GetBFI,
                                   LookupDomTree);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
  }
};

} // end anonymous namespace

char GlobalOptLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(GlobalOptLegacyPass, "globalopt",
                      "Global Variable Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(GlobalOptLegacyPass, "globalopt",
                    "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() {
  return new GlobalOptLegacyPass();
}