//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffset pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
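///
/// For example (illustrative IR; names are hypothetical):
///   %a = alloca i64
///   %a.i8 = bitcast i64* %a to i8*
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a.i8,
///       i8* bitcast (i64* @g to i8*), i64 8, i1 false)
/// If @g is a constant global and %a is otherwise only read, loads of %a can
/// be redirected to @g.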
static bool
isOnlyCopiedFromConstantMemory(AAResults *AA,
                               Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Simple (non-volatile, non-atomic) loads are always ok; reject
        // anything else.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto *Call = dyn_cast<CallBase>(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (Call->onlyReadsMemory() &&
            (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the alloca is the source operand of the transfer, ignore it since
      // it is only a read (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!AA->pointsToConstantMemory(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantMemory - Return the copy (memcpy/memmove) if the
/// specified alloca is only modified by a copy from a constant global, or
/// nullptr otherwise.  If we can prove this, we can replace any uses of the
/// alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantMemory(AAResults *AA,
                               AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantMemory(AA, AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

/// Returns true if V is dereferenceable for the size of the alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, Align(AI->getAlignment()),
                                            APInt(64, AllocaSize), DL);
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    return IC.replaceOperand(AI, 0, IC.Builder.getInt32(1));
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    if (C->getValue().getActiveBits() <= 64) {
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(AI.getAlign());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It(New);
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
        ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = {NullIdx, NullIdx};
      Instruction *GEP = GetElementPtrInst::CreateInBounds(
          NewTy, New, Idx, New->getName() + ".sub");
      IC.InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return IC.replaceInstUsesWith(AI, GEP);
    }
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    return IC.replaceOperand(AI, 0, V);
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on the target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees a bitcast or GEP, it will
// create a new bitcast or GEP with the new pointer and use them in the load
// instruction.
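//
// For example (illustrative IR; names are hypothetical), when replacing
// %alloca with a constant global @g in a different address space:
//   %gep = getelementptr i8, i8* %alloca, i64 4
//   %v = load i8, i8* %gep
// the GEP is recreated on the new pointer (keeping @g's address space) and
// the load is rewritten to use it, so no addrspacecast is ever introduced.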
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace

void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}

Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(I->getType(), V, "", false,
                              IC.getDataLayout().getABITypeAlign(I->getType()));
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // Move all allocas of zero-byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinSize() == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation())
        return replaceOperand(AI, 0,
            ConstantInt::get(AI.getArraySize()->getType(), 1));

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType())
                    .getKnownMinSize() != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        const Align MaxAlign = std::max(EntryAI->getAlign(), AI.getAlign());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  // Check to see if this allocation is only modified by a memcpy/memmove from
  // a constant whose alignment is equal to or exceeds that of the allocation.
  // If this is the case, we can change all users to use the constant global
  // instead.  This is commonly produced by the CFE by constructs like "void
  // foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A' is only subsequently
  // read.
  SmallVector<Instruction *, 4> ToDelete;
  if (MemTransferInst *Copy =
          isOnlyCopiedFromConstantMemory(AA, &AI, ToDelete)) {
    Align AllocaAlign = AI.getAlign();
    Align SourceAlign = getOrEnforceKnownAlignment(
        Copy->getSource(), AllocaAlign, DL, &AI, &AC, &DT);
    if (AllocaAlign <= SourceAlign &&
        isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
      LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
      LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
      for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
        eraseInstFromFunction(*ToDelete[i]);
      Value *TheSrc = Copy->getSource();
      auto *SrcTy = TheSrc->getType();
      auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                      SrcTy->getPointerAddressSpace());
      Value *Cast =
        Builder.CreatePointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
      if (AI.getType()->getPointerAddressSpace() ==
          SrcTy->getPointerAddressSpace()) {
        Instruction *NewI = replaceInstUsesWith(AI, Cast);
        eraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }

      PointerReplacer PtrReplacer(*this);
      PtrReplacer.replacePointer(AI, Cast);
      ++NumGlobalCopies;
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}

/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
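///
/// For example (illustrative IR; names are hypothetical), retyping an i32
/// load as a float load:
///   %v = load i32, i32* %p
/// becomes
///   %p.cast = bitcast i32* %p to float*
///   %v = load float, float* %p.cast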
LoadInst *InstCombiner::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                             const Twine &Suffix) {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType()->getPointerElementType() == NewTy &&
        NewPtr->getType()->getPointerAddressSpace() == AS))
    NewPtr = Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));

  LoadInst *NewLoad = Builder.CreateAlignedLoad(
      NewTy, NewPtr, LI.getAlign(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  copyMetadataForLoad(*NewLoad, LI);
  return NewLoad;
}

/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlign(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// Returns true if the instruction represents a minmax pattern like:
///   select ((cmp load V1, load V2), V1, V2).
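///
/// For example (illustrative IR; names are hypothetical):
///   %l1 = load float, float* %p1
///   %l2 = load float, float* %p2
///   %cmp = fcmp olt float %l1, %l2
///   %sel = select i1 %cmp, float* %p1, float* %p2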
static bool isMinMaxWithLoads(Value *V, Type *&LoadTy) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  LoadTy = L1->getType();
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}

/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
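///
/// For example (illustrative IR; names are hypothetical), a float load that
/// feeds only stores:
///   %v = load float, float* %p
///   store float %v, float* %q
/// is canonicalized to an i32 load and store of the same width.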
static Instruction *combineLoadToOperationType(InstCombiner &IC,
                                               LoadInst &LI) {
  // FIXME: With some care, we could probably handle both volatile and ordered
  // atomic loads here, but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  // Do not perform canonicalization if minmax pattern is found (to avoid
  // infinite loop).
  Type *Dummy;
  if (!Ty->isIntegerTy() && Ty->isSized() && !isa<ScalableVectorType>(Ty) &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.typeSizeEqualsStoreSize(Ty) && !DL.isNonIntegralPointerType(Ty) &&
      !isMinMaxWithLoads(
          peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true),
          Dummy)) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = IC.combineLoadToNewType(
          LI, Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto* CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = IC.combineLoadToNewType(LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

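/// Unpack a load of an aggregate (struct or array) into loads of its
/// elements, recombined with insertvalue.
///
/// For example (illustrative IR; names are hypothetical):
///   %v = load { i32, i32 }, { i32, i32 }* %p
/// becomes two i32 loads through element GEPs, followed by insertvalues.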
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // loads here, but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
                                                  ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    const auto Align = LI.getAlign();
    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(
          ST->getElementType(i), Ptr,
          commonAlignment(Align, SL->getElementOffset(i)), Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    const auto Align = LI.getAlign();

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
                                             commonAlignment(Align, Offset),
                                             Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false. Suitably-sized constant global values and allocas are examples of
// objects whose size can be determined this way.
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
    GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
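//
// For example (illustrative IR; names are hypothetical):
//   %idx = getelementptr inbounds [1 x i32], [1 x i32]* %obj, i64 0, i64 %x
//   %v = load i32, i32* %idx
// Any value of %x other than 0 would make the inbounds GEP poison and the
// load UB, so %x can be replaced with 0.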
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
        ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

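/// Returns true if the store writes through a null pointer (possibly behind a
/// GEP) in an address space where null is not a defined address, so the store
/// is undefined behavior and may be simplified away.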
static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  Align KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlign(LI.getType()), DL, &LI, &AC, &DT);
  if (KnownAlign > LI.getAlign())
    LI.setAlignment(KnownAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
      Worklist.push(NewGEPI);
      return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads.  Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these xforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable.  We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      Align Alignment = LI.getAlign();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Alignment);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Alignment);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(2));

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(1));
    }
  }
  return nullptr;
}

/// Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///     %E0 = extractelement <2 x double> %U, i32 0
///     %V0 = insertvalue [2 x double] undef, double %E0, 0
///     %E1 = extractelement <2 x double> %U, i32 1
///     %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
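///
/// For example (illustrative IR; names are hypothetical):
///   %b = bitcast float %f to i32
///   store i32 %b, i32* %p
/// becomes
///   %p.cast = bitcast i32* %p to float*
///   store float %f, float* %p.cast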
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care, we could probably handle both volatile and ordered
  // atomic stores here, but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

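/// Unpack a store of an aggregate (struct or array) into stores of its
/// elements.
///
/// For example (illustrative IR; names are hypothetical):
///   store { i32, i32 } %v, { i32, i32 }* %p
/// becomes two extractvalue/store pairs through element GEPs.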
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // stores here, but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break stores with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    const auto Align = SI.getAlign();

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = commonAlignment(Align, SL->getElementOffset(i));
      llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    const auto Align = SI.getAlign();

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = commonAlignment(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where select is minmax:
/// select ((cmp load V1, load V2), V1, V2).
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC,
                                                StoreInst &SI) {
  // bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // load? integer?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  Type *CmpLoadTy;
  if (!isMinMaxWithLoads(LoadAddr, CmpLoadTy))
    return false;

  // Make sure the type would actually change.
  // This condition can be hit with chains of bitcasts.
  if (LI->getType() == CmpLoadTy)
    return false;

  // Make sure we're not changing the size of the load/store.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(LI->getType()) !=
      DL.getTypeStoreSizeInBits(CmpLoadTy))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               peekThroughBitcast(SI->getPointerOperand()) != LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = IC.combineLoadToNewType(*LI, CmpLoadTy);
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  const Align KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlign(Val->getType()), DL, &SI, &AC, &DT);
  if (KnownAlign > SI.getAlign())
    SI.setAlignment(KnownAlign);

  // Try to unpack an aggregate store into stores of its elements.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.push(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the pointer operand is an alloca with a single use, zap the store;
  // that makes the alloca dead as well.
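  // For example (illustrative IR; nothing else uses %a):
  //   %a = alloca i32
  //   store i32 %x, i32* %a   ; sole use of %a -> store and alloca are dead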
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(Ptr))
    return eraseInstFromFunction(SI);

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
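  // For example (illustrative IR for back-to-back bitfield updates; the value
  // names are placeholders):
  //   store i32 %f0, i32* %p
  //   %f1 = or i32 %f0, 16    ; arithmetic only, no intervening memory access
  //   store i32 %f1, i32* %p  ; makes the first store dead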
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile/ordered, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        // Manually add back the original store to the worklist now, so it will
        // be processed after the operands of the removed store, as this may
        // expose additional DSE opportunities.
        Worklist.push(&SI);
        eraseInstFromFunction(*PrevSI);
        return nullptr;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is the
    // value being stored and it was loaded from the pointer we're storing to,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null         -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<UndefValue>(Val))
      return replaceOperand(SI, 0, UndefValue::get(Val->getType()));
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  return nullptr;
}

/// Try to transform:
///   if () { *P = v1; } else { *P = v2 }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
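///
/// A sketch of the if/then/else case on illustrative IR (the block and value
/// names are placeholders, not from a particular test):
///   then:
///     store i32 %v1, i32* %p
///     br label %merge
///   else:
///     store i32 %v2, i32* %p
///     br label %merge
/// becomes:
///   merge:
///     %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///     store i32 %storemerge, i32* %p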
bool InstCombiner::mergeStoreIntoSuccessor(StoreInst &SI) {
  if (!SI.isUnordered())
    return false; // This code has not been audited for volatile/ordered cases.

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if all of the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to DestBB and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  // The debug locations of the original instructions might differ. Merge them.
  DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
                                                     OtherStore->getDebugLoc());
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
    PN->setDebugLoc(MergedLoc);
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI =
      new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
                    SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(MergedLoc);

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}